repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/rewind_info.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class rewind_info():
def __init__(self, pivots, dx, dq):
self._pivots = pivots
self._dx = dx
self._dq = dq
@property
def pivots(self):
return self._pivots
@property
def dx(self):
return self._dx
@property
def dq(self):
return self._dq
| 892 | 25.264706 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/memory_manager.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import psutil
import math
class memory_manager():
def __init__(self, K, J, I=0):
self.matrix_size = (K+I) * J * 8
mem = psutil.virtual_memory()
self.max_bs = math.floor(mem.free / self.matrix_size - 5)
self.magic_numbers = [1,1.5,2,2.75,4,6.17,10,16.88,29.34]
def num_bases_to_remove(self):
mem = psutil.virtual_memory()
bs = math.floor(mem.free/self.matrix_size - 5)
return -bs if bs < 0 else 0
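# Minimal usage sketch (illustrative, not part of the solver; K, J, I below are made-up
# dimensions): matrix_size is the byte size of one (K+I) x J float64 basis matrix, and
# num_bases_to_remove() reports how many stored bases exceed what currently fits in free RAM.
if __name__ == "__main__":
    mm = memory_manager(K=10, J=5, I=2)
    print(mm.matrix_size, mm.max_bs, mm.num_bases_to_remove())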
| 1,042 | 30.606061 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/matlab_utils.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def ismember(a,b):
return np.isin(a, b, assume_unique=True)
def find(a):
return np.where(a)[0]
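# Minimal usage sketch (illustrative arrays, not part of the solver): find() mirrors
# MATLAB's find on a boolean mask and ismember() mirrors MATLAB's ismember; note that
# ismember assumes both inputs contain unique elements.
if __name__ == "__main__":
    a = np.array([3, 1, 4, 7, 5])
    print(find(a > 2))           # -> [0 2 3 4]
    print(ismember(a, [1, 5]))   # -> [False  True False False  True]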
| 700 | 30.863636 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/parametric_line.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from enum import Enum
from .lp_tools.LP_formulation import get_value_by_name
class line_type(Enum):
SCLP_x0 = -1
SCLP_main = 0
SCLP_sub = 1
SCLP_orthogonal = 2
class parametric_line():
def __init__(self, x_0, q_N, theta_bar, T=0, del_T =1, del_x_0=None, del_q_N=None, Kset_0=None, Jset_N=None, B1=None, B2=None, ltype = line_type.SCLP_main):
self._x_0 = x_0
self._q_N = q_N
self._theta_bar = theta_bar
self._T = T
self._del_T = del_T
self._del_x_0 = del_x_0
self._del_q_N = del_q_N
self._Kset_0 = Kset_0
self._Jset_N = Jset_N
self._B1 = B1
self._B2 = B2
self._ltype = ltype
self._theta = 0
self._back_direction = False
def build_boundary_sets(self, klist, jlist):
if self._del_x_0 is None:
self._Kset_0 = klist[self._x_0 > 0]
else:
self._Kset_0 = klist[np.logical_or(self._x_0 > 0, self._del_x_0 > 0)]
if self._del_q_N is None:
self._Jset_N = jlist[self._q_N > 0]
else:
self._Jset_N = jlist[np.logical_or(self._q_N > 0, self._del_q_N > 0)]
def scale(self, factor):
self._theta_bar *= factor
self._theta *= factor
self._del_T /= factor
if self._del_x_0 is not None:
self._del_x_0 /= factor
if self._del_q_N is not None:
self._del_q_N /= factor
@property
def x_0(self):
return self._x_0
@property
def q_N(self):
return self._q_N
@property
def del_x_0(self):
if self._back_direction and self._del_x_0 is not None:
return -self._del_x_0
else:
return self._del_x_0
@property
def del_q_N(self):
if self._back_direction and self._del_q_N is not None:
return -self._del_q_N
else:
return self._del_q_N
@property
def T(self):
return self._T
@T.setter
def T(self, value):
self._T = value
@property
def del_T(self):
if self._back_direction:
return -self._del_T
else:
return self._del_T
@property
def Kset_0(self):
return self._Kset_0
@property
def Jset_N(self):
return self._Jset_N
@property
def theta_bar(self):
return self._theta_bar
@theta_bar.setter
def theta_bar(self, value):
if value < self._theta:
self._back_direction = True
else:
self._back_direction = False
self._theta_bar = value
@property
def B1(self):
return self._B1
@property
def B2(self):
return self._B2
@property
def theta(self):
return self._theta
def is_main(self):
return self._ltype == line_type.SCLP_main
def is_sub(self):
return self._ltype == line_type.SCLP_sub
def is_orthogonal(self):
return self._ltype == line_type.SCLP_orthogonal
def is_end(self, delta):
if self._back_direction:
return self._theta - delta <= self._theta_bar
else:
return self._theta + delta >= self._theta_bar
def forward_to_end(self):
self._forward_to(self._theta_bar - self._theta)
def _forward_to(self, delta):
if self._del_x_0 is not None:
self._x_0 += self._del_x_0 * delta
if self._del_q_N is not None:
self._q_N += self._del_q_N * delta
self._T += self._del_T * delta
self._theta += delta
def forward_to(self, delta):
if self._back_direction:
self._backward_to(delta)
else:
self._forward_to(delta)
def backward_to(self, delta):
if self._back_direction:
self._forward_to(delta)
else:
self._backward_to(delta)
def _backward_to(self, delta):
if self._del_x_0 is not None:
self._x_0 -= self._del_x_0 * delta
if self._del_q_N is not None:
self._q_N -= self._del_q_N * delta
self._T -= self._del_T * delta
self._theta -= delta
def get_orthogonal_line(self, theta_bar= 1, type=1):
del_x = None
del_q = None
if type == 1 or type == 3:
del_x = np.zeros_like(self._x_0)
del_x[self._Kset_0-1] = np.random.rand(len(self._Kset_0)) - 0.5
theta_bar =min(theta_bar, 1/np.max(np.divide(-del_x, self._x_0, where = np.logical_and(del_x < 0, self._Kset_0))))
if type == 2 or type == 3:
del_q = np.zeros_like(self._q_N)
del_q[self._Jset_N-1] = np.random.rand(len(self._Jset_N)) - 0.5
theta_bar = min(theta_bar, 1/np.max(np.divide(-del_q, self._q_N, where=np.logical_and(del_q < 0, self._Jset_N))))
return parametric_line(self._x_0, self._q_N, theta_bar, self._theta, 0, del_x, del_q,
self._Kset_0, self._Jset_N, None, None, line_type.SCLP_orthogonal)
def get_x0_parline(self, solution, v1, v_x0):
lk1 = solution.klist == v1
del_x_0 = np.zeros_like(self._x_0)
del_x_0[lk1] = v_x0
return parametric_line(self._x_0, self._q_N, self._T, self._T, 0, del_x_0, np.zeros_like(self._q_N),
self._Kset_0, self._Jset_N, None, None, line_type.SCLP_x0)
@staticmethod
def get_subproblem_parametric_line(basis, solution, v1, v2, AAN1, AAN2, pbaseB1red, pbaseB2red):
x_0 = np.zeros(solution.KK, order='C')
q_N = np.zeros(solution.JJ, order='C')
del_x_0 = np.zeros(solution.KK, order='C')
del_q_N = np.zeros(solution.JJ, order='C')
# Boundary values for one sided subproblem, collision at t=0
if AAN1 is None:
if not isinstance(v1, list):
# The case of v1 > 0, collision case iv_a
if v1 > 0:
dx_DD_v1 = get_value_by_name(basis, v1, True)
lk1 = solution.klist == v1
x_0[lk1] = -dx_DD_v1
del_x_0[lk1] = dx_DD_v1
# The case of v1 < 0, collision case iii_a
elif v1 < 0:
dq_B2_v1 = get_value_by_name(AAN2, v1, False)
lj1 = solution.jlist == -v1
del_q_N[lj1] = -dq_B2_v1
#
# Boundary values for one sided subproblem, collision at t=T
elif AAN2 is None:
if not isinstance(v2, list):
# The case of v2 > 0, collision case iii_b
if v2 > 0:
dx_B1_v2 = get_value_by_name(AAN1, v2, True)
lk2 = solution.klist == v2
del_x_0[lk2] = -dx_B1_v2
# The case of v2 < 0, collision case iv_b
elif v2 < 0:
dq_DD_v2 = get_value_by_name(basis, v2, False)
lj2 = solution.jlist == -v2
q_N[lj2] = -dq_DD_v2
del_q_N[lj2] = dq_DD_v2
#
# Boundary values for two sided subproblem, collision at 0<t<T
# setting boundaries for the second exiting variable v1
else:
if not isinstance(v1, list):
if v1 > 0:
dx_DD_v1 = get_value_by_name(basis, v1, True)
lk1 = solution.klist == v1
x_0[lk1] = -dx_DD_v1
dx_B1_v1 = get_value_by_name(AAN1, v1, True)
del_x_0[lk1] = -0.5 * dx_B1_v1 + dx_DD_v1
elif v1 < 0:
dq_B2_v1 = get_value_by_name(AAN2, v1, False)
lj1 = solution.jlist == -v1
del_q_N[lj1] = -0.5 * dq_B2_v1
# setting boundaries for the first exiting variable v2
if not isinstance(v2, list):
if v2 > 0:
dx_B1_v2 = get_value_by_name(AAN1, v2, True)
lk2 = solution.klist == v2
del_x_0[lk2] = -0.5 * dx_B1_v2
elif v2 < 0:
dq_DD_v2 = get_value_by_name(basis, v2, False)
lj2 = solution.jlist == -v2
q_N[lj2] = -dq_DD_v2
dq_B2_v2 = get_value_by_name(AAN2, v2, False)
del_q_N[lj2] = -0.5 * dq_B2_v2 + dq_DD_v2
par_line = parametric_line(x_0, q_N, 1, 1, 0, del_x_0, del_q_N, None, None, pbaseB1red, pbaseB2red, line_type.SCLP_sub)
par_line.build_boundary_sets(solution.klist, solution.jlist)
return par_line
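# Minimal usage sketch (illustrative numbers, assuming the package's relative imports
# resolve, e.g. when run with python -m): the line keeps (x_0, q_N, T) as affine functions
# of theta, and forward_to()/backward_to() move theta along the direction vectors.
if __name__ == "__main__":
    line = parametric_line(np.array([1.0, 0.0]), np.array([0.0, 2.0]), theta_bar=1.0,
                           T=0.0, del_T=1.0, del_x_0=np.array([0.5, 0.0]))
    line.forward_to(0.4)   # theta: 0 -> 0.4, so x_0 += 0.4*del_x_0 and T += 0.4*del_T
    print(line.x_0, line.T, line.is_end(0.7))  # -> [1.2 0. ] 0.4 True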
| 9,143 | 33.37594 | 160 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/pivot_storage.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, overload, Iterable, List
from collections import Counter
import numpy as np
class pivot_iterator(Iterator):
def __init__(self, iter, reversed = False):
self._iterable = iter
self._reversed = reversed
if not self._reversed:
self._current = 0
else:
self._current = len(self._iterable)
def __next__(self):
if not self._reversed:
if self._current >= len(self._iterable):
raise StopIteration
else:
self._current += 1
return self._iterable.__getitem__(self._current - 1)
else:
if self._current <= 0:
raise StopIteration
else:
self._current -= 1
return self._iterable.__getitem__(self._current)
def __iter__(self):
return self
class pivot_storage():
__slots__ = ['_in','_out']
def __init__(self, outpivots = None, inpivots= None):
super().__init__()
if inpivots is None:
self._in = []
self._out = []
else:
self._in = inpivots
self._out = outpivots
def copy(self):
return pivot_storage(self._out.copy(),self._in.copy())
@property
def inpivots(self):
return self._in
@property
def outpivots(self):
return self._out
def clear(self) -> None:
self._in.clear()
self._out.clear()
def append(self, object: list) -> None:
self._in.append(object[1])
self._out.append(object[0])
def extend(self, other) -> None:
if hasattr(self, '_in'):
self._in.extend(other.inpivots)
self._out.extend(other.outpivots)
else:
self._in = other.inpivots.copy()
self._out = other.outpivots.copy()
def insert(self, index: int, object) -> None:
self._in.insert(index, object[1])
self._out.insert(index, object[0])
def reverse(self) -> None:
self._in.reverse()
self._out.reverse()
def __len__(self) -> int:
return len(self._out)
def __iter__(self):
return pivot_iterator(self)
@overload
def __getitem__(self, i: int):
return [self._out[i], self._in[i]]
@overload
def __getitem__(self, s: slice):
return pivot_storage(self._out.__getitem__(s), self._in.__getitem__(s))
def __getitem__(self, i: int):
if isinstance(i, slice):
return pivot_storage(self._out.__getitem__(i), self._in.__getitem__(i))
else:
return [self._out[i], self._in[i]]
@overload
def __setitem__(self, i: slice, o) -> None:
self._in.__setitem__(i, o.inpivots)
self._out.__setitem__(i, o.outpivots)
@overload
    def __setitem__(self, i: int, o) -> None:
        self._in.__setitem__(i, o[1])
        self._out.__setitem__(i, o[0])
def __setitem__(self, i: int, o) -> None:
if isinstance(i, slice):
self._in.__setitem__(i, o.inpivots)
self._out.__setitem__(i, o.outpivots)
else:
self._in.__setitem__(i, o[1])
self._out.__setitem__(i, o[0])
def __delitem__(self, i) -> None:
self._in.__delitem__(i)
self._out.__delitem__(i)
def __add__(self, other):
return pivot_storage(self._out.__add__(other.outpivots), self._in.__add__(other.inpivots))
def __contains__(self, o) -> bool:
for i, x in enumerate(self._out):
if x == o[0]:
if self._in[i] == o[1]:
return True
return False
def __reversed__(self):
return pivot_iterator(self, True)
def get_out_difference(self, N1, N2):
if N2 - N1 == 2:
return {self._out[N1],self._out[N1+1]}.difference({self._in[N1],self._in[N1+1]})
else:
diff = Counter(self._out[N1:N2]) - Counter(self._in[N1:N2])
return list(diff.elements())
def get_in_difference(self, N1, N2):
if N2 - N1 == 2:
return {self._in[N1],self._in[N1+1]}.difference({self._out[N1],self._out[N1+1]})
else:
diff = Counter(self._in[N1:N2]) - Counter(self._out[N1:N2])
return list(diff.elements())
def remove_pivots(self, N1, N2):
c1 = Counter(self._in[N1:N2])
c2 = Counter(self._out[N1:N2])
diff1 = c1 - c2
diff2 = c2 - c1
if N1 >=0:
if N2 < len(self._in):
self._in = self._in[:N1]+list(diff1.elements())+self._in[N2:]
self._out = self._out[:N1] + list(diff2.elements()) + self._out[N2:]
elif N2 == len(self._in):
self._in = self._in[:N1] + list(diff1.elements())
self._out = self._out[:N1] + list(diff2.elements())
else:
self._in = self._in[:N1]
self._out = self._out[:N1]
else:
self._in = self._in[N2:]
self._out = self._out[N2:]
def get_prim_name_at0(self, place, name):
if place == 0:
return name
else:
c1 = Counter(self._in[:place])
c2 = Counter(self._out[:place])
diff1 = c1 - c2
diff2 = c2 - c1
a1 = np.setdiff1d(name,np.asarray(list(diff1.elements())), assume_unique = True)
a2 = np.union1d(a1, np.asarray(list(diff2.elements())))
return a2
def get_prim_name_atN(self, place, name):
if place == len(self._in):
return name
else:
c1 = Counter(self._in[place-1:])
c2 = Counter(self._out[place-1:])
diff1 = c1 - c2
diff2 = c2 - c1
a1 = np.setdiff1d(name,np.asarray(list(diff2.elements())), assume_unique = True)
a2 = np.union1d(a1, np.asarray(list(diff1.elements())))
return a2
def replace_pivots(self, N1, N2, pivots):
if N1 >= 0:
self._in = self._in[:N1] + pivots.inpivots + self._in[N2:]
self._out = self._out[:N1] + pivots.outpivots + self._out[N2:]
else:
self._in = pivots.inpivots + self._in[N2:]
self._out = pivots.outpivots + self._out[N2:]
def get_previous_in(self, n):
v = self._out[n]
w = self._in[:n+1][::-1]
if v in w:
return n - w.index(v)
else:
return None
def find_N1_N2_around(self, Nlist, N1=None, N2=None, N1trials=10, N2trials=10):
if N1 is None:
N1 = Nlist[0] - 1
if N1 < 0:
N1 = 0
if N2 is None:
N2 = Nlist[-1] + 1
if N2 >= len(self._in):
N2 = len(self._in) - 1
diff = Counter(self._out[N1:N2]) - Counter(self._in[N1:N2])
if len(list(diff.elements())) <=2:
return (N1,N2)
else:
for i in range(N1, max(N1-N1trials, -1), -1):
for j in range(N2, min(N2 + N2trials, len(self._in))):
diff = Counter(self._out[i:j]) - Counter(self._in[i:j])
if len(list(diff.elements())) <= 2:
return (i, j)
return None
def get_next_in(self, n):
v = self._out[n]
w = self._in[n:]
if v in w:
return self._in.index(v,n)
else:
return None
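# Minimal usage sketch (illustrative variable names, not part of the solver): each pivot
# stores a (leaving, entering) pair, and get_out_difference() reports the net set of
# variables that left the basis over a window of pivots.
if __name__ == "__main__":
    ps = pivot_storage()
    ps.append([1, -2])   # variable 1 leaves, variable -2 enters
    ps.append([-2, 3])   # variable -2 leaves, variable 3 enters
    print(len(ps), ps[0], ps.get_out_difference(0, 2))  # -> 2 [1, -2] {1}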
| 7,981 | 30.674603 | 98 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/col_info_stack.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class col_info_stack():
def __init__(self, clear_after=5):
self._data = []
self._last = None
self._clear_after = clear_after
def pop(self):
if len(self._data) > 0:
col = self._data.pop()
if len(self._data) > 0:
self._last = self._data[-1]
else:
self._last = None
return col
else:
return None
def push(self, info):
self._data.append(info)
self._last = info
self._auto_clear()
def clear(self):
self._data.clear()
def is_empty(self):
return len(self._data) == 0
def __len__(self):
return len(self._data)
@property
def last(self):
return self._last
def _auto_clear(self):
if len(self._data) > self._clear_after:
cols_to_check = self._data[-self._clear_after:]
for col in cols_to_check:
if col.had_resolution or col.alternative is not None:
return
self._data = cols_to_check
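# Minimal usage sketch (illustrative payloads, not part of the solver): the stack tracks
# the most recent collision info; note that last is read here before pop() runs.
if __name__ == "__main__":
    stack = col_info_stack(clear_after=5)
    stack.push("collision A")
    stack.push("collision B")
    print(stack.last, len(stack), stack.pop())  # -> collision B 2 collision B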
| 1,659 | 27.135593 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/calc_order_ratio.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .matlab_utils import find
#check indexing
def calc_order_ratio(v1, v2, N1, N2, solution, param_line, delta):
correct = True
if v1 > 0 and v2 > 0:
k1 = find(solution.klist == v1)
dx1 = solution.state.dx[k1, N1]
if dx1 > 0:
correct = False
print('Incorrect dx!', k1, N1)
x1 = solution.get_specific_state(N1+1, k1, True, False, param_line) + delta * \
solution.get_specific_state(N1+1, k1, True, True, param_line)
k2 = find(solution.klist == v2)
dx2 = solution.state.dx[k2, N1]
if dx2 > 0:
correct = False
print('Incorrect dx!', k2, N1)
x2 = solution.get_specific_state(N1 + 1, k2, True, False, param_line) + delta * \
solution.get_specific_state(N1 + 1, k2, True, True, param_line)
return (x1 / dx1) / (x2 / dx2), correct
elif v1 < 0 and v2 < 0:
j1 = find(solution.jlist == -v1)
dq1 = solution.state.dq[j1, N2]
if dq1 > 0:
correct = False
print('Incorrect dq!', j1, N2)
q1 = solution.get_specific_state(N2, j1, False, False, param_line) + delta * \
solution.get_specific_state(N2, j1, False, True, param_line)
j2 = find(solution.jlist == -v2)
dq2 = solution.state.dq[j2, N2]
if dq2 > 0:
correct = False
print('Incorrect dq!', j2, N2)
q2 = solution.get_specific_state(N2, j2, False, False, param_line) + delta * \
solution.get_specific_state(N2, j2, False, True, param_line)
return (q2 / dq2) / (q1 / dq1), correct
elif v1 > 0 and v2 < 0:
k1 = find(solution.klist == v1)
dx1 = solution.state.dx[k1, N1]
if dx1 > 0:
correct = False
print('Incorrect dx!', k1, N1)
x1 = solution.get_specific_state(N1+1, k1, True, False, param_line) + delta * \
solution.get_specific_state(N1+1, k1, True, True, param_line)
j2 = find(solution.jlist == -v2)
dq2 = solution.state.dq[j2, N2]
if dq2 > 0:
correct = False
print('Incorrect dq!', j2, N2)
q2 = solution.get_specific_state(N2, j2, False, False, param_line) + delta * \
solution.get_specific_state(N2, j2, False, True, param_line)
t_interval = np.sum(solution.state.tau[N1 + 1:N2]) + delta * np.sum(solution.state.dtau[N1 + 1:N2])
return -(x1 / dx1 + q2 / dq2) / t_interval, correct
elif v1 < 0 and v2 > 0:
j1 = find(solution.jlist == -v1)
dq1 = solution.state.dq[j1, N2]
if dq1 > 0:
correct = False
print('Incorrect dq!', j1, N2)
q1 = solution.get_specific_state(N2, j1, False, False, param_line) + delta * \
solution.get_specific_state(N2, j1, False, True, param_line)
k2 = find(solution.klist == v2)
dx2 = solution.state.dx[k2, N1]
if dx2 > 0:
correct = False
print('Incorrect dx!', k2, N1)
x2 = solution.get_specific_state(N1 + 1, k2, True, False, param_line) + delta * \
solution.get_specific_state(N1 + 1, k2, True, True, param_line)
t_interval = np.sum(solution.state.tau[N1 + 1:N2]) + delta * np.sum(solution.state.dtau[N1 + 1:N2])
return -t_interval / (q1 / dq1 + x2 / dx2), correct
| 3,947 | 44.37931 | 107 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/sparse_matrix_constructor.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.sparse import coo_matrix
class sparse_matrix_constructor():
def __init__(self, data, indexes, row_num, copy=False):
if data is None:
self._data = []
self._indexes = []
else:
if copy:
self._data = data.copy()
self._indexes = indexes.copy()
else:
self._data = [data]
self._indexes = [indexes]
self._row_num = row_num
@property
def data(self):
return self._data
@property
def indexes(self):
return self._indexes
@property
def row_num(self):
return self._row_num
#'#@profile
def insert(self, after, data, indexes):
if after == -1:
self._data = data + self._data
self._indexes = indexes + self._indexes
elif after >= len(self._data) -1:
self._data = self._data + data
self._indexes = self._indexes + indexes
else:
self._data = self._data[:after + 1] + data + self._data[after + 1:]
self._indexes = self._indexes[:after + 1] + indexes + self._indexes[after + 1:]
#'#@profile
def remove(self, from_, to_):
if from_ <= 0:
self._data = self._data[to_:]
self._indexes = self._indexes[to_:]
elif to_ >= len(self._data)-1:
self._data = self._data[:from_]
self._indexes = self._indexes[:from_]
else:
self._data = self._data[:from_] + self._data[to_:]
self._indexes = self._indexes[:from_] + self._indexes[to_:]
#'#@profile
def replace(self, from_, to_, data, indexes):
if from_ <= 0:
self._data = data + self._data[to_:]
self._indexes = indexes + self._indexes[to_:]
elif to_ >= len(self._data):
self._data = self._data[:from_] + data
self._indexes = self._indexes[:from_] + indexes
else:
self._data = self._data[:from_] + data + self._data[to_:]
self._indexes = self._indexes[:from_] + indexes + self._indexes[to_:]
#'#@profile
def get_sub_matrix(self, from_, to_):
if from_ <= 0:
return sparse_matrix_constructor(self._data[:to_], self._indexes[:to_], self._row_num, True)
elif to_ >= len(self._data):
return sparse_matrix_constructor(self._data[from_:], self._indexes[from_:], self._row_num, True)
else:
return sparse_matrix_constructor(self._data[from_:to_], self._indexes[from_:to_], self._row_num, True)
#'#@profile
def insert_matrix(self, after, other):
if isinstance(other, sparse_matrix_constructor):
if other.row_num != self._row_num:
raise ValueError('Row numbers must be equal!')
self.insert(after, other.data, other.indexes)
#'#@profile
def append(self, other):
if isinstance(other, sparse_matrix_constructor):
if other.row_num != self._row_num:
raise ValueError('Row numbers must be equal!')
self._data = self._data + other.data
self._indexes = self._indexes + other.indexes
#'#@profile
def prepend(self, other):
if isinstance(other, sparse_matrix_constructor):
if other.row_num != self._row_num:
raise ValueError('Row numbers must be equal!')
self._data = other.data + self._data
self._indexes = other.indexes + self._indexes
#'#@profile
def replace_matrix(self, from_, to_, other):
if isinstance(other, sparse_matrix_constructor):
if other.row_num != self._row_num:
raise ValueError('Row numbers must be equal!')
self.replace(from_, to_, other.data, other.indexes)
#'#@profile
def get_coo_matrix(self):
col_num = len(self._indexes)
if col_num > 1:
cols = np.concatenate([np.full_like(p,i) for i,p in enumerate(self._indexes)])
data = np.concatenate(self._data, axis=0)
rows = np.concatenate(self._indexes, axis=0)
return coo_matrix((data,(rows,cols)),shape=(self._row_num,col_num))
else:
return coo_matrix((self._data[0], (self._indexes[0], np.zeros(len(self._indexes[0])))), shape=(self._row_num, col_num))
#'#@profile
def get_matrix(self):
return self.get_coo_matrix().toarray()
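# Minimal usage sketch (illustrative column data, not part of the solver): each stored
# entry is one column given as (values, row indexes); get_coo_matrix() assembles the
# columns into a scipy COO matrix and get_matrix() densifies it.
if __name__ == "__main__":
    smc = sparse_matrix_constructor(np.array([1.0, 2.0]), np.array([0, 2]), row_num=3)
    smc.append(sparse_matrix_constructor(np.array([5.0]), np.array([1]), row_num=3))
    print(smc.get_matrix())   # [[1. 0.], [0. 5.], [2. 0.]]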
| 5,025 | 36.507463 | 131 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_formulation.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from enum import Enum
from .matlab_utils import find, ismember
from .lp_tools.LP_formulation import LP_formulation
from .lp_tools.simplex_procedures import unsigned_simplex
from .parametric_line import parametric_line
class SCLP_formulation_type(Enum):
primal_classic = 0
dual_classic = 1
weiss = 2
not_bounded = 3
primal_MCLP = 4
dual_MCLP = 5
both_MCLP = 6
primal_infeasible = 7
dual_infeasible = 8
both_infeasible = 9
class SCLP_data_type(Enum):
linear = 0
primal_piecewise_linear = 1
dual_piecewise_linear = 2
piecewise_linear = 3
# We are going to extend this class
# Assume that a, b, c, d are matrices
class SCLP_formulation():
__slots__ = ["G", "F", "H", "a", "b", "c", "d", "alpha", "gamma", "T", "I", "J", "K", "L", "_formulation_type", "_data_type"]
def __init__(self, G, F, H, a, b, c, d, alpha, gamma, T):
self.G, self.F, self.H, self.a, self.b, self.c, self.d, self.alpha, self.gamma, self.T = G, F, H, a, b, c, d, alpha, gamma, T
self.K = G.shape[0]
self.J = G.shape[1]
self.I = H.shape[0]
self.L = F.shape[1]
if self.L == 0:
if self.I == 0:
if np.any(self.alpha < 0):
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.both_MCLP
else:
self._formulation_type = SCLP_formulation_type.primal_MCLP
else:
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.dual_MCLP
else:
self._formulation_type = SCLP_formulation_type.not_bounded
else:
self._formulation_type = SCLP_formulation_type.primal_classic
if np.any(self.alpha < 0):
self._formulation_type = SCLP_formulation_type.primal_MCLP
else:
if self.I == 0:
self._formulation_type = SCLP_formulation_type.dual_classic
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.dual_MCLP
else:
self._formulation_type = SCLP_formulation_type.weiss
# if isinstance(a, piecewise_data):
# if isinstance(c, piecewise_data):
# self._data_type = SCLP_data_type.piecewise_linear
# else:
# self._data_type = SCLP_data_type.primal_piecewise_linear
# else:
# if isinstance(c, piecewise_data):
# self._data_type = SCLP_data_type.dual_piecewise_linear
# else:
# self._data_type = SCLP_data_type.linear
@property
def data_type(self):
return self._data_type
@property
def formulation_type(self):
return self._formulation_type
def formulate_ratesLP(self, x_0, q_N):
Kset = find(x_0)
Jset = find(q_N)
DD = np.vstack((-np.hstack((0, self.c, self.d)), np.hstack((np.vstack(self.a), self.G, self.F)),
np.hstack((np.vstack(self.b), self.H, np.zeros((self.I, self.L))))))
DD = np.ascontiguousarray(DD)
pn = np.ascontiguousarray(np.hstack((np.arange(1, self.K + 1, dtype = np.int32),
-np.arange(self.J + 1, self.J + self.I + 1, dtype = np.int32))), dtype = np.int32)
psx = ismember(np.arange(0, self.K), Kset).astype(np.int32)
psu = -ismember(np.arange(self.J, self.J + self.I), Jset).astype(np.int32)
ps = np.hstack((psx, psu))
dn = np.ascontiguousarray(np.hstack((-np.arange(1, self.J + 1, dtype = np.int32),
np.arange(self.K + 1, self.K + self.L + 1, dtype = np.int32))), dtype = np.int32)
dsq = ismember(np.arange(0, self.J), Jset).astype(np.int32)
dsp = -ismember(np.arange(self.K, self.K + self.L), Kset).astype(np.int32)
ds = np.hstack((dsq, dsp))
return LP_formulation(DD, pn, dn), ps, ds
def get_primalBoundaryLP(self):
DD1 = np.ascontiguousarray(np.vstack((-np.hstack((0, self.d)), np.hstack((np.vstack(self.alpha), self.F)))))
pn1 = np.arange(1, self.K + 1, dtype = np.int32)
dn1 = np.arange(self.K + 1, self.K + self.L + 1, dtype = np.int32)
return LP_formulation(DD1, pn1, dn1)
def get_dualBoundaryLP(self):
DD1 = np.ascontiguousarray(np.vstack((np.hstack((0, np.hstack(self.b))), np.hstack((np.vstack(-self.gamma), -self.H.transpose())))))
pn1 = np.arange(1, self.J + 1, dtype = np.int32)
dn1 = np.arange(self.J + 1, self.J + self.I + 1, dtype = np.int32)
return LP_formulation(DD1, pn1, dn1)
def get_generalBoundaryLP(self):
DD0 = np.ascontiguousarray(np.vstack((np.hstack((0, -self.gamma, np.zeros((self.L)), self.d)), np.hstack((self.alpha[...,np.newaxis], self.G, self.F)),
np.hstack((np.zeros((self.I, 1)), self.H, np.zeros((self.I, self.L)))))))
pn = np.ascontiguousarray(np.concatenate((np.arange(1, self.K + 1), -np.arange(self.J + 1, self.J + self.I + 1))), dtype = np.int32)
dn = np.ascontiguousarray(np.concatenate((-np.arange(1, self.J + 1), np.arange(self.K + 1, self.K + self.L + 1))), dtype = np.int32)
return LP_formulation(DD0, pn, dn)
def get_general_dualBoundaryLP(self):
DD0 = np.ascontiguousarray(np.vstack(
(np.hstack((0, -self.gamma, np.zeros((1, self.L)), self.d)), np.hstack((self.alpha + self.a * self.T, self.G, self.F)),
np.hstack((np.zeros((self.I, 1)), self.H, np.zeros((self.I, self.L)))))))
pn = np.ascontiguousarray(np.concatenate((np.arange(1, self.K + 1), -np.arange(self.J + 1, self.J + self.I + 1))), dtype = np.int32)
dn = np.ascontiguousarray(np.concatenate((-np.arange(1, self.J + 1), np.arange(self.K + 1, self.K + self.L + 1))), dtype = np.int32)
return LP_formulation(DD0, pn, dn)
def get_dualBoundaryLP_solution(self, tolerance = 0):
if self._formulation_type == SCLP_formulation_type.not_bounded or self._formulation_type == SCLP_formulation_type.dual_classic:
return np.ascontiguousarray(-self.gamma)
elif self._formulation_type == SCLP_formulation_type.primal_classic or self._formulation_type == SCLP_formulation_type.weiss:
LP_form = self.get_dualBoundaryLP()
LP_form, err = unsigned_simplex(LP_form, tolerance)
if err['result'] == 0:
q_N = np.zeros(self.J + self.I, order='C')
q_N[LP_form.prim_name - 1] = LP_form.simplex_dict[1:, 0]
return q_N
LP_form = self.get_generalBoundaryLP()
LP_form, err = unsigned_simplex(LP_form, tolerance)
if err['result'] == 0:
q_N = np.zeros(self.J + self.I, order='C')
q_N[LP_form.prim_name - 1] = LP_form.simplex_dict[1:, 0]
return q_N
return None
def get_primalBoundaryLP_solution(self, tolerance = 0):
if self._formulation_type == SCLP_formulation_type.not_bounded or self._formulation_type == SCLP_formulation_type.primal_classic:
return np.ascontiguousarray(self.alpha)
elif self._formulation_type == SCLP_formulation_type.dual_classic or self._formulation_type == SCLP_formulation_type.weiss:
LP_form = self.get_primalBoundaryLP()
LP_form, err = unsigned_simplex(LP_form, tolerance)
if err['result'] == 0:
x_0 = np.zeros(self.K + self.L, order='C')
x_0[LP_form.prim_name - 1] = LP_form.simplex_dict[1:, 0]
return x_0
# MCLP not supported yet
return None
def get_parametric_line(self, tolerance = 0):
x_0 = self.get_primalBoundaryLP_solution(tolerance)
q_N = self.get_dualBoundaryLP_solution(tolerance)
return parametric_line(x_0, q_N, self.T)
def show_task_capacity_per_server(self):
from bokeh.io import output_file, show
from bokeh.models import GraphRenderer, Oval, StaticLayoutProvider, ColumnDataSource, LabelSet
from bokeh.plotting import figure
from bokeh.palettes import Category20c, Category20
# tasks are the columns of H and servers are the rows of H (e.g. 12 tasks and 4 servers)
number_of_servers = len(self.H)
tasks = ['task ' + str(i) for i in range(1, len(self.H[0]) + 1)]
index_array_of_tasks = list(range(1, len(tasks) + 1))
index_array_of_servers = list(range(len(tasks) + 1, len(tasks) + number_of_servers + 1))
number_of_tasks = len(tasks)
node_indices = np.concatenate((index_array_of_tasks, index_array_of_servers), axis=None).tolist()
node_x_location = np.concatenate((index_array_of_tasks, list(range(1, len(index_array_of_servers) + 1))),
axis=None).tolist()
node_y_location = np.concatenate(
(np.full(len(index_array_of_tasks), 5), np.full(len(index_array_of_servers), 3)), axis=None).tolist()
plot = figure(title='Task capacity per server', x_range=(0, max(number_of_servers, number_of_tasks) + 1),
y_range=(0, 8),
tools='', toolbar_location=None)
graph = GraphRenderer()
graph.node_renderer.data_source.add(node_indices, 'index')
graph.node_renderer.data_source.add(Category20c[len(node_indices)], 'color')
graph.node_renderer.glyph = Oval(height=0, width=0, fill_color='color')
network_graph_tasks_indices = []
network_graph_server_indices = []
network_graph_tasks_server_hash = {}
for k in range(number_of_servers): # servers
for j in range(number_of_tasks): # tasks
if self.H[k, j] > 0:
network_graph_tasks_indices.append(j + 1)
network_graph_server_indices.append(len(tasks) + k + 1)
network_graph_tasks_server_hash[j + 1] = self.H[k, j]
graph.edge_renderer.data_source.data = dict(
start=list(network_graph_tasks_indices),
end=list(network_graph_server_indices)
)
x = node_x_location
y = node_y_location
graph_layout = dict(zip(node_indices, zip(x, y)))
graph.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
plot.renderers.append(graph)
x_servers = list(range(1, len(index_array_of_servers) + 1))
y_servers = np.full(len(index_array_of_servers), 3)
plot.square(x_servers, y_servers, size=30, color=Category20[number_of_servers], alpha=0.5)
x_tasks = index_array_of_tasks
y_tasks = np.full(len(index_array_of_tasks), 5)
plot.circle(x_tasks, y_tasks, size=30, color=Category20[len(index_array_of_tasks)], alpha=0.5)
text_label_values = np.round(
np.multiply(np.round(list(network_graph_tasks_server_hash.values()), 2), 100)).tolist()
text_label_values = [str(int(capacity)) + '%' for capacity in text_label_values]
source = ColumnDataSource(data=dict(x=list(network_graph_tasks_server_hash.keys()),
y=np.full(len(network_graph_tasks_indices), 4.8),
values=text_label_values))
capacityLabels = LabelSet(x='x', y='y', text='values', level='glyph',
x_offset=-8, y_offset=10, source=source, render_mode='canvas', text_font_size="10pt")
plot.add_layout(capacityLabels)
source = ColumnDataSource(data=dict(x=[6, 6],
y=[2.5, 5.5],
values=['servers', 'tasks']))
typeLabel = LabelSet(x='x', y='y', text='values', level='glyph',
x_offset=0, y_offset=0, source=source, render_mode='canvas', text_font_size="10pt")
plot.add_layout(typeLabel)
output_file('graph.html')
show(plot)
return None
def show_flow_from_outside_to_buffers_to_tasks(self):
from bokeh.io import output_file, show
from bokeh.models import GraphRenderer, Oval, StaticLayoutProvider, ColumnDataSource, LabelSet, Arrow, OpenHead
from bokeh.plotting import figure
from bokeh.palettes import Plasma256
# vector alpha > 0, vector a can take any value
# a is the input/output coming from outside
# alpha is the initial value in each buffer
# matrix G connects buffers and tasks
# in matrix G, flow runs between a task and multiple buffers
# flow goes from a to buffer to task
number_of_io_nodes = len(self.a)
number_of_buffers = self.K
number_of_tasks = len(self.H[0])
index_array_of_io = list(range(1, number_of_io_nodes + 1))
index_array_of_buffers = list(range(number_of_io_nodes + 1, number_of_io_nodes + number_of_buffers + 1))
index_array_of_tasks = list(range(number_of_io_nodes + number_of_buffers + 1,
number_of_io_nodes + number_of_buffers + number_of_tasks + 1))
node_indices = np.concatenate((index_array_of_io, index_array_of_buffers, index_array_of_tasks),
axis=None).tolist()
node_x_location = np.concatenate((index_array_of_io, list(range(1, len(index_array_of_buffers) + 1)),
list(range(1, len(index_array_of_tasks) + 1))), axis=None).tolist()
node_y_location = np.concatenate(
(np.full(number_of_io_nodes, 7), np.full(number_of_buffers, 5), np.full(number_of_tasks, 3)),
axis=None).tolist()
max_x_range = max(number_of_io_nodes, number_of_buffers, number_of_tasks) + 1
plot = figure(title='Flow from outside to buffers to tasks', x_range=(0, max_x_range), y_range=(0, 9),
tools='', toolbar_location=None)
graph = GraphRenderer()
graph.node_renderer.data_source.add(node_indices, 'index')
graph.node_renderer.data_source.add(Plasma256[:len(node_indices)], 'color')
graph.node_renderer.glyph = Oval(height=0, width=0, fill_color='color')
start = index_array_of_io
end = index_array_of_buffers
network_graph_buffer_task_hash = {}
for buffer_index in range(number_of_buffers):
network_graph_buffer_task_hash[buffer_index + 1] = np.sum(self.G[buffer_index, :])
graph.edge_renderer.data_source.data = dict(
start=start,
end=end
)
x = node_x_location
y = node_y_location
graph_layout = dict(zip(node_indices, zip(x, y)))
graph.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
plot.renderers.append(graph)
x_io = list(range(1, number_of_io_nodes + 1))
y_io = np.full(number_of_io_nodes, 7)
plot.triangle(x_io, y_io, size=30, color=getLargePalette(number_of_io_nodes,Plasma256), alpha=0.5, line_width=2)
x_buffers = list(range(1, number_of_buffers + 1))
y_buffers = np.full(number_of_buffers, 5)
plot.rect(x_buffers, y_buffers, color=getLargePalette(number_of_buffers,Plasma256), alpha=0.5, width=0.5, height=0.5)
x_tasks = list(range(1, number_of_tasks + 1))
y_tasks = np.full(number_of_tasks, 3)
plot.circle(x_tasks, y_tasks, size=30, color=getLargePalette(number_of_tasks,Plasma256), alpha=0.5)
for i in range(number_of_buffers):
for j in range(number_of_tasks):
if self.G[i, j] > 0:
x_start_node = x_buffers[i]
y_start_node = y_buffers[i]
x_end_node = x_tasks[j]
y_end_node = y_tasks[j]
elif self.G[i, j] < 0:
x_start_node = x_tasks[j]
y_start_node = y_tasks[j]
x_end_node = x_buffers[i]
y_end_node = y_buffers[i]
plot.add_layout(Arrow(end=OpenHead(),
x_start=x_start_node, y_start=y_start_node, x_end=x_end_node, y_end=y_end_node))
text_label_values = np.round(
np.multiply(np.round(list(network_graph_buffer_task_hash.values()), 2), 100)).tolist()
text_label_values = [str(int(capacity)) + '%' for capacity in text_label_values]
source = ColumnDataSource(data=dict(x=list(network_graph_buffer_task_hash.keys()),
y=np.full(number_of_buffers, 4.8),
values=text_label_values))
capacityLabels = LabelSet(x='x', y='y', text='values', level='glyph',
x_offset=-8, y_offset=10, source=source, render_mode='canvas', text_font_size="10pt")
plot.add_layout(capacityLabels)
source = ColumnDataSource(data=dict(x=[max_x_range / 2 - 0.5, max_x_range / 2 - 0.5, max_x_range / 2 - 0.5],
y=[2.5, 5.5, 7.5],
values=['tasks', 'buffers', 'outside sources']))
typeLabel = LabelSet(x='x', y='y', text='values', level='glyph',
x_offset=0, y_offset=0, source=source, render_mode='canvas', text_font_size="10pt")
plot.add_layout(typeLabel)
output_file('graph.html')
show(plot)
return None
def getLargePalette(size, palette):
if size < 256:
return palette[size]
p = palette[:256]
out = []
for i in range(size):
idx = int(i * 256.0 / size)
out.append(p[idx])
return out
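# Minimal usage sketch (illustrative 2-buffer / 2-task / 1-server instance, assuming the
# package's relative imports resolve, e.g. when run with python -m): with no F columns
# (L = 0) and a non-empty H, the constructor classifies the problem as primal_classic.
if __name__ == "__main__":
    form = SCLP_formulation(G=np.eye(2), F=np.zeros((2, 0)), H=np.ones((1, 2)),
                            a=np.ones(2), b=np.ones(1), c=np.ones(2), d=np.zeros(0),
                            alpha=np.ones(2), gamma=np.zeros(2), T=10.0)
    print(form.formulation_type)  # -> SCLP_formulation_type.primal_classic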
| 18,372 | 46.722078 | 159 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/extract_rates.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .matlab_utils import find
from .matrix_constructor import matrix_constructor
from .lp_tools.pivot import pivot_mn
#'#@profile
def extract_rates_from_basis(dct, problem_dims):
klist1 = find(dct.prim_name > 0)
jlist2 = find(dct.dual_name < 0)
kn1 = dct.prim_name[klist1]
jn2 = -dct.dual_name[jlist2]
if problem_dims.KK < problem_dims.totalK:
kn2 = dct.dual_name[dct.dual_name > 0]
kord = np.argsort(np.argsort(np.hstack((kn1, kn2))))[:len(kn1)]
dx = (dct.simplex_dict[klist1+1,0], kord)
else:
dx = (dct.simplex_dict[klist1+1,0], kn1-1)
if problem_dims.JJ < problem_dims.totalJ:
jn1 = -dct.prim_name[dct.prim_name < 0]
jord = np.argsort(np.argsort(np.hstack((jn1, jn2))))[len(jn1):]
dq = (dct.simplex_dict[0,jlist2+1], jord)
else:
dq = (dct.simplex_dict[0,jlist2+1], jn2-1)
return dx, dq
def extract_rates_from_partial(prim_vars, dual_vars, prim_name, dual_name, problem_dims):
klist1 = find(prim_name > 0)
jlist2 = find(dual_name < 0)
kn1 = prim_name[klist1]
jn2 = -dual_name[jlist2]
if problem_dims.KK < problem_dims.totalK:
kn2 = dual_name[dual_name > 0]
kord = np.argsort(np.argsort(np.hstack((kn1, kn2))))[:len(kn1)]
dx = (prim_vars[klist1+1], kord)
else:
dx = (prim_vars[klist1+1], kn1-1)
if problem_dims.JJ < problem_dims.totalJ:
jn1 = -prim_name[prim_name < 0]
jord = np.argsort(np.argsort(np.hstack((jn1, jn2))))[len(jn1):]
dq = (dual_vars[jlist2+1], jord)
else:
dq = (dual_vars[jlist2+1], jn2-1)
return dx, dq
def extract_rates_from_subproblem(pivots, AAN1, AAN2, problem_dims):
# Warning: this is based on the assumption that the first basis in new_base_sequence equals AAN1 and/or the last basis equals AAN2
if len(pivots) > 0:
if AAN1 is not None:
AAN1 = AAN1.copy()
if AAN2 is not None:
ran = enumerate(pivots[:-1])
dx = matrix_constructor(None, None, problem_dims.KK, -1, len(pivots))
dq = matrix_constructor(None, None, problem_dims.JJ, -1, len(pivots))
else:
ran = enumerate(pivots)
dx = matrix_constructor(None, None, problem_dims.KK, -1, len(pivots)+1)
dq = matrix_constructor(None, None, problem_dims.JJ, -1, len(pivots)+1)
for i, piv1 in ran:
AAN1 = pivot_mn(AAN1, piv1[0], piv1[1])
ndx, ndq = extract_rates_from_basis(AAN1, problem_dims)
dx.append(ndx)
dq.append(ndq)
else:
dx = matrix_constructor(None, None, problem_dims.KK, 1, len(pivots) + 1)
dq = matrix_constructor(None, None, problem_dims.JJ, 1, len(pivots) + 1)
AAN2 = AAN2.copy()
for i, piv1 in enumerate(reversed(pivots)):
AAN2 = pivot_mn(AAN2, piv1[1], piv1[0])
ndx, ndq = extract_rates_from_basis(AAN2, problem_dims)
dx.prepend(ndx)
dq.prepend(ndq)
else:
dx = matrix_constructor(None, None, problem_dims.KK, -1, 1)
dq = matrix_constructor(None, None, problem_dims.JJ, -1, 1)
return dx.get_matrix(), dq.get_matrix()
| 3,873 | 42.044444 | 136 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/problem_dimensions.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class problem_dimensions():
def __init__(self, KK, JJ, totalK=None, totalJ=None):
self._j = JJ
self._k = KK
self._totalJ = totalJ
self._totalK = totalK
@property
def JJ(self):
return self._j
@property
def KK(self):
return self._k
@property
def totalJ(self):
if self._totalJ is None:
return self._j
else:
return self._totalJ
@property
def totalK(self):
if self._totalK is None:
return self._k
else:
return self._totalK
| 1,164 | 24.888889 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/generic_SCLP_solution.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .pivot_storage import pivot_storage
from .col_info_stack import col_info_stack
from .extract_rates import extract_rates_from_basis, extract_rates_from_subproblem, extract_rates_from_partial
from .SCLP_base_sequence import SCLP_base_sequence
from .rewind_info import rewind_info
from .problem_dimensions import problem_dimensions
from .solution_state import solution_state
from .matrix_constructor import matrix_constructor
from .equation_tools.calc_equations import time_equations
from .state_tools.loc_min_storage import loc_min_storage
from .state_tools.calc_states import calc_states, check_state, calc_specific_state
from .bases_memory_manager import bases_memory_manager
class generic_SCLP_solution():
def __init__(self, LP_form, settings, KK=None, JJ=None, totalK = None, totalJ = None):
self._klist = np.ascontiguousarray(np.sort(np.append(LP_form.prim_name[LP_form.prim_name > 0], LP_form.dual_name[LP_form.dual_name > 0])), dtype=np.int32)
self._jlist = np.ascontiguousarray(np.sort(-np.append(LP_form.prim_name[LP_form.prim_name < 0], LP_form.dual_name[LP_form.dual_name < 0])), dtype=np.int32)
if KK is None:
KK = np.size(self._klist)
if JJ is None:
JJ = np.size(self._jlist)
self._problem_dims = problem_dimensions(KK, JJ, totalK, totalJ)
self._pivots = pivot_storage()
self._base_sequence = SCLP_base_sequence(LP_form)
dx, dq = extract_rates_from_basis(LP_form, self._problem_dims)
self._dx = matrix_constructor(dx[0], dx[1], KK)
self._dq = matrix_constructor(dq[0], dq[1], JJ)
self.loc_min_storage = loc_min_storage(self._dx.get_matrix(), self._dq.get_matrix())
self._equations = time_equations()
self._col_info_stack = col_info_stack()
self.bases_mm = bases_memory_manager(LP_form.prim_name.shape[0], LP_form.dual_name.shape[0])
self._last_collision = None
self._suppress_printing = settings.suppress_printing
self.rewind_max_delta = settings.rewind_max_delta
if self._problem_dims.totalK >= 100:
self.partial_states = True
else:
self.partial_states = False
self._state = solution_state(4*max(KK,JJ), JJ, KK)
self.stable_iteration = True
@property
def klist(self):
return self._klist
@property
def jlist(self):
return self._jlist
@property
def last_collision(self):
return self._last_collision
@property
def pivots(self):
return self._pivots
@property
def base_sequence(self):
return self._base_sequence
@property
def state(self):
return self._state
@property
def NN(self):
return len(self._pivots) + 1
@property
def KK(self):
return self._problem_dims.KK
@property
def JJ(self):
return self._problem_dims.JJ
@property
def totalK(self):
return self._problem_dims.totalK
@property
def totalJ(self):
return self._problem_dims.totalJ
def get_raw_dx(self):
return self._dx.get_raw_matrix()
def get_raw_dq(self):
return self._dq.get_raw_matrix()
#'#@profile
def update_state(self, param_line, check_state = False, tolerance=0, up_rewind =False):
#state = solution_state()
self._state.dx = self._dx.get_matrix()
self._state.dq = self._dq.get_matrix()
if self._last_collision and self._equations.can_update(self._last_collision) and not check_state and\
param_line.is_main() and not up_rewind:
dtau = self._equations.update(self._last_collision.N1 + 1, self._state.dx, self._state.dq)
self._state.update_tau(self._last_collision, param_line.T)
self.stable_iteration = False
else:
left_idx = self._equations.build(param_line, self._klist, self._jlist, self._pivots,
self._state.dx, self._state.dq)
tau, dtau = self._equations.solve()
if np.any(tau < -tolerance):
idx = np.argmin(tau)
if self._last_collision:
next_tau = self._last_collision.delta * dtau[idx] + tau[idx]
else:
next_tau = 0.01 * dtau[idx] + tau[idx]
print('Warning tau=', idx, 'has value', tau[idx], 'increase by', dtau[idx], 'to', next_tau)
if self._last_collision and not check_state and param_line.is_main() and not up_rewind and\
dtau[idx] > -tolerance and next_tau > -10E-5:
print('Updating...')
self.stable_iteration = False
self._state.update_tau(self._last_collision, param_line.T)
else:
self.stable_iteration = True
self._state.tau = tau
else:
self._state.tau = tau
self.stable_iteration = True
self._state.dtau = dtau
if check_state or not self.partial_states:
x, del_x, q, del_q = calc_states(self._dx.get_raw_matrix(), self._dq.get_raw_matrix(), param_line, self._state.tau,
self._state.dtau, check_state)
self._state.x = x
self._state.del_x = del_x
self._state.q = q
self._state.del_q = del_q
if check_state:
if self._check_state(self._state, tolerance*100):
return True
else:
return False
else:
return True
def recalc_tau(self, param_line, check_state = False):
left_idx = self._equations.build(param_line, self._klist, self._jlist, self._pivots,
self._state.dx, self._state.dq)
tau, dtau = self._equations.solve()
self._state.tau = tau
self._state.dtau = dtau
if check_state or not self.partial_states:
self._state.x, self._state.del_x, self._state.q, self._state.del_q \
= calc_states(self._dx.get_raw_matrix(), self._dq.get_raw_matrix(), param_line, self._state.tau,
self._state.dtau, check_state)
def get_specific_state(self, n, i, is_primal, is_del, param_line):
if self.partial_states:
return calc_specific_state(n, i, is_primal, is_del, self._dx.get_raw_matrix(), self._dq.get_raw_matrix(),
param_line, self._state.tau, self._state.dtau)
else:
if is_primal:
if is_del:
return self._state.del_x[i, n]
else:
return self._state.x[i, n]
else:
if is_del:
return self._state.del_q[i, n]
else:
return self._state.q[i, n]
def _check_state(self, state, tolerance):
if self._state is not None:
res = check_state(state.x, tolerance)
res = res and check_state(state.q, tolerance, False)
return res
return False
def update_from_subproblem(self, col_info, pivots, AAN1, AAN2):
dx, dq = extract_rates_from_subproblem(pivots, AAN1, AAN2, self._problem_dims)
Nnew = len(pivots)
if AAN1 is not None and AAN2 is not None:
Nnew -=1
self._update_caseII(col_info, dx, dq, AAN1, AAN2, pivots, Nnew)
def update_from_basis(self, col_info, piv, AAN1, AAN2, basis):
dx, dq = extract_rates_from_basis(basis, self._problem_dims)
self._update_caseII(col_info, dx, dq, AAN1, AAN2, piv, 1, basis, False)
def update_from_partial(self, col_info, piv, AAN1, AAN2, prim_vars, dual_vars, prim_names, dual_names):
dx, dq = extract_rates_from_partial(prim_vars, dual_vars, prim_names, dual_names, self._problem_dims)
self._update_caseII(col_info, dx, dq, AAN1, AAN2, piv, 1, None, False)
#'#@profile
def update_caseI(self, col_info):
NN = self.NN
self.store_rewind_info(col_info)
self._last_collision = col_info
N1 = col_info.N1
N2 = col_info.N2
self._base_sequence.remove_bases(N1, N2, self._pivots, self.bases_mm)
self._dx.remove(N1 + 1, N2)
self._dq.remove(N1 + 1, N2)
self._pivots.remove_pivots(N1, N2)
if N2 == NN:
N2 = None
self.loc_min_storage.update_caseI(N1, N2, self._dx.get_matrix(), self._dq.get_matrix())
col_info.Nnew = self.NN - NN
#'#@profile
def _update_caseII(self, col_info, dx, dq, AAN1, AAN2, pivots, Nnew, basis = None, matrix = True):
NN = self.NN
self._last_collision = col_info
self.store_rewind_info(col_info)
N1 = col_info.N1
N2 = col_info.N2
self._base_sequence.replace_bases(N1, N2, Nnew, AAN1, AAN2, self.bases_mm)
self._pivots.replace_pivots(N1, N2, pivots)
if matrix:
self._dx.replace_matrix(N1 + 1, N2, dx)
self._dq.replace_matrix(N1 + 1, N2, dq)
Nadd = Nnew
else:
self._dx.replace(N1 + 1, N2, dx[0], dx[1])
self._dq.replace(N1 + 1, N2, dq[0], dq[1])
Nadd = 1
col_info.Nnew = self.NN - NN
if N2 == NN:
N2 = None
self.loc_min_storage.update_caseII(N1, N2, Nadd, self._dx.get_matrix(), self._dq.get_matrix())
if basis is not None:
self._base_sequence.insert_basis(basis,N1+1)
#'#@profile
def update_rewind(self):
if self.can_rewind():
NN = self.NN
col_info = self._col_info_stack.pop()
N2_cor = col_info.N2 + col_info.Nnew
self._base_sequence.remove_bases(col_info.N1, N2_cor, self._pivots, self.bases_mm, col_info.N2-col_info.N1-1)
Npivots = len(col_info.rewind_info.pivots)
self._pivots.replace_pivots(col_info.N1, col_info.N1 + col_info.Nnew + Npivots, col_info.rewind_info.pivots)
self._dx.replace_matrix(col_info.N1 + 1, N2_cor, col_info.rewind_info.dx)
self._dq.replace_matrix(col_info.N1 + 1, N2_cor, col_info.rewind_info.dq)
Nadd = col_info.rewind_info.dx.shape[1]
if N2_cor == NN:
N2_cor = None
if Nadd == 0:
self.loc_min_storage.update_caseI(col_info.N1, N2_cor, self._dx.get_matrix(), self._dq.get_matrix())
else:
self.loc_min_storage.update_caseII(col_info.N1, N2_cor, Nadd, self._dx.get_matrix(), self._dq.get_matrix())
self._last_collision = self._col_info_stack.last
return col_info
else:
return None
def can_rewind(self):
return not self._col_info_stack.is_empty()
#'#@profile
def store_rewind_info(self, col_info):
if col_info.delta < self.rewind_max_delta:
N1 = col_info.N1
N2 = col_info.N2
cor_N1 = N1 + 1
if col_info.case == 'Case iii':
if N1 > -1:
pivots = self.pivots[N1:N2].copy()
else:
pivots = pivot_storage()
else:
if N1 > -1:
pivots = self.pivots[N1:N2 + 1].copy()
else:
pivots = self.pivots[N1 + 1:N2 + 1].copy()
dx = self._dx.get_sub_matrix(cor_N1, N2)
dq = self._dq.get_sub_matrix(cor_N1, N2)
col_info.rewind_info = rewind_info(pivots, dx, dq)
if not col_info.had_resolution:
if len(self._col_info_stack) == 1 and not self._col_info_stack.last.had_resolution:
self._col_info_stack.clear()
self._col_info_stack.push(col_info)
else:
self._col_info_stack.clear()
def store_ztau_ind(self, ztau_ind):
if len(ztau_ind) > 0:
if self._col_info_stack.last is not None:
self._col_info_stack.last.ztau_ind = ztau_ind
def get_ztau_ind(self):
if self._col_info_stack.last is not None:
return self._col_info_stack.last.ztau_ind
else:
return None
#'#@profile
def get_basis_at(self, place):
new_mat, new_place = self._base_sequence.get_basis_at(place, self._pivots)
self._base_sequence.insert_basis(new_mat, new_place)
return new_mat
#'#@profile
def get_name_diff_with0(self, name):
place, basis = self._base_sequence.get_nearby_basis_at0()
pn = basis.prim_name
pn0 = self._pivots.get_prim_name_at0(place,pn)
return np.setdiff1d(pn0,name, assume_unique=True)
#'#@profile
def get_name_diff_withN(self, name):
place, basis = self._base_sequence.get_nearby_basis_atN()
pn = basis.prim_name
pnN = self._pivots.get_prim_name_atN(self.NN + place,pn)
return np.setdiff1d(pnN,name, assume_unique=True)
def check_if_complete(self, param_line):
if param_line.is_sub():
res = True
if param_line.B1 is not None:
res = res and self.get_name_diff_with0(param_line.B1).size == 0
if param_line.B2 is not None:
res = res and self.get_name_diff_withN(param_line.B2).size == 0
return res
return False
#'#@profile
def get_bases(self, N1, N2):
new_mat, new_place = self._base_sequence.get_nearby_basis(N1, N2, self._pivots)
self._base_sequence.insert_basis(new_mat, new_place)
if new_place == N1:
return new_mat, self.get_basis_at(N2)
elif new_place == N2:
return self.get_basis_at(N1), new_mat
else:
raise Exception('Cannot calculate correct bases!')
#'#@profile
def get_next_basis_for_solution(self, basis, place, preserve = True):
return self._base_sequence.get_next_basis(basis, place, self._pivots, preserve)
def print_status(self, STEPCOUNT, DEPTH, ITERATION, theta, col_info):
if not self._suppress_printing:
print(STEPCOUNT, DEPTH, ITERATION, self.JJ, 'x', self.KK, self.NN, theta, theta + col_info.delta,
col_info.case, col_info.N1, col_info.N2, col_info.v1, col_info.v2, self.base_sequence.num_bases)
def print_short_status(self, STEPCOUNT, DEPTH, ITERATION, theta, theta1, case):
if not self._suppress_printing:
print(STEPCOUNT, DEPTH, ITERATION, self.JJ, 'x', self.KK, self.NN, theta, theta1,
case, self.base_sequence.num_bases)
def prepare_to_save(self):
self._base_sequence.keep_only_one()
#self._state = None
def clear_collision_stack(self):
self._last_collision = None
self._col_info_stack.clear()
def clear_base_sequence(self, mm):
if mm is not None:
self._base_sequence.clear_base_sequense(mm.num_bases_to_remove(), mm.max_bs, self.NN)
| 15,553 | 40.477333 | 163 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/equation_tools/setup.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
import sys
'''
To compile this (C code) in cython on Mac:
$ brew install gcc
$ export CC=/usr/local/bin/gcc-9
$ python setup.py build_ext --inplace
'''
if sys.platform == 'darwin':
extra_compile_args = []#["-fopenmp"]
extra_link_args = ["-lomp"]
else:
extra_compile_args = ["/openmp"]
extra_link_args = ["/openmp"]
setup(
name='eq tools',
ext_modules=cythonize([Extension("eq_tools", ["eq_tools.pyx"], include_dirs=[numpy.get_include()], extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args)]),
zip_safe=False
)
| 1,246 | 28 | 141 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/equation_tools/calc_equations.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from .eq_tools import build_equations, get_rows, get_new_col, get_left_stability_idx, ftran2eta
#TODO: the build method can be further improved if we reuse all matrices - note that LU factorization is slower if the matrix is not contiguous
class time_equations():
def __init__(self):
self.coeff = None
self.lu = None
self.rrhs = None
self.var_names = None
self.var_nums = None
self.iteration = 0
self._max_iterations = 30
self.row_order = np.zeros((2,self._max_iterations), dtype=np.int32, order='C')
self.pivot_idxs = np.zeros(self._max_iterations, dtype=np.int32, order='C')
self.solution = None
self.etas = None
def build(self, param_line, klist, jlist, pivots, dx, dq, test=False):
if not test:
self.iteration = 0
if len(pivots.outpivots) > 0:
outp = np.asarray(pivots.outpivots, dtype=np.int32, order='C')
inp = np.asarray(pivots.inpivots, dtype=np.int32, order='C')
left_idx = None #get_left_stability_idx(outp, inp)
self.coeff, self.var_names, self.var_nums, self.rrhs=\
build_equations(klist, jlist, outp, inp, dx, dq, param_line.x_0, param_line.del_x_0, param_line.q_N, param_line.del_q_N)
else:
self.coeff = np.eye(1)
self.rrhs = np.zeros((1, 2))
left_idx = 0
self.rrhs[-1, 0] = param_line.T
self.rrhs[-1, 1] = param_line.del_T
return left_idx
def can_update(self, col_info):
if col_info.case == 'Case ii_' and col_info.N2 - col_info.N1 == 2 and col_info.Nnew == 0:
return self.iteration < self._max_iterations and col_info.delta > 1E-5
else:
return False
    #TODO: this function is not always correct - however it is not used
def get_reordered_coeff(self):
cf = self.coeff.copy()
for n in range(self.iteration):
tmp = cf[self.row_order[0,n],:].copy()
cf[self.row_order[0,n],:] = cf[self.row_order[1,n],:]
cf[self.row_order[1,n], :] = tmp
return cf
def update(self, n, dx, dq):
if self.iteration == 0:
self.etas = np.zeros((self._max_iterations, self.coeff.shape[0]), dtype=np.double, order='C')
row1, row2 = get_rows(n-1, n, self.row_order, self.iteration)
vec = get_new_col(self.coeff, self.var_nums, self.var_names, n, row1, row2, dx, dq)
self.coeff[:, n] = vec
vec1 = lu_solve(self.lu, vec, check_finite=False)
ftran2eta(vec1, self.etas, self.pivot_idxs, self.iteration, n)
ftran(self.solution[:, 1], self.etas[self.iteration, :], n)
self.iteration += 1
return self.solution[:, 1].copy()
def solve(self):
self.lu = lu_factor(self.coeff, check_finite=False)
self.solution = lu_solve(self.lu, self.rrhs, check_finite=False)
return self.solution[:, 0], self.solution[:, 1].copy()
def to_eta(values, index_to_pivot):
pivot_val = values[index_to_pivot]
values /= -values[index_to_pivot]
values[index_to_pivot] = 1./pivot_val
return values
def ftran(values, eta, index_to_pivot):
if values[index_to_pivot] != 0:
pivot_val = values[index_to_pivot] * eta[index_to_pivot]
values[:len(eta)] += values[index_to_pivot] * eta
values[index_to_pivot] = pivot_val
return values
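
if __name__ == '__main__':
    # Editor's sketch, not part of the original module: a minimal, self-contained check of how
    # to_eta/ftran compose, assuming the usual product-form-of-the-inverse convention (which is
    # what the two helpers above implement). All names below (B, a, b, p) are illustrative only.
    rng = np.random.default_rng(0)
    B = rng.random((4, 4)) + 4 * np.eye(4)    # a well-conditioned basis matrix
    a = rng.random(4)                         # incoming column that replaces column p of B
    b = rng.random(4)                         # right-hand side
    p = 2
    d = np.linalg.solve(B, a)                 # d = B^{-1} a
    eta = to_eta(d.copy(), p)                 # eta vector encoding the basis change
    x = ftran(np.linalg.solve(B, b), eta, p)  # update the old solution B^{-1} b via the eta vector
    B_new = B.copy()
    B_new[:, p] = a
    assert np.allclose(x, np.linalg.solve(B_new, b))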
| 4,045 | 40.71134 | 136 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/equation_tools/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/vector_list.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class vector_list():
def __init__(self, vec=None):
if vec is None:
self.data = []
self.sizes = []
self.total_size = 0
else:
self.data = [vec]
self.sizes = [vec.size]
self.total_size = vec.size
def insert(self, pos, vec):
self.data.insert(pos, vec)
self.sizes.insert(pos, vec.size)
self.total_size += vec.size
def delete(self, pos_from, pos_to):
for i in range(pos_from, pos_to):
del self.data[pos_from]
self.total_size -= self.sizes.pop(pos_from)
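
if __name__ == '__main__':
    # Editor's sketch (illustrative, not part of the original module): how insert/delete keep
    # the sizes list and total_size in step with the stored vectors.
    import numpy as np
    vl = vector_list(np.arange(3))
    vl.insert(1, np.arange(5))
    assert vl.sizes == [3, 5] and vl.total_size == 8
    vl.delete(0, 1)                # drop the first stored vector
    assert vl.sizes == [5] and vl.total_size == 5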
| 1,184 | 30.184211 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/setup.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
import sys
'''
To compile this (C++ code) in cython on Mac:
$ brew install cliutils/apple/libomp
$ export CC="/usr/bin/clang++ -Xpreprocessor -fopenmp -stdlib=libc++"
$ python setup.py build_ext --inplace
'''
if sys.platform == 'darwin':
extra_compile_args = []
extra_link_args = ["-lomp"]
else:
extra_compile_args = ["/openmp"]
extra_link_args = ["/openmp"]
setup(
name='state_tools lib',
ext_modules=cythonize(
[Extension("state_tools", ["state_tools.pyx"], include_dirs=[numpy.get_include()], extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension("state_collide", ["state_collide.pyx"], include_dirs=[numpy.get_include()], extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args)]),
zip_safe=False
)
| 1,498 | 33.068182 | 133 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/loc_min_storage.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .vector_list import vector_list
from .state_tools import get_right_loc_min, get_loc_min, get_prim_loc_mins, get_dual_loc_mins
class loc_min_storage():
def __init__(self, dx, dq):
self.dx_min = vector_list(get_right_loc_min(dx[:, 0]))
self.dq_min = vector_list(get_right_loc_min(dq[:, 0]))
def update_caseI(self, N1, N2, dx, dq):
if N1 == -1:
self.dx_min.delete(0, N2)
self.dq_min.delete(0, N2 + 1)
self.dq_min.insert(0, get_right_loc_min(dq[:, 0]))
elif N2 is None:
self.dx_min.delete(N1, len(self.dx_min.data))
self.dq_min.delete(N1+1, len(self.dq_min.data))
self.dx_min.insert(N1, get_right_loc_min(dx[:,-1]))
else:
self.dx_min.delete(N1, N2)
self.dq_min.delete(N1+1, N2+1)
self.dx_min.insert(N1, get_loc_min(dx[:, N1], dx[:, N1+1]))
self.dq_min.insert(N1+1, get_loc_min(dq[:, N1+1], dq[:, N1]))
#'#@profile
def update_caseII(self, N1, N2, Nnew, dx, dq):
if N1 == -1:
self.dx_min.delete(0, N2)
self.dq_min.delete(0, N2 + 1)
ds, lens = get_prim_loc_mins(dx[:,:Nnew+1])
for i, l in enumerate(lens):
self.dx_min.insert(i, ds[:l,i])
ds, lens = get_dual_loc_mins(dq[:, :Nnew+1])
for i, l in enumerate(lens):
self.dq_min.insert(i, ds[:l,i])
self.dq_min.insert(0, get_right_loc_min(dq[:, 0]))
elif N2 is None:
self.dx_min.delete(N1, len(self.dx_min.data))
self.dq_min.delete(N1+1, len(self.dq_min.data))
self.dx_min.insert(N1, get_right_loc_min(dx[:,-1]))
ds, lens = get_prim_loc_mins(dx[:,-Nnew-1:])
for i, l in enumerate(lens):
self.dx_min.insert(N1 + i, ds[:l,i])
ds, lens = get_dual_loc_mins(dq[:,-Nnew-1:])
for i, l in enumerate(lens):
self.dq_min.insert(N1 + i + 1, ds[:l,i])
else:
self.dx_min.delete(N1, N2)
self.dq_min.delete(N1 +1, N2+1)
ds, lens = get_prim_loc_mins(dx[:,N1:N1+Nnew+2])
for i, l in enumerate(lens):
self.dx_min.insert(N1 + i, ds[:l,i])
ds, lens = get_dual_loc_mins(dq[:,N1:N1+Nnew+2])
for i, l in enumerate(lens):
self.dq_min.insert(N1 + i + 1, ds[:l,i])
def update_primal(self, N1, N2, dv):
if N1 ==-1:
self.dx_min.delete(0, N2)
if dv is not None:
ds, lens = get_prim_loc_mins(dv)
for i,l in enumerate(lens):
self.dx_min.insert(i, ds[:l])
elif N2 is None:
self.dx_min.delete(N1, len(self.dx_min.data))
self.dx_min.insert(N1, get_right_loc_min(dv[:,-1]))
if dv.shape[1] > 1:
ds, lens = get_prim_loc_mins(dv[:,:-1])
for i,l in enumerate(lens):
self.dx_min.insert(N1+i, ds[:l])
else:
self.dx_min.delete(N1, N2)
ds, lens = get_prim_loc_mins(dv)
for i, l in enumerate(lens):
self.dx_min.insert(N1 + i, ds[:l])
def update_dual(self, N1, N2, dv):
if N1 == -1:
self.dq_min.delete(0, N2 + 1)
self.dq_min.insert(0, get_right_loc_min(dv[:,-1]))
if dv.shape[1] > 1:
ds, lens = get_dual_loc_mins(dv[:, :-1])
for i, l in enumerate(lens):
self.dq_min.insert(i+1, ds[:l])
elif N2 is None:
self.dq_min.delete(N1, len(self.dq_min.data))
if dv is not None:
ds, lens = get_dual_loc_mins(dv)
for i, l in enumerate(lens):
self.dq_min.insert(N1+i, ds[:l])
else:
self.dq_min.delete(N1, N2)
ds, lens = get_dual_loc_mins(dv)
for i, l in enumerate(lens):
self.dq_min.insert(N1 + i, ds[:l])
| 4,596 | 40.790909 | 93 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/calc_states.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .state_tools import calc_state_prim, calc_state_dual, calc_specific_state_prim, calc_specific_state_dual
from collections import namedtuple
State = namedtuple('State', ['x', 'del_x', 'q', 'del_q'])
#'#@profile
def calc_states(dx_tuple, dq_tuple, param_line, tau, dtau, checked = False):
K1 = dx_tuple[0].shape[0]
J1 = dq_tuple[0].shape[0]
# TODO: parallelize
x, del_x = _calc_states(K1, param_line.x_0, param_line.del_x_0, tau, dtau, dx_tuple, True, checked)
q, del_q = _calc_states(J1, param_line.q_N, param_line.del_q_N, tau, dtau, dq_tuple, False, checked)
return x, del_x, q, del_q
def _calc_states(vdim, state0, del_state0, tau, dtau, dstate_tuple, is_primal, checked = False):
if vdim > 0:
if is_primal:
state = calc_state_prim(dstate_tuple[0], dstate_tuple[1], dstate_tuple[2], tau, state0, checked)
del_state = calc_state_prim(dstate_tuple[0], dstate_tuple[1], dstate_tuple[2], dtau, del_state0, checked)
else:
# TODO: parallelize
state = calc_state_dual(dstate_tuple[0], dstate_tuple[1], dstate_tuple[2], tau, state0, checked)
del_state = calc_state_dual(dstate_tuple[0], dstate_tuple[1], dstate_tuple[2], dtau, del_state0, checked)
    else:
        # guard: no state variables of this kind, so there is nothing to compute
        state, del_state = None, None
    return state, del_state
def check_state(state, tolerance, is_primal = True):
test1 = state < -tolerance
if np.any(test1):
print('Negative ' + ('primal' if is_primal else 'dual') + ' state! ', state.min())
print(np.where(test1))
return False
return True
def calc_specific_state(n, i, is_primal, is_del, dx_tuple, dq_tuple, param_line, tau, dtau):
if is_primal:
if is_del:
return calc_specific_state_prim(n, i, dx_tuple[0], dx_tuple[1], dx_tuple[2], dtau, param_line.del_x_0)
else:
return calc_specific_state_prim(n, i, dx_tuple[0], dx_tuple[1], dx_tuple[2], tau, param_line.x_0)
else:
if is_del:
return calc_specific_state_dual(n, i, dq_tuple[0], dq_tuple[1], dq_tuple[2], dtau, param_line.del_q_N)
else:
return calc_specific_state_dual(n, i, dq_tuple[0], dq_tuple[1], dq_tuple[2], tau, param_line.q_N)
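
if __name__ == '__main__':
    # Editor's sketch (assumes the compiled state_tools extension is built, since this module
    # imports it at the top): check_state only rejects states with entries below -tolerance,
    # so tiny negative round-off values are still accepted. The arrays below are illustrative.
    ok = check_state(np.array([[0.0, -1e-13], [0.5, 2.0]]), 1e-12)
    bad = check_state(np.array([[-1.0, 0.0]]), 1e-12)
    assert ok and not bad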
| 2,784 | 46.20339 | 117 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/state_tools/calc_statecollide.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .state_tools import get_rz_bb
from .state_collide import calc_state_ratio_prim, calc_state_ratio_dual
#'#@profile
def calc_statecollide(klist, jlist, state, raw_dx, raw_dq, param_line, loc_min, has_no_state, tolerance):
# Calculates time and variable for which state shrinks to zero, and performs testing
# problem result = 0 Ok
# result = 1 immediate collision data = TODO
# result = 2 multiple states hit zero data = TODO
problem = {'result': 0, 'data': []}
    #TODO: parallelize?
if has_no_state:
bb_x, kk_x, nn_x = calc_state_ratio_prim(raw_dx[0], raw_dx[1], raw_dx[2], state.tau, state.dtau, param_line.x_0,
param_line.del_x_0, loc_min.dx_min.data, loc_min.dx_min.sizes, state.del_x, state.x, 0)
bb_q, kk_q, nn_q = calc_state_ratio_dual(raw_dq[0], raw_dq[1], raw_dq[2], state.tau, state.dtau, param_line.q_N,
param_line.del_q_N, loc_min.dq_min.data, loc_min.dq_min.sizes, state.del_q, state.q, 0)
else:
bb_x, kk_x, nn_x = get_rz_bb(state.del_x[:, 1:], state.x[:, 1:], loc_min.dx_min.data, loc_min.dx_min.sizes)
bb_q, kk_q, nn_q = get_rz_bb(state.del_q[:, :-1], state.q[:, :-1], loc_min.dq_min.data, loc_min.dq_min.sizes)
if bb_x > bb_q:
if bb_x == 0:
#print(kk_x, nn_x)
return [np.inf, 0, 0], problem
else:
test1 = 1. / bb_x
#nn = nn_x - 1 #because we have no x_0
nn = nn_x
vv = klist[kk_x]
if test1 <= -tolerance:
return [], problem
elif abs(test1) < tolerance:
print('immediate collision',nn,vv)
problem['result'] = 1
return [test1, nn, vv], problem
else: # test1 >= tolerance
bb = bb_x
else:
if bb_q == 0:
#print(kk_q, nn_q)
return [np.inf, 0, 0], problem
else:
nn = nn_q - 1
vv = -jlist[kk_q]
test1 = 1. / bb_q
if test1 <= -tolerance:
return [], problem
elif abs(test1) < tolerance:
print('immediate collision',nn,vv)
problem['result'] = 1
return [test1, nn, vv], problem
else: # test1 >= tolerance
bb = bb_q
# TODO: this check now impossible
# test2 = np.add(np.divide(rz_x, bb, where=w_x), -1.0, where=w_x)
# zstates = np.less(np.fabs(test2, where=w_x), tolerance, where = w_x, out=w_x)
# sz_x = np.sum(zstates)
# ###
# test2 = np.add(np.divide(rz_q, bb, where=w_q), -1.0, where=w_q)
# zstates = np.less(np.fabs(test2, where=w_q), tolerance, where = w_q, out=w_q)
# sz_q = np.sum(zstates)
# #end
# if sz_x + sz_q > 1:
# print('multiple states hit zero\n')
# problem['result'] = 2
# return [test1, nn, vv], problem
# else:
return [test1, nn, vv], problem
| 3,664 | 42.117647 | 130 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/setup.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
import sys
'''
To compile this (C code) in cython on Mac:
$ brew install gcc
$ export CC=/usr/local/bin/gcc-9
$ python setup.py build_ext --inplace
'''
if sys.platform == 'darwin':
extra_compile_args = []#["-fopenmp"]
extra_link_args = ["-lomp"]
else:
extra_compile_args = ["/openmp"]
extra_link_args = ["/openmp"]
setup(
name='pivot app',
ext_modules=cythonize([Extension("cy_lp_tools", ["cy_lp_tools.pyx"], include_dirs=[numpy.get_include()], extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args)]),
zip_safe=False
)
| 1,253 | 28.162791 | 147 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/pivot.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .matlab_utils import find
from .cy_lp_tools import cy_pivot, copy_pivot
#from scipy.linalg.blas import dger
def pivot_ij(dct, i, j, tmp_dict = None, counter: list = None):
if counter is not None:
counter[0] += 1
if tmp_dict is None:
out_ = dct.prim_name[i]
in_ = dct.dual_name[j]
dct.prim_name[i] = in_
dct.dual_name[j] = out_
cy_pivot(dct.simplex_dict, i, j)
return dct, in_, out_
else:
tmp_dict.prim_name = dct.prim_name.copy()
tmp_dict.dual_name = dct.dual_name.copy()
out_ = tmp_dict.prim_name[i]
in_ = tmp_dict.dual_name[j]
tmp_dict.prim_name[i] = in_
tmp_dict.dual_name[j] = out_
copy_pivot(dct.simplex_dict, i, j, tmp_dict.simplex_dict)
return tmp_dict, in_, out_
def pivot_mn(dct, m, n, tmp_dict = None):
i = find(dct.prim_name == m)
j = find(dct.dual_name == n)
if i.size != 1 or j.size != 1:
raise Exception('Bad pivot names!')
return pivot_ij(dct, i, j, tmp_dict)[0]
def signed_pivot_ij(dct, ps, ds, i, j, tmp_dict = None):
sam = ps[i]
ps[i] = - ds[j]
ds[j] = - sam
return pivot_ij(dct, i, j, tmp_dict), ps, ds
def signed_pivot_mn(dct, ps, ds, m, n, tmp_dict = None):
i = find(dct.prim_name == m)
j = find(dct.dual_name == n)
if i.size != 1 or j.size != 1:
raise Exception('Bad pivot names!')
return signed_pivot_ij(dct, ps, ds, i, j, tmp_dict)
| 2,036 | 34.736842 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/in_out_pivot.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class in_out_pivot():
__slots__ =['in_', 'out_']
def __init__(self):
self.in_ = set()
self.out_ = set()
def pivot(self, in_, out_):
"""Normalizes the in_ and out_ according the active pivot.
Parameters
----------
in_: int
name of incoming variable for the basis.
out_: int
name of the outgoing variable for the basis.
"""
if in_ in self.out_:
self.out_.remove(in_)
else:
self.in_.add(in_)
if out_ in self.in_:
self.in_.remove(out_)
else:
self.out_.add(out_)
def extr(self, set_out_, set_in_):
"""Update the internal in_ and out_ members with names of variables that changed.
:param set_out_: set
Set containing the names of the variables that left the basis
:param set_in_: set
Set containing the names of the variables that entered the basis
"""
for p in self.out_:
if p in set_in_:
set_in_.remove(p)
else:
set_out_.add(p)
for p in self.in_:
if p in set_out_:
set_out_.remove(p)
else:
set_in_.add(p)
self.in_ = set_out_
self.out_ = set_in_
| 1,906 | 30.262295 | 89 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/simplex_procedures.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .matlab_utils import find
from .LP_formulation import LP_formulation
from .pivot import pivot_ij, signed_pivot_ij
from .in_out_pivot import in_out_pivot
from .cy_lp_tools import prim_ratio_test, dual_ratio_test, find_i, signed_prim_ratio_test, signed_dual_ratio_test
def simplex_procedures(dct, ps, ds, tolerance = 0, res_dct = None):
err = dict()
err['result'] = 0
mm = dct.simplex_dict.shape[0]
nn = dct.simplex_dict.shape[1]
pivots = in_out_pivot()
pneg = find(ps == -1)
while pneg.size > 0:
i = pneg[0]
if tolerance == 0:
cond = dct.simplex_dict[i + 1, 1:] != 0
else:
cond = np.absolute(dct.simplex_dict[i + 1, 1:]) > tolerance
jj = find(np.logical_and(ds == -1, cond))
if jj.size > 0:
j = jj[0]
else:
mat = dct.simplex_dict[i + 1, 1:] / dct.simplex_dict[0, 1:]
if dct.simplex_dict[i + 1, 0] > 0:
j = np.argmax(mat * (ds != 1))
m = mat[j]
else:
j = np.argmin(mat * (ds != 1))
m = -mat[j]
if m <=0:
jj = find(dct.simplex_dict[i + 1, 1:])
if jj.size > 0:
j = jj[0]
else:
raise Exception('*** No pivot available')
(dct, in_, out_), ps, ds = signed_pivot_ij(dct, ps, ds, i, j, res_dct)
pivots.pivot(in_, out_)
res_dct = None
pneg = find(ps == -1)
dneg = find(ds == -1)
while dneg.size > 0:
j = dneg[0]
#TODO: this should be double-checked
i = find_i(dct.simplex_dict, j, ps, 0.)
# if tolerance == 0:
# cond = dct.simplex_dict[1:, j + 1] != 0
# else:
# cond = np.absolute(dct.simplex_dict[1:, j + 1]) > tolerance
# ii = find(np.logical_and(ps == -1, cond))
# if ii.size > 0:
# i = ii[0]
# else:
# mat = -dct.simplex_dict[1:, j + 1] / dct.simplex_dict[1:, 0]
# if dct.simplex_dict[0, j + 1] < 0:
# i = np.nanargmax(mat * (ps != 1))
# print('max_pivot:', i, j)
# test = True
# m = mat[i]
# else:
# i = np.nanargmin(mat * (ps != 1))
# print('min_pivot:', i, j)
# test = True
# m = -mat[i]
# if m <=0:
# ii = find(dct.simplex_dict[1:, j+1])
# if ii.size > 0:
# i = ii[0]
# else:
# raise Exception('*** No pivot available')
if i < 0:
raise Exception('*** No pivot available')
# if i1 != i:
# raise Exception('Incorrect i')
(dct, in_, out_), ps, ds= signed_pivot_ij(dct, ps, ds, i, j, res_dct)
pivots.pivot(in_, out_)
res_dct = None
dneg = find(ds == -1)
ptest = find(np.logical_and(ps == 0, dct.simplex_dict[1:, 0] < 0))
dtest = find(np.logical_and(ds == 0, dct.simplex_dict[0, 1:] < 0))
if ptest.size > 0 and dtest.size == 0:
while ptest.size > 0:
i = ptest[0]
j = signed_prim_ratio_test(dct.simplex_dict, i, ds, tolerance) - 1
if j < -1:
dct.simplex_dict[0, 0] = -np.inf
err['result'] = 1
err['message'] = '*** problem is primal infeasible'
return dct, ps, ds, pivots, err
(dct, in_, out_), ps, ds = signed_pivot_ij(dct, ps, ds, i, j, res_dct)
pivots.pivot(in_, out_)
res_dct = None
ptest = find(np.logical_and(ps == 0, dct.simplex_dict[1:, 0] < 0))
elif ptest.size == 0 and dtest.size > 0:
while dtest.size > 0:
j = dtest[0]
i = signed_dual_ratio_test(dct.simplex_dict, j, ps)-1
if i < -1:
dct.simplex_dict[0, 0] = np.inf
err['result'] = 2
err['message'] = '*** problem is dual infeasible'
return dct, ps, ds, pivots, err
(dct, in_, out_), ps, ds = signed_pivot_ij(dct, ps, ds, i, j, res_dct)
pivots.pivot(in_, out_)
res_dct = None
dtest = find(np.logical_and(ds == 0, dct.simplex_dict[0, 1:] < 0))
elif ptest.size > 0 and dtest.size > 0:
B = np.zeros((mm+1,nn+1), order='C')
tmp_matrix = np.zeros_like(B)
B[:-1,-1:] = np.random.rand(mm, 1) + 1
B[-1:,:-1] = np.random.rand(1, nn) + 1
B[:-1, :-1] = dct.simplex_dict
mat = np.divide(-dct.simplex_dict[0, 1:], B[-1, 1:-1], out=np.zeros_like(dct.simplex_dict[0, 1:]), where=np.logical_and(B[-1, 1:-1] > 0, ds != 1) )
j = np.argmax(mat)
mu1 = mat[j]
mat = np.divide(-dct.simplex_dict[1:, 0], B[1:-1, -1], out=np.zeros_like(dct.simplex_dict[1:, 0]), where=np.logical_and(B[1:-1, -1] > 0, ps != 1) )
i = np.argmax(mat)
mu2 = mat[i]
mu = max(mu1,mu2)
from .LP_formulation import LP_formulation
dct2 = LP_formulation(B, dct.prim_name.copy(), dct.dual_name.copy())
while mu > 0:
if mu1 > mu2:
div = dct2.simplex_dict[1:-1, 0] + mu * dct2.simplex_dict[1:-1, -1]
mat = np.divide(dct2.simplex_dict[1:-1, j+1], div, out=np.zeros_like(dct2.simplex_dict[1:-1, j+1]), where= np.logical_and(div !=0, ps != 1))
i = np.argmax(mat)
if mat[i] <= 0:
dct2.simplex_dict[0, 0] = np.inf
err['result'] = 2
err['message'] = '*** problem is dual infeasible'
return LP_formulation(np.ascontiguousarray(dct2.simplex_dict[:-1,:-1]), dct2.prim_name, dct2.dual_name), ps, ds, pivots, err
else:
div = dct2.simplex_dict[0, 1:-1] + mu * dct2.simplex_dict[-1, 1:-1]
mat = np.divide(-dct2.simplex_dict[i + 1, 1:-1], div, out=np.zeros_like(dct2.simplex_dict[i + 1, 1:-1]), where= np.logical_and(div !=0, ds != 1))
j = np.argmax(mat)
if mat[j] <= 0:
dct2.simplex_dict[0, 0] = - np.inf
err['result'] = 1
err['message'] = '*** problem is primal infeasible'
return LP_formulation(np.ascontiguousarray(dct2.simplex_dict[:-1,:-1]), dct2.prim_name, dct2.dual_name), ps, ds, pivots, err
(dct2, in_, out_), ps, ds = signed_pivot_ij(dct2, ps, ds, i, j)
pivots.pivot(in_, out_)
mat = np.divide(-dct2.simplex_dict[0, 1:-1], dct2.simplex_dict[-1, 1:-1], out=np.zeros_like(dct2.simplex_dict[0, 1:-1]), where=np.logical_and(dct2.simplex_dict[-1, 1:-1] > 0, ds != 1))
j = np.argmax(mat)
mu1 = mat[j]
mat = np.divide(-dct2.simplex_dict[1:-1, 0], dct2.simplex_dict[1:-1, -1], out=np.zeros_like(dct2.simplex_dict[1:-1, 0]), where=np.logical_and(dct2.simplex_dict[1:-1, -1] > 0, ps != 1))
i = np.argmax(mat)
mu2 = mat[i]
mu = max(mu1, mu2)
dct = LP_formulation(np.ascontiguousarray(dct2.simplex_dict[:-1,:-1]), dct2.prim_name, dct2.dual_name)
return dct, ps, ds, pivots, err
def unsigned_simplex(lp: LP_formulation, tolerance: float = 1E-12, counter: list = None) -> (LP_formulation, dict):
err = dict()
err['result'] = 0
ptest = find(lp.simplex_dict[1:, 0] < -tolerance)
dtest = find(lp.simplex_dict[0, 1:] < -tolerance)
if ptest.size > 0 and dtest.size == 0:
while ptest.size > 0:
i = ptest[0]
j = prim_ratio_test(lp.simplex_dict, i, tolerance=tolerance) - 1
if j < -1:
lp.simplex_dict[0, 0] = -np.inf
err['result'] = 1
err['message'] = '*** problem is primal infeasible'
return lp, err
lp, in_, out_ = pivot_ij(lp, i, int(j), counter=counter)
ptest = find(lp.simplex_dict[1:, 0] < -tolerance)
elif ptest.size == 0 and dtest.size > 0:
while dtest.size > 0:
j = dtest[0]
i = dual_ratio_test(lp.simplex_dict, j, tolerance=tolerance) - 1
if i < -1:
lp.simplex_dict[0, 0] = np.inf
err['result'] = 2
err['message'] = '*** problem is dual infeasible'
return lp, err
lp, in_, out_ = pivot_ij(lp, int(i), j, counter=counter)
dtest = find(lp.simplex_dict[0, 1:] < -tolerance)
elif ptest.size > 0 and dtest.size > 0:
mm, nn = lp.simplex_dict.shape
B = np.zeros((mm+1,nn+1), order='C')
B[:-1, -1:] = np.random.rand(mm, 1) + 1
B[-1:, :-1] = np.random.rand(1, nn) + 1
B[:-1, :-1] = lp.simplex_dict
lp_form2 = LP_formulation(B, lp.prim_name, lp.dual_name)
mat = np.divide(-lp.simplex_dict[0, 1:], B[-1, 1:-1], out=np.zeros_like(lp.simplex_dict[0, 1:]), where=B[-1, 1:-1] > 0)
j = np.argmax(mat)
mu1 = mat[j]
mat = np.divide(-lp.simplex_dict[1:, 0], B[1:-1, -1], out=np.zeros_like(lp.simplex_dict[1:, 0]), where=B[1:-1, -1] > 0)
i = np.argmax(mat)
mu2 = mat[i]
mu = max(mu1,mu2)
while mu > 0:
if mu1 > mu2:
div = lp_form2.simplex_dict[1:-1, 0] + mu * lp_form2.simplex_dict[1:-1, -1]
mat = np.divide(lp_form2.simplex_dict[1:-1, j+1], div, out=np.zeros_like(lp_form2.simplex_dict[1:-1, j+1]), where= div !=0)
i = np.argmax(mat)
if mat[i] <= 0:
B[0, 0] = np.inf
err['result'] = 2
err['message'] = '*** problem is dual infeasible'
lp.simplex_dict = np.ascontiguousarray(lp_form2.simplex_dict[:-1, :-1])
return lp, err
else:
div = lp_form2.simplex_dict[0, 1:-1] + mu * lp_form2.simplex_dict[-1, 1:-1]
mat = np.divide(-lp_form2.simplex_dict[i + 1, 1:-1], div, out=np.zeros_like(lp_form2.simplex_dict[i + 1, 1:-1]), where= div !=0)
j = np.argmax(mat)
if mat[j] <= 0:
B[0, 0] = - np.inf
err['result'] = 1
err['message'] = '*** problem is primal infeasible'
lp.simplex_dict = np.ascontiguousarray(lp_form2.simplex_dict[:-1, :-1])
return lp, err
lp_form2, in_, out_ = pivot_ij(lp_form2, int(i), int(j), counter=counter)
mat = np.divide(-lp_form2.simplex_dict[0, 1:-1], lp_form2.simplex_dict[-1, 1:-1],
out=np.zeros_like(lp_form2.simplex_dict[0, 1:-1]), where=lp_form2.simplex_dict[-1, 1:-1] > 0)
j = np.argmax(mat)
mu1 = mat[j]
mat = np.divide(-lp_form2.simplex_dict[1:-1, 0], lp_form2.simplex_dict[1:-1, -1], out=np.zeros_like(B[1:-1, 0]), where=lp_form2.simplex_dict[1:-1, -1] > 0)
i = np.argmax(mat)
mu2 = mat[i]
mu = max(mu1, mu2)
lp.simplex_dict = np.ascontiguousarray(lp_form2.simplex_dict[:-1, :-1])
lp.prim_name = lp_form2.prim_name
lp.dual_name = lp_form2.dual_name
return lp, err
| 11,852 | 46.60241 | 196 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/LP_formulation.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .simplex_procedures import simplex_procedures
from .cy_lp_tools import get_sign, get_sign1, partial_pivotII
from .pivot import signed_pivot_ij
class LP_formulation:
"""
LP formulation of the problem.
Attributes
----------
    simplex_dict: np.ndarray
        the simplex dictionary (tableau) stored as a dense array.
    prim_name: np.ndarray
        array of "names" of the primal variables
    dual_name: np.ndarray
        array of "names" of the dual variables
    prim_sign: np.ndarray
        array of "signs" of the primal variables
    dual_sign: np.ndarray
        array of "signs" of the dual variables
prim_zvars: set
a set of "names" of primal variables with zero values
dual_zvars: set
a set of "names" of dual variables with zero values
Methods
-------
copy()
Makes a copy of this object.
compute_zvars(tolerance)
Computes updated contents of prim_zvars and dual_zvars.
"""
__slots__ = ["simplex_dict", "prim_name", "dual_name", "prim_sign", "dual_sign", "prim_zvars", "dual_zvars"]
def __init__(self, simplex_dict, prim_name, dual_name, prim_sign=None, dual_sign=None, prim_zvars=set(), dual_zvars=set()):
self.simplex_dict, self.prim_name, self.dual_name, self.prim_sign, self.dual_sign, self.prim_zvars, self.dual_zvars= \
simplex_dict, prim_name, dual_name, prim_sign, dual_sign, prim_zvars, dual_zvars
def __copy__(self):
prim_zvars = self.prim_zvars.copy() if self.prim_zvars else set()
dual_zvars = self.dual_zvars.copy() if self.dual_zvars else set()
if self.prim_sign is None:
return LP_formulation(self.simplex_dict.copy(), self.prim_name.copy(), self.dual_name.copy(),
prim_zvars=prim_zvars, dual_zvars=dual_zvars)
else:
return LP_formulation(self.simplex_dict.copy(), self.prim_name.copy(), self.dual_name.copy(),
self.prim_sign.copy(), self.dual_sign.copy(), prim_zvars, dual_zvars)
def copy(self):
"""Makes a copy of this object.
"""
return self.__copy__()
def compute_zvars(self, tolerance):
"""Computes updated contents of prim_zvars and dual_zvars.
Parameters
----------
tolerance: float
The numerical tolerance for floating point comparisons.
"""
self.dual_zvars = {self.dual_name[i] for i, v in enumerate(self.simplex_dict[0, 1:]) if v < tolerance}
self.prim_zvars = {self.prim_name[i] for i, v in enumerate(self.simplex_dict[1:, 0]) if v < tolerance}
def solve_ratesLP(LP_form, Kset, nJset, bases_mm, tolerance=0, build_sign=True):
"""Solve the 'Rates LP' for the solution.
Parameters
----------
LP_form: LP_formulation
Represents the LP formulation of the problem.
Kset: np.ndarray
Array of K indexes of {k: x_k > 0} where dx_k can be either positive or negative.
nJset: np.ndarray
Array of J indexes of {j: q_j > 0} where dq_j can be either positive or negative.
bases_mm: bases_memory_manager
Bases memory manager for caching bases for reuse.
tolerance: float
The numerical tolerance for floating point comparisons.
build_sign: bool
Builds the signs when True.
Returns
-------
LP_formulation, in_out_pivot, dict
LP_form (updated), pivots, error
"""
if build_sign:
get_sign1(LP_form.prim_name, Kset, nJset, 1, bases_mm.ps)
get_sign1(LP_form.dual_name, Kset, nJset, -1, bases_mm.ds)
# part = False
# if v1 is not None:
# ok, prim_vars, dual_vars, i, j = partial_pivotII(LP_form.simplex_dict, LP_form.prim_name, LP_form.dual_name, bases_mm.ps, bases_mm.ds, v1, in_diff[0], in_diff[1])
# if ok == 1:
# part = True
# prim_name, dual_name = LP_form.prim_name, LP_form.dual_name
tmp_dict = bases_mm.pop()
if tmp_dict is None:
tmp_dict = LP_formulation(np.empty_like(LP_form.simplex_dict), None, None)
#LP_form, ps, ds, pivots, err = simplex_procedures(LP_form.copy(), prim_sign, dual_sign, tolerance)
#else:
LP_form, ps, ds, pivots, err = simplex_procedures(LP_form, bases_mm.ps, bases_mm.ds, tolerance, tmp_dict)
LP_form.compute_zvars(tolerance)
# if part:
# dual_name = dual_name.copy()
# tmp = dual_name[j]
# dual_name[j] = prim_name[i]
# prim_name = prim_name.copy()
# prim_name[i] = tmp
# if np.setdiff1d(prim_name, LP_form.prim_name, assume_unique=True).shape[0] > 0 or\
# np.setdiff1d(dual_name, LP_form.dual_name, assume_unique=True).shape[0] > 0:
# if np.any(np.fabs(prim_vars[1:] - LP_form.simplex_dict[1:,0]) > 1E-10) or\
# np.any(np.fabs(dual_vars[1:] -LP_form.simplex_dict[0,1:]) > 1E-10):
# dual_name = dual_name.copy()
# tmp = dual_name[j]
# dual_name[j] = prim_name[i]
# prim_name = prim_name.copy()
# prim_name[i] = tmp
# print(np.any(LP_form.dual_name == v1), np.any(LP_form.dual_name == v2))
# print('????')
return LP_form, pivots, err
def partial_solve_caseII(LP_form, Kset, nJset, bases_mm, v1, in_diff):
get_sign1(LP_form.prim_name, Kset, nJset, 1, bases_mm.ps)
get_sign1(LP_form.dual_name, Kset, nJset, -1, bases_mm.ds)
ok, prim_vars, dual_vars, i, j = partial_pivotII(LP_form.simplex_dict, LP_form.prim_name, LP_form.dual_name,
bases_mm.ps, bases_mm.ds, v1, in_diff[0], in_diff[1])
return ok, prim_vars, dual_vars, i, j
def solve_simple_caseII(LP_form, Kset, nJset, bases_mm, v1, in_diff):
get_sign1(LP_form.prim_name, Kset, nJset, 1, bases_mm.ps)
get_sign1(LP_form.dual_name, Kset, nJset, -1, bases_mm.ds)
ok, prim_vars, dual_vars, i, j = partial_pivotII(LP_form.simplex_dict, LP_form.prim_name, LP_form.dual_name,
bases_mm.ps, bases_mm.ds, v1, in_diff[0], in_diff[1])
tmp_dict = bases_mm.pop()
if tmp_dict is None:
tmp_dict = LP_formulation(np.empty_like(LP_form.simplex_dict), None, None)
if ok==1:
(LP_form, in_, out_), ps, ds = signed_pivot_ij(LP_form, bases_mm.ps, bases_mm.ds, i, j, tmp_dict)
return ok, LP_form, (in_, out_), None
else:
LP_form, ps, ds, pivots, err = simplex_procedures(LP_form, bases_mm.ps, bases_mm.ds, 0, tmp_dict)
return ok, LP_form, pivots, err
def solve_LP(LP_form, ps, ds, tolerance=0):
LP_form, ps, ds, pivots, err = simplex_procedures(LP_form.copy(), ps, ds, tolerance)
return LP_form, err
def solve_LP_in_place(LP_form, ps, ds, tolerance=0):
LP_form, ps, ds, pivots, err = simplex_procedures(LP_form, ps, ds, tolerance)
return LP_form, err
def get_pivot(b1, b2, prim):
if prim:
return np.setdiff1d(b1.prim_name, b2.prim_name, assume_unique=True)
else:
return np.setdiff1d(b1.dual_name, b2.dual_name, assume_unique=True)
def get_value_by_name(basis, name, prim):
if prim:
return basis.simplex_dict[1:, 0][basis.prim_name == name][0]
else:
return basis.simplex_dict[0, 1:][basis.dual_name == name][0]
def get_dx_names(basis):
return basis.prim_name[basis.prim_name > 0]
def get_dq_names(basis):
return basis.dual_name[basis.dual_name < 0]
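
if __name__ == '__main__':
    # Editor's sketch (assumes the package's compiled extensions are built, since this module
    # imports them at the top, and that it is run inside the package, e.g. with python -m).
    # A tiny 2x2 simplex dictionary with hypothetical variable names: by the conventions used
    # above, positive names are primal (dx) variables and negative names are dual (dq) variables.
    D = np.array([[0.0, 1.0],
                  [2.0, 3.0]])
    lp = LP_formulation(D, np.array([4]), np.array([-7]))
    lp.compute_zvars(1e-12)
    assert lp.prim_zvars == set() and lp.dual_zvars == set()
    assert get_value_by_name(lp, 4, prim=True) == 2.0
    assert list(get_dx_names(lp)) == [4] and list(get_dq_names(lp)) == [-7]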
| 7,975 | 39.48731 | 172 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/lp_tools/matlab_utils.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def find(a):
return np.where(a)[0]
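
if __name__ == '__main__':
    # Editor's sketch: find returns the indices of the True entries, mirroring MATLAB's find.
    assert list(find(np.array([0, 3, 0, 7]) > 0)) == [1, 3]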
| 634 | 34.277778 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/MCQN_test_new.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLP import SCLP, SCLP_settings
from doe.data_generators.MCQN import generate_MCQN_data
from subroutines.utils import relative_to_project
from doe.results_producer import write_results_to_csv
K = 400
I = 40
import time
solver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management= False, suppress_printing = False)
settings = {'alpha_rate': 1, 'cost_scale':2, 'a_rate' : 0.05, 'sum_rate':0.95, 'nz': 0.5,
'gamma_rate':0, 'c_scale': 0, 'h_rate': 0.2}
seed = 1009
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
TT = 100
# import cProfile, pstats, io
# pr = cProfile.Profile()
#pr.enable()
result = {'servers': I, 'buffers': K, 'seed': seed}
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3/12 * TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
#pr.disable()
print(obj, err, maxT)
time1 = time.time() - start_time
print("--- %s seconds ---" % time1)
result['time1'] = time1
result['STEPCOUNT1'] = STEPCOUNT
start_time = time.time()
STEPCOUNT, pivot_problem = solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
print(obj, err, maxT)
time2 = time.time() - start_time
print("--- %s seconds ---" % time2)
result['time2'] = time2
result['STEPCOUNT2'] = STEPCOUNT
start_time = time.time()
STEPCOUNT, pivot_problem =solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
print(obj, err, maxT)
time3 = time.time() - start_time
print("--- %s seconds ---" % time3)
result['time3'] = time3
result['STEPCOUNT3'] = STEPCOUNT
start_time = time.time()
STEPCOUNT, pivot_problem = solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
print(obj, err, maxT)
time4 = time.time() - start_time
print("--- %s seconds ---" % time4)
result['time4'] = time4
result['STEPCOUNT4'] = STEPCOUNT
results = [result]
res_file = relative_to_project('online_results.csv')
write_results_to_csv(results, res_file)
# s = io.StringIO()
# ps = pstats.Stats(pr, stream=s)
# ps.print_stats()
# print(s.getvalue())
| 3,212 | 40.192308 | 141 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/MCQN_test_mpc.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import os
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLP import SCLP, SCLP_settings
from doe.data_generators.MCQN import generate_MCQN_data
from subroutines.utils import relative_to_project
from doe.results_producer import write_results_to_csv
K = 400
I = 40
import time
solver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management= False, suppress_printing = False)
settings = {'alpha_rate': 1, 'cost_scale':2, 'a_rate' : 0.05, 'sum_rate':0.95, 'nz': 0.5,
'gamma_rate':0, 'c_scale': 0, 'h_rate': 0.2}
seed = 1009
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
TT = 100
# import cProfile, pstats, io
# pr = cProfile.Profile()
#pr.enable()
result = {'servers': I, 'buffers': K, 'seed': seed}
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3/12 * TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
#pr.disable()
print(obj, err, maxT)
time1 = time.time() - start_time
print("--- %s seconds ---" % time1)
result['time1'] = time1
result['STEPCOUNT1'] = STEPCOUNT
t0 = 1/12 * TT
last_breakpoint = np.where(t<=t0)[0][-1]
delta_t = t0 - t[last_breakpoint]
new_x0 = x[:, last_breakpoint] + solution._state.dx[:, last_breakpoint] * delta_t + 0.1 * a * t0
start_time = time.time()
STEPCOUNT, pivot_problem = solution.recalculate(param_line, t0, 4/12 * TT, new_x0, solver_settings, 10E-11, mm = None)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
print(obj, err, maxT)
time2 = time.time() - start_time
print("--- %s seconds ---" % time2)
result['time2'] = time2
result['STEPCOUNT2'] = STEPCOUNT
alpha = new_x0
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3/12 * TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
#pr.disable()
print(obj, err, maxT)
time3 = time.time() - start_time
print("--- %s seconds ---" % time3)
result['time3'] = time3
result['STEPCOUNT3'] = STEPCOUNT
# start_time = time.time()
# STEPCOUNT, pivot_problem =solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)
# t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
# print(obj, err, maxT)
# time3 = time.time() - start_time
# print("--- %s seconds ---" % time3)
# result['time3'] = time3
# result['STEPCOUNT3'] = STEPCOUNT
# start_time = time.time()
# STEPCOUNT, pivot_problem = solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)
# t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
# print(obj, err, maxT)
# time4 = time.time() - start_time
# print("--- %s seconds ---" % time4)
# result['time4'] = time4
# result['STEPCOUNT4'] = STEPCOUNT
# results = [result]
# res_file = relative_to_project('online_results.csv')
# write_results_to_csv(results, res_file)
# # s = io.StringIO()
# # ps = pstats.Stats(pr, stream=s)
# # ps.print_stats()
# # print(s.getvalue())
| 3,851 | 40.419355 | 141 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/simple_reentrant_test.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLP import SCLP, SCLP_settings
from doe.data_generators.simple_reentrant import generate_simple_reentrant_data
from doe.data_generators.write_CPLEX_dat import write_CPLEX_dat
from doe.doe_utils import path_utils
from subroutines.utils import relative_to_project
# I - number of servers
# K - number of buffers
# settings - data generation parameters, including
# - \alpha_k = mean_alpha * alpha_rate1 + mean_alpha * U(0, alpha_rate2), where mean_alpha = 15 -15(k-1)/K initial amount of fluids
# - h_j ~ U(0, cost_scale) holding costs
# - c_j ~ U(0, c_scale) linearly changing control cost
# - there are other possible parameters changing distributions, etc.
I = 60
K = 1200
settings = {"c_scale": 0, "cost_scale": 10, "alpha_rate1": 0.8, "alpha_rate2": 0.45}
seed = 1000
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_simple_reentrant_data(seed, K, I, **settings)
# calculating total buffer cost for the target T:
# tot_buf_cost = h' \alpha T + h' a T^2/2
tot_buf_cost = total_buffer_cost[0]*TT+total_buffer_cost[1]*TT*TT/2.0
# solver_settings - parameters for the SCLP solver
solver_settings = SCLP_settings()
# set suppress_printing = False if you would like to see summary of each iteration
solver_settings.suppress_printing = True
solver_settings.memory_management = False
import time
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(False)
# problem objective is h'x(t) or h' \alpha T + h' a T^2/2 - V(SCLP)
pobj = tot_buf_cost - obj
print("SCLP objective:", obj, "Problem objective:", pobj, "steps:", STEPCOUNT, "intervals:", len(tau))
sol_time = time.time() - start_time
print("Solution time: %s seconds" % (sol_time))
print("----------------------------------------------")
# preparing CPLEX .dat file name
ps = {'K': K, 'I': I, 'T': TT}
for k, v in settings.items():
if isinstance(v, object) and hasattr(v, '__name__'):
ps[k] = v.__name__[:4]
else:
ps[k] = str(v)
# uses current directory change if you want to store CPLEX dat file in the specific directory
pu = path_utils('')
full_file_name = pu.get_CPLEX_data_file_name('simple_reentrant', **ps)
# writing .dat file for CPLEX
write_CPLEX_dat(full_file_name, TT, G, H, alpha, a, b, gamma, c, buffer_cost)
# next line requires CPLEX - comment this line if you have no CPLEX
from doe.cplex_integration.run_cplex_experiments import run_cplex_experiments
discretization = 100
# note on discretization .mod file names
# - discretization x1 main1xobj.mod
# - discretization x10 main10xobj.mod
# - discretization x100 main100xobj.mod
# - discretization x1000 main1000xobj.mod
cplex_results = run_cplex_experiments(pu.get_CPLEX_data_path(),
relative_to_project('doe/cplex_integration/mod_files/main' + str(discretization) +'xobj.mod'),
[full_file_name])
optimality_gap = cplex_results[0]['objective'] - pobj
relative_error = optimality_gap/abs(pobj)
relative_time = cplex_results[0]['time'] / sol_time
print("Discretization relative error:", relative_error, "discretization relative solution time:", relative_time)
| 3,998 | 46.607143 | 134 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/pivot_time_test.py
|
import sys
sys.path.append(r'C:\DataD\Work\CERBERO\CLP\SCLPsolver')
import os
import numpy as np
from subroutines.lp_tools.cy_lp_tools import copy_pivot
def relative_to_project(file_path):
if os.path.isabs(file_path):
return file_path
else:
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
return os.path.join(proj, file_path)
import time
A = np.load(relative_to_project('tests/data/DD.dat'), allow_pickle=True)
A = np.ascontiguousarray(A)
pn = np.load(relative_to_project('tests/data/pn.dat'), allow_pickle=True).astype(np.int32)[:,0]
dn = np.load(relative_to_project('tests/data/dn.dat'), allow_pickle=True).astype(np.int32)[:,0]
B=np.zeros_like(A)
pn1 = pn.copy()
dn1 = dn.copy()
start_time = time.time()
for i in range(2,22):
if i in [2,4,6,8,10,12,14,16,18,20]:
a,b= copy_pivot(A, pn, dn, i, i, B)
else:
a, b = copy_pivot(B, pn, dn, i, i, A)
print("--- %s seconds ---" % (time.time() - start_time))
# X = A.copy()
# A = np.load(relative_to_project('tests/data/DD.dat'), allow_pickle=True)
# B=np.zeros_like(A)
# #pn = np.load(relative_to_project('tests/data/pn.dat'), allow_pickle=True)[:,0]
# #dn = np.load(relative_to_project('tests/data/dn.dat'), allow_pickle=True)[:,0]
# start_time = time.time()
# for i in range(2,12):
# if i in [2,4,6,8,10]:
# a,b= copy_pivot3(A, pn, dn, i, i, B)
# else:
# a, b = copy_pivot3(B, pn, dn, i, i, A)
# print("--- %s seconds ---" % (time.time() - start_time))
# # print(np.any((A-X) >10E-10))
# print(np.any((A-X) < -10E-10))
# print(np.any((pn-pn1) >10E-10),np.any((dn-dn1) >10E-10), np.any((pn-pn1) <-10E-10),np.any((dn-dn1) <-10E-10))
| 1,695 | 36.688889 | 111 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/MCQN_test.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLP import SCLP, SCLP_settings
from doe.data_generators.MCQN import generate_MCQN_data
from doe.data_generators.write_CPLEX_dat import write_CPLEX_dat
from doe.doe_utils import path_utils
from subroutines.utils import relative_to_project
# I - number of servers
# K - number of buffers
# settings - data generation parameters, including
# - \alpha_k ~ U(0, alpha_rate) initial amount of fluids
# - a_k ~ U(0, alpha_rate) exogenous input rates
# - \sum_k p_{j,k} = sum_rate \forall j proportion of fluids kept in the system
# - nz proportion of non-zero p_{j,k}
# - h_j ~ U(0, cost_scale) holding costs
# - \gamma_j ~ U(0, gamma_rate) constant control cost
# - c_j ~ U(0, c_scale) linearly changing control cost
# - H_{i=s(j),j} ~ U(0, h_rate) service time for a single unit of job class j
# - there are other possible parameters changing distributions, etc.
I = 100
K = 1000
settings = {'alpha_rate': 1, 'cost_scale':2, 'a_rate' : 0.05, 'sum_rate':0.95, 'nz': 0.5,
'gamma_rate':0, 'c_scale': 0, 'h_rate': 0.2}
seed = 1000
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
TT = 100
# calculating total buffer cost for the target T:
# tot_buf_cost = h' \alpha T + h' a T^2/2
tot_buf_cost = total_buffer_cost[0]*TT+total_buffer_cost[1]*TT*TT/2.0
# solver_settings - parameters for the SCLP solver
solver_settings = SCLP_settings(find_alt_line =False)
# set suppress_printing = False if you would like to see summary of each iteration
solver_settings.suppress_printing = True
solver_settings.memory_management = False
import time
start_time = time.time()
# run simplex-type algorithm to solve SCLP
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT, solver_settings)
# extract detailed solution including primal and dual controls
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(False)
# problem objective is h'x(t) or h' \alpha T + h' a T^2/2 - V(SCLP)
pobj = tot_buf_cost - obj
print("SCLP objective:", obj, "Problem objective:", pobj, "steps:", STEPCOUNT, "intervals:", len(tau))
sol_time = time.time() - start_time
print("Solution time: %s seconds" % (sol_time))
print("----------------------------------------------")
# preparing CPLEX .dat file name
ps = {'K': K, 'I': I, 'T': TT}
for k, v in settings.items():
if isinstance(v, object) and hasattr(v, '__name__'):
ps[k] = v.__name__[:4]
else:
ps[k] = str(v)
# uses the current directory; change this if you want to store the CPLEX .dat file in a specific directory
pu = path_utils('')
full_file_name = pu.get_CPLEX_data_file_name('MCQN', **ps)
# writing .dat file for CPLEX
write_CPLEX_dat(full_file_name, TT, G, H, alpha, a, b, gamma, c, buffer_cost)
# next line requires CPLEX - comment this line if you have no CPLEX
from doe.cplex_integration.run_cplex_experiments import run_cplex_experiments
discretization = 10
# note on discretization .mod file names
# - discretization x1 main1xobj.mod
# - discretization x10 main10xobj.mod
# - discretization x100 main100xobj.mod
# - discretization x1000 main1000xobj.mod
cplex_results = run_cplex_experiments(pu.get_CPLEX_data_path(),
relative_to_project('doe/cplex_integration/mod_files/main' + str(discretization) +'xobj.mod'),
[full_file_name])
optimality_gap = cplex_results[0]['objective'] - pobj
relative_error = optimality_gap/abs(pobj)
relative_time = cplex_results[0]['time'] / sol_time
print("Discretization relative error:", relative_error, "discretization relative solution time:", relative_time)
| 4,389 | 46.204301 | 132 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/tests/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/pytests/MCQN_basic_test.py
|
import pytest
import os, sys
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLPsolver.SCLP import SCLP, SCLP_settings
from SCLPsolver.doe.data_generators.MCQN import generate_MCQN_data
seeds = range(1001, 1009)
@pytest.mark.parametrize("seed", seeds)
def test_basic_mcqn(seed):
K = 400
I = 40
import time
solver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management=False,
suppress_printing=False)
settings = {'alpha_rate': 1, 'cost_scale': 2, 'a_rate': 0.05, 'sum_rate': 0.95, 'nz': 0.5,
'gamma_rate': 0, 'c_scale': 0, 'h_rate': 0.2}
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
TT = 100
result = {'servers': I, 'buffers': K, 'seed': seed}
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3 / 12 * TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
assert obj is not None, "solution obj is None!"
assert 0 < maxT < TT, f"0 < {maxT} < {TT} failed"
assert len(t) > 0, f"len(t) is {len(t)}"
@pytest.mark.parametrize("seed", seeds)
def test_degenerate_mcqn(seed):
K = 400
I = 40
import time
solver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management=False,
suppress_printing=False)
settings = {'alpha_rate': 1, 'cost_scale': 2, 'a_rate': 0.05, 'sum_rate': 0.95, 'nz': 0.5,
'gamma_rate': 0, 'c_scale': 0, 'h_rate': 0.2}
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
a[0:4] = [0, 0, 0, 0]
c[6:8] = [0, 0]
TT = 100
result = {'servers': I, 'buffers': K, 'seed': seed}
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3 / 12 * TT, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
assert obj is not None, "solution obj is None!"
assert 0 < maxT < TT, f"0 < {maxT} < {TT} failed"
assert len(t) > 0, f"len(t) is {len(t)}"
| 2,366 | 37.177419 | 118 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/pytests/WorkloadPlacementPaper_test.py
|
import pandas as pd
import pytest
from scipy.integrate import quad
import matplotlib.pyplot as plt
import os, sys
import numpy as np
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLPsolver.SCLP import SCLP, SCLP_settings
from SCLPsolver.doe.data_generators.WorkloadPlacement import *
from SCLPsolver.doe.doe import *
seeds = [123]
# def integrate(f, low, up, intervals=200):
# tt = np.linspace(low, up, intervals+1)
# y = [f(t) for t in tt]
# F = (0.5*(y[0]+y[-1]) + np.sum(y[1:-1]))*(up-low)/intervals
# return F
def integrate(f, low, up):
val, est_err = quad(f,low,up)
return val
np.random.seed(seeds[0])
nparamsets = 10
ntests = 10
tolerance = 1e-3
epsilons = [0.01, 0.02, 0.05, 0.1, 0.2]
TT = 100
I = 10 # num servers
K = 100 # num buffers
J = K # num flows
lambda_min, lambda_max = 2.0, 5.0 # arrival rate range
cost_min, cost_max = 1.0, 2.0 # cost range
x0_min, x0_max = 10.0, 20.0 # initial buffer quantity range
mu_min, mu_max = 5.0, 25.0 # service rate range
means_df = pd.DataFrame({'epsilon': [], 'MeanSOA': [], 'MeanSEP':[], 'Improvement':[]})
@pytest.mark.parametrize("epsilon", epsilons)
def test_perturbed(epsilon):
aa = np.random.uniform(lambda_min, lambda_max, K) # arrival rates at buffers tasks per unit time
bb = np.ones(I) # cpu limit
cc = np.random.uniform(cost_min, cost_max, K) # cost per unit time of buffers
alpha0 = np.random.uniform(x0_min, x0_max, K) # initial buffer quantities
mu = np.random.uniform(mu_min, mu_max, K) # mean job service rates
tau = np.divide(1, mu)
# 1. Model 1, eta is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau,
alpha0,
False)
print(f'Model 1 G={G} H={H}')
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj1 = tot_buf_cost - SCLP_obj
eta = u[0:J, :]
u = np.multiply(eta.transpose(), mu).transpose()
u_m1 = u.copy()
t_m1 = t.copy()
print(f'Step 1 (Model 1): t={t} tau={tau} mu={mu} u={u} eta={eta}')
# 2. Model 2, u is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau,
alpha0,
True)
print(f'Model 2 G={G} H={H}')
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj2 = tot_buf_cost - SCLP_obj
u = u[0:J, :]
eta = np.multiply(u.transpose(), tau).transpose()
print(f'Step 2 (Model 2): t={t} tau={tau} mu={mu} u={u} eta={eta}')
# 3. make sure objectives of 1,2 are the same
print(f'Step 3: objectives obj1={real_obj1} obj2={real_obj2}')
assert abs(real_obj1 - real_obj2) < tolerance
t_print = tuple(range(0, TT + 1))
tau_t_1 = gen_uncertain_param(tau, (0, TT), (-epsilon, epsilon), uncertain_func=sin_uncertainty_low)
tau_t_2 = gen_uncertain_param(tau, (0, TT), (-epsilon, epsilon), uncertain_func=sin_uncertainty_low)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times','Palatino', 'New Century Schoolbook', 'Bookman', 'Computer Modern Roman']
plt.rcParams['text.usetex'] = True
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['axes.labelsize'] = 12
plt.clf()
plt.plot(t_print, [(tau_t_1[0](t), tau_t_2[0](t)) for t in t_print])
plt.axhline(y=tau[0], color='magenta', linestyle=':', xmin=0.05, xmax=0.95)
plt.ylim(tau[0] * (1-0.25), tau[0]*(1+0.25))
plt.xlabel('Time $t$')
plt.ylabel(r'Mean Service Time $\tau(t)$')
plt.grid(True)
plt.savefig(f'tau_t_{epsilon}.pdf')
# plt.show()
# return
results = list()
for nparam in range(nparamsets):
aa = np.random.uniform(2.0, 5.0, K) # arrival rates at buffers tasks per unit time
bb = np.ones(I) # cpu limit
cc = np.random.uniform(1.0, 2.0, K) # cost per unit time of buffers
alpha0 = np.random.uniform(10.0, 20.0, K) # initial buffer quantities
mu = np.random.uniform(5.0, 25.0, K) # mean job service rates
tau = np.divide(1, mu)
# 1. Model 1, eta is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau,
alpha0,
False)
print(f'Model 1 G={G} H={H}')
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj1 = tot_buf_cost - SCLP_obj
eta = u[0:J, :]
u = np.multiply(eta.transpose(), mu).transpose()
u_m1 = u.copy()
t_m1 = t.copy()
print(f'Step 1 (Model 1): t={t} tau={tau} mu={mu} u={u} eta={eta}')
# 2. Model 2, u is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau,
alpha0,
True)
print(f'Model 2 G={G} H={H}')
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj2 = tot_buf_cost - SCLP_obj
# 3. make sure objectives of 1,2 are the same
print(f'Step 3: objectives obj1={real_obj1} obj2={real_obj2}')
assert abs(real_obj1 - real_obj2) < tolerance
for nt in range(ntests):
## Bertsimas State of the Art
#
#
# 4. Define tau_j(t)
tau_t = gen_uncertain_param(tau, (0, TT), (-epsilon, epsilon), uncertain_func=sin_uncertainty_low)
# print(f'Step 4: uncertain tau({t_print})={np.array([list(tau_t[j](t) for j in range(J)) for t in t_print]).transpose()}')
# 5. Robust model with Box uncertainty Model 2 with tau_bar
tau_bar = tau * (1 + epsilon)
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau_bar,
alpha0,
True)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj5 = tot_buf_cost - SCLP_obj
u = u[0:J,:]
eta = np.multiply(u.transpose(), tau).transpose()
# print(f'Step 5 (Model 2 with tau_bar): t={t} tau_bar={tau_bar} mu={mu} u={u} eta={eta}')
# print(f' Objectives: model 2: {real_obj2} model 5: {real_obj5}')
# assert real_obj5 - real_obj2 >= 1e-3
t_index = lambda x: min([i-1 for i, ti in enumerate(t) if ti > x] + [len(t)-2])
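            # make_eta_t is a closure factory: passing k as an argument binds the
            # current buffer index at call time, avoiding Python's late binding of
            # loop variables, so each eta_t[k] refers to its own row of u and tau.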
def make_eta_t(k):
return lambda t: u[k, t_index(t)] * tau[k] * (1 - epsilon)
eta_t = np.array([make_eta_t(k) for k in range(K)])
u_mat = np.array([[eta_t[k]((t+5.0)/10.0)/tau_t[k]((t+5.0)/10.0) for t in range(TT*10)] for k in range(K)])
def make_x_R_t(k):
# return lambda t: alpha[k] + a[k] * t - integrate(u_t[k], 0, t)
return lambda t: alpha[k] + a[k] * t - np.sum(u_mat[k,0:round(t*10)])/10.0
x_R_t = np.array([make_x_R_t(k) for k in range(K)])
obj_5 = np.sum([integrate(x_R_t[k], 0, TT) * cost[k] for k in range(K)])
print(f'run {nt} Step 5: real_obj2={real_obj2} real_obj5={real_obj5} obj_5={obj_5}')
# Server effort proportion model
#
# 6. Robust model with Box uncertainty Model 1 with mu_bar
mu_bar = mu / (1 - epsilon)
tau_bar_bar = np.divide(1, mu_bar)
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_paper(aa,
bb,
cc,
tau_bar_bar,
alpha0,
False)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj6 = tot_buf_cost - SCLP_obj
eta = u[0:J, :]
u = np.multiply(eta.transpose(), mu).transpose()
assert real_obj6 - real_obj1 <= tolerance
t_index = lambda x: min([i-1 for i, ti in enumerate(t) if ti > x] + [len(t)-2])
u_mat2 = np.array([[eta[k,t_index((t+5.0)/10.0)]/tau_t[k]((t+5.0)/10.0) for t in range(TT*10)] for k in range(K)])
def make_x_R2_t(k):
# return lambda x: alpha[k] + a[k] * x - integrate(u2_t[k], 0, x)
return lambda t: alpha[k] + a[k] * t - np.sum(u_mat2[k,0:round(t*10)])/10.0
x_R2_t = np.array([make_x_R2_t(k) for k in range(K)])
obj_6 = np.sum([integrate(x_R2_t[k], 0, TT) * cost[k] for k in range(K)])
print(f'Run {nt} Step 6: real_obj1={real_obj1} real_obj6={real_obj6} obj_6={obj_6}')
assert obj_6 - obj_5 <= tolerance
results.append([nt, real_obj1, real_obj5, real_obj6, obj_5, obj_6])
results_df = pd.DataFrame(results, columns=['Iteration', 'Obj', 'RO_SOA', 'RO_SEP', 'Obj_SOA', 'Obj_SEP'])
plt.clf()
plt.hist(results_df['Obj_SOA'], bins=10)
plt.xlabel(f'State of the Art')
plt.savefig(f'obj_hist_soa_{epsilon}.pdf')
plt.clf()
plt.hist(results_df['Obj_SEP'], bins=10)
plt.xlabel(f'Server Effort Proportion')
plt.savefig(f'obj_hist_sep_{epsilon}.pdf')
results_df['RelDiff'] = (results_df['Obj_SOA'] - results_df['Obj_SEP']) / results_df['Obj_SOA'] * 100
results_df.to_csv(f'results_{epsilon}.csv')
means_df.loc[len(means_df.index)] = [epsilon, np.mean(results_df['Obj_SOA']), np.mean(results_df['Obj_SEP']), np.mean(results_df['RelDiff'])]
means_df.to_csv('summary.csv')
means_df.to_latex("improvement_table.tex", columns=('epsilon', 'Improvement'), float_format='%.2f',column_format='cc',
header=(r'$\epsilon$', r'Improvement (\%)'), escape=False, index=False)
plt.clf()
plt.hist(results_df['RelDiff'])
plt.xlabel('Percent improvement of objective value')
plt.savefig(f'rel_diff_hist_{epsilon}.pdf')
plt.clf()
plt.scatter(means_df['epsilon'], means_df['Improvement'])
    plt.xlabel(r'Uncertainty $\epsilon$')
plt.ylabel('Percent improvement')
plt.xticks(np.arange(0.0, 0.21, 0.02))
plt.savefig(f'mean_rel_diff.pdf')
| 14,581 | 47.284768 | 145 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/pytests/__init__.py
| 0 | 0 | 0 |
py
|
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/pytests/WorkloadPlacement_test.py
|
import pytest
import os, sys
import numpy as np
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.append(proj)
from SCLPsolver.SCLP import SCLP, SCLP_settings
from SCLPsolver.doe.data_generators.WorkloadPlacement import *
from SCLPsolver.doe.doe import *
def test_generate_workload_placement_data():
""" Test generating data from Workload Placement model for SCLP
"""
T = 10 # 10 seconds
I = 2 # servers
J = 3 # classes
K = I * J
R = np.array([100,200])
P = np.array([1000,2000])
a = np.ones(J)
mu = np.array([4,5])
x0 = np.zeros((I,J))
r = np.ones(J)
rprime = np.ones(J)
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, cost = generate_workload_placement_data(T, I, J, R, P, a, mu, x0, r, rprime)
assert G is not None
assert np.array_equal(G, np.array([[-4, 0, 0, -5, 0, 0], [0, -4, 0, 0, -5, 0], [0, 0, -4 ,0, 0, -5]]))
assert H is not None
assert np.array_equal(H, np.array([[1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]]))
assert F is not None
assert np.array_equal(F, np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
assert gamma is not None
assert np.array_equal(gamma, np.zeros(K))
assert c is not None
assert c.shape == (K,)
assert d is not None
assert d.shape == (K-J,)
assert alpha is not None
assert b is not None
assert TT is not None
assert TT == T
assert total_buffer_cost is not None
assert cost is not None
seeds = [1234]
@pytest.mark.parametrize("seed", seeds)
def test_workload_placement_to_sclp(seed):
"""
Generate random workload placement model and run SCLP
Parameters
----------
seed : int
Seed for np.random
"""
# Observe: 1) initial number of tasks in each of the K buffers
# 2) task arrival events
# Ask: 3) what am I estimating?
# 4) what are the statistical properties of the solution given the randomness in a and x0
# 5) how do I interpret u?
# 6) should we normalize u to 1's?
# 7) what are the statistical properties of u given a and x0
# 8) has anyone analyzed this for sclp?
# 9) has anyone used this for other optimization algorithms?
np.random.seed(seed)
T = 10 # 10 seconds
I = 2 # servers
J = 3 # classes
K = I * J
R = np.array([100,200])
P = np.array([1000,2000])
a = np.random.normal(1, 0.1, J)
mu = np.array([4,5])
x0 = np.random.uniform(1, 3, (I,J))
r = np.ones(J)
rprime = np.ones(J)
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, cost = generate_workload_placement_data(T, I, J, R, P, a, mu, x0, r, rprime)
solver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management=False,
suppress_printing=False)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT, solver_settings)
assert solution is not None
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)
assert t is not None
@pytest.mark.parametrize("mu1, mu2", [(60.0, 25.0), (50.0, 22.0)])
def test_generate_one_server_two_classes(mu1, mu2):
TT = 10
tau1, tau2 = 1.0/mu1, 1.0/mu2
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(40.0, 20.0, 1.0, 1.0, 1.0, tau1, tau2, 100.0, 100.0, False)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau, maxT = solution.get_final_solution(True)
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(40.0, 20.0, 1.0, 1.0, 1.0, tau1, tau2, 100.0, 100.0, True)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t1, x1, q1, u1, p1, pivots1, SCLP_obj1, err1, NN1, tau1, maxT1 = solution.get_final_solution(True)
assert np.array_equal(t, t1)
assert abs(SCLP_obj - SCLP_obj1) < 0.001
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT**2/2
real_obj = tot_buf_cost - SCLP_obj
print(f'real_obj={real_obj} SCLP_obj={SCLP_obj} t={t} u={u} u1={u1}')
def integrate(f, low, up, intervals=100):
tt = np.linspace(low, up, intervals+1)
y = [f(t) for t in tt]
F = (0.5*(y[0]+y[-1]) + np.sum(y[1:-1]))*(up-low)/intervals
return F
integrate_m = np.vectorize(integrate)
def test_integrate():
assert abs(integrate(lambda x: 1, 0, 4) - 4) < 0.001
assert abs(integrate(lambda x: x, 2, 6) - 16) < 0.001
assert abs(integrate(lambda x: np.sin(x), 0, np.pi/2.0) - 1) < 0.001
def test_integrate_m():
H1 = np.array((lambda x: 1,))
assert abs(integrate_m(H1, 0, 4) - 4) < 0.001
H2 = np.array(((lambda x: 1, lambda x: x),(lambda x: np.pi*np.sin(np.pi*x), lambda x: np.pi*np.sin(2*np.pi*x))))
assert np.allclose(integrate_m(H2, 0, 1, intervals=1000), np.array(((1.0, 0.5), (2.0, 0.0))))
@pytest.mark.parametrize("epsilon, mu1, mu2, seed", [(0.2, 60.0, 25.0, 1+3*i) for i in range(10)])
def test_generate_one_server_two_classes_perturbed(epsilon, mu1, mu2, seed):
np.random.seed(seed)
TT = 10
tau1, tau2 = 1.0/mu1, 1.0/mu2
tau = np.array((tau1, tau2))
mu = np.array((mu1, mu2))
a1, a2 = 40.0, 20.0 # arrival rates tasks per unit time
b1 = 1.0 # cpu limit
c1, c2 = 1.0, 1.0 # cost per unit time of buffers
alpha1, alpha2 = 100.0, 100.0 # initial buffer quantities
# 1. Model 1, eta is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(a1,
a2,
b1,
c1,
c2,
tau1,
tau2,
alpha1,
alpha2,
False)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj1 = tot_buf_cost - SCLP_obj
eta = u[0:2,:]
u = np.multiply(eta.transpose(), mu).transpose()
u_m1 = u.copy()
t_m1 = t.copy()
print(f'Step 1 (Model 1): t={t} tau={tau} mu={mu} u={u} eta={eta}')
# 2. Model 2, u is control var
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(a1,
a2,
b1,
c1,
c2,
tau1,
tau2,
alpha1,
alpha2,
True)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj2 = tot_buf_cost - SCLP_obj
u = u[0:2,:]
eta = np.multiply(u.transpose(), tau).transpose()
print(f'Step 2 (Model 2): t={t} tau={tau} mu={mu} u={u} eta={eta}')
# 3. make sure objectives of 1,2 are the same
print(f'Step 3: objectives obj1={real_obj1} obj2={real_obj2}')
assert abs(real_obj1 - real_obj2) < 0.001
# 4. Define tau_j(t)
tau_t = gen_uncertain_param(tau, (0, TT), (-epsilon / 2.0, epsilon / 2.0), uncertain_func=sin_uncertainty_low)
t_print = tuple(range(0, TT+1))
print(f'Step 4: uncertain tau({t_print})={np.array([(tau_t[0](t), tau_t[1](t)) for t in t_print]).transpose()}')
import matplotlib.pyplot as plt
plt.plot(t_print, [(tau_t[0](t), tau_t[1](t)) for t in t_print])
plt.show()
# 5. Robust model with Box uncertainty Model 2 with tau_bar
tau1_bar, tau2_bar = tau_bar = tau * (1 + 0.5*epsilon)
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(a1,
a2,
b1,
c1,
c2,
tau1_bar,
tau2_bar,
alpha1,
alpha2,
True)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj5 = tot_buf_cost - SCLP_obj
u = u[0:2,:]
eta = np.multiply(u.transpose(), tau).transpose()
print(f'Step 5 (Model 2 with tau_bar): t={t} tau_bar={tau_bar} mu={mu} u={u} eta={eta}')
print(f' Objectives: model 2: {real_obj2} model 5: {real_obj5}')
assert real_obj5 >= real_obj2
t_index = lambda x: min([i-1 for i, ti in enumerate(t) if ti > x] + [len(t)-2])
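    # t_index maps a continuous time x to the index of the solution interval that
    # contains it (clamped to the last interval), so the piecewise-constant
    # controls returned by SCLP can be evaluated at arbitrary times.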
eta_t = np.array([
lambda x: u[0, t_index(x)] * tau[0] * (1 - 0.5 * epsilon),
lambda x: u[1, t_index(x)] * tau[1] * (1 - 0.5 * epsilon)
])
print(f'eta={eta}')
print(f'eta_t({[t for t in range(0,TT+1,2)]}) = {np.array([(eta_t[0](t), eta_t[1](t)) for t in t_print]).transpose()}')
u_t = np.array([
lambda x: eta_t[0](x) / tau_t[0](x),
lambda x: eta_t[1](x) / tau_t[1](x)
])
print(f'u_t({[t for t in range(0,TT+1,2)]}) = {np.array([(u_t[0](t), u_t[1](t)) for t in t_print]).transpose()}')
x_R_t = np.array([
lambda x: alpha1 + a1 * x - integrate(u_t[0], 0, x),
lambda x: alpha2 + a2 * x - integrate(u_t[1], 0, x)
])
print(f'x_R_t({[t for t in range(0,TT+1,2)]}) = {np.array([(x_R_t[0](t), x_R_t[1](t)) for t in t_print]).transpose()}')
obj_5 = np.sum([
integrate(x_R_t[0], 0, TT),
integrate(x_R_t[1], 0, TT)
])
print(f'Step 5: real_obj2={real_obj2} real_obj5={real_obj5} obj_5={obj_5}')
# 6. Robust model with Box uncertainty Model 1 with mu_bar
mu1_bar, mu2_bar = mu_bar = mu / (1 - 0.5*epsilon)
tau1_bar_bar, tau2_bar_bar = 1 / mu1_bar, 1 / mu2_bar
G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = generate_workload_placement_data_new(a1,
a2,
b1,
c1,
c2,
tau1_bar_bar,
tau2_bar_bar,
alpha1,
alpha2,
False)
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, TT)
t, x, q, u, p, pivots, SCLP_obj, err, NN, tau_intervals, maxT = solution.get_final_solution(True)
tot_buf_cost = np.inner(cost, alpha * TT) + np.inner(cost, a) * TT ** 2 / 2
real_obj6 = tot_buf_cost - SCLP_obj
eta = u[0:2, :]
u = np.multiply(eta.transpose(), mu).transpose()
print(f'Step 6 (Model 1 with mu_bar): t={t} mu_bar={mu_bar} mu={mu} u={u} eta={eta}')
print(f' Objectives: model 1: {real_obj1} model 6: {real_obj6}')
assert real_obj6 <= real_obj1
t_index = lambda x: min([i-1 for i, ti in enumerate(t) if ti > x] + [len(t)-2])
eta_t = np.array([
lambda x: u[0, t_index(x)] * tau[0] * (1 - 0.5 * epsilon),
lambda x: u[1, t_index(x)] * tau[1] * (1 - 0.5 * epsilon)
])
print(f'eta={eta}')
print(f'eta_t({[t for t in range(0,TT+1,2)]}) = {np.array([(eta_t[0](t), eta_t[1](t)) for t in t_print]).transpose()}')
u_t = np.array([
lambda x: eta_t[0](x) / tau_t[0](x),
lambda x: eta_t[1](x) / tau_t[1](x)
])
print(f'u_t({[t for t in range(0,TT+1,2)]}) = {np.array([(u_t[0](t), u_t[1](t)) for t in t_print]).transpose()}')
x_R_t = np.array([
lambda x: alpha1 + a1 * x - integrate(u_t[0], 0, x),
lambda x: alpha2 + a2 * x - integrate(u_t[1], 0, x)
])
print(f'x_R_t({[t for t in range(0,TT+1,2)]}) = {np.array([(x_R_t[0](t), x_R_t[1](t)) for t in t_print]).transpose()}')
obj_6 = np.sum([
integrate(x_R_t[0], 0, TT),
integrate(x_R_t[1], 0, TT)
])
print(f'Step 6: real_obj1={real_obj1} real_obj6={real_obj6} obj_6={obj_6}')
# 7. Model 1 again
t = t_m1.copy()
u = u_m1.copy()
t_index = lambda x: min([i-1 for i, ti in enumerate(t) if ti > x] + [len(t)-2])
    print(f'Step 7: u={u}')
# 8. Compute optimistic model
u_t = np.array([
lambda x: u[0, t_index(x)] * tau[0] / tau_t[0](x),
lambda x: u[1, t_index(x)] * tau[1] / tau_t[1](x)
])
print(f'u_t({[t for t in range(0,TT+1,2)]}) = {np.array([(u_t[0](t), u_t[1](t)) for t in t_print]).transpose()}')
x_R_t = np.array([
lambda x: max(alpha1 + a1 * x - integrate(u_t[0], 0, x), 0),
lambda x: max(alpha2 + a2 * x - integrate(u_t[1], 0, x), 0)
])
print(f'x_R_t({[t for t in range(0,TT+1,2)]}) = {np.array([(x_R_t[0](t), x_R_t[1](t)) for t in t_print]).transpose()}')
obj_8 = np.sum([
integrate(x_R_t[0], 0, TT),
integrate(x_R_t[1], 0, TT)
])
print(f'Step 8: real_obj1={real_obj1} real_obj6={real_obj6} obj_5={obj_5} obj_6={obj_6} obj_8={obj_8}')
| 16,320 | 42.756032 | 164 |
py
|
RandomNeuralField
|
RandomNeuralField-main/train.py
|
import sys
from pathlib import Path
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
from torchvision import datasets, transforms
from src.utils.factory import read_yaml
from src.models.networks import read_model
from src.utils.factory import calc_acc
def create_loader(phase):
bs = 4096
transform = transforms.Compose(
[transforms.ToTensor()]
)
dataset = datasets.MNIST(
root='data',
train=True if phase == 'train' else False,
download=True, transform=transform
)
dataloader = DataLoader(dataset)
X, y = [], []
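    # Each target is a soft one-hot regression vector (0.9 for the true class,
    # -0.1 elsewhere) and each image is L2-normalized, a setup commonly paired
    # with MSE training in NTK-style experiments.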
for img, label in dataloader:
label_list = [-0.1 for _ in range(10)]
img = img.numpy()
label_list[label] = 0.9
X.append(img / np.linalg.norm(img))
y.append(label_list)
X, y = np.array(X).squeeze(axis=1), np.array(y, dtype='float32')
if phase == 'train':
train_id, val_id = train_test_split(
np.arange(50000),
test_size=0.2,
random_state=47
)
X_train, X_val = X[train_id], X[val_id]
y_train, y_val = y[train_id], y[val_id]
X_train, X_val = torch.tensor(X_train), torch.tensor(X_val)
y_train, y_val = torch.tensor(y_train), torch.tensor(y_val)
train_tensor = TensorDataset(X_train, y_train)
val_tensor = TensorDataset(X_val, y_val)
train_loader = DataLoader(train_tensor, batch_size=bs)
val_loader = DataLoader(val_tensor, batch_size=bs)
return train_loader, val_loader
elif phase == 'test':
X_test, y_test = torch.tensor(X), torch.tensor(y)
test_tensor = TensorDataset(X_test, y_test)
return DataLoader(test_tensor, batch_size=64)
else:
        raise NotImplementedError
def train_one_epoch(cfg, net, train_loader, optimizer, criterion):
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
running_loss, running_acc = 0., 0.
for i, (imgs, labels) in enumerate(train_loader):
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
outputs = net(imgs)
loss = criterion(outputs, labels) / 2
acc = calc_acc(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
running_acc += acc
return running_loss / (i+1), running_acc / (i+1)
def train(cfg, net, lr, train_loader, val_loader):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
init_name = cfg.INITIALIZER.TYPE
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
best_val_loss = 1e10
keys = ['train/loss', 'train/acc', 'val/loss', 'val/acc']
for epoch in range(n_epochs):
net.train()
avg_train_loss, avg_train_acc = train_one_epoch(
cfg, net, train_loader, optimizer, criterion
)
net.eval()
with torch.no_grad():
running_vloss, running_vacc = 0.0, 0.0
for i, (imgs, labels) in enumerate(val_loader):
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
val_loss = criterion(outputs, labels) / 2
val_acc = calc_acc(outputs, labels)
running_vloss += val_loss.item()
running_vacc += val_acc
avg_val_loss = running_vloss / (i+1)
avg_val_acc = running_vacc / (i+1)
vals = [avg_train_loss, avg_train_acc, avg_val_loss, avg_val_acc]
file_name = Path('output') / f'{init_name}_result.csv'
x = {k: v for k, v in zip(keys, vals)}
n_cols = len(x) + 1
header = '' if file_name.exists() else (('%20s,' * n_cols % tuple(['epoch'] + keys)).rstrip(',') + '\n')
with open(file_name, 'a') as f:
f.write(header + ('%20.5g,' * n_cols % tuple([epoch] + vals)).rstrip(',') + '\n')
if (epoch + 1) % 1000 == 0:
print(
'Epoch[{}/{}], TrainLoss: {:.5f}, ValLoss: {:.5f}, ValAcc: {:.5f}'
.format(epoch+1, n_epochs, vals[0], vals[2], vals[3])
)
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
torch.save(net.state_dict(), f'pretrained/{init_name}_best.pth')
def main():
cfg = read_yaml(fpath='src/config/config.yaml')
cfg.GENERAL.EPOCH = 50000
train_loader, val_loader = create_loader(phase='train')
# init_types = ['vanilla', 'gaussian', 'withmp', 'mexican', 'matern']
init_types = ['withmp']
for it in init_types:
if it == 'gaussian':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'withmp':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'mexican':
cfg.INITIALIZER.M_SIGMA = 0.01
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'matern':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
cfg.INITIALIZER.TYPE = it
net = read_model(cfg)
train(cfg, net, 0.5, train_loader, val_loader)
if __name__ == '__main__':
main()
| 5,899 | 30.382979 | 112 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/tools/relative_frob.py
|
import sys
from os.path import join, dirname
import torch
import torch.nn as nn
from torch import optim
sys.path.append(join(dirname(__file__), "../.."))
from src.ntk.generate import generate_ntk
from src.utils.factory import calc_diff_frob, calc_acc
def calc_ntk_frob(cfg, net, lr, train_loader, test_loader):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
for epoch in range(n_epochs):
train_loss = 0
net.train()
for imgs, labels in train_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
if epoch == 0:
ntk_0, _ = generate_ntk(net, 0, imgs, imgs, cfg, calc_lr=True)
outputs = net(imgs)
train_loss = criterion(outputs, labels) / 2
train_acc = calc_acc(outputs, labels)
train_loss.backward()
optimizer.step()
if epoch == n_epochs - 1:
ntk_t, _ = generate_ntk(net, 0, imgs, imgs, cfg, calc_lr=True)
net.eval()
with torch.no_grad():
for imgs, labels in test_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
test_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
test_acc = calc_acc(outputs, labels)
ntk_diff_frob = calc_diff_frob(ntk_0, ntk_t)
return ntk_diff_frob, train_acc, test_acc
| 1,949 | 31.5 | 78 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/tools/train.py
|
import sys
from os.path import join, dirname
import torch
import torch.nn as nn
from torch import optim
sys.path.append(join(dirname(__file__), "../.."))
from src.utils.factory import calc_acc
def train(cfg, net, lr, database):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
train_loader, test_loader = database.get_loader()
results = {
'train_losses': [], 'test_losses': [],
'train_accs': [], 'test_accs': [],
'train_outputs': [], 'test_outputs': []
}
for epoch in range(n_epochs):
train_loss, test_loss = 0, 0
net.train()
for imgs, labels in train_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
outputs = net(imgs)
train_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
train_acc = calc_acc(outputs, labels)
train_loss.backward()
optimizer.step()
if epoch == 0:
f_train_0 = outputs
results['train_losses'].append(train_loss.cpu().detach().numpy())
results['train_accs'].append(train_acc)
results['train_outputs'].append(outputs)
net.eval()
with torch.no_grad():
for imgs, labels in test_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
test_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
test_acc = calc_acc(outputs, labels)
if epoch == 0:
f_test_0 = outputs
results['test_losses'].append(test_loss.cpu().detach().numpy())
results['test_accs'].append(test_acc)
results['test_outputs'].append(outputs)
print('Epoch[{}/{}], TrainLoss: {:.4f}, TestLoss: {:.4f}, TestAcc: {:.4f}'
.format(epoch+1, n_epochs, train_loss, test_loss, test_acc))
return f_train_0, f_test_0, results
| 2,554 | 32.181818 | 82 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/dataset/dataset.py
|
import numpy as np
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import DataLoader, TensorDataset
from torchvision import datasets, transforms
class MakeDataset:
def __init__(self, cfg):
self.cfg = cfg
self.dataset_name = cfg.DATA.NAME
self.n_class = cfg.DATA.CLASS
self.data_num = cfg.DATA.DATA_NUM
self.test_ratio = cfg.DATA.SPLIT_RATIO
self.dataloader = self.loader_setup()
def loader_setup(self):
transform = transforms.Compose(
[transforms.ToTensor()]
)
if self.dataset_name == 'mnist':
dataset = datasets.MNIST(
root='../data', train=True, download=True, transform=transform
)
elif self.dataset_name == 'fashion':
dataset = datasets.FashionMNIST(
root='../data', train=True, download=True, transform=transform
)
else:
            raise NotImplementedError
dataloader = DataLoader(dataset)
return dataloader
def get_array(self):
X, y = [], []
for i, (img, label) in enumerate(self.dataloader):
label_list = [-0.1 for _ in range(self.n_class)]
img = img.numpy()
label_list[label] = 0.9
X.append(img / np.linalg.norm(img))
y.append(label_list)
if i == self.data_num - 1:
break
X, y = np.array(X).squeeze(axis=1), np.array(y, dtype='float32')
train_id, test_id = train_test_split(
np.arange(self.data_num),
test_size=self.test_ratio,
random_state=47
)
X_train, X_test = X[train_id], X[test_id]
y_train, y_test = y[train_id], y[test_id]
return X_train, X_test, y_train, y_test
def get_tensor(self):
X_train, X_test, y_train, y_test = self.get_array()
X_train, X_test = torch.tensor(X_train), torch.tensor(X_test)
y_train, y_test = torch.tensor(y_train), torch.tensor(y_test)
return X_train, X_test, y_train, y_test
def get_loader(self):
X_train, X_test, y_train, y_test = self.get_tensor()
train_tensor = TensorDataset(X_train, y_train)
test_tensor = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_tensor, batch_size=self.data_num)
test_loader = DataLoader(test_tensor, batch_size=self.data_num)
return train_loader, test_loader
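# A minimal usage sketch (hypothetical: assumes `cfg` is a config object exposing
# the DATA fields read in __init__):
#     database = MakeDataset(cfg)
#     X_train, X_test, y_train, y_test = database.get_tensor()
#     train_loader, test_loader = database.get_loader()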
| 2,678 | 30.151163 | 78 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/ntk/learning_curve.py
|
import sys
from os.path import join, dirname
from tqdm.auto import tqdm
import numpy as np
import scipy as sp
sys.path.append(join(dirname(__file__), "../.."))
from src.utils.factory import calc_acc , calc_loss_NTK
class LearningCurve:
def __init__(self, cfg, lr, NTK_train, train_label, f_train_0, f_test_0):
self.time_range = np.arange(0, cfg.GENERAL.EPOCH, 1)
self.n_train = int(cfg.DATA.DATA_NUM * (1 - cfg.DATA.SPLIT_RATIO))
self.n_class = cfg.DATA.CLASS
self.NTK_train = NTK_train
self.train_label = train_label
self.f_train_0 = f_train_0
self.f_test_0 = f_test_0
self.id_mat = np.eye(self.n_train)
self.lr = lr
self.diff, self.P, self.D = self.prepare()
def prepare(self):
diff = self.f_train_0 - self.train_label
mat = self.id_mat - self.lr * self.NTK_train / (self.n_train * self.n_class)
diag, P = np.linalg.eigh(mat)
D = np.diag(diag)
return diff, P, D
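    # basic_calc evaluates the discrete-time NTK learning curve in closed form:
    # with M = I - lr * K / (n * C), the train outputs after `epoch` steps are
    # M^epoch (f_0 - y) + y, and M^epoch is formed cheaply as P D^epoch P^T from
    # the eigendecomposition computed in prepare().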
def basic_calc(self, epoch, split, label, NTK_prod=None):
if epoch == 0:
p_mat = self.id_mat
else:
p_mat = self.P @ (self.D**epoch) @ self.P.T
if split == 'train':
output = np.dot(p_mat, self.diff) + label
elif split == 'test':
output = self.f_test_0 - np.dot(
NTK_prod.T, np.dot(self.id_mat - p_mat, self.diff)
)
loss = calc_loss_NTK(output, label)
acc = calc_acc(output, label)
return loss, acc, output
def train_curve(self):
train_results = {'train_losses': [], 'train_accs': [], 'train_outputs': []}
for i in tqdm(self.time_range):
loss, acc, output = self.basic_calc(i, 'train', self.train_label)
train_results['train_losses'].append(loss)
train_results['train_accs'].append(acc)
train_results['train_outputs'].append(output)
return train_results
def test_curve(self, NTK_test, test_label):
test_results = {'test_losses': [], 'test_accs': [], 'test_outputs': []}
NTK_prod = sp.linalg.solve(self.NTK_train, NTK_test.T)
for i in tqdm(self.time_range):
loss, acc, output = self.basic_calc(i, 'test', test_label, NTK_prod)
test_results['test_losses'].append(loss)
test_results['test_accs'].append(acc)
test_results['test_outputs'].append(output)
return test_results
| 2,625 | 32.240506 | 84 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/ntk/generate.py
|
from tqdm.auto import tqdm
import numpy as np
import torch
from torch.autograd import grad
def generate_ntk(net, label, train, test, cfg, calc_lr=False):
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
if len(train.size()) > 2:
train = train.view(-1, input_shape)
test = test.view(-1, input_shape)
if torch.cuda.is_available():
train = train.to(f'cuda:{device_id[0]}')
test = test.to(f'cuda:{device_id[0]}')
f_train = net(train)
train_grads = []
for i in range(len(f_train)):
train_grads.append(
grad(f_train[i][label], net.parameters(), retain_graph=True)
)
K_train = torch.zeros((len(f_train), len(f_train)))
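    # The empirical NTK entry K[i, j] is the inner product of the parameter
    # gradients of output `label` at samples i and j; only the lower triangle is
    # filled in the loop and mirrored afterwards since the Gram matrix is symmetric.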
for i in tqdm(range(len(f_train))):
grad_i = train_grads[i]
for j in range(i+1):
grad_j = train_grads[j]
K_train[i, j] = sum([torch.sum(
torch.mul(grad_i[k], grad_j[k])
) for k in range(len(grad_j))])
K_train = K_train.cpu().numpy()
K_train = K_train + K_train.T - np.diag(K_train.diagonal())
if calc_lr:
NTK_train = np.kron(K_train, np.eye(cfg.DATA.CLASS))
vals = np.linalg.eigvalsh(NTK_train)
lr = 2 / (max(vals) + 1e-12)
return NTK_train, lr
else:
f_test = net(test)
K_test = torch.zeros((len(f_test), len(f_train)))
test_grads = []
for i in tqdm(range(len(f_test))):
test_grads.append(
grad(f_test[i][label], net.parameters(), retain_graph=True)
)
for j, train_grad in enumerate(train_grads):
for k, test_grad in enumerate(test_grads):
K_test[k, j] = sum([torch.sum(
torch.mul(train_grad[u], test_grad[u])
) for u in range(len(test_grad))])
K_test = K_test.cpu().numpy()
return K_train, K_test
| 1,997 | 31.754098 | 75 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/ntk/regression.py
|
import numpy as np
def calc_acc_for_reg(yhat, y):
correct = 0
for i in range(len(y)):
label, pred = np.argmax(y[i]), np.argmax(yhat[i])
if label == pred:
correct += 1
return correct / len(y)
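# ntk_regression evaluates the kernel-regression predictor
# f(X_test) = K_test K_train^{-1} y_train and reports its MSE-style loss and
# classification accuracy.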
def ntk_regression(cfg, NTK_train, NTK_test, y_train, y_test):
n_class = cfg.DATA.CLASS
f_test = np.dot(NTK_test @ np.linalg.inv(NTK_train), y_train)
loss = np.linalg.norm(f_test - y_test)**2 / (2*len(y_test)*n_class)
acc = calc_acc_for_reg(f_test, y_test)
return loss, acc
| 562 | 22.458333 | 71 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/models/initializers.py
|
import sys
from os.path import join, dirname
import numpy as np
import torch.nn as nn
from torch import Tensor
sys.path.append(join(dirname(__file__), "../.."))
from src.models.utils import sym_mat, receptive_mat, weight_correlation, matern_kernel
class Initializers(nn.Module):
    def __init__(self, cfg):
        super(Initializers, self).__init__()
        self.type = cfg.INITIALIZER.TYPE
self.r_sigma = cfg.INITIALIZER.R_SIGMA
self.s_sigma = cfg.INITIALIZER.S_SIGMA
self.m_sigma = cfg.INITIALIZER.M_SIGMA
self.nu = cfg.INITIALIZER.NU
def get_initializer(self, in_features, out_features):
if self.type == 'gaussian' or self.type == 'withmp':
init_weight = self.get_gaussian_type(in_features, out_features)
elif self.type == 'mexican':
init_weight = self.get_mexican_type(in_features, out_features)
elif self.type == 'matern':
init_weight = self.get_matern_type(in_features, out_features)
else:
            raise NotImplementedError
return init_weight
def get_gaussian_type(self, in_features, out_features):
if in_features != out_features:
R = np.sqrt(
np.exp(
- receptive_mat(in_features,
out_features,
self.r_sigma)
)
)
elif in_features == out_features:
R = np.sqrt(
np.exp(
- sym_mat(
in_features
) / (in_features*self.r_sigma)**2
)
)
weight_correlated = weight_correlation(
in_features, out_features, self.s_sigma
)
init_weight = R * weight_correlated
return Tensor(init_weight)
def get_mexican_type(self, in_features, out_features):
coef = 2 / (np.sqrt(3*self.m_sigma) * pow(np.pi, 1/4))
if in_features != out_features:
mh = receptive_mat(in_features, out_features, self.m_sigma)
elif in_features == out_features:
mh = sym_mat(in_features) / self.m_sigma**2
M = coef * (np.ones((out_features, in_features)) - mh) * np.sqrt(np.exp(-mh))
weight_correlated = weight_correlation(
in_features, out_features, self.s_sigma
)
init_weight = M * weight_correlated
return Tensor(init_weight)
def get_matern_type(self, in_features, out_features):
if in_features != out_features:
R = np.sqrt(
np.exp(
- receptive_mat(in_features,
out_features,
self.r_sigma)
)
)
elif in_features == out_features:
R = np.sqrt(
np.exp(
- sym_mat(
in_features
) / (2 * (in_features * self.r_sigma)**2)
)
)
init_mk = matern_kernel(
in_features, out_features, self.s_sigma, self.nu
)
init_weight = R * init_mk
return Tensor(init_weight)
| 3,358 | 31.931373 | 86 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/models/utils.py
|
from math import pow
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.gaussian_process.kernels import Matern
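# sym_mat returns the matrix of squared index distances (i - j)**2 between unit
# positions; it serves as the argument of the Gaussian and Mexican-hat envelopes
# used by the initializers.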
def sym_mat(features):
vec = np.arange(features).reshape(-1, 1)
X = np.tile(vec**2, (1, features))
H = np.dot(vec, vec.T)
return X - 2*H + X.T
def receptive_mat(in_features, out_features, sigma):
vec_in = np.arange(1, in_features+1).reshape(-1, 1)
vec_out = np.arange(1, out_features+1).reshape(-1, 1)
X = np.tile((vec_out / out_features)**2, (1, in_features))
Y = np.tile((vec_in / in_features)**2, (1, out_features)).T
H = []
if in_features < out_features:
for i in range(in_features):
H.append(vec_out * (i+1))
elif in_features > out_features:
for i in range(out_features):
H.append(vec_in * (i+1))
H = np.array(H).reshape(
in_features, out_features
).T / (in_features * out_features)
return (X - 2*H + Y) / sigma**2
def weight_correlation(in_features, out_features, s_sigma):
weight = np.random.normal(0, 1, (out_features, in_features))
in_scaled_sigma = (s_sigma * in_features)**2
c = pow(2.0 / (np.pi * in_scaled_sigma), 1/4)
A_in = c * np.exp( - sym_mat(in_features) / in_scaled_sigma)
return weight @ A_in
def matern_kernel(in_features, out_features, theta, nu):
Kernel = Matern(length_scale=in_features*theta,
length_scale_bounds=(1e-5, 1e5),
nu=nu)
_range = np.linspace(0, in_features-1, num=in_features)
grids = _range.reshape(-1, 1)
kernel_mat = Kernel(grids, grids)
gen = multivariate_normal(cov=kernel_mat)
mk_init = gen.rvs(size=out_features, random_state=47)
return mk_init
| 1,806 | 26.378788 | 64 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/models/networks.py
|
import sys
from os.path import join, dirname
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append(join(dirname(__file__), "../.."))
from src.models.initializers import Initializers
def read_model(cfg):
init_type = cfg.INITIALIZER.TYPE
device_id = cfg.GENERAL.GPUS
# define the type of network
if init_type == 'vanilla':
net = VanillaNet(cfg)
else:
net = Networks(cfg)
if torch.cuda.is_available():
net.to(f'cuda:{device_id[0]}')
return net
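# LinearNTK implements the NTK parameterization: weights and biases are drawn from
# a standard normal at initialization and rescaled by w_sig / sqrt(in_features)
# (respectively b_sig) inside forward, rather than scaling the initial values.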
class LinearNTK(nn.Linear):
def __init__(self, in_features, out_features, b_sig, w_sig=2, bias=True):
super(LinearNTK, self).__init__(in_features, out_features)
self.reset_parameters()
self.b_sig = b_sig
self.w_sig = w_sig
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=1)
if self.bias is not None:
nn.init.normal_(self.bias, mean=0, std=1)
def forward(self, input):
return F.linear(input,
self.w_sig * self.weight / np.sqrt(self.in_features),
self.b_sig * self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, b_sig={}'.format(
self.in_features, self.out_features, self.bias is not None, self.b_sig
)
class VanillaNet(nn.Module):
def __init__(self, cfg):
super(VanillaNet, self).__init__()
self.visualize = cfg.MODEL.VISUALIZE
in_features = cfg.MODEL.INPUT_FEATURES
mid_features = cfg.MODEL.MID_FEATURES
out_features = cfg.DATA.CLASS
b_sig = cfg.MODEL.B_SIGMA
self.l1 = LinearNTK(in_features, mid_features, b_sig)
self.l2 = LinearNTK(mid_features, mid_features, b_sig)
self.l3 = LinearNTK(mid_features, out_features, b_sig)
def forward(self, x):
if self.visualize:
return self.l1(x)
else:
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
h3 = self.l3(h2)
return h3
class Networks(nn.Module):
def __init__(self, cfg):
super(Networks, self).__init__()
self.init_type = cfg.INITIALIZER.TYPE
self.mid_features = cfg.MODEL.MID_FEATURES
self.visualize = cfg.MODEL.VISUALIZE
in_features = cfg.MODEL.INPUT_FEATURES
out_features = cfg.DATA.CLASS
b_sig = cfg.MODEL.B_SIGMA
init_weight = Initializers(cfg).get_initializer(in_features, self.mid_features)
self.l1 = LinearNTK(in_features, self.mid_features, b_sig)
self.l1.weight.data = init_weight
if self.init_type == 'withmp':
self.l2 = nn.MaxPool1d(kernel_size=3, stride=1, padding=1)
else:
self.l2 = LinearNTK(self.mid_features, self.mid_features, b_sig)
self.l3 = LinearNTK(self.mid_features, out_features, b_sig)
def forward(self, x):
if self.visualize:
return self.l1(x)
else:
h1 = F.relu(self.l1(x))
if self.init_type == 'withmp':
h2 = self.l2(h1)
h3 = self.l3(h2)
else:
h2 = F.relu(self.l2(h1))
h3 = self.l3(h2)
return h3
| 3,468 | 28.905172 | 87 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/utils/factory.py
|
import os
from omegaconf import OmegaConf
import numpy as np
def read_yaml(fpath='config/config.yaml'):
config = OmegaConf.load(fpath)
return config
def calc_acc(outputs, labels):
correct = 0
for i in range(len(outputs)):
if outputs[i].argmax() == labels[i].argmax():
correct += 1
acc = correct / len(labels)
return acc
def calc_loss_NTK(outputs, labels):
length = labels.shape[0] * labels.shape[1]
loss = (np.linalg.norm(outputs - labels)**2) / (2 * length)
return loss
def calc_diff_frob(ntk_0, ntk_t):
ntk_diff = np.linalg.norm(ntk_t - ntk_0, ord='fro')
ntk_norm = np.linalg.norm(ntk_0, ord='fro')
return ntk_diff / ntk_norm
| 748 | 18.710526 | 63 |
py
|
RandomNeuralField
|
RandomNeuralField-main/src/utils/visualize.py
|
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def mean_output_dynamics(results, ntk_results):
# train and test outputs of normal training
train_outputs = np.array(results['train_outputs'])
test_outputs = np.array(results['test_outputs'])
# train and test outputs of ntk learning curve
ntk_train_outputs = np.array(ntk_results['train_outputs'])
ntk_test_outputs = np.array(ntk_results['test_outputs'])
mean_tr_out = train_outputs.mean(axis=1)
mean_te_out = test_outputs.mean(axis=1)
mean_ntk_tr_out = ntk_train_outputs.mean(axis=1)
mean_ntk_te_out = ntk_test_outputs.mean(axis=1)
return mean_tr_out, mean_te_out, mean_ntk_tr_out, mean_ntk_te_out
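# output_dynamics_per_class: for the samples whose true class is class_num, average
# each of the n_class output units at every epoch, giving one mean trajectory per
# output unit.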
def output_dynamics_per_class(outputs, labels, class_num):
outputs = np.array(outputs)
n_class = outputs.shape[-1]
m_lists = [[] for _ in range(n_class)]
for op in outputs:
c = []
for i in range(len(op)):
if labels[i][class_num] == max(labels[i]):
c.append(op[i])
c_lists = [[] for _ in range(n_class)]
for i in c:
for j, cl in zip(range(n_class), c_lists):
cl.append(i[j])
for m, cl in zip(m_lists, c_lists):
m.append(np.mean(cl))
return m_lists
def visualize(cfg, results, ntk_results, train_labels, test_labels, class_num):
n_epoch = cfg.GENERAL.EPOCH
init_name = cfg.INITIALIZER.TYPE
notebook = cfg.GENERAL.NOTEBOOK
tr_out = output_dynamics_per_class(
results['train_outputs'], train_labels, class_num
)
te_out = output_dynamics_per_class(
results['test_outputs'], test_labels, class_num
)
ntk_tr_out = output_dynamics_per_class(
ntk_results['train_outputs'], train_labels, class_num
)
ntk_te_out = output_dynamics_per_class(
ntk_results['test_outputs'], test_labels, class_num
)
fig, ax = plt.subplots(1, 4, figsize=(16, 4))
x_ticks = np.power(10, np.arange(0, int(np.log10(n_epoch))+1, 1).tolist())
for i in range(4):
if i == 0:
ax[i].plot(results['train_losses'], color='r', label='Train')
ax[i].plot(results['test_losses'], color='b', label='Test')
ax[i].plot(ntk_results['train_losses'], color='r',
linestyle='dashed', label='NTK Train')
ax[i].plot(ntk_results['test_losses'], color='b',
linestyle='dashed', label='NTK Test')
ax[i].set_title('Loss', fontsize=15)
elif i == 1:
ax[i].plot(results['train_accs'], color='r')
ax[i].plot(results['test_accs'], color='b')
ax[i].plot(ntk_results['train_accs'], color='r', linestyle='dashed')
ax[i].plot(ntk_results['test_accs'], color='b', linestyle='dashed')
ax[i].set_title('Accuracy', fontsize=15)
elif i == 2:
for j, (nn, nt) in enumerate(zip(tr_out, ntk_tr_out)):
if j == 0:
ax[i].plot(nn, label='Model')
ax[i].plot(nt, linestyle='dashed', label='NTK model')
else:
ax[i].plot(nn)
ax[i].plot(nt, linestyle='dashed')
ax[i].set_title('Train output', fontsize=15)
elif i == 3:
for j, (nn, nt) in enumerate(zip(te_out, ntk_te_out)):
ax[i].plot(nn)
ax[i].plot(nt, linestyle='dashed')
ax[i].set_title('Test output', fontsize=15)
ax[i].set_xlim(xmin=10**0, xmax=n_epoch)
ax[i].set_xlabel('Epoch', fontsize=15)
ax[i].tick_params(labelsize=15)
ax[i].set_xscale('log')
ax[i].set_xticks(x_ticks)
plt.tight_layout()
if notebook:
path = str(Path().resolve().parent)
else:
path = str(Path().resolve())
fig.savefig(path + f'/output/{init_name}_regime.png')
| 4,163 | 33.131148 | 80 |
py
|
snlpy
|
snlpy-master/scripts/structural-preprocess.py
|
from snlpy.ppr import ppr
from scipy import sparse
from sys import argv
import numpy as np
import pickle as pkl
def ppr_preprocess(adj, alpha=0.8, tol=0.00001, workers=20, seeds=None):
if seeds is None:
seeds = range(adj.shape[0])
return sparse.vstack([ppr(adj, [seed], alpha, tol) for seed in seeds])
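# Row s of the stacked sparse matrix holds the personalized PageRank vector seeded
# at node s; the pickled result is later used as structural features by the
# link-prediction script.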
data_path = argv[1]
adj = pkl.load(open(data_path + '/adj.pickle', 'rb'))
ppr_mat = ppr_preprocess(adj)
pkl.dump(ppr_mat, open('%s/structural.pickle' % (data_path), 'wb'))
| 483 | 27.470588 | 74 |
py
|
snlpy
|
snlpy-master/scripts/link-prediction.py
|
import getopt
from sys import argv
import numpy as np
import matplotlib.pyplot as plt
from snlpy import structural_feats
import random
from sklearn.model_selection import ShuffleSplit
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import argparse
parser = argparse.ArgumentParser(description='compute link prediction stuff')
parser.add_argument("--loadmodels", "-l", help="load models instead of training them",
action="store_true")
parser.add_argument("--no_roc", "-n", help="don't compute roc scores",
action="store_true")
parser.add_argument("--no_struct", "-s", help="don't compute roc scores",
action="store_true")
parser.add_argument("data_path", help="path to directory containing dataset")
parser.add_argument("num_rows", help="number of rows for reliability curve", type=int)
args = parser.parse_args()
def sample_rows(graph, num_rows):
"""
Provide coordinates for all entries in num_rows number of random rows
:param graph: CsrGraph to choose rows from.
:param num_rows: Number of rows to sample.
"""
rows = np.random.randint(0, high=graph.number_of_nodes(), size=num_rows, dtype=np.int32)
pairs = np.vstack([np.array([u, v]) for u in graph.nodes() for v in rows])
return pairs
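# histofy builds a complementary cumulative histogram: bins[b] is the fraction of
# sampled nodes whose precision@d falls in bin b or higher, which is the quantity
# plotted on the reliability curve.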
def histofy(ps, num_bins=1000):
bins = np.zeros(num_bins, dtype=np.float32)
for p in ps:
bins[int( np.floor( p * (num_bins - 1)) )] += 1
accumulator = 0
for i in range(num_bins):
accumulator += bins[-(i + 1)]
bins[-(i + 1)] = accumulator / len(ps)
print(bins)
return bins
##
data_path = args.data_path
##
embedding_names = ['deepwalk', 'netmf', 'node2vec', 'structural']
if args.no_struct:
embedding_names = embedding_names[:-1]
#embedding_names = ['netmf']
import pickle as pkl
adj = pkl.load(open(data_path + '/adj.pickle', 'rb'))
label_counts = np.asarray(np.sum(adj, axis=1)).reshape(-1)
shuffle = ShuffleSplit(n_splits=1, test_size=0.5)
train_ind, test_ind = list( shuffle.split(range(adj.nnz)) )[0]
train_pair = np.vstack(adj.nonzero()).T[train_ind]
test_pair = np.vstack(adj.nonzero()).T[test_ind]
rng = np.random.default_rng()
neg_train = rng.integers(low=0, high=adj.shape[0], size=(len(train_ind), 2))
train_pair = np.vstack((train_pair, neg_train))
y_train = np.zeros( (2*len(train_ind), 1), dtype=bool)
y_train[:len(train_ind)] = True
neg_test = rng.integers(low=0, high=adj.shape[0], size=(len(test_ind), 2))
test_pair = np.vstack((test_pair, neg_test))
y_test= np.zeros( (2*len(test_ind), 1), dtype=bool)
y_test[:len(test_ind)] = True
num_rows = args.num_rows
sample_verts = rng.integers(low=0, high=adj.shape[0], size=num_rows)
#sample_verts = random.choices(range(adj.shape[0]), weights=label_counts, k=num_rows)
print(label_counts[sample_verts])
fig, ax = plt.subplots()
for emb in embedding_names:
print(emb)
if emb == 'structural':
ppr = pkl.load(open('%s/structural.pickle' % (data_path), 'rb'))
if not args.loadmodels:
X = structural_feats(np.vstack((train_pair, test_pair)), adj, ppr)
X_train = X[:len(train_pair)]
if not args.no_roc:
X_test = X[len(train_pair):]
else:
vecs = np.load('%s/%s.npy' % (data_path, emb))
X = np.multiply(vecs, vecs)
if not args.loadmodels:
X_train = np.multiply(vecs[train_pair[:, 0]], vecs[train_pair[:, 1]])
if not args.no_roc:
X_test = np.multiply(vecs[test_pair[:, 0]], vecs[test_pair[:, 1]])
if not args.loadmodels:
clf = OneVsRestClassifier(
LogisticRegression(
solver="liblinear",
multi_class="ovr"))
print('Training model')
clf.fit(X_train, y_train)
print('done training')
pkl.dump(clf, open('%s/%s-lp-model.pickle' % (data_path, emb), 'wb'))
else:
clf = pkl.load(open('%s/%s-lp-model.pickle' % (data_path, emb), 'rb'))
print('model loaded')
if not args.no_roc:
test_scores = clf.predict_proba(X_test)[:, 1]
print("ROC AUC SCORE: %f" % roc_auc_score(y_test, test_scores))
p, r, t = precision_recall_curve(y_test, test_scores)
print("PR AUC SCORE: %f" % auc(r, p))
ps = []
for v in sample_verts:
pairs = np.array([[v, i] for i in range(adj.shape[0])], dtype=np.int32)
if emb != 'structural':
            feats = np.multiply(vecs[v], vecs)  # same Hadamard-product features the classifier was trained on
else:
feats = structural_feats(pairs, adj, ppr)
row_scores = clf.predict_proba(feats)[:, 1]
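        # precision@d for vertex v: of the deg(v) highest-scoring candidate pairs, the fraction that are true edges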
precision = np.sum([adj[pair[0], pair[1]] for pair in pairs[np.argsort(-row_scores)[:label_counts[v]]]]) / label_counts[v]
ps.append(precision)
ax.plot(np.arange(1000) / 1000, histofy(ps), label=emb)
graph_name = data_path.split('/')[-2]
ax.set_title('%s link prediction reliability curve' % (graph_name))
ax.set_ylabel('Percent of nodes')
ax.set_xlabel('Precision@d')
ax.legend()
fig.savefig('figures/%s-lp-reliability-curve.png' % (graph_name))
| 5,186 | 36.861314 | 130 |
py
|
snlpy
|
snlpy-master/snlpy/setup.py
|
from setuptools import setup
setup(name='snlpy',
version='0.1',
      install_requires=['numpy', 'scipy', 'scikit-learn', 'numba', 'gensim'])
| 136 | 18.571429 | 63 |
py
|
snlpy
|
snlpy-master/snlpy/structural.py
|
import numpy as np
from multiprocessing import Pool
from scipy import sparse
from numba import njit, prange
def structural_feats(pairs, adj, ppr, chunksize=10000):
chunked_pairs = [pairs[i * chunksize:(i + 1) * chunksize]
for i in range(int(len(pairs) / chunksize))]
if int(len(pairs) / chunksize) * chunksize != len(pairs):
chunked_pairs += [pairs[int(len(pairs) / chunksize) * chunksize:]]
return np.vstack([_batch_features(batch, adj, ppr) for batch in chunked_pairs])
def _batch_features(batch, adj, ppr):
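    """Stack cosine similarity, 3-path counts, and both directions of PPR for one batch of pairs."""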
if len(batch) == 0:
return
cosines = cosine_sim(batch, adj)
connectivities = compute_three_paths(batch, adj)
pprs = np.array(list(map(
lambda x: [ppr[x[0], x[1]], ppr[x[1], x[0]]], batch)))
return np.hstack((cosines, connectivities, pprs))
def compute_three_paths(pairs, adj):
"""
    Computes number of paths of length 3 between vertices in pairs.
PARAMETERS
----------
pairs : np.ndarray(dtype=np.int32, shape=(k, 2))
array of pairs
    adj : scipy.sparse.csr_matrix
        adjacency matrix of the graph to use
RETURNS
-------
np.ndarray(dtype=np.int32, shape=(k,1))
array of number of paths of length 3
"""
row_ptrs = adj.indptr
col_indxs = adj.indices
connectivities = np.zeros((len(pairs), 1))
u_neighb = adj[pairs[:, 1]].todense()
return _compute_three_paths_aggregate(connectivities, pairs[:, 0],
row_ptrs, col_indxs,
u_neighb)
@njit(parallel=True)
def _compute_three_paths_aggregate(feature_vec, vs, row_ptrs, col_indxs, u_neighb):
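    """For each i, count walks vs[i] -> k -> l where l is flagged in u_neighb[i] (i.e. adjacent to the pair's second vertex)."""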
for i in prange(len(vs)):
v = vs[i]
for k in col_indxs[row_ptrs[v] : row_ptrs[v + 1]]:
for l in col_indxs[row_ptrs[k]:row_ptrs[k + 1]]:
if u_neighb[i, l]:
feature_vec[i] += 1
return feature_vec
def cosine_sim(pairs, m):
"""
Computes cosine similarities between all pairs of
vertices in pairs.
PARAMETERS
----------
pairs : np.ndarray(dtype=np.int32, shape=(k, 2))
array of pairs to compute cosine similarities for
m : scipy.sparse.csr_matrix
csr_matrix representation of graph to use to
compute cosine similarities
RETURNS
-------
np.ndarray(dtype=np.float32, shape=(k,1))
array of cosine similarities
"""
left_adj_vectors = m[pairs[:, 0]]
right_adj_vectors = m[pairs[:, 1]]
lav_ptr = left_adj_vectors.indptr
lav_col = left_adj_vectors.indices
rav_ptr = right_adj_vectors.indptr
rav_col = right_adj_vectors.indices
cosines = np.zeros(len(rav_ptr) - 1)
return _cosines(lav_ptr, lav_col, rav_ptr, rav_col, cosines).reshape(-1, 1)
@njit(parallel=True)
def _cosines(lav_ptr, lav_col, rav_ptr, rav_col, cosines):
for i in prange(len(cosines)):
cosines[i] = _cosine_sim_pair(lav_col[lav_ptr[i]:lav_ptr[i + 1]],
rav_col[rav_ptr[i]:rav_ptr[i + 1]])
return cosines
@njit()
def _cosine_sim_pair(left_ind, right_ind):
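    """Cosine similarity of two binary rows given as sorted index arrays: |intersection| / sqrt(|left| * |right|), via a merge scan."""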
if len(left_ind) == 0 or len(right_ind) == 0:
return 0.0
factor = 1 / np.sqrt(len(left_ind) * len(right_ind))
cosine = 0
i = 0
j = 0
while i < len(left_ind) and j < len(right_ind):
if left_ind[i] == right_ind[j]:
cosine += 1
i += 1
j += 1
elif left_ind[i] < right_ind[j]:
i += 1
else:
j += 1
return factor * cosine
| 3,532 | 31.412844 | 83 |
py
|
snlpy
|
snlpy-master/snlpy/__init__.py
|
from snlpy.ppr import ppr
from snlpy.embeddings import DeepWalk, NetMF, Node2Vec, Structural
from snlpy.structural import structural_feats
| 139 | 34 | 66 |
py
|
snlpy
|
snlpy-master/snlpy/ppr.py
|
"""
Implementation for the Andersen-Chung-Lang approximate ppr algorithm
METHODS
-------
ppr(G, seed, alpha=0.85, tol=0.0001)
"""
from numba import njit, prange
from scipy import sparse
import numpy as np
import collections
def ppr(adj, seed, alpha=0.85, tol=0.0001):
"""
Compute approximate ppr vector for the given seed on the graph
note: this is stolen from dgleich's github page originally
PARAMETERS
----------
G : CsrGraph
The graph on which to perform PPR
seed : Iterable of ints
node ids for seeds for PPR walk to teleport to
alpha : float
teleportation parameter for PPR
tol : float
resolution parameter for PPR, maximum nnz size of result is
1/tol
RETURNS
-------
scipy.sparse.csr_matrix representation of approximate PPR vector
"""
p = np.zeros(adj.shape[0])
r = np.zeros(adj.shape[0])
Q = collections.deque() # initialize queue
r[seed] = 1/len(seed)
Q.extend(s for s in seed)
while len(Q) > 0:
v = Q.popleft() # v has r[v] > tol*deg(v)
p, r_prime = push(v, np.copy(r), p, adj.indptr, adj.indices, alpha)
new_verts = np.where(r_prime - r > 0)[0]
r = r_prime
Q.extend(u for u in new_verts if r[u] / np.sum(adj[u].todense()) > tol)
return sparse.csr_matrix(p)
@njit()
def push(u, r, p, adj_ptrs, adj_cols, alpha):
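    """One ACL push step: move alpha*r[u] of residual mass to p[u], keep (1-alpha)*r[u]/2 at u, and spread the remaining (1-alpha)*r[u]/2 evenly over u's neighbours."""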
r_u = r[u]
p[u] += alpha * r_u
r[u] = (1 - alpha) * r_u / 2
r[adj_cols[adj_ptrs[u]:adj_ptrs[u + 1]]
] += (1 - alpha) * r_u / (2 * (adj_ptrs[u + 1] - adj_ptrs[u]))
return p, r
| 1,594 | 26.5 | 79 |
py
|
snlpy
|
snlpy-master/snlpy/embeddings/deepwalk.py
|
"""
Implementation for DeepWalk
"""
from numba import njit, prange
from numpy import random
import numpy as np
from gensim.models.word2vec import Word2Vec
def DeepWalk(adj, walk_number=10, walk_length=80, dimensions=128,
workers=4, window_size=5, epochs=1, learning_rate=0.05):
"""
Fit an embedding to graph according to DeepWalk method
PARAMETERS
----------
    adj : scipy.sparse.csr_matrix
        adjacency matrix of graph to which to fit an embedding
    walk_number : int, optional
        number of walks for DeepWalk
    walk_length : int, optional
        length of walks for DeepWalk
    dimensions : int, optional
        number of dimensions for the embedding
    workers : int, optional
        number of workers for the Word2Vec step
        (random walks use all available cores)
    window_size : int, optional
        window size for Word2Vec
    epochs : int, optional
        number of iterations for Word2Vec
    learning_rate : float, optional
parameter for Word2Vec
RETURNS
-------
np.ndarray(shape=(adj.shape[0], d), dtype=np.float32)
"""
walk_container = _do_walks(adj, walk_length, walk_number)
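    # note: the keyword arguments `iter` and `size` below, and the `model[str(i)]` lookup,
    # assume the gensim 3.x API; gensim >= 4 renamed them to `epochs`/`vector_size`
    # and moved word lookups to `model.wv[...]`.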
model = Word2Vec(walk_container,
hs=1,
alpha=learning_rate,
iter=epochs,
size=dimensions,
window=window_size,
min_count=1,
workers=workers,
)
emb = np.zeros((adj.shape[0], dimensions))
for i in range(adj.shape[0]):
emb[i, :] = model[str(i)]
return emb
@njit()
def do_step(v, rows, indxs):
"""does one step of a walk from v
PARAMETERS
----------
v : int
vertex from which to step
rows : np.ndarray
array containing all rows of adjacency matrix concatenated
indxs : np.ndarray
array of pointers into rows
RETURNS
_______
int
next step in random walk
"""
return random.choice(rows[indxs[v]:indxs[v + 1]])
@njit(parallel=True)
def do_walk(rows, indxs, num_steps, endpoints, walks):
"""
does a walk from every vertex given in endpoints
PARAMETERS
----------
rows : np.ndarray
array containing column indices of all nonzero coordinates
in the adjacency matrix
indxs : np.ndarray
array of pointers into rows indicating the start of each row
num_steps : int
length of walk to perform
endpoints : np.ndarray
array of endpoints from which to start walks
walks : np.ndarray
empty placeholder array which will be filled with walk transcripts
RETURNS
_______
np.ndarray containing walk transcripts
"""
walks[:, 0] = endpoints
for v in prange(len(endpoints)):
for j in range(1, num_steps):
walks[v, j] = do_step(walks[v, j - 1], rows, indxs)
return walks
def process_adj(g):
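    """Return the CSR column-index (indices) and row-pointer (indptr) arrays of the adjacency matrix."""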
rows = g.indices
indxs = g.indptr
return rows, indxs
class _WalkContainer():
"""Iterator containing the walk transcripts"""
def __init__(self, height, width):
"""height, width: ints for size of walk container"""
self.walks = np.zeros((height, width), dtype=np.int32)
def __iter__(self):
for walk in self.walks:
yield [str(x) for x in walk]
def _do_walks(adj, walk_length, walk_number):
"""
Perform random walks
PARAMETERS
----------
adj : scipy.sparse.csr_matrix
adjacency matrix of graph to do walks on
walk_length : int
length of random walks
walk_number : int
number of random walks per vertex
RETURNS
-------
iterator containing walk as lists of strings
"""
rows, indxs = process_adj(adj)
n = len(indxs) - 1
walk_container = _WalkContainer(n * walk_number, walk_length)
for i in range(walk_number):
endpoints = np.arange(n, dtype=np.int32)
do_walk(rows, indxs, walk_length, endpoints,
walk_container.walks[i * n:(i + 1) * n])
return walk_container
| 4,088 | 26.26 | 74 |
py
|
snlpy
|
snlpy-master/snlpy/embeddings/node2vec.py
|
"""
Node2Vec embedding implementation
"""
from gensim.models.word2vec import Word2Vec
import numpy as np
from numba import njit, prange
from snlpy.embeddings.deepwalk import _WalkContainer, process_adj
def Node2Vec(adj, walk_number=10, walk_length=80, dimensions=128,
workers=4, window_size=5, epochs=1, learning_rate=0.05, p=0.5, q=0.5):
"""
PARAMETERS
----------
    adj : scipy.sparse.csr_matrix
        adjacency matrix of graph to which to fit an embedding
walk_number : int, optional
number of walks for Node2Vec
walk_length : int, optional
length of walks for Node2Vec
dimensions : int, optional
number of dimensions for the embedding
workers : int, optional
number of workers for the Word2Vec step, default is 4.
window_size : int, optional
window size for Word2Vec, default is 5
    epochs : int, optional
number of iterations for Word2Vec, default is 1
    learning_rate : float, optional
        learning rate for Word2Vec, default is 0.05
p : float, optional
parameter for Node2Vec walks
q : float, optional
parameter for Node2Vec walks
RETURNS
-------
    np.ndarray(shape=(adj.shape[0], dimensions), dtype=np.float32)
"""
walk_container = _do_walks(adj, walk_length, walk_number, p, q)
model = Word2Vec(walk_container,
hs=1,
alpha=learning_rate,
iter=epochs,
size=dimensions,
window=window_size,
min_count=1,
workers=workers)
emb = np.zeros((adj.shape[0], dimensions))
for i in range(adj.shape[0]):
emb[i, :] = model[str(i)]
return emb
@njit()
def _do_step(t, v, p, q, t_neighbs, v_neighbs):
"""
Returns next step of Node2Vec walk when t is the previous vertex
and v is the current one. Parameters are p and q and t_neighbs
and v_neighbs are arrays holding the neighbors of t and v
respectively.
"""
# first we need to calculate the distribution over possible
# next steps
index = 0
domain = np.concatenate((t_neighbs, v_neighbs)).astype(np.int32)
ps = np.concatenate((t_neighbs, v_neighbs)).astype(np.float32)
for u in v_neighbs:
domain[index] = u
# I would like to check if u in t_neighbs, but numba wont
# let me use "in" here, have to write our own search
# TODO: would binary search be worth implementing here?
in_t_flag = False
for w in t_neighbs:
if u == w:
in_t_flag = True
break
if in_t_flag:
ps[index] = 1.0
elif u == t:
ps[index] = 1 / p
else:
ps[index] = 1 / q
index += 1
ps[index:] = 0
# normalize and make ps contain the cdf
mass = 0
ps = ps / np.sum(ps)
for i in range(index):
mass += ps[i]
ps[i] = mass
# now sample from distribution and return endpoint
return domain[_prob_map(np.random.random_sample(), ps, 0, index)]
def _do_n2v_walks(p, q, length, rows, indxs, chunksize=10000):
"""Breaks walks up into chunks for memory reasons and performs the walks on them"""
n = len(indxs) - 1
transcripts = np.zeros((0, length), dtype=np.int32)
for chunk_start in range(0, n, chunksize):
chunk_stop = min(chunk_start + chunksize, n)
# create the arrays for this batch
t_neighbs_chunk = np.zeros(
(chunk_stop - chunk_start, n), dtype=np.bool_)
v_neighbs_chunk = np.zeros(
(chunk_stop - chunk_start, n), dtype=np.bool_)
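        # these boolean neighbour buffers are handed to _batch_walks but are not currently used there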
transcript_arr = np.zeros(
(chunk_stop - chunk_start, length), dtype=np.int32)
# since Node2Vec walks depend on the last two vertices, we need to start the
# the first two columns of the transcript array to contain the walk startpoints
transcript_arr[:, 0] = range(chunk_start, chunk_stop)
transcript_arr[:, 1] = range(chunk_start, chunk_stop)
# do the walks
transcript_arr = _batch_walks(transcript_arr, length, p, q,
rows, indxs, t_neighbs_chunk, v_neighbs_chunk)
transcripts = np.vstack((transcripts, transcript_arr))
return transcripts
@njit(parallel=True)
def _batch_walks(transcript_arr, length, p, q, rows, indxs, t_neighbs_chunk, v_neighbs_chunk):
"""Performs walks for all vertices in the first two columns of transcript_arr"""
# walks for each vertex are done in parallel
for i in prange(transcript_arr.shape[0]):
for step in range(length):
# as far as I can tell, I can't do prange(start, stop), so this
# continue is necessary.
if step < 2:
continue
v = transcript_arr[i, step - 1]
t = transcript_arr[i, step - 2]
            # neighbours of the previous vertex t and of the current vertex v
            t_neighbs = rows[indxs[t]:indxs[t + 1]]
            v_neighbs = rows[indxs[v]:indxs[v + 1]]
# get the probability distribution over the next step
transcript_arr[i, step] = _do_step(
t, v, p, q, t_neighbs, v_neighbs)
return transcript_arr
@njit()
def _prob_map(val, p_arr, beg, end):
"""returns the least index into p_arr with value at least val"""
# binary search, assumes p_arr is sorted
if end - beg <= 1:
return beg
else:
pivot = beg + ((end - beg) // 2)
if val < p_arr[pivot]:
return _prob_map(val, p_arr, beg, pivot)
else:
return _prob_map(val, p_arr, pivot, end)
def _do_walks(adj, walk_length, walk_number, p, q, chunksize=4000):
"""Perform node2vec's second order walks
PARAMETERS
----------
adj : CsrGraph
adjacency matrix of graph to do walks on
walk_length : int
length of random walks
walk_number : int
number of random walks per vertex
p : float
parameter for Node2Vec walks
q : float
parameter for Node2Vec walks
chunksize=4000 : int
number of walks to compute at once. Controls memory usage
RETURNS
-------
iterator containing walk transcripts as lists of strings
"""
rows, indxs = process_adj(adj)
n = len(indxs) - 1
# pool = Pool(processes=workers)
# args = [(x, walk_length, p, q, rows, indxs)
# for x in range(n) for _ in range(walk_number)]
np.random.seed()
walk_container = _WalkContainer(n * walk_number, walk_length)
walk_container.walks = np.vstack([_do_n2v_walks(
p, q, walk_length, rows, indxs, chunksize) for _ in range(walk_number)])
return walk_container
| 6,726 | 35.166667 | 94 |
py
|
snlpy
|
snlpy-master/snlpy/embeddings/structural.py
|
import numpy as np
from numba import njit, prange
@njit(parallel=True)
def compute_features(X, community, j, row_ptrs, row_indxs):
for i in prange(len(row_ptrs) - 1):
for u in row_indxs[row_ptrs[i]:row_ptrs[i + 1]]:
for c in community:
if u == c:
X[i, j] += 1
break
return X
def Structural(adj, communities):
"""
Fit structural embedding
PARAMETERS
----------
adj : scipy.sparse.csr_matrix
adjacency matrix of graph
    communities : iterable of k arrays of node ids
        the members of each community
    RETURNS
    -------
    np.ndarray(shape=(adj.shape[0], k))
    """
    X = np.zeros((adj.shape[0], len(communities)))
for i, comm in enumerate(communities):
X = compute_features(X, comm, i, adj.indptr, adj.indices)
return X
| 864 | 23.714286 | 65 |
py
|
snlpy
|
snlpy-master/snlpy/embeddings/netmf.py
|
"""
Implementation of NetMF embedding method
"""
from scipy import sparse
import numpy as np
from sklearn.decomposition import TruncatedSVD
def NetMF(graph, dimensions=128, iterations=10, order=2, negative_samples=1):
"""
Fits a NetMF embedding to the given graph.
PARAMETERS
----------
graph : CsrGraph
Graph to which to fit an embedding
dimensions : int, optional
Number of dimensions for embedding, default is 128
iterations : int, optional
Number of iterations to run NetMF
order : int, optional
Power of matrix to go up to for NetMF
negative_samples : int, optional
Parameter for NetMF
RETURNS
-------
np.ndarray(shape=(graph.shape[0], d), dtype=np.float32)
"""
target_matrix = _create_target_matrix(graph, order, negative_samples)
return _create_embedding(target_matrix, dimensions, iterations)
def _create_D_inverse(graph):
"""
Creating a sparse inverse degree matrix.
Arg types:
        * **graph** *(SciPy sparse matrix)* - The adjacency matrix of the graph to be embedded.
Return types:
* **D_inverse** *(Scipy array)* - Diagonal inverse degree matrix.
"""
index = np.arange(graph.shape[0])
values = np.array([1.0/np.sum(graph[node]) for node in range(graph.shape[0])])
shape = (graph.shape[0], graph.shape[0])
D_inverse = sparse.csr_matrix((values, (index, index)), shape=shape, dtype=np.float32)
return D_inverse
def _create_base_matrix(A):
"""
Creating the normalized adjacency matrix.
Arg types:
        * **A** *(SciPy sparse matrix)* - The adjacency matrix of the graph to be embedded.
Return types:
* **(A_hat, A_hat, A_hat, D_inverse)** *(SciPy arrays)* - Normalized adjacency matrices.
"""
#A = graph.to_csr_matrix()
D_inverse = _create_D_inverse(A)
A_hat = D_inverse.dot(A)
return (A_hat, A_hat, A_hat, D_inverse)
def _create_target_matrix(graph, order, negative_samples):
"""
Creating a log transformed target matrix.
Arg types:
        * **graph** *(SciPy sparse matrix)* - The adjacency matrix of the graph to be embedded.
Return types:
* **target_matrix** *(SciPy array)* - The shifted PMI matrix.
"""
A_pool, A_tilde, A_hat, D_inverse = _create_base_matrix(graph)
for _ in range(order-1):
A_tilde = A_tilde.dot(A_hat)
A_pool = A_pool + A_tilde
del A_hat, A_tilde
A_pool.data = (graph.nnz/2*A_pool.data)/(order*negative_samples)
A_pool = A_pool.dot(D_inverse)
A_pool.data[A_pool.data < 1.0] = 1.0
target_matrix = sparse.csr_matrix((np.log(A_pool.data), A_pool.indices, A_pool.indptr),
shape=A_pool.shape,
dtype=np.float32)
return target_matrix
def _create_embedding(target_matrix, dimensions, iterations):
"""
Fitting a truncated SVD embedding of a PMI matrix.
"""
svd = TruncatedSVD(n_components=dimensions,
n_iter=iterations)
svd.fit(target_matrix)
embedding = svd.transform(target_matrix)
return embedding
| 3,060 | 30.556701 | 96 |
py
|
snlpy
|
snlpy-master/snlpy/embeddings/__init__.py
|
from snlpy.embeddings.deepwalk import DeepWalk
from snlpy.embeddings.node2vec import Node2Vec
from snlpy.embeddings.netmf import NetMF
from snlpy.embeddings.structural import Structural
from snlpy.ppr import ppr
| 212 | 34.5 | 50 |
py
|
mdf_modeling_paper
|
mdf_modeling_paper-main/mdf_decompose.py
|
import numpy as np
import dynesty.utils as dyutil
import multiprocessing as mp
import scipy.special
import scipy.interpolate
import scipy.stats
import dynesty, dynesty.pool
import contextlib
import math
mv_sun = 4.81
class KirbyMassMetInfo:
"""
Class representing the mass metallicity relation
"""
    @staticmethod
    def mass_met(log10l):
        """ avg feh at a given log10 (lum)"""
        return -1.68 + 0.29 * (log10l - 6)
    @staticmethod
    def log10_sig(log10l):
        """ log10 of MDF width at a given log10(lum) """
        return np.log10(0.45 - 0.06 * (log10l - 5)) # np.minimum(logl, -0.2)
    @staticmethod
    def mass_met_spread():
        """ Spread of the mass metallicity relation"""
        return 0.15
def get_weights(n_inbin, nmax_distinct):
"""
Populate the 2d occupancy matrix, to pick up random fe/h in each
logl bin according to the mass metallicity spread
"""
nbins = len(n_inbin)
weights = np.zeros((nbins, nmax_distinct), dtype=int)
xind = n_inbin <= nmax_distinct
weights[xind, :] = np.arange(nmax_distinct)[None, :] < n_inbin[xind][:,
None]
xind1 = ~xind
weights[xind1, :] = n_inbin[xind1][:, None] // nmax_distinct + (
((n_inbin[xind1] % nmax_distinct)[:, None]) >
np.arange(nmax_distinct)[None, :]).astype(int)
assert (weights.sum(axis=1) == n_inbin).all()
return weights
def evaler(xvals,
logl_bins,
n_inbin,
rng=None,
feh_means=None,
nmax_distinct=200,
mass_met_info=None):
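    """
    Evaluate the log-density of the combined MDF on the metallicity grid xvals.
    Each occupied luminosity bin contributes Gaussians in [Fe/H] whose means follow
    the mass-metallicity relation (with scatter drawn via rng, or fixed via feh_means)
    and whose amplitudes scale with the number of stars 10**logl_bins.
    Returns (log-density at xvals, penalty term).
    """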
nbins = len(logl_bins)
if rng is not None and feh_means is None:
eps = rng.normal(size=(nbins, nmax_distinct))
feh_means0 = mass_met_info.mass_met(logl_bins)
feh_means = feh_means0[:, None] + eps * mass_met_info.mass_met_spread()
elif feh_means is not None and rng is None:
nmax_distinct = 1
pass
else:
raise RuntimeError('feh_means and rng are both set or unset')
feh_widths = 10**(mass_met_info.log10_sig(logl_bins)[:, None])
nstars = 10**logl_bins
weights = get_weights(n_inbin, nmax_distinct)
mult = (weights) * nstars[:, None] / np.sqrt(2 * np.pi) / feh_widths
# these are multipliers in front of exp(-((feh-mu)/sig)^2)
# this assumes that feh_width is the same for every galaxy
# may need to renormalize if considering restricted range of fehs
# may need to sum to one
# making all gaussian param 1ds
mult = mult.reshape(-1)
feh_means = feh_means.reshape(-1)
feh_widths = (feh_widths[:, None] + np.zeros(
(1, nmax_distinct))).reshape(-1)
xind = mult > 0
if not xind.any():
return xvals * 0 - 1e100, -1e100
logp = (np.log(mult[None, xind]) - 0.5 * ((
(xvals[:, None] - feh_means[None, xind]) / feh_widths[None, xind])**2))
ret = scipy.special.logsumexp(logp, axis=1)
return ret, 0
class CubeTransf:
"""
    Class that transforms the unit cube into the parameter space
    (the prior transform needed for nested sampling)
"""
def __init__(
self,
maxlogn=3,
nseed=1000,
minlogl=1.9,
            maxlogl=8  # logl range for the most massive system
):
self.minlogl = minlogl
self.maxlogl = maxlogl
self.nseed = nseed
self.maxlogn = maxlogn
def __call__(self, x):
"""
Apply the actual cube transform
"""
# first two dimensions are feh, logl then are the occupation numbers
# followed by the seed
nbins = len(x) - 3
minfeh = -4
maxfeh = 0.5
feh = minfeh + (maxfeh - minfeh) * x[0]
logl = self.minlogl + (self.maxlogl - self.minlogl) * x[1]
ns = 10**(-1 + (self.maxlogn + 1) * x[2:nbins + 2])
# seed = x[nbins]
seed = (x[nbins + 2] * self.nseed)
return np.concatenate(([feh, logl], ns, [seed]))
def logp(p, data, getModel=False):
"""
Evaluate the logprobability of data given parameter vector
"""
(fehs, logl_bins, logl0, loglerr, nmax_distinct, mass_met_info, prior_only,
minfeh, maxfeh) = data
nbins = len(logl_bins)
sections = [2, nbins, 1]
(feh_ref,
logl_ref), xparam, seed0 = np.array_split(p,
np.cumsum(sections)[:-1])
nfehbins = 100
xvals = np.linspace(minfeh, maxfeh, nfehbins)
# interpolation grid of metallicities
n_inbin = xparam.astype(int)
if seed0 < 0:
return -1e100
seed0 = seed0[0]
seed = seed0.astype(int)
# seed = [int(_) for _ in (seed0.tobytes())]
totlum = np.log10(np.sum(n_inbin * 10**logl_bins) + 10**logl_ref)
# avoid plateau in logl
penalty = (n_inbin - xparam).sum() * 0.001
penalty += (seed - seed0) * .001
penalty += -0.5 * ((totlum - logl0) / loglerr)**2
penalty += -0.5 * ((feh_ref - mass_met_info.mass_met(logl_ref)) /
mass_met_info.mass_met_spread())**2
if prior_only:
return penalty
rng = np.random.default_rng(seed)
logp0, penalty1 = evaler(xvals,
logl_bins,
n_inbin,
rng=rng,
nmax_distinct=nmax_distinct,
mass_met_info=mass_met_info)
penalty += penalty1
logp_ref, _ = evaler(xvals,
np.r_[logl_ref],
np.r_[1],
feh_means=np.r_[feh_ref],
mass_met_info=mass_met_info)
II = scipy.interpolate.InterpolatedUnivariateSpline(xvals, logp0, k=1)
II_ref = scipy.interpolate.InterpolatedUnivariateSpline(xvals,
logp_ref,
k=1)
norm = (np.exp(II(xvals)).sum() +
np.exp(II_ref(xvals)).sum()) * (xvals[1] - xvals[0])
logp1 = np.logaddexp(II(fehs), II_ref(fehs)) - np.log(norm)
if getModel:
return logp1
return np.sum(logp1) + penalty
def doit(
fehs,
curmv,
npar=None,
nthreads=36,
nlive=10000,
neff=None,
steps=100,
rstate=1,
maxlogn=3, # max(log(occupation number in each logl bin))
magpad=2.5, # how far away to go in luminosity above
mv_err=0.1, # luminosity uncertainty
minlogl=None, # minimum log(L) in the grid
maxlogl=None, # maximum log(L) in the grid
mass_met_info=None,
# a class with 3 functions mass_met(log10l), mass_met_spread()
# and log10_sig(log10l)
# that return mean feh at log10l, spread in mass met relation and
# log10 width of the mdf
prior_only=False,
minfeh=-4, # minimum feh in the data/truncation
maxfeh=0.5 # maximum feh in the data/truncation
):
"""
Model the MDF as combination of MDF of lower luminosity
objects
Parameters
----
fehs: array
Array of [Fe/H] of modelled stars
curmv: absolute luminosity of the system
npar: int or None
The number of luminosity bins. If none, the number of bins
is decided automatically
nthreads: int
The number of threads needed for sampling
nlive: int
The number of live-points for nested sampling
neff: int
The desired number of effective samples in the posterior
steps: int
The number of MCMC walk steps used at each iteration of
nested sampling
rstate: int or RandomGenerator
The initialization of the random state generator
maxlogn: int
The maximum value of the log of the occupation number in each
luminosity bin
magpad: float
        How much brighter than the luminosity of the system the bins should go
mv_err: float
The uncertainty in the MV of the system
minlogl: float or None
minimum log(L) in the grid (if None, it's chosen automatically)
maxlogl: float or None,
maximum log(L) in the grid
mass_met_info: MassMetInfo class or None
a class instance that should have
3 methods mass_met(log10l), mass_met_spread(), and log10_sig(log10l)
that return mean feh at log10l, spread in mass met relation and
log10 width of the mdf
prior_only: boolean
        if True only the prior is sampled, the data is ignored
minfeh: float
The minimum [Fe/H] in the data/truncation
maxfeh: float
The maximum [Fe/H] in the data/truncation
Returns
------
ret: dict
Dictionary logl grid and posterior samples
Keys are
logl_grid: the logl grid
samp: The raw posterior samples
samp_n: The posterior samples for just the occupation numbers
in logl bins
n_16, cumul_n16, n84...: the percentiles of the occupation
numbers in bins (or for cumulativae number of dwarfs equal
or brighter than a luminosity of the bin)
"""
curlogl = -((curmv - mv_sun) / 2.5)
loglerr = mv_err / 2.5
if maxlogl is None:
maxlogl = curlogl + magpad / 2.5
if minlogl is None:
minlogl = 1.9
default_step_mag = 1
if npar is None:
npar = math.ceil((maxlogl - minlogl) / (default_step_mag / 2.5))
# this is to ensure we have a point on curlogl
# step = (curlogl - minlogl) / int(npar * (curlogl - minlogl) /
# (maxlogl - minlogl))
step = (maxlogl - minlogl) / (npar - 1)
logl_grid = np.arange(npar) * step + minlogl
nmax_distinct = 200
logl_args = (fehs, logl_grid, curlogl, loglerr, nmax_distinct,
mass_met_info, prior_only, minfeh, maxfeh)
# nseed = nlive
nseed = 2000
cube_transf = CubeTransf(nseed=nseed,
maxlogn=maxlogn,
minlogl=minlogl,
maxlogl=maxlogl)
if isinstance(rstate, int):
rstate = np.random.default_rng(rstate)
with (dynesty.pool.Pool(
nthreads, logp, cube_transf, logl_args=(logl_args, ))
if nthreads > 1 else contextlib.nullcontext()) as pool:
if nthreads > 1:
curl, curp, curargs = pool.loglike, pool.prior_transform, None
else:
curl, curp, curargs = logp, cube_transf, (logl_args, )
dns = dynesty.DynamicNestedSampler(
curl,
curp,
npar + 3,
nlive=nlive,
rstate=rstate,
logl_args=curargs,
pool=pool,
# sample='unif',
# sample='rslice',
walks=steps,
slices=steps,
sample='rslice',
)
dns.run_nested(n_effective=neff)
# dlogz_init=0.5, maxbatch=0)
samp = dyutil.resample_equal(
dns.results['samples'],
np.exp(dns.results['logwt'] - dns.results['logz'][-1]))
xlogl = samp[:, 1] * 1
xsamp = samp[:, 2:npar + 2].astype(int) * 1
xind = np.argmin(np.abs(logl_grid[None, :] - xlogl[:, None]), axis=1)
xsamp[np.arange(len(samp)), xind] += 1
res = dict(logl_grid=logl_grid, samp=samp, samp_n=xsamp)
for pp in [1, 5, 16, 50, 84, 95, 99]:
res['n%d' % pp] = scipy.stats.scoreatpercentile(xsamp, pp, axis=0)
res['cumul_n%d' % pp] = scipy.stats.scoreatpercentile(np.cumsum(
xsamp[:, ::-1], axis=1)[:, ::-1],
pp,
axis=0)
return res
| 11,538 | 34.179878 | 79 |
py
|
DivMF
|
DivMF-master/src/main.py
|
'''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import time
import math
import click
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from model import BPR
from utils import *
# Slice the given list into chunks of size n.
def list_chunk(lst, n):
return [lst[i:i+n] for i in range(0, len(lst), n)]
@click.command()
@click.option('--data', type=str, default='ml-1m', help='Select Dataset')
@click.option('--seed', type=int, default=0, help='Set random seed')
@click.option('--reg', type=bool, default=True, help='Use TDR if True')
@click.option('--unmask', type=bool, default=False, help='Use unmask scheme if True')
@click.option('--ut', type=int, default=0, help='Number of unmasking top items')
@click.option('--ur', type=int, default=0, help='Number of unmasking random items')
@click.option('--ep', type=int, default=200, help='Number of total epoch')
@click.option('--reclen', type=int, default=30, help='Number of epoch with reccommendation loss')
@click.option('--dim', type=int, default=32, help='Number of latent factors')
@click.option('--cpu', type=bool, default=False, help='Use CPU while TDR')
@click.option('--dut', type=float, default=0, help='Change on the number of unmasking top items per epoch')
@click.option('--dur', type=float, default=0, help='Change on the number of unmasking random items per epoch')
@click.option('--rbs', type=int, default=0, help='Number of rows in mini batch')
@click.option('--cbs', type=int, default=0, help='Number of columns in mini batch')
def main(data, seed, reg, unmask, ut, ur, ep, reclen, dim, cpu, dut, dur, rbs, cbs):
set_seed(seed)
device = DEVICE
# set hyperparameters
config = {
'lr': 1e-3,
'decay': 1e-4,
'latent_dim': dim,
'batch_size': 4096,
'epochs': ep,
'ks': [5, 10],
'trn_neg': 4,
'test_neg': 99
}
print(config)
torch.multiprocessing.set_sharing_strategy('file_system')
# load data
trn_path = f'../data/{data}/train'
vld_path = f'../data/{data}/validation'
test_path = f'../data/{data}/test'
train_data, test_data, user_num, item_num, train_mat = load_all(trn_path, test_path)
train_dataset = BPRData(
train_data, item_num, train_mat, config['trn_neg'], True)
test_dataset = BPRData(
test_data, item_num, train_mat, 0, False)
train_loader = DataLoader(train_dataset,
batch_size=config['batch_size'], shuffle=True,
num_workers=4)
test_loader = DataLoader(test_dataset,
batch_size=config['test_neg'] + 1,
shuffle=False, num_workers=0)
# define model and optimizer
model = BPR(user_num, item_num, config['latent_dim'])
model.to(device)
optimizer = optim.Adam(
model.parameters(), lr=config['lr'], weight_decay=config['decay'])
# show dataset stat
print('user:', user_num, ' item:', item_num, ' tr len:', len(train_data))
header = f'Epoch | '
for k in config['ks']:
header += f'Recall@{k:2d} NDCG@{k:2d} C@{k:2d} G@{k:2d} E@{k:2d} | '
header += f'Duration (sec)'
print(header)
# obtain items in training set and ground truth items from data
train_data = [[] for _ in range(user_num)]
gt = []
with open(test_path, 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
gt.append(eval(arr[0])[1])
line = fd.readline()
init_time = time.time()
# start model training
for epoch in range(1, config['epochs']+1):
model.train()
start_time = time.time()
train_loader.dataset.ng_sample()
if epoch == 1:
num_batch = 0
# train with recommendation loss
if epoch <= reclen:
for user, item_i, item_j in train_loader:
if epoch == 1:
                    for u, i in zip(user, item_i):
                        # store plain ints so the set operations in make_rec_list work as intended
                        train_data[u.item()].append(i.item())
user = user.to(device)
item_i = item_i.to(device)
item_j = item_j.to(device)
# recommendation loss
model.zero_grad()
prediction_i, prediction_j = model(user, item_i, item_j)
rec_loss = - (prediction_i - prediction_j).sigmoid().log().sum()
rec_loss.backward()
optimizer.step()
if epoch == 1:
num_batch += 1
# move model to cpu if option cpu is true
if epoch == reclen and cpu:
device = torch.device('cpu')
model = model.to(device)
optimizer_to(optimizer, device)
# train with diversity regularizer
if reg and epoch > reclen:
# top-k inference
k = config['ks'][1]
if rbs == 0:
row_batch_size = user_num
else:
row_batch_size = rbs
row_batch = list_chunk(torch.randperm(user_num).tolist(), row_batch_size)
if cbs == 0:
col_batch_size = item_num
else:
col_batch_size = cbs
col_batch = list_chunk(torch.randperm(item_num).tolist(), col_batch_size)
# calculate number of unmasking items for each mini batch
bk = math.ceil(k / len(col_batch))
bur = math.ceil(max(ur + int((epoch-reclen-1)*dur), 0) / len(col_batch))
but = math.ceil(max(ut + int((epoch-reclen-1)*dut), 0) / len(col_batch))
for rb in row_batch:
for cb in col_batch:
# inference top-k recommendation lists
model.zero_grad()
scores = []
items = torch.LongTensor(cb).to(device)
for u in rb:
u = torch.tensor([u]).to(device)
score, _ = model(u, items, items)
scores.append(score)
scores = torch.stack(scores)
scores = torch.softmax(scores, dim=1)
# unmasking mechanism
if unmask:
k_ = len(cb) - (bk+but)
else:
k_ = len(cb) - bk
mask_idx = torch.topk(-scores, k=k_)[1] # mask index for being filled 0
if unmask:
for u in range(len(rb)):
idx = torch.randperm(mask_idx.shape[1])
mask_idx[u] = mask_idx[u][idx]
if bur > 0:
mask_idx = mask_idx[:, :-bur]
mask = torch.zeros(size=scores.shape, dtype=torch.bool)
mask[torch.arange(mask.size(0)).unsqueeze(1), mask_idx] = True
topk_scores = scores.masked_fill(mask.to(device), 0)
# coverage regularizer
scores_sum = torch.sum(topk_scores, dim=0, keepdim=False)
epsilon = 0.01
scores_sum += epsilon
d_loss = -torch.sum(torch.log(scores_sum))
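                    # maximising sum_i log(sum_u s_ui) rewards giving every item some top-k mass (coverage)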
# skewness regularizer
topk_scores = torch.topk(scores, k=k)[0]
norm_scores = topk_scores / torch.sum(topk_scores, dim=1, keepdim=True)
e_loss = torch.sum(torch.sum(norm_scores * torch.log(norm_scores), dim=1))
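                    # e_loss is the negative entropy of the normalised top-k scores; minimising it evens out the within-list score distribution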
# sum of losses
regularizations = d_loss + e_loss
regularizations.backward()
optimizer.step()
# evaluate metrics
model.eval()
HRs, NDCGs, coverages, Gs, Es = [], [], [], [], []
for k in config['ks']:
rec_list = make_rec_list(model, k, user_num, item_num, train_data, device)
HR, NDCG, coverage, giny, entropy = metrics_from_list(rec_list, item_num, gt)
HRs.append(HR)
NDCGs.append(NDCG)
coverages.append(coverage)
Gs.append(giny)
Es.append(entropy)
epoch_elapsed_time = time.time() - start_time
total_elapsed_time = time.time() - init_time
# print evaluated metrics to console
content = f'{epoch:6d} | '
for hr, ndcg, coverage, g, e in zip(HRs, NDCGs, coverages, Gs, Es):
content += f'{hr:.4f} {ndcg:.4f} {coverage:.4f} {g:.4f} {e:.4f} | '
content += f'{epoch_elapsed_time:.1f} {total_elapsed_time:.1f}'
print(content)
if __name__ == '__main__':
main()
| 8,840 | 37.947137 | 110 |
py
|
DivMF
|
DivMF-master/src/utils.py
|
'''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import random
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
import torch.utils.data as data
from tqdm import tqdm
CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda' if CUDA else 'cpu')
def set_seed(seed):
'''
Set pytorch random seed as seed.
'''
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if CUDA:
        torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def hit(gt_item, pred_items):
'''
Check whether given recommendation list hits or not.
gt_item : ground truth item
pred_items : list of recommended items
'''
if gt_item in pred_items:
return 1
return 0
def ndcg(gt_item, pred_items):
'''
Calculate nDCG
gt_item : ground truth item
pred_items : list of recommended items
'''
if gt_item in pred_items:
index = pred_items.index(gt_item)
return np.reciprocal(np.log2(index+2))
return 0
def metrics_from_list(R, item_num, gt):
'''
Calculate all metrics from recommendation list
return average Hit Ratio, nDCG, coverage, gini index, entropy of R
R : list of recommendation lists
item_num : number of items in dataset
gt : list of ground truth items
'''
HR, NDCG = [], []
rec_items = []
cnt = [0 for i in range(item_num)]
for r, gt_item in zip(R, gt):
HR.append(hit(gt_item, r))
NDCG.append(ndcg(gt_item, r))
rec_items += r
for i in r:
cnt[i] += 1
coverage = len(set(rec_items))/item_num
giny = 0
cnt.sort()
height, area = 0, 0
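    # Gini index from the trapezoidal area under the Lorenz curve of per-item recommendation counts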
for c in cnt:
height += c
area += height-c/2
fair_area = height*item_num/2
giny = (fair_area-area)/fair_area
a = torch.FloatTensor(cnt)
a/=sum(a)
entropy = torch.distributions.Categorical(probs=a).entropy()
return np.mean(HR), np.mean(NDCG), coverage, giny, entropy
def make_rec_list(model, top_k, user_num, item_num, train_data, device=DEVICE):
'''
Build recommendation lists from the model
model : recommendation model
top_k : length of a recommendation list
user_num : number of users in dataset
item_num : number of items in dataset
train_data : lists of items that a user interacted in training dataset
device : device where the model mounted on
'''
rtn = []
for u in range(user_num):
items = torch.tensor(list(set(range(item_num))-set(train_data[u]))).to(device)
u = torch.tensor([u]).to(device)
score, _ = model(u, items, items)
_, indices = torch.topk(score, top_k)
recommends = torch.take(items, indices).cpu().numpy().tolist()
rtn.append(recommends)
return rtn
def load_all(trn_path, test_neg, test_num=100):
""" We load all the three file here to save time in each epoch. """
'''
Load dataset from given path
trn_path : path of training dataset
test_neg : path of test dataset
'''
train_data = pd.read_csv(
trn_path,
sep='\t', header=None, names=['user', 'item'],
usecols=[0, 1], dtype={0: np.int32, 1: np.int32})
user_num = train_data['user'].max() + 1
item_num = train_data['item'].max() + 1
train_data = train_data.values.tolist()
# load ratings as a dok matrix
train_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)
for x in train_data:
train_mat[x[0], x[1]] = 1.0
test_data = []
with open(test_neg, 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
u = eval(arr[0])[0]
test_data.append([u, eval(arr[0])[1]])
for i in arr[1:]:
test_data.append([u, int(i)])
line = fd.readline()
return train_data, test_data, user_num, item_num, train_mat
class BPRData(data.Dataset):
def __init__(self, features,
num_item, train_mat=None, num_ng=0, is_training=None):
super(BPRData, self).__init__()
""" Note that the labels are only useful when training, we thus
add them in the ng_sample() function.
features : data
num_item : number of items
train_mat : interaction matrix
num_ng : number of negative samples
is_training : is model training
"""
self.features = features
self.num_item = num_item
self.train_mat = train_mat
self.num_ng = num_ng
self.is_training = is_training
def ng_sample(self):
'''
Sample negative items for BPR
'''
assert self.is_training, 'no need to sampling when testing'
self.features_fill = []
for x in self.features:
u, i = x[0], x[1]
for t in range(self.num_ng):
j = np.random.randint(self.num_item)
while (u, j) in self.train_mat:
j = np.random.randint(self.num_item)
self.features_fill.append([u, i, j])
def __len__(self):
'''
Number of instances.
'''
return self.num_ng * len(self.features) if \
self.is_training else len(self.features)
def __getitem__(self, idx):
'''
Grab an instance.
'''
features = self.features_fill if \
self.is_training else self.features
user = features[idx][0]
item_i = features[idx][1]
item_j = features[idx][2] if \
self.is_training else features[idx][1]
return user, item_i, item_j
def optimizer_to(optim, device):
'''
Move optimizer to target device
optim : optimizer
device : target device
'''
for param in optim.state.values():
# Not sure there are any global tensors in the state dict
if isinstance(param, torch.Tensor):
param.data = param.data.to(device)
if param._grad is not None:
param._grad.data = param._grad.data.to(device)
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor):
subparam.data = subparam.data.to(device)
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to(device)
| 6,732 | 30.316279 | 86 |
py
|
DivMF
|
DivMF-master/src/model.py
|
'''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import torch.nn as nn
class BPR(nn.Module):
def __init__(self, user_num, item_num, factor_num):
super(BPR, self).__init__()
"""
user_num: number of users;
item_num: number of items;
factor_num: number of predictive factors.
"""
self.embed_user = nn.Embedding(user_num, factor_num)
self.embed_item = nn.Embedding(item_num, factor_num)
nn.init.normal_(self.embed_user.weight, std=0.01)
nn.init.normal_(self.embed_item.weight, std=0.01)
def forward(self, user, item_i, item_j):
'''
Calculate prediction scores of a user for two item lists.
'''
user = self.embed_user(user)
item_i = self.embed_item(item_i)
item_j = self.embed_item(item_j)
prediction_i = (user * item_i).sum(dim=-1)
prediction_j = (user * item_j).sum(dim=-1)
return prediction_i, prediction_j
| 972 | 25.297297 | 66 |
py
|
topcat
|
topcat-master/python/topcat.py
|
from py4j.java_gateway import (JavaGateway, GatewayParameters, launch_gateway)
import numpy as np
import os, sys
def absolutejarpath():
filepath = os.path.abspath(__file__)
projectdir = os.path.split(os.path.split(filepath)[0])[0]
jarpath = os.path.join(os.path.join(projectdir, "target"), "topcat-1.0-SNAPSHOT.jar")
return jarpath
port = launch_gateway(classpath=absolutejarpath(), die_on_exit=True, redirect_stdout=sys.stdout)
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port, auto_convert=True))
topcat = gateway.jvm.topcat.mains.PythonInterface
def persistenceModules_dist(distanceMatrices, filtrationValues, maxdim, contour=None):
    if contour is None:
return list(map(PersistenceModule, topcat.computePersistenceModules(distanceMatrices, filtrationValues, maxdim)))
return list(map(PersistenceModule, topcat.computePersistenceModules(distanceMatrices, filtrationValues, maxdim, contour)))
'''
Computes the multiparameter persistence modules from a list of points up to dimension 'maxdim'.
@param points - points a numpy array of points
@param distances - a list of strings of the distances to be used ('euclidean', 'euclidean_codensity')
@param filtrationValues - a numpy array of filtration values for each distance
@param maxdim - the max dimension of the homology to be computed
Returns a list of python PersistenceModule objects.
'''
def persistenceModules(points, distances, filtrationValues, maxdim):
return list(map(PersistenceModule, topcat.computePersistenceModules(points, distances, filtrationValues, maxdim)))
def stableRank_dist(distanceMatrices, filtrationValues, maxdim, contour=None):
    if contour is None:
return np.asarray(list(topcat.computeStableRank(distanceMatrices, filtrationValues, maxdim)))
return np.asarray(list(topcat.computeStableRank(distanceMatrices, filtrationValues, maxdim, contour)))
'''
Computes the stable rank of the multiparameter persistence modules computed from a list
of points up to dimension 'maxdim'.
@param points - points a numpy array of points
@param distances - a list of strings of the distances to be used ('euclidean', 'euclidean_codensity')
@param filtrationValues - a numpy array of filtration values for each distance
@param maxdim - the max dimension of the homology to be computed
@param contour - a numpy array of values for the step contours to be used for the stable rank (one for each distance).
Returns a list of python PersistenceModule objects.
'''
def stableRank(points, distances, filtrationValues, maxdim, contour=None):
    if contour is None:
return np.asarray(list(topcat.computeStableRank(points, distances, filtrationValues, maxdim)))
return np.asarray(list(topcat.computeStableRank(points, distances, filtrationValues, maxdim, contour)))
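# Illustrative usage (a sketch; `points` is assumed to be an (n, d) numpy array and
# `filtrationValues` a numpy array of filtration values per distance):
#   modules = persistenceModules(points, ['euclidean'], filtrationValues, maxdim=1)
#   sr = modules[1].stableRank([0.0, 0.5, 1.0])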
class PersistenceModule(object):
def __init__(self, module):
self.module = module
    '''
    Computes the stable rank of the persistence module for shift values 'values'.
    @param values - a list of floats specifying the shift values
    @param contour - an optional PersistenceContour java object
    returns a StableRankFunction object containing the stable rank for the shift values.
    '''
    def stableRank(self, values, contour=None):
        if contour is None:
            return self.module.computeStableRank(values)
        return self.module.computeStableRank(values, contour)
'''
Computes the rank of the map from index u to index v.
@param u - a list of integers
@param v - a list of integers
returns the rank of the map.
'''
def rank(self, u, v):
return self.module.rank(u, v)
def __str__(self):
return self.module.getFunctor().toString()
| 3,678 | 40.337079 | 123 |
py
|
elderflower
|
elderflower-master/setup.py
|
import os
import builtins
from setuptools import setup, find_packages
abspath = os.path.abspath(os.path.dirname(__file__))
def readme():
with open('README.md') as f:
return f.read()
install_requires = []
requirementPath = os.path.join(abspath, 'requirements.txt')
if os.path.isfile(requirementPath):
with open(requirementPath) as f:
install_requires = f.read().splitlines()
builtins.__SETUP__ = True
import elderflower
version = elderflower.__version__
setup(
name='elderflower',
version=version,
description='Wide-angle PSF modeling for low surface brightness imaging',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/NGC4676/elderflower',
author='Qing Liu',
author_email='[email protected]',
keywords='astronomy PSF fitting LSB',
    packages=find_packages(include=['elderflower', 'elderflower.*']),
python_requires='>=3.7',
install_requires=install_requires,
)
| 1,016 | 19.34 | 78 |
py
|
elderflower
|
elderflower-master/elderflower/panstarrs.py
|
"""
API for querying PANSTARRS
From http://ps1images.stsci.edu/ps1_dr2_api.html
"""
from astropy.io import ascii
from astropy.table import Table
import sys
import re
import numpy as np
import json
import requests
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
def ps1cone(ra,dec,radius,table="mean",release="dr1",format="csv",columns=None,
baseurl="https://catalogs.mast.stsci.edu/api/v0.1/panstarrs", verbose=False,
**kw):
"""Do a cone search of the PS1 catalog
Parameters
----------
ra (float): (degrees) J2000 Right Ascension
dec (float): (degrees) J2000 Declination
radius (float): (degrees) Search radius (<= 0.5 degrees)
table (string): mean, stack, or detection
release (string): dr1 or dr2
format: csv, votable, json
columns: list of column names to include (None means use defaults)
baseurl: base URL for the request
verbose: print info about request
**kw: other parameters (e.g., 'nDetections.min':2)
"""
data = kw.copy()
data['ra'] = ra
data['dec'] = dec
data['radius'] = radius
return ps1search(table=table,release=release,format=format,columns=columns,
baseurl=baseurl, verbose=verbose, **data)
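# Illustrative usage (a sketch; the coordinates and column names are placeholders,
# the constraint follows the docstring example above):
#   tab = ps1cone(187.706, 12.391, 0.05, release="dr2",
#                 columns=["objID", "raMean", "decMean", "gMeanPSFMag"],
#                 **{"nDetections.min": 2})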
def ps1search(table="mean",release="dr1",format="csv",columns=None,
baseurl="https://catalogs.mast.stsci.edu/api/v0.1/panstarrs", verbose=False,
**kw):
"""Do a general search of the PS1 catalog (possibly without ra/dec/radius)
Parameters
----------
table (string): mean, stack, or detection
release (string): dr1 or dr2
format: csv, votable, json
columns: list of column names to include (None means use defaults)
baseurl: base URL for the request
verbose: print info about request
**kw: other parameters (e.g., 'nDetections.min':2). Note this is required!
"""
data = kw.copy()
if not data:
raise ValueError("You must specify some parameters for search")
checklegal(table,release)
if format not in ("csv","votable","json"):
raise ValueError("Bad value for format")
url = "{baseurl}/{release}/{table}.{format}".format(**locals())
if columns:
# check that column values are legal
# create a dictionary to speed this up
dcols = {}
for col in ps1metadata(table,release)['name']:
dcols[col.lower()] = 1
badcols = []
for col in columns:
if col.lower().strip() not in dcols:
badcols.append(col)
if badcols:
raise ValueError('Some columns not found in table: {}'.format(', '.join(badcols)))
# two different ways to specify a list of column values in the API
# data['columns'] = columns
data['columns'] = '[{}]'.format(','.join(columns))
# either get or post works
# r = requests.post(url, data=data)
r = requests.get(url, params=data)
if verbose:
print(r.url)
r.raise_for_status()
if format == "json":
return r.json()
else:
return r.text
def checklegal(table,release):
"""Checks if this combination of table and release is acceptable
    Raises a ValueError exception if there is a problem
"""
releaselist = ("dr1", "dr2")
    if release not in releaselist:
raise ValueError("Bad value for release (must be one of {})".format(', '.join(releaselist)))
if release=="dr1":
tablelist = ("mean", "stack")
else:
tablelist = ("mean", "stack", "detection")
if table not in tablelist:
raise ValueError("Bad value for table (for {} must be one of {})".format(release, ", ".join(tablelist)))
def ps1metadata(table="mean",release="dr1",
baseurl="https://catalogs.mast.stsci.edu/api/v0.1/panstarrs"):
"""Return metadata for the specified catalog and table
Parameters
----------
table (string): mean, stack, or detection
release (string): dr1 or dr2
baseurl: base URL for the request
Returns an astropy table with columns name, type, description
"""
checklegal(table,release)
url = "{baseurl}/{release}/{table}/metadata".format(**locals())
r = requests.get(url)
r.raise_for_status()
v = r.json()
# convert to astropy table
tab = Table(rows=[(x['name'],x['type'],x['description']) for x in v],
names=('name','type','description'))
return tab
def mastQuery(request):
"""Perform a MAST query.
Parameters
----------
request (dictionary): The MAST request json object
Returns head,content where head is the response HTTP headers, and content is the returned data
"""
server='mast.stsci.edu'
# Grab Python Version
version = ".".join(map(str, sys.version_info[:3]))
# Create Http Header Variables
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
"User-agent":"python-requests/"+version}
# Encoding the request as a json string
requestString = json.dumps(request)
requestString = urlencode(requestString)
# opening the https connection
conn = httplib.HTTPSConnection(server)
# Making the query
conn.request("POST", "/api/v0/invoke", "request="+requestString, headers)
# Getting the response
resp = conn.getresponse()
head = resp.getheaders()
content = resp.read().decode('utf-8')
# Close the https connection
conn.close()
return head,content
def resolve(name):
"""Get the RA and Dec for an object using the MAST name resolver
Parameters
----------
name (str): Name of object
Returns RA, Dec tuple with position"""
resolverRequest = {'service':'Mast.Name.Lookup',
'params':{'input':name,
'format':'json'
},
}
headers,resolvedObjectString = mastQuery(resolverRequest)
resolvedObject = json.loads(resolvedObjectString)
# The resolver returns a variety of information about the resolved object,
# however for our purposes all we need are the RA and Dec
try:
objRa = resolvedObject['resolvedCoordinate'][0]['ra']
objDec = resolvedObject['resolvedCoordinate'][0]['decl']
except IndexError as e:
raise ValueError("Unknown object '{}'".format(name))
return (objRa, objDec)
| 6,737 | 30.783019 | 112 |
py
|
elderflower
|
elderflower-master/elderflower/container.py
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from .io import logger
class Container:
"""
A container storing the prior, the loglikelihood function and
fitting data & setups. The container is passed to the sampler.
"""
def __init__(self,
n_spline=2,
leg2d=False,
fit_sigma=True,
fit_frac=False,
brightest_only=False,
parallel=False,
draw_real=True):
        if isinstance(n_spline, (int, float)):
            if n_spline <= 1:
                sys.exit('n_spline needs to be >=2!')
self.n_spline = n_spline
self.fit_sigma = fit_sigma
self.fit_frac = fit_frac
self.leg2d = leg2d
self.brightest_only = brightest_only
self.parallel = parallel
self.draw_real = draw_real
def __str__(self):
return "A Container Class"
def __repr__(self):
if hasattr(self, 'ndim'):
return f"{self.__class__.__name__} p={self.ndim}"
else:
return f"{self.__class__.__name__}"
def set_prior(self, n_est, mu_est, std_est,
n_min=1.2, d_n0=0.2,
theta_in=50, theta_out=300):
""" Setup priors for Bayesian fitting."""
from .modeling import set_prior
fit_sigma = self.fit_sigma
fit_frac = self.fit_frac
n_spline = self.n_spline
leg2d = self.leg2d
prior_tf = set_prior(n_est, mu_est, std_est,
n_spline=n_spline, leg2d=leg2d,
n_min=n_min, d_n0=d_n0, fix_n0=self.fix_n0,
theta_in=theta_in, theta_out=theta_out,
fit_sigma=fit_sigma, fit_frac=fit_frac)
self.prior_transform = prior_tf
# Set labels for displaying the results
labels = set_labels(n_spline=n_spline, leg2d=leg2d,
fit_sigma=fit_sigma, fit_frac=fit_frac)
self.labels = labels
self.ndim = len(labels)
self.n_est = n_est
self.mu_est = mu_est
self.std_est = std_est
def set_MLE_bounds(self, n_est, mu_est, std_est,
n_min=1.2, d_n0=0.2,
theta_in=50, theta_out=300):
""" Setup p0 and bounds for MLE fitting """
for option in ['fit_sigma', 'fit_frac', 'leg2d']:
if getattr(self, option):
logger.warning(f"{option} not supported in MLE. Will be turned off.")
                setattr(self, option, False)
n0 = n_est
n_spline = self.n_spline
log_t_in, log_t_out = np.log10(theta_in), np.log10(theta_out)
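        # MLE parameter vector layout: [n0, n1, ..., n_{k-1}, log10(theta_1), ..., log10(theta_{k-1}), mu]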
log_theta_bounds = [(log_t_in, log_t_out) for i in range(n_spline-1)]
bkg_bounds = [(mu_est-3*std_est, mu_est+3*std_est)]
if n_spline == 2:
self.param0 = np.array([n0, 2.2, 1.8, mu_est])
n_bounds = [(n0-d_n0, n0+d_n0), (n_min, 3.)]
elif n_spline == 3:
self.param0 = np.array([n0, 2.5, 2., 1.8, 2., mu_est])
n_bounds = [(n0-d_n0, n0+d_n0), (2., 3.), (n_min, 2+d_n0)]
else:
n_guess = np.linspace(3., n_min, n_spline-1)
theta_guess = np.linspace(1.8, log_t_out-0.3, n_spline-1)
self.param0 = np.concatenate([[n0], n_guess, theta_guess, [mu_est]])
n_bounds = [(n0-d_n0, n0+d_n0), (2., n0-d_n0)] + [(n_min, n0-d_n0) for i in range(n_spline-2)]
logger.warning("Components > 3. The MLE might reach maxiter or maxfev.")
self.MLE_bounds = tuple(n_bounds + log_theta_bounds + bkg_bounds)
self.n_est = n_est
self.mu_est = mu_est
self.std_est = std_est
self.ndim = 2 * n_spline
def set_likelihood(self,
image, mask_fit,
psf, stars,
norm='brightness',
psf_range=[None, None],
G_eff=1e5,
image_base=None):
""" Setup likelihood function for fitting """
from .modeling import set_likelihood
if image_base is None:
image_base = np.zeros_like(mask_fit)
self.image_base = image_base
# Copy psf and stars to preserve the orginal ones
stars_tri = stars.copy()
psf_tri = psf.copy()
# Set up likelihood function
loglike = set_likelihood(image, mask_fit,
psf_tri, stars_tri,
norm=norm,
psf_range=psf_range,
fix_n0=self.fix_n0,
std_est=self.std_est,
G_eff=G_eff,
n_spline=self.n_spline,
leg2d=self.leg2d,
fit_sigma=self.fit_sigma,
fit_frac=self.fit_frac,
brightest_only=self.brightest_only,
parallel=self.parallel,
draw_real=self.draw_real)
self.loglikelihood = loglike
def set_labels(n_spline, fit_sigma=True, fit_frac=False, leg2d=False):
""" Setup labels for cornerplot """
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
if n_spline=='m':
labels = [r'$\gamma_1$', r'$\beta_1$']
elif n_spline==1:
labels = [r'$n0$']
elif n_spline==2:
labels = [r'$n0$', r'$n1$', r'$\theta_1$']
elif n_spline==3:
labels = [r'$n0$', r'$n1$', r'$n2$', r'$\theta_1$', r'$\theta_2$']
else:
labels = [r'$n_%d$'%d for d in range(n_spline)] \
+ [r'$\theta_%d$'%(d+1) for d in range(n_spline-1)]
labels += [r'$\mu$']
if leg2d:
labels.insert(-1, r'$\log\,A_{01}$')
labels.insert(-1, r'$\log\,A_{10}$')
if fit_sigma:
labels += [r'$\log\,\sigma$']
if fit_frac:
labels += [r'$\log\,f$']
return labels
| 6,449 | 32.59375 | 106 |
py
|
elderflower
|
elderflower-master/elderflower/image.py
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.io import fits
import astropy.units as u
from astropy.utils import lazyproperty
from photutils.segmentation import SegmentationImage
from .io import logger
from .mask import mask_param_default
from .plotting import display, AsinhNorm, colorbar
from . import DF_pixel_scale, DF_raw_pixel_scale, DF_Gain
class ImageButler:
"""
A class storing Image info.
Parameters
----------
hdu_path : str
path of hdu data
obj_name : str
object name
band : str, 'g' 'G' 'r' or 'R'
filter name
pixel_scale : float
pixel scale in arcsec/pixel
pad : int
        padding size of the image for fitting (default: 0)
ZP : float or None (default)
zero point (if None, read from header)
bkg : float or None (default)
background (if None, read from header)
    Gain_eff : float or None (default)
effective gain (e-/ADU)
"""
def __init__(self, hdu_path,
obj_name='', band='G',
pixel_scale=DF_pixel_scale,
pad=0, ZP=None, bkg=None,
Gain_eff=None, verbose=True):
from .utils import crop_image
self.verbose = verbose
self.obj_name = obj_name
self.band = band
self.pixel_scale = pixel_scale
self.pad = pad
# Read hdu
assert os.path.isfile(hdu_path), "Image does not exist. Check path."
with fits.open(hdu_path) as hdul:
self.hdu_path = hdu_path
if verbose: logger.info(f"Read Image: {hdu_path}")
self.image_full = hdul[0].data
self.header = header = hdul[0].header
self.full_wcs = wcs.WCS(header)
self.bkg = bkg
self.ZP = ZP
self.Gain_eff = Gain_eff
@lazyproperty
def G_eff(self):
""" Effective Gain """
if self.Gain_eff is None:
N_frames = find_keyword_header(self.header, "NFRAMES", default=1e5)
G_eff = DF_Gain * N_frames
if self.verbose:
if N_frames==1e5:
logger.info("No effective Gain is given. Use sky noise.")
else:
logger.info("Effective Gain = %.3f"%G_eff)
return G_eff
else:
return self.Gain_eff
def __str__(self):
return "An ImageButler Class"
def __repr__(self):
return f"{self.__class__.__name__} for {self.hdu_path}"
class Image(ImageButler):
"""
    A class storing images.
Parameters
----------
hdu_path : str
path of hdu data
bounds0 : list [X min, Y min, X max, Y max]
boundary of region to be fit
obj_name : str
object name
band : str, 'g' 'G' 'r' or 'R'
filter name
pixel_scale : float
pixel scale in arcsec/pixel
pad : int
padding size of the image for fitting (default: 50)
ZP : float or None (default)
zero point (if None, read from header)
bkg : float or None (default)
background (if None, read from header)
G_eff : float or None (default)
effective gain (e-/ADU)
"""
def __init__(self, hdu_path, bounds0,
obj_name='', band='G', pixel_scale=DF_pixel_scale,
pad=0, ZP=None, bkg=None, G_eff=None, verbose=True):
from .utils import crop_image
super().__init__(hdu_path, obj_name, band,
pixel_scale, pad, ZP, bkg, G_eff, verbose)
self.bounds0 = np.array(bounds0)
patch_Xmin0, patch_Ymin0, patch_Xmax0, patch_Ymax0 = self.bounds0
nX0 = (patch_Xmax0 - patch_Xmin0)
nY0 = (patch_Ymax0 - patch_Ymin0)
self.image_shape0 = (nY0, nX0)
self.image_shape = (nY0 - 2 * pad, nX0 - 2 * pad)
self.cen0 = ((nX0-1)/2., (nY0-1)/2.)
self.cen = ((nX0 - 2 * pad-1)/2.,
(nY0 - 2 * pad-1)/2.)
full_wcs = self.full_wcs
# Image cutout
self.bounds = np.array([patch_Xmin0+pad, patch_Ymin0+pad,
patch_Xmax0-pad, patch_Ymax0-pad])
self.image0, self.wcs0 = crop_image(self.image_full,
self.bounds0, wcs=full_wcs)
# Cutout with pad
self.image, self.wcs = crop_image(self.image_full,
self.bounds, wcs=full_wcs)
def __str__(self):
return "An Image Class"
def __repr__(self):
return ''.join([f"{self.__class__.__name__} cutout", str(self.bounds0)])
def display(self, **kwargs):
""" Display the image """
display(self.image, **kwargs)
def make_base_image(self, psf_star, stars, vmax=30, draw=False):
""" Make basement image with fixed PSF and stars """
from .modeling import make_base_image
psf_size = int(120/self.pixel_scale) # 2 arcmin
# Draw dim stars
self.image_base = make_base_image(self.image_shape, stars,
psf_star, self.pad, psf_size,
verbose=self.verbose)
if draw:
#display
m = plt.imshow(self.image_base, norm=AsinhNorm(a=0.1, vmin=0, vmax=vmax))
colorbar(m)
plt.show()
def read_measurement_table(self, dir_measure, **kwargs):
""" Read faint stars info and brightness measurement """
from .utils import read_measurement_table
self.table_faint, self.table_norm = \
read_measurement_table(dir_measure,
self.bounds0,
obj_name=self.obj_name,
band=self.band,
pad=self.pad, **kwargs)
@property
def fwhm(self):
""" FWHM in arcsec """
return self.get_median("FWHM_IMAGE") * self.pixel_scale
def get_median(self,
colname,
mag_range=[14., 18],
mag_name="MAG_AUTO_corr"):
""" Return median value of SE measurements in the image """
tab = self.table_norm
tab_unsatur = tab[tab['FLAGS']<4]
mag_range[0] = max(mag_range[0], np.nanmin(tab_unsatur[mag_name]))
mag_range[1] = min(mag_range[1], np.nanmax(tab[mag_name]))
cond = (tab[mag_name]>mag_range[0]) & (tab[mag_name]<mag_range[1])
return np.nanmedian(tab[cond][colname])
def assign_star_props(self, **kwargs):
""" Assign position and flux for faint and bright stars from tables. """
from .utils import assign_star_props
if hasattr(self, 'table_faint') & hasattr(self, 'table_norm'):
pos_ref = self.bounds[0], self.bounds[1]
self.stars_bright, self.stars_all = \
assign_star_props(self.ZP, self.bkg, self.image_shape, pos_ref,
self.table_norm, self.table_faint, **kwargs)
else:
raise AttributeError(f"{self.__class__.__name__} has no stars info. \
Read measurement tables first!")
def fit_n0(self, dir_measure, **kwargs):
from .utils import fit_n0
n0, d_n0 = fit_n0(dir_measure, self.bounds0,
self.obj_name, self.band, self.bkg, self.ZP, **kwargs)
self.n0 = n0
self.d_n0 = d_n0
return n0, d_n0
def generate_image_psf(self, psf,
SE_catalog, seg_map,
r_scale=12,
mag_threshold=[13.5,12],
mag_saturate=13,
mag_limit=15,
mag_limit_segm=22,
make_segm=False, K=2,
catalog_sup='SE',
catalog_sup_atlas=None,
use_PS1_DR2=False,
subtract_external=True,
draw=False,
keep_tmp=False, dir_tmp='./tmp'):
""" Generate image of stars from a PSF Model"""
from .utils import (crop_catalog,
identify_extended_source,
calculate_color_term,
fit_empirical_aperture,
make_segm_from_catalog,
add_supplementary_SE_star,
add_supplementary_atlas)
from .norm import measure_Rnorm_all
from .crossmatch import cross_match_PS1
from .modeling import generate_image_fit
from .utils import check_save_path
import shutil
if use_PS1_DR2: dir_tmp+='_PS2'
check_save_path(dir_tmp, overwrite=True, verbose=False)
band = self.band
obj_name = self.obj_name
bounds = self.bounds
SE_cat = crop_catalog(SE_catalog, bounds)
SE_cat_target, ext_cat, mag_saturate = identify_extended_source(SE_cat, draw=draw)
# Use PANSTARRS DR1 or DR2?
if use_PS1_DR2:
mag_name = mag_name_cat = band.lower()+'MeanPSFMag'
else:
mag_name = band.lower()+'mag'
mag_name_cat = band.lower()+'mag_PS'
        # Crossmatch with PAN-STARRS mag < mag_limit
tab_target, tab_target_full, catalog_star = \
cross_match_PS1(band, self.full_wcs,
SE_cat_target, bounds,
pixel_scale=self.pixel_scale,
mag_limit=mag_limit,
use_PS1_DR2=use_PS1_DR2,
verbose=False)
CT = calculate_color_term(tab_target_full, mag_range=[mag_saturate,mag_limit+2],
mag_name=mag_name_cat, draw=draw)
tab_target["MAG_AUTO_corr"] = tab_target[mag_name_cat] + CT
catalog_star["MAG_AUTO_corr"] = catalog_star[mag_name] + CT #corrected mag
catalog_star_name = os.path.join(dir_tmp, f'{obj_name}-catalog_PS_{band}_all.txt')
catalog_star.write(catalog_star_name, overwrite=True, format='ascii')
if catalog_sup == "ATLAS":
tab_target = add_supplementary_atlas(tab_target, catalog_sup_atlas, SE_catalog,
mag_saturate=mag_saturate)
elif catalog_sup == "SE":
tab_target = add_supplementary_SE_star(tab_target, SE_cat_target,
mag_saturate=mag_saturate, draw=draw)
self.tab_target = tab_target
# Measure I at r0
wcs, image = self.full_wcs, self.image_full
width_cross = int(10/self.pixel_scale)
tab_norm, res_thumb = measure_Rnorm_all(tab_target, bounds, wcs,
image, seg_map,
mag_limit=mag_limit,
mag_saturate=mag_saturate,
r_scale=r_scale,
k_enlarge=2,
width_cross=width_cross,
obj_name=obj_name,
mag_name=mag_name_cat,
save=True, dir_name=dir_tmp,
verbose=False)
self.read_measurement_table(dir_tmp, r_scale=r_scale, mag_limit=mag_limit)
# Make Star Models
self.assign_star_props(r_scale=r_scale, mag_threshold=mag_threshold,
verbose=True, draw=False, save=False)
self.stars_gen = stars = self.stars_bright
if make_segm:
# Make mask map
estimate_radius = fit_empirical_aperture(tab_target_full, seg_map,
mag_name=mag_name_cat, K=K,
degree=2, draw=draw)
seg_map_cat = make_segm_from_catalog(catalog_star, bounds,
estimate_radius,
mag_name=mag_name,
obj_name=obj_name,
band=band,
mag_limit=mag_limit_segm,
ext_cat=ext_cat,
draw=draw,
save=False,
dir_name=dir_tmp)
self.seg_map = seg_map_cat
# Generate model star
image_stars, _, _ = generate_image_fit(psf.copy(), stars.copy(),
self.image_shape, subtract_external=subtract_external)
logger.info("Image of stars has been generated based on the PSF Model!")
# Delete tmp dir
if not keep_tmp:
shutil.rmtree(dir_tmp)
return image_stars
class ImageList(ImageButler):
"""
A class storing a list of images.
Parameters
----------
hdu_path : str
path of hdu data
    bounds0_list : list / tuple
list of boundaries of regions to be fit (Nx4)
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
object name
band : str, 'g' 'G' 'r' or 'R'
filter name
pixel_scale : float
pixel scale in arcsec/pixel
pad : int
padding size of the image for fitting (default: 50)
ZP : float or None (default)
zero point (if None, read from header)
bkg : float or None (default)
background (if None, read from header)
G_eff : float or None (default)
effective gain (e-/ADU)
"""
def __init__(self, hdu_path, bounds0_list,
obj_name='', band='G', pixel_scale=DF_pixel_scale,
pad=0, ZP=None, bkg=None, G_eff=None, verbose=False):
super().__init__(hdu_path, obj_name, band,
pixel_scale, pad, ZP, bkg, G_eff, verbose)
self.bounds0_list = np.atleast_2d(bounds0_list)
self.Images = [Image(hdu_path, bounds0,
obj_name, band, pixel_scale,
pad, ZP, bkg, G_eff, verbose)
for bounds0 in self.bounds0_list]
self.N_Image = len(self.Images)
def __iter__(self):
for Img in self.Images:
yield Img
def __getitem__(self, index):
return self.Images[index]
@lazyproperty
def images(self):
return np.array([Img.image for Img in self.Images])
def display(self, fig=None, ax=None):
""" Display the image list """
if fig is None:
n_row = int((self.N_Image-1)//4+1)
fig, axes = plt.subplots(n_row, 4, figsize=(14,4*n_row))
# Draw
for i, ax in zip(range(self.N_Image), axes.ravel()):
display(self.images[i], fig=fig, ax=ax)
# Delete extra ax
for ax in axes.ravel()[self.N_Image:]: fig.delaxes(ax)
plt.tight_layout()
def read_measurement_tables(self, dir_measure, **kwargs):
""" Read faint stars info and brightness measurement """
self.tables_norm = []
self.tables_faint = []
for Img in self.Images:
Img.read_measurement_table(dir_measure, **kwargs)
self.tables_faint += [Img.table_faint]
self.tables_norm += [Img.table_norm]
self.fwhm = np.mean([Img.fwhm for Img in self.Images])
def assign_star_props(self, *args, **kwargs):
""" Assign position and flux for faint and bright stars from tables. """
stars_bright, stars_all = [], []
for Img in self.Images:
Img.assign_star_props(*args, **kwargs)
stars_bright += [Img.stars_bright]
stars_all += [Img.stars_all]
self.stars_bright = stars_bright
self.stars_all = stars_all
return stars_bright, stars_all
def make_base_image(self, psf_star, stars_all, vmax=30, draw=True):
""" Make basement image with fixed PSF and stars """
for i, (Image, stars) in enumerate(zip(self.Images, stars_all)):
Image.make_base_image(psf_star, stars)
def make_mask(self, stars_list, dir_measure,
mask_param=mask_param_default,
save=False, save_dir='../output/pic',
draw=True):
"""
Make deep mask, object mask, strip mask, and cross mask.
The 'deep' mask is based on S/N.
The object mask is for masking target sources such as large galaxies.
The strip mask is spider-like, used to reduce sample size of pixels
at large radii, equivalent to assign lower weights to outskirts.
The cross mask is for masking stellar spikes of bright stars.
Parameters
----------
stars_list: list of modeling.Stars object
List of Stars object.
dir_measure : str
Directory storing the measurement.
mask_param: dict, optional
Parameters setting up the mask map.
See string doc of wide.mask
draw : bool, optional, default True
Whether to draw mask map
save : bool, optional, default True
Whether to save the image
save_dir : str, optional
Path of saving plot, default current.
"""
from .mask import Mask
from .utils import crop_image
# S/N threshold of deep mask
sn_thre = mask_param['sn_thre']
# aper mask params
mask_type = mask_param['mask_type']
r_core = mask_param['r_core']
r_out = mask_param['r_out']
# strip mask params
wid_strip = mask_param['width_strip']
n_strip = mask_param['n_strip']
dist_strip = mask_param['dist_strip']
# cross mask params
wid_cross = mask_param['width_cross'] * mask_param['k_mask_cross']
dist_cross = mask_param['dist_cross']
if mask_type=='brightness':
from .utils import SB2Intensity
count = SB2Intensity(mask_param['SB_threshold'], self.bkg,
self.ZP, self.pixel_scale)[0]
else:
count = None
masks = []
for i, (Image, stars) in enumerate(zip(self.Images, stars_list)):
if self.verbose:
logger.info("Prepare mask for region {}.".format(i+1))
mask = Mask(Image, stars, verbose=self.verbose)
# Read a map of object masks (e.g. large galaxies) or
# create a new one with given shape parameters.
            # Note the object mask needs to have the same full shape as SExtractor's
mask.make_mask_object(mask_param['mask_obj'])
if hasattr(mask, 'mask_obj_field'):
# Crop the full object mask map into a smaller one
mask.mask_obj0 = crop_image(mask.mask_obj_field, Image.bounds0)
else:
mask.mask_obj0 = np.zeros(mask.image_shape0, dtype=bool)
# Primary SN threshold mask
mask.make_mask_map_deep(dir_measure,
mask_type,
r_core, r_out,
count=count,
sn_thre=sn_thre,
obj_name=self.obj_name,
band=self.band,
draw=draw, save=save,
save_dir=save_dir)
# Supplementary Strip + Cross mask
if dist_strip is None:
dist_strip = max(Image.image_shape) * self.pixel_scale
mask.make_mask_advanced(n_strip, wid_strip, dist_strip,
wid_cross, dist_cross,
clean=mask_param['clean'],
draw=draw, save=save, save_dir=save_dir)
masks += [mask]
self.Masks = masks
self.stars = [mask.stars_new for mask in masks]
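        # After this call, self.Masks holds one Mask per region and self.stars
        # holds the updated Stars objects; downstream code typically proceeds
        # with estimate_bkg() and then set_container() defined below.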
@property
def mask_fit(self):
""" Masks for fitting """
return [mask.mask_fit for mask in self.Masks]
@property
def data(self):
""" 1D array to be fit """
data = [image[~mask_fit].copy().ravel()
for (image, mask_fit) in zip(self.images, self.mask_fit)]
return data
def estimate_bkg(self):
""" Estimate background level and std. """
from astropy.stats import sigma_clip
self.mu_est = np.zeros(len(self.Images))
self.std_est = np.zeros(len(self.Images))
for i, (Image, mask) in enumerate(zip(self.Images, self.mask_fit)):
data_sky = sigma_clip(Image.image[~mask], sigma=3)
mu_patch, std_patch = np.mean(data_sky), np.std(data_sky)
self.mu_est[i] = mu_patch
self.std_est[i] = std_patch
if self.verbose:
msg = "Estimate of Background: ({0:.4g} +/- {1:.4g}) for "
msg = msg.format(mu_patch, std_patch) + repr(Image)
logger.info(msg)
def fit_n0(self, dir_measure, N_min_fit=10, **kwargs):
""" Fit power index of 1st component with bright stars. """
self.n0, self.d_n0 = [], []
for i in range(self.N_Image):
if hasattr(self, 'std_est'):
kwargs['sky_std'] = self.std_est[i]
else:
logger.warning('Sky stddev is not estimated.')
N_fit = max(N_min_fit, self.stars[i].n_verybright)
n0, d_n0 = self.Images[i].fit_n0(dir_measure, N_fit=N_fit, **kwargs)
self.n0 += [n0]
self.d_n0 += [d_n0]
def set_container(self,
psf, stars,
n_spline=2,
leg2d=False,
fit_sigma=True,
fit_frac=False,
brightest_only=False,
parallel=False,
draw_real=True,
n_min=1.2,
d_n0_min=0.1,
theta0_range=[50, 300],
method='nested',
verbose=True):
""" Container for fit storing prior and likelihood function """
from .container import Container
self.containers = []
for i in range(self.N_Image):
if self.verbose:
logger.info("Region {}:".format(i+1))
image_shape = self.Images[i].image_shape
container = Container(n_spline, leg2d,
fit_sigma, fit_frac,
brightest_only=brightest_only,
parallel=parallel, draw_real=draw_real)
if hasattr(self, 'n0_'):
# Use a given fixed n0
n0, d_n0 = self.n0_, 0.1
if self.verbose:
msg = " - n0 is fixed to be a static value = {}.".format(n0)
logger.warning(msg)
else:
# Get first component power index if already fitted
# Otherwise n0 will be added as a parameter in the prior
n0 = getattr(self.Images[i],'n0', None)
d_n0 = getattr(self.Images[i],'d_n0', 0.1)
if (self.fix_n0 is False) | (n0 is None):
container.fix_n0 = False
d_n0 = max(d_n0, d_n0_min) # set a min dev for n0
if n0 is None: n0, d_n0 = psf.n0, 0.3 # rare case
if self.verbose:
logger.info(" - n0 will be included in the full fitting.")
else:
container.fix_n0 = self.fix_n0
if self.verbose:
msg = " - n0 will not be included in the full fitting."
msg += " Adopt fitted value n0 = {:.3f}.".format(n0)
logger.info(msg)
theta_in, theta_out = theta0_range
if theta_in is None:
theta_in = self.Masks[i].r_core_m * self.pixel_scale
if theta_out is None:
if psf.cutoff:
theta_out = psf.theta_c
else:
theta_out = int(0.8 * max(image_shape) * self.pixel_scale)
psf.theta_out = theta_out
if self.verbose:
logger.info("theta_in = {:.2f}, theta_out = {:.2f}".format(theta_in, theta_out))
# Set priors (Bayesian) or bounds (MLE)
prior_kws = dict(n_min=n_min, d_n0=d_n0,
theta_in=theta_in, theta_out=theta_out)
if method == 'mle':
# Set bounds on variables
container.set_MLE_bounds(n0, self.bkg, self.std_est[i], **prior_kws)
else:
# Set Priors
container.set_prior(n0, self.bkg, self.std_est[i], **prior_kws)
# Set Likelihood
container.set_likelihood(self.images[i],
self.mask_fit[i],
psf, stars[i],
psf_range=[None, None],
norm='brightness',
G_eff=self.G_eff,
image_base=self.Images[i].image_base)
# Set a few attributes to container for convenience
container.image = self.images[i]
container.data = self.data[i]
container.mask = self.Masks[i]
container.image_shape = image_shape
self.containers += [container]
class Thumb_Image:
"""
A class for operation and info storing of a thumbnail image.
Used for measuring scaling and stacking.
row: astropy.table.row.Row
Astropy table row.
wcs: astropy.wcs.wcs
WCS of image.
"""
def __init__(self, row, wcs):
self.wcs = wcs
self.row = row
def make_star_thumb(self,
image, seg_map=None,
n_win=20, seeing=2.5, max_size=200,
origin=1, verbose=False):
"""
Crop the image and segmentation map into thumbnails.
Parameters
----------
image : 2d array
Full image
seg_map : 2d array
Full segmentation map
n_win : int, optional, default 20
Enlarge factor (of fwhm) for the thumb size
seeing : float, optional, default 2.5
Estimate of seeing FWHM in pixel
max_size : int, optional, default 200
Max thumb size in pixel
origin : 1 or 0, optional, default 1
Position of the first pixel. origin=1 for SE convention.
"""
from .utils import coord_Im2Array
# Centroid in the image from the SE measurement
# Note SE convention is 1-based (differ from photutils)
X_c, Y_c = self.row["X_IMAGE"], self.row["Y_IMAGE"]
# Define thumbnail size
fwhm = max(self.row["FWHM_IMAGE"], seeing)
win_size = min(int(n_win * max(fwhm, 2)), max_size)
# Calculate boundary
X_min, X_max = max(origin, X_c - win_size), min(image.shape[1], X_c + win_size)
Y_min, Y_max = max(origin, Y_c - win_size), min(image.shape[0], Y_c + win_size)
x_min, y_min = coord_Im2Array(X_min, Y_min, origin) # py convention
x_max, y_max = coord_Im2Array(X_max, Y_max, origin)
X_WORLD, Y_WORLD = self.row["X_WORLD"], self.row["Y_WORLD"]
if verbose:
print("NUMBER: ", self.row["NUMBER"])
print("X_c, Y_c: ", (X_c, Y_c))
print("RA, DEC: ", (X_WORLD, Y_WORLD))
print("x_min, x_max, y_min, y_max: ", x_min, x_max, y_min, y_max)
print("X_min, X_max, Y_min, Y_max: ", X_min, X_max, Y_min, Y_max)
# Crop
self.img_thumb = image[x_min:x_max, y_min:y_max].copy()
if seg_map is None:
self.seg_thumb = None
self.mask_thumb = np.zeros_like(self.img_thumb, dtype=bool)
else:
self.seg_thumb = seg_map[x_min:x_max, y_min:y_max]
self.mask_thumb = (self.seg_thumb!=0) # mask sources
# Centroid position in the cutout (0-based py convention)
#self.cen_star = np.array([X_c - X_min, Y_c - Y_min])
self.cen_star = np.array([X_c - y_min - origin, Y_c - x_min - origin])
def extract_star(self, image,
seg_map=None,
sn_thre=2.5,
display_bkg=False,
display=False, **kwargs):
"""
Local background and segmentation.
If no segmentation map provided, do a local detection & deblend
to remove faint undetected source.
Parameters
----------
image : 2d array
Full image
seg_map : 2d array
Full segmentation map
sn_thre : float, optional, default 2.5
SNR threshold used for detection if seg_map is None
display_bkg : bool, optional, default False
            Whether to display background measurement
display : bool, optional, default False
Whether to display detection & deblend around the star
"""
from .utils import (background_extraction,
detect_sources, deblend_sources)
# Make thumbnail image
self.make_star_thumb(image, seg_map, **kwargs)
img_thumb = self.img_thumb
seg_thumb = self.seg_thumb
mask_thumb = self.mask_thumb
# Measure local background, use constant if the thumbnail is small
shape = img_thumb.shape
b_size = round(min(shape)//5/25)*25
if shape[0] >= b_size:
back, back_rms = background_extraction(img_thumb, mask=mask_thumb, b_size=b_size)
else:
im_ = np.ones_like(img_thumb)
img_thumb_ma = img_thumb[~mask_thumb]
back, back_rms = (np.median(img_thumb_ma)*im_,
mad_std(img_thumb_ma)*im_)
self.bkg = back
self.bkg_rms = back_rms
if display_bkg:
# show background subtraction
from .plotting import display_background
display_background(img_thumb, back)
if seg_thumb is None:
# do local source detection to remove faint stars using photutils
threshold = back + (sn_thre * back_rms)
segm = detect_sources(img_thumb, threshold, npixels=5)
# deblending using photutils
segm_deb = deblend_sources(img_thumb, segm, npixels=5,
nlevels=64, contrast=0.005)
else:
segm_deb = SegmentationImage(seg_thumb)
# star_ma mask other sources in the thumbnail
star_label = segm_deb.data[round(self.cen_star[1]), round(self.cen_star[0])]
star_ma = ~((segm_deb.data==star_label) | (segm_deb.data==0))
self.star_ma = star_ma
if display:
from .plotting import display_source
display_source(img_thumb, segm_deb, star_ma, back)
def compute_Rnorm(self, R=12, **kwargs):
"""
Compute the scaling factor at R using an annulus.
Note the output values include the background level.
        Parameters
        ----------
        R : int, optional, default 12
            radius in pix at which the scaling factor is measured
kwargs : dict
kwargs passed to compute_Rnorm
"""
from .norm import compute_Rnorm
I_mean, I_med, I_std, I_flag = compute_Rnorm(self.img_thumb,
self.star_ma,
self.cen_star,
R=R, **kwargs)
self.I_mean = I_mean
self.I_med = I_med
self.I_std = I_std
self.I_flag = I_flag
# Use the median of background as the local background
self.I_sky = np.median(self.bkg)
| 34,037 | 36.904232 | 96 |
py
|
elderflower
|
elderflower-master/elderflower/plotting.py
|
import os
import string
import numpy as np
from copy import copy, deepcopy
try:
import seaborn as sns
seaborn_plot = True
except ImportError:
seaborn_plot = False
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.cmap'] = 'gnuplot2'
plt.rcParams['text.usetex'] = False
plt.rcParams['font.serif'] = "Times New Roman"
rcParams.update({'font.size': 16})
rcParams.update({'xtick.major.pad': '5.0'})
rcParams.update({'xtick.major.size': '4'})
rcParams.update({'xtick.major.width': '1.'})
rcParams.update({'xtick.minor.pad': '5.0'})
rcParams.update({'xtick.minor.size': '4'})
rcParams.update({'xtick.minor.width': '0.8'})
rcParams.update({'ytick.major.pad': '5.0'})
rcParams.update({'ytick.major.size': '4'})
rcParams.update({'ytick.major.width': '1.'})
rcParams.update({'ytick.minor.pad': '5.0'})
rcParams.update({'ytick.minor.size': '4'})
rcParams.update({'ytick.minor.width': '0.8'})
rcParams.update({'axes.labelsize': 16})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import LogStretch, AsinhStretch, HistEqStretch
from astropy.stats import mad_std, sigma_clip
import astropy.units as u
import photutils
from packaging import version
if version.parse(photutils.__version__) < version.parse("1.2"):
rand_state = "random_state"
else:
rand_state = "seed"
from photutils import CircularAperture
from . import DF_pixel_scale, DF_raw_pixel_scale
### Plotting Helpers ###
def LogNorm(vmin=None, vmax=None):
return ImageNormalize(stretch=LogStretch(), vmin=vmin, vmax=vmax)
def AsinhNorm(a=0.1, vmin=None, vmax=None):
return ImageNormalize(stretch=AsinhStretch(a=a), vmin=vmin, vmax=vmax)
def HistEqNorm(data):
return ImageNormalize(stretch=HistEqStretch(data))
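# Example (illustrative): these helpers are passed to imshow via `norm`, e.g.
#   ax.imshow(image, norm=AsinhNorm(a=0.1, vmin=0, vmax=30))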
def vmin_Nmad(img, N=3):
""" lower limit of visual imshow defined by N mad_std above median """
return np.nanmedian(img) - N * mad_std(img)
def v_Nsig(img, N=2):
""" upper/lower limit of visual imshow defined by N sigma above/below median """
return np.nanmedian(img) + N * np.nanstd(img)
def colorbar(mappable, pad=0.2, size="5%", loc="right",
ticks_rot=None, ticks_size=12, color_nan='gray', **args):
""" Customized colorbar """
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
if loc=="bottom":
orent = "horizontal"
pad = 1.5*pad
rot = 60 if ticks_rot is None else ticks_rot
else:
orent = "vertical"
rot = 0 if ticks_rot is None else ticks_rot
cax = divider.append_axes(loc, size=size, pad=pad)
cb = fig.colorbar(mappable, cax=cax, orientation=orent, **args)
cb.ax.set_xticklabels(cb.ax.get_xticklabels(),rotation=rot)
cb.ax.tick_params(labelsize=ticks_size)
#cmap = cb.mappable.get_cmap()
cmap = copy(plt.cm.get_cmap())
cmap.set_bad(color=color_nan, alpha=0.3)
return cb
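# Example (illustrative): attach a colorbar to an existing image, e.g.
#   im = ax.imshow(data); colorbar(im, loc="right", size="3%")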
def make_rand_cmap(n_label, random_state=12345):
from photutils.utils import make_random_cmap
rand_cmap = make_random_cmap(n_label, **{rand_state:random_state})
rand_cmap.set_under(color='black')
rand_cmap.set_over(color='white')
return rand_cmap
def make_rand_color(n_color, seed=1234,
colour = ["indianred", "plum", "seagreen", "lightcyan",
"orchid", 'gray', 'orange', 'yellow', "brown" ]):
import random
random.seed(seed)
rand_colours = [random.choice(colour) for i in range(n_color)]
return rand_colours
### Plotting Functions ###
def display(image, mask=None,
k_std=10, cmap="gray_r",
a=0.1, fig=None, ax=None):
""" Visualize an image """
if mask is not None:
sky = image[(mask==0)]
else:
sky = sigma_clip(image, 3)
    vals = sky[~np.isnan(sky) & (sky!=0)] # some images use 0 as nan
sky_mean, sky_std = np.mean(vals), mad_std(vals)
if ax is None: fig, ax = plt.subplots(figsize=(12,8))
ax.imshow(image, cmap="gray_r",
norm=AsinhNorm(a, vmin=sky_mean-sky_std,
vmax=sky_mean+k_std*sky_std))
def display_background(image, back):
""" Display fitted background """
fig, (ax1,ax2,ax3) = plt.subplots(nrows=1,ncols=3,figsize=(13,4))
ax1.imshow(image, aspect="auto", cmap="gray",
norm=LogNorm(vmin=vmin_Nmad(image, N=3), vmax=v_Nsig(image, N=2)))
im2 = ax2.imshow(back, aspect="auto", cmap='gray')
colorbar(im2)
ax3.imshow(image - back, aspect="auto", cmap='gray',
norm=LogNorm(vmin=0., vmax=v_Nsig(image - back, N=2)))
plt.tight_layout()
def display_source(image, segm, mask, back, random_state=12345):
""" Display soruce detection and deblend around the target """
bkg_val = np.median(back)
vmin, vmax = vmin_Nmad(image, N=3), v_Nsig(image)
fig, (ax1,ax2,ax3) = plt.subplots(nrows=1,ncols=3,figsize=(13,4))
ax1.imshow(image, norm=LogNorm(vmin=vmin, vmax=vmax))
ax1.set_title("target", fontsize=16)
if type(segm) is np.ndarray:
from photutils.segmentation import SegmentationImage
segm = SegmentationImage(segm)
ax2.imshow(segm, cmap=segm.make_cmap(**{rand_state:random_state}))
ax2.set_title("segm", fontsize=16)
image_ma = image.copy()
image_ma[mask] = -1
ax3.imshow(image_ma, norm=LogNorm(vmin=vmin, vmax=vmax))
ax3.set_title("extracted", fontsize=16)
plt.show()
def draw_bounds(data, bounds, sub_bounds=None, seg_map=None,
ec='w', color='indianred', lw=2.5, hide_axis=True):
""" Draw boundaries of image """
from matplotlib import patches
fig, ax = plt.subplots(figsize=(12,8))
display(data, mask=seg_map, fig=fig, ax=ax)
Xmin, Ymin, Xmax, Ymax = bounds
rect = patches.Rectangle((Xmin, Ymin), Xmax-Xmin, Ymax-Ymin,
linewidth=lw, edgecolor=ec, facecolor='none')
ax.add_patch(rect)
if sub_bounds is not None:
for bounds_, l in zip(np.atleast_2d(sub_bounds), string.ascii_uppercase):
Xmin, Ymin, Xmax, Ymax = bounds_
rect = patches.Rectangle((Xmin, Ymin), Xmax-Xmin, Ymax-Ymin,
linewidth=2.5, edgecolor=color,
facecolor='none')
ax.text(Xmin+80, Ymin+80, r"$\bf %s$"%l, color=color,
ha='center', va='center', fontsize=20)
ax.add_patch(rect)
if hide_axis:
ax.set_xticks([])
ax.set_yticks([])
plt.show()
def draw_scale_bar(ax, X_bar=200, Y_bar=150, y_text=100,
scale=5*u.arcmin, pixel_scale=2.5,
lw=6, fontsize=15, color='w', format='.0f',
border_color='k', border_lw=1, alpha=1):
""" Draw a scale bar """
import matplotlib.patheffects as PathEffects
L_bar = scale.to(u.arcsec).value/pixel_scale
ax.plot([X_bar-L_bar/2, X_bar+L_bar/2], [Y_bar,Y_bar],
color=color, alpha=alpha, lw=lw,
path_effects=[PathEffects.SimpleLineShadow(), PathEffects.Normal()])
ax.text(X_bar, y_text, '{0:{1}} {2}'.format(scale.value, format, scale.unit),
color=color, alpha=alpha, fontsize=fontsize,
ha='center', va='center', fontweight='bold',
path_effects=[PathEffects.SimpleLineShadow(),
PathEffects.withStroke(linewidth=border_lw, foreground=border_color)])
def draw_mask_map(image, seg_map, mask_deep, stars,
r_core=None, r_out=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map """
from matplotlib import patches
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6), dpi=100)
display(image, fig=fig, ax=ax1)
ax1.set_title("Image")
n_label = seg_map.max()
ax2.imshow(seg_map, vmin=1, vmax=n_label-2, cmap=make_rand_cmap(n_label))
ax2.set_title("Mask")
image2 = image.copy()
image2[mask_deep] = 0
im3 = ax3.imshow(image2, norm=LogNorm(vmin=vmin, vmax=vmax))
ax3.set_title("Sky")
colorbar(im3, pad=0.1, size="2%")
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
if r_out is not None:
aper = CircularAperture(star_pos_A, r=r_out[0])
aper.plot(color='lime',lw=1.5,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_out[1])
aper.plot(color='c',lw=1.5,label="",alpha=0.7, axes=ax3)
patch_Xsize = image.shape[1] - pad * 2
patch_Ysize = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), patch_Xsize, patch_Ysize, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_dual.png"), dpi=100)
plt.show()
plt.close()
else:
plt.show()
def draw_mask_map_strip(image, seg_comb, mask_comb, stars,
ma_example=None, r_core=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map w/ strips """
from matplotlib import patches
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6), dpi=100)
if ma_example is not None:
mask_strip, mask_cross = ma_example
mask_strip[mask_cross.astype(bool)]=0.5
ax1.plot(star_pos_A[0][0], star_pos_A[0][1], "r*",ms=18)
else:
mask_strip = np.zeros_like(image)
ax1.imshow(mask_strip, cmap="gray_r")
ax1.set_title("Strip/Cross")
n_label = seg_comb.max()
ax2.imshow(seg_comb, vmin=1, vmax=n_label-3, cmap=make_rand_cmap(n_label))
ax2.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax2.set_title("Mask Comb.")
image3 = image.copy()
shape = image.shape
image3[mask_comb] = 0
im3 = ax3.imshow(image3, norm=LogNorm(vmin=vmin, vmax=vmax))
ax3.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax3.set_xlim(0, shape[1])
ax3.set_ylim(0, shape[0])
ax3.set_title("Deep Sky")
colorbar(im3, pad=0.1, size="2%")
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
patch_Xsize = image.shape[1] - pad * 2
patch_Ysize = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), patch_Xsize, patch_Ysize, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_strip.png"), dpi=100)
plt.show()
plt.close()
else:
plt.show()
def Fit_background_distribution(image, mask_deep):
# Check background, fit with gaussian and exp-gaussian distribution
from scipy import stats
plt.figure(figsize=(6,4))
z_sky = image[~mask_deep]
if seaborn_plot:
sns.distplot(z_sky, label='Data', hist_kws={'alpha':0.3})
else:
plt.hist(z_sky, label='Data', alpha=0.3)
mu_fit, std_fit = stats.norm.fit(z_sky)
print(mu_fit, std_fit)
d_mod = stats.norm(loc=mu_fit, scale=std_fit)
x = np.linspace(d_mod.ppf(0.001), d_mod.ppf(0.999), 100)
plt.plot(x, d_mod.pdf(x), 'g-', lw=2, alpha=0.6, label='Norm Fit')
K_fit, mu_fit, std_fit = stats.exponnorm.fit(z_sky)
print(K_fit, mu_fit, std_fit)
d_mod2 = stats.exponnorm(loc=mu_fit, scale=std_fit, K=K_fit)
x = np.linspace(d_mod2.ppf(0.001), d_mod2.ppf(0.9999), 100)
plt.plot(x, d_mod2.pdf(x), 'r-', lw=2, alpha=0.6, label='Exp-Norm Fit')
plt.legend(fontsize=12)
def plot_PSF_model_1D(frac, f_core, f_aureole, psf_range=400,
xunit='pix', yunit='Intensity',
ZP=27.1, pixel_scale=DF_pixel_scale,
label='combined', alpha=0.8, decompose=True):
from .utils import Intensity2SB
r = np.logspace(0, np.log10(psf_range), 100)
I_core = (1-frac) * f_core(r)
I_aureole = frac * f_aureole(r)
I_tot = I_core + I_aureole
if xunit=="arcsec":
r *= pixel_scale
plt.xlabel('r [arcsec]', fontsize=14)
else:
plt.xlabel('r [pix]', fontsize=14)
if yunit=="Intensity":
plt.loglog(r, I_tot,
ls="-", lw=4, alpha=alpha, zorder=5, label=label)
if decompose:
plt.loglog(r, I_core,
ls="--", lw=3, alpha=alpha, zorder=3, label='core')
plt.loglog(r, I_aureole,
ls="--", lw=3, alpha=alpha, zorder=4, label='aureole')
plt.ylabel("log Intensity", fontsize=14)
plt.ylim(0.5*I_aureole.min(), I_tot.max()*2)
elif yunit=="SB":
plt.semilogx(r, -14.5+Intensity2SB(I_tot, BKG=0,
ZP=ZP, pixel_scale=pixel_scale),
ls="-", lw=4,alpha=alpha, zorder=5, label=label)
if decompose:
plt.semilogx(r, -14.5+Intensity2SB(I_core, BKG=0,
ZP=ZP, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=alpha, zorder=3, label='core')
plt.semilogx(r, -14.5+Intensity2SB(I_aureole, BKG=0,
ZP=ZP, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=alpha, zorder=4, label='aureole')
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.ylim(31,17)
plt.legend(loc=1, fontsize=12)
def plot_PSF_model_galsim(psf, image_shape=(1001,1001), contrast=None,
figsize=(7,6), save=False, save_dir='.'):
""" Plot and 1D PSF model and Galsim 2D model averaged in 1D """
from .utils import Intensity2SB, cal_profile_1d
nY, nX = image_shape
pixel_scale = psf.pixel_scale
frac = psf.frac
psf_core = psf.psf_core
psf_aureole = psf.psf_aureole
psf_star = psf.psf_star
img_core = psf_core.drawImage(scale=pixel_scale, method="no_pixel")
img_aureole = psf_aureole.drawImage(nx=201, ny=201, scale=pixel_scale, method="no_pixel")
img_star = psf_star.drawImage(nx=nX, ny=nY, scale=pixel_scale, method="no_pixel")
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(frac*img_aureole.array, color="g",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label=psf.aureole_model)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d((1-frac)*img_core.array, color="orange",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Moffat")
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(img_star.array,
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Combined")
plt.legend(loc=1, fontsize=12)
r = np.logspace(0, np.log10(max(image_shape)), 100)
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
plt.plot(r, np.log10((1-frac) * comp1 + comp2 * frac), ls="-", lw=3, zorder=5)
plt.plot(r, np.log10((1-frac) * comp1), ls="--", lw=3, zorder=1)
plt.plot(r, np.log10(comp2 * frac), ls="--", lw=3)
if psf.aureole_model == "multi-power":
for t in psf.theta_s_pix:
plt.axvline(t, ls="--", color="k",alpha=0.3, zorder=1)
if contrast is not None:
plt.axhline(np.log10(comp1.max()/contrast),color="k",ls="--")
plt.title("Model PSF",fontsize=14)
plt.ylim(-8.5, -0.5)
plt.xlim(r_rbin.min()*0.8, r_rbin.max()*1.2)
plt.tight_layout()
plt.show()
if save:
plt.savefig(os.path.join(save_dir, "Model_PSF.png"), dpi=100)
plt.close()
return img_star
def plot_flux_dist(Flux, Flux_thresholds, ZP=None,
save=False, save_dir='.', figsize=None, **kwargs):
F_bright, F_verybright = Flux_thresholds
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
plt.axvline(np.log10(F_bright), color="k", ls="-",alpha=0.7, zorder=1)
plt.axvline(np.log10(F_verybright), color="k", ls="--",alpha=0.7, zorder=1)
plt.axvspan(1, np.log10(F_bright),
color='gray', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_bright), np.log10(F_verybright),
color='seagreen', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_verybright), 9,
color='steelblue', alpha=0.15, zorder=0)
if seaborn_plot:
sns.distplot(np.log10(Flux), kde=False, **kwargs)
else:
plt.hist(np.log10(Flux), alpha=0.5)
plt.yscale('log')
plt.xlabel('Estimated log Flux$_{tot}$ / Mag', fontsize=15)
plt.ylabel('# of stars', fontsize=15)
plt.legend(loc=1)
if ZP is not None:
ax1 = plt.gca()
xticks1 = ax1.get_xticks()
ax2 = ax1.twiny()
ax2.set_xticks(xticks1)
ax2.set_xticklabels(np.around(-2.5*xticks1+ZP ,1))
ax2.set_xbound(ax1.get_xbound())
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Flux_dist.png"), dpi=80)
plt.show()
plt.close()
def draw_independent_priors(priors, xlabels=None, plabels=None,
save=False, save_dir='./'):
x_s = [np.linspace(d.ppf(0.01), d.ppf(0.99), 100) for d in priors]
fig, axes = plt.subplots(1, len(priors), figsize=(15,4))
for k, ax in enumerate(axes):
ax.plot(x_s[k], priors[k].pdf(x_s[k]),'-', lw=5, alpha=0.6, label=plabels[k])
ax.legend()
if xlabels is not None:
ax.set_xlabel(xlabels[k], fontsize=12)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Prior.png"), dpi=100)
plt.close()
def draw_cornerplot(results, dims, labels=None, truths=None, figsize=(16,14),
save=False, save_dir='.', suffix='', **kwargs):
from dynesty import plotting as dyplot
nsamps, ndim = results.samples.shape
# if truth is given, show all dimensions
if truths is not None:
dims = None
# show subsets of dimensions
if dims is not None:
labels = labels[1:]
ndim = ndim - 1
fig = plt.subplots(ndim, ndim, figsize=figsize)
plot_kw = {'color':"royalblue", 'truth_color':"indianred",
'dims': dims, 'truths':truths, 'labels':labels,
'title_kwargs':{'fontsize':16, 'y': 1.04},
'title_fmt':'.3f', 'show_titles':True,
'label_kwargs':{'fontsize':16}}
plot_kw.update(kwargs)
fg, axes = dyplot.cornerplot(results, fig=fig, **plot_kw)
if save:
plt.savefig(os.path.join(save_dir, "Cornerplot%s.png"%suffix), dpi=120)
plt.show()
plt.close()
else:
return fg, axes
def draw_cornerbounds(results, ndim, prior_transform, labels=None, figsize=(10,10),
                      save=False, save_dir='.', suffix='', **kwargs):
    from dynesty import plotting as dyplot
    fig, axes = plt.subplots(ndim-1, ndim-1, figsize=figsize)
    plot_kw = {'labels':labels, 'it':1000, 'show_live':True}
    plot_kw.update(kwargs)
    fg, ax = dyplot.cornerbound(results, prior_transform=prior_transform,
                                fig=(fig, axes), **plot_kw)
if save:
plt.savefig(os.path.join(save_dir, "Cornerbound%s.png"%suffix), dpi=120)
plt.close()
else:
plt.show()
def draw2D_fit_vs_truth_PSF_mpow(results, psf, stars, labels, image,
image_base=None, vmin=None, vmax=None,
avg_func='median', save=False, save_dir="."):
""" Compare 2D fit and truth image """
    from .sampler import get_params_fit
    # NOTE: assumed import location for the helpers used below
    from .modeling import generate_image_by_flux, add_image_noise
N_n = len([lab for lab in labels if "n" in lab])
N_theta = len([lab for lab in labels if "theta" in lab])
pmed, pmean, pcov = get_params_fit(results)
fits = pmed if avg_func=='median' else pmean
print("Fitted (mean) : ", np.around(pmean,3))
print("Fitted (median) : ", np.around(pmed,3))
n_s_fit = fits[:N_n]
if N_theta > 0:
theta_s_fit = np.append([psf.theta_s[0]], 10**fits[N_n:N_n+N_theta])
else:
theta_s_fit = psf.theta_s
mu_fit, sigma_fit = fits[-2], 10**fits[-1]
psf_fit = psf.copy()
psf_fit.update({'n_s':n_s_fit, 'theta_s': theta_s_fit})
psf_range = max(image.shape) * psf.pixel_scale
image_fit = generate_image_by_flux(psf_fit, stars, draw_real=True,
psf_range=[psf_range//2, psf_range])
if image_base is not None:
image_fit += image_base
image_fit += mu_fit
image_fit_noise = add_image_noise(image_fit, sigma_fit)
if vmin is None:
vmin = mu_fit - 0.3 * sigma_fit
if vmax is None:
vmax = vmin + 11
fig, (ax1, ax2, ax3) = plt.subplots(1,3,figsize=(18,6))
im = ax1.imshow(image_fit_noise, norm=LogNorm(vmin=vmin, vmax=vmax)); colorbar(im)
im = ax2.imshow(image, norm=LogNorm(vmin=vmin, vmax=vmax)); colorbar(im)
Diff = (image_fit_noise-image)/image
im = ax3.imshow(Diff, vmin=-0.1, vmax=0.1, cmap='seismic'); colorbar(im)
ax1.set_title("Fit: I$_f$")
ax2.set_title("Original: I$_0$")
ax3.set_title("Frac.Diff: (I$_f$ - I$_0$) / I$_0$")
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir,
"Fit_vs_truth_image.png"), dpi=120)
plt.close()
def draw_comparison_2D(data, mask, image_fit,
image_stars, bkg_image,
noise_image=0, r_core=None,
vmin=None, vmax=None, Gain=None,
cmap='gnuplot2', norm_stretch=0.05,
manual_locations=None,
save=False, save_dir=".", suffix=""):
""" Compare data and fit in 2D """
mask_fit = getattr(mask, 'mask_comb', mask.mask_deep)
std = np.std(image_fit[~mask_fit])
if vmin is None:
vmin = np.mean(bkg_image) - 2*std
if vmax is None:
vmax = vmin + 20*std
norm = AsinhNorm(norm_stretch, vmin=vmin, vmax=vmax)
norm2 = AsinhNorm(norm_stretch, vmin=0, vmax=vmax-vmin)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2,3,figsize=(19,11))
im = ax1.imshow(data, norm=norm, cmap=cmap)
ax1.set_title("Data [I$_0$]", fontsize=15); colorbar(im)
im = ax2.imshow(image_fit+noise_image, norm=norm, cmap=cmap)
ax2.set_title("Fit [I$_f$] + noise", fontsize=15); colorbar(im)
im = ax3.imshow(image_stars, norm=norm2, cmap=cmap)
contour = ax3.contour(image_stars, levels=[0,1,2,5,10,25],
norm=norm2, colors='w', alpha=0.7)
ax3.clabel(contour, fmt='%1g', inline=1, fontsize=12, manual=manual_locations)
ax3.set_title("Bright Stars [I$_{f,B}$]", fontsize=15); colorbar(im)
if Gain is None:
frac_diff = (image_fit-data)/data
im = ax4.imshow(frac_diff, vmin=-0.05, vmax=0.05, cmap="bwr")
ax4.set_title("Frac. Diff. [(I$_f$ - I$_0$)/I$_0$]", fontsize=15); colorbar(im)
else:
uncertainty = np.sqrt(np.std(noise_image)**2+(image_fit-bkg_image)/Gain)
chi = (image_fit-data)/uncertainty
# chi[mask_fit] = 0
im = ax4.imshow(chi, vmin=-5, vmax=5, cmap="coolwarm")
ax4.set_title("$\chi$ [(I$_f$ - I$_0$)/$\sigma$]", fontsize=15); colorbar(im)
residual = (data-image_stars)
im = ax5.imshow(residual, norm=norm, cmap=cmap)
ax5.set_title("Bright Subtracted [I$_0$ - I$_{f,B}$]", fontsize=15); colorbar(im)
residual[mask_fit] = 0
im = ax6.imshow(residual, norm=norm, cmap=cmap)
ax6.set_title("Bright Subtracted (masked)", fontsize=15); colorbar(im)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core,r_core]
aper1 = CircularAperture(mask.stars.star_pos_verybright, r=r_core[0])
aper1.plot(color='lime',lw=2,alpha=0.95, axes=ax6)
aper2 = CircularAperture(mask.stars.star_pos_medbright, r=r_core[1])
aper2.plot(color='skyblue',lw=2,label="",alpha=0.85, axes=ax6)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Comparison_fit_data2D%s.png"%suffix), dpi=100)
plt.show()
plt.close()
else:
plt.show()
def plot_fit_PSF1D(results, psf,
psf_size=1000, n_spline=2,
n_bootstrap=500, truth=None,
Amp_max=None, r_core=None,
save=False, save_dir="./",
suffix='', figsize=(7,6)):
    from scipy.optimize import OptimizeResult
from astropy.stats import bootstrap
from .sampler import get_params_fit
pixel_scale = psf.pixel_scale
frac = psf.frac
# Read and print out fitting results
MLE_fit = type(results) == OptimizeResult
if MLE_fit:
pmed = pmean = results.x
else:
pmed, pmean, pcov, samples_eq = get_params_fit(results, return_sample=True)
samples_eq_bs = bootstrap(samples_eq, bootnum=1, samples=n_bootstrap)[0]
print(" - Fitting (mean) : ", np.around(pmean,3))
print(" - Fitting (median) : ", np.around(pmed,3))
# Canvas
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
if truth is not None:
print("Truth : ", psf.params)
psf.plot1D(psf_range=900, decompose=False, label='Truth')
# Number of n and theta in the fitting
if psf.aureole_model != "moffat":
theta_0 = psf.theta_0
N_n = n_spline
N_theta = n_spline - 1
psf_fit = psf.copy()
r = np.logspace(0., np.log10(psf_size), 100) # r in pix
comp1 = psf.f_core1D(r)
if psf.cutoff:
n_c = psf.n_c
theta_c = psf.theta_c
# Sample distribution from joint PDF
if not MLE_fit:
for sample in samples_eq_bs:
frac_k = frac
if psf.aureole_model == "moffat":
gamma1_k = sample[0]
beta1_k = sample[1]
psf_fit.update({'gamma1':gamma1_k, 'beta1':beta1_k})
else:
if psf.aureole_model == "power":
n_k = sample[0]
psf_fit.update({'n':n_k})
elif psf.aureole_model == "multi-power":
n_s_k = sample[:N_n]
theta_s_k = np.append(theta_0, 10**sample[N_n:N_n+N_theta])
if psf.cutoff:
n_s_k = np.append(n_s_k, n_c)
theta_s_k = np.append(theta_s_k, theta_c)
psf_fit.update({'n_s':n_s_k, 'theta_s':theta_s_k})
comp2_k = psf_fit.f_aureole1D(r)
plt.semilogy(r, (1-frac_k) * comp1 + frac_k * comp2_k,
color="lightblue", lw=2,alpha=0.1,zorder=1)
# Median and mean fitting
for fits, c, ls, lab in zip([pmed, pmean], ["royalblue", "b"],
["-.","-"], ["mean", "med"]):
if psf.aureole_model == "moffat":
gamma1_fit = fits[0]
beta1_fit = fits[1]
            psf_fit.update({'gamma1':gamma1_fit, 'beta1':beta1_fit})
else:
if psf.aureole_model == "power":
n_fit = fits[0]
psf_fit.update({'n':n_fit})
elif psf.aureole_model == "multi-power":
n_s_fit = fits[:N_n]
theta_s_fit = np.append(theta_0, 10**fits[N_n:N_n+N_theta])
if psf.cutoff:
n_s_fit = np.append(n_s_fit, n_c)
theta_s_fit = np.append(theta_s_fit, theta_c)
psf_fit.update({'n_s':n_s_fit, 'theta_s':theta_s_fit})
comp2 = psf_fit.f_aureole1D(r)
y_fit = (1-frac) * comp1 + frac * comp2
plt.semilogy(r, y_fit, color=c, lw=2.5, ls=ls, alpha=0.8, label=lab+' comb.', zorder=4)
if lab=="med":
plt.semilogy(r, (1-frac) * comp1,
color="orange", lw=2, ls="--", alpha=0.7, label="med core",zorder=4)
plt.semilogy(r, frac * comp2,
color="seagreen", lw=2, ls="--", alpha=0.7, label="med aureole",zorder=4)
# Draw boundaries etc.
if r_core is not None:
if figsize is not None:
if psf.cutoff:
xlim = theta_c/pixel_scale
else:
xlim = psf_size
plt.axvspan(np.atleast_1d(r_core).max(), xlim,
color='steelblue', alpha=0.15, zorder=1)
plt.axvspan(np.atleast_1d(r_core).min(), np.atleast_1d(r_core).max(),
color='seagreen', alpha=0.15, zorder=1)
plt.axvspan(plt.gca().get_xlim()[0], np.atleast_1d(r_core).min(),
color='gray', alpha=0.15, zorder=1)
if psf.aureole_model != "moffat":
for t in psf_fit.theta_s_pix:
plt.axvline(t, lw=2, ls='--', color='k', alpha=0.5)
plt.legend(loc=1, fontsize=12)
plt.xlabel(r"$\rm r\,[pix]$",fontsize=18)
plt.ylabel(r"$\rm Intensity$",fontsize=18)
plt.title("Recovered PSF from Fitting",fontsize=18)
plt.ylim(3e-9, 0.5)
plt.xscale("log")
plt.tight_layout()
if save:
plt.savefig("%s/Fit_PSF1D%s.png"%(save_dir, suffix),dpi=100)
plt.show()
plt.close()
def plot_bright_star_profile(tab_target, table_norm, res_thumb,
bkg_sky=460, std_sky=2, pixel_scale=2.5, ZP=27.1,
mag_name='MAG_AUTO_corr', figsize=(8,6)):
from .utils import Intensity2SB, cal_profile_1d
#r = np.logspace(0.03,3,100)
z_mean_s, z_med_s = table_norm['Imean'], table_norm['Imed']
z_std_s, sky_mean_s = table_norm['Istd'], table_norm['Isky']
plt.figure(figsize=figsize)
ax = plt.subplot(111)
# adaptive colormap
n_color = len(res_thumb)+np.sum(tab_target[mag_name]<10)+1
cmap = plt.cm.plasma(np.linspace(0.01, 0.99, n_color))
ax.set_prop_cycle(plt.cycler('color', cmap))
mag_min, mag_max = tab_target[mag_name].min(), tab_target[mag_name].max()
for i, (num, sky_m, mag) in enumerate(zip(list(res_thumb.keys())[::-1],
sky_mean_s[::-1],tab_target[mag_name][::-1])):
if num in tab_target["NUMBER"]:
alpha = min(0.05*(mag_max+2-mag), 0.8)
errorbar = True if mag<10 else False
ms = max((15-mag), 0)
lw = max((12-mag), 1.5)
else:
alpha = 0.5; errorbar=False
ms, lw = 3, 3
img, ma, cen = res_thumb[num]['image'], res_thumb[num]['mask'], res_thumb[num]['center']
r_rbin, I_rbin, _ = cal_profile_1d(img, cen=cen, mask=ma,
ZP=ZP, sky_mean=bkg_sky, sky_std=std_sky,
pixel_scale=pixel_scale, dr=1,
xunit="pix", yunit="SB", errorbar=errorbar,
core_undersample=False, color=None, lw=lw,
markersize=ms, alpha=alpha)
if mag==mag_min:
plt.text(14, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
if mag==mag_max:
plt.text(2, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
I_sky = Intensity2SB(std_sky, 0, ZP=ZP, pixel_scale=pixel_scale)
plt.axhline(I_sky, color="k", ls="-.", alpha=0.5)
plt.text(1.1, I_sky+0.5, '1 $\sigma$', fontsize=10)
plt.ylim(30.5,16.5)
plt.xlim(1.,3e2)
plt.xscale('log')
plt.show()
| 34,006 | 36.576796 | 98 |
py
|
elderflower
|
elderflower-master/elderflower/norm.py
|
import os
import re
import sys
import math
import warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
from astropy.stats import sigma_clip
from photutils import CircularAnnulus
from .io import logger, save_pickle, load_pickle, check_save_path
from .plotting import AsinhNorm
from .stack import stack_star_image
def counter(i, number):
if np.mod((i+1), number//3) == 0:
print(" - completed: %d/%d"%(i+1, number))
### Class & Funcs for measuring scaling ###
def compute_Rnorm(image, mask_field, cen,
R=12, wid_ring=1, wid_cross=4,
mask_cross=True, display=False):
"""
Compute the scaling factor using an annulus.
Note the output values include the background level.
    Parameters
    ----------
image : input image for measurement
mask_field : mask map with masked pixels = 1.
    cen : center of the target in image coordinate
R : radius of annulus in pix
wid_ring : half-width of annulus in pix
wid_cross : half-width of spike mask in pix
Returns
-------
I_mean: mean value in the annulus
I_med : median value in the annulus
I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixels < 5)
"""
if image is None:
return [np.nan] * 3 + [1]
cen = (cen[0], cen[1])
anl = CircularAnnulus([cen], R-wid_ring, R+wid_ring)
anl_ma = anl.to_mask()[0].to_image(image.shape)
in_ring = anl_ma > 0.5 # sky ring (R-wid, R+wid)
mask = in_ring & (~mask_field) & (~np.isnan(image))
# sky ring with other sources masked
# Whether to mask the cross regions, important if R is small
if mask_cross:
yy, xx = np.indices(image.shape)
rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
in_cross = ((abs(xx-cen[0])<wid_cross))|(abs(yy-cen[1])<wid_cross)
mask = mask * (~in_cross)
if len(image[mask]) < 5:
return [np.nan] * 3 + [1]
z_ = sigma_clip(image[mask], sigma=3, maxiters=5)
z = z_.compressed()
I_mean = np.average(z, weights=anl_ma[mask][~z_.mask])
I_med, I_std = np.median(z), np.std(z)
if display:
L = min(100, int(mask.shape[0]))
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
ax1.imshow(mask, cmap="gray", alpha=0.7)
ax1.imshow(mask_field, alpha=0.2)
ax1.imshow(image, cmap='viridis', alpha=0.7,
norm=AsinhNorm(0.05, vmin=image.min(), vmax=I_med+50*I_std))
ax1.plot(cen[0], cen[1], 'r+', ms=10)
ax2.hist(z,alpha=0.7)
# Label mean value
plt.axvline(I_mean, color='k')
plt.text(0.5, 0.9, "%.1f"%I_mean,
color='darkorange', ha='center', transform=ax2.transAxes)
# Label 20% / 80% quantiles
I_20 = np.quantile(z, 0.2)
I_80 = np.quantile(z, 0.8)
for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
plt.axvline(I, color='k', ls="--")
plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
ha='center', transform=ax2.transAxes)
ax1.set_xlim(cen[0]-L//4, cen[0]+L//4)
ax1.set_ylim(cen[1]-L//4, cen[1]+L//4)
plt.show()
return I_mean, I_med, I_std, 0
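# Example (illustrative values): measure the annulus statistics of a star
# centered at (x0, y0) at R = 12 pix, masking all segmented sources, e.g.
#   I_mean, I_med, I_std, flag = compute_Rnorm(image, seg_map > 0, (x0, y0), R=12)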
def compute_Rnorm_batch(table_target,
image, seg_map, wcs,
r_scale=12, k_win=1,
mag_saturate=13.5,
mag_limit=15,
wid_ring=0.5, wid_cross=4,
display=False, verbose=True):
"""
Compute scaling factors for objects in the table.
Return an array with measurement and a dictionary containing maps and centers.
    Parameters
    ----------
table_target : astropy.table.Table
SExtractor table containing measurements of sources.
image : 2d array
Full image.
seg_map : 2d array
Full segmentation map used to mask nearby sources during the measurement.
    wcs : astropy.wcs.wcs
WCS of image.
r_scale : int, optional, default 12
Radius in pixel at which the flux scaling is measured.
k_win : int, optional, default 1
Enlargement factor for extracting thumbnails.
mag_saturate : float, optional, default 13.5
Estimate of magnitude at which the image is saturated.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured.
wid_ring : float, optional, default 0.5
Half-width in pixel of ring used to measure the scaling.
wid_cross : float, optional, default 4
Half-width in pixel of the spike mask when measuring the scaling.
Returns
-------
res_norm : nd array
A N x 5 array saving the measurements.
[I_mean, I_med, I_std, I_sky, I_flag]
res_thumb : dict
A dictionary storing thumbnails, mask, background and center of object.
"""
from .image import Thumb_Image
# Initialize
res_thumb = {}
res_norm = np.empty((len(table_target), 5))
# Iterate rows over the target table
for i, row in enumerate(table_target):
if verbose:
counter(i, len(table_target))
num, mag_auto = row['NUMBER'], row['MAG_AUTO']
wid_cross_ = wid_cross # spikes mask
# For brighter sources, use a broader window
if mag_auto <= mag_saturate-3:
n_win = int(40 * k_win)
elif mag_saturate-3 < mag_auto < mag_saturate:
n_win = int(30 * k_win)
elif mag_saturate < mag_auto < mag_limit:
n_win = int(20 * k_win)
wid_cross_ = max(wid_cross//2, 1)
else:
n_win = int(10 * k_win)
wid_cross_ = 0
# Make thumbnail of the star and mask sources
thumb = Thumb_Image(row, wcs)
thumb.extract_star(image, seg_map, n_win=n_win)
# Measure the mean, med and std of intensity at r_scale
thumb.compute_Rnorm(R=r_scale,
wid_ring=wid_ring,
wid_cross=wid_cross_,
display=display)
I_flag = thumb.I_flag
        if (I_flag==1) & verbose: logger.debug("Erroneous measurement: #%d"%num)
# Store results as dict (might be bulky)
res_thumb[num] = {"image":thumb.img_thumb,
"mask":thumb.star_ma,
"bkg":thumb.bkg,
"center":thumb.cen_star}
# Store measurements to array
I_stats = ['I_mean', 'I_med', 'I_std', 'I_sky']
res_norm[i] = np.array([getattr(thumb, attr) for attr in I_stats] + [I_flag])
return res_norm, res_thumb
def measure_Rnorm_all(hdu_path,
table,
bounds,
seg_map=None,
r_scale=12,
mag_limit=15,
mag_saturate=13.5,
mag_stack_limit=None,
mag_name='rmag_PS',
k_enlarge=1,
width_ring=0.5,
width_cross=4,
obj_name="",
display=False,
save=True, dir_name='.',
read=False, verbose=True):
"""
Measure intensity at r_scale for bright stars in table.
Parameters
----------
hdu_path : str
path of hdu data
table : astropy.table.Table
SExtractor table containing measurements of sources.
bounds : 1d array or list
Boundaries of the region in the image [Xmin, Ymin, Xmax, Ymax].
seg_map : 2d array, optional, default None
Full segmentation map used to mask nearby sources during the measurement.
If not given, it will be done locally by photutils.
r_scale : int, optional, default 12
Radius in pixel at which the flux scaling is measured.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured.
mag_saturate : float, optional, default 13.5
Estimate of magnitude at which the image is saturated.
mag_stack_limit : float, optional, default None
Max limit for stacking core PSF. Use mag_limit if None.
mag_name : str, optional, default 'rmag_PS'
Column name of magnitude used in the table.
k_enlarge : int, optional, default 1
Enlargement factor for extracting thumbnails.
width_ring : float, optional, default 0.5
Half-width in pixel of ring used to measure the scaling.
width_cross : float, optional, default 4
Half-width in pixel of the spike mask when measuring the scaling.
obj_name : str, optional
Object name used as prefix of saved output.
save : bool, optional, default True
Whether to save output table and thumbnails.
dir_name : str, optional
        Path of saving. Use current one as default.
read : bool, optional, default False
Whether to read existed outputs if available.
Returns
-------
table_norm : astropy.table.Table
Table containing measurement results.
res_thumb : dict
A dictionary storing thumbnails, mask, background and center of object.
'image' : image of the object
'mask' : mask map from SExtractor with nearby sources masked (masked = 1)
'bkg' : estimated local 2d background
        'center' : 0-based centroid of the object from SExtractor
"""
from .utils import convert_decimal_string
with fits.open(hdu_path) as hdul:
image = hdul[0].data
header = hdul[0].header
wcs_data = wcs.WCS(header)
if verbose:
msg = "Measure intensity at R = {0} ".format(r_scale)
msg += "for catalog stars {0:s} < {1:.1f} in ".format(mag_name, mag_limit)
msg += "{0}.".format(bounds)
logger.info(msg)
band = mag_name[0]
range_str = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bounds)
mag_str = convert_decimal_string(mag_limit)
fn_table_norm = os.path.join(dir_name, '%s-norm_%dpix_%smag%s_%s.txt'\
%(obj_name, r_scale, band, mag_str, range_str))
fn_res_thumb = os.path.join(dir_name, '%s-thumbnail_%smag%s_%s.pkl'\
%(obj_name, band, mag_str, range_str))
fn_psf_stack = os.path.join(dir_name, f'{obj_name}-{band}-psf_stack_{range_str}.fits')
if read:
table_norm = Table.read(fn_table_norm, format="ascii")
res_thumb = load_pickle(fn_res_thumb)
else:
tab = table[table[mag_name]<mag_limit]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
res_norm, res_thumb = compute_Rnorm_batch(tab, image,
seg_map, wcs_data,
r_scale=r_scale,
wid_ring=width_ring,
wid_cross=width_cross,
mag_saturate=mag_saturate,
mag_limit=mag_limit,
k_win=k_enlarge,
display=display,
verbose=verbose)
keep_columns = ['NUMBER', 'MAG_AUTO', 'MAG_AUTO_corr', 'MU_MAX', 'FLAGS', mag_name] \
+ [name for name in tab.colnames
if ('IMAGE' in name)|('CATALOG' in name)]
keep_columns = [name for name in keep_columns if name in tab.colnames]
table_norm = tab[keep_columns].copy()
for j, colname in enumerate(['Imean','Imed','Istd','Isky','Iflag']):
if colname=='Iflag':
col = res_norm[:,j].astype(int)
else:
col = np.around(res_norm[:,j], 5)
table_norm[colname] = col
if save: # save star thumbnails
check_save_path(dir_name, overwrite=True, verbose=False)
save_pickle(res_thumb, fn_res_thumb, 'thumbnail result')
table_norm.write(fn_table_norm, overwrite=True, format='ascii')
# Stack non-saturated stars to obtain the inner PSF.
psf_size = 5 * r_scale + 1
psf_size = int(psf_size/2) * 2 + 1 # round to odd
not_edge = (table_norm['X_IMAGE'] > bounds[0] + psf_size) & \
(table_norm['X_IMAGE'] < bounds[2] - psf_size) & \
(table_norm['Y_IMAGE'] > bounds[1] + psf_size) & \
(table_norm['Y_IMAGE'] < bounds[3] - psf_size)
if mag_stack_limit is None:
mag_stack_limit = mag_limit
to_stack = (table_norm['MAG_AUTO']>mag_saturate+0.5) & (table_norm['MAG_AUTO']<mag_stack_limit) & (table_norm['FLAGS']<3) & not_edge
table_stack = table_norm[to_stack]
psf_stack = stack_star_image(table_stack, res_thumb,
size=psf_size, verbose=verbose)
if save:
fits.writeto(fn_psf_stack, data=psf_stack, overwrite=True)
if verbose:
logger.info(f"Saved stacked PSF to {fn_psf_satck}")
return table_norm, res_thumb
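# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration). It shows how the normalization
# table written above is typically consumed downstream: the median ring
# intensity minus the local sky gives the flux scaling of each bright star.
# Column names follow those written by measure_Rnorm_all; the extra `sky`
# offset argument is hypothetical.
# ---------------------------------------------------------------------------
def _example_flux_scaling(table_norm, sky=0.0):
    """Return (z_norm, good), where good flags stars with clean measurements."""
    z_norm = np.array(table_norm['Imed']) - np.array(table_norm['Isky']) - sky
    good = np.array(table_norm['Iflag']) == 0   # Iflag > 0 marks bad measurements
    return z_norm, good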
| 13,674 | 36.568681 | 136 |
py
|
elderflower
|
elderflower-master/elderflower/sampler.py
|
import os
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
import multiprocess as mp
from scipy.optimize import minimize
try:
import dynesty
from dynesty import plotting as dyplot
from dynesty import utils as dyfunc
dynesty_installed = True
except ImportError:
warnings.warn("dynesty is not installed. Only MLE method is available.")
dynesty_installed = False
from .io import logger
from .io import save_pickle, load_pickle
from .plotting import colorbar
class Sampler:
def __init__(self, container,
sample_method='auto', bound='multi',
n_cpu=None, n_thread=None,
run='nested', results=None):
""" A class for runnning the sampling and plotting results """
if (sample_method=='mle')|(dynesty_installed==False):
run = 'mle'
# run = False if a previous run is read
self.run = run
self.container = container
self.image = container.image
self.ndim = container.ndim
if run == 'nested':
self.labels = container.labels
if n_cpu is None:
n_cpu = min(mp.cpu_count()-1, 10)
if n_thread is not None:
n_thread = max(n_thread, n_cpu-1)
if n_cpu > 1:
self.open_pool(n_cpu)
self.use_pool = {'update_bound': False}
else:
self.pool = None
self.use_pool = None
self.prior_tf = container.prior_transform
self.loglike = container.loglikelihood
dsampler = dynesty.DynamicNestedSampler(self.loglike,
self.prior_tf, self.ndim,
sample=sample_method, bound=bound,
pool=self.pool, queue_size=n_thread,
use_pool=self.use_pool)
self.dsampler = dsampler
elif run == 'mle':
self.MLE_bounds = container.MLE_bounds
self.param0 = container.param0
self.loglike = container.loglikelihood
self.NLL = lambda p: -self.loglike(p)
else:
self._results = results # use existing results
def run_fitting(self,
nlive_init=100,
maxiter=10000,
nlive_batch=50,
maxbatch=2,
wt_kwargs={'pfrac': 0.8},
close_pool=True,
print_progress=True, **kwargs):
if not self.run:
logger.warning("Not available to run the fitting.")
return None
start = time.time()
if self.run == 'nested':
msg = "Run Nested sampling for the fitting... "
msg += "# of params: {0}".format(self.ndim)
logger.info(msg)
dlogz = 1e-3 * (nlive_init - 1) + 0.01
self.dsampler.run_nested(nlive_init=nlive_init,
nlive_batch=nlive_batch,
maxbatch=maxbatch,
maxiter=maxiter,
dlogz_init=dlogz,
wt_kwargs=wt_kwargs,
print_progress=print_progress, **kwargs)
if (self.pool is not None) & close_pool:
self.close_pool()
elif self.run == 'mle':
msg = "Run maximum likelihood estimate... "
msg += "# of params: {0}".format(self.ndim)
logger.info(msg)
msg = "MLE bounds:"
for mle_b in self.MLE_bounds:
msg += " [{0:.3f}, {1:.3f}]".format(mle_b[0], mle_b[1])
logger.info(msg)
results = minimize(self.NLL, self.param0, method='L-BFGS-B',
bounds=self.MLE_bounds)
self.MLE_results = results
end = time.time()
self.run_time = (end-start)
logger.info("Finish Fitting! Total time elapsed: %.3g s"%self.run_time)
def open_pool(self, n_cpu):
logger.info("Opening new pool: # of CPU used: %d"%(n_cpu))
self.pool = mp.Pool(processes=n_cpu)
self.pool.size = n_cpu
def close_pool(self):
logger.info("Pool Closed.")
self.pool.close()
self.pool.join()
@property
def results(self):
""" Results of the dynesty/MLE dynamic sampler class """
if self.run == 'nested':
return getattr(self.dsampler, 'results', {})
elif self.run == 'mle':
return getattr(self, 'MLE_results')
else:
return self._results
def get_params_fit(self, return_sample=False):
if self.run == 'mle':
return self.results.x, self.results.x, None
else:
return get_params_fit(self.results, return_sample)
def save_results(self, filename, save_dir='.'):
""" Save fitting results """
if not self.run:
logger.warning("No results to saved.")
return None
res = {}
if hasattr(self, 'fit_info'):
res['fit_info'] = {'run_time': round(self.run_time,2)}
for key, val in self.fit_info.items():
res['fit_info'][key] = val
res['fit_res'] = self.results # fitting results
res['container'] = self.container # a container for prior and likelihood
# Delete <local> prior and loglikelihood function which can't be pickled
for attr in ['prior_transform', 'loglikelihood']:
if hasattr(res['container'], attr):
delattr(res['container'], attr)
save_pickle(res, os.path.join(save_dir, filename), 'fitting result')
@classmethod
def load_results(cls, filename):
""" Read saved fitting results """
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = load_pickle(filename)
results = res['fit_res']
if 'fit_info' in res.keys():
logger.info(f"Read fitting results {filename}\n")
print(res['fit_info'])
results['fit_info'] = res['fit_info']
return cls(res['container'], run=False, results=results)
def cornerplot(self, truths=None, figsize=(16,15),
save=False, save_dir='.', suffix='', **kwargs):
from .plotting import draw_cornerplot
if self.run == 'mle': return None
# hide n0 subplots if n0 is fixed during the fitting
if self.container.fix_n0:
nsamps, ndim = self.results.samples.shape
dims = np.ix_(np.arange(1,ndim,1), range(nsamps))
else:
dims = None
labels = self.container.labels
return draw_cornerplot(self.results, dims,
labels=labels, truths=truths, figsize=figsize,
save=save, save_dir=save_dir, suffix=suffix, **kwargs)
def cornerbounds(self, figsize=(10,10),
save=False, save_dir='.', suffix='', **kwargs):
from .plotting import draw_cornerbounds
if self.run != 'nested': return None
if hasattr(self, 'prior_tf'):
draw_cornerbounds(self.results, self.ndim, self.prior_tf,
labels=self.labels, figsize=figsize,
save=save, save_dir=save_dir, suffix=suffix, **kwargs)
def plot_fit_PSF1D(self, psf, **kwargs):
from .plotting import plot_fit_PSF1D
n_spline = self.container.n_spline
psf_size = max(self.image.shape)
plot_fit_PSF1D(self.results, psf,
psf_size=psf_size, n_spline=n_spline, **kwargs)
def generate_fit(self, psf, stars, image_base=None, norm='brightness'):
"""
Build psf and images from fitting results.
Parameters
----------
psf : PSF_Model class
An inherited PSF model.
stars : Star class
A Star object storing info of stars.
image_base : numpy.array, default None
A base image to be added (e.g. faint stars)
"""
from .utils import make_psf_from_fit
from .modeling import generate_image_fit
ct = self.container
image_shape = ct.image_shape
psf_fit, params = make_psf_from_fit(self, psf, psf_range=max(image_shape))
self.bkg_fit = psf_fit.bkg
self.bkg_std_fit = psf_fit.bkg_std
stars_ = stars.copy()
stars_.z_norm = stars.z_norm + stars.BKG - self.bkg_fit
image_stars, noise_image, bkg_image \
= generate_image_fit(psf_fit, stars_, image_shape,
norm=norm, leg2d=ct.leg2d,
brightest_only=ct.brightest_only,
draw_real=ct.draw_real)
image_fit = image_stars + bkg_image
if (image_base is not None) and (not ct.brightest_only):
image_fit += image_base
# Images constructed from fitting
self.image_fit = image_fit
self.image_stars = image_stars
self.bkg_image = bkg_image
self.noise_image = noise_image
# PSF constructed from fitting
self.psf_fit = psf_fit
# Stars
self.stars = stars_
def calculate_reduced_chi2(self, Gain, dof):
"""Calculate reduced Chi^2"""
from .utils import calculate_reduced_chi2
ct = self.container
mask_fit = getattr(ct.mask, 'mask_comb')
data = ct.data
data_pred = (self.image_fit[~mask_fit]).ravel()
uncertainty = np.sqrt(self.bkg_std_fit**2+(data_pred-self.bkg_fit)/Gain)
calculate_reduced_chi2(data_pred, data, uncertainty, dof=dof)
def draw_comparison_2D(self, **kwargs):
from .plotting import draw_comparison_2D
ct = self.container
image = ct.image
mask = ct.mask
if hasattr(self, 'image_fit'):
draw_comparison_2D(image, mask, self.image_fit, self.image_stars,
self.bkg_image, self.noise_image, **kwargs)
def draw_background(self, save=False, save_dir='.', suffix=''):
plt.figure()
if hasattr(self, 'bkg_image'):
im = plt.imshow(self.bkg_image); colorbar(im)
if save:
plt.savefig(os.path.join(save_dir,'Background2D%s.png'%(suffix)), dpi=80)
else:
plt.show()
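# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): reloading a saved fitting
# result with the Sampler class above and summarizing the fitted parameters.
# The default file name is a placeholder following the naming used in task.py.
# ---------------------------------------------------------------------------
def _example_reload_fit(filename='obj-g-A-X[800-2400]Y[800-2000]-fit3p.res'):
    s = Sampler.load_results(filename)       # rebuilds a Sampler with run=False
    pmed, pmean, pcov = s.get_params_fit()    # weighted median / mean / covariance
    return pmed, pmean, pcov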
# (Old) functional way
def Run_Dynamic_Nested_Fitting(loglikelihood, prior_transform, ndim,
nlive_init=100, sample='auto',
nlive_batch=50, maxbatch=2,
pfrac=0.8, n_cpu=None, print_progress=True):
print("Run Nested Fitting for the image... #a of params: %d"%ndim)
start = time.time()
if n_cpu is None:
n_cpu = min(mp.cpu_count()-1, 10)
with mp.Pool(processes=n_cpu) as pool:
logger.info("Opening pool: # of CPU used: %d"%(n_cpu))
pool.size = n_cpu
dlogz = 1e-3 * (nlive_init - 1) + 0.01
pdsampler = dynesty.DynamicNestedSampler(loglikelihood, prior_transform, ndim,
sample=sample, pool=pool,
use_pool={'update_bound': False})
pdsampler.run_nested(nlive_init=nlive_init,
nlive_batch=nlive_batch,
maxbatch=maxbatch,
print_progress=print_progress,
dlogz_init=dlogz,
wt_kwargs={'pfrac': pfrac})
end = time.time()
print("Finish Fitting! Total time elapsed: %.3gs"%(end-start))
return pdsampler
def get_params_fit(results, return_sample=False):
samples = results.samples # samples
weights = np.exp(results.logwt - results.logz[-1]) # normalized weights
pmean, pcov = dyfunc.mean_and_cov(samples, weights) # weighted mean and covariance
samples_eq = dyfunc.resample_equal(samples, weights) # resample weighted samples
pmed = np.median(samples_eq,axis=0)
if return_sample:
return pmed, pmean, pcov, samples_eq
else:
return pmed, pmean, pcov
def merge_run(res_list):
return dyfunc.merge_runs(res_list)
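# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration) of the loglikelihood / prior_transform
# interface expected by Run_Dynamic_Nested_Fitting and by the Sampler class:
# a toy 2D Gaussian likelihood with uniform priors on [-5, 5]. This is not the
# PSF likelihood used elsewhere in the package.
# ---------------------------------------------------------------------------
def _toy_loglike(p):
    # isotropic unit Gaussian log-likelihood in len(p) dimensions
    return -0.5 * np.sum(p**2) - 0.5 * len(p) * np.log(2 * np.pi)

def _toy_prior_transform(u):
    # map the unit cube sampled by dynesty to the box [-5, 5] per dimension
    return 10. * u - 5.

# Example (not executed on import):
# pdsampler = Run_Dynamic_Nested_Fitting(_toy_loglike, _toy_prior_transform,
#                                        ndim=2, nlive_init=50, n_cpu=2)
# pmed, pmean, pcov = get_params_fit(pdsampler.results)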
| 13,270 | 34.579088 | 92 |
py
|
elderflower
|
elderflower-master/elderflower/task.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import numpy as np
from pathlib import Path
from functools import partial
from astropy.io import fits
from astropy.table import Table
from .io import logger
from .io import find_keyword_header, check_save_path, clean_pickling_object
from .detection import default_SE_config, default_conv, default_nnw
from .mask import mask_param_default
from . import DF_pixel_scale, DF_raw_pixel_scale, DF_Gain
def Run_Detection(hdu_path,
obj_name,
band,
threshold=5,
work_dir='./',
config_path=None,
executable=None,
ZP_keyname='REFZP',
ZP=None,
pixel_scale=DF_pixel_scale,
ref_cat='APASSref.cat',
apass_dir=None,
sep_match=2,
**SE_kwargs):
"""
Run a first-step source detection with SExtractor. This step generates a SExtractor catalog
and segmentation map for the cross-match and measurement in Match_Mask_Measure.
Magnitudes are converted using the zero-point stored in the header ('ZP_keyname'). If not
stored in the header, it will try to compute the zero-point by cross-match with the APASS
catalog. In this case, the directory to the APASS catalogs is needed ('apass_dir'). If a
reference catalog already exists, it can be provided ('ref_cat') to save time.
Parameters
----------
hdu_path : str
Full path of hdu data
obj_name : str
Object name
band : str, 'G', 'g', 'R', 'r'
Filter name
threshold : int, optional, default 5
Detection and analysis threshold of SExtractor
work_dir : str, optional, default current directory
Full path of directory for saving
config_path : str, optional, None
Full path of configuration file of running SExtractor.
By default it uses the one stored in configs/
executable : str, optional, None
Full path of the SExtractor executable. If SExtractor is installed
this can be obtained by typing '$which source-extractor' or
'$which sex' in the shell.
By default it will be searched for automatically.
ZP_keyname : str, optional, default REFZP
Keyword names of zero point in the header.
If not found, a value can be passed by ZP.
ZP : float or None, optional, default None
Zero point value. If None, it finds ZP_keyname in the header.
If not provided either, it will compute a zero point by
cross-match with the APASS catalog.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel.
ref_cat : str, optional, default 'APASSref.cat'
Full path file name of the APASS reference catalog.
If not found, it will generate a reference catalog.
apass_dir : str, optional, default None
Full path of the directory of the APASS catalogs.
sep_match : float, optional, default 2
Maximum separation (in arcsec) for crossmatch with APASS.
Not used if ZP is given in the header.
Returns
-------
ZP: float
Zero point value from the header, or a crossmatch with APASS, or a user-input.
Notes
-----
SExtractor must be installed and the local executable path needs to be correct.
A configuration file can be passed by config_path than default, but parameters can be
overwritten by passing them as kwargs, e.g. (note SExtractor keywords are in capital):
Run_Detection(..., DETECT_THRESH=10)
will override threshold.
"""
from .detection import run as run_sextractor
from .io import update_SE_kwargs, get_SExtractor_path
logger.info(f"Run SExtractor on {hdu_path}...")
check_save_path(work_dir, overwrite=True, verbose=False)
band = band.lower()
segname = os.path.join(work_dir, f'{obj_name}-{band}_seg.fits')
catname = os.path.join(work_dir, f'{obj_name}-{band}.cat')
header = fits.getheader(hdu_path)
if config_path is None: config_path = default_SE_config
if executable is None: executable = get_SExtractor_path()
SE_extra_params = ['NUMBER','X_WORLD','Y_WORLD','FLUXERR_AUTO','MAG_AUTO',
'MU_MAX','CLASS_STAR','ELLIPTICITY']
# Find zero-point in the fits header
if ZP_keyname not in header.keys():
logger.warning("ZP_keyname is not found in the header")
# If not in the header, check kwargs
if type(ZP) is not float:
# If not available in kwargs, compute by crossmatch with refcat
try:
from dfreduce.utils.catalogues import (match_catalogues, load_apass_in_region)
except ImportError:
msg = "Crossmatch is currently not available because dfreduce is not installed. A ZP_keyname is required in the header."
logger.error(msg)
sys.exit()
logger.info("Compute zero-point from crossmatch with APASS catalog...")
# alias for CDELT and CD
for axis in [1, 2]:
cd = 'CD{0}_{1}'.format(axis, axis)
if cd not in header.keys():
header[cd] = header['PC{0}_{1}'.format(axis, axis)]
# Run sextractor with free zero-point
SE_catalog = run_sextractor(hdu_path,
extra_params=SE_extra_params,
config_path=config_path,
catalog_path=catname,
executable=executable,
DETECT_THRESH=10, ANALYSIS_THRESH=10,
PIXEL_SCALE=pixel_scale,
FILTER_NAME=default_conv,
STARNNW_NAME=default_nnw)
# Load (APASS) reference catalog
ref_cat = os.path.join(work_dir, "{0}.{1}".format(*os.path.basename(ref_cat).rsplit('.', 1)))
if os.path.exists(ref_cat):
refcat = Table.read(ref_cat, format='ascii')
else:
logger.info("Generate APASS reference catalog... It will take some time.")
ra_range = abs(header['NAXIS1'] * header['CD1_1'])
dec_range = abs(header['NAXIS2'] * header['CD2_2'])
maxra = header['CRVAL1'] - header['CRPIX1'] * header['CD1_1']
mindec = header['CRVAL2'] - header['CRPIX2'] * header['CD2_2']
minra = maxra - ra_range
maxdec = mindec + dec_range
bounds_cat = [mindec, maxdec, minra, maxra]
if apass_dir is not None:
if os.path.exists(apass_dir):
refcat = load_apass_in_region(apass_dir,
bounds=bounds_cat)
refcat.write(ref_cat, format='ascii')
else:
raise FileNotFoundError('APASS directory not available.')
# Crossmatch SE catalog with reference catalog
imagecat_match, refcat_match = match_catalogues(SE_catalog, refcat, band, sep_max=sep_match)
# Get the median ZP from the crossmatched catalog
ZP = np.median(refcat_match[band] - imagecat_match[band])
logger.info("Matched median zero-point = {:.3f}".format(ZP))
else:
ZP = float(header[ZP_keyname])
logger.info("Read zero-point from header : ZP = {:.3f}".format(ZP))
logger.info("Pixel scale = {:.2f}".format(pixel_scale))
logger.info("Detection threshold = {:.1f}".format(threshold))
SE_kwargs_update = {'DETECT_THRESH':threshold,
'ANALYSIS_THRESH':threshold,
'MAG_ZEROPOINT':ZP,
'PIXEL_SCALE':pixel_scale}
SE_kwargs = update_SE_kwargs(SE_kwargs, SE_kwargs_update)
SE_catalog = run_sextractor(hdu_path,
extra_params=SE_extra_params,
config_path=config_path,
catalog_path=catname,
executable=executable,
CHECKIMAGE_TYPE='SEGMENTATION',
CHECKIMAGE_NAME=segname, **SE_kwargs)
if not (os.path.isfile(catname) and os.path.isfile(segname)):
raise FileNotFoundError('SE catalog/segmentation not saved properly.')
logger.info(f"CATALOG saved as {catname}")
logger.info(f"SEGMENTATION saved as {segname}")
return ZP
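# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration). The image path, object name and
# work directory are placeholders. As noted in the docstring above, SExtractor
# keywords passed as kwargs (e.g. DETECT_THRESH) override the config file and
# the `threshold` argument.
# ---------------------------------------------------------------------------
def _example_run_detection():
    ZP = Run_Detection('image_g.fits', obj_name='obj', band='g',
                       threshold=5, work_dir='./obj',
                       ZP_keyname='REFZP', ZP=None,
                       DETECT_THRESH=10)     # example of a SExtractor override
    return ZP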
def Match_Mask_Measure(hdu_path,
bounds_list,
obj_name,
band,
pixel_scale=DF_pixel_scale,
ZP_keyname='REFZP',
ZP=None,
bkg=None,
field_pad=50,
r_scale=12,
mag_limit=15,
mag_limit_segm=22,
mag_saturate=13.5,
mask_param=mask_param_default,
draw=True,
save=True,
use_PS1_DR2=False,
fn_psf_core=None,
work_dir='./'):
"""
Generate a series of files as preparations for the fitting.
The function completes by the following steps:
1) Identify bright extended sources empirically and mask them.
2) Crossmatch the SExtractor table with the PANSTARRS catalog.
3) Correct the catalogued magnitudes to the used filter.
4) Add saturated stars missing in the crossmatch by a correction.
5) Make mask maps for dim stars with empirical apertures enlarged from SExtractor.
6) Measure brightness in annuli around bright stars
The output files are saved in:
work_dir/obj_name/Measure-PS1 or work_dir/obj_name/Measure-PS2
Parameters
----------
hdu_path : str
Full path of hdu data.
bounds_list : 2D list / tuple
List of boundaries of regions to be fit (Nx4).
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name.
band : str, 'g', 'G', 'r', 'R'
Filter name.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel.
ZP_keyname : str, optional, default 'REFZP'
Keyword names of zero point in the header.
If not found, a value can be passed by ZP.
ZP : float or None, optional, default None
Zero point value (if None, read ZP from header).
bkg : float or None, optional, default None
Background estimated value (if None, read BACKVAL from header).
field_pad : int, optional, default 50
Padding size (in pix) of the field for crossmatch.
Only used if use_PS1_DR2=False
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured.
Default is 30" for Dragonfly.
mag_limit : float, optional, default 15
Magnitude upper limit below which stars are measured.
mag_limit_segm : float, optional, default 22
Magnitude limit to make segmentation
mag_saturate : float, optional, default 13.5
Estimate of magnitude at which the image is saturated.
The exact value will be fit.
mask_param: dict, optional
Parameters setting up the mask map.
See doc string of .mask for details.
draw : bool, optional, default True
Whether to draw diagnostic plots.
save : bool, optional, default True
Whether to save results.
use_PS1_DR2 : bool, optional, default False
Whether to use PANSTARRS DR2. Crossmatch with DR2 is done by MAST query,
which could easily fail if a field is too large (> 1 deg^2).
fn_psf_core : bool, optional, default None
Path of the provided stacked PSF core.
work_dir : str, optional, default current directory
Full path of directory for saving.
Returns
-------
None
"""
band = band.lower()
bounds_list = np.atleast_2d(bounds_list).astype(int)
##################################################
# Read and Display
##################################################
from .utils import crop_image, crop_catalog, background_stats
from astropy import wcs
# Read hdu
if not os.path.isfile(hdu_path):
msg = "Image does not exist. Check path."
logger.error(msg)
raise FileNotFoundError()
with fits.open(hdu_path) as hdul:
logger.info(f"Read Image: {hdu_path}")
data = hdul[0].data
header = hdul[0].header
wcs_data = wcs.WCS(header)
# Read output from SExtractor detection
SE_cat_full = Table.read(os.path.join(work_dir, f'{obj_name}-{band}.cat'), format="ascii.sextractor")
seg_map = fits.getdata(os.path.join(work_dir, f'{obj_name}-{band}_seg.fits'))
# Get ZP from header
if ZP is None: ZP = find_keyword_header(header, "ZP", raise_error=True)
# Get background from header or simple stats
bkg_, std = background_stats(data, header, mask=(seg_map>0), bkg_keyname="BACKVAL")
if bkg is None: bkg = bkg_
# Convert SE measured flux into mag
flux = SE_cat_full["FLUX_AUTO"]
mag = -2.5 * np.ma.log10(flux).filled(flux[flux>0].min()) + ZP
SE_cat_full["MAG_AUTO"] = np.around(mag, 5)
field_bounds = [field_pad, field_pad,
data.shape[1]-field_pad,
data.shape[0]-field_pad]
if not use_PS1_DR2: logger.info("Match field %r with catalog"%field_bounds)
logger.info("Measure Sky Patch [X min, Y min, X max, Y max] :")
[logger.info(" - Bounds: %r"%b) for b in bounds_list.tolist()]
# Display field_bounds and sub-regions to be matched
patch = crop_image(data, field_bounds,
sub_bounds=bounds_list,
seg_map=seg_map, draw=draw)
# Crop parent SE catalog
SE_cat = crop_catalog(SE_cat_full, field_bounds)
##################################################
# Crossmatch with Star Catalog (across the field)
##################################################
import astropy.units as u
from .utils import (identify_extended_source,
calculate_color_term,
add_supplementary_atlas,
add_supplementary_SE_star)
from .crossmatch import cross_match_PS1
# Identify bright extended sources and enlarge their mask
SE_cat_target, ext_cat, mag_saturate = identify_extended_source(SE_cat, draw=draw,
mag_limit=mag_limit,
mag_saturate=mag_saturate)
# Use PANSTARRS DR1 or DR2?
if use_PS1_DR2:
mag_name = mag_name_cat = band+'MeanPSFMag'
bounds_crossmatch = bounds_list
dir_name = os.path.join(work_dir, 'Measure-PS2/')
else:
mag_name = band+'mag'
mag_name_cat = mag_name+'_PS'
bounds_crossmatch = field_bounds
dir_name = os.path.join(work_dir, 'Measure-PS1/')
# Crossmatch with PANSTRRS mag < mag_limit
tab_target, tab_target_full, catalog_star = \
cross_match_PS1(band, wcs_data,
SE_cat_target,
bounds_crossmatch,
pixel_scale=pixel_scale,
sep=pixel_scale*u.arcsec,
mag_limit=mag_limit,
use_PS1_DR2=use_PS1_DR2,
verbose=True)
# Calculate color correction between PANSTARRS and DF filter
CT = calculate_color_term(tab_target_full, mag_range=[mag_saturate,18],
mag_name=mag_name_cat, draw=draw)
catalog_star["MAG_AUTO_corr"] = catalog_star[mag_name] + CT # corrected MAG_AUTO
tab_target["MAG_AUTO_corr"] = tab_target[mag_name_cat] + CT
# Manually add stars missed in the crossmatch or with weird mag to the table
tab_target = add_supplementary_SE_star(tab_target, SE_cat_target,
mag_saturate, mag_limit, draw=draw)
##################################################
# Save matched table and catalog
##################################################
from .utils import convert_decimal_string
if save:
check_save_path(dir_name, overwrite=True, verbose=False)
mag_str = convert_decimal_string(mag_limit)
tab_target_name = os.path.join(dir_name,
'%s-catalog_match_%smag%s.txt'%(obj_name, band, mag_str))
tab_target.write(tab_target_name,
overwrite=True, format='ascii')
catalog_star_name = os.path.join(dir_name,
f'{obj_name}-catalog_PS_{band}_all.txt')
catalog_star.write(catalog_star_name,
overwrite=True, format='ascii')
logger.info(f"Saved PANSTARRS catalog & matched sources in {dir_name}")
##################################################
# Build Mask & Measure Scaling (in selected patch)
##################################################
from .utils import (fit_empirical_aperture,
make_segm_from_catalog)
from .norm import measure_Rnorm_all
from .stack import make_global_stack_PSF
from .plotting import plot_bright_star_profile
# Empirical enlarged aperture size from magnitude based on matched SE detection
estimate_radius = fit_empirical_aperture(tab_target_full, seg_map,
mag_name=mag_name_cat,
mag_range=[10,22], K=2,
R_max=int(200/pixel_scale),
degree=2, draw=draw)
mask_par = mask_param_default.copy()
mask_par.update(mask_param)
k_mask_ext = mask_par['k_mask_ext']
width_cross_pix = mask_par['width_cross']/pixel_scale
width_ring_pix = mask_par['width_ring']/pixel_scale
for bounds in bounds_list:
# Catalog bound slightly wider than the region
catalog_bounds = (bounds[0]-field_pad, bounds[1]-field_pad,
bounds[2]+field_pad, bounds[3]+field_pad)
# Crop the star catalog and matched SE catalog
catalog_star_patch = crop_catalog(catalog_star, catalog_bounds,
sortby=mag_name,
keys=("X_CATALOG", "Y_CATALOG"))
tab_target_patch = crop_catalog(tab_target, catalog_bounds,
sortby=mag_name_cat,
keys=("X_IMAGE", "Y_IMAGE"))
# Make segmentation map from catalog based on SE seg map of one band
seg_map_c = make_segm_from_catalog(catalog_star_patch,
bounds,
estimate_radius,
mag_name=mag_name,
mag_limit=mag_limit_segm,
obj_name=obj_name,
band=band,
ext_cat=ext_cat,
k_mask_ext=k_mask_ext,
draw=draw,
save=save,
dir_name=dir_name,
verbose=True)
tab_norm, res_thumb = measure_Rnorm_all(hdu_path,
tab_target_patch,
bounds,
seg_map=seg_map,
mag_limit=mag_limit,
mag_saturate=mag_saturate,
r_scale=r_scale,
width_cross=width_cross_pix,
width_ring=width_ring_pix,
obj_name=obj_name,
mag_name=mag_name_cat,
save=save, dir_name=dir_name,
verbose=True)
if draw:
plot_bright_star_profile(tab_target_patch,
tab_norm, res_thumb,
bkg_sky=bkg, std_sky=std, ZP=ZP,
pixel_scale=pixel_scale)
make_global_stack_PSF(dir_name, bounds_list, obj_name, band, verbose=True)
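# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): preparing the measurement files
# for one sky patch before the fitting step. The image path, object name and
# bounds are placeholders; outputs are written to work_dir/Measure-PS1
# (or Measure-PS2 if use_PS1_DR2=True).
# ---------------------------------------------------------------------------
def _example_match_mask_measure():
    bounds = [[800, 1600, 4000, 4800]]        # [X min, Y min, X max, Y max]
    Match_Mask_Measure('image_g.fits', bounds, obj_name='obj', band='g',
                       ZP=None, bkg=None, r_scale=12,
                       mag_limit=15, mag_saturate=13.5,
                       draw=False, save=True, use_PS1_DR2=False,
                       work_dir='./obj')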
def Run_PSF_Fitting(hdu_path,
bounds_list,
obj_name,
band,
pixel_scale=DF_pixel_scale,
ZP_keyname='REFZP',
ZP=None,
bkg=None,
G_eff=None,
pad=50,
r_scale=12,
r_montage=10,
mag_limit=15,
mag_threshold=[14,12.],
mask_param=mask_param_default,
resampling_factor=1,
n_spline=3,
cutoff=True,
n_cutoff=4,
theta_cutoff=1200,
core_param={"frac":0.3, "beta":6.},
theta_0=5,
n0_=None,
fit_n0=True,
fit_n0_range=[20,40],
theta0_range=[30,300],
fix_n0=False,
fit_sigma=True,
fit_frac=False,
leg2d=False,
draw_real=True,
brightest_only=False,
parallel=True,
n_cpu=None,
nlive_init=None,
sample_method='auto',
print_progress=True,
draw=True,
save=True,
stop=False,
clean_measure=True,
use_PS1_DR2=False,
fn_psf_core=None,
work_dir='./'):
"""
Run the wide-angle PSF fitting.
Parameters
----------
hdu_path : str
Full path of hdu data
bounds_list : 2D int list / tuple
List of boundaries of regions to be fit (Nx4)
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name
band : str, 'g', 'G', 'r', 'R'
Filter name
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel
ZP_keyname : str, optional, default 'REFZP'
Keyword names of zero point in the header.
If not found, a value can be passed by ZP.
ZP : float or None, optional, default None
Zero point value (if None, read ZP from header)
bkg : float or None, optional, default None
Background estimated value (if None, read BACKVAL from header)
G_eff : float or None (default)
Effective gain (e-/ADU)
pad : int, optional, default 50
Padding size of the field for fitting
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured.
Default is 30" for Dragonfly.
r_montage : int, optional, default 10
Montage Radius for core and outer wings
mag_limit : float, optional, default 15
Magnitude upper limit below which stars are measured
mag_threshold : [float, float], default: [14, 12]
Magnitude thresholds to classify faint stars, medium bright stars and
very bright stars. The conversion from brightness uses a static PSF.
mask_param: dict, optional
Parameters setting up the mask map.
See doc string of .mask for details.
n_spline : int, optional, default 3
Number of power-law component for the aureole models.
The speed goes down as n_spline goes up. Default is 3.
cutoff : bool, optional, default True
If True, the aureole will be cutoff at theta_cutoff.
n_cutoff : float, optional, default 4
Cutoff slope for the aureole model.
Default is 4 for Dragonfly.
theta_cutoff : float, optional, default 1200
Cutoff range (in arcsec) for the aureole model.
Default is 20' for Dragonfly.
core_param: dict, optional
Initial estimate of parameters of the PSF core (not needed to be accurate).
The values will be fitted from stacked PSF.
"frac": fraction of aureole
"beta": moffat beta
"fwhm": moffat fwhm, in arcsec (optional)
theta_0 : float, optional, default 5
Flattened radius. Arbitrary but need to be small. in arcsec
n0_ : float, optional, default None
Power index of the first component, only used if fix_n0=True.
fit_n0 : bool, optional, default True
If True, fit n0 from profiles of bright stars before the Bayesian fitting.
fit_n0_range : 2-list, optional, default [20, 40]
Range for fitting n0 in arcsec
theta0_range : 2-list, optional, default [30, 300]
Range for fitting theta0 in arcsec
fix_n0 : bool, optional, default False
If True, n0 will be fixed to that value in the fitting.
Only set to True when n0 is known to be appropriate, or for test purposes.
fit_sigma : bool, optional, default True
Whether to fit the background stddev.
If False, will use the estimated value.
fit_frac : bool, optional, default False
Whether to fit the fraction of the aureole.
If False, use the fiducial value in core_param.
leg2d : bool, optional, default False
If True, fit a varied background with 2D Legendre polynomial.
Currently only support 1st order.
draw_real : bool, optional, default True
Whether to draw very bright stars in real space.
Recommended to be turned on.
brightest_only : bool, optional, default False
Whether to draw very bright stars only.
If turned on the fitting will ignore medium bright stars.
parallel : bool, optional, default True
Whether to run drawing for medium bright stars in parallel.
n_cpu : int, optional, default None
Number of cpu used for fitting and/or drawing.
nlive_init : int, optional, default None
Number of initial live points in dynesty. If None will
use nlive_init = ndim*10.
sample_method : {'auto', 'unif', 'rwalk', 'rstagger', 'slice', 'rslice', 'hslice', callable}, optional, default is 'auto'
Sampling method in dynesty. If 'auto', the method is 'unif' for ndim < 10,
'rwalk' for 10 <= ndim <= 20, 'slice' for ndim > 20.
'mle': Maximum likelihood estimation using scipy.
print_progress : bool, optional, default True
Whether to turn on the progress bar of dynesty
draw : bool, optional, default True
Whether to draw diagnostic plots
save : bool, optional, default True
Whether to save results
clean_measure : bool, optional, default True
Whether to clean intermediate files for measurement
use_PS1_DR2 : bool, optional, default False
Whether to use PANSTARRS DR2.
Crossmatch with DR2 is done by MAST query, which might fail
if a field is too large (> 1 deg^2)
fn_psf_core : bool, optional, default None
Path of the provided stacked PSF core.
work_dir : str, optional, default current directory
Full Path of directory for saving
Returns
-------
samplers : list
A list of Sampler class which contains fitting results.
"""
band = band.lower()
bounds_list = np.atleast_2d(bounds_list).astype(int)
# Set up directory names
plot_dir = os.path.join(work_dir, 'plot')
check_save_path(plot_dir, overwrite=True, verbose=False)
if use_PS1_DR2:
dir_measure = os.path.join(work_dir, 'Measure-PS2/')
else:
dir_measure = os.path.join(work_dir, 'Measure-PS1/')
# option for running on resampled image
from .utils import process_resampling
hdu_path, bounds_list = process_resampling(hdu_path, bounds_list,
obj_name, band,
pixel_scale=pixel_scale,
mag_limit=mag_limit,
r_scale=r_scale,
dir_measure=dir_measure,
work_dir=work_dir,
factor=resampling_factor)
if resampling_factor!=1:
obj_name += '_rp'
pixel_scale *= resampling_factor
r_scale /= resampling_factor
############################################
# Read Image and Table
############################################
# from . import DF_Gain
from .image import ImageList
from .utils import background_stats
# Read quantities from header
header = fits.getheader(hdu_path)
data = fits.getdata(hdu_path)
if ZP is None: ZP = find_keyword_header(header, ZP_keyname)
if G_eff is None:
N_frames = find_keyword_header(header, "NFRAMES", default=1e5)
G_eff = DF_Gain * N_frames
if N_frames==1e5:
logger.info("No effective Gain is given. Use sky noise.")
else:
logger.info("Effective Gain = %.3f"%G_eff)
# Get background from header or simple stats
seg_map = fits.getdata(os.path.join(work_dir, f'{obj_name}-{band}_seg.fits'))
bkg_, std = background_stats(data, header, mask=(seg_map>0), bkg_keyname="BACKVAL")
if bkg is None: bkg = bkg_
# Construct Image List
DF_Images = ImageList(hdu_path, bounds_list,
obj_name, band,
pixel_scale=pixel_scale,
pad=pad, ZP=ZP, bkg=bkg, G_eff=G_eff)
# Read faint stars info and brightness measurement
DF_Images.read_measurement_tables(dir_measure,
r_scale=r_scale,
mag_limit=mag_limit)
############################################
# Setup Stars
############################################
from .utils import assign_star_props
# class for bright stars and all stars
stars_b, stars_all = DF_Images.assign_star_props(r_scale=r_scale,
mag_threshold=mag_threshold,
verbose=True, draw=False,
save=save, save_dir=plot_dir)
############################################
# Masking
############################################
from .mask import Mask
# Mask faint and centers of bright stars
mask_par = mask_param_default.copy()   # avoid mutating the shared default dict
mask_par.update(mask_param)
DF_Images.make_mask(stars_b, dir_measure, mask_param=mask_par,
draw=draw, save=save, save_dir=plot_dir)
# Collect stars for fit. Choose if only use brightest stars
if brightest_only:
stars = [s.use_verybright() for s in DF_Images.stars]
else:
stars = DF_Images.stars # for fit
############################################
# Estimate Background & Fit n0
############################################
DF_Images.estimate_bkg()
if fix_n0:
DF_Images.n0_ = n0 = n0_ # fixed value
DF_Images.fix_n0 = True # fix n0 as the input value
else:
DF_Images.fit_n0(dir_measure,
pixel_scale=pixel_scale,
fit_range=fit_n0_range,
mag_max=mag_limit-2,
mag_limit=mag_limit,
r_scale=r_scale, sky_std=std,
draw=draw, save=save,
save_dir=plot_dir)
DF_Images.fix_n0 = fit_n0 # if use prefit value, also fix n0
n0 = np.median(DF_Images.n0) # initial guess
############################################
# Setup PSF and Fit the Core
############################################
from .utils import make_psf_2D
from .stack import montage_psf_image
## PSF Parameters ##
n_s = np.array([n0, 2.5]) # initial guess of power index
theta_s = np.array([theta_0, 10**2.])
# initial guess of transition radius in arcsec
# Core parameters, will be fitted
frac, beta = [core_param.get(prop) for prop in ["frac", "beta"]]
fwhm = core_param.get("fwhm", DF_Images.fwhm)
cutoff_param = dict(cutoff=cutoff, n_c=n_cutoff, theta_c=theta_cutoff)
# Make 2D PSF and a PSF Model ('psf')
image_psf, psf = make_psf_2D(n_s=n_s, theta_s=theta_s,
frac=frac, beta=beta, fwhm=fwhm,
cutoff_param=cutoff_param,
pixel_scale=pixel_scale,
psf_range=theta_cutoff)
# Montage the core and the 1st model component
if fn_psf_core is None:
fn_psf_stack = os.path.join(dir_measure, f'{obj_name}-{band}-PSF_stack.fits')
else:
fn_psf_stack = fn_psf_core
psf_stack = fits.getdata(fn_psf_stack)
image_psf = montage_psf_image(psf_stack, image_psf, r=r_montage)
# Fit and update core parameters
psf.fit_psf_core_1D(image_psf,
obj_name=obj_name, band=band,
save=save, draw=draw, save_dir=plot_dir)
############################################
# Set Basement Image
############################################
# Make fixed background of dim stars
DF_Images.make_base_image(psf.psf_star, stars_all, draw=False)
############################################
# Setup Priors and Likelihood Models for Fitting
############################################
DF_Images.set_container(psf, stars,
n_spline=n_spline,
theta0_range=theta0_range,
n_min=1.2, leg2d=leg2d,
parallel=parallel,
draw_real=draw_real,
fit_sigma=fit_sigma,
fit_frac=fit_frac,
brightest_only=brightest_only,
method=sample_method,
verbose=True)
## (a stop for inspection/developer)
if stop:
print('Stop for sanity check... Does everything look good?')
return DF_Images, psf, stars
############################################
# Run Sampling
############################################
from .sampler import Sampler
from .io import DateToday, AsciiUpper
samplers = []
for i, reg in enumerate(AsciiUpper(DF_Images.N_Image)):
ct = DF_Images.containers[i]
ndim = ct.ndim
s = Sampler(ct, n_cpu=n_cpu, sample_method=sample_method)
if nlive_init is None: nlive_init = ndim*10
# Run fitting
s.run_fitting(nlive_init=nlive_init,
nlive_batch=5*ndim+5, maxbatch=2,
print_progress=print_progress)
if save:
# Save outputs
core_param = {"frac":psf.frac, "fwhm":fwhm, "beta":psf.beta}
s.fit_info = {'obj_name':obj_name,
'band':band,
'date':DateToday(),
'n_spline':n_spline,
'bounds':bounds_list[i],
'pixel_scale':pixel_scale,
'r_scale':r_scale,
'core_param':core_param,
'fit_n0':fit_n0}
if cutoff:
s.fit_info.update(cutoff_param)
suffix = str(n_spline)+'p'
if leg2d: suffix+='l'
if fit_frac: suffix+='f'
if brightest_only: suffix += 'b'
if use_PS1_DR2: suffix += '_ps2'
if sample_method=='mle': suffix+='_mle'
Xmin, Ymin, Xmax, Ymax = bounds_list[i]
range_str = f'X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]'
fname = f'{obj_name}-{band}-{reg}-{range_str}-fit{suffix}.res'
s.save_results(fname, save_dir=work_dir)
stars[i].save(f'{obj_name}-{band}-{reg}-{range_str}-stars', save_dir=work_dir)
############################################
# Plot Results
############################################
from .plotting import AsinhNorm
suffix = str(n_spline)+'p'+'_'+obj_name
# Generate bright star model with the PSF
s.generate_fit(psf, stars[i], image_base=DF_Images[i].image_base)
if draw:
r_core = mask_param['r_core']
s.cornerplot(figsize=(18, 16),
save=save, save_dir=plot_dir, suffix=suffix)
# Plot recovered PSF
s.plot_fit_PSF1D(psf, n_bootstrap=500, r_core=r_core,
save=save, save_dir=plot_dir, suffix=suffix)
# Calculate Chi^2
s.calculate_reduced_chi2(Gain=G_eff, dof=ndim)
# Draw 2D compaison
s.draw_comparison_2D(r_core=r_core, Gain=G_eff,
vmin=DF_Images.bkg-s.bkg_std_fit,
vmax=DF_Images.bkg+20*s.bkg_std_fit,
save=save, save_dir=plot_dir, suffix=suffix)
if leg2d:
# Draw background
s.draw_background(save=save, save_dir=plot_dir,
suffix=suffix)
else:
pmed, pmean, pcov = s.get_params_fit()
print(" - Fitting (mean) : ", np.around(pmean,3))
print(" - Fitting (median) : ", np.around(pmed,3))
# Append the sampler
samplers += [s]
# Delete Stars to avoid pickling error in rerun
clean_pickling_object('stars')
# Clean intermediate outputs of each region for measurement
if clean_measure:
for file in Path(dir_measure).glob('*X*Y*'):
os.remove(file)
return samplers
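# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): inspecting one region's result
# returned by Run_PSF_Fitting. Attribute names follow Sampler.generate_fit,
# which the task calls before returning; the indexing assumes at least one
# region was fitted.
# ---------------------------------------------------------------------------
def _example_inspect_sampler(samplers):
    s = samplers[0]
    pmed, pmean, pcov = s.get_params_fit()    # posterior median / mean / covariance
    bkg_fit = getattr(s, 'bkg_fit', None)     # fitted background, set by generate_fit
    return pmed, bkg_fit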
class berry:
"""
Fruit of elderflower.
(A wrapper for running the functions.)
Parameters
----------
hdu_path : str
path of hdu data
bounds_list : list [[X min, Y min, X max, Y max],[...],...]
list of boundaries of regions to be fit (Nx4)
obj_name : str
object name
band : str, 'g', 'G', 'r', 'R'
filter name
work_dir : str, optional, default current directory
Full Path of directory for saving
config_file : yaml, optional, default None
configuration file which contains keyword arguments.
If None, use the default configuration file.
Example
-------
# Initialize the task
elder = berry(hdu_path, bounds, obj_name, 'g', work_dir, config_file)
# Check keyword parameters listed in the configuration:
elder.parameters
# Run detection
elder.detection()
# Run the task
elder.run()
"""
def __init__(self,
hdu_path,
bounds_list,
obj_name,
band,
work_dir='./',
config_file=None):
from .io import config_kwargs, default_config, load_config
self.hdu_path = hdu_path
self.bounds_list = bounds_list
self.obj_name = obj_name
self.band = band
with fits.open(hdu_path) as hdul:
self.data = hdul[0].data
self.header = hdul[0].header
hdul.close()
self.work_dir = work_dir
if config_file is None:
config_file = default_config
self.config = load_config(config_file)
self.config_func = partial(config_kwargs, config_file=config_file)
@property
def parameters(self):
""" Keyword parameter list in the configuration file """
@self.config_func
def _kwargs(**kwargs):
return kwargs
return _kwargs()
def detection(self, **kwargs):
""" Run the source detection. """
self.ZP = Run_Detection(self.hdu_path,
self.obj_name, self.band,
work_dir=self.work_dir,
FILTER_NAME=default_conv,
STARNNW_NAME=default_nnw, **kwargs)
def run(self, **kwargs):
""" Run the task (Match_Mask_Measure + Run_PSF_Fitting). """
@self.config_func
def _run(func, **kwargs):
argnames = func.__code__.co_varnames[:func.__code__.co_argcount]
keys = set(kwargs.keys()).intersection(argnames)
pars = {key: kwargs[key] for key in keys}
return func(self.hdu_path, self.bounds_list,
self.obj_name, self.band,
work_dir=self.work_dir, **pars)
_run(Match_Mask_Measure, **kwargs)
self.samplers = _run(Run_PSF_Fitting, **kwargs)
| 42,957 | 39.412041 | 136 |
py
|
elderflower
|
elderflower-master/elderflower/modeling.py
|
import os
import time
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy.integrate import quad
from scipy.spatial import distance
from scipy.special import gamma as Gamma
from astropy import units as u
from astropy.io import fits, ascii
from astropy.modeling import models
from astropy.utils import lazyproperty
import warnings
try:
import galsim
from galsim import GalSimBoundsError
galsim_installed = True
except ImportError:
warnings.warn("Galsim is not installed. Convolution-based star rendering is not enabled.")
galsim_installed = False
from copy import deepcopy
from numpy.polynomial.legendre import leggrid2d
from itertools import combinations
from functools import partial, lru_cache
try:
from .parallel import parallel_compute
parallel_enabled = True
except ImportError:
warnings.warn("Joblib / multiprocessing is not installed. Parallelization is not enabled.")
parallel_enabled = False
from .numeric import *
from .io import logger
from .utils import Intensity2SB, SB2Intensity
from .utils import round_good_fft, calculate_psf_size
from .utils import NormalizationError
from . import DF_pixel_scale
############################################
# Functions for making PSF models
############################################
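# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not used by the classes below).
# The wide-angle PSF is a weighted sum of a Moffat core and a power-law aureole,
#     PSF(r) = (1 - frac) * core(r; gamma, beta) + frac * aureole(r; n0, theta0),
# written here as a plain, schematically normalized 1D radial profile.
# Parameter names mirror those of PSF_Model; values are placeholders.
# ---------------------------------------------------------------------------
def _psf1d_sketch(r, frac=0.3, gamma=3., beta=6., n0=3.3, theta0=5.):
    core = (1. + (r / gamma)**2) ** (-beta)                      # Moffat-like core
    aureole = np.where(r <= theta0, 1., (r / theta0) ** (-n0))   # truncated power law
    return (1. - frac) * core + frac * aureole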
class PSF_Model:
""" A PSF Model object """
def __init__(self, params=None,
core_model='moffat',
aureole_model='multi-power'):
"""
Parameters
----------
params : a dictionary containing keywords of PSF parameter
core_model : model of PSF core (moffat)
aureole_model : model of aureole ("power" or "multi-power")
"""
self.core_model = core_model
self.aureole_model = aureole_model
self.cutoff = True # cutoff by default
# Build attribute for parameters from dictionary keys
for key, val in params.items():
if type(val) is list:
params[key] = val = np.array(val)
exec('self.' + key + ' = val')
self.params = params
if hasattr(self, 'fwhm'):
self.gamma = fwhm_to_gamma(self.fwhm, self.beta)
self.params['gamma'] = self.gamma
elif hasattr(self, 'gamma'):
self.fwhm = gamma_to_fwhm(self.gamma, self.beta)
self.params['fwhm'] = self.fwhm
else:
logger.error('Either fwhm or gamma needs to be given.')
if galsim_installed:
self.gsparams = galsim.GSParams(folding_threshold=1e-10)
if aureole_model == "power":
self.n0 = params['n0']
self.theta_0 = params['theta_0']
elif aureole_model == "multi-power":
self.n0 = params['n_s'][0]
self.theta_0 = params['theta_s'][0]
self.theta_s = np.array(self.theta_s)
def __str__(self):
return "A PSF Model Class"
def __repr__(self):
return " ".join([f"{self.__class__.__name__}", f"<{self.aureole_model}>"])
def pixelize(self, pixel_scale=DF_pixel_scale):
""" Build grid for drawing """
self.pixel_scale = pixel_scale
for key, val in self.params.items():
if ('gamma' in key) | ('theta' in key):
val = val / pixel_scale
exec('self.' + key + '_pix' + ' = val')
def update(self, params):
""" Update PSF parameters from dictionary keys """
pixel_scale = self.pixel_scale
for key, val in params.items():
if np.ndim(val) > 0:
val = np.array(val)
exec('self.' + key + ' = val')
self.params[key] = val
if 'fwhm' in params.keys():
self.gamma = fwhm_to_gamma(self.fwhm, self.beta)
self.params['gamma'] = self.gamma
elif 'gamma' in params.keys():
self.fwhm = gamma_to_fwhm(self.gamma, self.beta)
self.params['fwhm'] = self.fwhm
else:
pass
self.pixelize(pixel_scale)
def copy(self):
""" A deep copy of the object """
return deepcopy(self)
@property
def f_core1D(self):
""" 1D Core function *in pix* """
gamma_pix, beta = self.gamma_pix, self.beta
c_mof2Dto1D = C_mof2Dto1D(gamma_pix, beta)
return lambda r: moffat1d_normed(r, gamma_pix, beta) / c_mof2Dto1D
@property
def f_aureole1D(self):
""" 1D Aureole function *in pix* """
if self.aureole_model == "moffat":
gamma1_pix, beta1 = self.gamma1_pix, self.beta1
c_mof2Dto1D = C_mof2Dto1D(gamma1_pix, beta1)
f_aureole = lambda r: moffat1d_normed(r, gamma1_pix, beta1) / c_mof2Dto1D
elif self.aureole_model == "power":
n0, theta_0_pix = self.n0, self.theta_0_pix
c_aureole_2Dto1D = C_pow2Dto1D(n0, theta_0_pix)
f_aureole = lambda r: trunc_power1d_normed(r, n0, theta_0_pix) / c_aureole_2Dto1D
elif self.aureole_model == "multi-power":
n_s, theta_s_pix = self.n_s, self.theta_s_pix
c_aureole_2Dto1D = C_mpow2Dto1D(n_s, theta_s_pix)
f_aureole = lambda r: multi_power1d_normed(r, n_s, theta_s_pix) / c_aureole_2Dto1D
return f_aureole
def plot1D(self, **kwargs):
""" Plot 1D profile """
from .plotting import plot_PSF_model_1D
plot_PSF_model_1D(self.frac, self.f_core1D, self.f_aureole1D, **kwargs)
if self.aureole_model == "multi-power":
if kwargs.get("xunit") == "arcsec":
vline_pos = self.theta_s
else:
vline_pos = self.theta_s_pix
for pos in vline_pos:
plt.axvline(pos, ls="--", color="k", alpha=0.3, zorder=0)
def generate_core(self):
""" Generate Galsim PSF of core. """
gamma, beta = self.gamma, self.beta
self.fwhm = fwhm = gamma * 2. * math.sqrt(2**(1./beta)-1)
if galsim_installed:
psf_core = galsim.Moffat(beta=beta, fwhm=fwhm,
flux=1., gsparams=self.gsparams) # in arcsec
self.psf_core = psf_core
else:
psf_core = None
return psf_core
def generate_aureole(self,
contrast=1e6,
psf_scale=None,
psf_range=None,
min_psf_range=60,
max_psf_range=1200,
interpolant="linear"):
"""
Generate Galsim PSF of aureole.
Parameters
----------
contrast: Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given.
psf_scale: Pixel scale of the PSF, <= pixel scale of data. In arcsec/pix.
psf_range: Range of PSF. In arcsec.
min_psf_range : Minimum range of PSF. In arcsec.
max_psf_range : Maximum range of PSF. In arcsec.
interpolant: Interpolant method in Galsim.
Returns
----------
psf_aureole: power law Galsim PSF, flux normalized to be 1.
psf_size: Full image size of PSF used. In pixel.
"""
if galsim_installed == False:
psf_aureole = None
psf_size = None
else:
from galsim import Moffat, ImageF, InterpolatedImage
if psf_scale is None:
psf_scale = self.pixel_scale
if self.aureole_model == "moffat":
gamma1, beta1 = self.gamma1, self.beta1
if psf_range is None:
psf_range = max_psf_range
psf_size = round_good_fft(2 * psf_range // psf_scale)
else:
if psf_range is None:
psf_size = calculate_psf_size(self.n0, self.theta_0, contrast,
psf_scale, min_psf_range, max_psf_range)
else:
psf_size = round_good_fft(psf_range)
# Generate Grid of PSF and plot PSF model in real space onto it
xx_psf, yy_psf, cen_psf = generate_psf_grid(psf_size)
if self.aureole_model == "moffat":
psf_aureole = Moffat(beta=beta1, scale_radius=gamma1,
flux=1., gsparams=self.gsparams)
else:
if self.aureole_model == "power":
theta_0_pix = self.theta_0 / psf_scale
psf_model = trunc_power2d(xx_psf, yy_psf,
self.n0, theta_0_pix, I_theta0=1, cen=cen_psf)
elif self.aureole_model == "multi-power":
theta_s_pix = self.theta_s / psf_scale
psf_model = multi_power2d(xx_psf, yy_psf,
self.n_s, theta_s_pix, 1, cen=cen_psf)
# Parse the image to Galsim PSF model by interpolation
image_psf = ImageF(psf_model)
psf_aureole = InterpolatedImage(image_psf, flux=1,
scale=psf_scale,
x_interpolant=interpolant,
k_interpolant=interpolant)
self.psf_aureole = psf_aureole
self.theta_out = max_psf_range
return psf_aureole, psf_size
def Flux2Amp(self, Flux):
""" Convert Flux to Astropy Moffat Amplitude (pixel unit) """
Amps = [moffat2d_Flux2Amp(self.gamma_pix, self.beta, Flux=(1-self.frac)*F)
for F in Flux]
return np.array(Amps)
def I2I0(self, I, r=12):
""" Convert aureole I(r) at r to I0. r in pixel """
if self.aureole_model == "moffat":
return I2I0_mof(self.gamma1_pix, self.beta1, r, I=I)
elif self.aureole_model == "power":
return I2I0_pow(self.n0, self.theta_0_pix, r, I=I)
elif self.aureole_model == "multi-power":
return I2I0_mpow(self.n_s, self.theta_s_pix, r, I=I)
def I02I(self, I0, r=12):
""" Convert aureole I(r) at r to I0. r in pixel """
if self.aureole_model == "moffat":
return I02I_mof(self.gamma1_pix, self.beta1, r, I0=I0)
elif self.aureole_model == "power":
return I02I_pow(self.n0, self.theta_0_pix, r, I0=I0)
elif self.aureole_model == "multi-power":
return I02I_mpow(self.n_s, self.theta_s_pix, r, I0=I0)
def calculate_external_light(self, stars, n_iter=2):
""" Calculate the integrated external scatter light that affects
the flux scaling from very bright stars on the other stars.
Parameters
----------
stars : Star object
n_iter : iteration time to do the calculation
"""
I_ext = np.zeros(stars.n_bright)
if self.aureole_model == "moffat":
pass
else:
z_norm_verybright0 = stars.z_norm_verybright.copy()
pos_source, pos_eval = stars.star_pos_verybright, stars.star_pos_bright
if self.aureole_model == "power":
cal_ext_light = partial(calculate_external_light_pow,
n0=self.n0, theta0=self.theta_0_pix,
pos_source=pos_source, pos_eval=pos_eval)
elif self.aureole_model == "multi-power":
cal_ext_light = partial(calculate_external_light_mpow,
n_s=self.n_s, theta_s_pix=self.theta_s_pix,
pos_source=pos_source, pos_eval=pos_eval)
# Loop the subtraction
r_scale = stars.r_scale
verybright = stars.verybright[stars.bright]
for i in range(n_iter):
z_norm_verybright = z_norm_verybright0 - I_ext[verybright]
z_norm_verybright[z_norm_verybright<0] = 0
I0_verybright = self.I2I0(z_norm_verybright, r=r_scale)
I_ext = cal_ext_light(I0_source=I0_verybright)
return I_ext
def I2Flux(self, I, r):
""" Convert aureole I(r) at r to total flux. r in pixel """
if self.aureole_model == "moffat":
return I2Flux_mof(self.frac, self.gamma1_pix, self.beta1, r, I=I)
elif self.aureole_model == "power":
return I2Flux_pow(self.frac, self.n0, self.theta_0_pix, r, I=I)
elif self.aureole_model == "multi-power":
return I2Flux_mpow(self.frac, self.n_s, self.theta_s_pix, r, I=I)
def Flux2I(self, Flux, r):
""" Convert aureole I(r) at r to total flux. r in pixel """
if self.aureole_model == "moffat":
return Flux2I_mof(self.frac, self.gamma1_pix, self.beta1, r, Flux=Flux)
elif self.aureole_model == "power":
return Flux2I_pow(self.frac, self.n0, self.theta_0_pix, r, Flux=Flux)
elif self.aureole_model == "multi-power":
return Flux2I_mpow(self.frac, self.n_s, self.theta_s_pix, r, Flux=Flux)
def SB2Flux(self, SB, BKG, ZP, r):
""" Convert suface brightness SB at r to total flux, given background value and ZP. """
# Intensity = I + BKG
I = SB2Intensity(SB, BKG, ZP, self.pixel_scale) - BKG
return self.I2Flux(I, r)
def Flux2SB(self, Flux, BKG, ZP, r):
""" Convert total flux to suface brightness SB at r, given background value and ZP. """
I = self.Flux2I(Flux, r)
return Intensity2SB(I+ BKG, BKG, ZP, self.pixel_scale)
@property
def psf_star(self):
""" Galsim object of star psf (core+aureole) """
if galsim_installed:
frac = self.frac
psf_core, psf_aureole = self.psf_core, self.psf_aureole
return (1-frac) * psf_core + frac * psf_aureole
else:
return None
def plot_PSF_model_galsim(self, contrast=None, save=False, save_dir='.'):
""" Build and plot Galsim 2D model averaged in 1D """
from .plotting import plot_PSF_model_galsim
image_psf = plot_PSF_model_galsim(self, contrast=contrast,
save=save, save_dir=save_dir)
self.image_psf = image_psf / image_psf.array.sum()
@staticmethod
def write_psf_image(image_psf, filename='PSF_model.fits'):
""" Write the 2D psf image to fits """
hdu = fits.ImageHDU(image_psf)
hdu.writeto(filename, overwrite=True)
def draw_core2D_in_real(self, star_pos, Flux):
""" 2D drawing function of the core in real space given positions and flux (of core) of target stars """
gamma, alpha = self.gamma_pix, self.beta
Amps = np.array([moffat2d_Flux2Amp(gamma, alpha, Flux=flux)
for flux in Flux])
f_core_2d_s = np.array([models.Moffat2D(amplitude=amp, x_0=x0, y_0=y0,
gamma=gamma, alpha=alpha)
for ((x0,y0), amp) in zip(star_pos, Amps)])
return f_core_2d_s
def draw_aureole2D_in_real(self, star_pos, Flux=None, I0=None):
""" 2D drawing function of the aureole in real space given positions and flux / amplitude (of aureole) of target stars """
if self.aureole_model == "moffat":
gamma1_pix, alpha1 = self.gamma1_pix, self.beta1
# In this case I_theta0 is defined as the amplitude at gamma
if I0 is None:
I_theta0 = moffat2d_Flux2I0(gamma1_pix, alpha1, Flux=Flux)
elif Flux is None:
I_theta0 = I0
else:
raise NormalizationError("Both Flux and I0 are not given.")
Amps = np.array([moffat2d_I02Amp(alpha1, I0=I0)
for I0 in I_theta0])
f_aureole_2d_s = np.array([models.Moffat2D(amplitude=amp,
x_0=x0, y_0=y0,
gamma=gamma1_pix,
alpha=alpha1)
for ((x0,y0), amp) in zip(star_pos, Amps)])
elif self.aureole_model == "power":
n0 = self.n0
theta_0_pix = self.theta_0_pix
if I0 is None:
I_theta0 = power2d_Flux2Amp(n0, theta_0_pix, Flux=1) * Flux
elif Flux is None:
I_theta0 = I0
else:
raise NormalizationError("Both Flux and I0 are not given.")
f_aureole_2d_s = np.array([lambda xx, yy, cen=pos, I=I:\
trunc_power2d(xx, yy, cen=cen,
n=n0, theta0=theta_0_pix,
I_theta0=I)
for (I, pos) in zip(I_theta0, star_pos)])
elif self.aureole_model == "multi-power":
n_s = self.n_s
theta_s_pix = self.theta_s_pix
if I0 is None:
I_theta0 = multi_power2d_Flux2Amp(n_s, theta_s_pix, Flux=1) * Flux
elif Flux is None:
I_theta0 = I0
else:
raise NormalizationError("Both Flux and I0 are not given.")
f_aureole_2d_s = np.array([lambda xx, yy, cen=pos, I=I:\
multi_power2d(xx, yy, cen=cen,
n_s=n_s, theta_s=theta_s_pix,
I_theta0=I)
for (I, pos) in zip(I_theta0, star_pos)])
return f_aureole_2d_s
def fit_psf_core_1D(self, image_psf, **kwargs):
""" Fit the core parameters from 1D profiles of the input 2D PSF. """
from .utils import fit_psf_core_1D
params0 = {"fwhm":self.fwhm,
"beta":self.beta,
"frac":self.frac,
"n_s":self.n_s,
"theta_s":self.theta_s}
frac, beta = fit_psf_core_1D(image_psf,
params0=params0,
pixel_scale=self.pixel_scale,
**kwargs)
self.frac = max(1e-7, min(frac,1.0))
self.beta = beta
self.update({"frac":frac, "beta":beta})
class Stars:
"""
Class storing positions & flux of faint/medium-bright/bright stars
"""
def __init__(self, star_pos, Flux,
Flux_threshold=[2.7e5, 2.7e6],
z_norm=None, r_scale=12, BKG=0):
"""
Parameters
----------
star_pos: 2d array
pixel positions of stars in the region
Flux: 1d array
flux of stars (in ADU)
Flux_threshold : [float, float]
threshold of flux [MB, VB]
(default: corresponding to [13.5, 11] mag for DF)
z_norm : 1d array
flux scaling measured at r_scale
r_scale : int
radius at which to measure the flux scaling
BKG : float
sky background value
"""
self.star_pos = np.atleast_2d(star_pos)
self.Flux = np.atleast_1d(Flux)
self.Flux_threshold = Flux_threshold
self.F_bright = Flux_threshold[0]
self.F_verybright = Flux_threshold[1]
self.n_tot = len(star_pos)
self.bright = (self.Flux >= self.F_bright)
self.verybright = (self.Flux >= self.F_verybright)
self.medbright = self.bright & (~self.verybright)
if z_norm is not None:
self.z_norm = z_norm
self.r_scale = r_scale
self.BKG = BKG
def __str__(self):
return "A Star Class"
def __repr__(self):
return ' N='.join([f"{self.__class__.__name__}", str(self.n_tot)])
@classmethod
def from_znorm(cls, psf, star_pos, z_norm,
z_threshold=[10, 300], r_scale=12):
""" Star object built from intensity at r_scale instead of flux. """
Flux = psf.I2Flux(z_norm, r=r_scale)
Flux_threshold = psf.I2Flux(z_threshold, r=r_scale)
return cls(star_pos, Flux, Flux_threshold,
z_norm=z_norm, r_scale=r_scale)
def update_Flux(self, Flux):
self.Flux = Flux
def _info(self):
Flux = self.Flux
if len(Flux[self.medbright])>0:
msg = "# of medium bright stars : {0} ".format(self.n_medbright)
msg += "(flux range:{0:.2g}~{1:.2g})".format(Flux[self.medbright].min(), Flux[self.medbright].max())
logger.info(msg)
if len(Flux[self.verybright])>0:
msg = "# of very bright stars : {0} ".format(self.n_verybright)
msg += "(flux range:{0:.2g}~{1:.2g})".format(Flux[self.verybright].min(), Flux[self.verybright].max())
logger.info(msg)
# Rendering stars in parallel if number of bright stars exceeds 50
if self.n_medbright < 50:
msg = "Not many bright stars. Recommend to draw in serial."
logger.debug(msg)
self.parallel = False
else:
msg = "Crowded fields w/ bright stars > 50. Recommend to allow parallel."
logger.debug(msg)
self.parallel = True
@lazyproperty
def n_faint(self):
return np.sum(~self.bright)
@lazyproperty
def n_bright(self):
return np.sum(self.bright)
@lazyproperty
def n_verybright(self):
return np.sum(self.verybright)
@lazyproperty
def n_medbright(self):
return np.sum(self.medbright)
@property
def Flux_faint(self):
return self.Flux[~self.bright]
@property
def Flux_bright(self):
return self.Flux[self.bright]
@property
def Flux_verybright(self):
return self.Flux[self.verybright]
@property
def Flux_medbright(self):
return self.Flux[self.medbright]
@property
def z_norm_bright(self):
return self.z_norm[self.bright]
@property
def z_norm_verybright(self):
return self.z_norm[self.verybright]
@lazyproperty
def star_pos_faint(self):
return self.star_pos[~self.bright]
@lazyproperty
def star_pos_bright(self):
return self.star_pos[self.bright]
@lazyproperty
def star_pos_verybright(self):
return self.star_pos[self.verybright]
@lazyproperty
def star_pos_medbright(self):
return self.star_pos[self.medbright]
def plot_flux_dist(self, **kwargs):
from .plotting import plot_flux_dist
plot_flux_dist(self.Flux, [self.F_bright, self.F_verybright], **kwargs)
def copy(self):
return deepcopy(self)
def use_verybright(self):
""" Crop the object into a new object only contains its very bright stars """
logger.info("Only model brightest stars in the field.")
stars_vb = Stars(self.star_pos_verybright,
self.Flux_verybright,
Flux_threshold=self.Flux_threshold,
z_norm=self.z_norm_verybright,
r_scale=self.r_scale, BKG=self.BKG)
return stars_vb
def remove_outsider(self, image_shape, gap=[36,12]):
""" Remove out-of-field stars far from the edge. """
star_pos = self.star_pos
Flux = self.Flux
def out(d):
out_max = np.vstack([(star_pos[:,0]>image_shape[1]+d),
(star_pos[:,1]>image_shape[0]+d)]).T
out_min = (star_pos<-d)
out = out_min | out_max
return np.logical_or.reduce(out, axis=1)
remove_A = out(gap[0]) & self.verybright
remove_B = out(gap[1]) & self.medbright
remove = remove_A | remove_B
stars_new = Stars(star_pos[~remove], Flux[~remove],
self.Flux_threshold, self.z_norm[~remove],
r_scale=self.r_scale, BKG=self.BKG)
return stars_new
def save(self, name='stars', save_dir='./'):
from .io import save_pickle
save_pickle(self, os.path.join(save_dir, name+'.pkl'), 'Star model')
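# Illustrative usage sketch (not part of the original module; values are hypothetical):
# with the default thresholds [2.7e5, 2.7e6] ADU the first star below is medium
# bright and the second is very bright.
#   stars = Stars(star_pos=np.array([[100., 200.], [340., 80.]]),
#                 Flux=np.array([5e5, 3e6]),
#                 z_norm=np.array([40., 800.]), r_scale=12)
#   stars.n_medbright, stars.n_verybright   # -> (1, 1)
#   stars_vb = stars.use_verybright()       # keep only the very bright star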
### 2D functions ###
@lru_cache(maxsize=16)
def generate_psf_grid(psf_size):
""" Generate Grid of PSF and plot PSF model in real space onto it """
cen_psf = ((psf_size-1)/2., (psf_size-1)/2.)
yy_psf, xx_psf = np.mgrid[:psf_size, :psf_size]
return xx_psf, yy_psf, cen_psf
def power2d(xx, yy, n, theta0, I_theta0, cen):
""" Power law for 2d array, normalized = I_theta0 at theta0 """
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2) + 1e-6
rr[rr<=1] = rr[rr>1].min()
a = I_theta0 / (theta0)**(-n)
z = a * np.power(rr, -n)
return z
@njit
def trunc_power2d(xx, yy, n, theta0, I_theta0, cen):
""" Truncated power law for 2d array, normalized = I_theta0 at theta0 """
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2).ravel() + 1e-6
a = I_theta0 / (theta0)**(-n)
z = a * np.power(rr, -n)
z[rr<=theta0] = I_theta0
return z.reshape(xx.shape)
@njit
def multi_power2d(xx, yy, n_s, theta_s, I_theta0, cen, clear=False):
""" Multi-power law for 2d array, I = I_theta0 at theta0, theta in pix"""
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2).ravel()
z = np.zeros(xx.size)
theta0 = theta_s[0]
z[rr<=theta0] = I_theta0
if clear:
z[rr<=theta0] = 0
for k in range(len(a_s)):
reg = (rr>theta_s[k]) & (rr<=theta_s[k+1]) if k<len(a_s)-1 else (rr>theta_s[k])
z[reg] = a_s[k] * np.power(rr[reg], -n_s[k])
return z.reshape(xx.shape)
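# Worked example for the multi-power profile above (assumes compute_multi_pow_norm,
# defined elsewhere in the package, matches the segments at the break radii so the
# profile is continuous): with n_s=[3.3, 2.5], theta_s=[5, 100] (pix) and I_theta0=1,
#   I(r) = 1                                        for r <= 5
#   I(r) = a0 * r**-3.3,  a0 = 5**3.3               for 5 < r <= 100
#   I(r) = a1 * r**-2.5,  a1 = a0 * 100**(2.5-3.3)  for r > 100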
############################################
# Functions for PSF rendering with Galsim
############################################
def get_center_offset(pos):
""" Shift center for the purpose of accuracy (by default galsim round to integer!)
Originally should be x_pos, y_pos = pos + 1 (ref galsim demo)
But origin of star_pos in SE is (1,1) but (0,0) in python """
x_pos, y_pos = pos
x_nominal = x_pos + 0.5
y_nominal = y_pos + 0.5
ix_nominal = int(math.floor(x_nominal+0.5))
iy_nominal = int(math.floor(y_nominal+0.5))
dx = x_nominal - ix_nominal
dy = y_nominal - iy_nominal
offset = galsim.PositionD(dx,dy)
return (ix_nominal, iy_nominal), offset
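# Worked example: for pos = (100.3, 56.7) the nominal center is (100.8, 57.2),
# the integer pixel is (101, 57), and the residual sub-pixel offset passed to
# GalSim is approximately PositionD(-0.2, 0.2):
#   (ix, iy), offset = get_center_offset((100.3, 56.7))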
def draw_star(k, star_pos, Flux,
psf_star, psf_size, full_image,
pixel_scale=DF_pixel_scale):
""" Draw star #k at position star_pos[k] with Flux[k], using a combined PSF (psf_star) on full_image"""
# Function of drawing, devised to facilitate parallelization.
stamp, bounds = get_stamp_bounds(k, star_pos, Flux, psf_star, psf_size,
full_image, pixel_scale=pixel_scale)
full_image[bounds] += stamp[bounds]
def get_stamp_bounds(k, star_pos, Flux,
psf_star, psf_size, full_image,
pixel_scale=DF_pixel_scale):
""" Get stamp and boundary of star #k at position star_pos[k] with Flux[k], using a combined PSF (psf_star) on full_image"""
pos, flux = star_pos[k], Flux[k]
star = psf_star.withFlux(flux)
# Account for the fractional part of the position
(ix_nominal, iy_nominal), offset = get_center_offset(pos)
stamp = star.drawImage(nx=psf_size, ny=psf_size, scale=pixel_scale,
offset=offset, method='no_pixel')
stamp.setCenter(ix_nominal, iy_nominal)
bounds = stamp.bounds & full_image.bounds
return stamp, bounds
############################################
# Functions for making mock images
############################################
def add_image_noise(image, noise_std, random_seed=42):
""" Add Gaussian noise image """
if galsim_installed == False:
logger.warning("Galsim is not installed. Function disabled.")
return np.zeros_like(image)
else:
from galsim import ImageF, BaseDeviate, GaussianNoise
logger.debug("Generate noise background w/ stddev = %.3g"%noise_std)
Image = galsim.ImageF(image)
rng = galsim.BaseDeviate(random_seed)
gauss_noise = galsim.GaussianNoise(rng, sigma=noise_std)
Image.addNoise(gauss_noise)
return Image.array
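# Usage sketch (hypothetical values; requires galsim): add reproducible Gaussian
# noise to a mock image with a fixed seed.
#   image_noisy = add_image_noise(image_mock, noise_std=2.5, random_seed=42)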
def make_base_image(image_shape, stars, psf_base, pad=50, psf_size=64, verbose=False):
""" Background images composed of dim stars with fixed PSF psf_base"""
if galsim_installed:
from galsim import ImageF
else:
return np.zeros(image_shape)
if verbose:
logger.info("Generate base image of faint stars (flux < %.2g)."%(stars.F_bright))
start = time.time()
nX0 = image_shape[1] + 2 * pad
nY0 = image_shape[0] + 2 * pad
full_image0 = ImageF(nX0, nY0)
star_pos = stars.star_pos_faint + pad
Flux = stars.Flux_faint
if len(star_pos) == 0:
return np.zeros((nY0, nX0))
# draw faint stars with fixed PSF using galsim in Fourier space
for k in range(len(star_pos)):
try:
draw_star(k, star_pos=star_pos, Flux=Flux,
psf_star=psf_base, psf_size=psf_size, full_image=full_image0)
except GalSimBoundsError as e:
logger.debug("GalSim reported a GalSimBoundsError")
if verbose:
print(e.__doc__)
print(e.message)
continue
image_base0 = full_image0.array
end = time.time()
if verbose: logger.info("Total Time: %.3f s\n"%(end-start))
image_base = image_base0[pad:nY0-pad, pad:nX0-pad]
return image_base
def make_truth_image(psf, stars, image_shape, contrast=1e6,
parallel=False, verbose=False, saturation=4.5e4):
"""
Draw a truth image according to the given psf, position & flux.
In two manners: 1) convolution in FFT w/ Galsim;
and 2) plot in real space w/ astropy model.
"""
if galsim_installed == False:
raise Exception("Galsim is not installed. Function disabled.")
else:
from galsim import ImageF
if verbose:
logger.info("Generate the truth image.")
start = time.time()
# attributes
frac = psf.frac
gamma_pix = psf.gamma_pix
beta = psf.beta
nY, nX = image_shape
yy, xx = np.mgrid[:nY, :nX]
psf_core = psf.psf_core
psf_aureole = psf.psf_aureole
full_image = ImageF(nX, nY)
Flux_A = stars.Flux_bright
star_pos_A = stars.star_pos_bright
image_gs = full_image.array
# Draw bright stars in real space
func_core_2d_s = psf.draw_core2D_in_real(star_pos_A, (1-frac) * Flux_A)
func_aureole_2d_s = psf.draw_aureole2D_in_real(star_pos_A, frac * Flux_A)
image = np.sum([f2d(xx,yy) + p2d(xx,yy)
for (f2d, p2d) in zip(func_core_2d_s,
func_aureole_2d_s)], axis=0)
# combine the two image
image += image_gs
# saturation limit
image[image>saturation] = saturation
if verbose:
end = time.time()
logger.info("Total Time: %.3f s\n"%(end-start))
return image
def generate_image_by_flux(psf, stars, xx, yy,
contrast=[1e5,1e6],
min_psf_range=90,
max_psf_range=1200,
psf_range=[None,None],
psf_scale=DF_pixel_scale,
parallel=False,
draw_real=True,
draw_core=False,
brightest_only=False,
interpolant='cubic'):
"""
Generate the image by total flux, given the PSF object and Star object.
Parameters
----------
psf : PSF model describing the PSF model shape
stars : Star model describing positions and scaling of stars
contrast : Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given in psf_range.
min_psf_range : Minimum range of PSF if contrast is used. In arcsec.
max_psf_range : Maximum range of PSF if contrast is used. In arcsec.
psf_range : full range of PSF size (in arcsec) for drawing [medium, very] bright stars in convolution. Use contrast if not given. (default: None)
    psf_scale : pixel scale of PSF in arcsec/pixel. Default to DF pixel scale.
parallel : whether to run drawing for medium bright stars in parallel.
draw_real : whether to draw very bright stars in real.
draw_core : whether to draw the core for very bright stars in real.
brightest_only : whether to draw very bright stars only.
interpolant : Interpolant method in Galsim.
Returns
----------
image : drawn image
"""
nY, nX = xx.shape
frac = psf.frac
if psf_scale is None:
psf_scale = psf.pixel_scale
if not(draw_real & brightest_only):
psf_c = psf.psf_core
# Setup the canvas
full_image = galsim.ImageF(nX, nY)
if not brightest_only:
# Draw medium bright stars with galsim in Fourier space
psf_e, psf_size = psf.generate_aureole(contrast=contrast[0],
psf_scale=psf_scale,
psf_range=psf_range[0],
min_psf_range=min_psf_range//3,
max_psf_range=max_psf_range//3,
interpolant=interpolant)
psf_size = psf_size // 2 * 2
psf_star = (1-frac) * psf_c + frac * psf_e
if stars.n_medbright > 0:
if (not parallel) | (parallel_enabled==False):
# Draw in serial
for k in range(stars.n_medbright):
draw_star(k,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
else:
# Draw in parallel, automatically back to serial computing if too few jobs
p_get_stamp_bounds = partial(get_stamp_bounds,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
results = parallel_compute(np.arange(stars.n_medbright), p_get_stamp_bounds,
lengthy_computation=False, verbose=False)
for (stamp, bounds) in results:
full_image[bounds] += stamp[bounds]
if draw_real:
# Draw aureole of very bright star (if high cost in FFT) in real space
# Note origin of star_pos in SE is (1,1) but (0,0) in python
image_gs = full_image.array
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright-1,
Flux=frac * stars.Flux_verybright)
image_aureole = np.sum([f2d(xx,yy) for f2d in func_aureole_2d_s], axis=0)
if draw_core:
func_core_2d_s = psf.draw_core2D_in_real(stars.star_pos_verybright-1,
Flux=(1-frac) * stars.Flux_verybright)
image_gs += np.sum([f2d(xx,yy) for f2d in func_core_2d_s], axis=0)
image = image_gs + image_aureole
else:
# Draw very bright star in Fourier space
psf_e_2, psf_size_2 = psf.generate_aureole(contrast=contrast[1],
psf_scale=psf_scale,
psf_range=psf_range[1],
min_psf_range=min_psf_range,
max_psf_range=max_psf_range,
interpolant=interpolant)
psf_size_2 = psf_size_2 // 2 * 2
psf_star_2 = (1-frac) * psf_c + frac * psf_e_2
for k in range(stars.n_verybright):
draw_star(k,
star_pos=stars.star_pos_verybright,
Flux=stars.Flux_verybright,
psf_star=psf_star_2,
psf_size=psf_size_2,
full_image=full_image)
image = full_image.array
return image
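# Usage sketch (assumes `psf` is a PSF_Model and `stars` a Stars instance built
# above; the grid size is hypothetical and galsim is required):
#   yy, xx = np.mgrid[:1000, :1000]
#   image = generate_image_by_flux(psf, stars, xx, yy,
#                                  contrast=[1e5, 1e6],
#                                  psf_range=[320, 1200],
#                                  brightest_only=False, draw_real=True)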
def generate_image_by_znorm(psf, stars, xx, yy,
contrast=[1e5,1e6],
min_psf_range=90,
max_psf_range=1200,
psf_range=[None,None],
psf_scale=DF_pixel_scale,
parallel=False,
draw_real=True,
brightest_only=False,
subtract_external=True,
draw_core=False,
interpolant='cubic'):
"""
Generate the image by flux scaling, given the PSF object and Star object.
Parameters
----------
psf : PSF model describing the PSF model shape
stars : Star model describing positions and scaling of stars
xx, yy : image grid
contrast : Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given in psf_range.
min_psf_range : Minimum range of PSF if contrast is used. In arcsec.
max_psf_range : Maximum range of PSF if contrast is used. In arcsec.
psf_range : full range of PSF size (in arcsec) for drawing [medium, very] bright stars in convolution. (default: None)
    psf_scale : pixel scale of PSF in arcsec/pixel. Default to DF pixel scale.
parallel : whether to run drawing for medium bright stars in parallel.
draw_real : whether to draw very bright stars in real.
brightest_only : whether to draw very bright stars only.
draw_core : whether to draw the core for very bright stars in real.
    subtract_external : whether to subtract external scattered light from very bright stars.
interpolant : Interpolant method in Galsim.
Returns
----------
image : drawn image
"""
nY, nX = xx.shape
frac = psf.frac
r_scale = stars.r_scale
z_norm = stars.z_norm.copy()
# Subtract external light from brightest stars
if subtract_external:
I_ext = psf.calculate_external_light(stars)
z_norm[stars.bright] -= I_ext
if draw_real & brightest_only:
# Skip computation of Flux, and ignore core PSF
I0_verybright = psf.I2I0(z_norm[stars.verybright], r_scale)
else:
# Core PSF
psf_c = psf.psf_core
# Update stellar flux:
z_norm[z_norm<=0] = np.abs(z_norm).min() # problematic negatives
Flux = psf.I2Flux(z_norm, r=r_scale)
stars.update_Flux(Flux)
# Setup the canvas
if galsim_installed == False:
brightest_only = True
draw_real = True
full_image = np.empty((nY, nX), dtype=np.float32)
else:
full_image = galsim.ImageF(nX, nY)
if not brightest_only:
# 1. Draw medium bright stars with galsim in Fourier space
psf_e, psf_size = psf.generate_aureole(contrast=contrast[0],
psf_scale=psf_scale,
psf_range=psf_range[0],
min_psf_range=min_psf_range//3,
max_psf_range=max_psf_range//3,
interpolant=interpolant)
# psf_size = psf_size // 2 * 2
# Draw medium bright stars with galsim in Fourier space
psf_star = (1-frac) * psf_c + frac * psf_e
if stars.n_medbright > 0:
if (not parallel) | (parallel_enabled==False):
# Draw in serial
for k in range(stars.n_medbright):
draw_star(k,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
else:
# Draw in parallel, automatically back to serial computing if too few jobs
p_get_stamp_bounds = partial(get_stamp_bounds,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
results = parallel_compute(np.arange(stars.n_medbright), p_get_stamp_bounds,
lengthy_computation=False, verbose=False)
for (stamp, bounds) in results:
full_image[bounds] += stamp[bounds]
if draw_real:
# Draw very bright star in real space (high cost in convolution)
# Note origin of star_pos in SE is (1,1) but (0,0) in python
if brightest_only:
image_gs = 0. # no galsim image
# Only plot the aureole. A heavy mask is required.
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright-1,
I0=I0_verybright)
else:
image_gs = full_image.array
# Plot core + aureole.
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright-1,
Flux=frac * stars.Flux_verybright)
if draw_core:
func_core_2d_s = psf.draw_core2D_in_real(stars.star_pos_verybright-1,
Flux=(1-frac) * stars.Flux_verybright)
image_gs += np.sum([f2d(xx,yy) for f2d in func_core_2d_s], axis=0)
image_aureole = np.sum([f2d(xx,yy) for f2d in func_aureole_2d_s], axis=0)
image = image_gs + image_aureole
else:
        # Draw very bright star in Fourier space
psf_e_2, psf_size_2 = psf.generate_aureole(contrast=contrast[1],
psf_scale=psf_scale,
psf_range=psf_range[1],
min_psf_range=min_psf_range,
max_psf_range=max_psf_range,
interpolant=interpolant)
# psf_size_2 = psf_size_2 // 2 * 2
psf_star_2 = (1-frac) * psf_c + frac * psf_e_2
for k in range(stars.n_verybright):
draw_star(k,
star_pos=stars.star_pos_verybright,
Flux=stars.Flux_verybright,
psf_star=psf_star_2,
psf_size=psf_size_2,
full_image=full_image)
image = full_image.array
return image
def generate_image_fit(psf_fit, stars, image_shape, norm='brightness',
brightest_only=False, draw_real=True,
subtract_external=False, leg2d=False):
""" Generate the fitted bright stars, the fitted background and
    a noise image (for display only). """
nY, nX = image_shape
yy, xx = np.mgrid[:nY, :nX]
stars_ = stars.copy()
if norm=='brightness':
draw_func = generate_image_by_znorm
elif norm=='flux':
draw_func = generate_image_by_flux
if stars_.n_verybright==0:
subtract_external = False
pixel_scale = psf_fit.pixel_scale
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
image_stars = draw_func(psf_fit, stars_, xx, yy,
psf_range=[900, max(image_shape)*pixel_scale],
psf_scale=pixel_scale,
brightest_only=brightest_only,
subtract_external=subtract_external,
draw_real=draw_real, draw_core=True)
if hasattr(psf_fit, 'bkg_std') & hasattr(psf_fit, 'bkg'):
image_stars_noise = add_image_noise(image_stars, psf_fit.bkg_std)
noise_image = image_stars_noise - image_stars
bkg_image = psf_fit.bkg * np.ones((nY, nX))
logger.info(" - Background = %.3g +/- %.3g"%(psf_fit.bkg, psf_fit.bkg_std))
else:
noise_image = bkg_image = np.zeros_like(image_stars)
if leg2d:
Xgrid = np.linspace(-(1-1/nX)/2., (1-1/nX)/2., nX)
Ygrid = np.linspace(-(1-1/nY)/2., (1-1/nY)/2., nY)
H10 = leggrid2d(Xgrid, Ygrid, c=[[0,1],[0,0]])
H01 = leggrid2d(Xgrid, Ygrid, c=[[0,0],[1,0]])
bkg_image += psf_fit.A10 * H10 + psf_fit.A01 * H01
return image_stars, noise_image, bkg_image
############################################
# Priors and Likelihood Models for Fitting
############################################
def set_prior(n_est, mu_est, std_est, n_spline=2,
n_min=1.2, d_n0=0.1, d_n=0.2, std_min=3,
theta_in=50, theta_out=300, leg2d=False,
fix_n0=False, fix_theta=False,
fit_sigma=True, fit_frac=False):
"""
Setup prior transforms for models.
Parameters
----------
n_est : estimate of the first power-law index, i.e. from profile fitting
    mu_est : estimate of sky background level, from either the global DF reduction pipeline or a local sigma-clipped mean after aggressive masking
    std_est : estimate of sky uncertainty, from a local sigma-clipped stddev after aggressive masking
n_spline : number of power-law component for modeling the aureole
    n_min : minimum power index allowed in fitting
    d_n0 : stddev of normal prior of n_0
d_n : minimum length of prior jump in n_k for n_spline>=3, default 0.2
theta_in : inner boundary of the first transition radius
theta_out : outer boundary of the first transition radius
leg2d : whether a legendre polynomial background will be fit
std_min : estimated (expected to be poisson) noise as minimum noise
fit_frac : whether the aureole fraction will be fit
fit_sigma : whether the sky uncertainty will be fit
Returns
----------
prior_tf : prior transform function for fitting
"""
log_t_in = np.log10(theta_in)
log_t_out = np.log10(theta_out)
Dlog_t = log_t_out - log_t_in
log_t_s = np.logspace(log_t_in, log_t_out, n_spline+1)[1:-1]
    # log_t_s is used if fix_theta=True
Prior_mu = stats.truncnorm(a=-3, b=1., loc=mu_est, scale=std_est) # mu : N(mu_est, std_est)
# counting helper for # of parameters
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
Prior_logsigma = stats.truncnorm(a=-3, b=1,
loc=np.log10(std_est), scale=0.3)
Prior_logfrac = stats.uniform(loc=-2.5, scale=2.2)
if n_spline == 'm':
Prior_gamma = stats.uniform(loc=0., scale=10.)
Prior_beta = stats.uniform(loc=1.1, scale=6.)
def prior_tf_mof(u):
v = u.copy()
v[0] = Prior_gamma.ppf(u[0]) # gamma1
v[1] = Prior_beta.ppf(u[1]) # beta1
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_level = v[-K]
else:
leg_level = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_level-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_level-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_mof
else:
Prior_n0 = stats.norm(loc=n_est, scale=d_n0)
        # n0 : N(n_est, d_n0)
Prior_logtheta1 = stats.uniform(loc=log_t_in, scale=Dlog_t)
# log theta1 : log t_in - log t_out arcsec
if n_spline==2:
def prior_tf_2p(u):
v = u.copy()
if fix_n0:
v[0] = n_est
#v[0] = np.random.normal(n_est, d_n0)
else:
v[0] = Prior_n0.ppf(u[0])
v[1] = u[1] * (v[0]- d_n0 - n_min) + n_min # n1 : n_min - (n0-d_n0)
v[2] = Prior_logtheta1.ppf(u[2])
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_amp = v[-K]
else:
leg_amp = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_amp-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_amp-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_2p
else:
Priors = [Prior_n0, Prior_logtheta1,
Prior_mu, Prior_logsigma, Prior_logfrac]
prior_tf = partial(prior_tf_sp, Priors=Priors, n_spline=n_spline,
n_min=n_min, n_est=n_est, d_n0=d_n0, d_n=d_n,
log_t_s=log_t_s, log_t_out=log_t_out,
K=K, fix_n0=fix_n0, leg2d=leg2d,
fit_sigma=fit_sigma, fit_frac=fit_frac)
return prior_tf
def prior_tf_sp(u, Priors, n_spline=3,
d_n=0.2, n_min=1.2, n_max=3.5, n_est=3.3, d_n0=0.2,
leg2d=False, fix_n0=False, flexible=False,
fix_theta=False, log_t_s=[90, 180], log_t_out=300,
K=1, fit_sigma=True, fit_frac=False):
""" Prior Transform function for n_spline """
    # parameter vector (copy of the unit-cube sample)
v = u.copy()
# read priors
Prior_n0, Prior_logtheta1, Prior_mu, Prior_logsigma, Prior_logfrac = Priors
# n prior
if fix_n0:
v[0] = n_est
else:
v[0] = Prior_n0.ppf(u[0])
if flexible:
for k in range(n_spline-2):
v[k+1] = u[k+1] * max(-2.+d_n, n_min-v[k]+d_n) + (v[k]-d_n)
# n_k+1 : max{n_min, n_k-2} - n_k-d_n
v[k+2] = u[k+2] * min(n_max-(v[k+1]-d_n), n_max-n_min) + max(n_min, v[k+1]-d_n)
# n_last : max(n_min, n_k-d_n) - n_max
else:
for k in range(n_spline-1):
v[k+1] = u[k+1] * max(-2.+d_n, n_min-v[k]+d_n) + (v[k]-d_n)
# n_k+1 : max{n_min, n_k-2} - n_k-d_n
# theta prior
if fix_theta:
v[n_spline:2*n_spline-1] = log_t_s
else:
v[n_spline] = Prior_logtheta1.ppf(u[n_spline])
# log theta1 : log t_in - t_out # in arcsec
for k in range(n_spline-2):
v[k+n_spline+1] = u[k+n_spline+1] * \
(log_t_out - v[k+n_spline]) + v[k+n_spline]
# log theta_k+1: log theta_k - log t_out # in arcsec
# background prior
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_amp = v[-K]
else:
leg_amp = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_amp-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_amp-1.3, scale=1.3) # log A01
# frac prior
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
def build_independent_priors(priors):
""" Build priors for Bayesian fitting. Priors should has a (scipy-like) ppf class method."""
def prior_transform(u):
v = u.copy()
for i in range(len(u)):
v[i] = priors[i].ppf(u[i])
return v
return prior_transform
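# Minimal sketch: two independent scipy priors mapped from the unit cube.
# At u = 0.5 each ppf returns the median of its prior.
#   priors = [stats.uniform(loc=1.2, scale=2.0), stats.norm(loc=0., scale=1.)]
#   prior_tf = build_independent_priors(priors)
#   prior_tf(np.array([0.5, 0.5]))   # -> array([2.2, 0.])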
def draw_proposal(draw_func,
proposal,
psf, stars,
K=1, leg=None):
# Draw image and calculate log-likelihood
    # K : position order of background in the proposal (default -2)
mu = proposal[-K-1]
image_tri = draw_func(psf, stars)
image_tri += mu
if leg is not None:
A10, A01 = 10**proposal[-K-2], 10**proposal[-K-3]
H10, H01 = leg.coefs
image_tri += A10 * H10 + A01 * H01
return image_tri
def calculate_likelihood(ypred, data, sigma):
# Calculate log-likelihood
residsq = (ypred - data)**2 / sigma**2
loglike = -0.5 * np.sum(residsq + np.log(2 * np.pi * sigma**2))
if not np.isfinite(loglike):
loglike = -1e100
return loglike
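# The Gaussian log-likelihood computed above is
#   ln L = -0.5 * sum_i [ (ypred_i - data_i)**2 / sigma_i**2 + ln(2*pi*sigma_i**2) ]
# e.g. a perfect prediction of a single pixel with sigma = 1 gives
# ln L = -0.5*ln(2*pi) ~ -0.919.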
class Legendre2D:
"""
Legendre 2D coefficients
"""
def __init__(self, image_shape, K=0, order=1):
self.image_shape = image_shape
nY, nX = image_shape
#x_grid = y_grid = np.linspace(0,image_size-1, image_size)
#self.x_grid = x_grid
#self.y_grid = y_grid
#self.cen = ((Ximage_size-1)/2., (Yimage_size-1)/2.)
X_grid = np.linspace(-(1-1/nX)/2., (1-1/nX)/2., nX)
Y_grid = np.linspace(-(1-1/nY)/2., (1-1/nY)/2., nY)
self.X_grid = X_grid
self.Y_grid = Y_grid
if order == 1:
H10 = leggrid2d(X_grid, Y_grid, c=[[0,1],[0,0]])
H01 = leggrid2d(X_grid, Y_grid, c=[[0,0],[1,0]])
self.coefs = [H10, H01]
def set_likelihood(image, mask_fit, psf, stars,
norm='brightness', n_spline=2,
fix_n0=False, brightest_only=False,
psf_range=[None,None], leg2d=False,
std_est=None, G_eff=1e5,
fit_sigma=True, fit_frac=False,
parallel=False, draw_real=False):
"""
Setup likelihood function.
Parameters
----------
image: 2d image data to be fit
mask_fit: mask map (masked region is 1)
psf: A PSF class to be updated
stars: Stars class for the modeling
Returns
----------
loglike : log-likelihood function for fitting
"""
data = image[~mask_fit].copy().ravel()
image_shape = image.shape
nY, nX = image_shape
yy, xx = np.mgrid[:nY, :nX]
stars_0 = stars.copy()
z_norm = stars_0.z_norm.copy()
if norm=='brightness':
draw_func = generate_image_by_znorm
elif norm=='flux':
draw_func = generate_image_by_flux
if (psf.aureole_model!='moffat') & (stars.n_verybright > 0) & (norm=='brightness'):
subtract_external = True
else:
subtract_external = False
p_draw_func = partial(draw_func, xx=xx, yy=yy,
psf_range=psf_range,
psf_scale=psf.pixel_scale,
max_psf_range=psf.theta_out,
brightest_only=brightest_only,
subtract_external=subtract_external,
parallel=parallel, draw_real=draw_real)
    # K : position order of background in the proposal (default -2)
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
# 1st-order Legendre Polynomial
if leg2d:
leg = Legendre2D(image_shape, order=1)
H10, H01 = leg.coefs
else:
leg = None
H10, H01 = 0, 0
if n_spline == 'm':
def loglike_mof(v):
gamma1, beta1 = v[:2]
mu = v[-K-1]
if fit_sigma:
sigma = 10**v[-K]
param_update = {'gamma1':gamma1, 'beta1':beta1}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (stars.BKG - mu)
image_tri = p_draw_func(psf, stars)
image_tri += mu
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_mof
else:
n0 = psf.n0
theta_0 = psf.theta_0 # inner flattening
cutoff = psf.cutoff # whether to cutoff
theta_c = psf.theta_c # outer cutoff
n_c = psf.n_c
if n_spline==2:
def loglike_2p(v):
n_s = v[:2]
### Below is new!
if fix_n0:
n_s[0] = n0
###
theta_s = [theta_0, 10**v[2]]
if cutoff:
n_s = np.append(n_s, n_c)
theta_s = np.append(theta_s, theta_c)
mu = v[-K-1]
if not np.all(theta_s[1:] > theta_s[:-1]):
loglike = -1e100
return loglike
param_update = {'n_s':n_s, 'theta_s':theta_s}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
psf.update({'n_s':n_s, 'theta_s':theta_s})
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (stars.BKG - mu)
image_tri = p_draw_func(psf, stars)
image_tri += mu
if leg2d:
A10, A01 = 10**v[-K-2], 10**v[-K-3]
bkg_leg = A10 * H10 + A01 * H01
image_tri += bkg_leg
ypred = image_tri[~mask_fit].ravel()
if fit_sigma:
# sigma = 10**v[-K]
sigma = np.sqrt((10**v[-K])**2+(ypred-mu)/G_eff)
else:
#sigma = std_est
sigma = np.sqrt(std_est**2+(ypred-mu)/G_eff)
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_2p
elif n_spline==3:
def loglike_3p(v):
n_s = v[:3]
if fix_n0:
n_s[0] = n0
theta_s = np.array([theta_0, 10**v[3], 10**v[4]])
if cutoff:
n_s = np.append(n_s, n_c)
theta_s = np.append(theta_s, theta_c)
if not np.all(theta_s[1:] > theta_s[:-1]):
loglike = -1e100
return loglike
mu = v[-K-1]
param_update = {'n_s':n_s, 'theta_s':theta_s}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (stars.BKG - mu)
image_tri = p_draw_func(psf, stars)
image_tri += mu
if leg2d:
A10, A01 = 10**v[-K-2], 10**v[-K-3]
image_tri += A10 * H10 + A01 * H01
ypred = image_tri[~mask_fit].ravel()
if fit_sigma:
#sigma = 10**v[-K]
sigma = np.sqrt((10**v[-K])**2+(ypred-mu)/G_eff)
else:
#sigma = std_est
sigma = np.sqrt(std_est**2+(ypred-mu)/G_eff)
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_3p
else:
def loglike_sp(v):
n_s = v[:n_spline]
if fix_n0:
n_s[0] = n0
theta_s = np.append(theta_0, 10**v[n_spline:2*n_spline-1])
if cutoff:
n_s = np.append(n_s, n_c)
theta_s = np.append(theta_s, theta_c)
if not np.all(theta_s[1:] > theta_s[:-1]):
loglike = -1e100
return loglike
mu = v[-K-1]
param_update = {'n_s':n_s, 'theta_s':theta_s}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (stars.BKG - mu)
image_tri = draw_proposal(p_draw_func, v,
psf, stars,
K=K, leg=leg)
ypred = image_tri[~mask_fit].ravel()
if fit_sigma:
#sigma = 10**v[-K]
sigma = np.sqrt((10**v[-K])**2+(ypred-mu)/G_eff)
else:
#sigma = std_est
sigma = np.sqrt(std_est**2+(ypred-mu)/G_eff)
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_sp
| 64,315 | 35.357264 | 150 |
py
|
elderflower
|
elderflower-master/elderflower/utils.py
|
import os
import re
import sys
import math
import time
import string
import random
import warnings
from functools import partial
import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from astropy import wcs
from astropy import units as u
from astropy.io import fits, ascii
from astropy.nddata import Cutout2D
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column, setdiff, join
from astropy.stats import mad_std, biweight_location, gaussian_fwhm_to_sigma
from astropy.stats import sigma_clip, SigmaClip, sigma_clipped_stats
from photutils import detect_sources, deblend_sources
from photutils import CircularAperture, CircularAnnulus, EllipticalAperture
try:
import galsim
from galsim import GalSimBoundsError
galsim_installed = True
except ImportError:
warnings.warn("Galsim is not installed. Convolution-based star rendering is not enabled.")
galsim_installed = False
from .io import logger
from .io import save_pickle, load_pickle, check_save_path
from .numeric import log_linear, flattened_linear, piecewise_linear
from .plotting import LogNorm, AsinhNorm, colorbar
from . import DF_pixel_scale, DF_raw_pixel_scale
try:
from reproject import reproject_interp
reproject_install = True
except ImportError:
warnings.warn("reproject is not installed. No rescaling available.")
reproject_install = False
# default SE columns for cross_match
SE_COLUMNS = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD",
"MAG_AUTO", "FLUX_AUTO", "FWHM_IMAGE", "MU_MAX", "FLAGS"]
# Fiducial values of PSF parameters
DF_default_params = {"fwhm":6.,
"beta":6.6,
"frac":0.3,
"n_s":np.array([3.3, 2.5]),
"theta_s":np.array([5, 100])}
### Basic Funcs ###
def coord_Im2Array(X_IMAGE, Y_IMAGE, origin=1):
""" Convert image coordniate to numpy array coordinate """
x_arr, y_arr = int(max(round(Y_IMAGE)-origin, 0)), int(max(round(X_IMAGE)-origin, 0))
return x_arr, y_arr
def coord_Array2Im(x_arr, y_arr, origin=1):
""" Convert image coordniate to numpy array coordinate """
X_IMAGE, Y_IMAGE = y_arr+origin, x_arr+origin
return X_IMAGE, Y_IMAGE
def Intensity2SB(Intensity, BKG, ZP, pixel_scale=DF_pixel_scale):
""" Convert intensity to surface brightness (mag/arcsec^2) given the background value, zero point and pixel scale """
I = np.atleast_1d(np.copy(Intensity))
I[np.isnan(I)] = BKG
if np.any(I<=BKG):
I[I<=BKG] = np.nan
I_SB = -2.5*np.log10(I - BKG) + ZP + 2.5 * math.log10(pixel_scale**2)
return I_SB
def SB2Intensity(SB, BKG, ZP, pixel_scale=DF_pixel_scale):
"""
    Convert surface brightness (mag/arcsec^2) to intensity given the
background value, zero point and pixel scale.
"""
SB = np.atleast_1d(SB)
I = 10** ((SB - ZP - 2.5 * math.log10(pixel_scale**2))/ (-2.5)) + BKG
return I
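# Worked example: with ZP=27.1 and pixel_scale=2.5, one ADU above the background
# corresponds to 27.1 + 2.5*log10(2.5**2) ~ 29.09 mag/arcsec^2, and SB2Intensity
# inverts the relation (values are illustrative):
#   Intensity2SB(1001., BKG=1000., ZP=27.1, pixel_scale=2.5)   # ~ 29.09
#   SB2Intensity(29.09, BKG=1000., ZP=27.1, pixel_scale=2.5)   # ~ 1001.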
def convert_decimal_string(value, n_digit=1):
# convert decimal number into string for file save
value_int = int(value)
value_deci = int(10**n_digit*np.round(value - value_int, n_digit))
return "{:d}p{:d}".format(value_int, value_deci)
def round_good_fft(x):
# Rounded PSF size to 2^k or 3*2^k
a = 1 << int(x-1).bit_length()
b = 3 << int(x-1).bit_length()-2
if x>b:
return a
else:
return min(a,b)
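# Worked examples of the FFT-friendly rounding:
#   round_good_fft(130)   # -> 192  (3 * 2**6)
#   round_good_fft(300)   # -> 384  (3 * 2**7)
#   round_good_fft(400)   # -> 512  (2**9)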
def calculate_psf_size(n0, theta_0, contrast=1e5, psf_scale=DF_pixel_scale,
min_psf_range=60, max_psf_range=720):
A0 = theta_0**n0
opt_psf_range = int((contrast * A0) ** (1./n0))
psf_range = max(min_psf_range, min(opt_psf_range, max_psf_range))
# full (image) PSF size in pixel
psf_size = 2 * psf_range // psf_scale
return round_good_fft(psf_size)
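# Worked example: for n0=3.3, theta_0=5 (arcsec), contrast=1e5 and the default
# 2.5 arcsec/pix scale, the optimal range is ~163 arcsec, i.e. a PSF image of
# ~130 pix, which round_good_fft brings up to 192 pix:
#   calculate_psf_size(3.3, 5, contrast=1e5, psf_scale=2.5)   # -> 192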
def compute_poisson_noise(data, n_frame=1, header=None, Gain=0.37):
if header is not None:
try:
n_frame = np.int32(header['NFRAMES'])
except KeyError:
n_frame = 1
    G_effective = Gain * n_frame # effective gain: e-/ADU
std_poi = np.nanmedian(np.sqrt(data/G_effective))
if np.isnan(std_poi):
std_poi = None
print("Sky Poisson Noise Unavailable.")
else:
print("Sky Poisson Noise: %.3f"%std_poi)
return std_poi
def extract_bool_bitflags(bitflags, ind):
from astropy.nddata.bitmask import interpret_bit_flags
return np.array(["{0:016b}".format(0xFFFFFFFF & interpret_bit_flags(flag))[-ind]
for flag in np.atleast_1d(bitflags)]).astype(bool)
### Photometry Funcs ###
def background_stats(data, header, mask, bkg_keyname="BACKVAL", **kwargs):
""" Check if background stored in header + short stats """
from astropy.stats import sigma_clipped_stats
from .io import find_keyword_header
# Short estimate summary
mean, med, std = sigma_clipped_stats(data, mask, **kwargs)
logger.info("Background stats: mean = %.5g med = %.5g std = %.5g"%(mean, med, std))
# check header key
if bkg_keyname in header.keys():
bkg = find_keyword_header(header, bkg_keyname)
else:
bkg = med
return bkg, std
def background_annulus(cen, data, mask,
r_in=240., r_out=360, draw=True,
**plot_kw):
""" Extract local background value using annulus """
data_ = data.copy()
annulus_aperture = CircularAnnulus(cen, r_in=r_in, r_out=r_out)
annulus_masks = annulus_aperture.to_mask(method='center')
annulus_data = annulus_masks.multiply(data_)
mask_ring = annulus_masks.data
annulus_data_1d = annulus_data[mask_ring!=0]
mask_1d = annulus_masks.multiply(mask)[mask_ring!=0]
_, median_sigclip, _ = sigma_clipped_stats(annulus_data_1d, mask=mask_1d)
if draw:
plt.imshow(np.ma.array(annulus_data, mask=mask_ring==0), **plot_kw)
plt.show()
return median_sigclip
def background_extraction(field, mask=None, return_rms=True,
b_size=64, f_size=3, n_iter=5, **kwargs):
""" Extract background & rms image using SE estimator with mask """
from photutils import Background2D, SExtractorBackground
try:
Bkg = Background2D(field, mask=mask,
bkg_estimator=SExtractorBackground(),
box_size=b_size, filter_size=f_size,
sigma_clip=SigmaClip(sigma=3., maxiters=n_iter),
**kwargs)
back = Bkg.background
back_rms = Bkg.background_rms
except ValueError:
img = field.copy()
if mask is not None:
img[mask] = np.nan
back = np.nanmedian(field) * np.ones_like(field)
back_rms = np.nanstd(field) * np.ones_like(field)
if return_rms:
return back, back_rms
else:
return back
def source_detection(data, sn=2.5, b_size=120,
k_size=3, fwhm=3, smooth=True,
sub_background=True, mask=None):
from astropy.convolution import Gaussian2DKernel
from photutils import detect_sources, deblend_sources
if sub_background:
back, back_rms = background_extraction(data, b_size=b_size)
threshold = back + (sn * back_rms)
else:
back = np.zeros_like(data)
threshold = np.nanstd(data)
if smooth:
sigma = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=k_size, y_size=k_size)
kernel.normalize()
else:
kernel=None
segm_sm = detect_sources(data, threshold, npixels=5,
filter_kernel=kernel, mask=mask)
data_ma = data.copy() - back
data_ma[segm_sm.data!=0] = np.nan
return data_ma, segm_sm
def iter_curve_fit(x_data, y_data, func, p0=None,
color=None, x_min=None, x_max=None,
x_lab='', y_lab='',c_lab='',
n_iter=3, k_std=5, draw=True,
fig=None, ax=None, **kwargs):
""" Wrapper for iterative curve_fit """
# min-max cutoff
if x_min is None: x_min = x_data.min()
if x_max is None: x_max = x_data.max()
cut = (x_data>x_min) & (x_data<x_max)
x_data = x_data[cut]
y_data = y_data[cut]
if color is not None: color = color[cut]
# initialize
x_test = np.linspace(x_min, x_max)
clip = np.zeros_like(x_data, dtype='bool')
# first curve_fit
popt, pcov = curve_fit(func, x_data, y_data, p0=p0, **kwargs)
if draw:
with plt.rc_context({"text.usetex": False}):
if fig is None: fig = plt.figure()
if ax is None: ax = fig.add_subplot(1,1,1)
# Iterative sigma clip
for i in range(n_iter):
if draw: ax.plot(x_test, func(x_test, *popt),
color='r', lw=1, ls='--', alpha=0.2)
x_clip, y_clip = x_data[~clip], y_data[~clip]
popt, pcov = curve_fit(func, x_clip, y_clip, p0=p0, **kwargs)
# compute residual and stddev
res = y_data - func(x_data, *popt)
std = mad_std(res)
clip = res**2 > (k_std*std)**2
# clip function
clip_func = lambda x, y: (y - func(x, *popt))**2 > (k_std*std)**2
if draw:
s = ax.scatter(x_data, y_data, c=color,
s=10, alpha=0.4)
ax.scatter(x_data[clip], y_data[clip], lw=2, s=25,
facecolors='none', edgecolors='orange', alpha=0.7)
ax.plot(x_test, func(x_test, *popt), color='r')
if plt.rcParams['text.usetex']:
c_lab = c_lab.replace('_','$\_$')
x_lab = x_lab.replace('_','$\_$')
y_lab = y_lab.replace('_','$\_$')
if color is not None:
with plt.rc_context({"text.usetex": False}):
fig.colorbar(s, label=c_lab)
ax.set_xlim(x_min, x_max)
invert = lambda lab: ('MAG' in lab) | ('MU' in lab)
if invert(x_lab): ax.invert_xaxis()
if invert(y_lab): ax.invert_yaxis()
# ax.set_xlabel(x_lab)
# ax.set_ylabel(y_lab)
return popt, pcov, clip_func
def identify_extended_source(SE_catalog, mag_limit=15, mag_saturate=13.5, draw=True):
""" Empirically pick out (bright) extended sources in the SE_catalog.
    The catalog needs to contain the following columns:
'MAG_AUTO', 'MU_MAX', 'ELLIPTICITY', 'CLASS_STAR' """
bright = SE_catalog['MAG_AUTO'] < mag_limit
SE_bright = SE_catalog[bright]
if len(SE_bright)>0:
x_data, y_data = SE_bright['MAG_AUTO'], SE_bright['MU_MAX']
else:
        return SE_catalog, None, mag_saturate
MU_satur_0 = np.quantile(y_data, 0.001) # guess of saturated MU_MAX
MAG_satur_0 = mag_saturate # guess of saturated MAG_AUTO
# Fit a flattened linear
logger.info("Fit an empirical relation to exclude extended sources...")
popt, _, clip_func = iter_curve_fit(x_data, y_data, flattened_linear,
p0=(1, MAG_satur_0, MU_satur_0),
x_max=mag_limit, x_min=max(7,np.min(x_data)),
color=SE_bright['CLASS_STAR'],
x_lab='MAG_AUTO',y_lab='MU_MAX',
c_lab='CLASS_STAR', draw=draw)
if draw: plt.show()
mag_saturate = popt[1]
logger.info("Saturation occurs at mag = {:.2f}".format(mag_saturate))
# pick outliers in the catalog
outlier = clip_func(SE_catalog['MAG_AUTO'], SE_catalog['MU_MAX'])
# identify bright extended sources by:
    # (1) elliptical object or CLASS_STAR<0.5, or
    # (2) lying outside the MU_MAX vs MAG_AUTO relation,
# and (3) brighter than mag_limit
is_extend = (SE_catalog['ELLIPTICITY']>0.7) | (SE_catalog['CLASS_STAR']<0.5)
is_extend = (is_extend | outlier) & bright
SE_catalog_extend = SE_catalog[is_extend]
if len(SE_catalog_extend)>0:
SE_catalog_point = setdiff(SE_catalog, SE_catalog_extend)
return SE_catalog_point, SE_catalog_extend, mag_saturate
else:
return SE_catalog, None, mag_saturate
def clean_isolated_stars(xx, yy, mask, star_pos, pad=0, dist_clean=60):
""" Remove items of stars far away from mask """
star_pos = star_pos + pad
clean = np.zeros(len(star_pos), dtype=bool)
for k, pos in enumerate(star_pos):
rr = np.sqrt((xx-pos[0])**2+(yy-pos[1])**2)
if np.min(rr[~mask]) > dist_clean:
clean[k] = True
return clean
def cal_profile_1d(img, cen=None, mask=None, back=None, bins=None,
color="steelblue", xunit="pix", yunit="Intensity",
seeing=2.5, pixel_scale=DF_pixel_scale, ZP=27.1,
sky_mean=0, sky_std=3, dr=1,
lw=2, alpha=0.7, markersize=5, I_shift=0,
core_undersample=False, figsize=None,
label=None, plot_line=False, mock=False,
plot=True, errorbar=False,
scatter=False, fill=False, use_annulus=False):
"""
Calculate 1d radial profile of a given star postage.
"""
if mask is None:
mask = np.zeros_like(img, dtype=bool)
if back is None:
back = np.ones_like(img) * sky_mean
bkg_val = np.median(back)
if cen is None:
cen = (img.shape[1]-1)/2., (img.shape[0]-1)/2.
if use_annulus:
img[mask] = np.nan
yy, xx = np.indices(img.shape)
rr = np.sqrt((xx - cen[0])**2 + (yy - cen[1])**2)
r = rr[~mask].ravel() # radius in pix
z = img[~mask].ravel() # pixel intensity
r_core = np.int32(2 * seeing) # core radius in pix
# Decide the outermost radial bin r_max before going into the background
bkg_cumsum = np.arange(1, len(z)+1, 1) * bkg_val
z_diff = abs(z.cumsum() - bkg_cumsum)
n_pix_max = len(z) - np.argmin(abs(z_diff - 0.00005 * z_diff[-1]))
r_max = np.min([img.shape[0]//2, np.sqrt(n_pix_max/np.pi)])
if xunit == "arcsec":
r *= pixel_scale # radius in arcsec
r_core *= pixel_scale
r_max *= pixel_scale
d_r = dr * pixel_scale
else:
d_r = dr
with warnings.catch_warnings():
warnings.simplefilter('ignore')
clip = lambda z: sigma_clip((z), sigma=5, maxiters=5)
if bins is None:
# Radial bins: discrete/linear within r_core + log beyond it
if core_undersample:
# for undersampled core, bin at int pixels
bins_inner = np.unique(r[r<r_core]) - 1e-3
else:
n_bin_inner = int(min((r_core/d_r*2), 6))
bins_inner = np.linspace(0, r_core-d_r, n_bin_inner) - 1e-3
n_bin_outer = np.max([6, np.min([np.int32(r_max/d_r/10), 50])])
if r_max > (r_core+d_r):
bins_outer = np.logspace(np.log10(r_core+d_r),
np.log10(r_max+2*d_r), n_bin_outer)
else:
bins_outer = []
bins = np.concatenate([bins_inner, bins_outer])
_, bins = np.histogram(r, bins=bins)
# Calculate binned 1d profile
r_rbin = np.array([])
z_rbin = np.array([])
zerr_rbin = np.array([])
for k, b in enumerate(bins[:-1]):
r_in, r_out = bins[k], bins[k+1]
in_bin = (r>=r_in) & (r<=r_out)
if use_annulus:
            # Fractional overlap w/ annulus
annl = CircularAnnulus(cen, abs(r_in)/pixel_scale, r_out/pixel_scale)
annl_ma = annl.to_mask()
# Intensity by fractional mask
z_ = annl_ma.multiply(img)
zb = np.sum(z_[~np.isnan(z_)]) / annl.area
zerr_b = sky_std / annl.area
rb = np.mean(r[in_bin])
else:
z_clip = clip(z[~np.isnan(z) & in_bin])
if np.ma.is_masked(z_clip):
z_clip = z_clip.compressed()
if len(z_clip)==0:
continue
zb = np.mean(z_clip)
zstd_b = np.std(z_clip) if len(z_clip) > 10 else 0
zerr_b = np.sqrt((zstd_b**2 + sky_std**2) / len(z_clip))
rb = np.mean(r[in_bin])
z_rbin = np.append(z_rbin, zb)
zerr_rbin = np.append(zerr_rbin, zerr_b)
r_rbin = np.append(r_rbin, rb)
logzerr_rbin = 0.434 * abs( zerr_rbin / (z_rbin-sky_mean))
if yunit == "SB":
I_rbin = Intensity2SB(z_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) + I_shift
if plot:
if figsize is not None:
plt.figure(figsize=figsize)
if yunit == "Intensity":
# plot radius in Intensity
plt.plot(r_rbin, np.log10(z_rbin), "-o", color=color,
mec="k", lw=lw, ms=markersize, alpha=alpha, zorder=3, label=label)
if scatter:
I = np.log10(z)
if fill:
plt.fill_between(r_rbin, np.log10(z_rbin)-logzerr_rbin, np.log10(z_rbin)+logzerr_rbin,
color=color, alpha=0.2, zorder=1)
plt.ylabel("log Intensity")
elif yunit == "SB":
# plot radius in Surface Brightness
if mock is False:
I_sky = -2.5*np.log10(sky_std) + ZP + 2.5 * math.log10(pixel_scale**2)
p = plt.plot(r_rbin, I_rbin, "-o", mec="k",
lw=lw, ms=markersize, color=color,
alpha=alpha, zorder=3, label=label)
if scatter:
I = Intensity2SB(z, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) + I_shift
if errorbar:
Ierr_rbin_up = I_rbin - Intensity2SB(z_rbin+zerr_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) - I_shift
Ierr_rbin_lo = Intensity2SB(z_rbin-zerr_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) - I_rbin + I_shift
lolims = np.isnan(Ierr_rbin_lo)
uplims = np.isnan(Ierr_rbin_up)
Ierr_rbin_lo[lolims] = 99
Ierr_rbin_up[uplims] = np.nan
plt.errorbar(r_rbin, I_rbin, yerr=[Ierr_rbin_up, Ierr_rbin_lo],
fmt='', ecolor=p[0].get_color(), capsize=2, alpha=0.5)
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.gca().invert_yaxis()
plt.ylim(30,17)
plt.xscale("log")
plt.xlim(max(r_rbin[np.isfinite(r_rbin)][0]*0.8, pixel_scale*0.5),
r_rbin[np.isfinite(r_rbin)][-1]*1.2)
if xunit == "arcsec":
plt.xlabel("Radius [arcsec]")
else:
plt.xlabel("radius [pix]")
if scatter:
plt.scatter(r[r<3*r_core], I[r<3*r_core], color=color,
s=markersize/2, alpha=alpha/2, zorder=1)
plt.scatter(r[r>=3*r_core], I[r>=3*r_core], color=color,
s=markersize/5, alpha=alpha/10, zorder=1)
    # Decide the radius within which the intensity is saturated for bright stars (where the intensity drops by half)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
dz_rbin = np.diff(np.log10(z_rbin))
dz_cum = np.cumsum(dz_rbin)
if plot_line:
r_satr = r_rbin[np.argmax(dz_cum<-0.5)] + 1e-3
plt.axvline(r_satr,color="k",ls="--",alpha=0.9)
plt.axvline(r_core,color="k",ls=":",alpha=0.9)
if yunit == "SB":
plt.axhline(I_sky,color="gray",ls="-.",alpha=0.7)
if yunit == "Intensity":
return r_rbin, z_rbin, logzerr_rbin
elif yunit == "SB":
return r_rbin, I_rbin, None
def make_psf_2D(n_s, theta_s,
frac=0.3, beta=6.6, fwhm=6.,
cutoff_param={"cutoff":False, "n_c":4, "theta_c":1200},
psf_range=1200, pixel_scale=DF_pixel_scale, plot=False):
"""
Make 2D PSF from parameters.
Parameters
----------
n_s : 1d list or array
Power index of PSF aureole.
theta_s : 1d list or array
Transition radii of PSF aureole in arcsec.
frac: float
Fraction of aureole [0 - 1]
beta : float
Moffat beta
fwhm : float
Moffat fwhm in arcsec
cutoff_param : dict, optional
        Parameters controlling the cutoff.
psf_range : int, optional, default 1200
Range of image_psf. In arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
plot : bool, optional, default False
Whether to plot the PSF image.
Returns
-------
image_psf : 2d array
Image of the PSF. Normalized to 1.
psf : eldflower.modeling.PSF_Model
The PSF model object.
"""
from .modeling import PSF_Model
# Aureole Parameters
params_mpow = {"frac":frac, "fwhm":fwhm, "beta":beta,
"n_s":np.atleast_1d(n_s), "theta_s":np.atleast_1d(theta_s)}
params_mpow.update(cutoff_param)
if cutoff_param["cutoff"]:
psf_range = max(cutoff_param["theta_c"], psf_range)
# Build PSF Model
psf = PSF_Model(params=params_mpow, aureole_model='multi-power')
# Build grid of image for drawing
psf.pixelize(pixel_scale)
if galsim_installed:
# Generate core and aureole PSF
psf_c = psf.generate_core()
psf_e, psf_size = psf.generate_aureole(contrast=1e6,
psf_range=psf_range,
psf_scale=pixel_scale)
        # Plot Galsim 2D model extracted in 1D
if plot: psf.plot1D(xunit='arcsec')
# Center and grid
size = int(np.floor(psf_range/pixel_scale) * 2) + 1
cen = ((size-1)/2., (size-1)/2.)
x_ = y_ = np.linspace(0,size-1,size)
xx, yy = np.meshgrid(x_, y_)
# Draw image of PSF normalized to 1
PSF_aureole = psf.draw_aureole2D_in_real([cen], Flux=np.array([frac]))[0]
PSF_core = psf.draw_core2D_in_real([cen], Flux=np.array([1-frac]))[0]
image_psf = PSF_core(xx,yy) + PSF_aureole(xx,yy)
image_psf = image_psf/image_psf.sum()
return image_psf, psf
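# Usage sketch (parameter values are hypothetical, close to the Dragonfly fiducials):
#   image_psf, psf = make_psf_2D(n_s=[3.3, 2.5], theta_s=[5, 100],
#                                frac=0.3, beta=6.6, fwhm=6.,
#                                psf_range=1200, pixel_scale=2.5)
#   image_psf.sum()   # ~ 1 (normalized)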
def make_psf_1D(n_s, theta_s,
frac=0.3, beta=6.6, fwhm=6.,
cutoff_param={"cutoff":False, "n_c":4, "theta_c":1200},
psf_range=1200, pixel_scale=DF_pixel_scale,
dr=1, mag=0, ZP=0, plot=False):
"""
Make 1D PSF profiles from parameters.
Parameters
----------
n_s : 1d list or array
Power index of PSF aureole.
theta_s : 1d list or array
Transition radii of PSF aureole.
frac: float
Fraction of aureole [0 - 1]
beta : float
Moffat beta
fwhm : float
Moffat fwhm in arcsec
cutoff_param : dict, optional
        Parameters controlling the cutoff.
psf_range : int, optional, default 1200
Range of PSF. In arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
    dr : float, optional, default 1
        Parameter controlling the radial interval
    mag : float, optional, default 0
Magnitude of the PSF.
ZP : float, optional, default 0
Zero point.
plot : bool, optional, default False
Whether to plot the 1D PSF profile.
Returns
-------
r : 1d array
Radius of the profile in arcsec
I : 1d array
Surface brightness in mag/arcsec^2
D : 2d array
Image of the PSF.
"""
Amp = 10**((mag-ZP)/-2.5)
if plot:
print('Scaled 1D PSF to magnitude = ', mag)
size = int(np.floor(psf_range/pixel_scale) * 2) + 1
cen = ((size-1)/2., (size-1)/2.)
D, psf = make_psf_2D(n_s, theta_s, frac, beta, fwhm,
cutoff_param=cutoff_param,
pixel_scale=pixel_scale,
psf_range=psf_range)
D *= Amp
r, I, _ = cal_profile_1d(D, cen=cen, mock=True,
ZP=ZP, sky_mean=0, sky_std=1e-9,
dr=dr, seeing=fwhm,
pixel_scale=pixel_scale,
xunit="arcsec", yunit="SB",
color="lightgreen",
lw=4, alpha=0.9, plot=plot,
core_undersample=True)
if plot:
plt.xlim(2, max(1e3, np.max(2*theta_s)))
plt.ylim(24,4)
for pos in theta_s:
plt.axvline(pos, ls="--", color="k", alpha=0.3, zorder=0)
return r, I, D
def calculate_fit_SB(psf, r=np.logspace(0.03,2.5,100), mags=[15,12,9], ZP=27.1):
frac = psf.frac
I_s = [10**((mag-ZP)/-2.5) for mag in mags]
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
I_tot_s = [Intensity2SB(((1-frac) * comp1 + comp2 * frac) * I,
0, ZP, psf.pixel_scale) for I in I_s]
return I_tot_s
def fit_psf_core_1D(image_psf,
params0=DF_default_params,
theta_out=30, d_theta=1.,
pixel_scale=2.5, beta_max=8.,
obj_name="", band="r",
draw=True, save=False, save_dir='./'):
"""
Fit the core parameters from 1D profiles of the input 2D PSF.
Parameters
----------
image_psf : 2d array
The image of the PSF.
params0 : dict
        Initial guess of parameters of the PSF.
        Dragonfly fiducial values are used if not given.
'frac' : float
Fraction of aureole [0 - 1]
'beta' : float
Moffat beta
'fwhm' : float
Moffat fwhm in arcsec
'n_s' : 1d list or array
Power index of PSF aureole.
Not required to be accurate. n0 is recommended to be close.
'theta_s' : 1d list or array
Transition radii of PSF aureole.
Not required to be accurate.
theta_out : float, optional, default 30
        Max radius of the profile in arcsec.
d_theta : float, optional, default 1.
Radial interval of the profile in arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
beta_max : float, optional, default 8.
Upper bound of the Moffat beta (lower bound is 1.1)
obj_name : str
Object name
band : str, 'g' 'G' 'r' or 'R'
Filter name
draw : bool, optional, default True
Whether to plot the fit.
    save : bool, optional, default False
Whether to save the plot.
save_dir : str, optional
Path of saving plot, default current.
"""
# Read initial guess of parameters
frac, beta, fwhm = [params0.get(prop) for prop in ["frac", "beta", "fwhm"]]
n_s = params0["n_s"]
theta_s = params0["theta_s"]
# Center of the input PSF image
cen = ((image_psf.shape[1]-1)/2., (image_psf.shape[0]-1)/2.)
# Set grid points
d_theta = min(d_theta, pixel_scale)
rp = np.arange(1, theta_out+d_theta, d_theta)
# Calculate 1D profile
r_psf, I_psf, _ = cal_profile_1d(image_psf, cen=cen, dr=0.5,
ZP=0, sky_mean=0, plot=False,
pixel_scale=pixel_scale, seeing=3,
xunit="arcsec", yunit="SB",
core_undersample=True)
# Interpolate at grid points
Ip = np.interp(rp, r_psf, I_psf)
# Guess and bounds for core params
p0 = [frac, beta]
bounds = [(1e-5, 0.5), (1.2, beta_max)]
logger.info("Fitting core parameters from stacked PSF...")
# Define the target function for fitting the core
def make_psf_core_1D(r_intp, frac, beta):
r_1d, I_1d,_ = make_psf_1D(n_s=n_s, theta_s=theta_s,
frac=frac, beta=beta, fwhm=fwhm,
dr=0.5, pixel_scale=pixel_scale)
I_intp = np.interp(r_intp, r_1d, I_1d)
return I_intp
# Fit the curve
popt, pcov = curve_fit(make_psf_core_1D, rp, Ip, p0, bounds=bounds)
frac, beta = popt
frac_err, beta_err = np.sqrt(pcov[0,0]), np.sqrt(pcov[1,1])
logger.info(" - frac = {:.3f}+/-{:.3f}".format(frac, frac_err))
logger.info(" - beta = {:.3f}+/-{:.3f}".format(beta, beta_err))
logger.info(" - fwhm = {:.3f} from stacking".format(fwhm))
if draw:
I_fit = make_psf_core_1D(rp, frac, beta)
plt.plot(rp, I_fit, 'r-o', ms=5, label='Fit')
plt.plot(rp, Ip, 'y-o', mfc='None', mec='y', label='Data')
plt.ylim(I_fit.max()+0.5, I_fit.min()-0.5)
plt.xlim(-2, theta_out * 1.1)
plt.xlabel("Radius [arcsec]")
plt.ylabel("Surface Brightness")
plt.legend()
if save:
fn_plot = f'Fit_core_{obj_name}-{band}.png'
plt.savefig(os.path.join(save_dir, fn_plot))
plt.show()
return frac, beta
### Resampling functions ###
def transform_rescale(val, scale=0.5):
""" transform coordinates after resampling """
return (val-1) * scale + scale/2. + 0.5
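# Worked example of the 1-based FITS coordinate transform: a source at
# X_IMAGE = 100 in the original frame lands at (100-1)*0.5 + 0.25 + 0.5 = 50.25
# after downsampling by a factor of 2 (scale=0.5):
#   transform_rescale(100, scale=0.5)   # -> 50.25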
def transform_table_coordinates(table, filename, scale=0.5):
""" transform coordinates in a table and write to a new one """
table_ = table.copy()
    # transform coordinates for X/Y_IMAGE and A/B_IMAGE
    for coln in table_.colnames:
        if 'IMAGE' in coln:
            if ('X' in coln) | ('Y' in coln):
table_[coln] = transform_rescale(table[coln], scale)
else:
table_[coln] *= scale
table_.write(filename, format='ascii', overwrite=True)
def downsample_wcs(wcs_input, scale=0.5):
""" Downsample the input wcs along an axis using {CDELT, CRPIX} FITS convention """
header = wcs_input.to_header(relax=True)
shape = wcs_input.pixel_shape
if 'PC1_1' in header.keys():
cdname = 'PC'
elif 'CD1_1' in header.keys():
cdname = 'CD'
elif 'CDELT1' in header.keys():
cdname = 'CDELT'
else:
msg = 'Fits header has no proper coordinate info (CD, PC, CDELT)!'
logger.error(msg)
raise KeyError(msg)
for axis in [1, 2]:
if cdname == 'PC':
cd = 'PC{0:d}_{0:d}'.format(axis)
elif cdname == 'CD':
cd = 'CD{0:d}_{0:d}'.format(axis)
elif cdname=='CDELT':
cd = 'CDELT{0:d}'.format(axis)
cp = 'CRPIX{0:d}'.format(axis)
na = 'NAXIS{0:d}'.format(axis)
header[cp] = transform_rescale(header[cp], scale)
header[cd] = header[cd]/scale
header[na] = int(round(shape[axis-1]*scale))
return wcs.WCS(header)
def write_downsample_fits(fn, fn_out,
scale=0.5, order=3,
keyword_preserved=['NFRAMES', 'BACKVAL',
'EXP_EFF', 'FILTNAM'],
wcs_out=None):
"""
Write fits data downsampled by factor.
Alternatively a target wcs can be provided.
Parameters
----------
fn: str
full path of fits file
fn_out: str
full path of output fits file
scale: int, optional, default 0.5
scaling factor
order: int, optional, default 3 ('bicubic')
order of interpolation (see docs of reproject)
keyword_preserved: str list
list of keyword to preserve in the output fits
wcs_out: wcs, optional, default None
output target wcs. must have shape info.
If given, scale will be overriden.
Notes
-----
    If the output image contains all nan, it is likely that the reprojection
    failed; try using lower orders, or replace the nan values.
"""
if reproject_install == False:
logger.warning('Module reproject not installed.')
return None
# read fits
header = fits.getheader(fn)
data = fits.getdata(fn)
wcs_input = wcs.WCS(header)
if (wcs_out is not None) & hasattr(wcs_out, 'pixel_shape'):
# use input wcs and shape
shape_out = wcs_out.pixel_shape
        logger.info('Rescaling with given shape: {}'.format(shape_out))
else:
# make new wcs and shape according to scale factor
wcs_out = downsample_wcs(wcs_input, scale)
shape_out = (int(data.shape[0]*scale), int(data.shape[1]*scale))
logger.info('Rescaling with factor: {}'.format(scale))
# reproject the image by new wcs
data_rp, _ = reproject_interp((data, wcs_input), wcs_out,
shape_out=shape_out, order=order)
# write new header
header_out = wcs_out.to_header()
    for key in keyword_preserved:
if key in header.keys():
header_out[key] = header[key]
# write new fits
fits.writeto(fn_out, data_rp, header=header_out, overwrite=True)
logger.info('Resampled image saved to: {}'.format(fn_out))
return True
def downsample_segmentation(fn, fn_out, scale=0.5):
""" Downsample segmentation and write to fits """
from scipy.ndimage import zoom
if os.path.isfile(fn):
segm = fits.getdata(fn)
        segm_out = zoom(segm, zoom=scale, order=1)
fits.writeto(fn_out, segm_out, overwrite=True)
else:
pass
def process_resampling(fn, bounds, obj_name, band,
pixel_scale=DF_pixel_scale, r_scale=12,
mag_limit=15, dir_measure='./', work_dir='./',
factor=1, verbose=True):
from .image import ImageList
# turn bounds_list into 2d array
bounds = np.atleast_2d(bounds).astype(int)
if factor!=1:
if verbose:
logger.info('Resampling by a factor of {0:.1g}...'.format(factor))
scale = 1/factor
fn_rp = "{0}_{2}.{1}".format(*os.path.basename(fn).rsplit('.', 1) + ['rp'])
fn_rp = os.path.join(work_dir, fn_rp)
bounds_rp = np.array([np.round(b_*scale) for b_ in bounds], dtype=int)
# resample image if it does not exist
if not os.path.exists(fn_rp):
write_downsample_fits(fn, fn_rp, scale, order=3)
# construct Image List for original image
DF_Images = ImageList(fn, bounds, obj_name, band,
pixel_scale=pixel_scale)
# read faint stars info and brightness measurement
DF_Images.read_measurement_tables(dir_measure,
r_scale=r_scale,
mag_limit=mag_limit)
# new quantities and names
r_scale *= scale
pixel_scale /= scale
obj_name_rp = obj_name + '_rp'
if verbose:
logger.info('Transforming coordinates for measurement tables...')
for Img, bound, bound_rp in zip(DF_Images, bounds, bounds_rp):
# transform coordinates and write as new tables
old_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound)
new_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound_rp)
table_faint, table_norm = Img.table_faint, Img.table_norm
fn_catalog = os.path.join(dir_measure,
"%s-catalog_PS_%s_all.txt"%(obj_name_rp, band.lower()))
mag_str = convert_decimal_string(mag_limit)
fn_norm = os.path.join(dir_measure, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name_rp, r_scale, band.lower(), mag_str, new_range))
transform_table_coordinates(table_faint, fn_catalog, scale)
transform_table_coordinates(table_norm, fn_norm, scale)
# reproject segmentation
if verbose:
                logger.info('Resampling segmentation for bounds: {}'.format(bound))
fn_seg = os.path.join(dir_measure,
"%s-segm_%s_catalog_%s.fits"\
%(obj_name, band.lower(), old_range))
fn_seg_out = os.path.join(dir_measure, "%s-segm_%s_catalog_%s.fits"\
%(obj_name_rp, band.lower(), new_range))
downsample_segmentation(fn_seg, fn_seg_out, scale)
else:
fn_rp, bounds_rp = fn, bounds
return fn_rp, bounds_rp
### Catalog / Data Manipulation Helper ###
def id_generator(size=6, chars=None):
if chars is None:
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def crop_catalog(cat, bounds, keys=("X_IMAGE", "Y_IMAGE"), sortby=None):
Xmin, Ymin, Xmax, Ymax = bounds
A, B = keys
crop = (cat[A]>=Xmin) & (cat[A]<=Xmax) & (cat[B]>=Ymin) & (cat[B]<=Ymax)
if sortby is not None:
cat_crop = cat[crop]
cat_crop.sort(keys=sortby)
return cat_crop
else:
return cat[crop]
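# Usage sketch (illustrative; `SE_cat` is an assumed SExtractor table with
# X_IMAGE / Y_IMAGE columns):
#   >>> bounds = (500, 500, 1500, 1500)          # Xmin, Ymin, Xmax, Ymax in pixels
#   >>> cat_sub = crop_catalog(SE_cat, bounds, sortby='MAG_AUTO')
# Only rows whose (X_IMAGE, Y_IMAGE) fall inside the bounds are kept, sorted by magnitude.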
def crop_pad(image, pad):
""" Crop the padding of the image """
shape = image.shape
return image[pad:shape[0]-pad, pad:shape[1]-pad]
def crop_image(data, bounds, wcs=None, draw=False, **kwargs):
""" Crop the data (and segm map if given) with the given bouds.
Note boundaries are in 1-based pixel coordianates. """
Xmin, Ymin, Xmax, Ymax = bounds
# X, Y image size
nX, nY = (Xmax-Xmin, Ymax-Ymin)
# center in 1-based pixel coordinates
cen = (Xmin+(nX-1)/2., Ymin+(nY-1)/2.)
# make cutout
cutout = Cutout2D(data, cen, (nY, nX), wcs=wcs)
if draw:
from .plotting import draw_bounds
draw_bounds(data, bounds, **kwargs)
# also return cutout of wcs if given
if wcs is None:
return cutout.data
else:
return cutout.data, cutout.wcs
def transform_coords2pixel(table, wcs, name='',
RA_key="RAJ2000", DE_key="DEJ2000", origin=1):
""" Transform the RA/DEC columns in the table into pixel coordinates given wcs"""
coords = np.vstack([np.array(table[RA_key]),
np.array(table[DE_key])]).T
pos = wcs.wcs_world2pix(coords, origin)
table.add_column(Column(np.around(pos[:,0], 4)*u.pix), name="X_CATALOG")
table.add_column(Column(np.around(pos[:,1], 4)*u.pix), name="Y_CATALOG")
table.add_column(Column(np.arange(len(table))+1, dtype=int),
index=0, name="ID"+'_'+name)
return table
def merge_catalog(SE_catalog, table_merge, sep=5 * u.arcsec,
RA_key="RAJ2000", DE_key="DEJ2000", keep_columns=None):
""" Crossmatch and merge two catalogs by coordinates"""
c_SE = SkyCoord(ra=SE_catalog["X_WORLD"], dec=SE_catalog["Y_WORLD"], unit=u.deg)
c_tab = SkyCoord(ra=table_merge[RA_key], dec=table_merge[DE_key])
idx, d2d, d3d = c_SE.match_to_catalog_sky(c_tab)
match = d2d < sep
cat_SE_match = SE_catalog[match]
cat_tab_match = table_merge[idx[match]]
cat_tab_match.add_column(cat_SE_match["NUMBER"], index=0, name="NUMBER")
cat_match = join(cat_SE_match, cat_tab_match, keys='NUMBER')
if keep_columns is not None:
cat_match.keep_columns(keep_columns)
return cat_match
def read_measurement_table(dir_name, bounds0,
obj_name='', band='G',
pad=50, r_scale=12,
mag_limit=15):
""" Read measurement tables from the directory """
use_PS1_DR2 = True if 'PS2' in dir_name else False
# Magnitude name
b_name = band.lower()
mag_name = b_name+'MeanPSFMag' if use_PS1_DR2 else b_name+'mag'
# Clipped bounds
patch_Xmin0, patch_Ymin0, patch_Xmax0, patch_Ymax0 = bounds0
bounds = (patch_Xmin0+pad, patch_Ymin0+pad,
patch_Xmax0-pad, patch_Ymax0-pad)
## Read measurement for faint stars from catalog
# Faint star catalog name
fname_catalog = os.path.join(dir_name, "%s-catalog_PS_%s_all.txt"%(obj_name, b_name))
# Check if the file exist before read
assert os.path.isfile(fname_catalog), f"Table {fname_catalog} does not exist!"
logger.debug(f"Reading catalog {fname_catalog}.")
table_catalog = Table.read(fname_catalog, format="ascii")
mag_catalog = table_catalog[mag_name]
    # stars fainter than the magnitude limit are fixed as background; those fainter than 22 mag are ignored
table_faint = table_catalog[(mag_catalog>=mag_limit) & (mag_catalog<22)]
table_faint = crop_catalog(table_faint,
keys=("X_CATALOG", "Y_CATALOG"),
bounds=bounds)
## Read measurement for bright stars
# Catalog name
mag_str = convert_decimal_string(mag_limit)
range_str = "X[{:d}-{:d}]Y[{:d}-{:d}]"
range_str = range_str.format(patch_Xmin0, patch_Xmax0, patch_Ymin0, patch_Ymax0)
fname_norm = os.path.join(dir_name, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name, r_scale, b_name, mag_str, range_str))
# Check if the file exist before read
assert os.path.isfile(fname_norm), f"Table {fname_norm} does not exist"
logger.debug(f"Reading catalog {fname_norm}.")
table_norm = Table.read(fname_norm, format="ascii")
# Crop the catalog
table_norm = crop_catalog(table_norm, bounds=bounds0)
# Do not use flagged measurement
Iflag = table_norm["Iflag"]
table_norm = table_norm[Iflag==0]
return table_faint, table_norm
def assign_star_props(ZP, sky_mean, image_shape, pos_ref,
table_norm, table_faint=None,
r_scale=12, mag_threshold=[13.5,12],
psf=None, keys='Imed', verbose=True,
draw=True, save=False, save_dir='./'):
""" Assign position and flux for faint and bright stars from tables. """
from .modeling import Stars
# Positions & Flux (estimate) of bright stars from measured norm
star_pos = np.vstack([table_norm["X_IMAGE"],
table_norm["Y_IMAGE"]]).T - pos_ref
mag = table_norm['MAG_AUTO_corr'] if 'MAG_AUTO_corr' in table_norm.colnames else table_norm['MAG_AUTO']
Flux = 10**((np.array(mag)-ZP)/(-2.5))
# Estimate of brightness I at r_scale (I = Intensity - BKG) and flux
z_norm = table_norm['Imed'].data - table_norm['Isky'].data
z_norm[z_norm<=0] = min(1, z_norm[z_norm>0].min())
# Convert and printout thresholds
Flux_threshold = 10**((np.array(mag_threshold) - ZP) / (-2.5))
if verbose:
msg = "Magnitude Thresholds: {0}, {1} mag"
msg = msg.format(*mag_threshold)
logger.info(msg)
msg = "Flux Thresholds: {0}, {1} ADU"
msg = msg.format(*np.around(Flux_threshold,2))
logger.info(msg)
try:
SB_threshold = psf.Flux2SB(Flux_threshold, BKG=sky_mean, ZP=ZP, r=r_scale)
msg = "Surface Brightness Thresholds: {0}, {1} mag/arcsec^2 "
msg = msg.format(*np.around(SB_threshold,1))
msg += "at {0} pix for sky = {1:.3f}".format(r_scale, sky_mean)
            logger.info(msg)
except:
pass
# Bright stars in model
stars_bright = Stars(star_pos, Flux, Flux_threshold=Flux_threshold,
z_norm=z_norm, r_scale=r_scale, BKG=sky_mean)
stars_bright = stars_bright.remove_outsider(image_shape, gap=[3*r_scale, r_scale])
stars_bright._info()
if (table_faint is not None) & ('MAG_AUTO_corr' in table_faint.colnames):
table_faint['FLUX_AUTO_corr'] = 10**((table_faint['MAG_AUTO_corr']-ZP)/(-2.5))
try:
ma = table_faint['FLUX_AUTO_corr'].data.mask
except AttributeError:
ma = np.isnan(table_faint['FLUX_AUTO_corr'])
# Positions & Flux of faint stars from catalog
star_pos_faint = np.vstack([table_faint["X_CATALOG"].data[~ma],
table_faint["Y_CATALOG"].data[~ma]]).T - pos_ref
Flux_faint = np.array(table_faint['FLUX_AUTO_corr'].data[~ma])
# Combine two samples, make sure they do not overlap
star_pos = np.vstack([star_pos, star_pos_faint])
Flux = np.concatenate([Flux, Flux_faint])
stars_all = Stars(star_pos, Flux, Flux_threshold, BKG=sky_mean)
if draw:
stars_all.plot_flux_dist(label='All', color='plum')
stars_bright.plot_flux_dist(label='Model', color='orange', ZP=ZP,
save=save, save_dir=save_dir)
plt.show()
return stars_bright, stars_all
def interp_I0(r, I, r0, r1, r2):
""" Interpolate I0 at r0 with I(r) between r1 and r2 """
range_intp = (r>r1) & (r<r2)
    logI0 = np.interp(r0, r[range_intp], np.log10(I[range_intp]))
return 10**logI0
def compute_mean_I(r, I, r1, r2):
""" Compute mean I under I(r) between r1 and r2 """
range_intg = (r>r1) & (r<r2)
r_range = r[range_intg]
return np.trapz(I[range_intg], r_range)/(r_range.max()-r_range.min())
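# Example (illustrative): for a profile sampled at r = [10, 20, 30, 40] with
# I = [100, 50, 25, 12.5], interpolating at r0 = 25 within (r1, r2) = (15, 45)
# uses the points at r = 20 and 30 and interpolates log10(I) linearly in r:
#   >>> interp_I0(np.array([10., 20, 30, 40]), np.array([100., 50, 25, 12.5]), 25, 15, 45)
# returns ~35.4 (the geometric mean of 50 and 25), not the arithmetic mean 37.5.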
def fit_n0(dir_measure, bounds,
obj_name, band, BKG, ZP,
pixel_scale=DF_pixel_scale,
fit_range=[20,40], dr=0.1,
N_fit=15, mag_max=13, mag_limit=15,
I_norm=24, norm='intp',
r_scale=12, sky_std=3,
plot_brightest=True, draw=True,
save=False, save_dir="./"):
"""
    Fit the first power-law index (n0) of the PSF using bright stars.
Parameters
----------
dir_measure : str
Directory storing the measurement
bounds : 1d list, [Xmin, Ymin, Xmax, Ymax]
Fitting boundary
band : str, 'g' 'G' 'r' or 'R'
Filter name
obj_name : str
Object name
BKG : float
Background value for profile measurement
ZP : float
Zero-point
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
fit_range : 2-list, optional, default [20, 40]
Range for fitting in arcsec
    dr : float, optional, default 0.1
        Profile step parameter
N_fit : int, optional, default 15
Number of stars used to fit n0
mag_max : float, optional, default 13
Max magnitude of stars used to fit n0
I_norm : float, optional, default 24
SB at which profiles are normed
    norm : 'intp' or 'intg', optional, default 'intp'
Normalization method to scale profiles.
Use mean value by 'intg', use interpolated value by 'intp'
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured
Default is 30" for Dragonfly.
mag_limit : float, optional, default 15
        Magnitude upper limit below which stars are measured
sky_std : float, optional, default 3
Sky stddev (for display only)
plot_brightest : bool, optional, default True
Whether to draw profile of the brightest star
draw : bool, optional, default True
Whether to draw profiles and fit process
    save : bool, optional, default False
Whether to save plot.
save_dir : str, optional
Full path of saving plot, default current.
Returns
-------
n0 : float
first power index
d_n0 : float
uncertainty of n0
"""
Xmin, Ymin, Xmax, Ymax = bounds
r1, r2 = fit_range
r0 = r_scale*pixel_scale
if r1<r0<r2:
# read result thumbnail and norm table
b = band.lower()
mag_str = convert_decimal_string(mag_limit)
range_str = f'X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]'
fn_res_thumb = os.path.join(dir_measure, f'{obj_name}-thumbnail_{b}mag{mag_str}_{range_str}.pkl')
fn_tab_norm = os.path.join(dir_measure, f'{obj_name}-norm_{r_scale}pix_{b}mag{mag_str}_{range_str}.txt')
res_thumb = load_pickle(fn_res_thumb)
tab_norm = Table.read(fn_tab_norm, format='ascii')
if draw:
fig, ax = plt.subplots(1,1,figsize=(8,6))
else:
fig, ax, ax_ins = None, None, None
# r_rbin: r in arcsec, I_rbin: SB in mag/arcsec^2
        # I_r0: SB at r0; In_rbin: SB shifted so that SB(r0) = I_norm
r_rbin_all, I_rbin_all = np.array([]), np.array([])
I_r0_all, In_rbin_all = np.array([]), np.array([])
tab_fit = tab_norm[tab_norm['MAG_AUTO_corr']<mag_max][:N_fit]
if len(tab_fit)==0:
msg = "No enought bright stars in this region.\n"
msg += "Use guess n0=3, dn0=0.3. Include n0 in the fitting."
logger.warning(msg)
return None, None
logger.info("Fit n0 with profiles of %d bright stars..."%(len(tab_fit)))
for num in tab_fit['NUMBER']:
res = res_thumb[num]
img, ma, cen = res['image'], res['mask'], res['center']
bkg = np.median(res_thumb[num]['bkg'])
sky_mean = bkg if BKG is None else BKG
# calculate 1d profile
r_rbin, I_rbin, _ = cal_profile_1d(img, cen=cen, mask=ma,
ZP=ZP, sky_mean=sky_mean, sky_std=sky_std,
xunit="arcsec", yunit="SB",
errorbar=False, dr=dr,
pixel_scale=pixel_scale,
core_undersample=False, plot=False)
range_intp = (r_rbin>r1) & (r_rbin<r2)
if len(r_rbin[range_intp]) > 5:
if norm=="intp":
# interpolate I0 at r0, r in arcsec
I_r0 = interp_I0(r_rbin, I_rbin, r0, r1, r2)
elif norm=="intg":
I_r0 = compute_mean_I(r_rbin, I_rbin, r1, r2)
r_rbin_all = np.append(r_rbin_all, r_rbin)
I_rbin_all = np.append(I_rbin_all, I_rbin)
I_r0_all = np.append(I_r0_all, I_r0)
In_rbin_all = np.append(In_rbin_all, I_rbin-I_r0+I_norm)
if draw:
cal_profile_1d(img, cen=cen, mask=ma, dr=1,
ZP=ZP, sky_mean=sky_mean, sky_std=2.8,
xunit="arcsec", yunit="SB", errorbar=False,
pixel_scale=pixel_scale,
core_undersample=False, color='steelblue', lw=2,
I_shift=I_norm-I_r0, markersize=0, alpha=0.2)
if plot_brightest & draw:
num = list(res_thumb.keys())[0]
img0, ma0, cen0 = res_thumb[num]['image'], res_thumb[num]['mask'], res_thumb[num]['center']
cal_profile_1d(img0, cen=cen0, mask=ma0, dr=0.8,
ZP=ZP, sky_mean=BKG, sky_std=sky_std,
xunit="arcsec", yunit="SB", errorbar=True,
pixel_scale=pixel_scale,
core_undersample=False, color='k', lw=3,
I_shift=I_norm-I_r0_all[0], markersize=8, alpha=0.9)
ax.annotate("Brightest",(r0-4.8*pixel_scale, I_norm+1.5),fontsize=12)
if draw:
ax.set_xlim(1.5*pixel_scale, 4e2)
ax.set_ylim(I_norm+6.5,I_norm-7.5)
ax.axvspan(r1, r2, color='gold', alpha=0.1)
ax.axvline(r0, ls='--',color='k', alpha=0.9, zorder=1)
ax.set_xscale('log')
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
ax_ins = inset_axes(ax, width="35%", height="35%",
bbox_to_anchor=(-0.02,-0.02, 1, 1),
bbox_transform=ax.transAxes)
else:
msg = "r0 is out of fit_range! Try change fit_range to include r_scale.\n"
msg += "Use guess n0=3, dn0=0.3. n0 will be included in the fitting."
logger.warning(msg)
return 3, 0.3
if norm=="intp":
p_log_linear = partial(log_linear, x0=r0, y0=I_norm)
popt, pcov, clip_func = iter_curve_fit(r_rbin_all, In_rbin_all, p_log_linear,
x_min=r1, x_max=r2, p0=10,
bounds=(3, 15), n_iter=3, k_std=10,
x_lab='R [arcsec]', y_lab='MU [mag/arcsec2]',
draw=draw, fig=fig, ax=ax_ins)
r_n, I_n = r0, I_norm
elif norm=="intg":
p_log_linear = partial(log_linear)
popt, pcov, clip_func = iter_curve_fit(r_rbin_all, In_rbin_all, p_log_linear,
x_min=r1, x_max=r2, p0=(10, r0, I_norm),
bounds=([3,r1,I_norm-1], [15,r2,I_norm+1]),
x_lab='R [arcsec]', y_lab='MU [mag/arcsec2]',
n_iter=3, k_std=10,
draw=draw, fig=fig, ax=ax_ins)
r_n, I_n = r0, p_log_linear(r0, *popt)
if draw:
ax.scatter(r_n, I_n, marker='*',color='r', s=300, zorder=4)
ax_ins.set_ylim(I_norm+1.75, I_norm-2.25)
ax_ins.axvline(r0, lw=2, ls='--', color='k', alpha=0.7, zorder=1)
ax_ins.axvspan(r1, r2, color='gold', alpha=0.1)
ax_ins.scatter(r_n, I_n, marker='*',color='r', s=200, zorder=4)
ax_ins.tick_params(direction='in',labelsize=14)
ax_ins.set_ylabel('')
if save:
fn_plot = f'Fit_n0_{range_str}_{obj_name}-{b}.png'
plt.savefig(os.path.join(save_dir, fn_plot))
plt.show()
# I ~ klogr; m = -2.5logF => n = k/2.5
n0, d_n0 = popt[0]/2.5, np.sqrt(pcov[0,0])/2.5
logger.info(" - n0 = {:.4f}+/-{:.4f}".format(n0, d_n0))
return n0, d_n0
## Add supplementary stars
def add_supplementary_atlas(tab, tab_atlas, SE_catalog,
sep=3*u.arcsec, mag_saturate=13):
""" Add unmatched bright (saturated) stars using HLSP ATLAS catalog. """
    if np.sum(tab['MAG_AUTO_corr'] < mag_saturate) < 1:
return tab
logger.info("Adding unmatched bright stars from HLSP ATLAS catalog.")
    if not os.path.isfile(tab_atlas):
        logger.error("No ATLAS catalog is found.")
        return tab
# cross match SE catalog and ATLAS catalog
coords_atlas = SkyCoord(tab_atlas['RA'], tab_atlas['Dec'], unit=u.deg)
coords_SE = SkyCoord(SE_catalog['X_WORLD'],SE_catalog['Y_WORLD'])
idx, d2d, _ = coords_SE.match_to_catalog_sky(coords_atlas)
match = d2d < sep
SE_catalog_match = SE_catalog[match]
tab_atlas_match = tab_atlas[idx[match]]
# add ATLAS mag to the table
SE_catalog_match['gmag_atlas'] = tab_atlas_match['g']
SE_catalog_match.sort('gmag_atlas')
# add supplementary stars (bright stars failed to matched)
cond_sup = (SE_catalog_match['MAG_AUTO']<mag_saturate)
SE_catalog_sup = SE_catalog_match[cond_sup]
num_SE_sup = np.setdiff1d(SE_catalog_sup['NUMBER'], tab['NUMBER'])
# make a new table containing unmatched bright stars
use_cols = SE_COLUMNS + ['gmag_atlas']
tab_sup = Table(dtype=SE_catalog_sup[use_cols].dtype)
for num in num_SE_sup:
row = SE_catalog_sup[SE_catalog_sup['NUMBER']==num][0]
tab_sup.add_row(row[use_cols])
# add color term to MAG_AUTO
CT = calculate_color_term(SE_catalog_match,
mag_range=[mag_saturate,18],
mag_name='gmag_atlas', draw=False)
tab_sup['MAG_AUTO_corr'] = tab_sup['gmag_atlas'] + CT
tab_sup.add_columns([tab_sup['X_IMAGE'], tab_sup['Y_IMAGE']],
names=['X_CATALOG', 'Y_CATALOG'])
# Join the two tables by common keys
keys = set(tab.colnames).intersection(tab_sup.colnames)
if len(tab_sup) > 0:
tab_join = join(tab, tab_sup, keys=keys, join_type='outer')
tab_join.sort('MAG_AUTO_corr')
return tab_join
else:
return tab
def add_supplementary_SE_star(tab, SE_catatlog,
mag_saturate=13, mag_limit=15, draw=True):
"""
    Add unmatched bright (saturated) stars in SE_catatlog to tab.
Magnitude is corrected by interpolation from other matched stars.
"""
    if np.sum(tab['MAG_AUTO_corr'] < mag_saturate) < 5:
return tab
logger.info("Adding unmatched bright stars based on SE measurements...")
# Empirical function to correct MAG_AUTO for saturation
# Fit a sigma-clipped piecewise linear
popt, _, clip_func = iter_curve_fit(tab['MAG_AUTO'], tab['MAG_AUTO_corr'],
piecewise_linear, x_max=mag_limit, n_iter=5, k_std=10,
p0=(1, 2, mag_saturate, mag_saturate),
bounds=(0.9, [2, 4, mag_limit, mag_limit]),
x_lab='MAG_AUTO', y_lab='MAG_AUTO corr', draw=draw)
# Empirical corrected magnitude
f_corr = lambda x: piecewise_linear(x, *popt)
mag_corr = f_corr(tab['MAG_AUTO'])
# Remove rows with large magnitude offset
loc_rm = np.where(abs(tab['MAG_AUTO_corr']-mag_corr)>2)
if draw:
plt.scatter(tab[loc_rm]['MAG_AUTO'], tab[loc_rm]['MAG_AUTO_corr'],
marker='s', s=40, facecolors='none', edgecolors='lime')
plt.xlim(mag_limit, tab['MAG_AUTO'].min())
plt.ylim(mag_limit, tab['MAG_AUTO_corr'].min())
plt.show()
tab.remove_rows(loc_rm[0])
# Add supplementary stars (bright stars failed to matched)
cond_sup = (SE_catatlog['MAG_AUTO']<mag_saturate) & (SE_catatlog['CLASS_STAR']>0.7)
SE_cat_bright = SE_catatlog[cond_sup]
num_SE_sup = np.setdiff1d(SE_cat_bright['NUMBER'], tab['NUMBER'])
# make a new table containing all unmatched bright stars
tab_sup = Table(dtype=SE_cat_bright.dtype)
for num in num_SE_sup:
row = SE_cat_bright[SE_cat_bright['NUMBER']==num][0]
tab_sup.add_row(row)
# add corrected MAG_AUTO
tab_sup['MAG_AUTO_corr'] = f_corr(tab_sup['MAG_AUTO'])
tab_sup.add_columns([tab_sup['X_IMAGE'], tab_sup['Y_IMAGE']],
names=['X_CATALOG', 'Y_CATALOG'])
# Join the two tables by common keys
keys = set(tab.colnames).intersection(tab_sup.colnames)
if len(tab_sup) > 0:
tab_join = join(tab, tab_sup, keys=keys, join_type='outer')
tab_join.sort('MAG_AUTO_corr')
return tab_join
else:
return tab
def calculate_color_term(tab_target,
mag_range=[13,18], mag_name='gmag_PS',
verbose=True, draw=True):
"""
    Use non-saturated stars to calculate the color term between SE MAG_AUTO
    and the magnitude in the matched catalog.
    Parameters
    ----------
    tab_target : full matched source catalog
    mag_range : range of magnitude for stars to be used
    mag_name : column name of magnitude in tab_target
    draw : whether to draw a diagnostic plot of MAG_AUTO vs diff.
    Returns
    ----------
    CT : color correction term (SE - catalog)
"""
mag = tab_target["MAG_AUTO"]
mag_cat = tab_target[mag_name]
d_mag = tab_target["MAG_AUTO"] - mag_cat
use_range = (mag>mag_range[0])&(mag<mag_range[1])&(~np.isnan(mag_cat))
d_mag = d_mag[use_range]
mag = mag[use_range]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
d_mag_clip = sigma_clip(d_mag, 3, maxiters=10)
CT = biweight_location(d_mag_clip)
if draw:
plt.figure()
plt.scatter(mag, d_mag, s=8, alpha=0.2, color='gray')
plt.scatter(mag, d_mag_clip, s=6, alpha=0.3)
plt.axhline(CT, color='k', alpha=0.7)
plt.ylim(-3,3)
plt.xlim(mag_range[0]-0.5, mag_range[1]+0.5)
xlab = "MAG_AUTO"
ylab = "MAG AUTO - %s"%mag_name
if plt.rcParams['text.usetex']:
            xlab = xlab.replace('_', r'$\_$')
            ylab = ylab.replace('_', r'$\_$')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
logger.info("Average Color Term [SE-%s] = %.5f"%(mag_name, CT))
return np.around(CT,5)
def fit_empirical_aperture(tab_target, seg_map, mag_name='rmag_PS',
K=2, R_min=1, R_max=100,
mag_range=[11, 22], degree=2, draw=True):
"""
Fit an empirical polynomial curve for log radius of aperture based on
corrected magnitudes and segm map of SE. Radius is enlarged K times.
Parameters
----------
    tab_target : full matched source catalog
seg_map : training segm map
mag_name : column name of magnitude in tab_target
mag_range : range of magnitude for stars to be used
K : enlargement factor on the original segm map (default 2)
R_min : minimum aperture size in pixel (default 1)
R_max : maximum aperture size in pixel (default 100)
degree : degree of polynomial (default 2)
draw : whether to draw a diagnostic plot of log R vs mag
Returns
----------
estimate_radius : a function turns magnitude into log R
"""
from photutils.segmentation import SegmentationImage
msg = "Fitting {0}-order empirical relation for ".format(degree)
msg += "apertures of catalog stars based on SExtarctor (X{0:.1f})".format(K)
logger.info(msg)
# Read from SE segm map
segm_deb = SegmentationImage(seg_map)
R_aper = (segm_deb.get_areas(tab_target["NUMBER"])/np.pi)**0.5
tab_target['logR'] = np.log10(K * R_aper)
mag_match = tab_target[mag_name]
mag_match[np.isnan(mag_match)] = -1
tab = tab_target[(mag_match>mag_range[0])&(mag_match<mag_range[1])]
mag_all = tab[mag_name]
logR = tab['logR']
p_poly = np.polyfit(mag_all, logR, degree)
f_poly = np.poly1d(p_poly)
if draw:
plt.scatter(tab_target[mag_name], tab_target['logR'], s=8, alpha=0.2, color='gray')
plt.scatter(mag_all, logR, s=8, alpha=0.2, color='k')
mag_ls = np.linspace(6,23)
clip = np.zeros_like(mag_all, dtype='bool')
for i in range(3):
if draw: plt.plot(mag_ls, f_poly(mag_ls), lw=1, ls='--')
mag, logr = mag_all[~clip], logR[~clip]
p_poly = np.polyfit(mag, logr, degree)
f_poly = np.poly1d(p_poly)
dev = np.sqrt((logR-f_poly(mag_all))**2)
clip = dev>3*np.mean(dev)
if draw:
plt.plot(mag_ls, f_poly(mag_ls), lw=2, color='gold')
plt.scatter(mag, logr, s=3, alpha=0.2, color='gold')
plt.xlabel("magnitude (catalog)")
plt.ylabel(r"$\log_{10}\,R$")
plt.xlim(7,23)
plt.ylim(0.15,2.2)
plt.show()
estimate_radius = lambda m: max(10**min(np.log10(R_max), f_poly(m)), R_min)
return estimate_radius
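# Usage sketch (illustrative; `SE_cat_match` and `segmap` are assumed inputs from an
# earlier SExtractor run cross-matched with an external catalog):
#   >>> estimate_radius = fit_empirical_aperture(SE_cat_match, segmap,
#   ...                                          mag_name='rmag_PS', K=2, degree=2)
#   >>> R_mask = estimate_radius(15.0)   # mask radius (pix) for a 15th-mag star
# The returned callable is then fed to make_segm_from_catalog below.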
def make_segm_from_catalog(catalog_star,
bounds, estimate_radius,
mag_name='rmag', mag_limit=22,
obj_name='', band='G',
ext_cat=None,
k_mask_ext=5,
parallel=False,
save=False,
dir_name='./Measure',
draw=True, verbose=True):
"""
Make segmentation map from star catalog.
Mask aperture sizes are evaluated from SE segmentation maps.
Parameters
----------
catalog_star : star catalog
bounds : 1X4 1d array defining bounds of region
estimate_radius : function of turning magnitude into log R
mag_name : magnitude column name in catalog_star
mag_limit : magnitude limit to add segmentation
ext_cat : (bright) extended source catalog to mask
k_mask_ext: enlarge factor for a/b of masks of extended sources.
draw : whether to draw the segm map
save : whether to save the segm map as fits
dir_name : path of saving
Returns
----------
seg_map : output segm map generated from catalog
"""
Xmin, Ymin, Xmax, Ymax = bounds
nX = Xmax - Xmin
nY = Ymax - Ymin
try:
catalog = catalog_star[~catalog_star[mag_name].mask]
except AttributeError:
catalog = catalog_star[~np.isnan(catalog_star[mag_name])]
catalog = catalog[catalog[mag_name]<mag_limit]
if verbose:
msg = "Make segmentation map based on catalog {:s}: {:d} stars"
msg = msg.format(mag_name, len(catalog))
logger.info(msg)
# Estimate mask radius
R_est = np.array([estimate_radius(m) for m in catalog[mag_name]])
# Draw segment map generated from the catalog
seg_map = np.zeros((nY, nX))
def assign_labels_from_apertures(apers, seg_map, label_ini=1):
# Segmentation k sorted by mag of source catalog
for (k, aper) in enumerate(apers):
star_ma = aper.to_mask(method='center').to_image(seg_map.shape)
if star_ma is not None:
seg_map[star_ma.astype(bool)] = k + label_ini
return seg_map
if parallel==False:
# Generate object apertures
apers = [CircularAperture((X_c-Xmin, Y_c-Ymin), r=r)
for (X_c, Y_c, r) in zip(catalog['X_CATALOG'], catalog['Y_CATALOG'], R_est)]
# Further mask for bright extended sources
if ext_cat is not None:
if len(ext_cat)>0:
for (X_c,Y_c, a, b, theta) in zip(ext_cat['X_IMAGE'],
ext_cat['Y_IMAGE'],
ext_cat['A_IMAGE'],
ext_cat['B_IMAGE'],
ext_cat['THETA_IMAGE'],):
pos = (X_c-Xmin, Y_c-Ymin)
theta_ = np.mod(theta, 360) * np.pi/180
aper = EllipticalAperture(pos, a*k_mask_ext, b*k_mask_ext, theta_)
apers.append(aper)
seg_map = assign_labels_from_apertures(apers, seg_map, label_ini=1)
else:
L_trunk = 1000 # Y size of segment trunk
catalog.sort('Y_CATALOG') # sorted by Y_c
        N_trunk = int(np.ceil(nY / L_trunk))
        def make_segm_trunk(i_trunk, catalog, radius):
            # Y extent of this trunk (the last trunk takes the remainder rows)
            if i_trunk == N_trunk-1:
                nY_trunk = nY - (N_trunk-1) * L_trunk
            else:
                nY_trunk = L_trunk
            seg_map_trunk = np.zeros((nY_trunk, nX))    # slice of the segm map
Y_trunk_min = Ymin + i_trunk*L_trunk # min Y value in the trunk
# make Y slice of catalog & radii
trunk = abs(catalog['Y_CATALOG']-(Y_trunk_min+L_trunk//2))<=L_trunk//2
cat_trunk = catalog[trunk]
R_trunk = radius[trunk]
apers = [CircularAperture((X_c-Xmin, Y_c-Y_trunk_min), r=r)
for (X_c, Y_c, r) in zip(cat_trunk['X_CATALOG'], cat_trunk['Y_CATALOG'], R_trunk)]
# Initial label of the trunk segm
lab_ini = len(catalog[catalog['Y_CATALOG']<Y_trunk_min]) + 1
# Assign labels to segm trunk
seg_map_trunk = assign_labels_from_apertures(apers, seg_map_trunk, lab_ini)
return seg_map_trunk
p_make_segm_trunk = partial(make_segm_trunk, catalog=catalog, radius=R_est)
results = parallel_compute(np.arange(N_trunk), p_make_segm_trunk,
lengthy_computation=True, verbose=False)
# results = [p_make_segm_trunk(k) for k in range(N_trunk)]
seg_map = np.vstack(results)
# Further mask for bright extended sources
if ext_cat is not None:
            if len(ext_cat)>0:
                apers_ext = []
                for (X_c,Y_c, a, b, theta) in zip(ext_cat['X_IMAGE'],
                                                  ext_cat['Y_IMAGE'],
                                                  ext_cat['A_IMAGE'],
                                                  ext_cat['B_IMAGE'],
                                                  ext_cat['THETA_IMAGE'],):
                    pos = (X_c-Xmin, Y_c-Ymin)
                    theta_ = np.mod(theta, 360) * np.pi/180
                    aper = EllipticalAperture(pos, a*k_mask_ext, b*k_mask_ext, theta_)
                    apers_ext.append(aper)
                seg_map = assign_labels_from_apertures(apers_ext, seg_map, label_ini=seg_map.max()+1)
if draw:
from .plotting import make_rand_cmap
plt.figure(figsize=(6,6), dpi=100)
plt.imshow(seg_map, vmin=1, cmap=make_rand_cmap(int(seg_map.max())))
plt.show()
# Save segmentation map built from catalog
if save:
check_save_path(dir_name, overwrite=True, verbose=False)
hdu_seg = fits.PrimaryHDU(seg_map.astype(int))
band = band.lower()
range_str = f"X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]"
fname = f"{obj_name}-segm_{band}_catalog_{range_str}.fits"
filename = os.path.join(dir_name, fname)
hdu_seg.writeto(filename, overwrite=True)
if verbose:
logger.info(f"Saved segmentation map made from catalog as {filename}")
return seg_map
def make_psf_from_fit(sampler, psf=None,
pixel_scale=DF_pixel_scale,
psf_range=None,
leg2d=False):
"""
    Reconstruct PSF from fit.
Parameters
----------
sampler : Sampler.sampler class
The output sampler file (.res)
psf : PSF_Model class, default None
An inherited PSF model. If None, initialize with a fiducial model.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel
Returns
-------
psf_fit : PSF_Model class
        Reconstructed PSF.
params : list
Fitted parameters.
"""
ct = sampler.container
n_spline = ct.n_spline
fit_sigma, fit_frac = ct.fit_sigma, ct.fit_frac
params_fit, _, _ = sampler.get_params_fit()
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
if psf is None:
# Fiducial PSF Model
from .modeling import PSF_Model
params = DF_default_params
psf = PSF_Model(params, aureole_model='multi-power')
psf.pixelize(pixel_scale)
if psf.aureole_model == "moffat":
gamma1_fit, beta1_fit = params_fit[:2]
param_update = {'gamma1':gamma1_fit, 'beta1':beta1_fit}
else:
N_n = n_spline
N_theta = n_spline - 1
if psf.cutoff:
try:
n_c = psf.n_c
theta_c = psf.theta_c
except AttributeError:
n_c = 4
theta_c = 1200
if psf.aureole_model == "power":
n_fit = params_fit[0]
param_update = {'n':n_fit}
elif psf.aureole_model == "multi-power":
n_s_fit = params_fit[:N_n]
theta_s_fit = np.append(psf.theta_0, 10**params_fit[N_n:N_n+N_theta])
if psf.cutoff:
n_s_fit = np.append(n_s_fit, n_c)
theta_s_fit = np.append(theta_s_fit, theta_c)
param_update = {'n0':n_s_fit[0], 'n_s':n_s_fit, 'theta_s':theta_s_fit}
if fit_frac:
frac = 10**params_fit[-1]
param_update['frac'] = frac
# Make a new copy and update parameters
psf_fit = psf.copy()
psf_fit.update(param_update)
mu_fit = params_fit[-K-1]
if fit_sigma:
sigma_fit = 10**params_fit[-K]
else:
sigma_fit = ct.std_est
if ct.leg2d:
psf_fit.A10, psf_fit.A01 = 10**params_fit[-K-2], 10**params_fit[-K-3]
psf_fit.bkg, psf_fit.bkg_std = mu_fit, sigma_fit
_ = psf_fit.generate_core()
_, _ = psf_fit.generate_aureole(psf_range=psf_range, psf_scale=pixel_scale)
return psf_fit, params_fit
def calculate_reduced_chi2(fit, data, uncertainty, dof=5):
chi2_reduced = np.sum(((fit-data)/uncertainty)**2)/(len(data)-dof)
logger.info("Reduced Chi^2 = %.5f"%chi2_reduced)
class NormalizationError(Exception):
def __init__(self, message): self.message = message
def __str__(self): return(repr(self.message))
def __repr__(self): return 'Normalization Error(%r)'%(str(self))
class InconvergenceError(NormalizationError):
def __init__(self, message): self.message = message
def __repr__(self):
return 'InconvergenceError: %r'%self.message
| 73,388 | 35.971788 | 121 |
py
|
elderflower
|
elderflower-master/elderflower/__init__.py
|
__version__ = "0.3.3"
# Pixel scale (arcsec/pixel) for reduced and raw Dragonfly data
DF_pixel_scale = 2.5
DF_raw_pixel_scale = 2.85
# Gain (e-/ADU) of Dragonfly
DF_Gain = 0.37
try:
    # defined by setup.py at build time; absent on a normal import
    __SETUP__
except NameError:
    __SETUP__ = False
if not __SETUP__:
from . import io
from . import numeric
from . import utils
from . import modeling
from . import image
from . import mask
from . import crossmatch
from . import detection
from . import stack
from . import norm
from . import sampler
from . import container
from . import task
from . import plotting
from . import parallel
from . import panstarrs
from . import atlas
| 707 | 17.153846 | 63 |
py
|
elderflower
|
elderflower-master/elderflower/stack.py
|
import os
import re
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import binary_dilation
from astropy.io import fits
from astropy.table import Table
from photutils.aperture import CircularAperture
from .io import logger
from .plotting import LogNorm
from .utils import background_annulus
from . import DF_pixel_scale, DF_raw_pixel_scale
### Stacking PSF functions ###
def resample_thumb(image, mask, center, shape_new=None):
"""
Shift and resample the thumb image & mask to have odd dimensions
and center at the center pixel. The new dimension can be specified.
Parameters
----------
image : input image, 2d array
mask : input mask, 2d bool array (masked = 1)
    center : center of the target, array or tuple
shape_new : new shape after resampling
Returns
-------
image_ : output image, 2d array
mask_ : output mask, 2d bool array (masked =1)
center_ : center of the target after the shift
"""
from scipy.interpolate import RectBivariateSpline
X_c, Y_c = center
NX, NY = image.shape
# original grid points
Xp, Yp = np.linspace(0, NX-1, NX), np.linspace(0, NY-1, NY)
rbspl = RectBivariateSpline(Xp, Yp, image, kx=3, ky=3)
rbspl_ma = RectBivariateSpline(Xp, Yp, mask, kx=1, ky=1)
# new NAXIS
if shape_new is None:
NX_ = NY_ = int(np.floor(image.shape[0]/2) * 2) - 3
else:
NX_, NY_ = shape_new
# shift grid points
Xp_ = np.linspace(X_c - NX_//2, X_c + NX_//2, NX_)
Yp_ = np.linspace(Y_c - NY_//2, Y_c + NY_//2, NY_)
# resample image
image_ = rbspl(Xp_, Yp_)
mask_ = rbspl_ma(Xp_, Yp_) > 0.5
center_ = np.array([X_c - Xp_[0], Y_c - Yp_[0]])
return image_, mask_, center_
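# Usage sketch (illustrative; `thumb`, `thumb_mask`, and `cen` are assumed to come
# from a thumbnail dictionary such as res_thumb[num] used below):
#   >>> img_c, mask_c, cen_c = resample_thumb(thumb, thumb_mask, cen)
# The output has odd dimensions and `cen_c` sits exactly on the central pixel,
# which keeps the subsequent stacking in stack_star_image well aligned.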
def stack_star_image(table_stack, res_thumb, size=61, verbose=True):
"""
Stack images of stars in the table.
Parameters
----------
table_stack : astropy.table.Table
SExtarctor table of stars to stack
res_thumb : dict
the dict containing the thumbnails, masks and centers
size : int, optional, default 61
        Size of the stacked image in pixels; it will be rounded to an odd number.
Returns
-------
image_stack : stacked image
"""
size = int(size/2) * 2 + 1
shape = (size, size)
canvas = np.zeros(shape)
footprint = np.zeros_like(canvas)
i = 0
if verbose:
logger.info("Stacking {0} non-staurated stars to obtain the PSF core...".format(len(table_stack)))
for num in table_stack['NUMBER']:
# Read image, mask and center
img_star = res_thumb[num]['image']
mask_star = res_thumb[num]['mask']
cen_star = res_thumb[num]['center']
# enlarge mask
for j in range(1):
mask_star = binary_dilation(mask_star)
shape_star = img_star.shape
if shape_star[0]!=shape_star[1]: continue
        # measure local background
r_out = min(img_star.shape) * 0.8 //2
r_in = r_out - 5
bkg = background_annulus(cen_star, img_star, mask_star, r_in=r_in, r_out=r_out, draw=False)
# resample thumbnail centroid to center
img_star_, mask_star_, cen_star_ = resample_thumb(img_star, mask_star, cen_star)
shape_star_ = img_star_.shape
# remove nearby sources
img_star_ = img_star_ - bkg
img_star_[mask_star_] = 0
img_star_ = img_star_/img_star_.sum()
# add cutout to canvas
dx = abs(shape_star_[0]-canvas.shape[0])//2
dy = abs(shape_star_[1]-canvas.shape[1])//2
if shape_star_[0] > size:
cutout = img_star_[dx:-dx,dy:-dy]
canvas += cutout
footprint += (cutout!=0)
elif shape_star_[0] < size:
cutout = img_star_
canvas[dx:-dx,dy:-dy] += cutout
footprint[dx:-dx,dy:-dy] += (cutout!=0)
else:
canvas += img_star_
footprint += 1
i += 1
image_stack = canvas/footprint
image_stack = image_stack/image_stack.sum()
return image_stack
def make_global_stack_PSF(dir_name,
bounds_list,
obj_name, band,
overwrite=True,
verbose=True):
"""
Combine the stacked PSF of all regions into one, skip if existed.
Parameters
----------
dir_name : str
path containing the stacked PSF
    bounds_list : 2D int list / tuple
List of boundaries of regions to be fit (Nx4)
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name
band : str, 'g' 'G' 'r' or 'R'
Filter name
"""
fn_stack = os.path.join(dir_name, f'{obj_name}-{band}-PSF_stack.fits')
if overwrite or (os.path.isfile(fn_stack)==False):
for i, bounds in enumerate(bounds_list):
range_str = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bounds)
fn = os.path.join(dir_name, f'{obj_name}-{band}-psf_stack_{range_str}.fits')
image_psf = fits.getdata(fn)
if i==0:
image_stack = image_psf
else:
image_stack += image_psf
image_stack = image_stack/np.nansum(image_stack)
if i>0:
if verbose:
logger.info("Read & stack {:} PSF.".format(i+1))
fits.writeto(fn_stack, data=image_stack, overwrite=True)
if verbose:
logger.info("Saved stacked PSF as {:}".format(fn_stack))
else:
logger.warning("{:} existed. Skip Stack.".format(fn_stack))
def montage_psf_image(image_psf, image_wide_psf, r=12, dr=0.5, wid_cross=None):
"""
Montage the core of the stacked psf and the wing of the wide psf model.
Parameters
----------
image_psf : 2d array
The image of the inner PSF.
image_wide_psf : 2d array
The image of the wide-angle PSF.
r : int, optional, default 12
Radius in pixel at which the PSF is montaged.
dr : float, optional, default 0.5
Width of annulus for measuring the scaling.
Returns
-------
image_PSF : 2d array
The image of the output PSF.
"""
image_PSF = image_wide_psf.copy()
# Wide PSF
size = image_wide_psf.shape[0]
cen = ((size-1)/2., (size-1)/2.)
x_ = y_ = np.linspace(0,size-1,size)
xx, yy = np.meshgrid(x_, y_)
rr = np.sqrt((yy-cen[0])**2+(xx-cen[1])**2)
I_wide = np.median(image_wide_psf[(rr<r+dr)&(rr>r-dr)])
# Stacked PSF
size_psf = image_psf.shape[0]
cen_psf = ((size_psf-1)/2., (size_psf-1)/2.)
x_psf = y_psf = np.linspace(0,size_psf-1,size_psf)
xx_psf, yy_psf = np.meshgrid(x_psf, y_psf)
rr_psf = np.sqrt((yy_psf-cen_psf[0])**2+(xx_psf-cen_psf[1])**2)
if wid_cross is not None:
mask_cross = np.logical_or.reduce([abs(yy_psf-cen_psf[0])<wid_cross, abs(xx_psf-cen_psf[1])<wid_cross])
else:
mask_cross = np.zeros_like(image_psf, dtype=bool)
I_psf = np.median(image_psf[(rr_psf<r+dr)&(rr_psf>r-dr)&(~mask_cross)])
# Montage
image_PSF[rr<r] = image_psf[rr_psf<r]/ I_psf * I_wide
image_PSF = image_PSF/image_PSF.sum()
return image_PSF
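# Usage sketch (illustrative; file names are assumed examples, not fixed outputs):
#   >>> core = fits.getdata('obj-G-PSF_stack.fits')    # stacked inner PSF
#   >>> wide = fits.getdata('PSF_wide_model.fits')     # wide-angle PSF model, same pixel scale
#   >>> psf_full = montage_psf_image(core, wide, r=12, dr=0.5)
# Both inputs should be normalized, odd-sized and centered; the output sums to 1.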
def get_aperture_flux_fraction(image, frac):
""" Get radius within which contains certain fraction of total flux. """
shape = image.shape
size = min(shape)
cen = ((shape[1]-1)/2., (shape[0]-1)/2.)
r_aper_list = np.array(list(np.around(np.logspace(0.3, np.log10(size//2), 50), 1)))
flux_list = np.empty_like(r_aper_list)
for k, r_aper in enumerate(r_aper_list):
aper = CircularAperture(cen, r=r_aper)
aper_ma = aper.to_mask().to_image(shape)
flux_list[k] = (image*aper_ma).sum()
total_flux = np.ma.sum(image) * frac
r_aper_target = r_aper_list[np.argmin(abs(flux_list-total_flux))]
return round(r_aper_target)
def fine_stack_PSF_image(table_stack, res_thumb, size=61, fwhm_psf=5, n_iter=2, verbose=True):
from scipy.ndimage import shift
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.convolution import convolve_fft, Gaussian2DKernel
from photutils.background import MADStdBackgroundRMS, MMMBackground
from photutils.detection import IRAFStarFinder
from photutils.psf import (DAOGroup, IterativelySubtractedPSFPhotometry)
from photutils.psf.epsf import EPSFModel
from photutils.segmentation import detect_sources
from .utils import background_extraction
# image grid
image_shape = (size, size)
image_cen = ((size-1)/2., (size-1)/2.)
x_ = y_ = np.linspace(0,size-1,size)
if verbose:
logger.info("Stacking {0} non-staurated stars to obtain the PSF core...".format(len(table_stack)))
stars_scaled = np.ma.empty((len(table_stack), size, size))
# Shift image center and normalize by aperture flux
for ind in range(len(table_stack)):
num = table_stack['NUMBER'][ind]
bkg = res_thumb[num]['bkg']
image = res_thumb[num]['image']
mask = res_thumb[num]['mask']
center = res_thumb[num]['center']
for j in range(3):
mask = binary_dilation(mask)
image = np.ma.array(image - bkg, mask=mask)
# resample
image_new, mask_new, center_new = resample_thumb(image, mask, center, shape_new=image_shape)
# rough estimate of first total flux
r_aper = get_aperture_flux_fraction(np.ma.array(image_new, mask=mask_new), frac=0.99)
r_aper = min(r_aper, size)
aper = CircularAperture(center, r=r_aper)
aper_new = CircularAperture(center_new, r=r_aper)
aper_ma = aper_new.to_mask().to_image(image_new.shape)
# normalization
flux = np.ma.sum(image_new[(aper_ma == 1) & (~mask_new)])
stars_scaled[ind] = image_new / flux
# first median stack
star_med_0 = np.ma.median(stars_scaled, axis=0)
star_med = star_med_0.copy()
# some static PSF photometry setup
bkgrms = MADStdBackgroundRMS()
daogroup = DAOGroup(2.0 * fwhm_psf)
mmmbkg = MMMBackground()
fitter = LevMarLSQFitter()
iraf_finder_kws = dict(fwhm=fwhm_psf, brightest=5,
minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
sharplo=0.0, sharphi=2.0)
# mask growth kernel
kernel_mask = Gaussian2DKernel(fwhm_psf*gaussian_fwhm_to_sigma)
for i in range(n_iter):
# first R99 estimate
r_aper_star = get_aperture_flux_fraction(star_med, frac=0.99)
# some dynamic PSF photometry setup
psf_model = EPSFModel(star_med, x0=image_cen[0], y0=image_cen[1])
psf_photometry_kws = dict(group_maker=DAOGroup(2.0 * fwhm_psf),
bkg_estimator=MMMBackground(),
psf_model=psf_model,
fitter=LevMarLSQFitter(),
aperture_radius=r_aper_star,
fitshape=image_shape, niters=1)
# Do PSF photometry
stars_out = np.ma.empty_like(stars_scaled)
for k, image in enumerate(stars_scaled):
# Aperture mask
aper = CircularAperture(image_cen, r=r_aper_star)
aper_ma = aper.to_mask().to_image(image_shape) == 1
# def PSF photometry model
iraffind = IRAFStarFinder(threshold=3*bkgrms(image[~aper_ma]), **iraf_finder_kws)
photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, **psf_photometry_kws)
# do photometry
result_tab = photometry(image=image)
irow = ((result_tab['x_fit'] - image_cen[0])**2+(result_tab['y_fit'] - image_cen[1])**2).argmin()
# get residual
residual_image = photometry.get_residual_image()
residual_image_ma = residual_image.copy()
# mask target star
residual_image_ma[aper_ma] = np.nan
# detect nearby souces
std_res_ma = bkgrms(residual_image_ma)
segm = detect_sources(residual_image_ma, threshold=3*std_res_ma, npixels=5)
if segm is None:
mask = np.zeros_like(image, dtype=bool)
else:
mask = segm.data > 0
mask = convolve_fft(mask, kernel_mask) > 0.1
# shift
dy, dx = (image_cen[1]-result_tab[irow]['y_fit'], image_cen[0]-result_tab[irow]['x_fit'])
image_shifted = shift(image, [dy, dx], order=3, mode='nearest')
mask_shifted = shift(mask, [dy, dx], order=0, mode='constant', cval=0)
# norm
image_star = np.ma.array(image_shifted, mask=mask_shifted)
bkg_val = np.ma.median(np.ma.array(residual_image, mask=mask | aper_ma | np.isnan(image)))
image_star = image_star - bkg_val
image_star = image_star/image_star.sum()
            image_star = np.ma.array(image_star, mask=mask_shifted)
stars_out[k] = image_star
star_med_out = np.nanmedian(stars_out, axis=0)
star_med = star_med_out / star_med_out.sum()
image_stack = star_med.copy()
return image_stack
| 13,695 | 32.568627 | 111 |
py
|
elderflower
|
elderflower-master/elderflower/numeric.py
|
import math
import numpy as np
from scipy.integrate import quad
from scipy.spatial import distance
from scipy.special import gamma as Gamma
from astropy.modeling import models
try:
from numba import njit
except ImportError:
def njit(*args, **kwargs):
def dummy_decorator(func, *args, **kwargs):
return func
return dummy_decorator
############################################
# Analytic Functions for models
############################################
### numeric conversion ###
def fwhm_to_gamma(fwhm, beta):
""" in arcsec """
return fwhm / 2. / math.sqrt(2**(1./beta)-1)
def gamma_to_fwhm(gamma, beta):
""" in arcsec """
return gamma / fwhm_to_gamma(1, beta)
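# Example (illustrative): a Moffat profile with FWHM = 6" and beta = 3 has
#   gamma = 6 / (2 * sqrt(2**(1/3) - 1)) ~= 5.88 arcsec,
# and gamma_to_fwhm(5.88, 3) recovers ~6" (the two functions are inverses).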
### interpolate value ###
def interp_I0(r, I, r0, r1, r2):
""" Interpolate I0 at r0 with I(r) between r1 and r2 """
range_intp = (r>r1) & (r<r2)
    logI0 = np.interp(r0, r[range_intp], np.log10(I[range_intp]))
return 10**logI0
def compute_mean_I(r, I, r1, r2):
""" Compute mean I under I(r) between r1 and r2 """
range_intg = (r>r1) & (r<r2)
r_range = r[range_intg]
return np.trapz(I[range_intg], r_range)/(r_range.max()-r_range.min())
### funcs on single element ###
@njit
def compute_multi_pow_norm(n_s, theta_s, I_theta0):
""" Compute normalization factor A of each power law component A_i*(theta)^(n_i)"""
n0, theta0 = n_s[0], theta_s[0]
a0 = I_theta0 * theta0**(n0)
a_s = np.zeros(len(n_s))
a_s[0] = a0
I_theta_i = a0 * float(theta_s[1])**(-n0)
for i, (n_i, theta_i) in enumerate(zip(n_s[1:], theta_s[1:])):
a_i = I_theta_i/(theta_s[i+1])**(-n_i)
a_s[i+1] = a_i
        if i+2 < len(theta_s):
            I_theta_i = a_i * float(theta_s[i+2])**(-n_i)
return a_s
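# Example (illustrative): for a two-component law with n_s = [3, 2.5],
# theta_s = [5, 100] (pix) and I_theta0 = 1,
#   a_0 = 1 * 5**3 = 125           ->  I(100) = 125 * 100**-3 = 1.25e-4
#   a_1 = 1.25e-4 / 100**-2.5 = 12.5
# so compute_multi_pow_norm returns [125., 12.5]; the two pieces join
# continuously at theta = 100 by construction.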
def trunc_pow(x, n, theta0, I_theta0=1):
""" Truncated power law for single element, I = I_theta0 at theta0 """
a = I_theta0 / (theta0)**(-n)
y = a * x**(-n) if x > theta0 else I_theta0
return y
def multi_pow(x, n_s, theta_s, I_theta0, a_s=None):
""" Continuous multi-power law for single element """
if a_s is None:
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
n0, theta0, a0 = n_s[0], theta_s[0], a_s[0]
if x <= theta0:
return I_theta0
elif x<= theta_s[1]:
y = a0 * x**(-n0)
return y
else:
for k in range(len(a_s)):
try:
if x <= theta_s[k+2]:
y = a_s[k+1] * x**(-n_s[k+1])
return y
except IndexError:
pass
else:
y = a_s[-1] * x**(-n_s[-1])
return y
### 1D functions ###
def log_linear(x, k, x0, y0):
""" linear function y ~ k * log x passing (x0,y0) """
x_ = np.log10(x)
return k * x_ + (y0-k*np.log10(x0))
def flattened_linear(x, k, x0, y0):
""" A linear function flattened at (x0,y0) of 1d array """
return np.array(list(map(lambda x:k*x + (y0-k*x0) if x>=x0 else y0, x)))
def piecewise_linear(x, k1, k2, x0, y0):
""" A piecewise linear function transitioned at (x0,y0) of 1d array """
return np.array(list(map(lambda x:k1*x + (y0-k1*x0) if x>=x0 else k2*x + (y0-k2*x0), x)))
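# Example (illustrative): log_linear describes a straight line in (log10 x, y)
# anchored at (x0, y0); e.g. with k=10, x0=30, y0=24,
#   log_linear(300, 10, 30, 24) = 10*log10(300) + (24 - 10*log10(30)) = 34,
# i.e. one dex further out the profile is fainter by k = 10 mag/arcsec^2.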
def power1d(x, n, theta0, I_theta0):
""" Power law for 1d array, I = I_theta0 at theta0, theta in pix """
a = I_theta0 / (theta0)**(-n)
y = a * np.power(x + 1e-6, -n)
return y
def trunc_power1d(x, n, theta0, I_theta0=1):
""" Truncated power law for 1d array, I = I_theta0 at theta0, theta in pix """
a = I_theta0 / (theta0)**(-n)
y = a * np.power(x + 1e-6, -n)
y[x<=theta0] = I_theta0
return y
def multi_power1d(x, n_s, theta_s, I_theta0, clear=False):
""" Multi-power law for 1d array, I = I_theta0 at theta0, theta in pix"""
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
theta0 = theta_s[0]
y = np.zeros_like(x)
y[x<=theta0] = I_theta0
for k in range(len(a_s)):
reg = (x>theta_s[k]) & (x<=theta_s[k+1]) if k<len(a_s)-1 else (x>theta_s[k])
y[reg] = a_s[k] * np.power(x[reg], -n_s[k])
if clear:
y[x<=theta0] = 0
return y
def trunc_power1d_normed(x, n, theta0):
""" Truncated power law for 1d array, flux normalized = 1, theta in pix """
norm_pow = quad(trunc_pow, 0, np.inf, args=(n, theta0, 1))[0]
y = trunc_power1d(x, n, theta0, 1) / norm_pow
return y
def moffat1d_normed(x, gamma, alpha):
""" Moffat for 1d array, flux normalized = 1 """
Mof_mod_1d = models.Moffat1D(amplitude=1, x_0=0, gamma=gamma, alpha=alpha)
norm_mof = quad(Mof_mod_1d, 0, np.inf)[0]
y = Mof_mod_1d(x) / norm_mof
return y
def multi_power1d_normed(x, n_s, theta_s):
""" Multi-power law for 1d array, flux normalized = 1, theta in pix """
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
norm_mpow = quad(multi_pow, 0, np.inf,
args=(n_s, theta_s, 1, a_s), limit=100)[0]
y = multi_power1d(x, n_s, theta_s, 1) / norm_mpow
return y
### 2D functions ###
def power2d(xx, yy, n, theta0, I_theta0, cen):
""" Power law for 2d array, normalized = I_theta0 at theta0 """
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2) + 1e-6
rr[rr<=1] = rr[rr>1].min()
a = I_theta0 / (theta0)**(-n)
z = a * np.power(rr, -n)
return z
@njit
def multi_power2d(xx, yy, n_s, theta_s, I_theta0, cen, clear=False):
""" Multi-power law for 2d array, I = I_theta0 at theta0, theta in pix"""
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2).ravel()
z = np.zeros(xx.size)
theta0 = theta_s[0]
z[rr<=theta0] = I_theta0
if clear:
z[rr<=theta0] = 0
for k in range(len(a_s)):
reg = (rr>theta_s[k]) & (rr<=theta_s[k+1]) if k<len(a_s)-1 else (rr>theta_s[k])
z[reg] = a_s[k] * np.power(rr[reg], -n_s[k])
return z.reshape(xx.shape)
### Flux/Amplitude Convertion ###
def moffat1d_Flux2Amp(r_core, beta, Flux=1):
""" Calculate the (astropy) amplitude of 1d Moffat profile given the core width, power index, and total flux F.
Note in astropy unit (x,y) the amplitude should be scaled with 1/sqrt(pi)."""
Amp = Flux * Gamma(beta) / ( r_core * np.sqrt(np.pi) * Gamma(beta-1./2) ) # Derived scaling factor
return Amp
def moffat1d_Amp2Flux(r_core, beta, Amp=1):
Flux = Amp / moffat1d_Flux2Amp(r_core, beta, Flux=1)
return Flux
def power1d_Flux2Amp(n, theta0, Flux=1, trunc=True):
if trunc:
I_theta0 = Flux * (n-1)/n / theta0
else:
I_theta0 = Flux * (n-1) / theta0
return I_theta0
def power1d_Amp2Flux(n, theta0, Amp=1, trunc=True):
if trunc:
Flux = Amp * n/(n-1) * theta0
else:
Flux = Amp * 1./(n-1) * theta0
return Flux
def moffat2d_Flux2Amp(r_core, beta, Flux=1):
return Flux * (beta-1) / r_core**2 / np.pi
def moffat2d_Amp2Flux(r_core, beta, Amp=1):
return Amp / moffat2d_Flux2Amp(r_core, beta, Flux=1)
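# Example (illustrative): a 2D Moffat with r_core = 3 pix, beta = 3 and unit total
# flux has peak amplitude
#   Amp = (beta-1) / (pi * r_core**2) = 2 / (9*pi) ~= 0.0707,
# and moffat2d_Amp2Flux(3, 3, Amp=0.0707) returns ~1 by construction.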
def moffat2d_Flux2I0(r_core, beta, Flux=1):
Amp = moffat2d_Flux2Amp(r_core, beta, Flux=Flux)
return moffat2d_Amp2I0(beta, Amp=Amp)
def moffat2d_I02Amp(beta, I0=1):
# Convert I0(r=r_core) to Amplitude
return I0 * 2**(2*beta)
def moffat2d_Amp2I0(beta, Amp=1):
# Convert I0(r=r_core) to Amplitude
return Amp * 2**(-2*beta)
def power2d_Flux2Amp(n, theta0, Flux=1):
if n>2:
I_theta0 = (1./np.pi) * Flux * (n-2)/n / theta0**2
else:
raise InconvergenceError('PSF is not convergent in Infinity.')
return I_theta0
def power2d_Amp2Flux(n, theta0, Amp=1):
return Amp / power2d_Flux2Amp(n, theta0, Flux=1)
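# Example (illustrative): a truncated 2D power law with n = 3, theta0 = 5 pix and
# unit total flux has inner plateau intensity
#   I_theta0 = (1/pi) * (n-2)/n / theta0**2 = 1/(75*pi) ~= 4.24e-3;
# for n <= 2 the total flux diverges and power2d_Flux2Amp raises an error.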
def multi_power2d_Amp2Flux(n_s, theta_s, Amp=1, theta_trunc=1e5):
""" convert amplitude(s) to integral flux with 2D multi-power law """
if np.ndim(Amp)>0:
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
a_s = np.multiply(a_s[:,np.newaxis], Amp)
else:
a_s = compute_multi_pow_norm(n_s, theta_s, Amp)
I_2D = sum_I2D_multi_power2d(Amp, a_s, n_s, theta_s, theta_trunc)
return I_2D
@njit
def sum_I2D_multi_power2d(Amp, a_s, n_s, theta_s, theta_trunc=1e5):
""" Supplementary function for multi_power2d_Amp2Flux tp speed up """
theta0 = theta_s[0]
I_2D = Amp * np.pi * theta0**2
for k in range(len(n_s)-1):
if n_s[k] == 2:
I_2D += 2*np.pi * a_s[k] * math.log(theta_s[k+1]/theta_s[k])
else:
I_2D += 2*np.pi * a_s[k] * (theta_s[k]**(2-n_s[k]) - theta_s[k+1]**(2-n_s[k])) / (n_s[k]-2)
if n_s[-1] > 2:
I_2D += 2*np.pi * a_s[-1] * theta_s[-1]**(2-n_s[-1]) / (n_s[-1]-2)
elif n_s[-1] == 2:
I_2D += 2*np.pi * a_s[-1] * math.log(theta_trunc/theta_s[-1])
else:
I_2D += 2*np.pi * a_s[-1] * (theta_trunc**(2-n_s[-1]) - theta_s[-1]**(2-n_s[-1])) / (2-n_s[-1])
return I_2D
def multi_power2d_Flux2Amp(n_s, theta_s, Flux=1):
return Flux / multi_power2d_Amp2Flux(n_s, theta_s, Amp=1)
def I2I0_mof(r_core, beta, r, I=1):
""" Convert Intensity I(r) at r to I at r_core with moffat.
r_core and r in pixel """
Amp = I * (1+(r/r_core)**2)**beta
I0 = moffat2d_Amp2I0(beta, Amp)
return I0
def I02I_mof(r_core, beta, r, I0=1):
""" Convert I at r_core to Intensity I(r) at r with moffat.
r_core and r in pixel """
Amp = moffat2d_I02Amp(beta, I0)
I = Amp * (1+(r/r_core)**2)**(-beta)
return I
def I2Flux_mof(frac, r_core, beta, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of moffat.
r_core and r in pixel """
Amp = I * (1+(r/r_core)**2)**beta
Flux_mof = moffat2d_Amp2Flux(r_core, beta, Amp=Amp)
Flux_tot = Flux_mof / frac
return Flux_tot
def Flux2I_mof(frac, r_core, beta, r, Flux=1):
""" Convert total flux at r to Intensity I(r) with fraction of moffat.
r_core and r in pixel """
Flux_mof = Flux * frac
Amp = moffat2d_Flux2Amp(r_core, beta, Flux=Flux_mof)
I = Amp * (1+(r/r_core)**2)**(-beta)
return I
def I2I0_pow(n0, theta0, r, I=1):
""" Convert Intensity I(r) at r to I at theta_0 with power law.
theata_s and r in pixel """
I0 = I * (r/theta0)**n0
return I0
def I02I_pow(n0, theta0, r, I0=1):
""" Convert Intensity I(r) at r to I at theta_0 with power law.
theata_s and r in pixel """
I = I0 / (r/theta0)**n0
return I
def I2Flux_pow(frac, n0, theta0, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of power law.
    theta0 and r in pixel """
I0 = I2I0_pow(n0, theta0, r, I=I)
Flux_pow = power2d_Amp2Flux(n0, theta0, Amp=I0)
Flux_tot = Flux_pow / frac
return Flux_tot
def Flux2I_pow(frac, n0, theta0, r, Flux=1):
""" Convert total flux to Intensity I(r) at r.
    theta0 and r in pixel """
Flux_pow = Flux * frac
I0 = power2d_Flux2Amp(n0, theta0, Flux=Flux_pow)
I = I0 / (r/theta0)**n0
return I
def I2I0_mpow(n_s, theta_s_pix, r, I=1):
""" Convert Intensity I(r) at r to I at theta_0 with multi-power law.
    theta_s and r in pixel """
i = np.digitize(r, theta_s_pix, right=True) - 1
I0 = I * r**(n_s[i]) * theta_s_pix[0]**(-n_s[0])
for j in range(i):
I0 *= theta_s_pix[j+1]**(n_s[j]-n_s[j+1])
return I0
def I02I_mpow(n_s, theta_s_pix, r, I0=1):
""" Convert Intensity I(r) at r to I at theta_0 with multi-power law.
theata_s and r in pixel """
i = np.digitize(r, theta_s_pix, right=True) - 1
I = I0 / r**(n_s[i]) / theta_s_pix[0]**(-n_s[0])
for j in range(i):
I *= theta_s_pix[j+1]**(n_s[j+1]-n_s[j])
return I
def calculate_external_light_pow(n0, theta0, pos_source, pos_eval, I0_source):
""" Calculate light produced by source (I0, pos_source) at pos_eval. """
r_s = distance.cdist(pos_source, pos_eval)
I0_s = np.repeat(I0_source[:, np.newaxis], r_s.shape[-1], axis=1)
r_s += 1e-3 # shift to avoid zero division
I_s = I0_s / (r_s/theta0)**n0
I_s[(r_s==1e-3)] = 0
return I_s.sum(axis=0)
def calculate_external_light_mpow(n_s, theta_s_pix, pos_source, pos_eval, I0_source):
""" Calculate light produced by source (I0_source, pos_source) at pos_eval. """
r_s = distance.cdist(pos_source, pos_eval)
r_inds = np.digitize(r_s, theta_s_pix, right=True) - 1
r_inds_uni, r_inds_inv = np.unique(r_inds, return_inverse=True)
I0_s = np.repeat(I0_source[:, np.newaxis], r_s.shape[-1], axis=1)
# Eq: I(r) = I0 * (theta0/theta1)^(n0) * (theta1/theta2)^(n1) *...* (theta_{k}/r)^(nk)
r_s += 1e-3 # shift to avoid zero division
I_s = I0_s * theta_s_pix[0]**n_s[0] / r_s**(n_s[r_inds])
factors = np.array([np.prod([theta_s_pix[j+1]**(n_s[j+1]-n_s[j])
for j in range(i)]) for i in r_inds_uni])
I_s *= factors[r_inds_inv].reshape(len(I0_source),-1)
I_s[(r_s==1e-3)] = 0
return I_s.sum(axis=0)
def I2Flux_mpow(frac, n_s, theta_s, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of multi-power law.
    theta_s and r in pixel """
I0 = I2I0_mpow(n_s, theta_s, r, I=I)
Flux_mpow = multi_power2d_Amp2Flux(n_s=n_s, theta_s=theta_s, Amp=I0)
Flux_tot = Flux_mpow / frac
return Flux_tot
def Flux2I_mpow(frac, n_s, theta_s, r, Flux=1):
""" Convert total flux to Intensity I(r) at r.
    theta_s and r in pixel """
i = np.digitize(r, theta_s, right=True) - 1
Flux_mpow = Flux * frac
I0 = multi_power2d_Flux2Amp(n_s=n_s, theta_s=theta_s, Flux=Flux_mpow)
I = I0 / r**(n_s[i]) / theta_s[0]**(-n_s[0])
for j in range(i):
I /= theta_s[j+1]**(n_s[j]-n_s[j+1])
return I
### 1D/2D conversion factor ###
def C_mof2Dto1D(r_core, beta):
""" gamma in pixel """
return 1./(beta-1) * 2*math.sqrt(np.pi) * r_core * Gamma(beta) / Gamma(beta-1./2)
def C_mof1Dto2D(r_core, beta):
""" gamma in pixel """
return 1. / C_mof2Dto1D(r_core, beta)
@njit
def C_pow2Dto1D(n, theta0):
""" theta0 in pixel """
return np.pi * theta0 * (n-1) / (n-2)
@njit
def C_pow1Dto2D(n, theta0):
""" theta0 in pixel """
return 1. / C_pow2Dto1D(n, theta0)
@njit
def C_mpow2Dto1D(n_s, theta_s):
""" theta in pixel """
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
n0, theta0, a0 = n_s[0], theta_s[0], a_s[0]
I_2D = 1. * np.pi * theta0**2
for k in range(len(n_s)-1):
if n_s[k] == 2:
I_2D += 2*np.pi * a_s[k] * np.log(theta_s[k+1]/theta_s[k])
else:
I_2D += 2*np.pi * a_s[k] * (theta_s[k]**(2-n_s[k]) - theta_s[k+1]**(2-n_s[k])) / (n_s[k]-2)
I_2D += 2*np.pi * a_s[-1] * theta_s[-1]**(2-n_s[-1]) / (n_s[-1]-2)
I_1D = 1. * theta0
for k in range(len(n_s)-1):
if n_s[k] == 1:
I_1D += a_s[k] * np.log(theta_s[k+1]/theta_s[k])
else:
I_1D += a_s[k] * (theta_s[k]**(1-n_s[k]) - theta_s[k+1]**(1-n_s[k])) / (n_s[k]-1)
I_1D += a_s[-1] * theta_s[-1]**(1-n_s[-1]) / (n_s[-1]-1)
return I_2D / I_1D
@njit
def C_mpow1Dto2D(n_s, theta_s):
""" theta in pixel """
return 1. / C_mpow2Dto1D(n_s, theta_s)
| 15,181 | 31.579399 | 115 |
py
|
elderflower
|
elderflower-master/elderflower/io.py
|
import os
# Path
package_dir = os.path.dirname(__file__)
test_dir = os.path.normpath(os.path.join(package_dir, '../tests'))
script_dir = os.path.normpath(os.path.join(package_dir, '../scripts'))
config_dir = os.path.normpath(os.path.join(package_dir, '../configs'))
# Default configuration path
default_config = os.path.join(config_dir, './config.yml')
import re
import sys
import yaml
import string
import shutil
import subprocess
import numpy as np
from datetime import datetime
from functools import partial, wraps
try:
import dill as pickle
except ImportError:
import pickle
from pickle import PicklingError
### LOGGING ###
import logging
from astropy.logger import AstropyLogger
class elderflowerLogger(AstropyLogger):
def reset(self, level='INFO', to_file=None, overwrite=True):
""" Reset logger. If to_file is given as a string, the output
will be stored into a log file. """
for handler in self.handlers[:]:
self.removeHandler(handler)
self.setLevel(level)
        if not isinstance(to_file, str):
# Set up the stdout handlers
handler = StreamHandler()
self.addHandler(handler)
else:
if os.path.isfile(to_file) & overwrite:
os.remove(to_file)
# Define file handler and set formatter
file_handler = logging.FileHandler(to_file)
msg = '[%(asctime)s] %(levelname)s: %(message)s'
formatter = logging.Formatter(msg, datefmt='%Y-%m-%d|%H:%M:%S')
file_handler.setFormatter(formatter)
self.addHandler(file_handler)
self.propagate = False
class StreamHandler(logging.StreamHandler):
""" A StreamHandler that logs messages in different colors. """
def emit(self, record):
stream_print(record.msg, record.levelno)
def stream_print(msg, levelno=logging.INFO):
""" Enable colored msg using ANSI escape codes based input levelno. """
levelname = logging.getLevelName(levelno)
stream = sys.stdout
if levelno < logging.INFO:
level_msg = '\x1b[1;30m'+levelname+': '+'\x1b[0m'
elif levelno < logging.WARNING:
level_msg = '\x1b[1;32m'+levelname+': '+'\x1b[0m'
elif levelno < logging.ERROR:
level_msg = '\x1b[1;31m'+levelname+': '+'\x1b[0m'
else:
level_msg = levelname+': '
stream = sys.stderr
print(f'{level_msg}{msg}', file=stream)
logging.setLoggerClass(elderflowerLogger)
logger = logging.getLogger('elderflowerLogger')
logger.reset()
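# --- Illustrative example (not part of the original module) ---
# A minimal sketch of redirecting the module logger to a file; the log file name
# and level below are placeholders.
def _example_logger_to_file(log_file='elderflower_run.log'):
    logger.reset(level='DEBUG', to_file=log_file, overwrite=True)
    logger.info("Messages are now written to " + log_file)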
######
def check_save_path(dir_name, overwrite=True, verbose=True):
""" Check if the input dir_name exists. If not, create a new one.
If yes, clear the content if overwrite=True. """
if not os.path.exists(dir_name):
os.makedirs(dir_name)
else:
if len(os.listdir(dir_name)) != 0:
if overwrite:
if verbose: logger.info(f"'{dir_name}' exists. Will overwrite files.")
#shutil.rmtree(dir_name)
else:
while os.path.exists(dir_name):
dir_name = input(f"'{dir_name}' exists. Enter a new dir name for saving:")
if input("exit"): sys.exit()
os.makedirs(dir_name)
if verbose: logger.info(f"Results will be saved in {dir_name}\n")
def get_executable_path(executable):
""" Get the execuable path """
command = f'which {executable}'
check_exe_path = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
exe_path = check_exe_path.stdout.read().decode("utf-8").rstrip('\n')
return exe_path
def get_SExtractor_path():
""" Get the execuable path of SExtractor.
Possible (sequential) alias: source-extractor, sex, sextractor """
# Check_path
SE_paths = list(map(get_executable_path,
['source-extractor', 'sex', 'sextractor']))
    # return the first available path
try:
SE_executable = next(path for path in SE_paths if len(path)>0)
return SE_executable
except StopIteration:
logger.error('SExtractor path is not found automatically.')
return ''
def update_SE_kwargs(kwargs={},
kwargs_update={'DETECT_THRESH':3,
'ANALYSIS_THRESH':3}):
""" Update SExtractor keywords in kwargs """
from .detection import default_conv, default_nnw
SE_key = kwargs.keys()
for key in kwargs_update.keys():
if key not in SE_key: kwargs[key] = kwargs_update[key]
if 'FILTER_NAME' not in SE_key : kwargs['FILTER_NAME'] = default_conv
if 'STARNNW_NAME' not in SE_key : kwargs['STARNNW_NAME'] = default_nnw
    for key in ['CHECKIMAGE_TYPE', 'CHECKIMAGE_NAME']:
if key in SE_key:
kwargs.pop(key, None)
logger.warning(f'{key} is a reserved keyword. Not updated.')
return kwargs
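# --- Illustrative example (not part of the original module) ---
# A minimal sketch of merging user SExtractor keywords with required updates; the
# keyword values below are arbitrary and only meant to show that user-given keys
# are kept while missing ones are filled from kwargs_update and the defaults.
def _example_update_SE_kwargs():
    opts = update_SE_kwargs({'DETECT_MINAREA': 8},
                            kwargs_update={'DETECT_THRESH': 5, 'ANALYSIS_THRESH': 5})
    return opts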
def find_keyword_header(header, keyword,
default=None, input_val=False, raise_error=False):
""" Search keyword value in header (converted to float).
Accept a value by input if keyword is not found. """
try:
        val = float(header[keyword])
    except KeyError:
        logger.info(f"Keyword {keyword} missing in the header.")
        if input_val:
            try:
                val = float(input(f"Input a value of {keyword} :"))
except ValueError:
msg = f"Invalid {keyword} values!"
logger.error(msg)
raise ValueError(msg)
elif default is not None:
logger.info("Set {} to default value = {}".format(keyword, default))
val = default
else:
if raise_error:
msg = f"{keyword} must be specified in the header."
logger.error(msg)
raise KeyError(msg)
else:
return None
return val
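# --- Illustrative example (not part of the original module) ---
# A minimal sketch of reading a keyword with a fallback default from a FITS header
# built in memory; the keyword and value are placeholders.
def _example_find_keyword_header():
    from astropy.io import fits
    header = fits.Header()
    header['NFRAMES'] = 10
    return find_keyword_header(header, 'NFRAMES', default=1)  # -> 10.0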
def DateToday():
""" Today's date in YYYY-MM-DD """
return datetime.today().strftime('%Y-%m-%d')
def AsciiUpper(N):
""" ascii uppercase letters """
return string.ascii_uppercase[:N]
def save_pickle(data, filename, name=""):
""" Save data as pickle file. """
try:
logger.info(f"Saving {name} to {filename}")
with open(filename, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
except PicklingError:
logger.error(f"Saving {filename} failed")
def load_pickle(filename):
""" Load data as pickle file. """
logger.info(f"Read from {filename}")
if os.path.exists(filename):
with open(filename, 'rb') as f:
try:
out = pickle.load(f)
except ValueError as err:
logger.error(err)
import pickle5
out = pickle5.load(f)
return out
else:
msg = f'{filename} not found!'
logger.error(msg)
raise FileNotFoundError(msg)
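# --- Illustrative example (not part of the original module) ---
# A minimal sketch of a save/load round trip with the helpers above; the file path
# is a placeholder.
def _example_pickle_roundtrip(filename='/tmp/elderflower_example.pkl'):
    data = {'x': np.arange(3)}
    save_pickle(data, filename, name="example data")
    return load_pickle(filename)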
def clean_pickling_object(keyword):
""" Delete pickled objects defined in __main__ to avoid pickling error """
    import __main__
    for variable in list(vars(__main__)):
        if keyword in variable:
            del vars(__main__)[variable]
def load_config(filename):
""" Read a yaml configuration. """
if not filename.endswith('.yml'):
msg = f"Table {filename} is not a yaml file. Exit."
logger.error(msg)
sys.exit()
with open(filename, 'r') as f:
try:
return yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as err:
logger.error(err)
def check_config_keys(config, func):
""" List all keynames that are not of the function. """
argnames = func.__code__.co_varnames[:func.__code__.co_argcount]
extra_keys = set(config.keys()).difference(argnames)
logger.warning("{} in config are not parameters.".format(extra_keys))
def config_kwargs(func, config_file):
"""Wrap keyword arguments from a yaml configuration file."""
# Load yaml file
config = load_config(config_file)
logger.info(f"Loaded configuration file {config_file}")
# Wrap the function
@wraps(func)
def wrapper(*args, **kwargs):
config.update(kwargs)
return func(*args, **config)
return wrapper
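# --- Illustrative example (not part of the original module) ---
# A minimal sketch of config_kwargs: defaults of a toy function are overridden by a
# small yaml file written on the fly. The file path, keys and values are placeholders.
def _example_config_kwargs(tmp_yml='/tmp/example_config.yml'):
    with open(tmp_yml, 'w') as f:
        f.write('band: R\nmag_limit: 14\n')
    def fit(obj_name, band='G', mag_limit=15, **kwargs):
        return obj_name, band, mag_limit
    fit_with_config = config_kwargs(fit, tmp_yml)
    return fit_with_config('NGC3432')  # -> ('NGC3432', 'R', 14)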
| 8,489 | 30.917293 | 94 |
py
|
elderflower
|
elderflower-master/elderflower/atlas.py
|
"""
Query HLSP-Atlas using Casjobs Command Line tool
"""
import os
import time
import glob
import subprocess
import numpy as np
from shutil import copyfile
from .io import script_dir, config_dir
default_atlas_config = os.path.join(config_dir, './casjobs.config')
exe_path = os.path.join(script_dir, 'casjobs.jar')
def query_atlas(ra_range, dec_range,
wsid, password,
mag_limit=16):
"""
Query ATLAS database.
ra_range: list
dec_range: list
wsid: casjob WSID
password: casjob password
mag_limit: limiting magnitude
"""
# make a temp directory and copy atlas command line tool
os.makedirs('ATLAS', exist_ok=True)
os.chdir('ATLAS')
copyfile(exe_path, 'casjobs.jar')
copyfile(default_atlas_config, 'casjobs.config')
# replace config wsid and password
with open('casjobs.config', "r") as f:
config = f.read()
config = config.replace('YOUR_WEBSERVICES_ID', str(wsid))
config = config.replace('YOUR_PASSWORD', str(password))
with open('casjobs.config', "w") as f:
f.write(config)
# write script
ra_min, ra_max = np.around([ra_range[0], ra_range[1]], 5)
dec_min, dec_max = np.around([dec_range[0], dec_range[1]], 5)
casjobs_script = f"""#!/bin/bash
casjobs_path=casjobs.jar
java -jar $casjobs_path execute "select RA, Dec, g, r from refcat2 into mydb.atlas where ra between {ra_min} and {ra_max} and dec between {dec_min} and {dec_max} and g <= {mag_limit}"
java -jar $casjobs_path extract -b atlas -F -type csv -d
java -jar $casjobs_path execute -t "mydb" -n "drop query" "drop table atlas"
"""
with open('casjobs_atlas.sh', 'w') as f:
f.write(casjobs_script)
# run script
out = subprocess.Popen('sh casjobs_atlas.sh', stdout=subprocess.PIPE, shell=True)
os.chdir('../')
# rename
# fn_out = 'ATLAS/cat_atlas.csv'
# time.sleep(0.1)
# table = glob.glob('ATLAS/atlas_*.csv')[-1]
# os.rename(table, fn_out)
return out
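# --- Illustrative example (not part of the original module) ---
# A minimal sketch of a query call; the coordinates are arbitrary and the WSID and
# password are placeholders for the user's own CasJobs credentials.
def _example_query_atlas():
    return query_atlas(ra_range=[150.0, 150.5], dec_range=[30.0, 30.5],
                       wsid='YOUR_WSID', password='YOUR_PASSWORD', mag_limit=16)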
| 2,045 | 27.816901 | 183 |
py
|
elderflower
|
elderflower-master/elderflower/parallel.py
|
"""
Submodule for parallel computing
Adapted from https://github.com/pycroscopy/sidpy (S.Somnath, C.Smith)
"""
import numpy as np
import joblib
def parallel_compute(data, func, cores=None, lengthy_computation=False, func_args=None, func_kwargs=None, verbose=False):
"""
Computes the provided function using multiple cores using the joblib library
Parameters
----------
data : numpy.ndarray
Data to map function to. Function will be mapped to the first axis of data
func : callable
Function to map to data
cores : uint, optional
Number of logical cores to use to compute
        Default - all logical cores minus 1 (if total cores <= 4) or minus 2 (if more than 4).
lengthy_computation : bool, optional
Whether or not each computation is expected to take substantial time.
Sometimes the time for adding more cores can outweigh the time per core
Default - False
func_args : list, optional
arguments to be passed to the function
func_kwargs : dict, optional
keyword arguments to be passed onto function
verbose : bool, optional. default = False
Whether or not to print statements that aid in debugging
Returns
-------
results : list
List of computational results
"""
if not callable(func):
raise TypeError('Function argument is not callable')
if not isinstance(data, np.ndarray):
raise TypeError('data must be a numpy array')
if func_args is None:
func_args = list()
else:
if isinstance(func_args, tuple):
func_args = list(func_args)
if not isinstance(func_args, list):
raise TypeError('Arguments to the mapped function should be specified as a list')
if func_kwargs is None:
func_kwargs = dict()
else:
if not isinstance(func_kwargs, dict):
raise TypeError('Keyword arguments to the mapped function should be specified via a dictionary')
req_cores = cores
rank = 0
cores = recommend_cpu_cores(data.shape[0],
requested_cores=cores,
lengthy_computation=lengthy_computation,
verbose=verbose)
if verbose:
print('Rank {} starting computing on {} cores (requested {} cores)'.format(rank, cores, req_cores))
if cores > 1:
values = [joblib.delayed(func)(x, *func_args, **func_kwargs) for x in data]
results = joblib.Parallel(n_jobs=cores, backend='multiprocessing')(values)
# Finished reading the entire data set
if verbose:
print('Rank {} finished parallel computation'.format(rank))
else:
if verbose:
print("Rank {} computing serially ...".format(rank))
# List comprehension vs map vs for loop?
# https://stackoverflow.com/questions/1247486/python-list-comprehension-vs-map
results = [func(vector, *func_args, **func_kwargs) for vector in data]
return results
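# --- Illustrative example (not part of the original module) ---
# A minimal sketch mapping a row-wise reduction over a tiny array; with so few rows
# and a cheap function, the helper above falls back to serial computation.
def _example_parallel_compute():
    data = np.arange(12.).reshape(4, 3)
    return parallel_compute(data, np.sum)  # -> [3.0, 12.0, 21.0, 30.0]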
def recommend_cpu_cores(num_jobs, requested_cores=None, lengthy_computation=False, min_free_cores=None, verbose=False):
"""
Decides the number of cores to use for parallel computing
Parameters
----------
num_jobs : unsigned int
Number of times a parallel operation needs to be performed
requested_cores : unsigned int (Optional. Default = None)
Number of logical cores to use for computation
lengthy_computation : Boolean (Optional. Default = False)
Whether or not each computation takes a long time. If each computation is quick, it may not make sense to take
a hit in terms of starting and using a larger number of cores, so use fewer cores instead.
Eg- BE SHO fitting is fast (<1 sec) so set this value to False,
Eg- Bayesian Inference is very slow (~ 10-20 sec)so set this to True
min_free_cores : uint (Optional, default = 1 if number of logical cores < 5 and 2 otherwise)
        Number of CPU cores that should not be used
verbose : Boolean (Optional. Default = False)
Whether or not to print statements that aid in debugging
Returns
-------
requested_cores : unsigned int
Number of logical cores to use for computation
"""
from multiprocess import cpu_count
logical_cores = cpu_count()
if min_free_cores is not None:
if not isinstance(min_free_cores, int):
raise TypeError('min_free_cores should be an unsigned integer')
if min_free_cores < 0 or min_free_cores >= logical_cores:
raise ValueError('min_free_cores should be an unsigned integer less than the number of logical cores')
if verbose:
print('Number of requested free CPU cores: {} was accepted'.format(min_free_cores))
else:
if logical_cores > 4:
min_free_cores = 2
else:
min_free_cores = 1
if verbose:
print('Number of CPU free cores set to: {} given that the CPU has {} logical cores'
'.'.format(min_free_cores, logical_cores))
max_cores = max(1, logical_cores - min_free_cores)
if requested_cores is None:
# conservative allocation
if verbose:
print('No requested_cores given. Using estimate of {}.'.format(max_cores))
requested_cores = max_cores
else:
if not isinstance(requested_cores, int):
raise TypeError('requested_cores should be an unsigned integer')
if verbose:
print('{} cores requested.'.format(requested_cores))
if requested_cores < 0 or requested_cores > logical_cores:
            # Clip the explicit request into a valid range
requested_cores = max(min(int(abs(requested_cores)), logical_cores), 1)
if verbose:
print('Clipped explicit request for CPU cores to: {}'.format(requested_cores))
if not isinstance(num_jobs, int):
raise TypeError('num_jobs should be an unsigned integer')
if num_jobs < 1:
raise ValueError('num_jobs should be greater than 0')
jobs_per_core = max(int(num_jobs / requested_cores), 1)
min_jobs_per_core = 10 # I don't like to hard-code things here but I don't have a better idea for now
if verbose:
print('computational jobs per core = {}. For short computations, each core must have at least {} jobs to '
'warrant parallel computation.'.format(jobs_per_core, min_jobs_per_core))
if not lengthy_computation:
if verbose:
print('Computations are not lengthy.')
if requested_cores > 1 and jobs_per_core < min_jobs_per_core:
# cut down the number of cores if there are too few jobs
jobs_per_core = 2 * min_jobs_per_core
# intelligently set the cores now.
requested_cores = max(1, min(requested_cores, int(num_jobs / jobs_per_core)))
if verbose:
print('Not enough jobs per core. Reducing cores to {}'.format(requested_cores))
return int(requested_cores)
| 7,075 | 41.884848 | 121 |
py
|
elderflower
|
elderflower-master/elderflower/mask.py
|
import os
import math
import warnings
import numpy as np
from astropy.io import fits
from astropy.coordinates import SkyCoord
import astropy.units as u
from .io import logger
from .modeling import Stars
from .utils import background_extraction, crop_pad
from . import DF_pixel_scale
mask_param_default = dict(
mask_type='aper',
r_core=24,
r_out=None,
sn_thre=2.5,
SB_threshold=24.5,
mask_obj=None,
width_ring=1.5,
width_cross=10,
k_mask_ext=5,
k_mask_cross=2,
dist_cross=180,
width_strip=24,
n_strip=48,
dist_strip=1800,
clean=True)
"""
mask_param: Parameters setting up the mask map.
r_core : int or [int, int], default 24
Radius (in pix) for the inner mask of [very, medium]
bright stars. Default is 1' for Dragonfly.
r_out : int or [int, int] or None, default None
Radius (in pix) for the outer mask of [very, medium]
bright stars. If None, turn off outer mask.
sn_thre : float, default 2.5
SNR threshold used for deep mask.
mask_obj : str, file path
Path to the ibject mask file. See mask.make_mask_object
width_ring : float, default 1.5
Half-width in arcsec of ring used to measure the scaling.
width_cross : float, default 10
Half-width in arcsec of the spike mask when measuring the scaling.
k_mask_ext: int, default 5
Enlarge factor for A and B of masks of extended sources.
k_mask_cross : float, default 2
Enlarge factor for the width of the spike mask for fitting.
dist_cross: float, default 180
Range of each spike mask (in arcsec) for fitting
width_strip : float, default 0.5 arcmin
Half-width of each strip mask (in arcsec)
n_strip : int, default 48
Number of strip mask.
dist_strip : float, default 0.5 deg
range of each strip mask (in arcsec)
clean : bool, default True
Whether to remove medium bright stars far from any available
pixels for fitting. A new Stars object will be stored in
stars_new, otherwise it is simply a copy.
"""
class Mask:
""" Class for masking sources """
def __init__(self, Image, stars, verbose=True):
"""
Parameters
----------
Image : an Image class
stars : a Star object
"""
self.Image = Image
self.stars = stars
self.image0 = Image.image0
self.image_shape0 = Image.image0.shape
self.pixel_scale = Image.pixel_scale
self.bounds0 = Image.bounds0
self.image_shape = Image.image_shape
self.nX = Image.image_shape[1]
self.nY = Image.image_shape[0]
self.pad = Image.pad
self.yy, self.xx = np.mgrid[:self.nY + 2 * self.pad,
:self.nX + 2 * self.pad]
self.pad = Image.pad
self.bkg = Image.bkg
self.verbose = verbose
def __str__(self):
return "A Mask Class"
def __repr__(self):
return f"{self.__class__.__name__} for {repr(self.Image)}"
@property
def mask_base(self):
mask_base0 = getattr(self, 'mask_base0', self.mask_deep0)
return crop_pad(mask_base0, self.pad)
@property
def seg_base(self):
seg_base0 = getattr(self, 'seg_base0', self.seg_deep0)
return crop_pad(seg_base0, self.pad)
@property
def mask_deep(self):
return crop_pad(self.mask_deep0, self.pad)
@property
def seg_deep(self):
return crop_pad(self.seg_deep0, self.pad)
@property
def mask_comb(self):
return crop_pad(self.mask_comb0, self.pad)
@property
def seg_comb(self):
return crop_pad(self.seg_comb0, self.pad)
@property
def mask_fit(self):
""" Mask for fit """
return getattr(self, 'mask_comb', self.mask_deep)
def make_mask_object(self, mask_obj=None, file_obj=None,
wcs=None, enlarge=3):
"""
Read an object mask map (e.g. giant galaxies) or make one
using elliptical apertures with shape parameters.
Parameters
----------
mask_obj : str, default None
Object mask file name
file_obj : str, default None
Ascii file (.txt) that stores shape parameters (wcs is needed).
wcs: astropy.wcs.WCS
WCS of the image if making new mask.
Note this is the full wcs, not cropped one
enlarge : int, default 3
Enlargement factor
Notes
-----
If mask_obj (e.g., {obj_name}_maskobj.fits) exists, use it as the object mask.
Otherwise, it looks for a file_obj ({obj_name}_shape.txt) and make a new one.
The txt must have following parameters in each row, starting at line 1:
        pos : tuple or array of tuples
            position(s) [RA, Dec] of aperture centers
        a_ang : float or 1d array
            semi-major axis length(s) in arcsec
        b_ang : float or 1d array
            semi-minor axis length(s) in arcsec
        PA_ang : float or 1d array
            position angle (counter-clockwise from north) in degree
"""
if mask_obj is not None:
if os.path.isfile(mask_obj):
msg = f"Read mask map of objects: {os.path.abspath(mask_obj)}"
# read existed mask map
self.mask_obj_field = fits.getdata(mask_obj).astype(bool)
else:
msg = "Object mask not found. Skip."
if self.verbose:
logger.info(msg)
elif file_obj is not None:
if os.path.isfile(file_obj) == False:
if self.verbose:
logger.warning(f"{file_obj} is not found!")
return None
if wcs is None:
logger.warning("WCS is not given!")
return None
if self.verbose:
msg = f"Read shape parameters of objects from {os.path.abspath(file_obj)}"
logger.info(msg)
# read shape parameters from file
            par = np.atleast_2d(np.loadtxt(file_obj))
            pos = par[:,:2] # [RA, Dec] as first two columns
a_ang, b_ang, PA_ang = par[:,2], par[:,3], par[:,4]
# make mask map with parameters
self.mask_obj_field = make_mask_aperture(pos, a_ang, b_ang,
PA_ang, wcs,
enlarge=enlarge,
pixel_scale=self.pixel_scale)
else:
return None
def make_mask_map_deep(self, dir_measure=None, mask_type='aper',
r_core=None, r_out=None, count=None,
draw=True, save=False, save_dir='.',
obj_name='', band='G', *args, **kwargs):
"""
Make deep mask map of bright stars based on either of:
(1) aperture (2) brightness
        The mask map is then combined with a base segmentation map (if given, for masking sources below the S/N threshold) and an S/N segmentation map (for masking bright sources/features not contained in the catalog).
Parameters
----------
mask_type : 'aper' or 'brightness', optional
"aper": aperture-like masking (default)
"brightness": brightness-limit masking
r_core : core radius of [medium, very bright] stars to be masked
count : absolute count (in ADU) above which is masked
obj_name : name of object
band : filter name. r/R/G/g
draw : whether to draw mask map
save : whether to save the image
save_dir : path of saving
"""
image0 = self.image0
stars = self.stars
pad = self.pad
if dir_measure is not None:
bounds0 = self.bounds0
range_str = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bounds0)
fname_seg = "%s-segm_%s_catalog_%s.fits"\
%(obj_name, band.lower(), range_str)
fname_seg_base = os.path.join(dir_measure, fname_seg)
logger.info(f"Read mask map built from catalog: {fname_seg_base}")
# Try finding basement segment map generated by catalog
if os.path.isfile(fname_seg_base) is False:
if self.verbose:
logger.warning(f"{fname_seg_base} doe not exist. Only use SExtractor's.")
seg_base0 = None
else:
seg_base0 = fits.getdata(fname_seg_base)
            self.seg_base0 = seg_base0
            if seg_base0 is not None:
                self.mask_base0 = seg_base0 > 0
else:
seg_base0 = None
# S/N + Core mask
mask_deep0, seg_deep0 = make_mask_map_dual(image0, stars, self.xx, self.yy,
mask_type=mask_type,
pad=pad, seg_base=seg_base0,
r_core=r_core, r_out=r_out, count=count,
n_bright=stars.n_bright,
**kwargs)
# combine with object mask
mask_obj0 = self.mask_obj0
        mask_deep0 = mask_deep0 | mask_obj0
seg_deep0[mask_obj0] = seg_deep0.max() + 1
self.mask_deep0 = mask_deep0
self.seg_deep0 = seg_deep0
self.r_core = r_core
self.r_core_m = min(np.unique(r_core))
self.count = count
# Display mask
if draw:
from .plotting import draw_mask_map
draw_mask_map(image0, seg_deep0, mask_deep0, stars,
pad=pad, r_core=r_core, r_out=r_out,
save=save, save_dir=save_dir)
def make_mask_advanced(self, n_strip=48,
wid_strip=30, dist_strip=1800,
wid_cross=20, dist_cross=180,
clean=True, draw=True,
save=False, save_dir='.'):
"""
Make spider-like mask map and mask stellar spikes for bright stars.
The spider-like mask map is to reduce sample size of pixels at large
radii, equivalent to assign lower weights to outskirts.
Note: make_mask_map_deep() need to be run first.
Parameters
----------
n_strip : number of each strip mask
wid_strip : half-width of each strip mask (in arcsec) (default: 0.5 arcmin)
dist_strip : range of each strip mask (in arcsec) (default: 0.5 deg)
wid_cross : half-width of spike mask (in arcsec) (default: 20 arcsec)
dist_cross : range of each spike mask (in arcsec) (default: 3 arcmin)
clean : whether to remove medium bright stars far from any available
pixels for fitting. A new Stars object will be stored in
stars_new, otherwise it is simply a copy.
draw : whether to draw mask map
save : whether to save the image
save_dir : path of saving
"""
if hasattr(self, 'mask_deep0') is False:
return None
image0 = self.image0
stars = self.stars
pad = self.pad
pixel_scale = self.pixel_scale
dist_strip_pix = dist_strip / pixel_scale
dist_cross_pix = dist_cross / pixel_scale
wid_strip_pix = wid_strip / pixel_scale
wid_cross_pix = wid_cross / pixel_scale
if stars.n_verybright > 0:
# Strip + Cross mask
mask_strip_s, mask_cross_s = make_mask_strip(stars, self.xx, self.yy,
pad=pad, n_strip=n_strip,
wid_strip=wid_strip_pix,
dist_strip=dist_strip_pix,
wid_cross=wid_cross_pix,
dist_cross=dist_cross_pix)
# combine strips
mask_strip_all = ~np.logical_or.reduce(mask_strip_s)
mask_cross_all = ~np.logical_or.reduce(mask_cross_s)
seg_deep0 = self.seg_deep0
# combine deep, crosses and strips
seg_comb0 = seg_deep0.copy()
ma_extra = (mask_strip_all|~mask_cross_all) & (seg_deep0==0)
seg_comb0[ma_extra] = seg_deep0.max()-2
mask_comb0 = (seg_comb0!=0)
# assign attribute
self.mask_comb0 = mask_comb0
self.seg_comb0 = seg_comb0
# example mask for the brightest star
ma_example = mask_strip_s[0], mask_cross_s[0]
else:
if self.verbose:
msg = "No very bright stars in the field! Will skip the mask."
msg += " Try lower thresholds."
logger.warning(msg)
self.seg_comb0 = seg_comb0 = self.seg_deep0
self.mask_comb0 = mask_comb0 = (seg_comb0!=0)
ma_example = None
clean = False
# Clean medium bright stars far from bright stars
if clean:
from .utils import clean_isolated_stars
clean = clean_isolated_stars(self.xx, self.yy, mask_comb0,
stars.star_pos, pad=pad)
if stars.n_verybright > 0:
clean[stars.Flux >= stars.F_verybright] = False
z_norm_clean = stars.z_norm[~clean] if hasattr(stars, 'z_norm') else None
stars_new = Stars(stars.star_pos[~clean], stars.Flux[~clean],
stars.Flux_threshold, z_norm=z_norm_clean,
r_scale=stars.r_scale, BKG=stars.BKG)
self.stars_new = stars_new
else:
self.stars_new = stars.copy()
# Display mask
if draw:
from .plotting import draw_mask_map_strip
draw_mask_map_strip(image0, seg_comb0, mask_comb0,
self.stars_new, r_core=self.r_core,
ma_example=ma_example, pad=pad,
save=save, save_dir=save_dir)
def make_mask_aperture(pos, A_ang, B_ang, PA_ang, wcs,
                       enlarge=3, pixel_scale=DF_pixel_scale, save=True, fname=None):
"""
Make mask map with elliptical apertures.
Parameters
----------
pos : 1d or 2d array
[RA, Dec] coordinate(s) of aperture centers
A_ang, B_ang : float or 1d array
semi-major/minor axis length(s) in arcsec
PA_ang : float or 1d array
        position angle (counter-clockwise from north) in degree
wcs : astropy.wcs.WCS
enlarge : float
enlargement factor
pixel_scale : float
pixel scale in arcsec/pixel
save : bool
whether to save the mask
    fname : str
        name of saved mask file (required for saving if save=True)
Returns
----------
mask : 2d array mask map (masked area = 1)
"""
from photutils import EllipticalAperture
shape = wcs.array_shape
mask = np.zeros(shape, dtype=bool)
if np.ndim(pos) == 1:
RA, Dec = pos
elif np.ndim(pos) == 2:
RA, Dec = pos[:,0], pos[:,1]
# shape properties of apertures
aper_props = np.atleast_2d(np.array([RA, Dec, A_ang, B_ang, PA_ang]).T)
for ra, dec, a_ang, b_ang, pa_ang in aper_props:
# convert coordinates to positions
coords = SkyCoord(f'{ra} {dec}', unit=u.deg)
        pos = wcs.all_world2pix(ra, dec, 0) # 0-based origin, as in photutils
# convert angular to pixel unit
a_pix = a_ang / pixel_scale
b_pix = b_ang / pixel_scale
# correct PA to theta in photutils (from +x axis)
theta = np.mod(pa_ang+90, 360) * np.pi/180
# make elliptical aperture
aper = EllipticalAperture(pos, enlarge*a_pix, enlarge*b_pix, theta)
# convert aperture to mask
ma_aper = aper.to_mask(method='center')
ma = ma_aper.to_image(shape).astype(bool)
mask[ma] = 1.0
    if save and fname is not None:
        fits.writeto(fname, mask, overwrite=True)
return mask
def make_mask_map_core(image_shape, star_pos, r_core=12):
""" Make stars out to r_core """
# mask core
yy, xx = np.indices(image_shape)
mask_core = np.zeros(image_shape, dtype=bool)
if np.ndim(r_core) == 0:
r_core = np.ones(len(star_pos)) * r_core
core_region= np.logical_or.reduce([np.sqrt((xx-pos[0])**2+(yy-pos[1])**2) < r for (pos,r) in zip(star_pos,r_core)])
mask_core[core_region] = 1
segmap = mask_core.astype(int).copy()
return mask_core, segmap
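# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the core mask above: two arbitrary star positions are masked
# out to 12 pixels on a 100x100 image.
def _example_make_mask_map_core():
    star_pos = np.array([[20., 30.], [80., 60.]])
    mask_core, segmap = make_mask_map_core((100, 100), star_pos, r_core=12)
    return mask_core, segmap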
def make_mask_map_dual(image, stars,
xx=None, yy=None,
mask_type='aper', pad=0,
r_core=24, r_out=None,
count=None, seg_base=None,
n_bright=25, sn_thre=3,
nlevels=64, contrast=0.001,
npix=4, b_size=64,
verbose=True):
"""
Make mask map in dual mode:
for faint stars, mask with S/N > sn_thre;
for bright stars, mask core (r < r_core pix).
Parameters
----------
Image : an Image class
stars : a Star object
Returns
-------
mask_deep : mask map
segmap : segmentation map
"""
from photutils import detect_sources, deblend_sources
from photutils.segmentation import SegmentationImage
if (xx is None) | (yy is None):
yy, xx = np.mgrid[:image.shape[0]+2*pad, :image.shape[1]+2*pad]
star_pos = stars.star_pos_bright + pad
if mask_type == 'aper':
if len(np.unique(r_core)) == 1:
r_core_A, r_core_B = r_core, r_core
r_core_s = np.ones(len(star_pos)) * r_core
else:
r_core_A, r_core_B = r_core[:2]
r_core_s = np.array([r_core_A if F >= stars.F_verybright else r_core_B
for F in stars.Flux_bright])
if r_out is not None:
if len(np.unique(r_out)) == 1:
r_out_A, r_out_B = r_out, r_out
                r_out_s = np.ones(len(star_pos)) * r_out
else:
r_out_A, r_out_B = r_out[:2]
r_out_s = np.array([r_out_A if F >= stars.F_verybright else r_out_B
for F in stars.Flux_bright])
if verbose:
logger.info("Mask outer regions: r > %d (%d) pix "%(r_out_A, r_out_B))
if sn_thre is not None:
if verbose:
logger.info("Detect and deblend source... Mask S/N > %.1f"%(sn_thre))
# detect all source first
back, back_rms = background_extraction(image, b_size=b_size)
threshold = back + (sn_thre * back_rms)
segm0 = detect_sources(image, threshold, npixels=npix)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# deblend source
segm_deb = deblend_sources(image, segm0, npixels=npix,
nlevels=nlevels, contrast=contrast)
# for pos in star_pos:
# if (min(pos[0],pos[1]) > 0) & (pos[0] < image.shape[0]) & (pos[1] < image.shape[1]):
# star_lab = segmap[coord_Im2Array(pos[0], pos[1])]
# segm_deb.remove_label(star_lab)
segmap = segm_deb.data.copy()
max_lab = segm_deb.max_label
# remove S/N mask map for input (bright) stars
for pos in star_pos:
rr2 = (xx-pos[0])**2+(yy-pos[1])**2
lab = segmap[np.where(rr2==np.min(rr2))][0]
segmap[segmap==lab] = 0
if seg_base is not None:
segmap2 = seg_base
if sn_thre is not None:
# Combine Two mask
segmap[segmap2>n_bright] = max_lab + segmap2[segmap2>n_bright]
segm_deb = SegmentationImage(segmap)
else:
# Only use seg_base, bright stars are aggressively masked
segm_deb = SegmentationImage(segmap2)
max_lab = segm_deb.max_label
if mask_type == 'aper':
# mask core for bright stars out to given radii
if verbose:
logger.info("Mask core regions: r < %d (VB) /%d (MB) pix"%(r_core_A, r_core_B))
core_region = np.logical_or.reduce([np.sqrt((xx-pos[0])**2+(yy-pos[1])**2) < r
for (pos,r) in zip(star_pos,r_core_s)])
mask_star = core_region.copy()
if r_out is not None:
# mask outer region for bright stars out to given radii
outskirt = np.logical_and.reduce([np.sqrt((xx-pos[0])**2+(yy-pos[1])**2) > r
for (pos,r) in zip(star_pos,r_out_s)])
mask_star = (mask_star) | (outskirt)
elif mask_type == 'brightness':
# If count is not given, use 5 sigma above background.
if count is None:
count = np.mean(back + (5 * back_rms))
# mask core for bright stars below given ADU count
if verbose:
logger.info("Mask core regions: Count > %.2f ADU "%count)
mask_star = image >= count
segmap[mask_star] = max_lab+1
# set dilation border a different label (for visual)
segmap[(segmap!=0)&(segm_deb.data==0)] = max_lab+2
# set mask map
mask_deep = (segmap!=0)
return mask_deep, segmap
def make_mask_strip(stars, xx, yy, pad=0, n_strip=24,
wid_strip=12, dist_strip=720,
wid_cross=8, dist_cross=72, verbose=True):
""" Make mask map in strips with width *in pixel unit* """
if verbose:
logger.info("Making sky strips crossing very bright stars...")
if stars.n_verybright>0:
mask_strip_s = np.empty((stars.n_verybright, xx.shape[0], xx.shape[1]))
mask_cross_s = np.empty_like(mask_strip_s)
else:
return None, None
star_pos = stars.star_pos_verybright + pad
phi_s = np.linspace(-90, 90, n_strip+1)
a_s = np.tan(phi_s*np.pi/180)
for k, (x_b, y_b) in enumerate(star_pos):
m_s = (y_b-a_s*x_b)
mask_strip = np.logical_or.reduce([abs((yy-a*xx-m)/math.sqrt(1+a**2)) < wid_strip
for (a, m) in zip(a_s, m_s)])
mask_cross = np.logical_or.reduce([abs(yy-y_b)<wid_cross, abs(xx-x_b)<wid_cross])
dist_map1 = np.sqrt((xx-x_b)**2+(yy-y_b)**2) < dist_strip
dist_map2 = np.sqrt((xx-x_b)**2+(yy-y_b)**2) < dist_cross
mask_strip_s[k] = mask_strip & dist_map1
mask_cross_s[k] = mask_cross & dist_map2
return mask_strip_s, mask_cross_s
| 23,378 | 35.415888 | 196 |
py
|
elderflower
|
elderflower-master/elderflower/detection.py
|
"""
Submodule for running SExtractor from dfreduce (credit: Johnny Greco).
Will be replaced by an independent module.
"""
import os
from subprocess import call
import numpy as np
import pandas as pd
from astropy.io import ascii, fits
from .io import logger
from .io import config_dir
# default SExtractor paths and files
input_file_path = os.path.join(config_dir, 'sextractor')
kernel_path = os.path.join(input_file_path, 'kernels')
default_SE_config = os.path.join(input_file_path, 'default.sex')
default_param_file = os.path.join(input_file_path, 'default.param')
default_run_config = os.path.join(input_file_path, 'default.config')
default_nnw = os.path.join(input_file_path, 'default.nnw')
default_conv = os.path.join(kernel_path, 'default.conv')
# get list of all config options
with open(os.path.join(input_file_path, 'config_options.txt'), 'r') as file:
all_option_names = [line.rstrip() for line in file]
# get list of all SExtractor measurement parameters
default_params = ['X_IMAGE', 'Y_IMAGE', 'FLUX_AUTO', 'FLUX_RADIUS',
'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', 'FWHM_IMAGE', 'FLAGS']
with open(os.path.join(input_file_path, 'all_params.txt'), 'r') as file:
all_param_names = [line.rstrip() for line in file]
# default non-standard options
default_options = dict(
BACK_SIZE=128,
DETECT_THRESH=4,
DETECT_MINAREA=4,
ANALYSIS_THRESH=4,
GAIN=0.37,
PHOT_APERTURES=6,
PIXEL_SCALE=2.5,
SEEING_FWHM=2.5,
VERBOSE_TYPE='QUIET',
MEMORY_BUFSIZE=4096,
MEMORY_OBJSTACK=30000,
MEMORY_PIXSTACK=3000000,
PARAMETERS_NAME=default_param_file,
FILTER_NAME=default_conv,
STARNNW_NAME=default_nnw
)
def temp_fits_file(path_or_pixels, tmp_path='/tmp', run_label=None,
prefix='tmp', header=None):
is_str = type(path_or_pixels) == str or type(path_or_pixels) == np.str_
if is_str and header is None:
path = path_or_pixels
created_tmp = False
else:
if is_str:
path_or_pixels = fits.getdata(path_or_pixels)
label = '' if run_label is None else '_' + run_label
fn = '{}{}.fits'.format(prefix, label)
path = os.path.join(tmp_path, fn)
fits.writeto(path, path_or_pixels, header=header, overwrite=True)
created_tmp = True
return path, created_tmp
def is_list_like(check):
t = type(check)
    c = t == list or t == np.ndarray or isinstance(check, (pd.Series, pd.Index))
return c
def list_of_strings(str_or_list):
"""
Return a list of strings from a single string of comma-separated values.
"""
if is_list_like(str_or_list):
ls_str = str_or_list
elif type(str_or_list) == str:
ls_str = str_or_list.replace(' ', '').split(',')
else:
        raise Exception('{} is not correct type for list of str'.format(str_or_list))
return ls_str
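# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the parsing helper above: both call forms yield
# ['FLUX_RADIUS', 'ELLIPTICITY'].
def _example_list_of_strings():
    return list_of_strings('FLUX_RADIUS, ELLIPTICITY'), \
           list_of_strings(['FLUX_RADIUS', 'ELLIPTICITY'])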
def run(path_or_pixels, catalog_path=None, config_path=default_run_config,
executable='source-extractor', tmp_path='/tmp', run_label=None, header=None,
extra_params=None, **sextractor_options):
"""
Run SExtractor.
Parameters
----------
path_or_pixels : str
Full path file name to the fits image -- or -- The image pixels as
a numpy array. In the latter case, a temporary fits file will be
written in tmp_path with an optional run_label to make the temp file
name unique (this is useful if you are running in parallel).
catalog_path : str (optional)
If not None, the full path file name of the output catalog. If None,
a temporary catalog will be written in tmp_path with a
run_label (if it's not None).
config_path : str (optional)
Full path SExtractor configuration file name.
executable : str (optional)
The SExtractor executable name (full path if necessary)
tmp_path : str (optional)
Path for temporary fits files if you pass image pixels to
this function.
run_label : str (optional)
A unique label for the temporary files.
header : astropy.io.fits.Header (optional)
Image header if you pass image pixels to this function and want
SExtractor to have the header information.
extra_params: str or list-like (optional)
Additional SE measurement parameters. The default parameters, which
are always in included, are the following:
X_IMAGE, Y_IMAGE, FLUX_AUTO, FLUX_RADIUS, FWHM_IMAGE, A_IMAGE,
B_IMAGE, THETA_IMAGE, FLAGS
**sextractor_options: Keyword arguments
Any SExtractor configuration option.
Returns
-------
catalog : astropy.Table
The SExtractor source catalog.
Notes
-----
You must have SExtractor installed to run this function.
The 'sextractor_options' keyword arguments may be passed one at a time or
as a dictionary, exactly the same as **kwargs.
Example:
# like this
cat = sextractor.run(image_fn, cat_fn, FILTER='N', DETECT_THRESH=10)
# or like this
options = dict(FILTER='N', DETECT_THRESH=10)
cat = sextractor.run(image_fn, cat_fn, **options)
# extra_params can be given in the following formats
extra_params = 'FLUX_RADIUS'
extra_params = 'FLUX_RADIUS,ELLIPTICITY'
extra_params = 'FLUX_RADIUS, ELLIPTICITY'
extra_params = ['FLUX_RADIUS', 'ELLIPTICITY']
# (it is case-insensitive)
"""
image_path, created_tmp = temp_fits_file(path_or_pixels,
tmp_path=tmp_path,
run_label=run_label,
prefix='se_tmp',
header=header)
logger.debug('Running SExtractor on ' + image_path)
# update config options
final_options = default_options.copy()
for k, v in sextractor_options.items():
k = k.upper()
if k not in all_option_names:
msg = '{} is not a valid SExtractor option -> we will ignore it!'
logger.warning(msg.format(k))
else:
logger.debug('SExtractor config update: {} = {}'.format(k, v))
final_options[k] = v
# create catalog path if necessary
if catalog_path is not None:
cat_name = catalog_path
save_cat = True
else:
label = '' if run_label is None else '_' + run_label
cat_name = os.path.join(tmp_path, 'se{}.cat'.format(label))
save_cat = False
# create and write param file if extra params were given
param_fn = None
if extra_params is not None:
extra_params = list_of_strings(extra_params)
params = default_params.copy()
for par in extra_params:
p = par.upper()
_p = p[:p.find('(')] if p.find('(') > 0 else p
if _p not in all_param_names:
msg = '{} is not a valid SExtractor param -> we will ignore it!'
logger.warning(msg.format(p))
elif _p in default_params:
msg = '{} is a default parameter -> No need to add it!'
logger.warning(msg.format(p))
else:
params.append(p)
if len(params) > len(default_params):
label = '' if run_label is None else '_' + run_label
param_fn = os.path.join(tmp_path, 'params{}.se'.format(label))
with open(param_fn, 'w') as f:
logger.debug('Writing parameter file to ' + param_fn)
print('\n'.join(params), file=f)
final_options['PARAMETERS_NAME'] = param_fn
# build shell command
cmd = executable + ' -c {} {}'.format(config_path, image_path)
cmd += ' -CATALOG_NAME ' + cat_name
for k, v in final_options.items():
cmd += ' -{} {}'.format(k.upper(), v)
if param_fn is not None:
cmd += ' -PARAMETERS_NAME ' + param_fn
# run it
logger.debug(f'>> {cmd}')
call(cmd, shell=True)
if 'CATALOG_TYPE' not in final_options.keys():
catalog = ascii.read(cat_name)
elif final_options['CATALOG_TYPE'] == 'ASCII_HEAD':
catalog = ascii.read(cat_name)
else:
catalog = None
if created_tmp:
logger.debug('Deleting temporary file ' + image_path)
os.remove(image_path)
if param_fn is not None:
logger.debug('Deleting temporary file ' + param_fn)
os.remove(param_fn)
if not save_cat:
logger.debug('Deleting temporary file ' + cat_name)
os.remove(cat_name)
return catalog
| 8,487 | 35.429185 | 84 |
py
|
elderflower
|
elderflower-master/elderflower/crossmatch.py
|
import os
import re
import sys
import math
import random
import warnings
import numpy as np
from astropy import wcs
from astropy import units as u
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column, join, vstack
from .io import logger
from .utils import transform_coords2pixel
from .utils import crop_catalog, merge_catalog, SE_COLUMNS
from . import DF_pixel_scale, DF_raw_pixel_scale
def query_vizier(catalog_name, radius, columns, column_filters, header=None, coord=None):
""" Query catalog in Vizier database with the given catalog name,
search radius and column names. If coords is not given, look for fits header """
from astroquery.vizier import Vizier
    # Prepare for querying Vizier with filters; allow unlimited rows (the default is 50)
    viz_filt = Vizier(columns=columns, column_filters=column_filters)
    viz_filt.ROW_LIMIT = -1
    if coord is None:
RA, DEC = re.split(",", header['RADEC'])
coord = SkyCoord(RA+" "+DEC , unit=(u.hourangle, u.deg))
# Query!
result = viz_filt.query_region(coord, radius=radius,
catalog=[catalog_name])
return result
def cross_match(wcs_data, SE_catalog, bounds, radius=None,
pixel_scale=DF_pixel_scale, mag_limit=15, sep=3*u.arcsec,
clean_catalog=True, mag_name='rmag',
catalog={'Pan-STARRS': 'II/349/ps1'},
columns={'Pan-STARRS': ['RAJ2000', 'DEJ2000', 'e_RAJ2000', 'e_DEJ2000',
'objID', 'Qual', 'gmag', 'e_gmag', 'rmag', 'e_rmag']},
column_filters={'Pan-STARRS': {'rmag':'{0} .. {1}'.format(5, 22)}},
magnitude_name={'Pan-STARRS':['rmag','gmag']},
verbose=True):
"""
Cross match SExtractor catalog with Vizier Online catalog.
'URAT': 'I/329/urat1'
magnitude_name: "rmag"
columns: ['RAJ2000', 'DEJ2000', 'mfa', 'gmag', 'e_gmag', 'rmag', 'e_rmag']
column_filters: {'mfa':'=1', 'rmag':'{0} .. {1}'.format(8, 18)}
'USNO': 'I/252/out'
magnitude_name: "Rmag"
columns: ['RAJ2000', 'DEJ2000', 'Bmag', 'Rmag']
column_filters: {"Rmag":'{0} .. {1}'.format(5, 15)}
"""
cen = (bounds[2]+bounds[0])/2., (bounds[3]+bounds[1])/2.
coord_cen = wcs_data.pixel_to_world(cen[0], cen[1])
if radius is None:
L = math.sqrt((cen[0]-bounds[0])**2 + (cen[1]-bounds[1])**2)
radius = L * pixel_scale * u.arcsec
if verbose:
msg = "Search {0} ".format(np.around(radius.to(u.deg), 3))
msg += f"around: (ra, dec) = ({coord_cen.to_string()})"
logger.info(msg)
for j, (cat_name, table_name) in enumerate(catalog.items()):
# Query from Vizier
result = query_vizier(catalog_name=table_name,
radius=radius,
columns=columns[cat_name],
column_filters=column_filters[cat_name],
coord=coord_cen)
Cat_full = result[table_name]
if len(cat_name) > 4:
c_name = cat_name[0] + cat_name[-1]
else:
c_name = cat_name
m_name = np.atleast_1d(mag_name)[j]
# Transform catalog wcs coordinate into pixel postion
Cat_full = transform_coords2pixel(Cat_full, wcs_data, name=c_name)
# Crop catalog and sort by the catalog magnitude
Cat_crop = crop_catalog(Cat_full, bounds, sortby=m_name,
keys=("X_CATALOG", "Y_CATALOG"))
# catalog magnitude
mag_cat = Cat_crop[m_name]
# Screen out bright stars (mainly for cleaning duplicate source in catalog)
Cat_bright = Cat_crop[(np.less(mag_cat, mag_limit,
where=~np.isnan(mag_cat))) & ~np.isnan(mag_cat)]
mag_cat.mask[np.isnan(mag_cat)] = True
if clean_catalog:
# Clean duplicate items in the catalog
c_bright = SkyCoord(Cat_bright['RAJ2000'], Cat_bright['DEJ2000'], unit=u.deg)
c_catalog = SkyCoord(Cat_crop['RAJ2000'], Cat_crop['DEJ2000'], unit=u.deg)
idxc, idxcatalog, d2d, d3d = c_catalog.search_around_sky(c_bright, sep)
inds_c, counts = np.unique(idxc, return_counts=True)
row_duplicate = np.array([], dtype=int)
# Use the measurement with min error in RA/DEC
for i in inds_c[counts>1]:
obj_duplicate = Cat_crop[idxcatalog][idxc==i]
# obj_duplicate.pprint(max_lines=-1, max_width=-1)
# Remove detection without magnitude measurement
mag_obj = obj_duplicate[m_name]
obj_duplicate = obj_duplicate[~np.isnan(mag_obj)]
# Use the detection with the best astrometry
e2_coord = obj_duplicate["e_RAJ2000"]**2 + obj_duplicate["e_DEJ2000"]**2
min_e2_coord = np.nanmin(e2_coord)
for ID in obj_duplicate[e2_coord>min_e2_coord]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
Cat_crop.remove_rows(np.unique(row_duplicate))
#Cat_bright = Cat_crop[mag_cat<mag_limit]
for m_name in magnitude_name[cat_name]:
mag = Cat_crop[m_name]
if verbose:
logger.debug("%s %s: %.3f ~ %.3f"%(cat_name, m_name, mag.min(), mag.max()))
# Merge Catalog
keep_columns = SE_COLUMNS + ["ID"+'_'+c_name] + magnitude_name[cat_name] + \
["X_CATALOG", "Y_CATALOG"]
tab_match = merge_catalog(SE_catalog, Cat_crop, sep=sep,
keep_columns=keep_columns)
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
keep_columns=keep_columns)
# Rename columns
for m_name in magnitude_name[cat_name]:
tab_match[m_name].name = m_name+'_'+c_name
tab_match_bright[m_name].name = m_name+'_'+c_name
# Join tables
if j==0:
tab_target_all = tab_match
tab_target = tab_match_bright
else:
tab_target_all = join(tab_target_all, tab_match, keys=SE_COLUMNS,
join_type='left', metadata_conflicts='silent')
tab_target = join(tab_target, tab_match_bright, keys=SE_COLUMNS,
join_type='left', metadata_conflicts='silent')
# Sort matched catalog by SE MAG_AUTO
tab_target.sort("MAG_AUTO")
tab_target_all.sort("MAG_AUTO")
mag_all = tab_target_all[mag_name+'_'+c_name]
mag = tab_target[mag_name+'_'+c_name]
if verbose:
logger.info("Matched stars with %s %s: %.3f ~ %.3f"\
%(cat_name, mag_name, mag_all.min(), mag_all.max()))
logger.info("Matched bright stars with %s %s: %.3f ~ %.3f"\
%(cat_name, mag_name, mag.min(), mag.max()))
return tab_target, tab_target_all, Cat_crop
def cross_match_PS1_DR2(wcs_data, SE_catalog, bounds,
band='g', radius=None, clean_catalog=True,
pixel_scale=DF_pixel_scale, sep=5*u.arcsec,
mag_limit=15, verbose=True):
"""
Use PANSTARRS DR2 API to do cross-match with the SE source catalog.
Note this could be (much) slower compared to cross-match using Vizier.
Parameters
----------
wcs_data : wcs of data
SE_catalog : SE source catalog
bounds : Nx4 2d / 1d array defining the cross-match region(s) [Xmin, Ymin, Xmax, Ymax]
clean_catalog : whether to clean the matched catalog. (default True)
        The PS-1 catalog may contain duplicate items for a single source with different
        measurements. If True, duplicates of bright sources are cleaned by removing
        those with large coordinate errors and keeping the item with the most
        detections in that band.
    mag_limit : magnitude threshold defining bright stars.
sep : maximum separation (in astropy unit) for crossmatch with SE.
Returns
-------
tab_target : table containing matched bright sources with SE source catalog
tab_target_all : table containing matched all sources with SE source catalog
catalog_star : PS-1 catalog of all sources in the region(s)
"""
from astropy.nddata.bitmask import interpret_bit_flags
from .panstarrs import ps1cone
band = band.lower()
mag_name = band + 'MeanPSFMag'
c_name = 'PS'
for j, bounds in enumerate(np.atleast_2d(bounds)):
cen = (bounds[2]+bounds[0])/2., (bounds[3]+bounds[1])/2.
coord_cen = wcs_data.pixel_to_world(cen[0], cen[1])
ra, dec = coord_cen.ra.value, coord_cen.dec.value
L = math.sqrt((cen[0]-bounds[0])**2 + (cen[1]-bounds[1])**2)
radius = (L * pixel_scale * u.arcsec).to(u.deg)
if verbose:
msg = "Search {0} ".format(np.around(radius.to(u.deg), 3))
msg += f"around: (ra, dec) = ({coord_cen.to_string()})"
logger.info(msg)
#### Query PANSTARRS start ####
constraints = {'nDetections.gt':1, band+'MeanPSFMag.lt':22}
# strip blanks and weed out blank and commented-out values
columns = """raMean,decMean,raMeanErr,decMeanErr,nDetections,ng,nr,
gMeanPSFMag,gMeanPSFMagErr,gFlags,rMeanPSFMag,rMeanPSFMagErr,rFlags""".split(',')
columns = [x.strip() for x in columns]
columns = [x for x in columns if x and not x.startswith('#')]
results = ps1cone(ra, dec, radius.value, release='dr2', columns=columns, **constraints)
Cat_full = ascii.read(results)
for filter in 'gr':
col = filter+'MeanPSFMag'
Cat_full[col].format = ".4f"
Cat_full[col][Cat_full[col] == -999.0] = np.nan
for coord in ['ra','dec']:
Cat_full[coord+'MeanErr'].format = ".5f"
#### Query PANSTARRS end ####
Cat_full.sort(mag_name)
Cat_full['raMean'].unit = u.deg
Cat_full['decMean'].unit = u.deg
Cat_full = transform_coords2pixel(Cat_full, wcs_data, name=c_name,
RA_key="raMean", DE_key="decMean")
# Crop catalog and sort by the catalog magnitude
Cat_crop = crop_catalog(Cat_full, bounds, sortby=mag_name,
keys=("X_CATALOG", "Y_CATALOG"))
# Remove detection without magnitude
has_mag = ~np.isnan(Cat_crop[mag_name])
Cat_crop = Cat_crop[has_mag]
# Pick out bright stars
mag_cat = Cat_crop[mag_name]
Cat_bright = Cat_crop[mag_cat<mag_limit]
if clean_catalog:
# A first crossmatch with bright stars in catalog for cleaning
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
RA_key="raMean", DE_key="decMean")
tab_match_bright.sort(mag_name)
# Clean duplicate items in the catalog
c_bright = SkyCoord(tab_match_bright['X_WORLD'],
tab_match_bright['Y_WORLD'], unit=u.deg)
c_catalog = SkyCoord(Cat_crop['raMean'],
Cat_crop['decMean'], unit=u.deg)
idxc, idxcatalog, d2d, d3d = \
c_catalog.search_around_sky(c_bright, sep)
inds_c, counts = np.unique(idxc, return_counts=True)
row_duplicate = np.array([], dtype=int)
# Use the measurement following some criteria
for i in inds_c[counts>1]:
obj_dup = Cat_crop[idxcatalog][idxc==i]
obj_dup['sep'] = d2d[idxc==i]
#obj_dup.pprint(max_lines=-1, max_width=-1)
# Use the detection with mag
mag_obj_dup = obj_dup[mag_name]
obj_dup = obj_dup[~np.isnan(mag_obj_dup)]
# Use the closest match
good = (obj_dup['sep'] == min(obj_dup['sep']))
### Extra Criteria
# # Coordinate error of detection
# err2_coord = obj_dup["raMeanErr"]**2 + \
# obj_dup["decMeanErr"]**2
# # Use the detection with the best astrometry
# min_e2_coord = np.nanmin(err2_coord)
# good = (err2_coord == min_e2_coord)
# # Use the detection with PSF mag err
# has_err_mag = obj_dup[mag_name+'Err'] > 0
# # Use the detection > 0
# n_det = obj_dup['n'+band]
# has_n_det = n_det > 0
# # Use photometry not from tycho in measurement
# use_tycho_phot = extract_bool_bitflags(obj_dup[band+'Flags'], 7)
# good = has_err_mag & has_n_det & (~use_tycho_phot)
###
# Add rows to be removed
for ID in obj_dup[~good]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
obj_dup = obj_dup[good]
if len(obj_dup)<=1:
continue
# Use brightest detection
mag = obj_dup[mag_name]
for ID in obj_dup[mag>min(mag)]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
# Remove rows
Cat_crop.remove_rows(np.unique(row_duplicate))
# Subset catalog containing bright stars
Cat_bright = Cat_crop[Cat_crop[mag_name]<mag_limit]
# Merge Catalog
keep_columns = SE_COLUMNS + ["ID"+'_'+c_name] + columns + \
["X_CATALOG", "Y_CATALOG"]
tab_match = merge_catalog(SE_catalog, Cat_crop, sep=sep,
RA_key="raMean", DE_key="decMean", keep_columns=keep_columns)
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
RA_key="raMean", DE_key="decMean", keep_columns=keep_columns)
if j==0:
tab_target_all = tab_match
tab_target = tab_match_bright
catalog_star = Cat_crop
else:
tab_target_all = vstack([tab_target_all, tab_match], join_type='exact')
tab_target = vstack([tab_target, tab_match_bright], join_type='exact')
catalog_star = vstack([catalog_star, Cat_crop], join_type='exact')
# Sort matched catalog by matched magnitude
tab_target.sort(mag_name)
tab_target_all.sort(mag_name)
if verbose:
logger.info("Matched stars with PANSTARRS DR2 %s: %.3f ~ %.3f"\
%(mag_name, np.nanmin(tab_target_all[mag_name]),
np.nanmax(tab_target_all[mag_name])))
logger.info("Matched bright stars with PANSTARRS DR2 %s: %.3f ~ %.3f"\
%(mag_name, np.nanmin(tab_target[mag_name]),
np.nanmax(tab_target[mag_name])))
return tab_target, tab_target_all, catalog_star
def cross_match_PS1(band, wcs_data,
SE_cat_target, bounds_list,
pixel_scale=DF_pixel_scale,
sep=None, mag_limit=15, n_attempt=3,
use_PS1_DR2=False, verbose=True):
b_name = band.lower()
if sep is None:
sep = pixel_scale * u.arcsec
if use_PS1_DR2:
from urllib.error import HTTPError
# Give 3 attempts in matching PS1 DR2 via MAST.
# This could fail if the FoV is too large.
for attempt in range(n_attempt):
try:
tab_target, tab_target_full, catalog_star = \
cross_match_PS1_DR2(wcs_data,
SE_cat_target,
bounds_list,
pixel_scale=pixel_scale,
sep=sep,
mag_limit=mag_limit,
band=b_name,
verbose=verbose)
except HTTPError:
logger.warning('Gateway Time-out. Try again.')
else:
break
else:
msg = f'504 Server Error: {n_attempt} failed attempts. Exit.'
logger.error(msg)
sys.exit()
else:
mag_name = b_name+'mag'
tab_target, tab_target_full, catalog_star = \
cross_match(wcs_data,
SE_cat_target,
bounds_list,
pixel_scale=pixel_scale,
sep=sep,
mag_limit=mag_limit,
mag_name=mag_name,
verbose=verbose)
return tab_target, tab_target_full, catalog_star
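# --- Illustrative example (not part of the original module) ---
# A minimal sketch of a typical call, assuming `wcs_data` and an SExtractor catalog
# were obtained beforehand; the bounds below are placeholder pixel coordinates of a
# single region.
def _example_cross_match_PS1(wcs_data, SE_cat_target):
    bounds = [700, 700, 1700, 1700]  # [Xmin, Ymin, Xmax, Ymax]
    return cross_match_PS1('G', wcs_data, SE_cat_target, bounds,
                           mag_limit=15, use_PS1_DR2=False)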
| 18,154 | 41.717647 | 102 |
py
|
elderflower
|
elderflower-master/elderflower/.ipynb_checkpoints/utils-checkpoint.py
|
import os
import re
import sys
import math
import time
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from astropy import wcs
from astropy import units as u
from astropy.io import fits, ascii
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.stats import mad_std, median_absolute_deviation, gaussian_fwhm_to_sigma
from astropy.stats import sigma_clip, SigmaClip, sigma_clipped_stats
from photutils import detect_sources, deblend_sources
from photutils import CircularAperture, CircularAnnulus, EllipticalAperture
from photutils.segmentation import SegmentationImage
from .plotting import LogNorm, AsinhNorm, colorbar
### Baisc Funcs ###
def coord_Im2Array(X_IMAGE, Y_IMAGE, origin=1):
""" Convert image coordniate to numpy array coordinate """
x_arr, y_arr = int(max(round(Y_IMAGE)-origin, 0)), int(max(round(X_IMAGE)-origin, 0))
return x_arr, y_arr
def coord_Array2Im(x_arr, y_arr, origin=1):
""" Convert image coordniate to numpy array coordinate """
X_IMAGE, Y_IMAGE = y_arr+origin, x_arr+origin
return X_IMAGE, Y_IMAGE
def fwhm_to_gamma(fwhm, beta):
""" in arcsec """
return fwhm / 2. / math.sqrt(2**(1./beta)-1)
def gamma_to_fwhm(gamma, beta):
""" in arcsec """
return gamma / fwhm_to_gamma(1, beta)
def Intensity2SB(I, BKG, ZP, pixel_scale=2.5):
""" Convert intensity to surface brightness (mag/arcsec^2) given the background value, zero point and pixel scale """
I = np.atleast_1d(I)
I[np.isnan(I)] = BKG
if np.any(I<=BKG):
I[I<=BKG] = np.nan
I_SB = -2.5*np.log10(I - BKG) + ZP + 2.5 * math.log10(pixel_scale**2)
return I_SB
def SB2Intensity(SB, BKG, ZP, pixel_scale=2.5):
""" Convert surface brightness (mag/arcsec^2)to intensity given the background value, zero point and pixel scale """
SB = np.atleast_1d(SB)
I = 10** ((SB - ZP - 2.5 * math.log10(pixel_scale**2))/ (-2.5)) + BKG
return I
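# --- Illustrative example (not part of the original module) ---
# A minimal numeric check of the two converters above: going from intensity to
# surface brightness and back recovers the input (values are arbitrary).
def _example_SB_roundtrip():
    SB = Intensity2SB(np.array([900.]), BKG=884, ZP=27.1, pixel_scale=2.5)
    return SB2Intensity(SB, BKG=884, ZP=27.1, pixel_scale=2.5)  # ~ array([900.])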
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def counter(i, number):
if np.mod((i+1), number//4) == 0:
print("completed: %d/%d"%(i+1, number))
def round_good_fft(x):
# Rounded PSF size to 2^k or 3*2^k
a = 1 << int(x-1).bit_length()
b = 3 << int(x-1).bit_length()-2
if x>b:
return a
else:
return min(a,b)
def calculate_psf_size(n0, theta_0, contrast=1e5, psf_scale=2.5,
min_psf_range=60, max_psf_range=720):
A0 = theta_0**n0
opt_psf_range = int((contrast * A0) ** (1./n0))
psf_range = max(min_psf_range, min(opt_psf_range, max_psf_range))
# full (image) PSF size in pixel
psf_size = 2 * psf_range // psf_scale
return round_good_fft(psf_size)
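# --- Illustrative example (not part of the original module) ---
# A minimal check of the FFT-friendly rounding above: sizes are rounded up to the
# next 2**k or 3*2**m, e.g. 90 -> 96, 100 -> 128, 257 -> 384.
def _example_round_good_fft():
    return [round_good_fft(x) for x in (90, 100, 257)]  # -> [96, 128, 384]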
def compute_poisson_noise(data, n_frame=1, header=None, Gain=0.37):
if header is not None:
try:
            n_frame = int(header['NFRAMES'])
except KeyError:
n_frame = 1
G_effective = Gain * n_frame # effecitve gain: e-/ADU
std_poi = np.nanmedian(np.sqrt(data/G_effective))
if np.isnan(std_poi):
std_poi = None
print("Sky Poisson Noise Unavailable.")
else:
print("Sky Poisson Noise: %.3f"%std_poi)
return std_poi
def extract_bool_bitflags(bitflags, ind):
from astropy.nddata.bitmask import interpret_bit_flags
return np.array(["{0:016b}".format(0xFFFFFFFF & interpret_bit_flags(flag))[-ind]
for flag in np.atleast_1d(bitflags)]).astype(bool)
### Photometry Funcs ###
def background_sub_SE(field, mask=None, b_size=64, f_size=3, n_iter=5):
""" Subtract background using SE estimator with mask """
from photutils import Background2D, SExtractorBackground, MedianBackground
try:
Bkg = Background2D(field, mask=mask, bkg_estimator=SExtractorBackground(),
box_size=(b_size, b_size), filter_size=(f_size, f_size),
sigma_clip=SigmaClip(sigma=3., maxiters=n_iter))
back = Bkg.background
back_rms = Bkg.background_rms
except ValueError:
img = field.copy()
if mask is not None:
img[mask] = np.nan
        back, back_rms = np.nanmedian(img) * np.ones_like(field), np.nanstd(img) * np.ones_like(field)
if mask is not None:
back *= ~mask
back_rms *= ~mask
return back, back_rms
def display_background_sub(field, back):
from .plotting import vmax_2sig, vmin_3mad
# Display and save background subtraction result with comparison
fig, (ax1,ax2,ax3) = plt.subplots(nrows=1,ncols=3,figsize=(12,4))
ax1.imshow(field, aspect="auto", cmap="gray", vmin=vmin_3mad(field), vmax=vmax_2sig(field),norm=LogNorm())
im2 = ax2.imshow(back, aspect="auto", cmap='gray')
colorbar(im2)
ax3.imshow(field - back, aspect="auto", cmap='gray', vmin=0., vmax=vmax_2sig(field - back),norm=LogNorm())
plt.tight_layout()
def source_detection(data, sn=2, b_size=120,
k_size=3, fwhm=3, smooth=True,
sub_background=True, mask=None):
from astropy.convolution import Gaussian2DKernel
from photutils import detect_sources, deblend_sources
if sub_background:
back, back_rms = background_sub_SE(data, b_size=b_size)
threshold = back + (sn * back_rms)
else:
back = np.zeros_like(data)
threshold = np.nanstd(data)
if smooth:
sigma = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=k_size, y_size=k_size)
kernel.normalize()
else:
kernel=None
segm_sm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel, mask=mask)
data_ma = data.copy() - back
    data_ma[segm_sm.data != 0] = np.nan
return data_ma, segm_sm
def clean_isolated_stars(xx, yy, mask, star_pos, pad=0, dist_clean=60):
star_pos = star_pos + pad
clean = np.zeros(len(star_pos), dtype=bool)
for k, pos in enumerate(star_pos):
rr = np.sqrt((xx-pos[0])**2+(yy-pos[1])**2)
if np.min(rr[~mask]) > dist_clean:
clean[k] = True
return clean
def cal_profile_1d(img, cen=None, mask=None, back=None, bins=None,
color="steelblue", xunit="pix", yunit="Intensity",
seeing=2.5, pixel_scale=2.5, ZP=27.1, sky_mean=884, sky_std=3, dr=1.5,
lw=2, alpha=0.7, markersize=5, I_shift=0, figsize=None,
core_undersample=False, label=None, plot_line=False, mock=False,
plot=True, scatter=False, fill=False, errorbar=False, verbose=False):
"""Calculate 1d radial profile of a given star postage"""
if mask is None:
mask = np.zeros_like(img, dtype=bool)
if back is None:
back = np.ones_like(img) * sky_mean
if cen is None:
cen = (img.shape[1]-1)/2., (img.shape[0]-1)/2.
yy, xx = np.indices((img.shape))
rr = np.sqrt((xx - cen[0])**2 + (yy - cen[1])**2)
r = rr[~mask].ravel() # radius in pix
z = img[~mask].ravel() # pixel intensity
    r_core = int(3 * seeing/pixel_scale) # core radius in pix
# Decide the outermost radial bin r_max before going into the background
bkg_cumsum = np.arange(1, len(z)+1, 1) * np.median(back)
z_diff = abs(z.cumsum() - bkg_cumsum)
n_pix_max = len(z) - np.argmin(abs(z_diff - 0.0001 * z_diff[-1]))
r_max = np.sqrt(n_pix_max/np.pi)
r_max = np.min([img.shape[0]//2, r_max])
if verbose:
print("Maximum R: %d (pix)"%np.int(r_max))
if xunit == "arcsec":
r = r * pixel_scale # radius in arcsec
r_core = r_core * pixel_scale
r_max = r_max * pixel_scale
d_r = dr * pixel_scale if xunit == "arcsec" else dr
# z = z[~np.isnan(z)]
if mock:
clip = lambda z: sigma_clip((z), sigma=5, maxiters=3)
else:
clip = lambda z: 10**sigma_clip(np.log10(z+1e-10), sigma=3, maxiters=5)
if bins is None:
# Radial bins: discrete/linear within r_core + log beyond it
if core_undersample:
# for undersampled core, bin in individual pixels
bins_inner = np.unique(r[r<r_core]) + 1e-3
else:
bins_inner = np.linspace(0, r_core, int(min((r_core/d_r*2), 5))) - 1e-5
        n_bin_outer = np.max([7, np.min([int(r_max/d_r/10), 50])])
if r_max > (r_core+d_r):
bins_outer = np.logspace(np.log10(r_core+d_r), np.log10(r_max-d_r), n_bin_outer)
else:
bins_outer = []
bins = np.concatenate([bins_inner, bins_outer])
_, bins = np.histogram(r, bins=bins)
# Calculate binned 1d profile
r_rbin = np.array([])
z_rbin = np.array([])
zstd_rbin = np.array([])
for k, b in enumerate(bins[:-1]):
in_bin = (r>bins[k])&(r<bins[k+1])
z_clip = clip(z[in_bin])
if len(z_clip)==0:
continue
zb = np.mean(z_clip)
zstd_b = np.std(z_clip)
z_rbin = np.append(z_rbin, zb)
zstd_rbin = np.append(zstd_rbin, zstd_b)
r_rbin = np.append(r_rbin, np.mean(r[in_bin]))
logzerr_rbin = 0.434 * abs( zstd_rbin / (z_rbin-sky_mean))
if plot:
if figsize is not None:
plt.figure(figsize=figsize)
if yunit == "Intensity":
# plot radius in Intensity
plt.plot(r_rbin, np.log10(z_rbin), "-o", color=color,
mec="k", lw=lw, ms=markersize, alpha=alpha, zorder=3, label=label)
if scatter:
I = np.log10(z)
if fill:
plt.fill_between(r_rbin, np.log10(z_rbin)-logzerr_rbin, np.log10(z_rbin)+logzerr_rbin,
color=color, alpha=0.2, zorder=1)
plt.ylabel("log Intensity")
elif yunit == "SB":
# plot radius in Surface Brightness
I_rbin = Intensity2SB(I=z_rbin, BKG=np.median(back),
ZP=ZP, pixel_scale=pixel_scale) + I_shift
I_sky = -2.5*np.log10(sky_std) + ZP + 2.5 * math.log10(pixel_scale**2)
plt.plot(r_rbin, I_rbin, "-o", mec="k",
lw=lw, ms=markersize, color=color, alpha=alpha, zorder=3, label=label)
if scatter:
I = Intensity2SB(I=z, BKG=np.median(back),
ZP=ZP, pixel_scale=pixel_scale) + I_shift
if errorbar:
Ierr_rbin_up = I_rbin - Intensity2SB(I=z_rbin,
BKG=np.median(back)-sky_std,
ZP=ZP, pixel_scale=pixel_scale)
Ierr_rbin_lo = Intensity2SB(I=z_rbin-sky_std,
BKG=np.median(back)+sky_std,
ZP=ZP, pixel_scale=pixel_scale) - I_rbin
lolims = np.isnan(Ierr_rbin_lo)
uplims = np.isnan(Ierr_rbin_up)
Ierr_rbin_lo[lolims] = 4
Ierr_rbin_up[uplims] = 4
plt.errorbar(r_rbin, I_rbin, yerr=[Ierr_rbin_up, Ierr_rbin_lo],
fmt='', ecolor=color, capsize=2, alpha=0.5)
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.gca().invert_yaxis()
plt.ylim(30,17)
plt.xscale("log")
plt.xlim(max(r_rbin[np.isfinite(r_rbin)][0]*0.8, 1e-1),r_rbin[np.isfinite(r_rbin)][-1]*1.2)
plt.xlabel("R [arcsec]") if xunit == "arcsec" else plt.xlabel("r [pix]")
if scatter:
plt.scatter(r[r<3*r_core], I[r<3*r_core], color=color,
s=markersize/2, alpha=alpha/2, zorder=1)
plt.scatter(r[r>=3*r_core], I[r>=3*r_core], color=color,
s=markersize/5, alpha=alpha/10, zorder=1)
    # Estimate the saturation radius of bright stars: where the cumulative intensity drop reaches half (~0.3 dex)
dz_rbin = np.diff(np.log10(z_rbin))
dz_cum = np.cumsum(dz_rbin)
if plot_line:
r_satr = r_rbin[np.argmax(dz_cum<-0.3)] + 1e-3
plt.axvline(r_satr,color="k",ls="--",alpha=0.9)
plt.axvline(r_core,color="k",ls=":",alpha=0.9)
plt.axhline(I_sky,color="gray",ls="-.",alpha=0.7)
if yunit == "Intensity":
return r_rbin, z_rbin, logzerr_rbin
elif yunit == "SB":
return r_rbin, I_rbin, None
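# Usage sketch (illustrative): radial profile of a synthetic Gaussian star stamp,
# without plotting. The stamp, sky level and seeing are arbitrary example values.
def _demo_profile_1d():
    rng = np.random.RandomState(7)
    yy, xx = np.mgrid[0:101, 0:101]
    img = (884. + 3. * rng.standard_normal((101, 101))
           + 5e4 * np.exp(-0.5 * ((xx - 50.)**2 + (yy - 50.)**2) / 3.**2))
    r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(img, sky_mean=884., sky_std=3.,
                                                  seeing=2.5, pixel_scale=2.5,
                                                  plot=False, verbose=False)
    return r_rbin, z_rbin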
def calculate_fit_SB(psf, r=np.logspace(0.03,2.5,100), mags=[15,12,9], ZP=27.1):
frac = psf.frac
I_s = [10**((mag-ZP)/-2.5) for mag in mags]
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
I_tot_s = [Intensity2SB(((1-frac) * comp1 + comp2 * frac) * I,
0, ZP, psf.pixel_scale) for I in I_s]
return I_tot_s
### Funcs for measuring scaling ###
def get_star_pos(id, star_cat):
""" Get the position of an object from the catalog"""
X_c, Y_c = star_cat[id]["X_IMAGE"], star_cat[id]["Y_IMAGE"]
return (X_c, Y_c)
def get_star_thumb(id, star_cat, wcs, data, seg_map,
n_win=20, seeing=2.5, origin=1, verbose=True):
""" Crop the data and segment map into thumbnails.
Return thumbnail of image/segment/mask, and center of the star. """
(X_c, Y_c) = get_star_pos(id, star_cat)
# define thumbnail size
fwhm = max(star_cat[id]["FWHM_IMAGE"], seeing)
win_size = int(n_win * min(max(fwhm,2), 8))
# calculate boundary
X_min, X_max = max(1, X_c - win_size), min(data.shape[1], X_c + win_size)
Y_min, Y_max = max(1, Y_c - win_size), min(data.shape[0], Y_c + win_size)
x_min, y_min = coord_Im2Array(X_min, Y_min, origin)
x_max, y_max = coord_Im2Array(X_max, Y_max, origin)
if verbose:
num = star_cat[id]["NUMBER"]
print("NUMBER: ", num)
print("X_c, Y_c: ", (X_c, Y_c))
print("RA, DEC: ", (star_cat[id]["X_WORLD"], star_cat[id]["Y_WORLD"]))
print("x_min, x_max, y_min, y_max: ", x_min, x_max, y_min, y_max)
print("X_min, X_max, Y_min, Y_max: ", X_min, X_max, Y_min, Y_max)
# crop image and segment map
img_thumb = data[x_min:x_max, y_min:y_max].copy()
if seg_map is None:
seg_thumb = None
mask_thumb = np.zeros_like(data, dtype=bool)
else:
seg_thumb = seg_map[x_min:x_max, y_min:y_max]
mask_thumb = (seg_thumb!=0) # mask sources by 1
# the center position is converted from world with wcs
X_cen, Y_cen = wcs.wcs_world2pix(star_cat[id]["X_WORLD"], star_cat[id]["Y_WORLD"], origin)
cen_star = X_cen - X_min, Y_cen - Y_min
return (img_thumb, seg_thumb, mask_thumb), cen_star
def extract_star(id, star_cat, wcs, data, seg_map=None,
seeing=2.5, sn_thre=2.5, n_win=25, n_dilation=1,
display_bg=False, display=True, verbose=False):
""" Return the image thubnail, mask map, backgroud estimates, and center of star.
Do a finer detection&deblending to remove faint undetected source."""
from skimage import morphology
thumb_list, cen_star = get_star_thumb(id, star_cat, wcs, data, seg_map,
n_win=n_win, seeing=seeing, verbose=verbose)
img_thumb, seg_thumb, mask_thumb = thumb_list
# measure background, use a scalar value if the thumbnail is small
b_size = round(img_thumb.shape[0]//5/25)*25
if img_thumb.shape[0] >= 50:
back, back_rms = background_sub_SE(img_thumb, b_size=b_size)
else:
back, back_rms = (np.median(img_thumb[~mask_thumb])*np.ones_like(img_thumb),
mad_std(img_thumb[~mask_thumb])*np.ones_like(img_thumb))
if display_bg:
# show background subtraction
display_background_sub(img_thumb, back)
if seg_thumb is None:
# the same thumbnail size
fwhm = max([star_cat[id]["FWHM_IMAGE"], seeing])
# do segmentation (a second time) to remove faint undetected stars using photutils
sigma = seeing * gaussian_fwhm_to_sigma
threshold = back + (sn_thre * back_rms)
segm = detect_sources(img_thumb, threshold, npixels=5)
# do deblending using photutils
segm_deblend = deblend_sources(img_thumb, segm, npixels=5,
nlevels=64, contrast=0.005)
else:
segm_deblend = SegmentationImage(seg_thumb)
# the target star is at the center of the thumbnail
star_lab = segm_deblend.data[int(cen_star[1]), int(cen_star[0])]
star_ma = ~((segm_deblend.data==star_lab) | (segm_deblend.data==0)) # mask other source
# dilation
for i in range(n_dilation):
star_ma = morphology.dilation(star_ma)
if display:
med_back = np.median(back)
fig, (ax1,ax2,ax3) = plt.subplots(nrows=1,ncols=3,figsize=(12,4))
ax1.imshow(img_thumb, vmin=med_back-1, vmax=10000, norm=LogNorm(), cmap="viridis")
ax1.set_title("star", fontsize=16)
ax2.imshow(segm_deblend, cmap=segm_deblend.make_cmap(random_state=12345))
ax2.set_title("segment", fontsize=16)
img_thumb_ma = img_thumb.copy()
img_thumb_ma[star_ma] = -1
ax3.imshow(img_thumb_ma, cmap="viridis", norm=LogNorm(),
vmin=med_back-1, vmax=med_back+10*np.median(back_rms))
ax3.set_title("extracted star", fontsize=16)
plt.tight_layout()
return img_thumb, star_ma, back, cen_star
def compute_Rnorm(image, mask_field, cen, R=12, wid=1, mask_cross=True, display=False):
""" Compute (3 sigma-clipped) normalization using an annulus.
Note the output values of normalization contain background.
Paramters
----------
image : input image for measurement
mask_field : mask map with nearby sources masked as 1.
cen : center of target
R : radius of annulus
wid : half-width of annulus
Returns
-------
I_mean: mean value in the annulus
I_med : median value in the annulus
I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixels < 5)
"""
annulus_ma = CircularAnnulus([cen], R-wid, R+wid).to_mask()[0]
mask_ring = annulus_ma.to_image(image.shape) > 0.5 # sky ring (R-wid, R+wid)
mask_clean = mask_ring & (~mask_field) # sky ring with other sources masked
# Whether to mask the cross regions, important if R is small
if mask_cross:
yy, xx = np.indices(image.shape)
rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
cross = ((abs(xx-cen[0])<4)|(abs(yy-cen[1])<4))
mask_clean = mask_clean * (~cross)
if len(image[mask_clean]) < 5:
return [np.nan] * 3 + [1]
z = sigma_clip(np.log10(image[mask_clean]), sigma=2, maxiters=5)
I_mean, I_med, I_std = 10**np.mean(z), 10**np.median(z.compressed()), np.std(10**z)
if display:
z = 10**z
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
ax1.imshow(mask_clean, cmap="gray", alpha=0.7)
ax1.imshow(image, vmin=image.min(), vmax=I_med+50*I_std,
cmap='viridis', norm=AsinhNorm(), alpha=0.7)
ax1.plot(cen[0], cen[1], 'r*', ms=10)
ax2.hist(sigma_clip(z),alpha=0.7)
# Label mean value
plt.axvline(I_mean, color='k')
plt.text(0.5, 0.9, "%.1f"%I_mean, color='darkorange', ha='center', transform=ax2.transAxes)
# Label 20% / 80% quantiles
I_20 = np.quantile(z.compressed(), 0.2)
I_80 = np.quantile(z.compressed(), 0.8)
for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
plt.axvline(I, color='k', ls="--")
plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
ha='center', transform=ax2.transAxes)
return I_mean, I_med, I_std, 0
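# Usage sketch (illustrative): normalization of a synthetic star measured in an
# annulus at R = 12 pix; the star model and sky level are arbitrary example values.
def _demo_compute_Rnorm():
    yy, xx = np.mgrid[0:61, 0:61]
    img = 884. + 2e4 * np.exp(-0.5 * ((xx - 30.)**2 + (yy - 30.)**2) / 4.**2)
    mask_field = np.zeros_like(img, dtype=bool)      # no neighbouring sources
    I_mean, I_med, I_std, I_flag = compute_Rnorm(img, mask_field, cen=(30., 30.),
                                                 R=12, wid=1, display=False)
    return I_med, I_flag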
def compute_Rnorm_batch(table_target, data, seg_map, wcs,
R=12, wid=1, return_full=False,
display=False, verbose=True):
""" Combining the above functions. Compute for all object in table_target.
Return an arry with measurement on the intensity and a dictionary containing maps and centers."""
# Initialize
res_thumb = {}
res_Rnorm = np.empty((len(table_target), 5))
for i, (num, mag_auto) in enumerate(zip(table_target['NUMBER'], table_target['MAG_AUTO'])):
if verbose: counter(i, len(table_target))
ind = np.where(table_target['NUMBER']==num)[0][0]
# For very bright sources, use a broader window
n_win = 30 if mag_auto < 12 else 25
img, ma, bkg, cen = extract_star(ind, table_target, wcs, data, seg_map,
n_win=n_win, display_bg=False, display=False)
res_thumb[num] = {"image":img, "mask":ma, "bkg":bkg, "center":cen}
# Measure the mean, med and std of intensity at R
I_mean, I_med, I_std, Iflag = compute_Rnorm(img, ma, cen, R=R, wid=wid, display=display)
        if (Iflag==1) & verbose: print("Erroneous measurement: #", num)
# Use the median value of background as the local background
sky_mean = np.median(bkg)
res_Rnorm[i] = np.array([I_mean, I_med, I_std, sky_mean, Iflag])
return res_Rnorm, res_thumb
def measure_Rnorm_all(table, image_bound,
wcs_data, image, seg_map=None,
r_scale=12, width=1, mag_thre=15,
mag_name='rmag_PS', read=False,
obj_name="", save=True, dir_name='.',
display=False, verbose=True):
"""
Measure normalization at r_scale for bright stars in table.
If seg_map is not given, source detection will be run.
Parameters
----------
table : table containing list of sources
image_bound : 1X4 1d array defining the bound of region
wcs_data : wcs
image : image data
seg_map : segm map used to mask nearby sources during the measurement. If not given a source detection will be done.
    r_scale : radius at which the flux scaling is measured (default: 12 pix)
    width : half-width of the ring used to measure the flux scaling at r_scale (default: 1 pix)
    mag_name : magnitude column name
    mag_thre : magnitude threshold below which stars are measured
    read : whether to read existing outputs
save : whether to save output table and thumbnails
obj_name : object name used as prefix of saved output
dir_name : path of saving
Returns
----------
table_res_Rnorm : table containing measurement results
res_thumb : thumbnails of image, mask, background and center of object, stored as dictionary
"""
Xmin, Ymin = image_bound[:2]
table_Rnorm_name = os.path.join(dir_name, '%s-norm_%dpix_%smag%d_X%sY%s.txt'\
%(obj_name, r_scale, mag_name[0], mag_thre, Xmin, Ymin))
res_thumb_name = os.path.join(dir_name, '%s-thumbnail_%smag%d_X%sY%s'\
%(obj_name, mag_name[0], mag_thre, Xmin, Ymin))
if read:
table_res_Rnorm = Table.read(table_Rnorm_name, format="ascii")
res_thumb = load_thumbs(res_thumb_name)
else:
tab = table[table[mag_name]<mag_thre]
res_Rnorm, res_thumb = compute_Rnorm_batch(tab, image, seg_map, wcs_data,
R=r_scale, wid=width,
return_full=True, display=display, verbose=verbose)
keep_columns = ['NUMBER', 'MAG_AUTO', 'MAG_AUTO_corr', mag_name] \
+ [s for s in tab.colnames if 'IMAGE' in s]
table_res_Rnorm = tab[keep_columns].copy()
for j, colname in enumerate(['Imean','Imed','Istd','Isky', 'Iflag']):
if colname=='Iflag':
col = res_Rnorm[:,j].astype(int)
else:
col = np.around(res_Rnorm[:,j], 5)
table_res_Rnorm[colname] = col
if save:
check_save_path(dir_name, make_new=False, verbose=False)
save_thumbs(res_thumb, res_thumb_name)
table_res_Rnorm.write(table_Rnorm_name, overwrite=True, format='ascii')
return table_res_Rnorm, res_thumb
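# Typical call pattern (sketch only; the table, image, wcs and segm map are assumed
# to come from the earlier SExtractor and cross-match steps and are not defined here):
#
#   tab_norm, res_thumb = measure_Rnorm_all(tab_target, image_bound, wcs_data, image,
#                                           seg_map=seg_map, r_scale=12, width=1,
#                                           mag_thre=15, mag_name='rmag_PS',
#                                           obj_name='target', dir_name='./Measure')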
### Catalog / Data Manipulation Helper ###
def id_generator(size=6, chars=None):
import random
import string
if chars is None:
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def check_save_path(dir_name, make_new=True, verbose=True):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
elif make_new:
if len(os.listdir(dir_name)) != 0:
while os.path.exists(dir_name):
                dir_name = input("'%s' already exists. Enter a directory name for saving:"%dir_name)
os.makedirs(dir_name)
if verbose: print("Results will be saved in %s\n"%dir_name)
def find_keyword_header(header, keyword):
""" Search keyword value in header (converting to float).
Input a value by user if not found. """
try:
        val = float(header[keyword])
except KeyError:
print("%s missing in header --->"%keyword)
try:
            val = float(input("Input a value of %s :"%keyword))
except ValueError:
sys.exit("Invalid %s values!"%keyword)
return val
def crop_catalog(cat, bounds, keys=("X_IMAGE", "Y_IMAGE"), sortby=None):
Xmin, Ymin, Xmax, Ymax = bounds
A, B = keys
crop = (cat[A]>=Xmin) & (cat[A]<=Xmax) & (cat[B]>=Ymin) & (cat[B]<=Ymax)
if sortby is not None:
cat_crop = cat[crop]
cat_crop.sort(keys=sortby)
return cat_crop
else:
return cat[crop]
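# Minimal illustrative example: crop a small astropy Table to a rectangular region
# and sort it by magnitude; column names follow the SExtractor convention used above.
def _demo_crop_catalog():
    from astropy.table import Table
    cat = Table({'X_IMAGE': [10., 500., 900.],
                 'Y_IMAGE': [20., 450., 880.],
                 'MAG_AUTO': [18.2, 15.1, 16.7]})
    return crop_catalog(cat, bounds=(0, 0, 600, 600), sortby='MAG_AUTO')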
def crop_image(data, bounds, SE_seg_map=None, weight_map=None,
sub_bounds=None, origin=1, color="w", draw=False):
""" Crop the data (and segm map if given) with the given bouds. """
from matplotlib import patches
Xmin, Ymin, Xmax, Ymax = bounds
xmin, ymin = coord_Im2Array(Xmin, Ymin, origin)
xmax, ymax = coord_Im2Array(Xmax, Ymax, origin)
patch = np.copy(data[xmin:xmax, ymin:ymax])
if SE_seg_map is None:
seg_patch = None
else:
seg_patch = np.copy(SE_seg_map[xmin:xmax, ymin:ymax])
if draw:
if SE_seg_map is not None:
sky = data[(SE_seg_map==0)]
else:
sky = sigma_clip(data, 3)
sky_mean = np.mean(sky)
sky_std = max(mad_std(sky[sky>sky_mean]),5)
fig, ax = plt.subplots(figsize=(12,8))
plt.imshow(data, norm=AsinhNorm(a=0.01), cmap="viridis",
vmin=sky_mean, vmax=sky_mean+5*sky_std, alpha=0.95)
if weight_map is not None:
plt.imshow(data*weight_map, norm=AsinhNorm(a=0.01), cmap="viridis",
vmin=sky_mean, vmax=sky_mean+5*sky_std, alpha=0.3)
width = Xmax-Xmin, Ymax-Ymin
rect = patches.Rectangle((Xmin, Ymin), width[0], width[1],
linewidth=2.5, edgecolor=color, facecolor='none')
ax.add_patch(rect)
plt.plot([Xmin+width[0]//2-360,Xmin+width[0]//2+360], [450,450],"whitesmoke",lw=3)
plt.plot([Xmin+width[0]//2+360,Xmin+width[0]//2+360], [420,480],"whitesmoke",lw=3)
plt.plot([Xmin+width[0]//2-360,Xmin+width[0]//2-360], [420,480],"whitesmoke",lw=3)
plt.text(Xmin+width[0]//2, 220, r"$\bf 0.5\,deg$", color='whitesmoke', ha='center', fontsize=18)
if sub_bounds is not None:
for bounds in sub_bounds:
Xmin, Ymin, Xmax, Ymax = bounds
width = Xmax-Xmin, Ymax-Ymin
rect = patches.Rectangle((Xmin, Ymin), width[0], width[1],
linewidth=2.5, edgecolor='indianred', facecolor='none')
ax.add_patch(rect)
plt.show()
if SE_seg_map is None:
return patch
else:
return patch, seg_patch
def query_vizier(catalog_name, radius, columns, column_filters, header=None, coord=None):
""" Query catalog in Vizier database with the given catalog name,
search radius and column names. If coords is not given, look for fits header """
from astroquery.vizier import Vizier
from astropy import units as u
    # Prepare for querying Vizier with filters and no row limit (the default is 50 rows).
viz_filt = Vizier(columns=columns, column_filters=column_filters)
viz_filt.ROW_LIMIT = -1
    if coord is None:
RA, DEC = re.split(",", header['RADEC'])
coord = SkyCoord(RA+" "+DEC , unit=(u.hourangle, u.deg))
# Query!
result = viz_filt.query_region(coord, radius=radius,
catalog=[catalog_name])
return result
def transform_coords2pixel(table, wcs, name='', RA_key="RAJ2000", DE_key="DEJ2000", origin=1):
""" Transform the RA/DEC columns in the table into pixel coordinates given wcs"""
coords = np.vstack([np.array(table[RA_key]),
np.array(table[DE_key])]).T
pos = wcs.wcs_world2pix(coords, origin)
table.add_column(Column(np.around(pos[:,0], 4)*u.pix), name='X_IMAGE'+'_'+name)
table.add_column(Column(np.around(pos[:,1], 4)*u.pix), name='Y_IMAGE'+'_'+name)
table.add_column(Column(np.arange(len(table))+1, dtype=int),
index=0, name="ID"+'_'+name)
return table
def merge_catalog(SE_catalog, table_merge, sep=5 * u.arcsec,
RA_key="RAJ2000", DE_key="DEJ2000", keep_columns=None):
from astropy.table import join
c_SE = SkyCoord(ra=SE_catalog["X_WORLD"], dec=SE_catalog["Y_WORLD"])
c_tab = SkyCoord(ra=table_merge[RA_key], dec=table_merge[DE_key])
idx, d2d, d3d = c_SE.match_to_catalog_sky(c_tab)
match = d2d < sep
cat_SE_match = SE_catalog[match]
cat_tab_match = table_merge[idx[match]]
cat_tab_match.add_column(cat_SE_match["NUMBER"], index=0, name="NUMBER")
cat_match = join(cat_SE_match, cat_tab_match, keys='NUMBER')
if keep_columns is not None:
cat_match.keep_columns(keep_columns)
return cat_match
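# Minimal illustrative example: cross-match a toy SE catalog with an external toy
# catalog within 5 arcsec; the coordinates and magnitudes are made-up values.
def _demo_merge_catalog():
    from astropy.table import Table
    se_cat = Table({'NUMBER': [1, 2],
                    'X_WORLD': [150.10, 150.20] * u.deg,
                    'Y_WORLD': [2.10, 2.20] * u.deg})
    ext_cat = Table({'RAJ2000': [150.1001, 150.3000] * u.deg,
                     'DEJ2000': [2.1001, 2.3000] * u.deg,
                     'rmag': [14.2, 15.5]})
    return merge_catalog(se_cat, ext_cat, sep=5 * u.arcsec)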
def read_measurement_tables(dir_name, image_bounds0_list,
obj_name='', band='G',
pad=100, r_scale=12, mag_limit=[15,23]):
""" Read measurement tables from the directory """
# Magnitude name
b_name = band.lower()
mag_name = b_name+'MeanPSFMag' if 'PS' in dir_name else b_name+'mag'
tables_res_Rnorm = []
tables_faint = []
for image_bounds0 in np.atleast_2d(image_bounds0_list):
# Clipped bounds
patch_Xmin0, patch_Ymin0, patch_Xmax0, patch_Ymax0 = image_bounds0
image_bounds = (patch_Xmin0+pad, patch_Ymin0+pad,
patch_Xmax0-pad, patch_Ymax0-pad)
## Read measurement for faint stars from catalog
# Faint star catalog name
fname_catalog = os.path.join(dir_name, "%s-catalog_PS_%s_all.txt"%(obj_name, b_name))
# Check if the file exist
if os.path.isfile(fname_catalog):
table_catalog = Table.read(fname_catalog, format="ascii")
mag_catalog = table_catalog[mag_name]
else:
sys.exit("Table %s does not exist. Exit."%fname_catalog)
# stars fainter than magnitude limit (fixed as background)
table_faint = table_catalog[(mag_catalog>=mag_limit[0]) & (mag_catalog<mag_limit[1])]
table_faint = crop_catalog(table_faint,
keys=("X_IMAGE_PS", "Y_IMAGE_PS"),
bounds=image_bounds)
tables_faint += [table_faint]
## Read measurement for bright stars
# Catalog name
fname_res_Rnorm = os.path.join(dir_name, "%s-norm_%dpix_%smag%s_X%dY%d.txt"\
%(obj_name, r_scale, b_name,
mag_limit[0], patch_Xmin0, patch_Ymin0))
# Check if the file exist
if os.path.isfile(fname_res_Rnorm):
table_res_Rnorm = Table.read(fname_res_Rnorm, format="ascii")
else:
sys.exit("Table %s does not exist. Exit."%fname_res_Rnorm)
# Crop the catalog
table_res_Rnorm = crop_catalog(table_res_Rnorm, bounds=image_bounds0)
# Do not use flagged measurement
Iflag = table_res_Rnorm["Iflag"]
tables_res_Rnorm += [table_res_Rnorm[Iflag==0]]
return tables_faint, tables_res_Rnorm
def assign_star_props(table_faint, table_res_Rnorm, Image,
r_scale=12, mag_threshold=[14,11],
psf=None, keys='Imed', verbose=True,
draw=True, save=False, save_dir='./'):
""" Assign position and flux for faint and bright stars from tables. """
from .modeling import Stars
# Image attributes
ZP = Image.ZP
sky_mean = Image.bkg
image_size = Image.image_size
pos_ref = (Image.image_bounds[0], Image.image_bounds[1])
try:
ma = table_faint['FLUX_AUTO'].data.mask
except AttributeError:
ma = np.isnan(table_faint['FLUX_AUTO'])
# Positions & Flux of faint stars from measured norm
star_pos1 = np.vstack([table_faint['X_IMAGE_PS'].data[~ma],
table_faint['Y_IMAGE_PS'].data[~ma]]).T - pos_ref
Flux1 = np.array(table_faint['FLUX_AUTO'].data[~ma])
# Positions & Flux (estimate) of bright stars from catalog
star_pos2 = np.vstack([table_res_Rnorm['X_IMAGE_PS'],
table_res_Rnorm['Y_IMAGE_PS']]).T - pos_ref
Flux2 = 10**((np.array(table_res_Rnorm["MAG_AUTO_corr"])-ZP)/(-2.5))
# Estimate of brightness I at r_scale (I = Intensity - BKG) and flux
z_norm = table_res_Rnorm['Imed'].data - sky_mean
z_norm[z_norm<=0] = z_norm[z_norm>0].min()
# Convert/printout thresholds
Flux_threshold = 10**((np.array(mag_threshold) - ZP) / (-2.5))
if verbose:
print('Magnitude Thresholds: {0}, {1} mag'.format(*mag_threshold))
print("(<=> Flux Thresholds: {0}, {1} ADU)".format(*np.around(Flux_threshold,2)))
try:
SB_threshold = psf.Flux2SB(Flux_threshold, BKG=sky_mean, ZP=ZP, r=r_scale)
print("(<=> Surface Brightness Thresholds: {0}, {1} mag/arcsec^2 at {2} pix)\n"\
.format(*np.around(SB_threshold,1),r_scale))
except:
pass
# Combine two samples, make sure they do not overlap
star_pos = np.vstack([star_pos1, star_pos2])
Flux = np.concatenate([Flux1, Flux2])
stars_all = Stars(star_pos, Flux, Flux_threshold=Flux_threshold)
# Bright stars in model
stars_0 = Stars(star_pos2, Flux2, Flux_threshold=Flux_threshold,
z_norm=z_norm, r_scale=r_scale, BKG=sky_mean, verbose=verbose)
stars_0 = stars_0.remove_outsider(image_size, d=[36, 12])
if draw:
stars_all.plot_flux_dist(label='All', color='plum')
stars_0.plot_flux_dist(label='Model', color='orange', ZP=ZP,
save=save, save_dir=save_dir)
plt.show()
return stars_0, stars_all
def cross_match(wcs_data, SE_catalog, bounds, radius=None,
pixel_scale=2.5, mag_thre=15, sep=5*u.arcsec,
clean_catalog=True, mag_name='rmag',
catalog={'Pan-STARRS': 'II/349/ps1'},
columns={'Pan-STARRS': ['RAJ2000', 'DEJ2000', 'e_RAJ2000', 'e_DEJ2000',
'objID', 'Qual', 'gmag', 'e_gmag', 'rmag', 'e_rmag']},
column_filters={'Pan-STARRS': {'rmag':'{0} .. {1}'.format(5, 23)}},
magnitude_name={'Pan-STARRS':['rmag','gmag']},
verbose=True):
"""
Cross match SExtractor catalog with Vizier Online catalog.
'URAT': 'I/329/urat1'
magnitude_name: "rmag"
columns: ['RAJ2000', 'DEJ2000', 'mfa', 'gmag', 'e_gmag', 'rmag', 'e_rmag']
column_filters: {'mfa':'=1', 'rmag':'{0} .. {1}'.format(8, 18)}
'USNO': 'I/252/out'
magnitude_name: "Rmag"
columns: ['RAJ2000', 'DEJ2000', 'Bmag', 'Rmag']
column_filters: {"Rmag":'{0} .. {1}'.format(5, 15)}
"""
from astropy.table import join, vstack
cen = (bounds[2]+bounds[0])/2., (bounds[3]+bounds[1])/2.
coord_cen = wcs_data.pixel_to_world(cen[0], cen[1])
if radius is None:
L = math.sqrt((cen[0]-bounds[0])**2 + (cen[1]-bounds[1])**2)
radius = L * pixel_scale * u.arcsec
print("Search", np.around(radius.to(u.deg), 3), "around:")
print(coord_cen)
for j, (cat_name, table_name) in enumerate(catalog.items()):
# Query from Vizier
result = query_vizier(catalog_name=cat_name,
radius=radius,
columns=columns[cat_name],
column_filters=column_filters[cat_name],
coord=coord_cen)
Cat_full = result[table_name]
if len(cat_name) > 4:
c_name = cat_name[0] + cat_name[-1]
else:
c_name = cat_name
m_name = np.atleast_1d(mag_name)[j]
        # Transform catalog world coordinates into pixel positions
Cat_full = transform_coords2pixel(Cat_full, wcs_data, name=c_name)
# Crop catalog and sort by the catalog magnitude
Cat_crop = crop_catalog(Cat_full, bounds, sortby=m_name,
keys=("X_IMAGE"+'_'+c_name, "Y_IMAGE"+'_'+c_name))
# catalog magnitude
mag_cat = Cat_crop[m_name]
        # Select bright stars (mainly for cleaning duplicate sources in the catalog)
Cat_bright = Cat_crop[(np.less(mag_cat, mag_thre,
where=~np.isnan(mag_cat))) & ~np.isnan(mag_cat)]
mag_cat.mask[np.isnan(mag_cat)] = True
if clean_catalog:
# Clean duplicate items in the catalog
c_bright = SkyCoord(Cat_bright['RAJ2000'], Cat_bright['DEJ2000'], unit=u.deg)
c_catalog = SkyCoord(Cat_crop['RAJ2000'], Cat_crop['DEJ2000'], unit=u.deg)
idxc, idxcatalog, d2d, d3d = c_catalog.search_around_sky(c_bright, sep)
inds_c, counts = np.unique(idxc, return_counts=True)
row_duplicate = np.array([], dtype=int)
# Use the measurement with min error in RA/DEC
for i in inds_c[counts>1]:
obj_duplicate = Cat_crop[idxcatalog][idxc==i]
# obj_duplicate.pprint(max_lines=-1, max_width=-1)
# Remove detection without magnitude measurement
mag_obj = obj_duplicate[m_name]
obj_duplicate = obj_duplicate[~np.isnan(mag_obj)]
# Use the detection with the best astrometry
e2_coord = obj_duplicate["e_RAJ2000"]**2 + obj_duplicate["e_DEJ2000"]**2
min_e2_coord = np.nanmin(e2_coord)
for ID in obj_duplicate[e2_coord>min_e2_coord]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
Cat_crop.remove_rows(np.unique(row_duplicate))
#Cat_bright = Cat_crop[mag_cat<mag_thre]
for m_name in magnitude_name[cat_name]:
mag = Cat_crop[m_name]
if verbose:
print("%s %s: %.3f ~ %.3f"%(cat_name, m_name, mag.min(), mag.max()))
# Merge Catalog
SE_columns = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD",
"MAG_AUTO", "FLUX_AUTO", "FWHM_IMAGE", "FLAGS"]
keep_columns = SE_columns + ["ID"+'_'+c_name] + magnitude_name[cat_name] + \
["X_IMAGE"+'_'+c_name, "Y_IMAGE"+'_'+c_name]
tab_match = merge_catalog(SE_catalog, Cat_crop, sep=sep,
keep_columns=keep_columns)
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
keep_columns=keep_columns)
# Rename columns
for m_name in magnitude_name[cat_name]:
tab_match[m_name].name = m_name+'_'+c_name
tab_match_bright[m_name].name = m_name+'_'+c_name
# Join tables
if j==0:
tab_target_all = tab_match
tab_target = tab_match_bright
else:
tab_target_all = join(tab_target_all, tab_match, keys=SE_columns,
join_type='left', metadata_conflicts='silent')
tab_target = join(tab_target, tab_match_bright, keys=SE_columns,
join_type='left', metadata_conflicts='silent')
# Sort matched catalog by SE MAG_AUTO
tab_target.sort('MAG_AUTO')
tab_target_all.sort('MAG_AUTO')
mag_all = tab_target_all[mag_name+'_'+c_name]
mag = tab_target[mag_name+'_'+c_name]
if verbose:
print("Matched stars with %s %s: %.3f ~ %.3f"\
%(cat_name, mag_name, mag_all.min(), mag_all.max()))
print("Matched bright stars with %s %s: %.3f ~ %.3f"\
%(cat_name, mag_name, mag.min(), mag.max()))
return tab_target, tab_target_all, Cat_crop
def cross_match_PS1_DR2(wcs_data, SE_catalog, image_bounds,
band='g', radius=None, clean_catalog=True,
pixel_scale=2.5, mag_thre=15, sep=2.5*u.arcsec,
verbose=True):
"""
Use PANSTARRS DR2 API to do cross-match with the SE source catalog.
Note this could be (much) slower compared to cross-match using Vizier.
Parameters
----------
wcs_data : wcs of data
SE_catalog : SE source catalog
image_bounds : Nx4 2d / 1d array defining the cross-match region(s) [Xmin, Ymin, Xmax, Ymax]
radius : radius (in astropy unit) of search to PS-1 catalog.
If not given, use the half diagonal length of the region.
clean_catalog : whether to clean the matched catalog. (default True)
        The PS-1 catalog can contain duplicate entries for a single source with
        different measurements. If True, duplicate entries of bright sources are
        cleaned by keeping the closest match with a valid magnitude and, among the
        remaining ones, the brightest detection in that band.
    mag_thre : magnitude threshold defining bright stars.
sep : maximum separation (in astropy unit) for crossmatch with SE.
Returns
----------
tab_target : table containing matched bright sources with SE source catalog
tab_target_all : table containing matched all sources with SE source catalog
catalog_star : PS-1 catalog of all sources in the region(s)
"""
from astropy.table import join, vstack
from astropy.nddata.bitmask import interpret_bit_flags
from .API_PS1_DR2 import ps1cone
band = band.lower()
mag_name = band + 'MeanPSFMag'
c_name = 'PS'
for j, bounds in enumerate(np.atleast_2d(image_bounds)):
cen = (bounds[2]+bounds[0])/2., (bounds[3]+bounds[1])/2.
coord_cen = wcs_data.pixel_to_world(cen[0], cen[1])
ra, dec = coord_cen.ra.value, coord_cen.dec.value
if radius is None:
L = math.sqrt((cen[0]-bounds[0])**2 + (cen[1]-bounds[1])**2)
radius = (L * pixel_scale * u.arcsec).to(u.deg)
print("Search", np.around(radius, 3), "around:")
print(coord_cen)
#### Query PANSTARRS start ####
constraints = {'nDetections.gt':1, band+'MeanPSFMag.lt':23}
# strip blanks and weed out blank and commented-out values
columns = """raMean,decMean,raMeanErr,decMeanErr,nDetections,ng,nr,
gMeanPSFMag,gMeanPSFMagErr,gFlags,rMeanPSFMag,rMeanPSFMagErr,rFlags""".split(',')
columns = [x.strip() for x in columns]
columns = [x for x in columns if x and not x.startswith('#')]
results = ps1cone(ra, dec, radius.value, release='dr2', columns=columns, **constraints)
Cat_full = ascii.read(results)
        for filt in 'gr':
            col = filt+'MeanPSFMag'
Cat_full[col].format = ".4f"
Cat_full[col][Cat_full[col] == -999.0] = np.nan
for coord in ['ra','dec']:
Cat_full[coord+'MeanErr'].format = ".5f"
#### Query PANSTARRS end ####
Cat_full.sort(mag_name)
Cat_full['raMean'].unit = u.deg
Cat_full['decMean'].unit = u.deg
Cat_full = transform_coords2pixel(Cat_full, wcs_data, name=c_name,
RA_key="raMean", DE_key="decMean")
# Crop catalog and sort by the catalog magnitude
Cat_crop = crop_catalog(Cat_full, bounds, sortby=mag_name,
keys=("X_IMAGE"+'_'+c_name, "Y_IMAGE"+'_'+c_name))
# Remove detection without magnitude
has_mag = ~np.isnan(Cat_crop[mag_name])
Cat_crop = Cat_crop[has_mag]
# Pick out bright stars
mag_cat = Cat_crop[mag_name]
Cat_bright = Cat_crop[mag_cat<mag_thre]
if clean_catalog:
# A first crossmatch with bright stars in catalog for cleaning
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
RA_key="raMean", DE_key="decMean")
tab_match_bright.sort(mag_name)
# Clean duplicate items in the catalog
c_bright = SkyCoord(tab_match_bright['X_WORLD'],
tab_match_bright['Y_WORLD'], unit=u.deg)
c_catalog = SkyCoord(Cat_crop['raMean'],
Cat_crop['decMean'], unit=u.deg)
idxc, idxcatalog, d2d, d3d = \
c_catalog.search_around_sky(c_bright, sep)
inds_c, counts = np.unique(idxc, return_counts=True)
row_duplicate = np.array([], dtype=int)
            # Pick one measurement per source following the criteria below
for i in inds_c[counts>1]:
obj_dup = Cat_crop[idxcatalog][idxc==i]
obj_dup['sep'] = d2d[idxc==i]
#obj_dup.pprint(max_lines=-1, max_width=-1)
# Use the detection with mag
mag_obj_dup = obj_dup[mag_name]
obj_dup = obj_dup[~np.isnan(mag_obj_dup)]
# Use the closest match
good = (obj_dup['sep'] == min(obj_dup['sep']))
### Extra Criteria
# # Coordinate error of detection
# err2_coord = obj_dup["raMeanErr"]**2 + \
# obj_dup["decMeanErr"]**2
# # Use the detection with the best astrometry
# min_e2_coord = np.nanmin(err2_coord)
# good = (err2_coord == min_e2_coord)
# # Use the detection with PSF mag err
# has_err_mag = obj_dup[mag_name+'Err'] > 0
# # Use the detection > 0
# n_det = obj_dup['n'+band]
# has_n_det = n_det > 0
# # Use photometry not from tycho in measurement
# use_tycho_phot = extract_bool_bitflags(obj_dup[band+'Flags'], 7)
# good = has_err_mag & has_n_det & (~use_tycho_phot)
###
# Add rows to be removed
for ID in obj_dup[~good]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
obj_dup = obj_dup[good]
if len(obj_dup)<=1:
continue
# Use brightest detection
mag = obj_dup[mag_name]
for ID in obj_dup[mag>min(mag)]['ID'+'_'+c_name]:
k = np.where(Cat_crop['ID'+'_'+c_name]==ID)[0][0]
row_duplicate = np.append(row_duplicate, k)
# Remove rows
Cat_crop.remove_rows(np.unique(row_duplicate))
# Subset catalog containing bright stars
Cat_bright = Cat_crop[Cat_crop[mag_name]<mag_thre]
# Merge Catalog
SE_columns = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD",
"MAG_AUTO", "FLUX_AUTO", "FWHM_IMAGE"]
keep_columns = SE_columns + ["ID"+'_'+c_name] + columns + \
["X_IMAGE"+'_'+c_name, "Y_IMAGE"+'_'+c_name]
tab_match = merge_catalog(SE_catalog, Cat_crop, sep=sep,
RA_key="raMean", DE_key="decMean", keep_columns=keep_columns)
tab_match_bright = merge_catalog(SE_catalog, Cat_bright, sep=sep,
RA_key="raMean", DE_key="decMean", keep_columns=keep_columns)
if j==0:
tab_target_all = tab_match
tab_target = tab_match_bright
catalog_star = Cat_crop
else:
tab_target_all = vstack([tab_target_all, tab_match], join_type='exact')
tab_target = vstack([tab_target, tab_match_bright], join_type='exact')
catalog_star = vstack([catalog_star, Cat_crop], join_type='exact')
# Sort matched catalog by matched magnitude
tab_target.sort(mag_name)
tab_target_all.sort(mag_name)
if verbose:
print("Matched stars with PANSTARRS DR2 %s: %.3f ~ %.3f"\
%(mag_name, np.nanmin(tab_target_all[mag_name]),
np.nanmax(tab_target_all[mag_name])))
print("Matched bright stars with PANSTARRS DR2 %s: %.3f ~ %.3f"\
%(mag_name, np.nanmin(tab_target[mag_name]),
np.nanmax(tab_target[mag_name])))
return tab_target, tab_target_all, catalog_star
def calculate_color_term(tab_target, mag_range=[13,18], mag_name='gmag_PS', draw=True):
"""
    Use non-saturated stars to calculate the color correction between SE MAG_AUTO and the magnitude in the matched catalog.
Parameters
----------
    tab_target : full matched source catalog
mag_range : range of magnitude for stars to be used
mag_name : column name of magnitude in tab_target
    draw : whether to draw a plot showing MAG_AUTO vs the magnitude difference.
Returns
----------
    CT : color correction term (SE - catalog)
"""
mag = tab_target["MAG_AUTO"]
mag_cat = tab_target[mag_name]
d_mag = tab_target["MAG_AUTO"] - mag_cat
d_mag = d_mag[(mag>mag_range[0])&(mag<mag_range[1])&(~np.isnan(mag_cat))]
mag = mag[(mag>mag_range[0])&(mag<mag_range[1])&(~np.isnan(mag_cat))]
d_mag_clip = sigma_clip(d_mag, 3, maxiters=10)
CT = np.mean(d_mag_clip)
print('\nAverage Color Term [SE-catalog] = %.5f'%CT)
if draw:
plt.scatter(mag, d_mag, s=8, alpha=0.2, color='gray')
plt.scatter(mag, d_mag_clip, s=6, alpha=0.3)
plt.axhline(CT, color='k', alpha=0.7)
plt.ylim(-3,3)
plt.xlabel("MAG_AUTO (SE)")
plt.ylabel("MAG_AUTO $-$ %s"%mag_name)
plt.show()
return np.around(CT,5)
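# Illustrative sketch: color term recovered from a fake matched catalog that has a
# constant 0.3 mag offset between SE MAG_AUTO and the reference magnitude.
def _demo_color_term():
    from astropy.table import Table
    rng = np.random.RandomState(1)
    mag_ref = rng.uniform(13.5, 17.5, 100)
    tab = Table({'MAG_AUTO': mag_ref + 0.3 + 0.02 * rng.standard_normal(100),
                 'gmag_PS': mag_ref})
    return calculate_color_term(tab, mag_range=[13, 18], mag_name='gmag_PS', draw=False)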
def fit_empirical_aperture(tab_target, seg_map, mag_name='rmag_PS',
mag_range=[13, 22], K=2, degree=3, draw=True):
"""
    Fit an empirical polynomial relation between the log aperture radius and the corrected magnitude, based on the SE segm map. Radii are enlarged by a factor K.
Parameters
----------
    tab_target : full matched source catalog
seg_map : training segm map
mag_name : column name of magnitude in tab_target
mag_range : range of magnitude for stars to be used
K : enlargement factor on the original segm map
degree : degree of polynomial (default 3)
draw : whether to draw log R vs mag
Returns
----------
estimate_radius : a function turns magnitude into log R
"""
print("\nFit %d-order empirical relation of aperture radii for catalog stars based on SE (X%.1f)"%(degree, K))
# Read from SE segm map
segm_deb = SegmentationImage(seg_map)
R_aper = (segm_deb.get_areas(tab_target["NUMBER"])/np.pi)**0.5
tab_target['logR'] = np.log10(K * R_aper)
mag_match = tab_target[mag_name]
mag_match[np.isnan(mag_match)] = -1
tab = tab_target[(mag_match>mag_range[0])&(mag_match<mag_range[1])]
mag_all = tab[mag_name]
logR = tab['logR']
p_poly = np.polyfit(mag_all, logR, degree)
f_poly = np.poly1d(p_poly)
if draw:
plt.scatter(tab_target[mag_name], tab_target['logR'], s=8, alpha=0.2, color='gray')
plt.scatter(mag_all, logR, s=8, alpha=0.2, color='k')
mag_ls = np.linspace(6,23)
clip = np.zeros_like(mag_all, dtype='bool')
for i in range(3):
if draw: plt.plot(mag_ls, f_poly(mag_ls), lw=1, ls='--')
mag, logr = mag_all[~clip], logR[~clip]
p_poly = np.polyfit(mag, logr, degree)
f_poly = np.poly1d(p_poly)
dev = np.sqrt((logR-f_poly(mag_all))**2)
clip = dev>3*np.mean(dev)
if draw:
plt.plot(mag_ls, f_poly(mag_ls), lw=2, color='gold')
plt.scatter(mag, logr, s=3, alpha=0.2, color='gold')
plt.xlabel("%s (catalog)"%mag_name)
plt.ylabel(r"$\log_{10}\,R$")
plt.xlim(7,23)
plt.ylim(0.15,2.2)
plt.show()
estimate_radius = lambda m: max(10**min(2, f_poly(m)), 2)
return estimate_radius
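# Typical call pattern (sketch only; `tab_target_all` and `seg_map` come from the
# cross-match and SExtractor steps above and are not defined here):
#
#   estimate_radius = fit_empirical_aperture(tab_target_all, seg_map,
#                                            mag_name='rmag_PS', mag_range=[13, 22],
#                                            K=2, degree=3, draw=False)
#   # estimate_radius(mag) then returns the enlarged aperture radius in pixels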
def make_segm_from_catalog(catalog_star, image_bound, estimate_radius,
mag_name='rmag', cat_name='PS', obj_name='', band='G',
draw=True, save=False, dir_name='./Measure'):
"""
    Make a segmentation map from the star catalog. The aperture sizes are based on the SE segm map.
Parameters
----------
catalog_star : star catalog
image_bound : 1X4 1d array defining bounds of region
estimate_radius : function of turning magnitude into log R
mag_name : magnitude column name in catalog_star
cat_name : suffix of star catalog used
draw : whether to draw the output segm map
save : whether to save the segm map as fits
dir_name : path of saving
Returns
----------
seg_map_catalog : output segm map generated from catalog
"""
try:
catalog = catalog_star[~catalog_star[mag_name].mask]
except AttributeError:
catalog = catalog_star[~np.isnan(catalog_star[mag_name])]
print("\nMake segmentation map based on catalog %s %s: %d stars"%(cat_name, mag_name, len(catalog)))
R_est = np.array([estimate_radius(m) for m in catalog[mag_name]])
Xmin, Ymin = image_bound[:2]
apers = [CircularAperture((X_c-Xmin, Y_c-Ymin), r=r)
for (X_c,Y_c, r) in zip(catalog['X_IMAGE'+'_'+cat_name],
catalog['Y_IMAGE'+'_'+cat_name], R_est)]
image_size = image_bound[2] - image_bound[0]
seg_map_catalog = np.zeros((image_size, image_size))
# Segmentation k sorted by mag of source catalog
for (k, aper) in enumerate(apers):
star_ma = aper.to_mask(method='center').to_image((image_size, image_size))
if star_ma is not None:
seg_map_catalog[star_ma.astype(bool)] = k+2
if draw:
from .plotting import make_rand_cmap
plt.figure(figsize=(5,5))
plt.imshow(seg_map_catalog, vmin=1, cmap=make_rand_cmap(int(seg_map_catalog.max())))
plt.show()
# Save segmentation map built from catalog
if save:
check_save_path(dir_name, make_new=False, verbose=False)
hdu_seg = fits.PrimaryHDU(seg_map_catalog.astype(int))
b_name = band.lower()
file_name = os.path.join(dir_name, "%s-segm_%smag_catalog_X%dY%d.fits" %(obj_name, b_name, Xmin, Ymin))
hdu_seg.writeto(file_name, overwrite=True)
print("Save segmentation map made from catalog as %s\n"%file_name)
return seg_map_catalog
def save_thumbs(obj, filename):
import pickle
fname = filename+'.pkl'
print("Save thumbs to: %s"%fname)
with open(fname, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_thumbs(filename):
import pickle
fname = filename+'.pkl'
print("Read thumbs from: %s"%fname)
with open(fname, 'rb') as f:
return pickle.load(f)
### Prior Helper ###
def build_independent_priors(priors):
""" Build priors for Bayesian fitting. Priors should has a (scipy-like) ppf class method."""
def prior_transform(u):
v = u.copy()
for i in range(len(u)):
v[i] = priors[i].ppf(u[i])
return v
return prior_transform
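# Minimal illustrative example: a flat prior and a normal prior combined into one
# unit-cube transform, as used by nested-sampling codes such as dynesty.
def _demo_prior_transform():
    from scipy import stats
    priors = [stats.uniform(loc=2.5, scale=1.5),     # e.g. outer slope n in [2.5, 4.0]
              stats.norm(loc=884., scale=3.)]        # e.g. sky background level
    prior_transform = build_independent_priors(priors)
    return prior_transform(np.array([0.5, 0.5]))     # -> array([3.25, 884.])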
# ###
# class DynamicNestedSampler:
# def __init__(self, loglike, prior_transform, ndim,
# sample='auto', bound='multi',
# n_cpu=None, n_thread=None):
# self.ndim = ndim
# if n_cpu is None:
# n_cpu = mp.cpu_count()
# if n_thread is not None:
# n_thread = max(n_thread, n_cpu-1)
# if n_cpu > 1:
# self.open_pool(n_cpu)
# self.use_pool = {'update_bound': False}
# else:
# self.pool = None
# self.use_pool = None
# dsampler = dynesty.DynamicNestedSampler(loglike, prior_transform, ndim,
# sample=sample, bound=bound,
# pool=self.pool, queue_size=n_thread,
# use_pool=self.use_pool)
# self.dsampler = dsampler
# def run_fitting(self, nlive_init=100,
# maxiter=10000,
# nlive_batch=50, maxbatch=2,
# pfrac=0.8, close_pool=True,
# print_progress=True):
# print("Run Nested Fitting for the image... Dim of params: %d"%self.ndim)
# start = time.time()
# dlogz = 1e-3 * (nlive_init - 1) + 0.01
# self.dsampler.run_nested(nlive_init=nlive_init,
# nlive_batch=nlive_batch,
# maxbatch=maxbatch,
# maxiter=maxiter,
# dlogz_init=dlogz,
# wt_kwargs={'pfrac': pfrac},
# print_progress=print_progress)
# end = time.time()
# self.run_time = (end-start)
# print("\nFinish Fitting! Total time elapsed: %.3g s"%self.run_time)
# if (self.pool is not None) & close_pool:
# self.close_pool()
# def open_pool(self, n_cpu):
# print("\nOpening new pool: # of CPU used: %d"%(n_cpu - 1))
# self.pool = mp.Pool(processes=n_cpu - 1)
# self.pool.size = n_cpu - 1
# def close_pool(self):
# print("\nPool Closed.")
# self.pool.close()
# self.pool.join()
# @property
# def results(self):
# res = getattr(self.dsampler, 'results', {})
# return res
# def get_params(self, return_sample=False):
# return get_params_fit(self.results, return_sample)
# def save_results(self, filename, fit_info=None, save_dir='.'):
# res = {}
# if fit_info is not None:
# for key, val in fit_info.items():
# res[key] = val
# res['run_time'] = self.run_time
# res['fit_res'] = self.results
# fname = os.path.join(save_dir, filename)
# save_nested_fitting_result(res, fname)
# self.res = res
# def cornerplot(self, labels=None, truths=None, figsize=(16,15),
# save=False, save_dir='.', suffix=''):
# from plotting import draw_cornerplot
# draw_cornerplot(self.results, self.ndim,
# labels=labels, truths=truths, figsize=figsize,
# save=save, save_dir=save_dir, suffix=suffix)
# def cornerbound(self, prior_transform, labels=None, figsize=(10,10),
# save=False, save_dir='.', suffix=''):
# fig, axes = plt.subplots(self.ndim-1, self.ndim-1, figsize=figsize)
# fg, ax = dyplot.cornerbound(self.results, it=1000, labels=labels,
# prior_transform=prior_transform,
# show_live=True, fig=(fig, axes))
# if save:
# plt.savefig(os.path.join(save_dir, "Cornerbound%s.png"%suffix), dpi=120)
# plt.close()
# def plot_fit_PSF1D(self, psf, **kwargs):
# from plotting import plot_fit_PSF1D
# plot_fit_PSF1D(self.results, psf, **kwargs)
# def generate_fit(self, psf, stars, image_base,
# brightest_only=False, draw_real=True, n_spline=2,
# fit_sigma=True, fit_frac=False, leg2d=False, sigma=None,
# norm='brightness', n_out=4, theta_out=1200):
# from utils import make_psf_from_fit
# from modeling import generate_image_fit
# psf_fit, params = make_psf_from_fit(self.results, psf, leg2d=leg2d,
# sigma=sigma, n_spline=n_spline,
# fit_sigma=fit_sigma, fit_frac=fit_frac,
# n_out=n_out, theta_out=theta_out)
# image_star, noise_fit, bkg_fit = generate_image_fit(psf_fit, stars, norm=norm,
# brightest_only=brightest_only,
# draw_real=draw_real, leg2d=leg2d)
# if image_base is None:
# image_base = np.zeros_like(image_star)
# image_fit = image_star + image_base + bkg_fit
# self.image_fit = image_fit
# self.image_star = image_star
# self.bkg_fit = bkg_fit
# self.noise_fit = noise_fit
# return psf_fit, params
# def draw_comparison_2D(self, image, mask, **kwargs):
# from plotting import draw_comparison_2D
# draw_comparison_2D(self.image_fit, image, mask, self.image_star,
# self.noise_fit, **kwargs)
# def draw_background(self, save=False, save_dir='.', suffix=''):
# plt.figure()
# im = plt.imshow(self.bkg_fit); colorbar(im)
# if save:
# plt.savefig(os.path.join(save_dir,'Legendre2D%s.png'%(suffix)), dpi=80)
# else:
# plt.show()
# ###
### Reconstruct PSF from fit ###
def make_psf_from_fit(fit_res, psf, image_size=600, n_out=4, theta_out=1200, n_spline=2,
fit_sigma=True, fit_frac=False, leg2d=False, sigma=None):
from .sampler import get_params_fit
psf_fit = psf.copy()
params, _, _ = get_params_fit(fit_res)
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
if psf.aureole_model == "moffat":
gamma1_fit, beta1_fit = params[:2]
param_update = {'gamma1':gamma1_fit, 'beta1':beta1_fit}
else:
N_n = n_spline
N_theta = n_spline - 1
if psf.aureole_model == "power":
n_fit = params[0]
param_update = {'n':n_fit}
elif psf.aureole_model == "multi-power":
n_s_fit = np.concatenate([params[:N_n], [n_out]])
theta_s_fit = np.concatenate([[psf.theta_0],
np.atleast_1d(10**params[N_n:N_n+N_theta]),[theta_out]])
param_update = {'n_s':n_s_fit, 'theta_s':theta_s_fit}
if fit_frac:
frac = 10**params[-1]
param_update['frac'] = frac
psf_fit.update(param_update)
mu_fit = params[-K-1]
if fit_sigma:
sigma_fit = 10**params[-K]
else:
sigma_fit = sigma
if leg2d:
psf_fit.A10, psf_fit.A01 = 10**params[-K-2], 10**params[-K-3]
psf_fit.bkg, psf_fit.bkg_std = mu_fit, sigma_fit
_ = psf_fit.generate_core()
_, _ = psf_fit.generate_aureole(psf_range=image_size)
return psf_fit, params
def calculate_reduced_chi2(fit, data, uncertainty, dof=5):
# uncertainty = 10**params[-1]
chi2_reduced = np.sum((fit-data)**2/uncertainty**2)/(len(data)-dof)
print("Reduced Chi^2: %.5f"%chi2_reduced)
class MyError(Exception):
def __init__(self, message): self.message = message
def __str__(self): return(repr(self.message))
def __repr__(self): return 'MyError(%r)'%(str(self))
class InconvergenceError(MyError):
def __init__(self, message): self.message = message
def __repr__(self):
return 'InconvergenceError: %r'%self.message
| 66,081 | 38.641272 | 142 |
py
|
elderflower
|
elderflower-master/elderflower/.ipynb_checkpoints/plotting-checkpoint.py
|
import os
import numpy as np
try:
import seaborn as sns
seaborn_plot = True
except ImportError:
import warnings
warnings.warn("Seaborn is not installed. Plot with matplotlib.")
seaborn_plot = False
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.cmap'] = 'gnuplot2'
plt.rcParams["font.serif"] = "Times New Roman"
rcParams.update({'xtick.major.pad': '5.0'})
rcParams.update({'xtick.major.size': '4'})
rcParams.update({'xtick.major.width': '1.'})
rcParams.update({'xtick.minor.pad': '5.0'})
rcParams.update({'xtick.minor.size': '4'})
rcParams.update({'xtick.minor.width': '0.8'})
rcParams.update({'ytick.major.pad': '5.0'})
rcParams.update({'ytick.major.size': '4'})
rcParams.update({'ytick.major.width': '1.'})
rcParams.update({'ytick.minor.pad': '5.0'})
rcParams.update({'ytick.minor.size': '4'})
rcParams.update({'ytick.minor.width': '0.8'})
rcParams.update({'axes.labelsize': 16})
rcParams.update({'font.size': 16})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import LogStretch, AsinhStretch, HistEqStretch
from astropy.stats import mad_std
from photutils import CircularAperture
### Plotting Helpers ###
def LogNorm():
return ImageNormalize(stretch=LogStretch())
def AsinhNorm(a=0.1):
return ImageNormalize(stretch=AsinhStretch(a=a))
def HistEqNorm(data):
return ImageNormalize(stretch=HistEqStretch(data))
def vmin_3mad(img):
""" lower limit of visual imshow defined by 3 mad above median """
return np.median(img)-3*mad_std(img)
def vmax_2sig(img):
""" upper limit of visual imshow defined by 2 sigma above median """
return np.median(img)+2*np.std(img)
def colorbar(mappable, pad=0.2, size="5%", loc="right", color_nan='gray', **args):
""" Customized colorbar """
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
if loc=="bottom":
orent = "horizontal"
pad = 1.5*pad
rot = 75
else:
orent = "vertical"
rot = 0
cax = divider.append_axes(loc, size=size, pad=pad)
cb = fig.colorbar(mappable, cax=cax, orientation=orent, **args)
cb.ax.set_xticklabels(cb.ax.get_xticklabels(),rotation=rot)
cmap = cb.get_cmap()
cmap.set_bad(color=color_nan, alpha=0.3)
return cb
def make_rand_cmap(n_label, rand_state = 12345):
from photutils.utils import make_random_cmap
rand_cmap = make_random_cmap(n_label, random_state=rand_state)
rand_cmap.set_under(color='black')
rand_cmap.set_over(color='white')
return rand_cmap
def make_rand_color(n_color, seed=1234,
colour = ["indianred", "plum", "seagreen", "lightcyan",
"orchid", 'gray', 'orange', 'yellow', "brown" ]):
import random
random.seed(seed)
rand_colours = [random.choice(colour) for i in range(n_color)]
return rand_colours
def draw_mask_map(image, seg_map, mask_deep, stars,
r_core=None, r_out=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map """
from matplotlib import patches
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6))
im1 = ax1.imshow(image, cmap='gray', norm=LogNorm(), vmin=vmin, vmax=1e4)
ax1.set_title("Image")
n_label = seg_map.max()
ax2.imshow(seg_map, vmin=1, vmax=n_label-2, cmap=make_rand_cmap(n_label))
ax2.set_title("Deep Mask")
image2 = image.copy()
image2[mask_deep] = 0
im3 = ax3.imshow(image2, norm=LogNorm(), vmin=vmin, vmax=vmax, aspect='auto')
ax3.set_title("'Sky'")
colorbar(im3)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
if r_out is not None:
aper = CircularAperture(star_pos_A, r=r_out[0])
aper.plot(color='lime',lw=1.5,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_out[1])
aper.plot(color='c',lw=1.5,label="",alpha=0.7, axes=ax3)
patch_size = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), patch_size, patch_size, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_dual.png"), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def draw_mask_map_strip(image, seg_comb, mask_comb, stars,
ma_example=None, r_core=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map w/ strips """
from matplotlib import patches
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
if ma_example is not None:
mask_strip, mask_cross = ma_example
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6))
mask_strip[mask_cross.astype(bool)]=0.5
ax1.imshow(mask_strip, cmap="gray_r")
ax1.plot(star_pos_A[0][0], star_pos_A[0][1], "r*",ms=18)
ax1.set_title("Strip/Cross")
n_label = seg_comb.max()
ax2.imshow(seg_comb, vmin=1, vmax=n_label-3, cmap=make_rand_cmap(n_label))
ax2.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax2.set_title("Mask Comb.")
image3 = image.copy()
image3[mask_comb] = 0
im3 = ax3.imshow(image3, norm=LogNorm(), aspect='auto', vmin=vmin, vmax=vmax)
ax3.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax3.set_title("'Sky'")
colorbar(im3)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
size = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), size, size, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_strip.png"), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def Fit_background_distribution(image, mask_deep):
# Check background, fit with gaussian and exp-gaussian distribution
from scipy import stats
plt.figure(figsize=(6,4))
z_sky = image[~mask_deep]
if seaborn_plot:
sns.distplot(z_sky, label='Data', hist_kws={'alpha':0.3})
else:
plt.hist(z_sky, label='Data', alpha=0.3)
mu_fit, std_fit = stats.norm.fit(z_sky)
print(mu_fit, std_fit)
d_mod = stats.norm(loc=mu_fit, scale=std_fit)
x = np.linspace(d_mod.ppf(0.001), d_mod.ppf(0.999), 100)
plt.plot(x, d_mod.pdf(x), 'g-', lw=2, alpha=0.6, label='Norm Fit')
K_fit, mu_fit, std_fit = stats.exponnorm.fit(z_sky)
print(K_fit, mu_fit, std_fit)
d_mod2 = stats.exponnorm(loc=mu_fit, scale=std_fit, K=K_fit)
x = np.linspace(d_mod2.ppf(0.001), d_mod2.ppf(0.9999), 100)
plt.plot(x, d_mod2.pdf(x), 'r-', lw=2, alpha=0.6, label='Exp-Norm Fit')
plt.legend(fontsize=12)
def plot_PSF_model_1D(frac, f_core, f_aureole, psf_range=400,
yunit='Intensity', label='combined', log_scale=True,
ZP=27.1, pixel_scale=2.5, decompose=True):
from .utils import Intensity2SB
r = np.logspace(0, np.log10(psf_range), 100)
I_core = (1-frac) * f_core(r)
I_aureole = frac * f_aureole(r)
I_tot = I_core + I_aureole
if log_scale:
I_core, I_aureole, I_tot = np.log10(I_core), np.log10(I_aureole), np.log10(I_tot)
if yunit=='Intensity':
plt.semilogx(r, I_tot,
ls="-", lw=3,alpha=0.9, zorder=5, label=label)
if decompose:
plt.semilogx(r, I_core,
ls="--", lw=3, alpha=0.9, zorder=1, label='core')
plt.semilogx(r, I_aureole,
ls="--", lw=3, alpha=0.9, label='aureole')
plt.ylabel('log Intensity', fontsize=14)
plt.ylim(I_aureole.min(), I_tot.max()+0.25)
elif yunit=='SB':
plt.semilogx(r, -14.5+Intensity2SB(I=I_tot, BKG=0,
ZP=27.1, pixel_scale=pixel_scale),
ls="-", lw=3,alpha=0.9, zorder=5, label=label)
if decompose:
plt.semilogx(r, -14.5+Intensity2SB(I=I_core, BKG=0,
ZP=27.1, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=0.9, zorder=1, label='core')
plt.semilogx(r, -14.5+Intensity2SB(I=I_aureole, BKG=0,
ZP=27.1, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=0.9, label='aureole')
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.ylim(31,17)
plt.legend(loc=1, fontsize=12)
plt.xlabel('r [pix]', fontsize=14)
def plot_PSF_model_galsim(psf, image_size=800, contrast=None,
figsize=(7,6), save=False, save_dir='.'):
""" Plot and 1D PSF model and Galsim 2D model averaged in 1D """
from .utils import Intensity2SB, cal_profile_1d
pixel_scale = psf.pixel_scale
frac = psf.frac
psf_core = psf.psf_core
psf_aureole = psf.psf_aureole
psf_star = psf.psf_star
img_core = psf_core.drawImage(scale=pixel_scale, method="no_pixel")
img_aureole = psf_aureole.drawImage(nx=201, ny=201, scale=pixel_scale, method="no_pixel")
img_star = psf_star.drawImage(nx=image_size, ny=image_size, scale=pixel_scale, method="no_pixel")
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(frac*img_aureole.array, color="g",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label=psf.aureole_model)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d((1-frac)*img_core.array, color="orange",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Moffat")
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(img_star.array,
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Combined")
plt.legend(loc=1, fontsize=12)
r = np.logspace(0, np.log10(image_size), 100)
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
plt.plot(r, np.log10((1-frac) * comp1 + comp2 * frac), ls="-", lw=3, zorder=5)
plt.plot(r, np.log10((1-frac) * comp1), ls="--", lw=3, zorder=1)
plt.plot(r, np.log10(comp2 * frac), ls="--", lw=3)
if psf.aureole_model == "multi-power":
for t in psf.theta_s_pix:
plt.axvline(t, ls="--", color="k",alpha=0.3, zorder=1)
if contrast is not None:
plt.axhline(np.log10(comp1.max()/contrast),color="k",ls="--")
plt.title("Model PSF",fontsize=14)
plt.ylim(-8.5, -0.5)
plt.xlim(r_rbin.min()*0.8, r_rbin.max()*1.2)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Model_PSF.png"), dpi=120)
plt.close()
return img_star
def plot_flux_dist(Flux, Flux_thresholds, ZP=None,
save=False, save_dir='.', figsize=None, **kwargs):
# fall back to a plain histogram below if seaborn is unavailable
try:
    import seaborn as sns
    seaborn_plot = True
except ImportError:
    seaborn_plot = False
F_bright, F_verybright = Flux_thresholds
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
plt.axvline(np.log10(F_bright), color="k", ls="-",alpha=0.7, zorder=1)
plt.axvline(np.log10(F_verybright), color="k", ls="--",alpha=0.7, zorder=1)
plt.axvspan(1, np.log10(F_bright),
color='gray', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_bright), np.log10(F_verybright),
color='seagreen', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_verybright), 9,
color='steelblue', alpha=0.15, zorder=0)
if seaborn_plot:
sns.distplot(np.log10(Flux), kde=False, **kwargs)
else:
plt.hist(np.log10(Flux), alpha=0.5)
plt.yscale('log')
plt.xlabel('Estimated log Flux$_{tot}$ / Mag', fontsize=15)
plt.ylabel('# of stars', fontsize=15)
plt.legend(loc=1)
if ZP is not None:
ax1 = plt.gca()
xticks1 = ax1.get_xticks()
ax2 = ax1.twiny()
ax2.set_xticks(xticks1)
ax2.set_xticklabels(np.around(-2.5*xticks1+ZP ,1))
ax2.set_xbound(ax1.get_xbound())
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Flux_dist.png"), dpi=80)
plt.show()
plt.close()
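# Sketch: plot_flux_dist with synthetic log-normally distributed fluxes and
# DF-like thresholds; the numbers below are arbitrary illustrative values.
def _demo_plot_flux_dist():
    rng = np.random.default_rng(42)
    Flux = 10 ** rng.normal(4.5, 1.0, size=2000)     # synthetic total fluxes in ADU
    plot_flux_dist(Flux, Flux_thresholds=[7e4, 2.7e6], ZP=27.1)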
def draw_independent_priors(priors, xlabels=None, plabels=None,
save=False, save_dir='./'):
x_s = [np.linspace(d.ppf(0.01), d.ppf(0.99), 100) for d in priors]
fig, axes = plt.subplots(1, len(priors), figsize=(15,4))
for k, ax in enumerate(axes):
ax.plot(x_s[k], priors[k].pdf(x_s[k]),'-', lw=5, alpha=0.6, label=plabels[k])
ax.legend()
if xlabels is not None:
ax.set_xlabel(xlabels[k], fontsize=12)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Prior.png"), dpi=100)
plt.close()
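# Sketch: draw_independent_priors expects frozen scipy.stats distributions, one
# per fitted parameter; the two priors below are arbitrary examples.
def _demo_draw_independent_priors():
    from scipy import stats
    priors = [stats.uniform(loc=2.5, scale=1.5),   # e.g. a power index n
              stats.norm(loc=2.0, scale=0.3)]      # e.g. log10 of a break radius
    draw_independent_priors(priors,
                            xlabels=['n', 'log theta'],
                            plabels=['p(n)', 'p(log theta)'])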
def draw_cornerplot(results, ndim, labels=None, truths=None, figsize=(16,14),
save=False, save_dir='.', suffix=''):
from dynesty import plotting as dyplot
fig = plt.subplots(ndim, ndim, figsize=figsize)
dyplot.cornerplot(results, truths=truths, labels=labels,
color="royalblue", truth_color="indianred",
title_kwargs={'fontsize':18, 'y': 1.04},
label_kwargs={'fontsize':16},
show_titles=True, fig=fig)
if save:
plt.savefig(os.path.join(save_dir, "Cornerplot%s.png"%suffix), dpi=150)
plt.show()
plt.close()
else:
plt.show()
def draw2D_fit_vs_truth_PSF_mpow(results, psf, stars, labels, image,
image_base=None, vmin=None, vmax=None,
avg_func='median', save=False, save_dir="."):
""" Compare 2D fit and truth image """
from .sampler import get_params_fit
N_n = len([lab for lab in labels if "n" in lab])
N_theta = len([lab for lab in labels if "theta" in lab])
pmed, pmean, pcov = get_params_fit(results)
fits = pmed if avg_func=='median' else pmean
print("Fitting (mean) : ", np.around(pmean,3))
print("Fitting (median) : ", np.around(pmed,3))
n_s_fit = fits[:N_n]
if N_theta > 0:
theta_s_fit = np.append([psf.theta_s[0]], 10**fits[N_n:N_n+N_theta])
else:
theta_s_fit = psf.theta_s
mu_fit, sigma_fit = fits[-2], 10**fits[-1]
noise_fit = make_noise_image(psf.image_size, sigma_fit)
psf_fit = psf.copy()
psf_fit.update({'n_s':n_s_fit, 'theta_s': theta_s_fit})
psf_range = psf.image_size * psf.pixel_scale
image_fit = generate_image_by_flux(psf_fit, stars, draw_real=True,
psf_range=[psf_range//2, psf_range])
image_fit = image_fit + mu_fit + noise_fit
if image_base is not None:
image_fit += image_base
if vmin is None:
vmin = mu_fit - 0.3 * sigma_fit
if vmax is None:
vmax = vmin + 11
fig, (ax1, ax2, ax3) = plt.subplots(1,3,figsize=(18,6))
im = ax1.imshow(image_fit, vmin=vmin, vmax=vmax, norm=LogNorm()); colorbar(im)
im = ax2.imshow(image, vmin=vmin, vmax=vmax, norm=LogNorm()); colorbar(im)
Diff = (image_fit-image)/image
im = ax3.imshow(Diff, vmin=-0.1, vmax=0.1, cmap='seismic'); colorbar(im)
ax1.set_title("Fit: I$_f$")
ax2.set_title("Original: I$_0$")
ax3.set_title("Frac.Diff: (I$_f$ - I$_0$) / I$_0$")
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir,
"Fit_vs_truth_image.png"), dpi=120)
plt.close()
def draw_comparison_2D(image_fit, data, mask, image_star, noise_fit=0,
r_core=None, vmin=None, vmax=None, cmap='gnuplot2', norm=None,
save=False, save_dir=".", suffix=""):
""" Compare data and fit in 2D """
mask_fit = getattr(mask, 'mask_comb', mask.mask_deep)
if vmin is None:
vmin = np.median(image_fit[~mask_fit]) - 1
if vmax is None:
vmax = vmin + 150
if norm is None:
norm1 = LogNorm()
norm2 = LogNorm()
else:
from copy import deepcopy
norm1 = norm
norm2 = deepcopy(norm1)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2,3,figsize=(16,9))
im = ax1.imshow(data, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax1.set_title("Data [I$_0$]", fontsize=15); colorbar(im)
im = ax2.imshow(image_fit+noise_fit, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax2.set_title("Fit [I$_f$]", fontsize=15); colorbar(im)
im = ax3.imshow(image_star, vmin=0, vmax=vmax-vmin, norm=norm2, cmap=cmap)
ax3.set_title("Bright Stars [I$_{f,B}$]", fontsize=15); colorbar(im)
frac_diff = (image_fit-data)/data
# frac_diff[mask_fit] = 0
im = ax4.imshow(frac_diff, vmin=-0.1, vmax=0.1, cmap="seismic")
ax4.set_title("Frac. Diff. [(I$_f$ - I$_0$)/I$_0$]", fontsize=15); colorbar(im)
# noise = np.sqrt((data/0.37/618)**2+(2/0.37/618)**2)
# chi = (image_fit-data)/noise
# im = ax4.imshow(chi, vmin=-10, vmax=10, cmap="seismic")
# ax4.set_title("Chi. [(I$_f$ - I$_0$)/$\sigma_0$]", fontsize=15); colorbar(im)
residual = (data-image_star)
im = ax5.imshow(residual, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax5.set_title("Bright Subtracted [I$_0$ - I$_{f,B}$]", fontsize=15); colorbar(im)
residual[mask_fit] = 0
im = ax6.imshow(residual, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax6.set_title("Bright Subtracted (masked)"); colorbar(im)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core,r_core]
aper1 = CircularAperture(mask.stars.star_pos_verybright, r=r_core[0])
aper1.plot(color='lime',lw=2,alpha=0.9, axes=ax6)
aper2 = CircularAperture(mask.stars.star_pos_medbright, r=r_core[1])
aper2.plot(color='skyblue',lw=2,label="",alpha=0.7, axes=ax6)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Comparison_fit_data2D%s.png"%suffix), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def plot_fit_PSF1D(results, psf, n_spline=2,
n_bootstrap=500, truth=None,
Amp_max=None, r_core=None,
n_out=4, theta_out=1200, image_size=800,
save=False, save_dir="./",
suffix='', figsize=(7,6)):
from astropy.stats import bootstrap
from .sampler import get_params_fit
pixel_scale = psf.pixel_scale
frac = psf.frac
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
if truth is not None:
print("Truth : ", psf.params)
psf.plot1D(psf_range=600, decompose=False, label='Truth')
# read fitting results
pmed, pmean, pcov, samples_eq = get_params_fit(results, return_sample=True)
print("Fitting (mean) : ", np.around(pmean,3))
print("Fitting (median) : ", np.around(pmed,3))
samples_eq_bs = bootstrap(samples_eq, bootnum=1, samples=n_bootstrap)[0]
# Number of n and theta in the fitting
if psf.aureole_model != "moffat":
theta_0 = psf.theta_0
N_n = n_spline
N_theta = n_spline - 1
psf_fit = psf.copy()
r = np.logspace(0., np.log10(image_size), 100)
comp1 = psf.f_core1D(r)
# Sample distribution from joint PDF
for sample in samples_eq_bs:
frac_k = frac
if psf.aureole_model == "moffat":
gamma1_k = sample[0]
beta1_k = sample[1]
psf_fit.update({'gamma1':gamma1_k, 'beta1':beta1_k})
else:
if psf.aureole_model == "power":
n_k = sample[0]
psf_fit.update({'n':n_k})
elif psf.aureole_model == "multi-power":
n_s_k = np.concatenate([sample[:N_n], [n_out]])
theta_s_k = np.concatenate([[theta_0],
np.atleast_1d(10**sample[N_n:N_n+N_theta]),
[theta_out]])
psf_fit.update({'n_s':n_s_k, 'theta_s':theta_s_k})
comp2_k = psf_fit.f_aureole1D(r)
plt.semilogy(r, (1-frac_k) * comp1 + frac_k * comp2_k,
color="lightblue", lw=2,alpha=0.1,zorder=1)
# Median and mean fitting
for fits, c, ls, lab in zip([pmed, pmean], ["royalblue", "b"],
["-.","-"], ["mean", "med"]):
if psf.aureole_model == "moffat":
gamma1_fit = fits[0]
beta1_fit = fits[1]
psf_fit.update({'gamma1':gamma1_fit, 'beta1':beta1_fit})
else:
if psf.aureole_model == "power":
n_fit = fits[0]
psf_fit.update({'n':n_fit})
elif psf.aureole_model == "multi-power":
n_s_fit = np.concatenate([fits[:N_n], [n_out]])
theta_s_fit = np.concatenate([[theta_0],
np.atleast_1d(10**fits[N_n:N_n+N_theta]),
[theta_out]])
psf_fit.update({'n_s':n_s_fit, 'theta_s':theta_s_fit})
comp2 = psf_fit.f_aureole1D(r)
y_fit = (1-frac) * comp1 + frac * comp2
plt.semilogy(r, y_fit, color=c, lw=2.5, ls=ls, alpha=0.8, label=lab+' comb.', zorder=4)
if lab=="med":
plt.semilogy(r, (1-frac) * comp1,
color="orange", lw=2, ls="--", alpha=0.7, label="med core",zorder=4)
plt.semilogy(r, frac * comp2,
color="seagreen", lw=2, ls="--", alpha=0.7, label="med aureole",zorder=4)
# if Amp_max is not None:
# std_fit = 10**fits[-1]
# contrast = Amp_max/(std_fit)
# y_min_contrast = y_fit.max()/contrast
# plt.axhline(y_min_contrast, color="k", ls="-.", alpha=0.5)
# plt.axhline(y_min_contrast*2, color="k", ls=":", alpha=0.5)
# plt.text(1, y_fit.max()/contrast*1.2, '1 $\sigma$', fontsize=10)
# plt.text(1, y_fit.max()/contrast*2.5, '2 $\sigma$', fontsize=10)
# r_max = r[np.argmin(abs(y_fit-y_fit.max()/contrast))]
# plt.xlim(0.9, 5*r_max)
# Draw boundaries etc.
if r_core is not None:
if figsize is not None:
plt.axvspan(np.atleast_1d(r_core).max(), theta_out/pixel_scale,
color='steelblue', alpha=0.15, zorder=1)
plt.axvspan(np.atleast_1d(r_core).min(), np.atleast_1d(r_core).max(),
color='seagreen', alpha=0.15, zorder=1)
plt.axvspan(plt.gca().get_xlim()[0], np.atleast_1d(r_core).min(),
color='gray', alpha=0.15, zorder=1)
if psf.aureole_model != "moffat":
for t in psf_fit.theta_s_pix:
plt.axvline(t, lw=2, ls='--', color='k', alpha=0.5)
plt.legend(loc=1, fontsize=12)
plt.xlabel(r"$\rm r\,[pix]$",fontsize=18)
plt.ylabel(r"$\rm Intensity$",fontsize=18)
plt.title("Recovered PSF from Fitting",fontsize=18)
plt.ylim(3e-9, 0.5)
plt.xscale("log")
plt.tight_layout()
if save:
plt.savefig("%s/Fit_PSF1D%s.png"%(save_dir, suffix),dpi=150)
plt.show()
plt.close()
def plot_bright_star_profile(tab_target, table_res_Rnorm, res_thumb,
bkg_sky=460, std_sky=2, pixel_scale=2.5, ZP=27.1,
mag_name='MAG_AUTO_corr', figsize=(8,6)):
from .utils import Intensity2SB, cal_profile_1d
r = np.logspace(0.03,3,100)
z_mean_s, z_med_s = table_res_Rnorm['Imean'], table_res_Rnorm['Imed']
z_std_s, sky_mean_s = table_res_Rnorm['Istd'], table_res_Rnorm['Isky']
plt.figure(figsize=figsize)
ax = plt.subplot(111)
# adaptive colormap
cmap = plt.cm.plasma(np.linspace(0.01, 0.99, len(res_thumb)+np.sum(tab_target[mag_name]<10)+1))
ax.set_prop_cycle(plt.cycler('color', cmap))
for i, (num, sky_m, mag) in enumerate(zip(list(res_thumb.keys())[::-1],
sky_mean_s[::-1],tab_target[mag_name][::-1])):
if num in tab_target["NUMBER"]:
alpha = min(0.05*(18-mag), 0.8)
errorbar = True if mag<10 else False
ms = max((15-mag), 0)
lw = max((12-mag), 1.5)
else:
alpha = 0.5; errorbar=False
ms, lw = 3, 3
img, ma, cen = res_thumb[num]['image'], res_thumb[num]['mask'], res_thumb[num]['center']
r_rbin, I_rbin, _ = cal_profile_1d(img, cen=cen, mask=ma, dr=1.25,
ZP=ZP, sky_mean=bkg_sky, sky_std=std_sky,
xunit="pix", yunit="SB", errorbar=errorbar,
core_undersample=False,
color=None, lw=lw, markersize=ms, alpha=alpha)
if i==0:
plt.text(3, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
plt.text(14, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
I_sky = Intensity2SB(std_sky, 0, ZP=ZP, pixel_scale=pixel_scale)
plt.axhline(I_sky, color="k", ls="-.", alpha=0.5)
plt.text(1.1, I_sky+0.5, '1 $\sigma$', fontsize=10)
plt.ylim(30.5,16.5)
plt.xlim(1.,3e2)
plt.xscale('log')
plt.show()
| 27,795 | 36.511471 | 101 |
py
|
elderflower
|
elderflower-master/elderflower/.ipynb_checkpoints/task-checkpoint.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
def Match_Mask_Measure(hdu_path, image_bounds,
SE_segmap, SE_catalog,
weight_map=None,
obj_name='', band="G",
pixel_scale=2.5,
ZP=None, field_pad=500,
r_scale=12, mag_thre=15,
draw=True, save=True,
use_PS1_DR2=False,
dir_name='../output/Measure'):
print("""Measure the intensity at R = %d for stars < %.1f
as normalization of fitting\n"""%(r_scale, mag_thre))
b_name = band.lower()
image_bounds = np.atleast_2d(image_bounds)
##################################################
# Read and Display
##################################################
from .utils import crop_image, crop_catalog
from .utils import find_keyword_header
from astropy.stats import mad_std
from astropy.table import Table
from astropy.io import fits
from astropy import wcs
# Read hdu
if os.path.isfile(hdu_path) is False:
sys.exit("Image does not exist. Check path.")
with fits.open(hdu_path) as hdul:
print("Read Image :", hdu_path)
data = hdul[0].data
header = hdul[0].header
wcs_data = wcs.WCS(header)
# Read output from create_photometric_light_APASS
if os.path.isfile(SE_segmap):
seg_map = fits.getdata(SE_segmap)
else:
seg_map = None
SE_cat_full = Table.read(SE_catalog, format="ascii.sextractor")
if weight_map is not None:
weight_edge = fits.getdata(weight_map)
else:
weight_edge = np.ones_like(data)
# Read global background model ZP and pixel scale from header
bkg = find_keyword_header(header, "BACKVAL")
if ZP is None:
ZP = find_keyword_header(header, "ZP")
# Estimate of background fluctuation (just for plot)
if seg_map is not None:
std = mad_std(data[(seg_map==0) & (weight_edge>0.5)])
else:
std = mad_std(data)
# Short summary
print("BACKVAL: %.2f +/- %.2f , ZP: %.2f\n"%(bkg, std, ZP))
# Convert SE measured flux into mag
flux = SE_cat_full["FLUX_AUTO"]
mag = -2.5 * np.ma.log10(flux).filled(flux[flux>0].min()) + ZP
SE_cat_full["MAG_AUTO"] = np.around(mag, 5)
field_bounds = [field_pad, field_pad,
data.shape[1]-field_pad,
data.shape[0]-field_pad]
if not use_PS1_DR2:
print("Match field %r with catalog\n"%field_bounds)
print("Measure Sky Patch (X min, Y min, X max, Y max) :")
[print("%r"%b) for b in image_bounds.tolist()]
# Display field_bounds and sub-regions to be matched
patch, seg_patch = crop_image(data, field_bounds, seg_map,
weight_map=weight_edge,
sub_bounds=image_bounds,
origin=0, draw=draw)
##################################################
# Crossmatch with Star Catalog (across the field)
##################################################
from .utils import cross_match_PS1_DR2, cross_match
from .utils import calculate_color_term
# HTTPError is caught in the PS1 DR2 retry loop below; urllib's class is
# assumed here -- adjust if the query backend raises a different exception.
from urllib.error import HTTPError
# Crossmatch with PANSTRRS at threshold of mag_thre mag
if use_PS1_DR2:
# Give 3 attempts in matching PS1 DR2 via MAST.
# This could fail if the FoV is too large.
for attempt in range(3):
try:
tab_target, tab_target_full, catalog_star = \
cross_match_PS1_DR2(wcs_data,
SE_cat_full,
image_bounds,
mag_thre=mag_thre,
band=b_name)
except HTTPError:
print('Gateway Time-out. Try Again.')
else:
break
else:
sys.exit('504 Server Error: 3 Failed Attempts. Exit.')
else:
mag_name = b_name+'mag'
tab_target, tab_target_full, catalog_star = \
cross_match(wcs_data,
SE_cat_full,
field_bounds,
mag_thre=mag_thre,
mag_name=mag_name)
# Calculate color correction between PANSTARRS and DF filter
if use_PS1_DR2:
mag_name = mag_name_cat = b_name+'MeanPSFMag'
else:
mag_name_cat = mag_name+'_PS'
CT = calculate_color_term(tab_target_full,
mag_name=mag_name_cat, draw=draw)
catalog_star["MAG_AUTO"] = catalog_star[mag_name] + CT
# Save matched table and catalog
if save:
tab_target_name = os.path.join(dir_name,
'%s-catalog_match_%smag%d.txt'%(obj_name, b_name, mag_thre))
tab_target["MAG_AUTO_corr"] = tab_target[mag_name_cat] + CT
tab_target.write(tab_target_name,
overwrite=True, format='ascii')
catalog_star_name = os.path.join(dir_name,
'%s-catalog_PS_%s_all.txt'%(obj_name, b_name))
catalog_star["FLUX_AUTO"] = 10**((catalog_star["MAG_AUTO"]-ZP)/(-2.5))
catalog_star.write(catalog_star_name,
overwrite=True, format='ascii')
print('Save PANSTARRS catalog & matched sources in %s'%dir_name)
##################################################
# Build Mask & Measure Scaling (in selected patch)
##################################################
from .utils import fit_empirical_aperture, make_segm_from_catalog
from .utils import measure_Rnorm_all
from .plotting import plot_bright_star_profile
# Empirical enlarged aperture size from magnitude based on matched SE detection
estimate_radius = fit_empirical_aperture(tab_target_full, seg_map,
mag_name=mag_name_cat,
mag_range=[13,22], K=2.5,
degree=3, draw=draw)
for image_bound in image_bounds:
# Crop the star catalog and matched SE catalog
patch_Xmin, patch_Ymin, patch_Xmax, patch_Ymax = image_bound
# Catalog bound slightly wider than the region
cat_bound = (patch_Xmin-50, patch_Ymin-50,
patch_Xmax+50, patch_Ymax+50)
catalog_star_patch = crop_catalog(catalog_star, cat_bound,
sortby=mag_name,
keys=("X_IMAGE"+'_PS',
"Y_IMAGE"+'_PS'))
tab_target_patch = crop_catalog(tab_target, cat_bound,
sortby=mag_name_cat,
keys=("X_IMAGE", "Y_IMAGE"))
# Make segmentation map from catalog based on SE seg map of one band
seg_map_cat = make_segm_from_catalog(catalog_star_patch,
image_bound,
estimate_radius,
mag_name=mag_name,
cat_name='PS',
obj_name=obj_name,
band=band, draw=draw,
save=save, dir_name=dir_name)
# Measure average intensity (source+background) at e_scale
print("""Measure intensity at R = %d
for catalog stars %s < %.1f in %r:"""\
%(r_scale, mag_name, mag_thre, image_bound))
tab_res_Rnorm, res_thumb = \
measure_Rnorm_all(tab_target_patch, image_bound,
wcs_data, data, seg_map,
mag_thre=mag_thre,
r_scale=r_scale, width=1,
obj_name=obj_name,
mag_name=mag_name_cat,
save=save, dir_name=dir_name)
plot_bright_star_profile(tab_target_patch,
tab_res_Rnorm, res_thumb,
bkg_sky=bkg, std_sky=std, ZP=ZP,
pixel_scale=pixel_scale)
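# Sketch of a typical call of the measurement step. The FITS image, SExtractor
# products and output directory below are placeholder names for illustration,
# not files shipped with the package.
def _example_Match_Mask_Measure():
    Match_Mask_Measure('coadd_SloanG_NGC_5907.fits',
                       image_bounds=[[800, 1600, 4000, 4800]],
                       SE_segmap='NGC_5907-G_seg.fits',
                       SE_catalog='NGC_5907-G.cat',
                       obj_name='NGC_5907', band='G',
                       r_scale=12, mag_thre=15,
                       draw=True, save=True,
                       dir_name='./Measure')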
def Run_PSF_Fitting(hdu_path, image_bounds0,
n_spline=2, obj_name='', band="G",
pixel_scale=2.5, ZP=None, pad=100,
r_scale=12, mag_threshold=[14,11],
mask_type='radius', SB_fit_thre=24.5,
r_core=24, r_out=None,
fit_sigma=True, fit_frac=False, leg2d=False,
wid_strip=24, n_strip=48,
n_cpu=None, parallel=False,
brightest_only=False, draw_real=True,
draw=True, print_progress=True,
save=False, dir_name='./',
dir_measure='../output/Measure-PS'):
############################################
# Read Image and Table
############################################
from .image import ImageList
DF_Images = ImageList(hdu_path, image_bounds0,
obj_name, band,
pixel_scale, ZP, pad)
from .utils import read_measurement_tables
tables_faint, tables_res_Rnorm = \
read_measurement_tables(dir_measure,
image_bounds0,
obj_name=obj_name,
band=band,
pad=pad,
r_scale=r_scale)
############################################
# Setup PSF
############################################
from .modeling import PSF_Model
# PSF Parameters (some from fitting stacked PSF)
frac = 0.3 # fraction of aureole
beta = 10 # moffat beta, in arcsec
fwhm = 2.3 * pixel_scale # moffat fwhm, in arcsec
n0 = 3.2 # estimated true power index
theta_0 = 5.
# radius in which power law is flattened, in arcsec (arbitrary)
n_s = np.array([n0, 2., 4]) # power index
theta_s = np.array([theta_0, 10**1.9, 1200])
# transition radius in arcsec
if n_spline == 1:
# Single-power PSF
params_pow = {"fwhm":fwhm, "beta":beta,
"frac":frac, "n":n0, 'theta_0':theta_0}
psf = PSF_Model(params=params_pow,
aureole_model='power')
else:
# Multi-power PSF
params_mpow = {"fwhm":fwhm, "beta":beta,
"frac":frac, "n_s":n_s, 'theta_s':theta_s}
psf = PSF_Model(params=params_mpow,
aureole_model='multi-power')
# Pixelize PSF
psf.pixelize(pixel_scale=pixel_scale)
# Generate core and aureole PSF
psf_c = psf.generate_core()
psf_e, psf_size = psf.generate_aureole(contrast=1e6,
psf_range=1000)
# Deep copy
psf_tri = psf.copy()
############################################
# Setup Stars
############################################
from .utils import assign_star_props
stars_0, stars_all = \
DF_Images.assign_star_props(tables_faint,
tables_res_Rnorm,
r_scale=r_scale,
mag_threshold=mag_threshold,
verbose=True, draw=False,
save=save, save_dir=dir_name)
#breakpoint()
############################################
# Setup Basement Image
############################################
# Make fixed background of dim stars
DF_Images.make_base_image(psf.psf_star, stars_all, draw=False)
############################################
# Masking
############################################
from .mask import Mask
if mask_type=='brightness':
from .utils import SB2Intensity
count = SB2Intensity(SB_fit_thre, DF_Images.bkg,
                             DF_Images.ZP, DF_Images.pixel_scale)[0]
else:
count = None
# Mask faint and centers of bright stars
DF_Images.make_mask(stars_0, dir_measure,
by=mask_type, r_core=r_core, r_out=None,
wid_strip=wid_strip, n_strip=n_strip,
sn_thre=2.5, n_dilation=5, draw=True,
save=save, save_dir=dir_name)
# Collect stars for fit. Choose if only use brightest stars
if brightest_only:
stars = [s.use_verybright() for s in DF_Images.stars]
else:
stars = DF_Images.stars # for fit
# Copy stars
stars_tri = stars.copy()
proceed = input('Is the Mask Reasonable?[y/n]')
if proceed == 'n': sys.exit("Reset the Mask.")
############################################
# Estimate Background
############################################
DF_Images.estimate_bkg()
############################################
# Setup Priors and Likelihood Models for Fitting
############################################
DF_Images.set_container(psf_tri, stars_tri,
n_spline=n_spline,
n_min=1, n_est=n0,
theta_in=50, theta_out=240,
leg2d=leg2d, parallel=parallel,
draw_real=draw_real,
fit_sigma=fit_sigma,
fit_frac=fit_frac,
brightest_only=brightest_only)
############################################
# Run Sampling
############################################
from .sampler import DynamicNestedSampler
dsamplers = []
for i in range(DF_Images.N_Image):
container = DF_Images.containers[i]
ndim = container.ndim
ds = DynamicNestedSampler(container,
sample='auto', n_cpu=n_cpu)
ds.run_fitting(nlive_init=ndim*10,
nlive_batch=2*ndim+2, maxbatch=2,
print_progress=print_progress)
# if save:
# fit_info = {'n_spline':n_spline, 'image_size':image_size,
# 'image_bounds0':image_bounds0, 'leg2d':leg2d,
# 'r_core':r_core, 'r_scale':r_scale}
# method = str(n_spline)+'p'
# fname='NGC5907-%s-fit_best_X%dY%d_%s'\
# %(band, image_bounds0[0], image_bounds0[1], method)
# if leg2d: fname+='l'
# if brightest_only: fname += 'b'
# ds.save_results(fname+'.res', fit_info, save_dir=dir_name)
############################################
# Plot Results
############################################
from .plotting import AsinhNorm
method = str(n_spline)+'p'
ds.cornerplot(figsize=(18, 16),
save=save, save_dir=dir_name,
suffix='_'+method)
# Plot recovered PSF
ds.plot_fit_PSF1D(psf, n_bootstrap=500, r_core=r_core,
save=save, save_dir=dir_name,
suffix='_'+method)
# Recovered 1D PSF
psf_fit, params = ds.generate_fit(psf, stars_tri[i],
n_out=4, theta_out=1200)
# Calculate Chi^2
ds.calculate_reduced_chi2()
# Draw 2D compaison
ds.draw_comparison_2D(r_core=r_core,
norm=AsinhNorm(a=0.01),
vmin=DF_Images.bkg-2,
vmax=DF_Images.bkg+50,
save=save, save_dir=dir_name,
suffix='_'+method)
if leg2d:
ds.draw_background(save=save, save_dir=dir_name,
suffix='_'+method)
dsamplers += [ds]
return dsamplers
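# Sketch of the fitting step chained after Match_Mask_Measure; the image path,
# bounds and directories are placeholders for illustration only.
def _example_Run_PSF_Fitting():
    dsamplers = Run_PSF_Fitting('coadd_SloanG_NGC_5907.fits',
                                image_bounds0=[800, 1600, 4000, 4800],
                                n_spline=2, obj_name='NGC_5907', band='G',
                                r_scale=12, mag_threshold=[14, 11],
                                r_core=24, n_cpu=4,
                                save=True, dir_name='./output',
                                dir_measure='./Measure')
    return dsamplers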
| 16,785 | 37.5 | 83 |
py
|
elderflower
|
elderflower-master/elderflower/.ipynb_checkpoints/modeling-checkpoint.py
|
import os
import time
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy.integrate import quad
from scipy.spatial import distance
from scipy.special import gamma as Gamma
from astropy import units as u
from astropy.io import fits, ascii
from astropy.modeling import models
from astropy.utils import lazyproperty
import galsim
from galsim import GalSimBoundsError
from copy import deepcopy
from numpy.polynomial.legendre import leggrid2d
from itertools import combinations
from functools import partial, lru_cache
try:
from .parallel import parallel_compute
parallel_enabled = True
except ImportError:
import warnings
warnings.warn("Joblib / psutil / multiprocessing / mpi4py is not installed. Parallelization is not enabled.")
parallel_enabled = False
try:
from numba import njit
except ImportError:
def njit(*args, **kwargs):
def dummy_decorator(func):
return func
return dummy_decorator
from .utils import fwhm_to_gamma, gamma_to_fwhm
from .utils import Intensity2SB, SB2Intensity
from .utils import round_good_fft, calculate_psf_size
############################################
# Functions for making PSF models
############################################
class PSF_Model:
""" A PSF Model object """
def __init__(self, params=None,
core_model='moffat',
aureole_model='power'):
"""
Parameters
----------
params : a dictionary containing keywords of PSF parameter
core_model : model of PSF core (moffat)
aureole_model : model of aureole ("moffat", "power" or "multi-power")
"""
self.core_model = core_model
self.aureole_model = aureole_model
self.params = params
# Build attribute for parameters from dictionary keys
for key, val in params.items():
setattr(self, key, val)
if hasattr(self, 'fwhm'):
self.gamma = fwhm_to_gamma(self.fwhm, self.beta)
self.params['gamma'] = self.gamma
if hasattr(self, 'gamma'):
self.fwhm = gamma_to_fwhm(self.gamma, self.beta)
self.params['fwhm'] = self.fwhm
self.gsparams = galsim.GSParams(folding_threshold=1e-10)
def __str__(self):
return "A PSF Model Class"
def __repr__(self):
return " ".join([f"{self.__class__.__name__}", f"<{self.aureole_model}>"])
def pixelize(self, pixel_scale=2.5):
""" Build grid for drawing """
self.pixel_scale = pixel_scale
for key, val in self.params.items():
if ('gamma' in key) | ('theta' in key):
val = val / pixel_scale
setattr(self, key + '_pix', val)
def update(self, params):
""" Update PSF parameters from dictionary keys """
pixel_scale = self.pixel_scale
for key, val in params.items():
if np.ndim(val) > 0:
val = np.array(val)
setattr(self, key, val)
self.params[key] = val
if ('gamma' in key) | ('theta' in key):
val = val / pixel_scale
setattr(self, key + '_pix', val)
def copy(self):
""" A deep copy of the object """
return deepcopy(self)
@property
def f_core1D(self):
""" 1D Core function """
gamma_pix, beta = self.gamma_pix, self.beta
c_mof2Dto1D = C_mof2Dto1D(gamma_pix, beta)
return lambda r: moffat1d_normed(r, gamma_pix, beta) / c_mof2Dto1D
@property
def f_aureole1D(self):
""" 1D Aureole function """
if self.aureole_model == "moffat":
gamma1_pix, beta1 = self.gamma1_pix, self.beta1
c_mof2Dto1D = C_mof2Dto1D(gamma1_pix, beta1)
f_aureole = lambda r: moffat1d_normed(r, gamma1_pix, beta1) / c_mof2Dto1D
elif self.aureole_model == "power":
n0, theta_0_pix = self.n0, self.theta_0_pix
c_aureole_2Dto1D = C_pow2Dto1D(n0, theta_0_pix)
f_aureole = lambda r: trunc_power1d_normed(r, n0, theta_0_pix) / c_aureole_2Dto1D
elif self.aureole_model == "multi-power":
n_s, theta_s_pix = self.n_s, self.theta_s_pix
c_aureole_2Dto1D = C_mpow2Dto1D(n_s, theta_s_pix)
f_aureole = lambda r: multi_power1d_normed(r, n_s, theta_s_pix) / c_aureole_2Dto1D
return f_aureole
def plot1D(self, **kwargs):
""" Plot 1D profile """
from .plotting import plot_PSF_model_1D
plot_PSF_model_1D(self.frac, self.f_core1D, self.f_aureole1D, **kwargs)
if self.aureole_model == "multi-power":
for t in self.theta_s_pix:
plt.axvline(t, ls="--", color="k", alpha=0.3, zorder=1)
def generate_core(self):
""" Generate Galsim PSF of core. """
gamma, beta = self.gamma, self.beta
self.fwhm = fwhm = gamma * 2. * math.sqrt(2**(1./beta)-1)
psf_core = galsim.Moffat(beta=beta, fwhm=fwhm,
flux=1., gsparams=self.gsparams) # in arcsec
self.psf_core = psf_core
return psf_core
def generate_aureole(self,
contrast=1e6,
psf_scale=None,
psf_range=None,
min_psf_range=60,
max_psf_range=720,
interpolant="cubic"):
"""
Generate Galsim PSF of aureole.
Parameters
----------
contrast: Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given.
psf_scale: Pixel scale of the PSF, <= pixel scale of data. In arcsec/pix.
psf_range: Range of PSF. In arcsec.
min_psf_range : Minimum range of PSF. In arcsec.
max_psf_range : Maximum range of PSF. In arcsec.
interpolant: Interpolant method in Galsim.
Returns
----------
psf_aureole: power law Galsim PSF, flux normalized to be 1.
psf_size: Full image size of PSF used. In pixel.
"""
if psf_scale is None:
psf_scale = self.pixel_scale
if self.aureole_model == "moffat":
gamma1, beta1 = self.gamma1, self.beta1
if psf_range is None:
psf_range = max_psf_range
psf_size = round_good_fft(2 * psf_range // psf_scale)
else:
if self.aureole_model == "power":
n0 = self.n0
theta_0 = self.theta_0
elif self.aureole_model == "multi-power":
n_s = self.n_s
theta_s = self.theta_s
self.n0 = n0 = n_s[0]
self.theta_0 = theta_0 = theta_s[0]
if psf_range is None:
psf_size = calculate_psf_size(n0, theta_0, contrast,
psf_scale, min_psf_range, max_psf_range)
else:
psf_size = round_good_fft(psf_range)
# Generate Grid of PSF and plot PSF model in real space onto it
xx_psf, yy_psf, cen_psf = generate_psf_grid(psf_size)
if self.aureole_model == "moffat":
psf_aureole = galsim.Moffat(beta=beta1, scale_radius=gamma1,
flux=1., gsparams=self.gsparams)
else:
if self.aureole_model == "power":
theta_0_pix = theta_0 / psf_scale
psf_model = trunc_power2d(xx_psf, yy_psf,
n0, theta_0_pix, I_theta0=1, cen=cen_psf)
elif self.aureole_model == "multi-power":
theta_s_pix = theta_s / psf_scale
psf_model = multi_power2d(xx_psf, yy_psf,
n_s, theta_s_pix, 1, cen=cen_psf)
# Parse the image to Galsim PSF model by interpolation
image_psf = galsim.ImageF(psf_model)
psf_aureole = galsim.InterpolatedImage(image_psf, flux=1,
scale=psf_scale,
x_interpolant=interpolant,
k_interpolant=interpolant)
self.psf_aureole = psf_aureole
return psf_aureole, psf_size
def Flux2Amp(self, Flux):
""" Convert Flux to Astropy Moffat Amplitude (pixel unit) """
Amps = [moffat2d_Flux2Amp(self.gamma_pix, self.beta, Flux=(1-self.frac)*F)
for F in Flux]
return np.array(Amps)
def I2I0(self, I, r=12):
""" Convert aureole I(r) at r to I0. r in pixel """
if self.aureole_model == "moffat":
return I2I0_mof(self.gamma1_pix, self.beta1, r, I=I)
elif self.aureole_model == "power":
return I2I0_pow(self.n0, self.theta_0_pix, r, I=I)
elif self.aureole_model == "multi-power":
return I2I0_mpow(self.n_s, self.theta_s_pix, r, I=I)
def I02I(self, I0, r=12):
""" Convert aureole I(r) at r to I0. r in pixel """
if self.aureole_model == "moffat":
return I02I_mof(self.gamma1_pix, self.beta1, r, I0=I0)
elif self.aureole_model == "power":
return I02I_pow(self.n0, self.theta_0_pix, r, I0=I0)
elif self.aureole_model == "multi-power":
return I02I_mpow(self.n_s, self.theta_s_pix, r, I0=I0)
def calculate_external_light(self, stars, n_iter=2):
""" Calculate the integrated external scatter light that affects
the flux scaling from very bright stars on the other stars.
Parameters
----------
stars : Star object
n_iter : iteration time to do the calculation
"""
I_ext = np.zeros(stars.n_bright)
if self.aureole_model == "moffat":
pass
else:
z_norm_verybright0 = stars.z_norm_verybright.copy()
pos_source, pos_eval = stars.star_pos_verybright, stars.star_pos_bright
if self.aureole_model == "power":
cal_ext_light = partial(calculate_external_light_pow,
n0=self.n0, theta0=self.theta_0_pix,
pos_source=pos_source, pos_eval=pos_eval)
elif self.aureole_model == "multi-power":
cal_ext_light = partial(calculate_external_light_mpow,
n_s=self.n_s, theta_s_pix=self.theta_s_pix,
pos_source=pos_source, pos_eval=pos_eval)
# Loop the subtraction
r_scale = stars.r_scale
n_verybright = stars.n_verybright
for i in range(n_iter):
z_norm_verybright = z_norm_verybright0 - I_ext[:n_verybright]
I0_verybright = self.I2I0(z_norm_verybright, r=r_scale)
I_ext = cal_ext_light(I0_source=I0_verybright)
return I_ext
def I2Flux(self, I, r=12):
""" Convert aureole I(r) at r to total flux. r in pixel """
if self.aureole_model == "moffat":
return I2Flux_mof(self.frac, self.gamma1_pix, self.beta1, r, I=I)
elif self.aureole_model == "power":
return I2Flux_pow(self.frac, self.n0, self.theta_0_pix, r, I=I)
elif self.aureole_model == "multi-power":
return I2Flux_mpow(self.frac, self.n_s, self.theta_s_pix, r, I=I)
def Flux2I(self, Flux, r=12):
""" Convert aureole I(r) at r to total flux. r in pixel """
if self.aureole_model == "moffat":
return Flux2I_mof(self.frac, self.gamma1_pix, self.beta1, r, Flux=Flux)
elif self.aureole_model == "power":
return Flux2I_pow(self.frac, self.n0, self.theta_0_pix, r, Flux=Flux)
elif self.aureole_model == "multi-power":
return Flux2I_mpow(self.frac, self.n_s, self.theta_s_pix, r, Flux=Flux)
def SB2Flux(self, SB, BKG, ZP, r=12):
""" Convert suface brightness SB at r to total flux, given background value and ZP. """
# Intensity = I + BKG
I = SB2Intensity(SB, BKG, ZP, self.pixel_scale) - BKG
return self.I2Flux(I, r=r)
def Flux2SB(self, Flux, BKG, ZP, r=12):
""" Convert total flux to suface brightness SB at r, given background value and ZP. """
I = self.Flux2I(Flux, r=r)
return Intensity2SB(I+ BKG, BKG, ZP, self.pixel_scale)
@property
def psf_star(self):
""" Galsim object of star psf (core+aureole) """
frac = self.frac
psf_core, psf_aureole = self.psf_core, self.psf_aureole
return (1-frac) * psf_core + frac * psf_aureole
def plot_PSF_model_galsim(self, contrast=None, save=False, save_dir='.'):
""" Build and plot Galsim 2D model averaged in 1D """
from .plotting import plot_PSF_model_galsim
image_psf = plot_PSF_model_galsim(self, contrast=contrast,
save=save, save_dir=save_dir)
self.image_psf = image_psf
@staticmethod
def write_psf_image(image_psf, filename='PSF_model.fits'):
""" Write the 2D psf image to fits """
hdu = fits.ImageHDU(image_psf)
hdu.writeto(filename, overwrite=True)
def draw_core2D_in_real(self, star_pos, Flux):
""" 2D drawing function of the core in real space given positions and flux (of core) of target stars """
gamma, alpha = self.gamma_pix, self.beta
Amps = np.array([moffat2d_Flux2Amp(gamma, alpha, Flux=flux)
for flux in Flux])
f_core_2d_s = np.array([models.Moffat2D(amplitude=amp, x_0=x0, y_0=y0,
gamma=gamma, alpha=alpha)
for ((x0,y0), amp) in zip(star_pos, Amps)])
return f_core_2d_s
def draw_aureole2D_in_real(self, star_pos, Flux=None, I0=None):
""" 2D drawing function of the aureole in real space given positions and flux / amplitude (of aureole) of target stars """
if self.aureole_model == "moffat":
gamma1_pix, alpha1 = self.gamma1_pix, self.beta1
# In this case I_theta0 is defined as the amplitude at gamma
if I0 is None:
I_theta0 = moffat2d_Flux2I0(gamma1_pix, alpha1, Flux=Flux)
elif Flux is None:
I_theta0 = I0
else:
raise MyError("Both Flux and I0 are not given.")
Amps = np.array([moffat2d_I02Amp(alpha1, I0=I0)
for I0 in I_theta0])
f_aureole_2d_s = np.array([models.Moffat2D(amplitude=amp,
x_0=x0, y_0=y0,
gamma=gamma1_pix,
alpha=alpha1)
for ((x0,y0), amp) in zip(star_pos, Amps)])
elif self.aureole_model == "power":
n0 = self.n0
theta_0_pix = self.theta_0_pix
if I0 is None:
I_theta0 = power2d_Flux2Amp(n0, theta_0_pix, Flux=1) * Flux
elif Flux is None:
I_theta0 = I0
else:
raise MyError("Both Flux and I0 are not given.")
f_aureole_2d_s = np.array([lambda xx, yy, cen=pos, I=I:\
trunc_power2d(xx, yy, cen=cen,
n=n0, theta0=theta_0_pix,
I_theta0=I)
for (I, pos) in zip(I_theta0, star_pos)])
elif self.aureole_model == "multi-power":
n_s = self.n_s
theta_s_pix = self.theta_s_pix
if I0 is None:
I_theta0 = multi_power2d_Flux2Amp(n_s, theta_s_pix, Flux=1) * Flux
elif Flux is None:
I_theta0 = I0
else:
raise MyError("Both Flux and I0 are not given.")
f_aureole_2d_s = np.array([lambda xx, yy, cen=pos, I=I:\
multi_power2d(xx, yy, cen=cen,
n_s=n_s, theta_s=theta_s_pix,
I_theta0=I)
for (I, pos) in zip(I_theta0, star_pos)])
return f_aureole_2d_s
class Stars:
"""
Class storing positions & flux of faint/medium-bright/bright stars
"""
def __init__(self, star_pos, Flux,
Flux_threshold=[7e4, 2.7e6],
z_norm=None, r_scale=12,
BKG=0, verbose=False):
"""
Parameters
----------
star_pos: positions of stars (in the region)
Flux: flux of stars (in ADU)
Flux_threshold : threshold of flux
(default: corresponding to [15, 11] mag for DF)
z_norm : flux scaling measured at r_scale
r_scale : radius at which to measure the flux scaling
BKG : sky background value
"""
self.star_pos = np.atleast_2d(star_pos)
self.Flux = np.atleast_1d(Flux)
self.Flux_threshold = Flux_threshold
self.F_bright = Flux_threshold[0]
self.F_verybright = Flux_threshold[1]
self.n_tot = len(star_pos)
self.bright = (self.Flux >= self.F_bright)
self.verybright = (self.Flux >= self.F_verybright)
self.medbright = self.bright & (~self.verybright)
if z_norm is not None:
self.z_norm = z_norm
self.r_scale = r_scale
self.BKG = BKG
self.verbose = verbose
if verbose:
if len(Flux[self.medbright])>0:
print("# of medium bright (flux:%.2g~%.2g) stars: %d "\
%(Flux[self.medbright].min(),
Flux[self.medbright].max(), self.n_medbright))
if len(Flux[self.verybright])>0:
print("# of very bright (flux>%.2g) stars : %d"\
%(Flux[self.verybright].min(), self.n_verybright))
# Rendering stars in parallel if number of bright stars exceeds 50
if self.n_medbright < 50:
print("Not many bright stars, will draw in serial.\n")
self.parallel = False
else:
print("Crowded fields w/ bright stars > 50, will draw in parallel.\n")
self.parallel = True
def __str__(self):
return "A Star Class"
def __repr__(self):
return ' N='.join([f"{self.__class__.__name__}", str(self.n_tot)])
@classmethod
def from_znorm(cls, psf, star_pos, z_norm,
z_threshold=[9, 300], r_scale=12,
verbose=False):
""" Star object built from intensity at r_scale instead of flux """
Flux = psf.I2Flux(z_norm, r_scale)
Flux_threshold = psf.I2Flux(z_threshold, r=r_scale)
return cls(star_pos, Flux, Flux_threshold,
z_norm=z_norm, r_scale=r_scale, verbose=verbose)
def update_Flux(self, Flux):
self.Flux = Flux
@lazyproperty
def n_faint(self):
return np.sum(~self.bright)
@lazyproperty
def n_bright(self):
return np.sum(self.bright)
@lazyproperty
def n_verybright(self):
return np.sum(self.verybright)
@lazyproperty
def n_medbright(self):
return np.sum(self.medbright)
@property
def Flux_faint(self):
return self.Flux[~self.bright]
@property
def Flux_bright(self):
return self.Flux[self.bright]
@property
def Flux_verybright(self):
return self.Flux[self.verybright]
@property
def Flux_medbright(self):
return self.Flux[self.medbright]
@property
def z_norm_bright(self):
return self.z_norm[self.bright]
@property
def z_norm_verybright(self):
return self.z_norm[self.verybright]
@lazyproperty
def star_pos_faint(self):
return self.star_pos[~self.bright]
@lazyproperty
def star_pos_bright(self):
return self.star_pos[self.bright]
@lazyproperty
def star_pos_verybright(self):
return self.star_pos[self.verybright]
@lazyproperty
def star_pos_medbright(self):
return self.star_pos[self.medbright]
def plot_flux_dist(self, **kwargs):
from .plotting import plot_flux_dist
plot_flux_dist(self.Flux, [self.F_bright, self.F_verybright], **kwargs)
def copy(self):
return deepcopy(self)
def use_verybright(self):
""" Crop the object into a new object only contains its very bright stars """
if self.verbose:
print("\nOnly model brightest stars in the field.\n")
stars_vb = Stars(self.star_pos_verybright,
self.Flux_verybright,
Flux_threshold=self.Flux_threshold,
z_norm=self.z_norm_verybright,
r_scale=self.r_scale, BKG=self.BKG)
return stars_vb
def remove_outsider(self, image_size, d=[24,12]):
""" Remove out-of-field stars far from the edge. """
star_pos = self.star_pos
Flux = self.Flux
out_A = (star_pos<-d[0]) | (star_pos>image_size+d[0])
remove_A = np.logical_or.reduce(out_A, axis=1) & self.verybright
out_B = (star_pos<-d[1]) | (star_pos>image_size+d[1])
remove_B = np.logical_or.reduce(out_B, axis=1) & self.medbright
remove = remove_A | remove_B
return Stars(star_pos[~remove], Flux[~remove], self.Flux_threshold,
self.z_norm[~remove], r_scale=self.r_scale, BKG=self.BKG)
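# Sketch: building a Stars object from synthetic positions and fluxes. The
# values are arbitrary; the thresholds follow the class defaults for DF data.
def _demo_Stars():
    rng = np.random.default_rng(0)
    star_pos = rng.uniform(0, 800, size=(100, 2))
    Flux = 10 ** rng.uniform(3, 7, size=100)
    stars = Stars(star_pos, Flux, Flux_threshold=[7e4, 2.7e6], verbose=True)
    return stars.n_bright, stars.n_verybright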
# ### (Old) Galsim Modelling Funcs ###
# def Generate_PSF_pow_Galsim(n, theta_t=5, psf_scale=2,
# contrast=1e5, psf_range=None,
# min_psf_range=30, max_psf_range=600,
# interpolant="cubic"):
# """
# Generate power law PSF using Galsim.
# Parameters
# ----------
# n: Power law index
# theta_t: Inner flattening radius of power law to avoid divergence at the center. In arcsec.
# Returns
# ----------
# psf_pow: power law Galsim PSF, flux normalized to be 1.
# psf_size: Size of PSF used. In pixel.
# """
# # Calculate a PSF size with contrast, if not given
# if psf_range is None:
# a = theta_t**n
# opt_psf_range = int((contrast * a) ** (1./n))
# psf_range = max(min_psf_range, min(opt_psf_range, max_psf_range))
# # full (image) PSF size in pixel
# psf_size = 2 * psf_range // psf_scale
# # Generate Grid of PSF and plot PSF model in real space onto it
# cen_psf = ((psf_size-1)/2., (psf_size-1)/2.)
# yy_psf, xx_psf = np.mgrid[:psf_size, :psf_size]
# theta_t_pix = theta_t / psf_scale
# psf_model = trunc_power2d(xx_psf, yy_psf, n, theta_t_pix, I_theta0=1, cen=cen_psf)
# # Parse the image to Galsim PSF model by interpolation
# image_psf = galsim.ImageF(psf_model)
# psf_pow = galsim.InterpolatedImage(image_psf, flux=1, scale=psf_scale,
# x_interpolant=interpolant, k_interpolant=interpolant)
# return psf_pow, psf_size
# def Generate_PSF_mpow_Galsim(contrast, n_s, theta_s,
# psf_scale=2, psf_range=None,
# min_psf_range=60, max_psf_range=1200,
# interpolant="cubic"):
# """
# Generate power law PSF using Galsim.
# Parameters
# ----------
# n_s: Power law indexs
# theta_s: Transition radius of power law to avoid divergence at the center. In arcsec.
# Returns
# ----------
# psf_mpow: multi-power law Galsim PSF, flux normalized to be 1.
# psf_size: Size of PSF used. In pixel.
# """
# # Calculate a PSF size with contrast, if not given
# if psf_range is None:
# a_psf = (theta_s[0])**n_s[0]
# opt_psf_range = int((contrast * a_psf) ** (1./n_s[0]))
# psf_range = max(min_psf_range, min(opt_psf_range, max_psf_range))
# psf_size = 2 * psf_range // psf_scale
# # Generate Grid of PSF and plot PSF model in real space onto it
# cen_psf = ((psf_size-1)/2., (psf_size-1)/2.)
# yy_psf, xx_psf = np.mgrid[:psf_size, :psf_size]
# theta_s_psf_pix = theta_s / psf_scale
# psf_model = multi_power2d(xx_psf, yy_psf, n_s, theta_s_psf_pix, 1, cen=cen_psf)
# # Parse the image to Galsim PSF model by interpolation
# image_psf = galsim.ImageF(psf_model)
# psf_mpow = galsim.InterpolatedImage(image_psf, flux=1, scale=psf_scale,
# x_interpolant=interpolant, k_interpolant=interpolant)
# return psf_mpow, psf_size
############################################
# Analytic Functions for models
############################################
### funcs on single element ###
def trunc_pow(x, n, theta0, I_theta0=1):
""" Truncated power law for single element, I = I_theta0 at theta0 """
a = I_theta0 / (theta0)**(-n)
y = a * x**(-n) if x > theta0 else I_theta0
return y
# deprecated
def compute_multi_pow_norm0(n0, n_s, theta0, theta_s, I_theta0):
""" Compute normalization factor of each power law component """
a_s = np.zeros(len(n_s))
a0 = I_theta0 * theta0**(n0)
I_theta_i = a0 * float(theta_s[0])**(-n0)
for i, (n_i, theta_i) in enumerate(zip(n_s, theta_s)):
a_i = I_theta_i/(theta_i)**(-n_i)
try:
a_s[i] = a_i
I_theta_i = a_i * float(theta_s[i+1])**(-n_i)
except IndexError:
pass
return a0, a_s
@njit
def compute_multi_pow_norm(n_s, theta_s, I_theta0):
""" Compute normalization factor A of each power law component A_i*(theta)^(n_i)"""
n0, theta0 = n_s[0], theta_s[0]
a0 = I_theta0 * theta0**(n0)
a_s = np.zeros(len(n_s))
a_s[0] = a0
I_theta_i = a0 * float(theta_s[1])**(-n0)
for i, (n_i, theta_i) in enumerate(zip(n_s[1:], theta_s[1:])):
# if (i+2) == len(n_s):
# break
a_i = I_theta_i/(theta_s[i+1])**(-n_i)
a_s[i+1] = a_i
# guard the look-ahead: the outermost component has no further break radius
if i+2 < len(theta_s):
    I_theta_i = a_i * float(theta_s[i+2])**(-n_i)
return a_s
# deprecated
def multi_pow0(x, n0, n_s, theta0, theta_s, I_theta0, a0=None, a_s=None):
""" Continuous multi-power law for single element """
if a0 is None:
a0, a_s = compute_multi_pow_norm0(n0, n_s, theta0, theta_s, I_theta0)
if x <= theta0:
return I_theta0
elif x<= theta_s[0]:
y = a0 * x**(-n0)
return y
else:
for k in range(len(a_s-1)):
try:
if x <= theta_s[k+1]:
y = a_s[k] * x**(-n_s[k])
return y
except IndexError:
pass
else:
y = a_s[k] * x**(-n_s[k])
return y
def multi_pow(x, n_s, theta_s, I_theta0, a_s=None):
""" Continuous multi-power law for single element """
if a_s is None:
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
n0, theta0, a0 = n_s[0], theta_s[0], a_s[0]
if x <= theta0:
return I_theta0
elif x<= theta_s[1]:
y = a0 * x**(-n0)
return y
else:
for k in range(len(a_s)):
try:
if x <= theta_s[k+2]:
y = a_s[k+1] * x**(-n_s[k+1])
return y
except IndexError:
pass
else:
y = a_s[-1] * x**(-n_s[-1])
return y
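# Sketch: a quick sanity check (not part of the pipeline) that the normalization
# constants from compute_multi_pow_norm keep the multi-power profile continuous
# across the break radii; the indices and radii below are arbitrary.
def _check_multi_pow_continuity():
    n_s = np.array([3.3, 2.5, 4.0])
    theta_s = np.array([5., 80., 480.])      # break radii in pixel
    for t in theta_s[1:]:
        below = multi_pow(t * (1 - 1e-6), n_s, theta_s, I_theta0=1.)
        above = multi_pow(t * (1 + 1e-6), n_s, theta_s, I_theta0=1.)
        assert np.isclose(below, above, rtol=1e-3), (t, below, above)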
### 1D functions ###
def power1d(x, n, theta0, I_theta0):
""" Power law for 1d array, I = I_theta0 at theta0, theta in pix """
a = I_theta0 / (theta0)**(-n)
y = a * np.power(x + 1e-6, -n)
return y
def trunc_power1d(x, n, theta0, I_theta0=1):
""" Truncated power law for 1d array, I = I_theta0 at theta0, theta in pix """
a = I_theta0 / (theta0)**(-n)
y = a * np.power(x + 1e-6, -n)
y[x<=theta0] = I_theta0
return y
# deprecated
def multi_power1d0(x, n0, theta0, I_theta0, n_s, theta_s):
""" Multi-power law for 1d array, I = I_theta0 at theta0, theta in pix"""
a0, a_s = compute_multi_pow_norm0(n0, n_s, theta0, theta_s, I_theta0)
y = a0 * np.power(x + 1e-6, -n0)
y[x<=theta0] = I_theta0
for i, (n_i, a_i, theta_i) in enumerate(zip(n_s, a_s, theta_s)):
y_i = a_i * np.power(x, -n_i)
y[x>theta_i] = y_i[x>theta_i]
return y
def multi_power1d(x, n_s, theta_s, I_theta0):
""" Multi-power law for 1d array, I = I_theta0 at theta0, theta in pix"""
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
theta0 = theta_s[0]
y = np.zeros_like(x)
y[x<=theta0] = I_theta0
for k in range(len(a_s)):
reg = (x>theta_s[k]) & (x<=theta_s[k+1]) if k<len(a_s)-1 else (x>theta_s[k])
y[reg] = a_s[k] * np.power(x[reg], -n_s[k])
return y
def moffat_power1d(x, gamma, alpha, n, theta0, A=1):
""" Moffat + Power for 1d array, flux normalized = 1, theta in pix """
Mof_mod_1d = models.Moffat1D(amplitude=A, x_0=0, gamma=gamma, alpha=alpha)
y = np.zeros_like(x, dtype=float)
y[x<=theta0] = Mof_mod_1d(x[x<=theta0])
y[x>theta0] = power1d(x[x>theta0], n, theta0, Mof_mod_1d(theta0))
return y
def trunc_power1d_normed(x, n, theta0):
""" Truncated power law for 1d array, flux normalized = 1, theta in pix """
norm_pow = quad(trunc_pow, 0, np.inf, args=(n, theta0, 1))[0]
y = trunc_power1d(x, n, theta0, 1) / norm_pow
return y
def moffat1d_normed(x, gamma, alpha):
""" Moffat for 1d array, flux normalized = 1 """
Mof_mod_1d = models.Moffat1D(amplitude=1, x_0=0, gamma=gamma, alpha=alpha)
norm_mof = quad(Mof_mod_1d, 0, np.inf)[0]
y = Mof_mod_1d(x) / norm_mof
return y
def multi_power1d_normed(x, n_s, theta_s):
""" Multi-power law for 1d array, flux normalized = 1, theta in pix """
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
norm_mpow = quad(multi_pow, 0, np.inf,
args=(n_s, theta_s, 1, a_s), limit=100)[0]
y = multi_power1d(x, n_s, theta_s, 1) / norm_mpow
return y
### 2D functions ###
def map2d(f, xx=None, yy=None):
return f(xx,yy)
def map2d_k(k, func_list, xx=None, yy=None):
return func_list[k](xx, yy)
@lru_cache(maxsize=16)
def generate_psf_grid(psf_size):
# Generate Grid of PSF and plot PSF model in real space onto it
cen_psf = ((psf_size-1)/2., (psf_size-1)/2.)
yy_psf, xx_psf = np.mgrid[:psf_size, :psf_size]
return xx_psf, yy_psf, cen_psf
def power2d(xx, yy, n, theta0, I_theta0, cen):
""" Power law for 2d array, normalized = I_theta0 at theta0 """
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2) + 1e-6
rr[rr<=1] = rr[rr>1].min()
a = I_theta0 / (theta0)**(-n)
z = a * np.power(rr, -n)
return z
@njit
def trunc_power2d(xx, yy, n, theta0, I_theta0, cen):
""" Truncated power law for 2d array, normalized = I_theta0 at theta0 """
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2).ravel() + 1e-6
a = I_theta0 / (theta0)**(-n)
z = a * np.power(rr, -n)
z[rr<=theta0] = I_theta0
return z.reshape(xx.shape)
# deprecated
def multi_power2d_cover(xx, yy, n0, theta0, I_theta0, n_s, theta_s, cen):
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2) + 1e-6
a0 = I_theta0/(theta0)**(-n0)
z = a0 * np.power(rr, -n0)
z[rr<=theta0] = I_theta0
I_theta_i = a0 * float(theta_s[0])**(-n0)
for i, (n_i, theta_i) in enumerate(zip(n_s, theta_s)):
a_i = I_theta_i/(theta_i)**(-n_i)
z_i = a_i * np.power(rr, -n_i)
z[rr>theta_i] = z_i[rr>theta_i]
try:
I_theta_i = a_i * float(theta_s[i+1])**(-n_i)
except IndexError:
pass
return z
@njit
def multi_power2d(xx, yy, n_s, theta_s, I_theta0, cen):
""" Multi-power law for 2d array, I = I_theta0 at theta0, theta in pix"""
a_s = compute_multi_pow_norm(n_s, theta_s, I_theta0)
rr = np.sqrt((xx-cen[0])**2 + (yy-cen[1])**2).ravel()
z = np.zeros(xx.size)
theta0 = theta_s[0]
z[rr<=theta0] = I_theta0
for k in range(len(a_s)):
reg = (rr>theta_s[k]) & (rr<=theta_s[k+1]) if k<len(a_s)-1 else (rr>theta_s[k])
z[reg] = a_s[k] * np.power(rr[reg], -n_s[k])
return z.reshape(xx.shape)
### Flux/Amplitude Convertion ###
def moffat1d_Flux2Amp(r_core, beta, Flux=1):
""" Calculate the (astropy) amplitude of 1d Moffat profile given the core width, power index, and total flux F.
Note in astropy unit (x,y) the amplitude should be scaled with 1/sqrt(pi)."""
Amp = Flux * Gamma(beta) / ( r_core * np.sqrt(np.pi) * Gamma(beta-1./2) ) # Derived scaling factor
return Amp
def moffat1d_Amp2Flux(r_core, beta, Amp=1):
Flux = Amp / moffat1d_Flux2Amp(r_core, beta, Flux=1)
return Flux
def power1d_Flux2Amp(n, theta0, Flux=1, trunc=True):
if trunc:
I_theta0 = Flux * (n-1)/n / theta0
else:
I_theta0 = Flux * (n-1) / theta0
return I_theta0
def power1d_Amp2Flux(n, theta0, Amp=1, trunc=True):
if trunc:
Flux = Amp * n/(n-1) * theta0
else:
Flux = Amp * 1./(n-1) * theta0
return Flux
def moffat2d_Flux2Amp(r_core, beta, Flux=1):
return Flux * (beta-1) / r_core**2 / np.pi
def moffat2d_Amp2Flux(r_core, beta, Amp=1):
return Amp / moffat2d_Flux2Amp(r_core, beta, Flux=1)
def moffat2d_Flux2I0(r_core, beta, Flux=1):
Amp = moffat2d_Flux2Amp(r_core, beta, Flux=Flux)
return moffat2d_Amp2I0(beta, Amp=Amp)
def moffat2d_I02Amp(beta, I0=1):
# Convert I0(r=r_core) to Amplitude
return I0 * 2**(2*beta)
def moffat2d_Amp2I0(beta, Amp=1):
# Convert Amplitude to I0(r=r_core)
return Amp * 2**(-2*beta)
# def power2d_Flux2Amp(n, theta0, Flux=1, trunc=True):
# if trunc:
# I_theta0 = (1./np.pi) * Flux * (n-2)/n / theta0**2
# else:
# I_theta0 = (1./np.pi) * Flux * (n-2)/2 / theta0**2
# return I_theta0
# def power2d_Amp2Flux(n, theta0, Amp=1, trunc=True):
# return Amp / power2d_Flux2Amp(n, theta0, Flux=1, trunc=trunc)
# def power2d_Flux2Amp(n, theta0, Flux=1, r_trunc=500):
# if n>2:
# I_theta0 = (1./np.pi) * Flux * (n-2)/n / theta0**2
# elif n<2:
# I_theta0 = (1./np.pi) * Flux / (1 + 2*r_trunc**(2-n)/(2-n)) / theta0**2
# else:
# I_theta0 = (1./np.pi) * Flux / (1 + 2*math.log(r_trunc/theta0)) / theta0**2
# return I_theta0
def power2d_Flux2Amp(n, theta0, Flux=1):
if n>2:
I_theta0 = (1./np.pi) * Flux * (n-2)/n / theta0**2
else:
raise InconvergenceError('PSF flux does not converge at infinity for n <= 2.')
return I_theta0
def power2d_Amp2Flux(n, theta0, Amp=1):
return Amp / power2d_Flux2Amp(n, theta0, Flux=1)
def multi_power2d_Amp2Flux(n_s, theta_s, Amp=1, theta_trunc=1e5):
""" convert amplitude(s) to integral flux with 2D multi-power law """
if np.ndim(Amp)>0:
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
a_s = np.multiply(a_s[:,np.newaxis], Amp)
else:
a_s = compute_multi_pow_norm(n_s, theta_s, Amp)
I_2D = sum_I2D_multi_power2d(Amp, a_s, n_s, theta_s, theta_trunc)
return I_2D
@njit
def sum_I2D_multi_power2d(Amp, a_s, n_s, theta_s, theta_trunc=1e5):
""" Supplementary function for multi_power2d_Amp2Flux tp speed up """
theta0 = theta_s[0]
I_2D = Amp * np.pi * theta0**2
for k in range(len(n_s)-1):
if n_s[k] == 2:
I_2D += 2*np.pi * a_s[k] * math.log(theta_s[k+1]/theta_s[k])
else:
I_2D += 2*np.pi * a_s[k] * (theta_s[k]**(2-n_s[k]) - theta_s[k+1]**(2-n_s[k])) / (n_s[k]-2)
if n_s[-1] > 2:
I_2D += 2*np.pi * a_s[-1] * theta_s[-1]**(2-n_s[-1]) / (n_s[-1]-2)
elif n_s[-1] == 2:
I_2D += 2*np.pi * a_s[-1] * math.log(theta_trunc/theta_s[-1])
else:
I_2D += 2*np.pi * a_s[-1] * (theta_trunc**(2-n_s[-1]) - theta_s[-1]**(2-n_s[-1])) / (2-n_s[-1])
return I_2D
def multi_power2d_Flux2Amp(n_s, theta_s, Flux=1):
return Flux / multi_power2d_Amp2Flux(n_s, theta_s, Amp=1)
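# Sketch: round-trip check of the multi-power Amp <-> Flux conversion with
# arbitrary illustrative parameters (not executed at import).
def _check_mpow_flux_roundtrip():
    n_s = np.array([3.3, 2.5, 4.0])
    theta_s = np.array([5., 80., 480.])
    amp = multi_power2d_Flux2Amp(n_s, theta_s, Flux=1e5)
    assert np.isclose(multi_power2d_Amp2Flux(n_s, theta_s, Amp=amp), 1e5)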
def I2I0_mof(r_core, beta, r, I=1):
""" Convert Intensity I(r) at r to I at r_core with moffat.
r_core and r in pixel """
Amp = I * (1+(r/r_core)**2)**beta
I0 = moffat2d_Amp2I0(beta, Amp)
return I0
def I02I_mof(r_core, beta, r, I0=1):
""" Convert I at r_core to Intensity I(r) at r with moffat.
r_core and r in pixel """
Amp = moffat2d_I02Amp(beta, I0)
I = Amp * (1+(r/r_core)**2)**(-beta)
return I
def I2Flux_mof(frac, r_core, beta, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of moffat.
r_core and r in pixel """
Amp = I * (1+(r/r_core)**2)**beta
Flux_mof = moffat2d_Amp2Flux(r_core, beta, Amp=Amp)
Flux_tot = Flux_mof / frac
return Flux_tot
def Flux2I_mof(frac, r_core, beta, r, Flux=1):
""" Convert total flux at r to Intensity I(r) with fraction of moffat.
r_core and r in pixel """
Flux_mof = Flux * frac
Amp = moffat2d_Flux2Amp(r_core, beta, Flux=Flux_mof)
I = Amp * (1+(r/r_core)**2)**(-beta)
return I
def I2I0_pow(n0, theta0, r, I=1):
""" Convert Intensity I(r) at r to I at theta_0 with power law.
theta0 and r in pixel """
I0 = I * (r/theta0)**n0
return I0
def I02I_pow(n0, theta0, r, I0=1):
""" Convert Intensity I(r) at r to I at theta_0 with power law.
theata_s and r in pixel """
I = I0 / (r/theta0)**n0
return I
def I2Flux_pow(frac, n0, theta0, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of power law.
theta0 and r in pixel """
I0 = I2I0_pow(n0, theta0, r, I=I)
Flux_pow = power2d_Amp2Flux(n0, theta0, Amp=I0)
Flux_tot = Flux_pow / frac
return Flux_tot
def Flux2I_pow(frac, n0, theta0, r, Flux=1):
""" Convert total flux to Intensity I(r) at r.
theta0 and r in pixel """
Flux_pow = Flux * frac
I0 = power2d_Flux2Amp(n0, theta0, Flux=Flux_pow)
I = I0 / (r/theta0)**n0
return I
def I2I0_mpow(n_s, theta_s_pix, r, I=1):
""" Convert Intensity I(r) at r to I at theta_0 with multi-power law.
theta_s and r in pixel """
i = np.digitize(r, theta_s_pix, right=True) - 1
I0 = I * r**(n_s[i]) * theta_s_pix[0]**(-n_s[0])
for j in range(i):
I0 *= theta_s_pix[j+1]**(n_s[j]-n_s[j+1])
return I0
def I02I_mpow(n_s, theta_s_pix, r, I0=1):
""" Convert Intensity I(r) at r to I at theta_0 with multi-power law.
theata_s and r in pixel """
i = np.digitize(r, theta_s_pix, right=True) - 1
I = I0 / r**(n_s[i]) / theta_s_pix[0]**(-n_s[0])
for j in range(i):
I *= theta_s_pix[j+1]**(n_s[j+1]-n_s[j])
return I
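# Sketch: I(r) measured at r = r_scale should map to I0 and back consistently;
# the indices and break radii below are arbitrary illustrative values (in pixel).
def _check_mpow_I0_roundtrip():
    n_s = np.array([3.3, 2.5, 4.0])
    theta_s_pix = np.array([2., 32., 192.])
    I0 = I2I0_mpow(n_s, theta_s_pix, r=12, I=10.)
    assert np.isclose(I02I_mpow(n_s, theta_s_pix, r=12, I0=I0), 10.)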
def calculate_external_light_pow(n0, theta0, pos_source, pos_eval, I0_source):
# Calculate light produced by source (I0, pos_source) at pos_eval.
r_s = distance.cdist(pos_source, pos_eval)
I0_s = np.repeat(I0_source[:, np.newaxis], r_s.shape[-1], axis=1)
I_s = I0_s / (r_s/theta0)**n0
I_s[(r_s==0)] = 0
return I_s.sum(axis=0)
def calculate_external_light_mpow(n_s, theta_s_pix, pos_source, pos_eval, I0_source):
# Calculate light produced by source (I0_source, pos_source) at pos_eval.
r_s = distance.cdist(pos_source, pos_eval)
r_inds = np.digitize(r_s, theta_s_pix, right=True) - 1
r_inds_uni, r_inds_inv = np.unique(r_inds, return_inverse=True)
I0_s = np.repeat(I0_source[:, np.newaxis], r_s.shape[-1], axis=1)
# I(r) = I0 * (theta0/theta1)^(n0) * (theta1/theta2)^(n1) *...* (theta_{k}/r)^(nk)
I_s = I0_s * theta_s_pix[0]**n_s[0] / r_s**(n_s[r_inds])
factors = np.array([np.prod([theta_s_pix[j+1]**(n_s[j+1]-n_s[j])
for j in range(i)]) for i in r_inds_uni])
I_s *= factors[r_inds_inv].reshape(len(I0_source),-1)
I_s[(r_s==0)] = 0
return I_s.sum(axis=0)
# #deprecated
# def I02I_mpow_2d(n_s, theta_s, r_s, I0=1):
# """ Convert Intensity I(r) at multiple r to I at theta_0 with multi-power law.
# theata_s and r in pixel
# return I (# of I0, # of distance) """
# r_inds = np.digitize(r_s, theta_s, right=True) - 1
# r_inds_uni, r_inds_inv = np.unique(r_inds, return_inverse=True)
# I0 = np.atleast_1d(I0)
# I0_s = np.repeat(I0[:, np.newaxis], r_s.shape[-1], axis=1)
# I_s = I0_s / r_s**(n_s[r_inds]) / theta_s[0]**(-n_s[0])
# factors = np.array([np.prod([theta_s[j+1]**(n_s[j+1]-n_s[j])
# for j in range(i)]) for i in r_inds_uni])
# I_s *= factors[r_inds_inv]
# return I_s
# #deprecated
# def extract_external_light(I_s):
# inds = np.arange(I_s.shape[0])
# comb_inds = np.array(list(combinations(inds, 2)))
# mutual = (comb_inds, inds[:,np.newaxis])
# I_sum = np.zeros_like(I_s.shape[0])
# for (c_ind, I) in zip(comb_inds,I_s[mutual]):
# I_sum[c_ind[0]] += I[1]
# I_sum[c_ind[1]] += I[0]
# return I_sum
def I2Flux_mpow(frac, n_s, theta_s, r, I=1):
""" Convert Intensity I(r) at r to total flux with fraction of multi-power law.
theta_s and r in pixel """
I0 = I2I0_mpow(n_s, theta_s, r, I=I)
Flux_mpow = multi_power2d_Amp2Flux(n_s=n_s, theta_s=theta_s, Amp=I0)
Flux_tot = Flux_mpow / frac
return Flux_tot
def Flux2I_mpow(frac, n_s, theta_s, r, Flux=1):
""" Convert total flux to Intensity I(r) at r.
theta_s and r in pixel """
i = np.digitize(r, theta_s, right=True) - 1
Flux_mpow = Flux * frac
I0 = multi_power2d_Flux2Amp(n_s=n_s, theta_s=theta_s, Flux=Flux_mpow)
I = I0 / r**(n_s[i]) / theta_s[0]**(-n_s[0])
for j in range(i):
I /= theta_s[j+1]**(n_s[j]-n_s[j+1])
return I
### 1D/2D conversion factor ###
def C_mof2Dto1D(r_core, beta):
""" gamma in pixel """
return 1./(beta-1) * 2*math.sqrt(np.pi) * r_core * Gamma(beta) / Gamma(beta-1./2)
def C_mof1Dto2D(r_core, beta):
""" gamma in pixel """
return 1. / C_mof2Dto1D(r_core, beta)
@njit
def C_pow2Dto1D(n, theta0):
""" theta0 in pixel """
return np.pi * theta0 * (n-1) / (n-2)
@njit
def C_pow1Dto2D(n, theta0):
""" theta0 in pixel """
return 1. / C_pow2Dto1D(n, theta0)
@njit
def C_mpow2Dto1D(n_s, theta_s):
""" theta in pixel """
a_s = compute_multi_pow_norm(n_s, theta_s, 1)
n0, theta0, a0 = n_s[0], theta_s[0], a_s[0]
I_2D = 1. * np.pi * theta0**2
for k in range(len(n_s)-1):
if n_s[k] == 2:
I_2D += 2*np.pi * a_s[k] * np.log(theta_s[k+1]/theta_s[k])
else:
I_2D += 2*np.pi * a_s[k] * (theta_s[k]**(2-n_s[k]) - theta_s[k+1]**(2-n_s[k])) / (n_s[k]-2)
I_2D += 2*np.pi * a_s[-1] * theta_s[-1]**(2-n_s[-1]) / (n_s[-1]-2)
I_1D = 1. * theta0
for k in range(len(n_s)-1):
if n_s[k] == 1:
I_1D += a_s[k] * np.log(theta_s[k+1]/theta_s[k])
else:
I_1D += a_s[k] * (theta_s[k]**(1-n_s[k]) - theta_s[k+1]**(1-n_s[k])) / (n_s[k]-1)
I_1D += a_s[-1] * theta_s[-1]**(1-n_s[-1]) / (n_s[-1]-1)
return I_2D / I_1D
@njit
def C_mpow1Dto2D(n_s, theta_s):
""" theta in pixel """
return 1. / C_mpow2Dto1D(n_s, theta_s)
############################################
# Functions for PSF rendering with Galsim
############################################
def get_center_offset(pos):
    # Shift center for the purpose of accuracy (by default galsim rounds to integer!)
    # original: x_pos, y_pos = pos + 1
    # test on Mar 28
    x_pos, y_pos = pos
x_nominal = x_pos + 0.5
y_nominal = y_pos + 0.5
ix_nominal = int(math.floor(x_nominal+0.5))
iy_nominal = int(math.floor(y_nominal+0.5))
dx = x_nominal - ix_nominal
dy = y_nominal - iy_nominal
offset = galsim.PositionD(dx,dy)
return (ix_nominal, iy_nominal), offset
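# Illustrative usage sketch (not part of the original code): the helper above splits
# an (x, y) position into an integer stamp center plus a sub-pixel galsim offset;
# the position below is hypothetical.
def _demo_center_offset():
    pos = np.array([100.3, 56.8])
    (ix_nominal, iy_nominal), offset = get_center_offset(pos)
    # offset.x, offset.y hold the fractional shifts passed to drawImage(offset=...)
    return (ix_nominal, iy_nominal), offset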
def draw_star(k, star_pos, Flux, psf_star, psf_size, full_image, pixel_scale=2.5):
""" Draw star #k at position star_pos[k] with Flux[k], using a combined PSF (psf_star) on full_image"""
    # Drawing routine, factored out to facilitate parallelization.
stamp, bounds = get_stamp_bounds(k, star_pos, Flux, psf_star, psf_size,
full_image, pixel_scale=pixel_scale)
full_image[bounds] += stamp[bounds]
def get_stamp_bounds(k, star_pos, Flux, psf_star, psf_size, full_image, pixel_scale=2.5):
""" Get stamp and boundary of star #k at position star_pos[k] with Flux[k], using a combined PSF (psf_star) on full_image"""
pos, flux = star_pos[k], Flux[k]
star = psf_star.withFlux(flux)
# Account for the fractional part of the position
(ix_nominal, iy_nominal), offset = get_center_offset(pos)
stamp = star.drawImage(nx=psf_size, ny=psf_size, scale=pixel_scale,
offset=offset, method='no_pixel')
stamp.setCenter(ix_nominal, iy_nominal)
bounds = stamp.bounds & full_image.bounds
return stamp, bounds
############################################
# Functions for making mock images
############################################
def make_noise_image(image_size, noise_std, random_seed=42, verbose=True):
""" Make noise image """
if verbose:
print("Generate noise background w/ stddev = %.3g"%noise_std)
noise_image = galsim.ImageF(image_size, image_size)
rng = galsim.BaseDeviate(random_seed)
gauss_noise = galsim.GaussianNoise(rng, sigma=noise_std)
noise_image.addNoise(gauss_noise)
return noise_image.array
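# Illustrative usage sketch (not part of the original code): draw a small Gaussian
# noise map with the routine above; the size and stddev are hypothetical.
def _demo_noise_image():
    noise = make_noise_image(image_size=64, noise_std=0.1, random_seed=0, verbose=False)
    return noise.shape  # (64, 64)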
def make_base_image(image_size, stars, psf_base, pad=100, psf_size=64, verbose=True):
""" Background images composed of dim stars with fixed PSF psf_base"""
if verbose:
print("Generate base image of faint stars (flux < %.2g)."%(stars.F_bright))
start = time.time()
image_size0 = image_size + 2 * pad
full_image0 = galsim.ImageF(image_size0, image_size0)
star_pos = stars.star_pos_faint + pad
Flux = stars.Flux_faint
if len(star_pos) == 0:
return np.zeros((image_size0, image_size0))
# draw faint stars with fixed PSF using galsim in Fourier space
for k in range(len(star_pos)):
try:
draw_star(k, star_pos=star_pos, Flux=Flux,
psf_star=psf_base, psf_size=psf_size, full_image=full_image0)
except GalSimBoundsError as e:
if verbose:
print(e.__doc__)
print(e.message)
continue
image_gs0 = full_image0.array
end = time.time()
if verbose: print("Total Time: %.3f s\n"%(end-start))
return image_gs0[pad:image_size0-pad, pad:image_size0-pad]
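# Illustrative usage sketch (not part of the original code): 'stars' is assumed to be
# a Stars object and 'psf_base' a galsim profile for the faint stars, built elsewhere;
# the image size below is hypothetical.
def _demo_base_image(stars, psf_base):
    return make_base_image(401, stars, psf_base, pad=50, psf_size=64, verbose=False)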
def make_truth_image(psf, stars, image_size, contrast=1e6,
parallel=False, verbose=False, saturation=4.5e4):
"""
Draw truth image according to the given position & flux.
    In two ways: 1) convolution in Fourier space w/ Galsim;
    and 2) evaluation in real space w/ astropy models.
"""
if verbose:
print("Generate the truth image.")
start = time.time()
frac = psf.frac
gamma_pix = psf.gamma_pix
beta = psf.beta
yy, xx = np.mgrid[:image_size, :image_size]
psf_core = psf.psf_core
psf_aureole = psf.psf_aureole
full_image = galsim.ImageF(image_size, image_size)
Flux_A = stars.Flux_bright
star_pos_A = stars.star_pos_bright
image_gs = full_image.array
# Draw bright stars in real space
func_core_2d_s = psf.draw_core2D_in_real(star_pos_A, (1-frac) * Flux_A)
func_aureole_2d_s = psf.draw_aureole2D_in_real(star_pos_A, frac * Flux_A)
# option for drawing in parallel
if (not parallel) | (parallel_enabled==False) :
if verbose:
print("Rendering bright stars in serial...")
image_real = np.sum([f2d(xx,yy) + p2d(xx,yy)
for (f2d, p2d) in zip(func_core_2d_s,
func_aureole_2d_s)], axis=0)
else:
if verbose:
print("Rendering bright stars in parallel...")
func2d_s = np.concatenate([func_core_2d_s, func_aureole_2d_s])
p_map2d = partial(map2d, xx=xx, yy=yy)
image_stars = parallel_compute(func2d_s, p_map2d,
lengthy_computation=False, verbose=verbose)
image_real = np.sum(image_stars, axis=0)
# combine the two image
image = image_gs + image_real
# saturation limit
image[image>saturation] = saturation
if verbose:
end = time.time()
print("Total Time: %.3f s\n"%(end-start))
return image
def generate_image_by_flux(psf, stars, xx, yy,
contrast=[5e4,5e5],
psf_range=[None,None],
min_psf_range=60,
max_psf_range=1200,
psf_scale=2.5,
parallel=False,
draw_real=True,
n_real=2.5,
subtract_external=False,
draw_core=False,
brightest_only=False,
interpolant='cubic'):
"""
Generate the image by total flux, given the PSF object and Star object.
Parameters
----------
psf : PSF model describing the PSF model shape
stars : Star model describing positions and scaling of stars
contrast : Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given.
psf_range : full range of PSF size (in arcsec) for drawing [medium, very] bright stars in convolution. Use contrast if not given.
min_psf_range : Minimum range of PSF if a contrast is used. In arcsec.
max_psf_range : Maximum range of PSF if a contrast is used. In arcsec.
    psf_scale : pixel scale of PSF, in arcsec/pixel. Default to DF pixel scale.
parallel : whether to run drawing for medium bright stars in parallel.
draw_real : whether to draw very bright stars in real.
    n_real : first power index below which stars will be drawn by convolution.
draw_core : whether to draw the core for very bright stars in real.
brightest_only : whether to draw very bright stars only.
interpolant : Interpolant method in Galsim.
Returns
----------
image : drawn image
"""
image_size = xx.shape[0]
frac = psf.frac
if psf_scale is None:
psf_scale = psf.pixel_scale
if not(draw_real & brightest_only):
psf_c = psf.psf_core
# Setup the canvas
full_image = galsim.ImageF(image_size, image_size)
if not brightest_only:
# Draw medium bright stars with galsim in Fourier space
psf_e, psf_size = psf.generate_aureole(contrast=contrast[0],
psf_scale=psf_scale,
psf_range=psf_range[0],
min_psf_range=min_psf_range//2,
max_psf_range=max_psf_range//2,
interpolant=interpolant)
psf_size = psf_size // 2 * 2
psf_star = (1-frac) * psf_c + frac * psf_e
if (not parallel) | (parallel_enabled==False):
# Draw in serial
for k in range(stars.n_medbright):
draw_star(k,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
else:
            # Draw in parallel; automatically falls back to serial computing if there are too few jobs
p_get_stamp_bounds = partial(get_stamp_bounds,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
results = parallel_compute(np.arange(stars.n_medbright), p_get_stamp_bounds,
lengthy_computation=False, verbose=False)
for (stamp, bounds) in results:
full_image[bounds] += stamp[bounds]
if draw_real:
# Draw aureole of very bright star (if high cost in FFT) in real space
image_gs = full_image.array
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright,
Flux=frac * stars.Flux_verybright)
image_aureole = np.sum([f2d(xx,yy) for f2d in func_aureole_2d_s], axis=0)
if draw_core:
func_core_2d_s = psf.draw_core2D_in_real(stars.star_pos_verybright,
Flux=(1-frac) * stars.Flux_verybright)
image_gs += np.sum([f2d(xx,yy) for f2d in func_core_2d_s], axis=0)
image = image_gs + image_aureole
else:
# Draw very bright star in Fourier space
psf_e_2, psf_size_2 = psf.generate_aureole(contrast=contrast[1],
psf_scale=psf_scale,
psf_range=psf_range[1],
min_psf_range=min_psf_range,
max_psf_range=max_psf_range,
interpolant=interpolant)
psf_size_2 = psf_size_2 // 2 * 2
psf_star_2 = (1-frac) * psf_c + frac * psf_e_2
for k in range(stars.n_verybright):
draw_star(k,
star_pos=stars.star_pos_verybright,
Flux=stars.Flux_verybright,
psf_star=psf_star_2,
psf_size=psf_size_2,
full_image=full_image)
image = full_image.array
return image
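# Illustrative usage sketch (not part of the original code): typical call pattern for
# the routine above; 'psf' and 'stars' are assumed to be pre-built PSF / Stars objects
# (as elsewhere in this module) and the grid size / contrasts are hypothetical.
def _demo_generate_by_flux(psf, stars, image_size):
    yy, xx = np.mgrid[:image_size, :image_size]
    return generate_image_by_flux(psf, stars, xx, yy,
                                  contrast=[5e4, 5e5],
                                  brightest_only=False, draw_real=True)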
def generate_image_by_znorm(psf, stars, xx, yy,
contrast=[1e5,1e6],
psf_range=[None,None],
min_psf_range=120,
max_psf_range=1200,
psf_scale=2.5,
parallel=False,
draw_real=True,
brightest_only=False,
subtract_external=True,
draw_core=False,
interpolant='cubic'):
"""
Generate the image by flux scaling, given the PSF object and Star object.
Parameters
----------
psf : PSF model describing the PSF model shape
stars : Star model describing positions and scaling of stars
contrast : Ratio of the intensity at max range and at center. Used to calculate the PSF size if not given.
psf_range : full range of PSF size (in arcsec) for drawing [medium, very] bright stars in convolution. Use contrast if not given.
min_psf_range : Minimum range of PSF if a contrast is used. In arcsec.
max_psf_range : Maximum range of PSF if a contrast is used. In arcsec.
    psf_scale : pixel scale of PSF, in arcsec/pixel. Default to DF pixel scale.
parallel : whether to run drawing for medium bright stars in parallel.
draw_real : whether to draw very bright stars in real.
brightest_only : whether to draw very bright stars only.
draw_core : whether to draw the core for very bright stars in real.
    subtract_external : whether to subtract external scattered light from very bright stars.
interpolant : Interpolant method in Galsim.
Returns
----------
image : drawn image
"""
image_size = xx.shape[0]
frac = psf.frac
r_scale = stars.r_scale
z_norm = stars.z_norm.copy()
# Subtract external light from brightest stars
if subtract_external:
I_ext = psf.calculate_external_light(stars)
z_norm[stars.bright] -= I_ext
if draw_real & brightest_only:
# Skip computation of Flux, and ignore core PSF
I0_verybright = psf.I2I0(z_norm[stars.verybright], r_scale)
else:
# Core PSF
psf_c = psf.psf_core
# Update stellar flux:
z_norm[z_norm<=0] = z_norm[z_norm>0].min()/10 # problematic negatives
Flux = psf.I2Flux(z_norm, r_scale)
stars.update_Flux(Flux)
# Setup the canvas
full_image = galsim.ImageF(image_size, image_size)
if not brightest_only:
# 1. Draw medium bright stars with galsim in Fourier space
psf_e, psf_size = psf.generate_aureole(contrast=contrast[0],
psf_scale=psf_scale,
psf_range=psf_range[0],
min_psf_range=min_psf_range//2,
max_psf_range=max_psf_range//4,
interpolant=interpolant)
# psf_size = psf_size // 2 * 2
# Draw medium bright stars with galsim in Fourier space
psf_star = (1-frac) * psf_c + frac * psf_e
if (not parallel) | (parallel_enabled==False):
# Draw in serial
for k in range(stars.n_medbright):
draw_star(k,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
else:
            # Draw in parallel; automatically falls back to serial computing if there are too few jobs
p_get_stamp_bounds = partial(get_stamp_bounds,
star_pos=stars.star_pos_medbright,
Flux=stars.Flux_medbright,
psf_star=psf_star,
psf_size=psf_size,
full_image=full_image)
results = parallel_compute(np.arange(stars.n_medbright), p_get_stamp_bounds,
lengthy_computation=False, verbose=False)
for (stamp, bounds) in results:
full_image[bounds] += stamp[bounds]
if draw_real:
# Draw very bright star in real space (high cost in convolution)
image_gs = full_image.array
if brightest_only:
# Only plot the aureole. A Deeper mask is required.
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright,
I0=I0_verybright)
else:
# Plot core + aureole.
func_aureole_2d_s = psf.draw_aureole2D_in_real(stars.star_pos_verybright,
Flux=frac * stars.Flux_verybright)
if draw_core:
func_core_2d_s = psf.draw_core2D_in_real(stars.star_pos_verybright,
Flux=(1-frac) * stars.Flux_verybright)
image_gs += np.sum([f2d(xx,yy) for f2d in func_core_2d_s], axis=0)
image_aureole = np.sum([f2d(xx,yy) for f2d in func_aureole_2d_s], axis=0)
image = image_gs + image_aureole
else:
# Draw very bright star in Fourier space
psf_e_2, psf_size_2 = psf.generate_aureole(contrast=contrast[1],
psf_scale=psf_scale,
psf_range=psf_range[1],
min_psf_range=min_psf_range,
max_psf_range=max_psf_range,
interpolant=interpolant)
# psf_size_2 = psf_size_2 // 2 * 2
psf_star_2 = (1-frac) * psf_c + frac * psf_e_2
for k in range(stars.n_verybright):
draw_star(k,
star_pos=stars.star_pos_verybright,
Flux=stars.Flux_verybright,
psf_star=psf_star_2,
psf_size=psf_size_2,
full_image=full_image)
image = full_image.array
return image
def generate_image_fit(psf_fit, stars, image_size, norm='brightness',
brightest_only=False, draw_real=True, leg2d=False):
yy, xx = np.mgrid[:image_size, :image_size]
noise_fit = make_noise_image(image_size, psf_fit.bkg_std, verbose=False)
if norm=='brightness':
draw_func = generate_image_by_znorm
elif norm=='flux':
draw_func = generate_image_by_flux
image_fit = draw_func(psf_fit, stars, xx, yy,
psf_range=[900,1800], psf_scale=psf_fit.pixel_scale,
brightest_only=brightest_only, draw_real=draw_real)
bkg_fit = psf_fit.bkg * np.ones((image_size, image_size))
if leg2d:
x_grid = y_grid = np.linspace(0, image_size-1, image_size)
H10 = leggrid2d((x_grid-psf_fit.cen[1])/image_size,
(y_grid-psf_fit.cen[0])/image_size, c=[[0,1],[0,0]])
H01 = leggrid2d((x_grid-psf_fit.cen[1])/image_size,
(y_grid-psf_fit.cen[0])/image_size, c=[[0,0],[1,0]])
bkg_fit += psf_fit.A10 * H10 + psf_fit.A01 * H01
print("Bakground : %.2f +/- %.2f"%(psf_fit.bkg, psf_fit.bkg_std))
return image_fit, noise_fit, bkg_fit
############################################
# Priors and Likelihood Models for Fitting
############################################
def set_prior(n_est, mu_est, std_est, n_spline=2,
n_min=1, d_n0=0.3, theta_in=50, theta_out=240,
std_poi=None, leg2d=False,
fit_sigma=True, fit_frac=False, **kwargs):
"""
Setup prior transforms for models.
Parameters
----------
n_est : estimate of the first power-law index, typically from a simultaneous fitting on the core
    mu_est : estimate of sky background level, from either the global DF reduction pipeline or a local sigma-clipped mean after aggressive masking
    std_est : estimate of sky uncertainty, from a local sigma-clipped stddev after aggressive masking
    n_spline : number of power-law components for modeling the aureole
    n_min : minimum power index allowed in fitting
    d_n0 : stddev of normal prior of n_0
theta_in : inner boundary of the first transition radius
theta_out : outer boundary of the first transition radius
std_poi : poisson noise as minimum noise
leg2d : whether a legendre polynomial background will be fit
fit_frac : whether the aureole fraction will be fit
fit_sigma : whether the sky uncertainty will be fit
Returns
----------
prior_tf : prior transform function for fitting
"""
log_t_in = np.log10(theta_in)
log_t_out = np.log10(theta_out)
dlog_t = log_t_out - log_t_in
Prior_mu = stats.truncnorm(a=-2, b=2., loc=mu_est, scale=std_est) # mu : N(mu_est, std_est)
# counting helper for # of parameters
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
# logsigma : [std_poi, std_est]
if std_poi is None:
Prior_logsigma = stats.truncnorm(a=-2, b=1,
loc=np.log10(std_est), scale=0.3)
else:
bound_a = (np.log10(std_poi)+0.3-np.log10(std_est))/0.3
Prior_logsigma = stats.truncnorm(a=bound_a, b=1,
loc=np.log10(std_est), scale=0.3)
if n_spline == 'm':
Prior_gamma = stats.uniform(loc=0., scale=10.)
Prior_beta = stats.uniform(loc=1.1, scale=6.)
Prior_logfrac = stats.uniform(loc=-2, scale=2.)
def prior_tf_mof(u):
v = u.copy()
v[0] = Prior_gamma.ppf(u[0]) # gamma1
v[1] = Prior_beta.ppf(u[1]) # beta1
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_level = v[-K]
else:
leg_level = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_level-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_level-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_mof
else:
Prior_n = stats.truncnorm(a=-3, b=3., loc=n_est, scale=d_n0) # n0 : N(n, d_n0)
Prior_logfrac = stats.uniform(loc=-2, scale=1.7)
if n_spline == 1:
# Single power law
from .plotting import draw_independent_priors
Priors = [Prior_n, Prior_mu, Prior_logsigma]
# Draw the priors
draw_independent_priors(Priors, **kwargs)
# Build independent priors
prior_tf_p = build_independent_priors(Priors)
return prior_tf_p
elif n_spline==2:
def prior_tf_2p(u):
v = u.copy()
# v[0] = u[0] * 2*d_n0 + (n_est-d_n0) # n0 : n +/- d_n0
v[0] = Prior_n.ppf(u[0]) # n0 : N (n +/- d_n0)
v[1] = u[1] * (v[0]- d_n0 - n_min) + n_min # n1 : n_min - (n0-d_n0)
v[2] = u[2] * dlog_t + log_t_in # log theta1 : t_in-t_out arcsec
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_amp = v[-K]
else:
leg_amp = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_amp-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_amp-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_2p
elif n_spline==3:
def prior_tf_3p(u):
v = u.copy()
v[0] = Prior_n.ppf(u[0])
v[1] = u[1] * 0.5 + (v[0]-1) # n1 : n0-1.0 - n0-0.5
v[2] = u[2] * max(-1., n_min+0.5-v[1]) + (v[1]-0.5)
# n2 : max[n_min, n1-1.5] - n1-0.5
v[3] = u[3] * dlog_t + log_t_in
# log theta1 : t_in-t_out arcsec
v[4] = u[4] * (log_t_out - v[3]) + v[3]
# log theta2 : theta1 - t_out arcsec
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_amp = v[-K]
else:
leg_amp = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_amp-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_amp-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_3p
else:
def prior_tf_sp(u):
v = u.copy()
v[0] = Prior_n.ppf(u[0])
for k in range(n_spline-1):
v[k+1] = u[k+1] * max(-0.3, 1.3-v[k]) + (v[k]-0.3)
                # n_k+1 : [1, n_k-0.6] - n_k-0.3
v[n_spline] = u[n_spline] * dlog_t + log_t_in
# log theta1 : t_in-t_out arcsec
for k in range(n_spline-2):
v[k+n_spline+1] = u[k+n_spline+1] * \
min(0.3, log_t_out - v[k+n_spline]) + v[k+n_spline]
# log theta_k+1: theta_k - [2*theta_k, t_out] # in arcsec
v[-K-1] = Prior_mu.ppf(u[-K-1]) # mu
if fit_sigma:
v[-K] = Prior_logsigma.ppf(u[-K]) # log sigma
leg_amp = v[-K]
else:
leg_amp = 0.5
if leg2d:
v[-K-2] = stats.uniform.ppf(u[-K-2],
loc=leg_amp-1.3, scale=1.3) # log A10
v[-K-3] = stats.uniform.ppf(u[-K-3],
loc=leg_amp-1.3, scale=1.3) # log A01
if fit_frac:
v[-1] = Prior_logfrac.ppf(u[-1]) # log frac
return v
return prior_tf_sp
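# Illustrative usage sketch (not part of the original code): build a two-component
# prior transform and map the mid-point of the unit cube; the estimates below are
# hypothetical.
def _demo_prior_tf():
    prior_tf = set_prior(n_est=3.3, mu_est=884., std_est=0.1, n_spline=2,
                         theta_in=50, theta_out=240)
    u = np.full(5, 0.5)  # params: n0, n1, log theta1, mu, log sigma
    return prior_tf(u)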
def draw_proposal(draw_func, proposal, psf, stars, xx, yy, image_base,
leg2d=False, H10=None, H01=None, K=0, **kwargs):
# Draw image and calculate log-likelihood
mu = proposal[-K-1]
image_tri = draw_func(psf, stars, xx, yy, **kwargs)
image_tri += image_base + mu
if leg2d:
A10, A01 = 10**proposal[-K-2], 10**proposal[-K-3]
image_tri += A10 * H10 + A01 * H01
return image_tri
def calculate_likelihood(ypred, data, sigma):
# Calculate log-likelihood
residsq = (ypred - data)**2 / sigma**2
loglike = -0.5 * np.sum(residsq + np.log(2 * np.pi * sigma**2))
if not np.isfinite(loglike):
loglike = -1e100
return loglike
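# Illustrative sketch (not part of the original code): evaluate the Gaussian
# log-likelihood above on a tiny hypothetical data vector.
def _demo_likelihood():
    data = np.array([1.0, 2.0, 3.0])
    ypred = np.array([1.1, 1.9, 3.2])
    return calculate_likelihood(ypred, data, sigma=0.1)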
def set_likelihood(data, mask_fit, psf_tri, stars_tri,
norm='brightness',
n_spline=2, n_cutoff=4, theta_cutoff=1200,
image_base=None, psf_range=[None,None],
leg2d=False, fit_sigma=True, fit_frac=False,
brightest_only=False, parallel=False, draw_real=False):
"""
Setup likelihood function.
Parameters
----------
data: 1d data to be fit
mask_fit: mask map (masked region is 1)
psf: PSF class
stars: Stars class
Returns
----------
loglike : log-likelihood function for fitting
"""
stars = stars_tri.copy()
psf = psf_tri.copy()
image_size = mask_fit.shape[0]
yy, xx = np.mgrid[:image_size, :image_size]
z_norm = stars.z_norm.copy()
pixel_scale = psf.pixel_scale
bkg = stars.BKG
if norm=='brightness':
draw_func = generate_image_by_znorm
elif norm=='flux':
draw_func = generate_image_by_flux
if (psf.aureole_model!='moffat') & (stars.n_verybright > 0) & (norm=='brightness'):
subtract_external = True
else:
subtract_external = False
if image_base is None:
image_base = np.zeros((image_size, image_size))
# if sigma is None:
# fit_sigma =True
# 1st-order Legendre Polynomial
cen = ((image_size-1)/2., (image_size-1)/2.)
x_grid = y_grid = np.linspace(0,image_size-1, image_size)
H10 = leggrid2d((x_grid-cen[1])/image_size,
(y_grid-cen[0])/image_size, c=[[0,1],[0,0]])
H01 = leggrid2d((x_grid-cen[1])/image_size,
(y_grid-cen[0])/image_size, c=[[0,0],[1,0]])
if n_spline == 'm':
def loglike_mof(v):
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
gamma1, beta1 = v[:2]
mu = v[-K-1]
if fit_sigma:
sigma = 10**v[-K]
param_update = {'gamma1':gamma1, 'beta1':beta1}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (bkg - mu)
image_tri = draw_func(psf, stars, xx, yy,
psf_range=psf_range,
brightest_only=brightest_only,
subtract_external=subtract_external,
parallel=parallel, draw_real=draw_real)
image_tri = image_tri + image_base + mu
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_mof
else:
theta_0 = psf.theta_0
if n_spline==1:
def loglike_p(v):
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
n, mu = v[0], v[-K-1]
if fit_sigma:
sigma = 10**v[-K]
param_update = {'n':n}
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (bkg - mu)
image_tri = draw_func(psf, stars, xx, yy,
psf_range=psf_range,
brightest_only=brightest_only,
parallel=parallel, draw_real=draw_real)
image_tri = image_tri + image_base + mu
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_p
if n_spline==2:
def loglike_2p(v):
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
n_s = np.append(v[:2], n_cutoff)
theta_s = np.append([theta_0, 10**v[2]], theta_cutoff)
mu = v[-K-1]
loglike = -1000
param_update = {'n_s':n_s, 'theta_s':theta_s}
if fit_sigma:
sigma = 10**v[-K]
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (bkg - mu)
image_tri = draw_func(psf, stars, xx, yy,
psf_range=psf_range,
psf_scale=pixel_scale,
brightest_only=brightest_only,
subtract_external=subtract_external,
parallel=parallel, draw_real=draw_real)
image_tri += image_base + mu
if leg2d:
A10, A01 = 10**v[-K-2], 10**v[-K-3]
image_tri += A10 * H10 + A01 * H01
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_2p
elif n_spline==3:
def loglike_3p(v):
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
n_s = np.append(v[:3], n_cutoff)
theta_s = np.append([theta_0, 10**v[3], 10**v[4]], theta_cutoff)
mu = v[-K-1]
param_update ={'n_s':n_s, 'theta_s':theta_s}
if fit_sigma:
sigma = 10**v[-K]
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
if norm=='brightness':
# I varies with sky background
stars.z_norm = z_norm + (bkg - mu)
image_tri = draw_func(psf, stars, xx, yy,
psf_range=psf_range,
psf_scale=pixel_scale,
brightest_only=brightest_only,
subtract_external=subtract_external,
parallel=parallel, draw_real=draw_real)
image_tri += image_base + mu
if leg2d:
A10, A01 = 10**v[-K-2], 10**v[-K-3]
image_tri += A10 * H10 + A01 * H01
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_3p
else:
def loglike_sp(v):
K = 0
if fit_frac: K += 1
if fit_sigma: K += 1
n_s = np.append(v[:n_spline], n_cutoff)
theta_s = np.concatenate([[theta_0], 10**v[n_spline:2*n_spline-1], [theta_cutoff]])
mu = v[-K-1]
param_update ={'n_s':n_s, 'theta_s':theta_s}
if fit_sigma:
sigma = 10**v[-K]
if fit_frac:
frac = 10**v[-1]
param_update['frac'] = frac
psf.update(param_update)
image_tri = draw_proposal(draw_func, v, psf, stars, xx, yy, image_base,
leg2d=leg2d, H10=H10, H01=H01, K=K,
psf_range=psf_range, psf_scale=pixel_scale,
brightest_only=brightest_only,
subtract_external=subtract_external,
parallel=parallel, draw_real=draw_real)
ypred = image_tri[~mask_fit].ravel()
loglike = calculate_likelihood(ypred, data, sigma)
return loglike
return loglike_sp
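# Illustrative usage sketch (not part of the original code): wire the likelihood and
# prior transform together for a nested sampler; 'data', 'mask_fit', 'psf' and 'stars'
# are assumed to be prepared as elsewhere in this module, and the numbers below are
# hypothetical.
def _demo_setup_fit(data, mask_fit, psf, stars):
    loglike = set_likelihood(data, mask_fit, psf, stars,
                             norm='flux', n_spline=2)
    prior_tf = set_prior(n_est=3.3, mu_est=884., std_est=0.1, n_spline=2)
    ndim = 5  # n0, n1, log theta1, mu, log sigma
    return loglike, prior_tf, ndim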
| 80,363 | 35.134892 | 142 |
py
|
elderflower
|
elderflower-master/scripts/Run_Fitting_Mock.py
|
import galsim
import dynesty
from dynesty import plotting as dyplot
from dynesty import utils as dyfunc
from utils import *
from modeling import *
from plotting import *
import matplotlib
matplotlib.use('Agg')
# Option
save = True
dir_name = os.path.join('tmp', id_generator())
check_save_path(dir_name)
# dir_name = 'mp_mock'
print_progress = True
n_spline = 3
n_thread = None
n_cpu = 4
leg2d = False
fit_frac = False
############################################
# Setting
############################################
# Meta-parameter
n_star = 400
wid_strip, n_strip = 8, 32
mu = 884
sigma = 1e-1
# Image Parameter
image_size = 801
pixel_scale = 2.5 # arcsec/pixel
# PSF Parameters
beta = 10 # moffat beta (dimensionless)
fwhm = 2.28 * pixel_scale # moffat FWHM, in arcsec
n0 = 3.3 # true power index
frac = 0.1 # fraction of power law component
theta_0 = 5. # radius at which power law is flattened, in arcsec
n_s = np.array([n0, 3, 2.4, 2., 1.4, 4])
theta_s = np.array([theta_0, 60, 120, 200, 320, 900]) # transition radius in arcsec
# Multi-power PSF
params_mpow = {"fwhm":fwhm, "beta":beta, "frac":frac, "n_s":n_s, 'theta_s':theta_s}
psf = PSF_Model(params=params_mpow, aureole_model='multi-power')
# Build grid of image for drawing
psf.make_grid(image_size, pixel_scale=pixel_scale)
############################################
# Star Distribution (position, flux)
############################################
# Generate randomn star positions
np.random.seed(62)
star_pos = (image_size-2) * np.random.random(size=(n_star,2)) + 1
# Read SE measurement based on APASS
SE_cat_full = Table.read("./SE_APASS/coadd_SloanR_NGC_5907.cat", format="ascii.sextractor").to_pandas()
Flux_Auto_SE = SE_cat_full[SE_cat_full['FLAGS']<8]["FLUX_AUTO"]
# Star flux sampling from SE catalog
np.random.seed(888)
Flux = Flux_Auto_SE.sample(n=n_star).values
Flux[Flux>5e5] *= 15
# Flux Thresholds
F_bright = 1e5
F_verybright = 3e6
stars = Stars(star_pos, Flux, Flux_threshold=[F_bright, F_verybright], verbose=True)
stars.plot_flux_dist(label='flux', save=True, dir_name=dir_name)
############################################
# Generate mock truth and base
############################################
# Generate core and (initial) aureole PSF
psf_c = psf.generate_core()
psf_e, psf_size = psf.generate_aureole(contrast=1e6, psf_range=image_size)
star_psf = (1-frac) * psf_c + frac * psf_e
psf0 = psf.copy()
# Galsim 2D model averaged in 1D
Amp_m = psf.Flux2Amp(Flux).max()
contrast = Amp_m / sigma
psf.plot_model_galsim(psf_c, psf_e, image_size,
contrast=contrast, save=True, dir_name=dir_name)
# Make noise image
noise_image = make_noise_image(image_size, sigma)
# Make sky background and dim stars
image_base = make_base_image(image_size, stars, psf_base=star_psf)
# Make truth image
image = make_truth_image(psf, stars, image_size)
image = image + image_base + mu + noise_image
# Masking
mask = Mask(image, stars, image_size, mu=mu)
# Core mask
r_core_s = [36, 36]
mask.make_mask_map_dual(r_core_s, sn_thre=2.5, n_dilation=5,
draw=True, save=True, dir_name=dir_name)
# Strip + Cross mask
mask.make_mask_strip(wid_strip, n_strip, dist_strip=320, clean=True,
draw=True, save=True, dir_name=dir_name)
stars = mask.stars_new
############################################
# Fitting Preparation
############################################
mask_fit = mask.mask_comb
mask_deep = mask.mask_deep
X = np.array([psf.xx,psf.yy])
Y = image[~mask_fit].copy().ravel()
# Estimated mu and sigma used as prior
Y_clip = sigma_clip(image[~mask_deep].ravel(), sigma=3, maxiters=10)
mu_patch, std_patch = np.mean(Y_clip), np.std(Y_clip)
print("\nEstimate of Background: (%.3f, %.3f)"%(mu_patch, std_patch))
# Choose fitting parameterization
prior_tf = set_prior(3.3, mu_patch, std_patch,
theta_in=90, theta_out=500, n_spline=n_spline)
loglike = set_likelihood(Y, mask_fit, psf, stars, image_base,
psf_range=[320, 640], n_spline=n_spline, norm='flux',
brightest_only=True, parallel=False, draw_real=False)
if n_spline==2:
labels = [r'$n0$', r'$n1$', r'$\theta_1$', r'$\mu$', r'$\log\,\sigma$']
elif n_spline==3:
labels = [r'$n0$', r'$n1$', r'$n2$', r'$\theta_1$', r'$\theta_2$',
r'$\mu$', r'$\log\,\sigma$']
if leg2d:
labels = np.insert(labels, -2, [r'$\log\,A_{01}$', r'$\log\,A_{10}$'])
if fit_frac:
labels = np.insert(labels, -2, [r'$f_{pow}$'])
ndim = len(labels)
print("Labels: ", labels)
############################################
# Run & Plot
############################################
ds = DynamicNestedSampler(loglike, prior_tf, ndim,
n_cpu=n_cpu, n_thread=n_thread)
ds.run_fitting(nlive_init=min(20*(ndim-1),100), nlive_batch=25, maxbatch=2,
print_progress=print_progress)
ds.save_result(filename='Mock-fit_best_%s.res'%(str(n_spline)+'p'),
dir_name=dir_name)
ds.cornerplot(labels=labels, save=save, dir_name=dir_name, figsize=(22, 20))
ds.plot_fit_PSF1D(psf0, labels, n_bootstrap=500,
Amp_max=Amp_m, r_core=r_core_s, save=save, dir_name=dir_name)
draw2D_fit_vs_truth_PSF_mpow(ds.results, psf0, stars, labels, image,
save=save, dir_name=dir_name)
| 5,464 | 29.361111 | 103 |
py
|
elderflower
|
elderflower-master/scripts/Run_Fitting.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run 2D Bayesian PSF fitting on a sub-region with
dynamic nested sampling. The Model PSF is composed
of an inner (fixed) Moffat core and an outer (user-
specified) multi-power law aureole. The fitting
result containing the joint PDF, samples and
weights, etc. and diagnostic plots will be saved.
> Parameter
[-f][--FILTER]: filter of image to be crossmatched. g/G/r/R for Dragonfly.
[-b][--IMAGE_BOUNDS]: bounds of the region to be processed in pixel coordinate. [Xmin, Ymin, Xmax, Ymax]
[-I][--IMAGE]: path of image.
[-n][--N_COMP]: number of multi-power law component (default: 2).
[-r][--R_SCALE]: radius at which normalization is measured, in pixel.
[-m][--MAG_THRE]: magnitude thresholds used to select [medium, very] bright stars (default: [14,11]).
[-M][--MASK_TYPE]: mask core by "radius" or "brightness" (default: "radius").
[-c][--R_CORE]: inner masked radius for [medium, very] bright stars, in pixel (default: 24). A single value can be passed for both.
[-s][--SB_FIT_THRE]: inner masked surface brightness, in mag/arcsec^2 (default: 26)
[-B][--BRIGHTEST_ONLY]: whether to fit brightest stars only.
[-L]: whether to fit a 1st-order Legendre polynomial for the background.
[--PARALLEL]: whether to draw medium bright stars in parallel.
[--N_CPU]: number of CPU used in nested sampling (default: n_cpu-1).
[--NO_PRINT]: if yes, suppress progress print.
[--DIR_MEASURE]: directory name where normalization measurements are saved.
[--DIR_NAME]: directory name for saving fitting outputs.
> Example Usage
1. In jupyter notebook / lab
%matplotlib inline
%run -i Run_Fitting.py -f 'G' -b '[3000, 1300, 4000, 2300]' -n 2 -r 12 -B
2. In bash
python Run_Fitting.py -f 'G' -b '[3000, 1300, 4000, 2300]' -n 2 -r 12 -B
"""
import os
import re
import sys
import getopt
import numpy as np
from src.task import Run_PSF_Fitting
def main(argv):
# Image Parameter (default)
band = "G"
pixel_scale = 2.5 # arcsec/pixel
# Fitting Setup (default)
n_cpu = 4
n_spline = 2
draw_real = True
brightest_only = False
parallel = False
leg2d = False
fit_frac = False
# Fitting Option (default)
print_progress = True
draw = True
save = True
# Measure Parameter
r_scale = 12
mag_threshold = np.array([14, 11])
# Mask Setup
mask_type = 'radius'
r_core = 24
r_out = None
SB_fit_thre = 26
wid_strip, n_strip = 24, 48
# Get Script Options
try:
optlists, args = getopt.getopt(argv, "f:b:n:r:m:c:s:M:I:BLFCP",
["FILTER=", "IMAGE=", "IMAGE_BOUNDS=",
"N_COMP=", "R_SCALE=", "MAG_THRE=",
"MASK_TYPE=", "R_CORE=", "SB_FIT_THRE=",
"N_CPU=", "PARALLEL", "BRIGHTEST_ONLY",
"NO_PRINT", "W_STRIP=", "N_STRIP=",
"CONV", "NO_SAVE",
"DIR_NAME=", "DIR_MEASURE=", "DIR_DATA="])
opts = [opt for opt, arg in optlists]
except getopt.GetoptError as e:
print(e)
sys.exit('Wrong Option.')
for opt, arg in optlists:
if opt in ("-f", "--FILTER"):
if arg in ["G", "R", "r", "g"]:
band = arg.upper()
else:
sys.exit("Filter Not Available.")
# Work Path
work_dir = "/home/qliu/Desktop/PSF"
# Default Input/Output Path
hdu_path = os.path.join(work_dir, "data/coadd_Sloan%s_NGC_5907.fits"%band)
dir_name = os.path.join(work_dir, 'output/fit')
dir_measure = os.path.join(work_dir, 'output/Measure')
# Handling Options
for opt, arg in optlists:
if opt in ("-I", "--IMAGE"):
hdu_path = arg
elif opt in ("-b", "--IMAGE_BOUNDS"):
image_bounds0 = np.array(re.findall(r'\d+', arg), dtype=int)
elif opt in ("-n", "--N_COMP"):
try:
n_spline = np.int(arg)
except ValueError:
sys.exit("Model Not Available.")
elif opt in ("-r", "--R_SCALE"):
r_scale = np.float(arg)
elif opt in ("-m", "--MAG_THRE"):
mag_threshold = np.array(re.findall(r"\d*\.\d+|\d+", arg), dtype=float)
elif opt in ("-M", "--MASK_TYPE"):
mask_type = arg
elif opt in ("-c", "--R_CORE"):
r_core = np.array(re.findall(r"\d*\.\d+|\d+", arg), dtype=float)
elif opt in ("-s","--SB_FIT_THRE"):
SB_fit_thre = np.float(arg)
elif opt in ("--W_STRIP"):
wid_strip = np.float(arg)
elif opt in ("--N_STRIP"):
n_strip = np.float(arg)
elif opt in ("--N_CPU"):
n_cpu = np.int(arg)
elif opt in ("--DIR_NAME"):
dir_name = arg
elif opt in ("--DIR_MEASURE"):
dir_measure = arg
if 'image_bounds0' not in locals():
sys.exit("Image Bounds Required.")
if '-L' in opts: leg2d = True
if '-F' in opts: fit_frac = True
if ('-B' in opts)|("--BRIGHTEST_ONLY" in opts): brightest_only = True
if ('-C' in opts)|("--CONV" in opts): draw_real = False
if ('-P' in opts)|("--PARALLEL" in opts): parallel = True
if ("--NO_PRINT" in opts): print_progress = False
if ("--NO_SAVE" in opts): save = False
if mask_type=='radius':
dir_name = os.path.join(dir_name, "NGC5907-%s-R%dM%dpix_X%dY%d"\
%(band, r_scale, r_core, image_bounds0[0], image_bounds0[1]))
elif mask_type=='brightness':
dir_name = os.path.join(dir_name, "NGC5907-%s-R%dB%.1f_X%dY%d"\
%(band, r_scale, SB_fit_thre, image_bounds0[0], image_bounds0[1]))
if save:
check_save_path(dir_name, make_new=False)
# Run Fitting~!
ds = Run_PSF_Fitting(hdu_path, image_bounds0, n_spline, band,
r_scale=r_scale, mag_threshold=mag_threshold,
mask_type=mask_type, SB_fit_thre=SB_fit_thre,
r_core=r_core, r_out=r_out, leg2d=leg2d,
pixel_scale=pixel_scale, n_cpu=n_cpu,
wid_strip=wid_strip, n_strip=n_strip,
brightest_only=brightest_only, draw_real=draw_real,
parallel=parallel, print_progress=print_progress,
draw=draw, dir_measure=dir_measure,
save=save, dir_name=dir_name)
return opts
if __name__ == "__main__":
main(sys.argv[1:])
| 6,660 | 37.50289 | 131 |
py
|
elderflower
|
elderflower-master/scripts/Measure_Rnorm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Preparatory script doing the following tasks:
1) Crossmatch a source table (e.g. from SExtractor,
SEP, etc.) with star catalog(s), by default using
PANSTARRS DR2 catalog. A new table will be saved which
contains corrected source magnitude (e.g. MAG_AUTO).
2) Make enlarged segmentation map(s) for sub-region(s)
from the star catalog (by default < 23 mag).
3) Measure normalization (i.e. flux scaling) at a
certain radius for bright stars (by default < 15 mag).
> Parameter
[-f][--FILTER]: filter of image to be crossmatched. g/G/r/R for Dragonfly.
[-b][--IMAGE_BOUNDS]: bounds of region(s) to be processed in pixel coordinate.[Xmin, Ymin, Xmax, Ymax],[...],...
[-r][--R_SCALE]: radius at which normalization is measured, in pixel.
[-I][--IMAGE]: path of image.
[-C][--SE_CATALOG]: path of source table containing following columns: "NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD", "MAG_AUTO".
[-S][--SEGMAP]: path of segmentation map corresponding to SE_CATALOG.
[--PS]: if set, use PANSTARRS DR2 API to do crossmatch.
[-m][--MAG_THRESHOLD]: magnitude threshold below which normalization of stars are measured (default: 15).
[-W][--WEIGHT]: path of weight map used in source extraction. Optional.
[--DIR_NAME]: directory name for saving outputs.
> Example Usage
1. In jupyter notebook / lab
%matplotlib inline
%run Measure_Rnorm.py -f "G" -r 12 -b "[3000, 1300, 4000, 2300]"
2. In bash
python Measure_Rnorm.py -f "G" -r 12 -b "[3000, 1300, 4000, 2300]"
"""
import os
import re
import sys
import getopt
import numpy as np
# from src.utils import *
# from src.modeling import *
# from src.plotting import *
from src.task import Match_Mask_Measure
def main(argv):
# Default Parameters
band = "G"
save, draw = True, True
use_PS1_DR2 = False
mag_thre, r_scale = 15, 12
image_bounds = [3000, 1300, 4000, 2300] # in image coords
# Get Script Options
try:
optlists, args = getopt.getopt(argv, "f:b:m:r:I:C:S:W:",
["FILTER=", "IMAGE_BOUNDS=", "IMAGE=",
"MAG_THRESHOLD=", "R_SCALE=", "PS", "DIR_NAME=",
"SE_CATALOG=", "SEGMAP=", "WEIGHT_MAP="])
opts = [opt for opt, arg in optlists]
except getopt.GetoptError as e:
print(e)
sys.exit('Wrong Option.')
for opt, arg in optlists:
if opt in ("-f", "--FILTER"):
if arg in ["G", "R", "r", "g"]:
band = arg.upper()
else:
sys.exit("Filter Not Available.")
# Work Path
work_dir = "/home/qliu/Desktop/PSF"
# Input Path
hdu_path = os.path.join(work_dir, "data/coadd_Sloan%s_NGC_5907.fits"%band)
seg_map = os.path.join(work_dir, "SE_APASS/coadd_Sloan%s_NGC_5907_seg.fits"%band)
SE_catalog = os.path.join(work_dir, "SE_APASS/coadd_Sloan%s_NGC_5907.cat"%band)
weight_map = os.path.join(work_dir, "SE_APASS/weight_NGC5907.fits")
# Output Path
dir_name = os.path.join(work_dir, 'psf_modeling/output/Measure')
# Handling Options
for opt, arg in optlists:
if opt in ("-I", "--IMAGE"):
hdu_path = arg
elif opt in ("-b", "--IMAGE_BOUNDS"):
image_bounds = np.array(re.findall(r'\d+', arg), dtype=int).reshape(-1,4)
elif opt in ("-r", "--R_SCALE"):
r_scale = np.float(arg)
elif opt in ("-m", "--MAG_THRESHOLD"):
mag_thre = np.float(arg)
elif opt in ("-C", "--SE_CATALOG"):
SE_catalog = arg
elif opt in ("-S", "--SEGMAP"):
seg_map = arg
elif opt in ("-W", "--WEIGHT_MAP"):
weight_map = arg
elif opt in ("--DIR_NAME"):
dir_name = arg
if ("--PS" in opts): use_PS1_DR2 = True
check_save_path(dir_name, make_new=False)
# Run Measurement~!
Match_Mask_Measure(hdu_path, image_bounds, seg_map, SE_catalog,
weight_map=weight_map, band=band,
r_scale=r_scale, mag_thre=mag_thre,
draw=draw, use_PS1_DR2=use_PS1_DR2,
save=save, dir_name=dir_name)
return opts
if __name__ == "__main__":
main(sys.argv[1:])
| 4,319 | 36.241379 | 136 |
py
|
elderflower
|
elderflower-master/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# Mock imports, useful when some external dependencies are not met at build time and break the building process.
autodoc_mock_imports = ['galsim']
# -- Project information -----------------------------------------------------
project = 'elderflower'
copyright = '2020, Q. Liu'
author = 'Q. Liu'
# The full version, including alpha/beta/rc tags
release = '0.3'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 2,136 | 33.467742 | 112 |
py
|
3SD
|
3SD-main/data_loader.py
|
# data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
		# convert HxWxC arrays to CxHxW tensor layout
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label =sample['imidx'], sample['image'], sample['label']
tmpLbl = np.zeros(label.shape)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
			# normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
elif self.flag == 1: #with Lab color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImg[:,:,0] = image[:,:,0]
tmpImg[:,:,1] = image[:,:,0]
tmpImg[:,:,2] = image[:,:,0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
image = image/np.max(image)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
		# convert HxWxC arrays to CxHxW tensor layout
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class SalObjDataset(Dataset):
def __init__(self,img_name_list,lbl_name_list,transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self,idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if(0==len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if(3==len(label_3.shape)):
label = label_3[:,:,0]
elif(2==len(label_3.shape)):
label = label_3
if(3==len(image.shape) and 2==len(label.shape)):
label = label[:,:,np.newaxis]
elif(2==len(image.shape) and 2==len(label.shape)):
image = image[:,:,np.newaxis]
label = label[:,:,np.newaxis]
sample = {'imidx':imidx, 'image':image, 'label':label}
if self.transform:
sample = self.transform(sample)
return sample
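# Illustrative usage sketch (not part of the original code): compose the transforms
# above into a DataLoader; the file name lists are assumed to be lists of image /
# label paths, and the batch / crop sizes below are hypothetical.
def _demo_dataloader(img_name_list, lbl_name_list):
	dataset = SalObjDataset(img_name_list=img_name_list,
		lbl_name_list=lbl_name_list,
		transform=transforms.Compose([RescaleT(320), RandomCrop(288), ToTensorLab(flag=0)]))
	return DataLoader(dataset, batch_size=8, shuffle=True, num_workers=1)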
| 9,040 | 32.609665 | 113 |
py
|
3SD
|
3SD-main/new_data_loader.py
|
# data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'],sample['label'],sample['edge']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
edge = transform.resize(edge, (self.output_size, self.output_size), mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl,'edge':edge}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'],sample['label'],sample['edge']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
edge = edge[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
edge = transform.resize(edge, (new_h, new_w), mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl,'edge':edge}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'], sample['label'], sample['edge']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
edge = edge[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
edge = edge[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label, 'edge':edge}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label, edge = sample['imidx'], sample['image'], sample['label'], sample['edge']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
tmpedge = np.zeros(edge.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
		if(np.max(edge)<1e-6):
			edge = edge
		else:
			edge = edge / np.max(edge)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpedge[:, :, 0] = edge[:, :, 0]
		# convert HxWxC arrays to CxHxW tensor layout
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
tmpedge = edge.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl), 'edge': torch.from_numpy(tmpedge)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label, edge =sample['imidx'], sample['image'], sample['label'], sample['edge']
tmpLbl = np.zeros(label.shape)
tmpedge = np.zeros(edge.shape)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
		if(np.max(edge)<1e-6):
			edge = edge
		else:
			edge = edge / np.max(edge)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
			# normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
elif self.flag == 1: #with Lab color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImg[:,:,0] = image[:,:,0]
tmpImg[:,:,1] = image[:,:,0]
tmpImg[:,:,2] = image[:,:,0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
image = image/np.max(image)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpedge[:, :, 0] = edge[:, :, 0]
		# transpose from (H, W, C) to PyTorch's (C, H, W) layout
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
tmpedge = edge.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl), 'edge': torch.from_numpy(tmpedge)}
class SalObjDataset(Dataset):
def __init__(self,img_name_list,lbl_name_list,edge_name_list,transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.edge_name_list = edge_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self,idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if(0==len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if(3==len(label_3.shape)):
label = label_3[:,:,0]
elif(2==len(label_3.shape)):
label = label_3
if (0 == len(self.edge_name_list)):
edge_3 = np.zeros(image.shape)
else:
edge_3 = io.imread(self.edge_name_list[idx])
edge = np.zeros(edge_3.shape[0:2])
if (3 == len(edge_3.shape)):
edge = edge_3[:, :, 0]
elif (2 == len(edge_3.shape)):
edge = edge_3
if(3==len(image.shape) and 2==len(label.shape) and 2==len(edge.shape)):
label = label[:,:,np.newaxis]
edge = edge[:, :, np.newaxis]
elif(2==len(image.shape) and 2==len(label.shape) and 2==len(edge.shape)):
image = image[:,:,np.newaxis]
label = label[:,:,np.newaxis]
edge = edge[:, :, np.newaxis]
sample = {'imidx':imidx, 'image':image, 'label':label, 'edge':edge}
if self.transform:
sample = self.transform(sample)
return sample
| 10,287 | 33.756757 | 147 |
py
|
3SD
|
3SD-main/basenet_train.py
|
import os
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
import numpy as np
import random
import glob
import os
import copy
from new_data_loader import Rescale
from new_data_loader import RescaleT
from new_data_loader import RandomCrop
from new_data_loader import ToTensor
from new_data_loader import ToTensorLab
from new_data_loader import SalObjDataset
from functools import wraps, partial
import smoothness
from model import U2NET
from model import U2NETP
import pdb
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
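# For intuition, a rough numeric sketch of the update above (illustrative values only):
# with beta = 0.9, an old teacher weight of 1.0 and a new student weight of 0.0 give
# 1.0 * 0.9 + (1 - 0.9) * 0.0 = 0.9, so the teacher tracks the student slowly.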
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
#normalize camp map
def norm_cam_map(input_cam,bag_map,pred_class):
B, C, H, W = input_cam.shape
bag_map = F.upsample(bag_map, size=[H,W], mode='bilinear')
cam_map = torch.zeros(B,1,H,W).cuda()
probs = pred_class.softmax(dim = -1)
for idx in range(B):
tmp_cam_vec = input_cam[idx,:,:,:].view( C, H * W).softmax(dim = -1)
tmp_cam_vec = tmp_cam_vec[torch.argmax(probs[idx,:]),:]
tmp_cam_vec = tmp_cam_vec - tmp_cam_vec.min()
tmp_cam_vec = tmp_cam_vec / (tmp_cam_vec.max())
tmp_vec = tmp_cam_vec
tmp_vec = tmp_vec.view(1, H, W)
cam_map[idx,:,:,:] = tmp_vec
cam_map = F.upsample(cam_map, size=[320,320], mode='bilinear')
return cam_map
# ------- 1. define loss function --------
bce_loss = nn.BCELoss(size_average=True)
def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
eps = 0.000000001
loss0 = bce_loss(d0,labels_v)
loss1 = bce_loss(d1,labels_v)
loss2 = bce_loss(d2,labels_v)
loss3 = bce_loss(d3,labels_v)
loss4 = bce_loss(d4,labels_v)
loss5 = bce_loss(d5,labels_v)
loss6 = bce_loss(d6,labels_v)
loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
return loss0, loss
def gated_edge(pred,edge):
kernel = np.ones((11, 11)) / 121.0
kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 11,11)
if torch.cuda.is_available():
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor).cuda(), requires_grad=False)
dilated_pred = torch.clamp(torch.nn.functional.conv2d(pred, kernel_tensor, padding=(5, 5)), 0, 1) # performing dilation
gated_edge_out = edge *dilated_pred
'''B, C, H, W = gated_edge_out.shape
gated_edge_out = gated_edge_out.view(B, C * H * W)
gated_edge_out = gated_edge_out / (gated_edge_out.max(dim=1)[0].view(B, 1))
gated_edge_out = gated_edge_out.view(B, C, H, W)'''
return gated_edge_out
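# gated_edge: the 11x11 box-filter convolution followed by clamping acts as a soft
# dilation of `pred`, so the returned map keeps only the parts of `edge` that lie
# on or near predicted salient regions.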
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
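# dino_loss_fn computes the DINO cross-entropy H(p_t, p_s) = -sum_k p_t(k) * log(p_s(k)),
# where the teacher logits are centered by `centers` and sharpened with `teacher_temp`,
# the student logits are softened with `student_temp`, and the teacher is detached so
# only the student receives gradients.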
def dino_loss_bag_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = student_logits
teacher_probs = ((teacher_logits-centers))
# creating positive and negative pairs
student_global = F.upsample(student_logits, size=[1,1], mode='bilinear')
B,C,H,W = student_logits.shape
student_probs = student_probs.view(B,C,H*W).transpose(1,2)
student_global = student_global.view(B,C,1)
student_global = student_global/student_global.norm(dim=1).view(B,1,1)
student_probs = student_probs/student_probs.norm(dim=-1).view(B,H*W,1)
sim_student = torch.bmm(student_probs,student_global)
pos_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
pos_student_mask[sim_student>0.95*sim_student.data.detach().max()] = 1
neg_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
neg_student_mask[sim_student<1.1*sim_student.data.detach().min()] = 1
neg_student_mask = torch.bmm(pos_student_mask,neg_student_mask.transpose(1,2))
teacher_global = F.upsample(teacher_probs, size=[1,1], mode='bilinear')
teacher_probs = teacher_probs.view(B,C,H*W).transpose(1,2)
teacher_global = teacher_global.view(B,C,1)
teacher_global = teacher_global/teacher_global.norm(dim=1).view(B,1,1)
teacher_probs = teacher_probs/teacher_probs.norm(dim=-1).view(B,H*W,1)
sim_teacher = torch.bmm(teacher_probs,teacher_global)
pos_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
pos_teacher_mask[sim_teacher>0.95*sim_teacher.data.detach().max()] = 1
pos_teacher_mask = torch.bmm(pos_student_mask,pos_teacher_mask.transpose(1,2))
neg_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
neg_teacher_mask[sim_teacher<1.1*sim_teacher.data.detach().min()] = 1
neg_teacher_mask = torch.bmm(pos_student_mask,neg_teacher_mask.transpose(1,2))
pos_student_mask = torch.bmm(pos_student_mask,pos_student_mask.transpose(1,2))
sim_student = torch.exp(torch.bmm(student_probs,student_probs.transpose(1,2))/student_temp)
sim_teacher = torch.exp(torch.bmm(student_probs,teacher_probs.transpose(1,2))/teacher_temp)
denom = (pos_student_mask+neg_student_mask)*sim_student + (pos_teacher_mask+neg_teacher_mask)*sim_teacher
denom = denom.sum(dim=-1).view(B,H*W,1) +0.000001
loss = pos_student_mask*sim_student/denom + (1-pos_student_mask)
loss = -1*pos_student_mask*torch.log(loss) -1*pos_teacher_mask*torch.log(pos_teacher_mask*sim_teacher/denom + (1-pos_teacher_mask))
return 0.003*loss.mean()
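# dino_loss_bag_fn builds a patch-level contrastive objective: per-patch features are
# compared (cosine similarity) against the image-global descriptor to pick positive
# patches (similarity near the maximum) and negatives (similarity near the minimum),
# for both student and teacher views. The masked, temperature-scaled exponentiated
# similarities then form an InfoNCE-style ratio (positives over the pooled denominator),
# whose negative log is averaged and scaled by 0.003.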
# ------- 2. set the directory of training dataset --------
model_name = 'u2net' #'u2netp'
data_dir = './data/training/DUTS/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
tra_edge_dir = 'edge/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
image_ext = '.jpg'
label_ext = '.png'
model_dir = os.path.join(os.getcwd(), 'saved_models', 'fullysup_patch32_' + model_name + os.sep)
if (os.path.isdir(model_dir)==False):
os.mkdir(model_dir)
epoch_num = 100000
batch_size_train = 10
batch_size_val = 1
train_num = 0
val_num = 0
tra_img_name_list = list(glob.glob(data_dir + tra_image_dir + '*' + image_ext))
tra_lbl_name_list = []
tra_edge_name_list = []
for img_path in tra_img_name_list:
img_name = img_path.split(os.sep)[-1]
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)
tra_edge_name_list.append(data_dir + tra_edge_dir + imidx + label_ext)
print("---")
print("train images: ", len(tra_img_name_list))
print("train labels: ", len(tra_lbl_name_list))
print("train edges: ", len(tra_edge_name_list))
print("---")
train_num = len(tra_img_name_list)
salobj_dataset = SalObjDataset(
img_name_list=tra_img_name_list,
lbl_name_list=tra_lbl_name_list,
edge_name_list=tra_edge_name_list,
transform=transforms.Compose([
RescaleT(352),
RandomCrop(320),
ToTensorLab(flag=0)]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=1)
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
#print(self.teacher_centers_bag.shape)
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
def reset_moving_average(self):
del self.teacher_encoder
self.teacher_encoder = None
def update_moving_average(self):
assert self.teacher_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)
new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
self.teacher_centers.copy_(new_teacher_centers)
#pdb.set_trace()
new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag,self.last_teacher_centers_bag)
self.teacher_centers_bag.copy_(new_teacher_centers_bag)
def forward(
self,
x,
return_embedding = False,
return_projection = True,
student_temp = None,
teacher_temp = None
):
if return_embedding:
return self.student_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-1]
student_proj_two = self.student_encoder(local_image_two)[-1]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)[-1]
teacher_proj_two = teacher_encoder(global_image_two)[-1]
#print(teacher_proj_one.shape)
loss_fn_ = partial(
dino_loss_fn,
student_temp = default(student_temp, self.student_temp),
teacher_temp = default(teacher_temp, self.teacher_temp),
centers = self.teacher_centers
)
teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
self.last_teacher_centers.copy_(teacher_logits_avg)
loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
return loss
def bag_loss(self, x, return_embedding = False,return_projection = True,student_temp = None,teacher_temp = None):
if return_embedding:
return self.student_encoder(x, return_projection=return_projection)
image_one, image_two = self.augment_bag(x), self.augment_bag(x)
local_image_one, local_image_two = self.local_crop_bag(image_one), self.local_crop_bag(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-2]
student_proj_two = self.student_encoder(local_image_two)[-2]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)
teacher_proj_two = teacher_encoder(global_image_two)
#pdb.set_trace()
teacher_logits_avg = torch.cat((teacher_proj_one[-2], teacher_proj_two[-2])).mean(dim=0)
self.last_teacher_centers_bag.copy_(teacher_logits_avg)
student_proj_two_glb = student_proj_two.mean(dim=-1).mean(dim=-1)
student_proj_one_glb = student_proj_one.mean(dim=-1).mean(dim=-1)
loss_fn_bag = partial(
dino_loss_bag_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers_bag
)
loss_fn_ = partial(
dino_loss_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers
)
loss = (loss_fn_bag(teacher_proj_one[-2], student_proj_two) + loss_fn_bag(teacher_proj_two[-2],
student_proj_one)) / 4
loss += (loss_fn_(teacher_proj_one[-1], student_proj_two_glb) + loss_fn_(teacher_proj_two[-1],
student_proj_one_glb)) / 4
return loss
# ------- 4. define model --------
# define the net
'''if(model_name=='u2net'):
net = U2NET(3, 1)
elif(model_name=='u2netp'):
net = U2NETP(3,1)'''
dino = Dino(model_name,[320],32)
if torch.cuda.is_available():
dino.cuda()
#dino = torch.nn.DataParallel(dino)
# ------- 5. define optimizer --------
print("---define optimizer...")
optimizer = optim.Adam(dino.parameters(), lr=0.0006, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
dino_optimizer = optim.Adam(dino.parameters(), lr=0.0003, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# ------- 6. training process --------
print("---start training...")
ite_num = 0
running_loss = 0.0
running_tar_loss = 0.0
ite_num4val = 0
save_frq = 10000 # save the model every 10000 iterations
sm_loss_weight = 0.3
smooth_loss = smoothness.smoothness_loss(size_average=True)
for epoch in range(0,epoch_num):
#net.train()
dino.train()
for i, data in enumerate(salobj_dataloader):
ite_num = ite_num + 1
ite_num4val = ite_num4val + 1
inputs, labels, edges = data['image'], data['label'], data['edge']
inputs = inputs.type(torch.FloatTensor)
labels = labels.type(torch.FloatTensor)
edges = edges.type(torch.FloatTensor)
# wrap them in Variable
if torch.cuda.is_available():
inputs_v, labels_v, edges_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(),requires_grad=False), Variable(edges.cuda(),requires_grad=False)
else:
inputs_v, labels_v, edges_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False), Variable(edges, requires_grad=False)
        # zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
loss = 0
loss2 = 0
pseudo_label_gts = 0
d0, d1, d2, d3, d4, d5, d6, pred_edges, cam_map, bag_map, pred_class = dino.student_encoder(inputs_v)
loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6 , labels_v)
smoothLoss_cur1 = sm_loss_weight * smooth_loss(d0, T.Grayscale()(inputs_v))
edge_loss = bce_loss(gated_edge(labels_v,pred_edges), gated_edge(labels_v,edges_v))
loss += edge_loss + smoothLoss_cur1
        if loss == loss:  # proceed only if the loss is not NaN
loss.backward()
optimizer.step()
# # print statistics
if loss == loss:
running_loss += loss.data.item()
if loss2 >0:
running_tar_loss += loss2.data.item()
# del temporary outputs and loss
        del d0, d1, d2, d3, d4, d5, d6, loss2, loss, cam_map, pred_edges, edge_loss, pseudo_label_gts, pred_class
print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f " % (
epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
if ite_num % save_frq == 0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
running_loss = 0.0
running_tar_loss = 0.0
dino.train() # resume train
ite_num4val = 0
if (epoch+1) % 10 ==0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_epoch_%d_train.pth" % (epoch))
torch.save(dino.state_dict(), model_dir + model_name+"_bce_epoch_%d_train_fulldino.pth" % (epoch))
| 21,206 | 37.279783 | 204 |
py
|
3SD
|
3SD-main/3SD_train.py
|
import os
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
import numpy as np
import random
import glob
import os
import copy
from new_data_loader import Rescale
from new_data_loader import RescaleT
from new_data_loader import RandomCrop
from new_data_loader import ToTensor
from new_data_loader import ToTensorLab
from new_data_loader import SalObjDataset
from functools import wraps, partial
import smoothness
from model import U2NET
from model import U2NETP
import pdb
#from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
#from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
#from pytorch_grad_cam.utils.image import show_cam_on_image
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
# normalize CAM map
def norm_cam_map(input_cam,bag_map,pred_class):
B, C, H, W = input_cam.shape
bag_map = F.upsample(bag_map, size=[H,W], mode='bilinear')
cam_map = torch.zeros(B,1,H,W).cuda()
probs = pred_class.softmax(dim = -1)
for idx in range(B):
tmp_cam_vec = input_cam[idx,:,:,:].view( C, H * W).softmax(dim = -1)
tmp_cam_vec = tmp_cam_vec[torch.argmax(probs[idx,:]),:]
tmp_cam_vec = tmp_cam_vec - tmp_cam_vec.min()
tmp_cam_vec = tmp_cam_vec / (tmp_cam_vec.max())
tmp_vec = tmp_cam_vec
tmp_vec = tmp_vec.view(1, H, W)
cam_map[idx,:,:,:] = tmp_vec
cam_map = F.upsample(cam_map, size=[320,320], mode='bilinear')
return cam_map
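# norm_cam_map: for each image, the CAM channel of the predicted (argmax) class is
# softmax-normalized over spatial positions, min-max scaled to [0, 1], and upsampled
# to the 320x320 training resolution; bag_map is resized alongside but not used further here.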
# ------- 1. define loss function --------
bce_loss = nn.BCELoss(size_average=True)
def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
eps = 0.000000001
loss0 = bce_loss(d0,labels_v)
loss1 = bce_loss(d1,labels_v)
loss2 = bce_loss(d2,labels_v)
loss3 = bce_loss(d3,labels_v)
loss4 = bce_loss(d4,labels_v)
loss5 = bce_loss(d5,labels_v)
loss6 = bce_loss(d6,labels_v)
loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
return loss0, loss
def gated_edge(pred,edge):
kernel = np.ones((11, 11)) / 121.0
kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 11,11)
if torch.cuda.is_available():
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor).cuda(), requires_grad=False)
dilated_pred = torch.clamp(torch.nn.functional.conv2d(pred, kernel_tensor, padding=(5, 5)), 0, 1) # performing dilation
gated_edge_out = edge *dilated_pred
'''B, C, H, W = gated_edge_out.shape
gated_edge_out = gated_edge_out.view(B, C * H * W)
gated_edge_out = gated_edge_out / (gated_edge_out.max(dim=1)[0].view(B, 1))
gated_edge_out = gated_edge_out.view(B, C, H, W)'''
return gated_edge_out
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
def dino_loss_bag_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = student_logits
teacher_probs = ((teacher_logits-centers))
# creating positive and negative pairs
student_global = F.upsample(student_logits, size=[1,1], mode='bilinear')
B,C,H,W = student_logits.shape
student_probs = student_probs.view(B,C,H*W).transpose(1,2)
student_global = student_global.view(B,C,1)
student_global = student_global/student_global.norm(dim=1).view(B,1,1)
student_probs = student_probs/student_probs.norm(dim=-1).view(B,H*W,1)
sim_student = torch.bmm(student_probs,student_global)
pos_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
pos_student_mask[sim_student>0.95*sim_student.data.detach().max()] = 1
neg_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
neg_student_mask[sim_student<1.1*sim_student.data.detach().min()] = 1
neg_student_mask = torch.bmm(pos_student_mask,neg_student_mask.transpose(1,2))
teacher_global = F.upsample(teacher_probs, size=[1,1], mode='bilinear')
teacher_probs = teacher_probs.view(B,C,H*W).transpose(1,2)
teacher_global = teacher_global.view(B,C,1)
teacher_global = teacher_global/teacher_global.norm(dim=1).view(B,1,1)
teacher_probs = teacher_probs/teacher_probs.norm(dim=-1).view(B,H*W,1)
sim_teacher = torch.bmm(teacher_probs,teacher_global)
pos_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
pos_teacher_mask[sim_teacher>0.95*sim_teacher.data.detach().max()] = 1
pos_teacher_mask = torch.bmm(pos_student_mask,pos_teacher_mask.transpose(1,2))
neg_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
neg_teacher_mask[sim_teacher<1.1*sim_teacher.data.detach().min()] = 1
neg_teacher_mask = torch.bmm(pos_student_mask,neg_teacher_mask.transpose(1,2))
pos_student_mask = torch.bmm(pos_student_mask,pos_student_mask.transpose(1,2))
sim_student = torch.exp(torch.bmm(student_probs,student_probs.transpose(1,2))/student_temp)
sim_teacher = torch.exp(torch.bmm(student_probs,teacher_probs.transpose(1,2))/teacher_temp)
denom = (neg_student_mask)*sim_student + (neg_teacher_mask)*sim_teacher
denom = denom.sum(dim=-1).view(B,H*W,1) +0.000001
loss = pos_student_mask*sim_student/denom + (1-pos_student_mask)
loss = -1*pos_student_mask*torch.log(loss) -1*pos_teacher_mask*torch.log(pos_teacher_mask*sim_teacher/denom + (1-pos_teacher_mask))
return 0.003*loss.mean()
# ------- 2. set the directory of training dataset --------
model_name = 'u2net' #'u2netp'
data_dir = './data/training/DUTS/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
tra_edge_dir = 'edge/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
#syn_data_dir = './data/training/DUTS-TR/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
#syn_tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
#syn_tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
image_ext = '.jpg'
label_ext = '.png'
model_dir = os.path.join(os.getcwd(), 'saved_models', 'final_patch32_pseudo_dino_edge_pre_trans_' + model_name + os.sep)
if (os.path.isdir(model_dir)==False):
os.mkdir(model_dir)
epoch_num = 100000
batch_size_train = 10
batch_size_val = 1
train_num = 0
val_num = 0
tra_img_name_list = list(glob.glob(data_dir + tra_image_dir + '*' + image_ext))
tra_lbl_name_list = []
tra_edge_name_list = []
for img_path in tra_img_name_list:
img_name = img_path.split(os.sep)[-1]
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)
tra_edge_name_list.append(data_dir + tra_edge_dir + imidx + label_ext)
#syn_tra_img_name_list = list(glob.glob(syn_data_dir + syn_tra_image_dir + '*' + label_ext))
#pdb.set_trace()
#syn_tra_lbl_name_list = []
#for img_path in syn_tra_img_name_list:
# img_name = img_path.split(os.sep)[-1]
# aaa = img_name.split(".")
# bbb = aaa[0:-1]
# imidx = bbb[0]
# for i in range(1,len(bbb)):
# imidx = imidx + "." + bbb[i]
# syn_tra_lbl_name_list.append(syn_data_dir + syn_tra_label_dir + imidx + label_ext)
#pdb.set_trace()
#tra_img_name_list += syn_tra_img_name_list
#tra_lbl_name_list += syn_tra_lbl_name_list
print("---")
print("train images: ", len(tra_img_name_list))
print("train labels: ", len(tra_lbl_name_list))
print("train edges: ", len(tra_edge_name_list))
print("---")
train_num = len(tra_img_name_list)
salobj_dataset = SalObjDataset(
img_name_list=tra_img_name_list,
lbl_name_list=tra_lbl_name_list,
edge_name_list=tra_edge_name_list,
transform=transforms.Compose([
RescaleT(352),
RandomCrop(320),
ToTensorLab(flag=0)]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=1)
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
#print(self.teacher_centers_bag.shape)
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
def reset_moving_average(self):
del self.teacher_encoder
self.teacher_encoder = None
def update_moving_average(self):
assert self.teacher_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)
new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
self.teacher_centers.copy_(new_teacher_centers)
#pdb.set_trace()
new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag,self.last_teacher_centers_bag)
self.teacher_centers_bag.copy_(new_teacher_centers_bag)
def forward(
self,
x,
return_embedding = False,
return_projection = True,
student_temp = None,
teacher_temp = None
):
if return_embedding:
return self.student_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-1]
student_proj_two = self.student_encoder(local_image_two)[-1]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)[-1]
teacher_proj_two = teacher_encoder(global_image_two)[-1]
#print(teacher_proj_one.shape)
loss_fn_ = partial(
dino_loss_fn,
student_temp = default(student_temp, self.student_temp),
teacher_temp = default(teacher_temp, self.teacher_temp),
centers = self.teacher_centers
)
teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
self.last_teacher_centers.copy_(teacher_logits_avg)
loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
return loss
def bag_loss(self, x, return_embedding = False,return_projection = True,student_temp = None,teacher_temp = None):
if return_embedding:
return self.student_encoder(x, return_projection=return_projection)
image_one, image_two = self.augment_bag(x), self.augment_bag(x)
local_image_one, local_image_two = self.local_crop_bag(image_one), self.local_crop_bag(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-2]
student_proj_two = self.student_encoder(local_image_two)[-2]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)
teacher_proj_two = teacher_encoder(global_image_two)
#pdb.set_trace()
teacher_logits_avg = torch.cat((teacher_proj_one[-2], teacher_proj_two[-2])).mean(dim=0)
self.last_teacher_centers_bag.copy_(teacher_logits_avg)
student_proj_two_glb = student_proj_two.mean(dim=-1).mean(dim=-1)
student_proj_one_glb = student_proj_one.mean(dim=-1).mean(dim=-1)
loss_fn_bag = partial(
dino_loss_bag_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers_bag
)
loss_fn_ = partial(
dino_loss_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers
)
loss = (loss_fn_bag(teacher_proj_one[-2], student_proj_two) + loss_fn_bag(teacher_proj_two[-2],
student_proj_one)) / 4
loss += (loss_fn_(teacher_proj_one[-1], student_proj_two_glb) + loss_fn_(teacher_proj_two[-1],
student_proj_one_glb)) / 4
return loss
# ------- 4. define model --------
# define the net
'''if(model_name=='u2net'):
net = U2NET(3, 1)
elif(model_name=='u2netp'):
net = U2NETP(3,1)'''
dino = Dino(model_name,[320],32)
if torch.cuda.is_available():
dino.cuda()
#dino = torch.nn.DataParallel(dino)
# ------- 5. define optimizer --------
print("---define optimizer...")
optimizer = optim.Adam(dino.parameters(), lr=0.0006, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
dino_optimizer = optim.Adam(dino.parameters(), lr=0.0003, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# ------- 6. training process --------
print("---start training...")
ite_num = 0
running_loss = 0.0
running_tar_loss = 0.0
ite_num4val = 0
save_frq = 10000 # save the model every 10000 iterations
sm_loss_weight = 0.3
smooth_loss = smoothness.smoothness_loss(size_average=True)
for epoch in range(0,epoch_num):
#net.train()
dino.train()
for i, data in enumerate(salobj_dataloader):
ite_num = ite_num + 1
ite_num4val = ite_num4val + 1
inputs, labels, edges = data['image'], data['label'], data['edge']
inputs = inputs.type(torch.FloatTensor)
labels = labels.type(torch.FloatTensor)
edges = edges.type(torch.FloatTensor)
# wrap them in Variable
if torch.cuda.is_available():
inputs_v, labels_v, edges_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(),requires_grad=False), Variable(edges.cuda(),requires_grad=False)
else:
inputs_v, labels_v, edges_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False), Variable(edges, requires_grad=False)
        # zero the parameter gradients
dino_optimizer.zero_grad()
# forward + backward + optimize
dino_loss = 0
if (ite_num % 2 == 0):
dino_loss = dino(inputs_v)
            if dino_loss == dino_loss and dino_loss != 0:  # skip if NaN or not computed this iteration
print("dino_loss : %3f "%(dino_loss))
dino_loss.backward()
dino_optimizer.step()
dino.update_moving_average()
dino_optimizer.zero_grad()
dino_optimizer.zero_grad()
dino_bag_loss = 0
if (ite_num % 2 == 1):
dino_bag_loss = dino.bag_loss(inputs_v)
            if dino_bag_loss == dino_bag_loss and dino_bag_loss != 0:  # skip if NaN or not computed this iteration
print("dino_bag_loss : %3f " % (dino_bag_loss))
dino_bag_loss.backward()
dino_optimizer.step()
dino.update_moving_average()
dino_optimizer.zero_grad()
optimizer.zero_grad()
loss = 0
loss2 = 0
pseudo_label_gts = 0
d0, d1, d2, d3, d4, d5, d6, pred_edges, cam_map, bag_map, pred_class = dino.student_encoder(inputs_v)
edge_loss = 0
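        # After epoch 120, pseudo ground truths are built from the self-supervised CAM:
        # the normalized CAM is thresholded at 0.5, fused with the edge map through a soft
        # union (a + b - a*b), gated by the dilated CAM, rescaled per image, and detached
        # before being used as the supervision target below.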
if epoch>=120:
norm_cam = norm_cam_map(cam_map.detach().data,bag_map.detach().data,pred_class)
norm_cam[norm_cam<0.5] = 0
pseudo_label_gts = gated_edge(norm_cam,norm_cam+edges_v-(norm_cam*edges_v))
B, C, H, W = labels_v.shape
pseudo_label_gts = pseudo_label_gts.view(B, C * H * W)
pseudo_label_gts = (pseudo_label_gts-pseudo_label_gts.min(dim=1)[0].view(B, 1))/ (pseudo_label_gts.max(dim=1)[0].view(B, 1))
pseudo_label_gts = pseudo_label_gts.view(B, C, H, W).detach().data
loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6 , pseudo_label_gts)
edge_loss = bce_loss(gated_edge(pseudo_label_gts,pred_edges), gated_edge(pseudo_label_gts,edges_v))
smoothLoss_cur1 = sm_loss_weight * smooth_loss(d0, T.Grayscale()(inputs_v))
loss += edge_loss + smoothLoss_cur1
if loss == loss:
loss.backward()
optimizer.step()
# # print statistics
if loss == loss:
running_loss += loss.data.item()
if loss2 >0:
running_tar_loss += loss2.data.item()
# del temporary outputs and loss
del d0, d1, d2, d3, d4, d5, d6, loss2, loss, cam_map, pred_edges, edge_loss, pseudo_label_gts, pred_class, dino_loss, dino_bag_loss
print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f " % (
epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
if ite_num % save_frq == 0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
running_loss = 0.0
running_tar_loss = 0.0
dino.train() # resume train
ite_num4val = 0
if (epoch+1) % 10 ==0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_epoch_%d_train.pth" % (epoch))
torch.save(dino.state_dict(), model_dir + model_name+"_bce_epoch_%d_train_fulldino.pth" % (epoch))
| 23,488 | 38.018272 | 204 |
py
|
3SD
|
3SD-main/compute_and_plot.py
|
import os
import torch
from sklearn.metrics import f1_score, precision_score, recall_score
'''from sklearn.metrics import (precision_recall_curve, PrecisionRecallDisplay)
from sklearn.metrics import precision_recall_curve'''
import cv2
import pdb
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib as mpl
import glob
def Sobel_op(img):
kernel_x = np.array([[1,0,-1],[2,0,-2],[1,0,-1]])
kernel_x_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel_x, 0), 0)) # size: (1, 1, 11,11)
kernel_x_tensor = Variable(kernel_x_tensor.type(torch.FloatTensor), requires_grad=False)
kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
kernel_y_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel_y, 0), 0)) # size: (1, 1, 11,11)
kernel_y_tensor = Variable(kernel_y_tensor.type(torch.FloatTensor), requires_grad=False)
Gx = torch.nn.functional.conv2d(img, kernel_x_tensor, padding=(1, 1))
Gy = torch.nn.functional.conv2d(img, kernel_y_tensor, padding=(1, 1))
G = torch.sqrt(Gx*Gx + Gy*Gy)
G = F.tanh(G)
kernel = np.ones((3, 3)) / 9.0
kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 11,11)
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor), requires_grad=False)
dilated_G = torch.clamp(torch.nn.functional.conv2d(G, kernel_tensor, padding=(1,1)), 0, 1)
return dilated_G
def B_measure(gt,target):
h, w = gt.shape
gt = gt.astype(np.float32)
target = target.astype(np.float32)
gt = torch.from_numpy(gt)
target = torch.from_numpy(target)
G_gt = Sobel_op(gt.view(1,1,h,w))
G_target = Sobel_op(target.view(1, 1, h, w))
B = 1 - (2*(torch.sum(G_gt*G_target))/(torch.sum(G_target*G_target)+torch.sum(G_gt*G_gt)))
return B
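# B_measure compares boundary quality: with G_gt and G_pred the (tanh-squashed, dilated)
# Sobel gradient maps, B = 1 - 2*sum(G_gt*G_pred) / (sum(G_pred^2) + sum(G_gt^2)),
# i.e. one minus a Dice-style overlap of the two edge maps, so lower is better.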
def E_measure(gt,target):
gt=gt
target=target
#pdb.set_trace()
phi_gt = np.subtract(gt, gt.mean())
phi_target = np.subtract(target, target.mean())
numerator = 2*phi_gt*phi_target
deno = phi_gt*phi_gt + phi_target*phi_target
phi = numerator/deno
Enhance_phi = 0.25*(1+phi)**2
Em = Enhance_phi.mean()
return Em
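# E_measure follows the enhanced-alignment idea: both maps are mean-centered,
# phi = 2*phi_gt*phi_pred / (phi_gt^2 + phi_pred^2), enhanced as (1 + phi)^2 / 4,
# and averaged over all pixels.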
def files(path):
for file in os.listdir(path):
if os.path.isfile(os.path.join(path,file)):
yield file
def object_s(pred, gt):
temp = pred[gt == 1]
x = temp.mean()
sigma_x = temp.std()
score = 2.0 * x / (x * x + 1.0 + sigma_x + 1e-20)
return score
def S_object(pred, gt):
fg = torch.where(gt == 0, torch.zeros_like(pred), pred)
bg = torch.where(gt == 1, torch.zeros_like(pred), 1 - pred)
o_fg = object_s(fg, gt)
o_bg = object_s(bg, 1 - gt)
u = gt.mean()
Q = u * o_fg + (1 - u) * o_bg
return Q
def centroid( gt):
rows, cols = gt.size()[-2:]
gt = gt.view(rows, cols)
cuda = False
if gt.sum() == 0:
if cuda:
X = torch.eye(1).cuda() * round(cols / 2)
Y = torch.eye(1).cuda() * round(rows / 2)
else:
X = torch.eye(1) * round(cols / 2)
Y = torch.eye(1) * round(rows / 2)
else:
total = gt.sum()
if cuda:
i = torch.from_numpy(np.arange(0, cols)).cuda().float()
j = torch.from_numpy(np.arange(0, rows)).cuda().float()
else:
i = torch.from_numpy(np.arange(0, cols)).float()
j = torch.from_numpy(np.arange(0, rows)).float()
X = torch.round((gt.sum(dim=0) * i).sum() / total + 1e-20)
Y = torch.round((gt.sum(dim=1) * j).sum() / total + 1e-20)
return X.long(), Y.long()
def divideGT( gt, X, Y):
h, w = gt.size()[-2:]
area = h * w
gt = gt.view(h, w)
LT = gt[:Y, :X]
RT = gt[:Y, X:w]
LB = gt[Y:h, :X]
RB = gt[Y:h, X:w]
X = X.float()
Y = Y.float()
w1 = X * Y / area
w2 = (w - X) * Y / area
w3 = X * (h - Y) / area
w4 = 1 - w1 - w2 - w3
return LT, RT, LB, RB, w1, w2, w3, w4
def dividePrediction( pred, X, Y):
h, w = pred.size()[-2:]
pred = pred.view(h, w)
LT = pred[:Y, :X]
RT = pred[:Y, X:w]
LB = pred[Y:h, :X]
RB = pred[Y:h, X:w]
return LT, RT, LB, RB
def ssim( pred, gt):
gt = gt.float()
h, w = pred.size()[-2:]
N = h * w
x = pred.mean()
y = gt.mean()
sigma_x2 = ((pred - x) * (pred - x)).sum() / (N - 1 + 1e-20)
sigma_y2 = ((gt - y) * (gt - y)).sum() / (N - 1 + 1e-20)
sigma_xy = ((pred - x) * (gt - y)).sum() / (N - 1 + 1e-20)
    alpha = 4 * x * y * sigma_xy
    beta = (x * x + y * y) * (sigma_x2 + sigma_y2)
    if alpha != 0:
        Q = alpha / (beta + 1e-20)
    elif alpha == 0 and beta == 0:
Q = 1.0
else:
Q = 0
return Q
def S_region(pred, gt):
X, Y = centroid(gt)
gt1, gt2, gt3, gt4, w1, w2, w3, w4 = divideGT(gt, X, Y)
p1, p2, p3, p4 = dividePrediction(pred, X, Y)
Q1 = ssim(p1, gt1)
Q2 = ssim(p2, gt2)
Q3 = ssim(p3, gt3)
Q4 = ssim(p4, gt4)
Q = w1 * Q1 + w2 * Q2 + w3 * Q3 + w4 * Q4
return Q
def S_measure(target,gt):
alpha = 0.5
h, w = gt.shape
gt = torch.from_numpy(gt).type(torch.FloatTensor)
target = torch.from_numpy(target).type(torch.FloatTensor)
gt = gt.view(1,1,h,w)
target = target.view(1,1,h,w)
Q = alpha * S_object(target, gt) + (1 - alpha) * S_region(target, gt)
return Q
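# S_measure is the structure measure S = alpha * S_object + (1 - alpha) * S_region with
# alpha = 0.5, combining object-aware and region-aware (block-wise SSIM) similarity.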
gt_path = './testing/gt/'
target_path = './testing/output_u2net_results/'
test_datasets = ['DUTS']
output_dir = './plots/'
Num_th = 20
Threshold = 0.5
Flag_figs = 0
for dataset in test_datasets:
name = 'exp' + '_' + dataset
precision_list = np.zeros((Num_th, 1))
recall_list = np.zeros((Num_th, 1))
F_score = np.zeros((Num_th, 1))
f1_score_list = []
MAE_list = []
Emeasure_list = []
Bmeasure_list = []
Smeasure_list = []
count = 0
print("----------------------------------------------------------------------------------------")
img_name_list = list(glob.glob(gt_path + dataset + '/*' + '.jpg')) + list(glob.glob(gt_path + dataset + '/*' + '.png'))
print("{} dataset starting, Total image : {} ".format(name,len(img_name_list)))
for file in files(gt_path + dataset):
gt_name = os.path.join(gt_path,dataset,file)
target_name = os.path.join(target_path,dataset,file)
# pdb.set_trace()
# print(target_name)#,precision_list,recall_list)
Gt = cv2.imread(gt_name,0)
pred = cv2.imread(target_name,0)
h, w = Gt.shape
# print(w,h,pred.shape)
pred = cv2.resize(pred,(w,h))
Gt = Gt.astype(np.float32)
pred = pred.astype(np.float32)
Bmeasure_list.append(B_measure(Gt, pred))
gt = np.zeros(Gt.shape)
target = np.zeros(pred.shape)
gt[Gt<Threshold] = 0
gt[Gt>=Threshold] = 1
target[pred<Threshold] = 0
target[pred>=Threshold] = 1
Emeasure_list.append(E_measure(gt, target))
MAE_list.append(np.absolute(np.subtract(gt, target)).mean())
Smeasure_list.append(S_measure(target, gt))
        f1_score_list.append(f1_score(gt.reshape(h*w), target.reshape(h*w), average='binary'))
if Flag_figs == 1:
t_count = 0
for th in np.linspace(0.001, 0.99, Num_th):
gt = np.zeros(Gt.shape)
target = np.zeros(pred.shape)
gt[Gt < th] = 0
gt[Gt >= th] = 1
target[pred < th] = 0
target[pred >= th] = 1
precision_list[t_count] += precision_score(gt.reshape(h*w),target.reshape(h*w))
recall_list[t_count] += recall_score(gt.reshape(h*w),target.reshape(h*w))
#F_score[t_count] += f1_score(gt.reshape(h*w),target.reshape(h*w),labels='binary')
t_count +=1
count +=1
if count%500==0:
print(count)
# print("{} : F1_score : {} gtsum : {} pred sum : {} ".format(file,f1_score_list[-1],gt.sum(),target.sum()))
# pdb.set_trace()
precision_list = precision_list/count
recall_list = recall_list/count
F_score = F_score/count
MAE = sum(MAE_list)/len(MAE_list)
F_mu = sum(f1_score_list)/len(f1_score_list)
E_mu = sum(Emeasure_list)/len(Emeasure_list)
B_mu = sum(Bmeasure_list)/len(Bmeasure_list)
S_mu = sum(Smeasure_list) / len(Smeasure_list)
np.savez('%s/%s.npz' % (output_dir, name), precision_list=precision_list, recall_list=recall_list, F_score=F_score, MAE=MAE, F_mu=F_mu, E_mu=E_mu, B_mu=B_mu, S_mu=S_mu)
print("Dataset:{} Mean F1_Score : {}".format(dataset,F_mu))
print("Dataset:{} Mean MAE : {}".format(dataset,MAE))
print("Dataset:{} Mean E_measure : {}".format(dataset,E_mu))
print("Dataset:{} Mean B_measure : {}".format(dataset,B_mu))
print("Dataset:{} Mean S_measure : {}".format(dataset, S_mu))
print("{} dataset done".format(dataset))
print("----------------------------------------------------------------------------------------")
#print("Mean precision_Score : {}".format(sum(precision_list)/len(precision_list)))
#print("Mean recall_Score : {}".format(sum(recall_list)/len(recall_list)))
#pr_display = PrecisionRecallDisplay(precision=precision_list, recall=recall_list).plot()
#mpl.use('tkagg')
plt.plot(recall_list,precision_list)
plt.savefig(output_dir + name+'_'+'Precision_recall.png')
plt.clf()
plt.plot(np.linspace(0, 255, Num_th), F_score)
plt.savefig(output_dir + name+'_'+'Fscore.png')
plt.clf()
| 9,521 | 33.625455 | 172 |
py
|
3SD
|
3SD-main/u2net_test_pseudo_dino_final.py
|
import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
from functools import wraps, partial
import pdb
import numpy as np
from PIL import Image
import glob
import random
import copy
from data_loader import RescaleT
from data_loader import ToTensor
from data_loader import ToTensorLab
from data_loader import SalObjDataset
from model import U2NET # full size version 173.6 MB
from model import U2NETP # small version u2net 4.7 MB
import smoothness
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
# normalize the predicted SOD probability map
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
def save_output(image_name,pred,d_dir):
predict = pred
predict = predict.squeeze()
predict_np = predict.cpu().data.numpy()
im = Image.fromarray(predict_np*255).convert('RGB')
img_name = image_name.split(os.sep)[-1]
image = io.imread(image_name)
imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
pb_np = np.array(imo)
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
imo.save(d_dir+imidx+'.png')
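# save_output writes the prediction as an 8-bit PNG: the map is scaled to [0, 255],
# resized back to the original image size with bilinear resampling, and saved under
# the original file stem.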
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
    def reset_moving_average(self):
        del self.teacher_encoder
        self.teacher_encoder = None

    def update_moving_average(self):
        assert self.teacher_encoder is not None, 'target encoder has not been created yet'
        update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)

        new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
        self.teacher_centers.copy_(new_teacher_centers)
        #pdb.set_trace()
        new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag, self.last_teacher_centers_bag)
        self.teacher_centers_bag.copy_(new_teacher_centers_bag)
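    # Intended to be called once per optimizer step: EMA-updates the teacher weights
    # and folds the most recent batch statistics into both centering buffers.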
    def forward(
        self,
        x,
        return_embedding = False,
        return_projection = True,
        student_temp = None,
        teacher_temp = None
    ):
        if return_embedding:
            return self.student_encoder(x, return_projection = return_projection)

        image_one, image_two = self.augment1(x), self.augment2(x)

        local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
        global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)

        student_proj_one = self.student_encoder(local_image_one)[-1]
        student_proj_two = self.student_encoder(local_image_two)[-1]

        with torch.no_grad():
            teacher_encoder = self._get_teacher_encoder()
            teacher_proj_one = teacher_encoder(global_image_one)[-1]
            teacher_proj_two = teacher_encoder(global_image_two)[-1]
            #print(teacher_proj_one.shape)
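        # The teacher is evaluated without gradients; only the student is trained
        # directly, and the teacher follows it through the EMA update.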
        loss_fn_ = partial(
            dino_loss_fn,
            student_temp = default(student_temp, self.student_temp),
            teacher_temp = default(teacher_temp, self.teacher_temp),
            centers = self.teacher_centers
        )

        teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
        self.last_teacher_centers.copy_(teacher_logits_avg)
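        # The batch mean of the teacher logits is stashed here and merged into the
        # running center on the next update_moving_average() call.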
        loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
        return loss
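
# A minimal, hypothetical sketch of how the wrapper above would be driven during
# training; the optimizer, learning rate and loader name are assumptions and not
# part of this script:
#
#   learner = Dino('u2net', [320], 32)
#   opt = torch.optim.Adam(learner.parameters(), lr=1e-4)
#   for images in train_loader:
#       loss = learner(images)            # symmetric DINO loss over two augmented views
#       opt.zero_grad()
#       loss.backward()
#       opt.step()
#       learner.update_moving_average()   # EMA step for teacher weights and centers
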
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  # u2netp

    test_datasets = ['DUTS_Test', 'HKU-IS', 'DUT', 'THUR']
    for dataset in test_datasets:
        image_dir = os.path.join(os.getcwd(), './../testing/', 'img', dataset)
        folder_pred = os.path.join(os.getcwd(), '../testing/', 'output_' + model_name + '_results' + os.sep)
        prediction_dir = os.path.join(os.getcwd(), '../testing/', 'output_' + model_name + '_results', dataset + os.sep)
        model_dir = os.path.join(os.getcwd(), 'saved_models', 'final_patch32_pseudo_dino_edge_pre_trans_' + model_name, model_name + '_bce_epoch_139_train_fulldino.pth')

        if not os.path.exists(folder_pred):
            os.mkdir(folder_pred)
        if not os.path.exists(prediction_dir):
            os.mkdir(prediction_dir)

        img_name_list = list(glob.glob(image_dir + '/*' + '.jpg')) + list(glob.glob(image_dir + '/*' + '.png'))
        #print(img_name_list)

        # --------- 2. dataloader ---------
        #1. dataloader
        test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
                                            lbl_name_list = [],
                                            transform=transforms.Compose([RescaleT(320),
                                                                          ToTensorLab(flag=0)])
                                            )
        test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=1)
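        # Test-time loading: images only (no labels), rescaled to 320x320 and converted
        # to tensors, served one image per batch.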
        # --------- 3. model define ---------
        dino = Dino(model_name, [320], 32)
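        # image_size=[320] and patch_size=32 have to match the configuration the
        # checkpoint below was trained with (cf. the 'final_patch32_...' model name).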
        if torch.cuda.is_available():
            dino.load_state_dict(torch.load(model_dir))
            dino.cuda()
        else:
            dino.load_state_dict(torch.load(model_dir, map_location='cpu'))
        dino.train()
        # --------- 4. inference for each image ---------
        for i_test, data_test in enumerate(test_salobj_dataloader):
            #print("inferencing:", img_name_list[i_test].split(os.sep)[-1])
            inputs_test = data_test['image']
            inputs_test = inputs_test.type(torch.FloatTensor)

            if torch.cuda.is_available():
                inputs_test = Variable(inputs_test.cuda())
            else:
                inputs_test = Variable(inputs_test)

            with torch.no_grad():
                #loss = dino(inputs_test)
                d1, d2, d3, d4, d5, d6, d7, edge, cam_map, bag_map, pred_class = dino.student_encoder(inputs_test)
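            # The student encoder returns several prediction maps d1..d7 (as in U-2-Net)
            # together with edge, CAM and bag maps and a class prediction; only d1 is
            # used as the saliency output here.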
            #pdb.set_trace()

            # normalization
            pred = d1[:, 0, :, :]
            pred = normPRED(pred)

            # save results to test_results folder
            if not os.path.exists(prediction_dir):
                os.makedirs(prediction_dir, exist_ok=True)
            save_output(img_name_list[i_test], pred, prediction_dir)
            print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

            del d1, d2, d3, d4, d5, d6, d7

if __name__ == "__main__":
    main()
| 12,334 | 34.14245 | 169 |
py