| repo (string, 7–55) | path (string, 4–127) | func_name (string, 1–88) | original_string (string, 75–19.8k) | language (string, 1 class) | code (string, 75–19.8k) | code_tokens (sequence) | docstring (string, 3–17.3k) | docstring_tokens (sequence) | sha (string, 40) | url (string, 87–242) | partition (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
DarkEnergySurvey/ugali | ugali/utils/skymap.py | surveyPixel | def surveyPixel(lon, lat, nside_pix, nside_subpix = None):
"""
Return the set of HEALPix pixels that cover the given coordinates at resolution nside_pix.
Optionally return the set of subpixels within those pixels at resolution nside_subpix.
"""
pix = np.unique(ang2pix(nside_pix, lon, lat))
if nside_subpix is None:
return pix
else:
subpix_array = []
for ii in range(0, len(pix)):
subpix = subpixel(pix[ii], nside_pix, nside_subpix)
subpix_array.append(subpix)
return pix, np.array(subpix_array) | python | def surveyPixel(lon, lat, nside_pix, nside_subpix = None):
"""
Return the set of HEALPix pixels that cover the given coordinates at resolution nside_pix.
Optionally return the set of subpixels within those pixels at resolution nside_subpix.
"""
pix = np.unique(ang2pix(nside_pix, lon, lat))
if nside_subpix is None:
return pix
else:
subpix_array = []
for ii in range(0, len(pix)):
subpix = subpixel(pix[ii], nside_pix, nside_subpix)
subpix_array.append(subpix)
return pix, np.array(subpix_array) | [
"def",
"surveyPixel",
"(",
"lon",
",",
"lat",
",",
"nside_pix",
",",
"nside_subpix",
"=",
"None",
")",
":",
"pix",
"=",
"np",
".",
"unique",
"(",
"ang2pix",
"(",
"nside_pix",
",",
"lon",
",",
"lat",
")",
")",
"if",
"nside_subpix",
"is",
"None",
":",
"return",
"pix",
"else",
":",
"subpix_array",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"pix",
")",
")",
":",
"subpix",
"=",
"subpixel",
"(",
"pix",
"[",
"ii",
"]",
",",
"nside_pix",
",",
"nside_subpix",
")",
"subpix_array",
".",
"append",
"(",
"subpix",
")",
"return",
"pix",
",",
"np",
".",
"array",
"(",
"subpix_array",
")"
] | Return the set of HEALPix pixels that cover the given coordinates at resolution nside_pix.
Optionally return the set of subpixels within those pixels at resolution nside_subpix. | [
"Return",
"the",
"set",
"of",
"HEALPix",
"pixels",
"that",
"cover",
"the",
"given",
"coordinates",
"at",
"resolution",
"nside",
".",
"Optionally",
"return",
"the",
"set",
"of",
"subpixels",
"within",
"those",
"pixels",
"at",
"resolution",
"nside_subpix"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L17-L30 | train |
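A minimal usage sketch for `surveyPixel` follows. The inputs are made up, and `ang2pix` is assumed to be ugali's degree-based wrapper, so plain healpy with `lonlat=True` stands in for it here:

```python
import numpy as np
import healpy as hp

# Hypothetical coordinates in degrees; the two nearby points will usually
# land in the same pixel at this resolution.
lon = np.array([45.0, 45.05, 120.0])
lat = np.array([-30.0, -30.02, 10.0])
nside_pix = 32

# The core step of surveyPixel: unique pixels covering the coordinates.
pix = np.unique(hp.ang2pix(nside_pix, lon, lat, lonlat=True))
print(pix)
```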
DarkEnergySurvey/ugali | ugali/utils/skymap.py | allSkyCoordinates | def allSkyCoordinates(nside):
"""
Generate a set of coordinates at the centers of pixels of resolution nside across the full sky.
"""
lon,lat = pix2ang(nside, np.arange(0, hp.nside2npix(nside)))
return lon, lat | python | def allSkyCoordinates(nside):
"""
Generate a set of coordinates at the centers of pixels of resolution nside across the full sky.
"""
lon,lat = pix2ang(nside, np.arange(0, hp.nside2npix(nside)))
return lon, lat | [
"def",
"allSkyCoordinates",
"(",
"nside",
")",
":",
"lon",
",",
"lat",
"=",
"pix2ang",
"(",
"nside",
",",
"np",
".",
"arange",
"(",
"0",
",",
"hp",
".",
"nside2npix",
"(",
"nside",
")",
")",
")",
"return",
"lon",
",",
"lat"
] | Generate a set of coordinates at the centers of pixels of resolution nside across the full sky. | [
"Generate",
"a",
"set",
"of",
"coordinates",
"at",
"the",
"centers",
"of",
"pixels",
"of",
"resolutions",
"nside",
"across",
"the",
"full",
"sky",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L86-L91 | train |
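For reference, the equivalent call with healpy directly, assuming ugali's `pix2ang` wrapper returns (lon, lat) in degrees:

```python
import numpy as np
import healpy as hp

nside = 4
theta, phi = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))
lon, lat = np.degrees(phi), 90.0 - np.degrees(theta)  # pixel-center coordinates
```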
DarkEnergySurvey/ugali | ugali/utils/skymap.py | randomPositionsMask | def randomPositionsMask(mask, nside_pix, n):
"""
Generate n random positions within a HEALPix mask of booleans.
KCB: Likely superseded by the randomPositions function, but more generic.
"""
npix = len(mask)
nside = hp.npix2nside(npix)
# Estimate the number of points that need to be thrown based off
# coverage fraction of the HEALPix mask
coverage_fraction = float(np.sum(mask)) / len(mask)
n_throw = int(n / coverage_fraction)
lon, lat = [], []
latch = True
count = 0
while len(lon) < n:
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
pix = ugali.utils.healpix.angToPix(nside, lon_throw, lat_throw)
cut = mask[pix].astype(bool)
lon = np.append(lon, lon_throw[cut])
lat = np.append(lat, lat_throw[cut])
count += 1
if count > 10:
raise RuntimeError('Too many loops...')
return lon[0:n], lat[0:n] | python | def randomPositionsMask(mask, nside_pix, n):
"""
Generate n random positions within a HEALPix mask of booleans.
KCB: Likely superseded by the randomPositions function, but more generic.
"""
npix = len(mask)
nside = hp.npix2nside(npix)
# Estimate the number of points that need to be thrown based off
# coverage fraction of the HEALPix mask
coverage_fraction = float(np.sum(mask)) / len(mask)
n_throw = int(n / coverage_fraction)
lon, lat = [], []
latch = True
count = 0
while len(lon) < n:
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
pix = ugali.utils.healpix.angToPix(nside, lon_throw, lat_throw)
cut = mask[pix].astype(bool)
lon = np.append(lon, lon_throw[cut])
lat = np.append(lat, lat_throw[cut])
count += 1
if count > 10:
raise RuntimeError('Too many loops...')
return lon[0:n], lat[0:n] | [
"def",
"randomPositionsMask",
"(",
"mask",
",",
"nside_pix",
",",
"n",
")",
":",
"npix",
"=",
"len",
"(",
"mask",
")",
"nside",
"=",
"hp",
".",
"npix2nside",
"(",
"npix",
")",
"# Estimate the number of points that need to be thrown based off",
"# coverage fraction of the HEALPix mask",
"coverage_fraction",
"=",
"float",
"(",
"np",
".",
"sum",
"(",
"mask",
")",
")",
"/",
"len",
"(",
"mask",
")",
"n_throw",
"=",
"int",
"(",
"n",
"/",
"coverage_fraction",
")",
"lon",
",",
"lat",
"=",
"[",
"]",
",",
"[",
"]",
"latch",
"=",
"True",
"count",
"=",
"0",
"while",
"len",
"(",
"lon",
")",
"<",
"n",
":",
"lon_throw",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"0.",
",",
"360.",
",",
"n_throw",
")",
"lat_throw",
"=",
"np",
".",
"degrees",
"(",
"np",
".",
"arcsin",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"1.",
",",
"1.",
",",
"n_throw",
")",
")",
")",
"pix",
"=",
"ugali",
".",
"utils",
".",
"healpix",
".",
"angToPix",
"(",
"nside",
",",
"lon_throw",
",",
"lat_throw",
")",
"cut",
"=",
"mask",
"[",
"pix",
"]",
".",
"astype",
"(",
"bool",
")",
"lon",
"=",
"np",
".",
"append",
"(",
"lon",
",",
"lon_throw",
"[",
"cut",
"]",
")",
"lat",
"=",
"np",
".",
"append",
"(",
"lat",
",",
"lat_throw",
"[",
"cut",
"]",
")",
"count",
"+=",
"1",
"if",
"count",
">",
"10",
":",
"raise",
"RuntimeError",
"(",
"'Too many loops...'",
")",
"return",
"lon",
"[",
"0",
":",
"n",
"]",
",",
"lat",
"[",
"0",
":",
"n",
"]"
] | Generate n random positions within a HEALPix mask of booleans.
KCB: Likely superseded by the randomPositions function, but more generic. | [
"Generate",
"n",
"random",
"positions",
"within",
"a",
"HEALPix",
"mask",
"of",
"booleans",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L153-L185 | train |
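Two details worth noting in `randomPositionsMask`: the `nside_pix` argument is never used (the resolution is recovered from the mask length), and taking `arcsin` of a uniform deviate makes the latitudes uniform on the sphere rather than uniform in latitude. One pass of the rejection step, sketched with healpy and a toy mask (all names below are illustrative):

```python
import numpy as np
import healpy as hp

nside = 8
mask = np.zeros(hp.nside2npix(nside), dtype=bool)
mask[: mask.size // 4] = True                      # toy footprint

coverage_fraction = mask.sum() / mask.size
n = 100
n_throw = int(n / coverage_fraction)               # oversample to offset rejection

lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
pix = hp.ang2pix(nside, lon_throw, lat_throw, lonlat=True)
lon, lat = lon_throw[mask[pix]], lat_throw[mask[pix]]  # keep points inside the mask
```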
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | embed_ising | def embed_ising(source_linear, source_quadratic, embedding, target_adjacency, chain_strength=1.0):
"""Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
are the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0}
"""
# store variables in the target graph that the embedding hasn't used
unused = {v for v in target_adjacency} - set().union(*embedding.values())
# ok, let's begin with the linear biases.
# we spread the value of h evenly over the chain
target_linear = {v: 0. for v in target_adjacency}
for v, bias in iteritems(source_linear):
try:
chain_variables = embedding[v]
except KeyError:
# if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of
# the unused variables. if this turns out to not be an isolated vertex, it will be caught below when
# handling quadratic biases
try:
embedding[v] = {unused.pop()}
except KeyError:
raise ValueError('no embedding provided for source variable {}'.format(v))
chain_variables = embedding[v]
b = bias / len(chain_variables)
for s in chain_variables:
try:
target_linear[s] += b
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
# next up the quadratic biases.
# We spread the quadratic biases evenly over the edges
target_quadratic = {}
for (u, v), bias in iteritems(source_quadratic):
edges = set()
if u not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(u))
if v not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(v))
for s in embedding[u]:
for t in embedding[v]:
try:
if s in target_adjacency[t] and (t, s) not in edges:
edges.add((s, t))
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
if not edges:
raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
b = bias / len(edges)
# in some cases the logical J can have (u, v) and (v, u) as inputs, so make
# sure we are not doubling them up with our choice of ordering
for s, t in edges:
if (s, t) in target_quadratic:
target_quadratic[(s, t)] += b
elif (t, s) in target_quadratic:
target_quadratic[(t, s)] += b
else:
target_quadratic[(s, t)] = b
# finally we need to connect the nodes in the chains
chain_quadratic = {}
for chain in itervalues(embedding):
chain_quadratic.update(chain_to_quadratic(chain, target_adjacency, chain_strength))
return target_linear, target_quadratic, chain_quadratic | python | def embed_ising(source_linear, source_quadratic, embedding, target_adjacency, chain_strength=1.0):
"""Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
are the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0}
"""
# store variables in the target graph that the embedding hasn't used
unused = {v for v in target_adjacency} - set().union(*embedding.values())
# ok, let's begin with the linear biases.
# we spread the value of h evenly over the chain
target_linear = {v: 0. for v in target_adjacency}
for v, bias in iteritems(source_linear):
try:
chain_variables = embedding[v]
except KeyError:
# if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of
# the unused variables. if this turns out to not be an isolated vertex, it will be caught below when
# handling quadratic biases
try:
embedding[v] = {unused.pop()}
except KeyError:
raise ValueError('no embedding provided for source variable {}'.format(v))
chain_variables = embedding[v]
b = bias / len(chain_variables)
for s in chain_variables:
try:
target_linear[s] += b
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
# next up the quadratic biases.
# We spread the quadratic biases evenly over the edges
target_quadratic = {}
for (u, v), bias in iteritems(source_quadratic):
edges = set()
if u not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(u))
if v not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(v))
for s in embedding[u]:
for t in embedding[v]:
try:
if s in target_adjacency[t] and (t, s) not in edges:
edges.add((s, t))
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
if not edges:
raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
b = bias / len(edges)
# in some cases the logical J can have (u, v) and (v, u) as inputs, so make
# sure we are not doubling them up with our choice of ordering
for s, t in edges:
if (s, t) in target_quadratic:
target_quadratic[(s, t)] += b
elif (t, s) in target_quadratic:
target_quadratic[(t, s)] += b
else:
target_quadratic[(s, t)] = b
# finally we need to connect the nodes in the chains
chain_quadratic = {}
for chain in itervalues(embedding):
chain_quadratic.update(chain_to_quadratic(chain, target_adjacency, chain_strength))
return target_linear, target_quadratic, chain_quadratic | [
"def",
"embed_ising",
"(",
"source_linear",
",",
"source_quadratic",
",",
"embedding",
",",
"target_adjacency",
",",
"chain_strength",
"=",
"1.0",
")",
":",
"# store variables in the target graph that the embedding hasn't used",
"unused",
"=",
"{",
"v",
"for",
"v",
"in",
"target_adjacency",
"}",
"-",
"set",
"(",
")",
".",
"union",
"(",
"*",
"embedding",
".",
"values",
"(",
")",
")",
"# ok, let's begin with the linear biases.",
"# we spread the value of h evenly over the chain",
"target_linear",
"=",
"{",
"v",
":",
"0.",
"for",
"v",
"in",
"target_adjacency",
"}",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"source_linear",
")",
":",
"try",
":",
"chain_variables",
"=",
"embedding",
"[",
"v",
"]",
"except",
"KeyError",
":",
"# if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of",
"# the unused variables. if this turns out to not be an isolated vertex, it will be caught below when",
"# handling quadratic biases",
"try",
":",
"embedding",
"[",
"v",
"]",
"=",
"{",
"unused",
".",
"pop",
"(",
")",
"}",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"v",
")",
")",
"chain_variables",
"=",
"embedding",
"[",
"v",
"]",
"b",
"=",
"bias",
"/",
"len",
"(",
"chain_variables",
")",
"for",
"s",
"in",
"chain_variables",
":",
"try",
":",
"target_linear",
"[",
"s",
"]",
"+=",
"b",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'chain variable {} not in target_adjacency'",
".",
"format",
"(",
"s",
")",
")",
"# next up the quadratic biases.",
"# We spread the quadratic biases evenly over the edges",
"target_quadratic",
"=",
"{",
"}",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"source_quadratic",
")",
":",
"edges",
"=",
"set",
"(",
")",
"if",
"u",
"not",
"in",
"embedding",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"u",
")",
")",
"if",
"v",
"not",
"in",
"embedding",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"v",
")",
")",
"for",
"s",
"in",
"embedding",
"[",
"u",
"]",
":",
"for",
"t",
"in",
"embedding",
"[",
"v",
"]",
":",
"try",
":",
"if",
"s",
"in",
"target_adjacency",
"[",
"t",
"]",
"and",
"(",
"t",
",",
"s",
")",
"not",
"in",
"edges",
":",
"edges",
".",
"add",
"(",
"(",
"s",
",",
"t",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'chain variable {} not in target_adjacency'",
".",
"format",
"(",
"s",
")",
")",
"if",
"not",
"edges",
":",
"raise",
"ValueError",
"(",
"\"no edges in target graph between source variables {}, {}\"",
".",
"format",
"(",
"u",
",",
"v",
")",
")",
"b",
"=",
"bias",
"/",
"len",
"(",
"edges",
")",
"# in some cases the logical J can have (u, v) and (v, u) as inputs, so make",
"# sure we are not doubling them up with our choice of ordering",
"for",
"s",
",",
"t",
"in",
"edges",
":",
"if",
"(",
"s",
",",
"t",
")",
"in",
"target_quadratic",
":",
"target_quadratic",
"[",
"(",
"s",
",",
"t",
")",
"]",
"+=",
"b",
"elif",
"(",
"t",
",",
"s",
")",
"in",
"target_quadratic",
":",
"target_quadratic",
"[",
"(",
"t",
",",
"s",
")",
"]",
"+=",
"b",
"else",
":",
"target_quadratic",
"[",
"(",
"s",
",",
"t",
")",
"]",
"=",
"b",
"# finally we need to connect the nodes in the chains",
"chain_quadratic",
"=",
"{",
"}",
"for",
"chain",
"in",
"itervalues",
"(",
"embedding",
")",
":",
"chain_quadratic",
".",
"update",
"(",
"chain_to_quadratic",
"(",
"chain",
",",
"target_adjacency",
",",
"chain_strength",
")",
")",
"return",
"target_linear",
",",
"target_quadratic",
",",
"chain_quadratic"
] | Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
are the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0} | [
"Embeds",
"a",
"logical",
"Ising",
"model",
"onto",
"another",
"graph",
"via",
"an",
"embedding",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L163-L278 | train |
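The bias-spreading rule in the `embed_ising` docstring example can be checked by hand. This standalone snippet reproduces the linear-bias numbers without calling the library:

```python
# Each linear bias h_v is split evenly over the chain embedding v.
source_linear = {'a': 1, 'b': 1}
embedding = {'a': [0, 1], 'b': [2]}

target_linear = {0: 0.0, 1: 0.0, 2: 0.0}
for v, bias in source_linear.items():
    chain = embedding[v]
    for s in chain:
        target_linear[s] += bias / len(chain)

assert target_linear == {0: 0.5, 1: 0.5, 2: 1.0}
# Likewise J_ab = -1 splits over the two available edges (0, 2) and (1, 2),
# giving -0.5 each, and the chain {0, 1} gets -chain_strength on its edge.
```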
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | chain_break_frequency | def chain_break_frequency(samples, embedding):
"""Determines the frequency of chain breaks in the given samples.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
Returns:
dict: The frequency of chain breaks in the form {v: f, ...} where v
is a variable in the source graph and f is the fraction
of chains that were broken as a float.
"""
counts = {v: 0 for v in embedding}
total = 0
for sample in samples:
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if not _all_equal(vals):
counts[v] += 1
total += 1
return {v: counts[v] / total for v in embedding} | python | def chain_break_frequency(samples, embedding):
"""Determines the frequency of chain breaks in the given samples.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
Returns:
dict: The frequency of chain breaks in the form {v: f, ...} where v
is a variable in the source graph and f is the fraction
of chains that were broken as a float.
"""
counts = {v: 0 for v in embedding}
total = 0
for sample in samples:
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if not _all_equal(vals):
counts[v] += 1
total += 1
return {v: counts[v] / total for v in embedding} | [
"def",
"chain_break_frequency",
"(",
"samples",
",",
"embedding",
")",
":",
"counts",
"=",
"{",
"v",
":",
"0",
"for",
"v",
"in",
"embedding",
"}",
"total",
"=",
"0",
"for",
"sample",
"in",
"samples",
":",
"for",
"v",
",",
"chain",
"in",
"iteritems",
"(",
"embedding",
")",
":",
"vals",
"=",
"[",
"sample",
"[",
"u",
"]",
"for",
"u",
"in",
"chain",
"]",
"if",
"not",
"_all_equal",
"(",
"vals",
")",
":",
"counts",
"[",
"v",
"]",
"+=",
"1",
"total",
"+=",
"1",
"return",
"{",
"v",
":",
"counts",
"[",
"v",
"]",
"/",
"total",
"for",
"v",
"in",
"embedding",
"}"
] | Determines the frequency of chain breaks in the given samples.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
Returns:
dict: The frequency of chain breaks in the form {v: f, ...} where v
is a variable in the source graph and f is the fraction
of chains that were broken as a float. | [
"Determines",
"the",
"frequency",
"of",
"chain",
"breaks",
"in",
"the",
"given",
"samples",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L331-L359 | train |
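A hand-written check of the chain-break counting (this assumes `total` increments once per sample, which the flattened indentation above obscures):

```python
embedding = {'a': {0, 1}, 'b': {2}}
samples = [
    {0: +1, 1: +1, 2: -1},   # chain for 'a' intact
    {0: +1, 1: -1, 2: -1},   # chain for 'a' broken
]

counts = {v: 0 for v in embedding}
for sample in samples:
    for v, chain in embedding.items():
        vals = [sample[u] for u in chain]
        if len(set(vals)) > 1:   # stand-in for `not _all_equal(vals)`
            counts[v] += 1

freq = {v: counts[v] / len(samples) for v in embedding}
assert freq == {'a': 0.5, 'b': 0.0}
```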
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | unembed_samples | def unembed_samples(samples, embedding, chain_break_method=None):
"""Return samples over the variables in the source graph.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target model and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
chain_break_method (function, optional): The method used to resolve chain
breaks. Default is :func:`majority_vote`.
Returns:
list: A list of unembedded samples. Each sample is a dict of the form
{v: val, ...} where v is a variable in the source graph and val
is the value associated with the variable.
"""
if chain_break_method is None:
chain_break_method = majority_vote
return list(itertools.chain(*(chain_break_method(sample, embedding) for sample in samples))) | python | def unembed_samples(samples, embedding, chain_break_method=None):
"""Return samples over the variables in the source graph.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target model and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
chain_break_method (function, optional): The method used to resolve chain
breaks. Default is :func:`majority_vote`.
Returns:
list: A list of unembedded samples. Each sample is a dict of the form
{v: val, ...} where v is a variable in the source graph and val
is the value associated with the variable.
"""
if chain_break_method is None:
chain_break_method = majority_vote
return list(itertools.chain(*(chain_break_method(sample, embedding) for sample in samples))) | [
"def",
"unembed_samples",
"(",
"samples",
",",
"embedding",
",",
"chain_break_method",
"=",
"None",
")",
":",
"if",
"chain_break_method",
"is",
"None",
":",
"chain_break_method",
"=",
"majority_vote",
"return",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"(",
"chain_break_method",
"(",
"sample",
",",
"embedding",
")",
"for",
"sample",
"in",
"samples",
")",
")",
")"
] | Return samples over the variables in the source graph.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target model and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
chain_break_method (function, optional): The method used to resolve chain
breaks. Default is :func:`majority_vote`.
Returns:
list: A list of unembedded samples. Each sample is a dict of the form
{v: val, ...} where v is a variable in the source graph and val
is the value associated with the variable. | [
"Return",
"samples",
"over",
"the",
"variables",
"in",
"the",
"source",
"graph",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L362-L384 | train |
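The `list(itertools.chain(*(...)))` idiom in `unembed_samples` matters because every chain-break method is a generator yielding zero or one dicts; `discard` (next row) yields nothing for broken samples, so they simply vanish when the generators are chained. A toy stand-in:

```python
import itertools

def keep_even(x):   # stand-in for discard(): yields zero or one results
    if x % 2 == 0:
        yield x

samples = [1, 2, 3, 4]
kept = list(itertools.chain(*(keep_even(s) for s in samples)))
assert kept == [2, 4]   # "broken" (odd) samples disappear from the output
```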
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | discard | def discard(sample, embedding):
"""Discards the sample if broken.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample, if no chains were broken.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if _all_equal(vals):
unembeded[v] = vals.pop()
else:
return
yield unembeded | python | def discard(sample, embedding):
"""Discards the sample if broken.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample, if no chains were broken.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if _all_equal(vals):
unembeded[v] = vals.pop()
else:
return
yield unembeded | [
"def",
"discard",
"(",
"sample",
",",
"embedding",
")",
":",
"unembeded",
"=",
"{",
"}",
"for",
"v",
",",
"chain",
"in",
"iteritems",
"(",
"embedding",
")",
":",
"vals",
"=",
"[",
"sample",
"[",
"u",
"]",
"for",
"u",
"in",
"chain",
"]",
"if",
"_all_equal",
"(",
"vals",
")",
":",
"unembeded",
"[",
"v",
"]",
"=",
"vals",
".",
"pop",
"(",
")",
"else",
":",
"return",
"yield",
"unembeded"
] | Discards the sample if broken.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample, if no chains were broken. | [
"Discards",
"the",
"sample",
"if",
"broken",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L387-L412 | train |
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | majority_vote | def majority_vote(sample, embedding):
"""Determines the sample values by majority vote.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen to match the most common value in the chain.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if _all_equal(vals):
unembeded[v] = vals.pop()
else:
unembeded[v] = _most_common(vals)
yield unembeded | python | def majority_vote(sample, embedding):
"""Determines the sample values by majority vote.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen to match the most common value in the chain.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if _all_equal(vals):
unembeded[v] = vals.pop()
else:
unembeded[v] = _most_common(vals)
yield unembeded | [
"def",
"majority_vote",
"(",
"sample",
",",
"embedding",
")",
":",
"unembeded",
"=",
"{",
"}",
"for",
"v",
",",
"chain",
"in",
"iteritems",
"(",
"embedding",
")",
":",
"vals",
"=",
"[",
"sample",
"[",
"u",
"]",
"for",
"u",
"in",
"chain",
"]",
"if",
"_all_equal",
"(",
"vals",
")",
":",
"unembeded",
"[",
"v",
"]",
"=",
"vals",
".",
"pop",
"(",
")",
"else",
":",
"unembeded",
"[",
"v",
"]",
"=",
"_most_common",
"(",
"vals",
")",
"yield",
"unembeded"
] | Determines the sample values by majority vote.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen to match the most common value in the chain. | [
"Determines",
"the",
"sample",
"values",
"by",
"majority",
"vote",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L415-L441 | train |
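One behavioral note on `majority_vote`: `_most_common` (defined further down) breaks ties by whichever key `max` happens to see first, so a 50/50 split on an even-length chain resolves arbitrarily. The clear-majority case in miniature:

```python
from collections import Counter

vals = [+1, -1, +1]   # one broken chain of length three
data = Counter(vals)
assert max(data, key=data.__getitem__) == +1   # clear majority wins
```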
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | weighted_random | def weighted_random(sample, embedding):
"""Determines the sample values by weighed random choice.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen randomly, weighted by the frequency of the values
within the chain.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
# pick a random element uniformly from all vals, this weights them by
# the proportion of each
unembeded[v] = random.choice(vals)
yield unembeded | python | def weighted_random(sample, embedding):
"""Determines the sample values by weighed random choice.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen randomly, weighted by the frequency of the values
within the chain.
"""
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
# pick a random element uniformly from all vals, this weights them by
# the proportion of each
unembeded[v] = random.choice(vals)
yield unembeded | [
"def",
"weighted_random",
"(",
"sample",
",",
"embedding",
")",
":",
"unembeded",
"=",
"{",
"}",
"for",
"v",
",",
"chain",
"in",
"iteritems",
"(",
"embedding",
")",
":",
"vals",
"=",
"[",
"sample",
"[",
"u",
"]",
"for",
"u",
"in",
"chain",
"]",
"# pick a random element uniformly from all vals, this weights them by",
"# the proportion of each",
"unembeded",
"[",
"v",
"]",
"=",
"random",
".",
"choice",
"(",
"vals",
")",
"yield",
"unembeded"
] | Determines the sample values by weighted random choice.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
Yields:
dict: The unembedded sample. When there is a chain break, the value
is chosen randomly, weighted by the frequency of the values
within the chain. | [
"Determines",
"the",
"sample",
"values",
"by",
"weighed",
"random",
"choice",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L444-L470 | train |
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | _all_equal | def _all_equal(iterable):
"""True if all values in `iterable` are equal, else False."""
iterator = iter(iterable)
first = next(iterator)
return all(first == rest for rest in iterator) | python | def _all_equal(iterable):
"""True if all values in `iterable` are equal, else False."""
iterator = iter(iterable)
first = next(iterator)
return all(first == rest for rest in iterator) | [
"def",
"_all_equal",
"(",
"iterable",
")",
":",
"iterator",
"=",
"iter",
"(",
"iterable",
")",
"first",
"=",
"next",
"(",
"iterator",
")",
"return",
"all",
"(",
"first",
"==",
"rest",
"for",
"rest",
"in",
"iterator",
")"
] | True if all values in `iterable` are equal, else False. | [
"True",
"if",
"all",
"values",
"in",
"iterable",
"are",
"equal",
"else",
"False",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L549-L553 | train |
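`_all_equal` calls `next()` without a default, so an empty iterable raises StopIteration. A hypothetical total variant, shown only to flag the edge case:

```python
def all_equal_safe(iterable):
    iterator = iter(iterable)
    first = next(iterator, None)   # treat an empty iterable as trivially equal
    return all(first == rest for rest in iterator)

assert all_equal_safe([])          # no StopIteration
assert not all_equal_safe([1, 1, 2])
```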
dwavesystems/dwave_embedding_utilities | dwave_embedding_utilities.py | _most_common | def _most_common(iterable):
"""Returns the most common element in `iterable`."""
data = Counter(iterable)
return max(data, key=data.__getitem__) | python | def _most_common(iterable):
"""Returns the most common element in `iterable`."""
data = Counter(iterable)
return max(data, key=data.__getitem__) | [
"def",
"_most_common",
"(",
"iterable",
")",
":",
"data",
"=",
"Counter",
"(",
"iterable",
")",
"return",
"max",
"(",
"data",
",",
"key",
"=",
"data",
".",
"__getitem__",
")"
] | Returns the most common element in `iterable`. | [
"Returns",
"the",
"most",
"common",
"element",
"in",
"iterable",
"."
] | 2e485e0ae89d96f3c0005f144bab4b465a3039a3 | https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L556-L559 | train |
DarkEnergySurvey/ugali | ugali/analysis/mcmc.py | MCMC.lnlike | def lnlike(self, theta):
""" Logarithm of the likelihood """
params,loglike = self.params,self.loglike
kwargs = dict(list(zip(params,theta)))
try:
lnlike = loglike.value(**kwargs)
except (ValueError, AssertionError):
lnlike = -np.inf
return lnlike | python | def lnlike(self, theta):
""" Logarithm of the likelihood """
params,loglike = self.params,self.loglike
kwargs = dict(list(zip(params,theta)))
try:
lnlike = loglike.value(**kwargs)
except (ValueError, AssertionError):
lnlike = -np.inf
return lnlike | [
"def",
"lnlike",
"(",
"self",
",",
"theta",
")",
":",
"params",
",",
"loglike",
"=",
"self",
".",
"params",
",",
"self",
".",
"loglike",
"kwargs",
"=",
"dict",
"(",
"list",
"(",
"zip",
"(",
"params",
",",
"theta",
")",
")",
")",
"try",
":",
"lnlike",
"=",
"loglike",
".",
"value",
"(",
"*",
"*",
"kwargs",
")",
"except",
"ValueError",
"as",
"AssertionError",
":",
"lnlike",
"=",
"-",
"np",
".",
"inf",
"return",
"lnlike"
] | Logarithm of the likelihood | [
"Logarithm",
"of",
"the",
"likelihood"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L150-L158 | train |
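The `except` clause above is fixed from the upstream `except ValueError as AssertionError:`, which catches only ValueError and rebinds the caught instance to the name `AssertionError`; the tuple form is presumably what was intended. The difference in miniature (names here are illustrative):

```python
import numpy as np

def lnlike_guarded(evaluate, **kwargs):
    try:
        return evaluate(**kwargs)
    except (ValueError, AssertionError):   # a tuple catches both types;
        return -np.inf                     # `except A as B` catches only A

def bad_model(**kwargs):
    raise AssertionError("parameters out of range")

assert lnlike_guarded(bad_model) == -np.inf
```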
DarkEnergySurvey/ugali | ugali/analysis/mcmc.py | MCMC.lnprior | def lnprior(self,theta):
""" Logarithm of the prior """
params,priors = self.params,self.priors
kwargs = dict(list(zip(params,theta)))
err = np.seterr(invalid='raise')
try:
lnprior = np.sum(np.log([priors[k](v) for k,v in list(kwargs.items())]))
except (FloatingPointError,ValueError):
lnprior = -np.inf
np.seterr(**err)
return lnprior | python | def lnprior(self,theta):
""" Logarithm of the prior """
params,priors = self.params,self.priors
kwargs = dict(list(zip(params,theta)))
err = np.seterr(invalid='raise')
try:
lnprior = np.sum(np.log([priors[k](v) for k,v in list(kwargs.items())]))
except (FloatingPointError,ValueError):
lnprior = -np.inf
np.seterr(**err)
return lnprior | [
"def",
"lnprior",
"(",
"self",
",",
"theta",
")",
":",
"params",
",",
"priors",
"=",
"self",
".",
"params",
",",
"self",
".",
"priors",
"kwargs",
"=",
"dict",
"(",
"list",
"(",
"zip",
"(",
"params",
",",
"theta",
")",
")",
")",
"err",
"=",
"np",
".",
"seterr",
"(",
"invalid",
"=",
"'raise'",
")",
"try",
":",
"lnprior",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"log",
"(",
"[",
"priors",
"[",
"k",
"]",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
"]",
")",
")",
"except",
"(",
"FloatingPointError",
",",
"ValueError",
")",
":",
"lnprior",
"=",
"-",
"np",
".",
"inf",
"np",
".",
"seterr",
"(",
"*",
"*",
"err",
")",
"return",
"lnprior"
] | Logarithm of the prior | [
"Logarithm",
"of",
"the",
"prior"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L160-L170 | train |
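`lnprior` assumes each prior is a callable density; a zero density makes `np.log` return -inf, which `lnprob` (next row) uses to skip the likelihood call entirely. A sketch of that convention with made-up parameter names:

```python
import numpy as np

priors = {"richness": lambda v: float(v > 0),      # zero density below 0
          "distance_modulus": lambda v: 1.0}       # flat (improper) prior
theta = {"richness": -5.0, "distance_modulus": 17.5}

with np.errstate(divide="ignore"):                 # silence the log(0) warning
    lnprior = np.sum(np.log([priors[k](v) for k, v in theta.items()]))
assert lnprior == -np.inf
```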
DarkEnergySurvey/ugali | ugali/analysis/mcmc.py | MCMC.lnprob | def lnprob(self,theta):
""" Logarithm of the probability """
global niter
params,priors,loglike = self.params,self.priors,self.loglike
# Avoid extra likelihood calls with bad priors
_lnprior = self.lnprior(theta)
if np.isfinite(_lnprior):
_lnlike = self.lnlike(theta)
else:
_lnprior = -np.inf
_lnlike = -np.inf
_lnprob = _lnprior + _lnlike
if (niter%100==0):
msg = "%i function calls ...\n"%niter
msg+= ', '.join('%s: %.3f'%(k,v) for k,v in zip(params,theta))
msg+= '\nlog(like): %.3f, log(prior): %.3f'%(_lnlike,_lnprior)
logger.debug(msg)
niter+=1
return _lnprob | python | def lnprob(self,theta):
""" Logarithm of the probability """
global niter
params,priors,loglike = self.params,self.priors,self.loglike
# Avoid extra likelihood calls with bad priors
_lnprior = self.lnprior(theta)
if np.isfinite(_lnprior):
_lnlike = self.lnlike(theta)
else:
_lnprior = -np.inf
_lnlike = -np.inf
_lnprob = _lnprior + _lnlike
if (niter%100==0):
msg = "%i function calls ...\n"%niter
msg+= ', '.join('%s: %.3f'%(k,v) for k,v in zip(params,theta))
msg+= '\nlog(like): %.3f, log(prior): %.3f'%(_lnlike,_lnprior)
logger.debug(msg)
niter+=1
return _lnprob | [
"def",
"lnprob",
"(",
"self",
",",
"theta",
")",
":",
"global",
"niter",
"params",
",",
"priors",
",",
"loglike",
"=",
"self",
".",
"params",
",",
"self",
".",
"priors",
",",
"self",
".",
"loglike",
"# Avoid extra likelihood calls with bad priors",
"_lnprior",
"=",
"self",
".",
"lnprior",
"(",
"theta",
")",
"if",
"np",
".",
"isfinite",
"(",
"_lnprior",
")",
":",
"_lnlike",
"=",
"self",
".",
"lnlike",
"(",
"theta",
")",
"else",
":",
"_lnprior",
"=",
"-",
"np",
".",
"inf",
"_lnlike",
"=",
"-",
"np",
".",
"inf",
"_lnprob",
"=",
"_lnprior",
"+",
"_lnlike",
"if",
"(",
"niter",
"%",
"100",
"==",
"0",
")",
":",
"msg",
"=",
"\"%i function calls ...\\n\"",
"%",
"niter",
"msg",
"+=",
"', '",
".",
"join",
"(",
"'%s: %.3f'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"params",
",",
"theta",
")",
")",
"msg",
"+=",
"'\\nlog(like): %.3f, log(prior): %.3f'",
"%",
"(",
"_lnprior",
",",
"_lnlike",
")",
"logger",
".",
"debug",
"(",
"msg",
")",
"niter",
"+=",
"1",
"return",
"_lnprob"
] | Logarithm of the probability | [
"Logarithm",
"of",
"the",
"probability"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L172-L192 | train |
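A `lnprob` of this shape plugs straight into an ensemble sampler. emcee is an assumption here (the method only requires that something call it with a parameter vector), and the toy prior gate and Gaussian likelihood stand in for the class's versions:

```python
import numpy as np
import emcee

ndim, nwalkers = 3, 12
p0 = np.random.uniform(0.1, 1.0, size=(nwalkers, ndim))

def lnprob(theta):
    if np.any(theta <= 0):          # prior gate: skip bad regions cheaply
        return -np.inf
    return -0.5 * np.sum(theta**2)  # stand-in log-likelihood

sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
sampler.run_mcmc(p0, 100)
```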
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | write_membership | def write_membership(filename,config,srcfile,section=None):
"""
Top level interface to write the membership from a config and source model.
"""
source = Source()
source.load(srcfile,section=section)
loglike = createLoglike(config,source)
loglike.write_membership(filename) | python | def write_membership(filename,config,srcfile,section=None):
"""
Top level interface to write the membership from a config and source model.
"""
source = Source()
source.load(srcfile,section=section)
loglike = createLoglike(config,source)
loglike.write_membership(filename) | [
"def",
"write_membership",
"(",
"filename",
",",
"config",
",",
"srcfile",
",",
"section",
"=",
"None",
")",
":",
"source",
"=",
"Source",
"(",
")",
"source",
".",
"load",
"(",
"srcfile",
",",
"section",
"=",
"section",
")",
"loglike",
"=",
"createLoglike",
"(",
"config",
",",
"source",
")",
"loglike",
".",
"write_membership",
"(",
"filename",
")"
] | Top level interface to write the membership from a config and source model. | [
"Top",
"level",
"interface",
"to",
"write",
"the",
"membership",
"from",
"a",
"config",
"and",
"source",
"model",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L512-L519 | train |
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | createCatalog | def createCatalog(config,roi=None,lon=None,lat=None):
"""
Create a catalog object
"""
import ugali.observation.catalog
if roi is None: roi = createROI(config,lon,lat)
catalog = ugali.observation.catalog.Catalog(config,roi=roi)
return catalog | python | def createCatalog(config,roi=None,lon=None,lat=None):
"""
Create a catalog object
"""
import ugali.observation.catalog
if roi is None: roi = createROI(config,lon,lat)
catalog = ugali.observation.catalog.Catalog(config,roi=roi)
return catalog | [
"def",
"createCatalog",
"(",
"config",
",",
"roi",
"=",
"None",
",",
"lon",
"=",
"None",
",",
"lat",
"=",
"None",
")",
":",
"import",
"ugali",
".",
"observation",
".",
"catalog",
"if",
"roi",
"is",
"None",
":",
"roi",
"=",
"createROI",
"(",
"config",
",",
"lon",
",",
"lat",
")",
"catalog",
"=",
"ugali",
".",
"observation",
".",
"catalog",
".",
"Catalog",
"(",
"config",
",",
"roi",
"=",
"roi",
")",
"return",
"catalog"
] | Create a catalog object | [
"Create",
"a",
"catalog",
"object"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L566-L573 | train |
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | simulateCatalog | def simulateCatalog(config,roi=None,lon=None,lat=None):
"""
Simulate a catalog object.
"""
import ugali.simulation.simulator
if roi is None: roi = createROI(config,lon,lat)
sim = ugali.simulation.simulator.Simulator(config,roi)
return sim.catalog() | python | def simulateCatalog(config,roi=None,lon=None,lat=None):
"""
Simulate a catalog object.
"""
import ugali.simulation.simulator
if roi is None: roi = createROI(config,lon,lat)
sim = ugali.simulation.simulator.Simulator(config,roi)
return sim.catalog() | [
"def",
"simulateCatalog",
"(",
"config",
",",
"roi",
"=",
"None",
",",
"lon",
"=",
"None",
",",
"lat",
"=",
"None",
")",
":",
"import",
"ugali",
".",
"simulation",
".",
"simulator",
"if",
"roi",
"is",
"None",
":",
"roi",
"=",
"createROI",
"(",
"config",
",",
"lon",
",",
"lat",
")",
"sim",
"=",
"ugali",
".",
"simulation",
".",
"simulator",
".",
"Simulator",
"(",
"config",
",",
"roi",
")",
"return",
"sim",
".",
"catalog",
"(",
")"
] | Simulate a catalog object. | [
"Simulate",
"a",
"catalog",
"object",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L576-L583 | train |
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | LogLikelihood.calc_observable_fraction | def calc_observable_fraction(self,distance_modulus):
"""
Calculate the observable fraction within each pixel of the target region.
"""
# This is the observable fraction after magnitude cuts in each
# pixel of the ROI.
observable_fraction = self.isochrone.observableFraction(self.mask,distance_modulus)
if not observable_fraction.sum() > 0:
msg = "No observable fraction"
msg += ("\n"+str(self.source.params))
logger.error(msg)
raise ValueError(msg)
return observable_fraction | python | def calc_observable_fraction(self,distance_modulus):
"""
Calculate the observable fraction within each pixel of the target region.
"""
# This is the observable fraction after magnitude cuts in each
# pixel of the ROI.
observable_fraction = self.isochrone.observableFraction(self.mask,distance_modulus)
if not observable_fraction.sum() > 0:
msg = "No observable fraction"
msg += ("\n"+str(self.source.params))
logger.error(msg)
raise ValueError(msg)
return observable_fraction | [
"def",
"calc_observable_fraction",
"(",
"self",
",",
"distance_modulus",
")",
":",
"# This is the observable fraction after magnitude cuts in each ",
"# pixel of the ROI.",
"observable_fraction",
"=",
"self",
".",
"isochrone",
".",
"observableFraction",
"(",
"self",
".",
"mask",
",",
"distance_modulus",
")",
"if",
"not",
"observable_fraction",
".",
"sum",
"(",
")",
">",
"0",
":",
"msg",
"=",
"\"No observable fraction\"",
"msg",
"+=",
"(",
"\"\\n\"",
"+",
"str",
"(",
"self",
".",
"source",
".",
"params",
")",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"observable_fraction"
] | Calculate the observable fraction within each pixel of the target region. | [
"Calculated",
"observable",
"fraction",
"within",
"each",
"pixel",
"of",
"the",
"target",
"region",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L265-L277 | train |
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | LogLikelihood.calc_signal_spatial | def calc_signal_spatial(self):
"""
Calculate the spatial signal probability for each catalog object.
Parameters:
-----------
None
Returns:
--------
u_spatial : array of spatial probabilities per object
"""
# Calculate the surface intensity
self.surface_intensity_sparse = self.calc_surface_intensity()
# Calculate the probability per object-by-object level
self.surface_intensity_object = self.kernel.pdf(self.catalog.lon,
self.catalog.lat)
# Spatial component of signal probability
u_spatial = self.surface_intensity_object
return u_spatial | python | def calc_signal_spatial(self):
"""
Calculate the spatial signal probability for each catalog object.
Parameters:
-----------
None
Returns:
--------
u_spatial : array of spatial probabilities per object
"""
# Calculate the surface intensity
self.surface_intensity_sparse = self.calc_surface_intensity()
# Calculate the probability per object-by-object level
self.surface_intensity_object = self.kernel.pdf(self.catalog.lon,
self.catalog.lat)
# Spatial component of signal probability
u_spatial = self.surface_intensity_object
return u_spatial | [
"def",
"calc_signal_spatial",
"(",
"self",
")",
":",
"# Calculate the surface intensity",
"self",
".",
"surface_intensity_sparse",
"=",
"self",
".",
"calc_surface_intensity",
"(",
")",
"# Calculate the probability per object-by-object level",
"self",
".",
"surface_intensity_object",
"=",
"self",
".",
"kernel",
".",
"pdf",
"(",
"self",
".",
"catalog",
".",
"lon",
",",
"self",
".",
"catalog",
".",
"lat",
")",
"# Spatial component of signal probability",
"u_spatial",
"=",
"self",
".",
"surface_intensity_object",
"return",
"u_spatial"
] | Calculate the spatial signal probability for each catalog object.
Parameters:
-----------
None
Returns:
--------
u_spatial : array of spatial probabilities per object | [
"Calculate",
"the",
"spatial",
"signal",
"probability",
"for",
"each",
"catalog",
"object",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L350-L371 | train |
DarkEnergySurvey/ugali | ugali/analysis/loglike.py | LogLikelihood.fit_richness | def fit_richness(self, atol=1.e-3, maxiter=50):
"""
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerance for convergence
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
"""
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
return 0., 0., None
if not np.any(self.u):
logger.warning("Signal probability is zero for all objects")
return 0., 0., None
if self.f == 0:
logger.warning("Observable fraction is zero")
return 0., 0., None
# Richness corresponding to 0, 1, and 10 observable stars
richness = np.array([0., 1./self.f, 10./self.f])
loglike = np.array([self.value(richness=r) for r in richness])
found_maximum = False
iteration = 0
while not found_maximum:
parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)
if parabola.vertex_x < 0.:
found_maximum = True
else:
richness = np.append(richness, parabola.vertex_x)
loglike = np.append(loglike, self.value(richness=richness[-1]))
if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:
found_maximum = True
iteration+=1
if iteration > maxiter:
logger.warning("Maximum number of iterations reached")
break
index = np.argmax(loglike)
return loglike[index], richness[index], parabola | python | def fit_richness(self, atol=1.e-3, maxiter=50):
"""
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerance for convergence
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
"""
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
return 0., 0., None
if not np.any(self.u):
logger.warning("Signal probability is zero for all objects")
return 0., 0., None
if self.f == 0:
logger.warning("Observable fraction is zero")
return 0., 0., None
# Richness corresponding to 0, 1, and 10 observable stars
richness = np.array([0., 1./self.f, 10./self.f])
loglike = np.array([self.value(richness=r) for r in richness])
found_maximum = False
iteration = 0
while not found_maximum:
parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)
if parabola.vertex_x < 0.:
found_maximum = True
else:
richness = np.append(richness, parabola.vertex_x)
loglike = np.append(loglike, self.value(richness=richness[-1]))
if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:
found_maximum = True
iteration+=1
if iteration > maxiter:
logger.warning("Maximum number of iterations reached")
break
index = np.argmax(loglike)
return loglike[index], richness[index], parabola | [
"def",
"fit_richness",
"(",
"self",
",",
"atol",
"=",
"1.e-3",
",",
"maxiter",
"=",
"50",
")",
":",
"# Check whether the signal probability for all objects are zero",
"# This can occur for finite kernels on the edge of the survey footprint",
"if",
"np",
".",
"isnan",
"(",
"self",
".",
"u",
")",
".",
"any",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"NaN signal probability found\"",
")",
"return",
"0.",
",",
"0.",
",",
"None",
"if",
"not",
"np",
".",
"any",
"(",
"self",
".",
"u",
")",
":",
"logger",
".",
"warning",
"(",
"\"Signal probability is zero for all objects\"",
")",
"return",
"0.",
",",
"0.",
",",
"None",
"if",
"self",
".",
"f",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Observable fraction is zero\"",
")",
"return",
"0.",
",",
"0.",
",",
"None",
"# Richness corresponding to 0, 1, and 10 observable stars",
"richness",
"=",
"np",
".",
"array",
"(",
"[",
"0.",
",",
"1.",
"/",
"self",
".",
"f",
",",
"10.",
"/",
"self",
".",
"f",
"]",
")",
"loglike",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"value",
"(",
"richness",
"=",
"r",
")",
"for",
"r",
"in",
"richness",
"]",
")",
"found_maximum",
"=",
"False",
"iteration",
"=",
"0",
"while",
"not",
"found_maximum",
":",
"parabola",
"=",
"ugali",
".",
"utils",
".",
"parabola",
".",
"Parabola",
"(",
"richness",
",",
"2.",
"*",
"loglike",
")",
"if",
"parabola",
".",
"vertex_x",
"<",
"0.",
":",
"found_maximum",
"=",
"True",
"else",
":",
"richness",
"=",
"np",
".",
"append",
"(",
"richness",
",",
"parabola",
".",
"vertex_x",
")",
"loglike",
"=",
"np",
".",
"append",
"(",
"loglike",
",",
"self",
".",
"value",
"(",
"richness",
"=",
"richness",
"[",
"-",
"1",
"]",
")",
")",
"if",
"np",
".",
"fabs",
"(",
"loglike",
"[",
"-",
"1",
"]",
"-",
"np",
".",
"max",
"(",
"loglike",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
"<",
"atol",
":",
"found_maximum",
"=",
"True",
"iteration",
"+=",
"1",
"if",
"iteration",
">",
"maxiter",
":",
"logger",
".",
"warning",
"(",
"\"Maximum number of iterations reached\"",
")",
"break",
"index",
"=",
"np",
".",
"argmax",
"(",
"loglike",
")",
"return",
"loglike",
"[",
"index",
"]",
",",
"richness",
"[",
"index",
"]",
",",
"parabola"
] | Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerance for convergence
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola | [
"Maximize",
"the",
"log",
"-",
"likelihood",
"as",
"a",
"function",
"of",
"richness",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L380-L431 | train |
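The fit_richness record above maximizes a one-dimensional log-likelihood by repeatedly fitting a parabola through the sampled (richness, 2*loglike) points and jumping to the vertex. A minimal sketch of that idea — with a toy concave objective standing in for the real log-likelihood and numpy.polyfit standing in for ugali.utils.parabola.Parabola (both substitutions are mine, not the library's code):

import numpy as np

def toy_loglike(r):
    # Toy concave objective standing in for the real log-likelihood.
    return -0.5 * (r - 40.0) ** 2 / 100.0

richness = np.array([0.0, 10.0, 100.0])        # initial samples
loglike = np.array([toy_loglike(r) for r in richness])
for _ in range(50):                            # cap iterations like maxiter
    a, b, c = np.polyfit(richness, loglike, 2) # parabola a*x^2 + b*x + c
    vertex = -b / (2.0 * a)                    # vertex of the parabola
    if vertex < 0:
        break                                  # maximum pinned at richness=0
    richness = np.append(richness, vertex)
    loglike = np.append(loglike, toy_loglike(vertex))
    if abs(loglike[-1] - loglike[:-1].max()) < 1e-3:  # atol-style stop
        break
best = np.argmax(loglike)
print(loglike[best], richness[best])           # -> 0.0 40.0 for this toy

With an exactly quadratic objective the vertex is found on the first fit; the tolerance test matters only for objectives that are merely locally quadratic.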
akx/lepo | lepo/apidef/doc.py | APIDefinition.resolve_reference | def resolve_reference(self, ref):
"""
Resolve a JSON Pointer object reference to the object itself.
:param ref: Reference string (`#/foo/bar`, for instance)
:return: The object, if found
:raises jsonschema.exceptions.RefResolutionError: if there is trouble resolving the reference
"""
url, resolved = self.resolver.resolve(ref)
return resolved | python | def resolve_reference(self, ref):
"""
Resolve a JSON Pointer object reference to the object itself.
:param ref: Reference string (`#/foo/bar`, for instance)
:return: The object, if found
:raises jsonschema.exceptions.RefResolutionError: if there is trouble resolving the reference
"""
url, resolved = self.resolver.resolve(ref)
return resolved | [
"def",
"resolve_reference",
"(",
"self",
",",
"ref",
")",
":",
"url",
",",
"resolved",
"=",
"self",
".",
"resolver",
".",
"resolve",
"(",
"ref",
")",
"return",
"resolved"
] | Resolve a JSON Pointer object reference to the object itself.
:param ref: Reference string (`#/foo/bar`, for instance)
:return: The object, if found
:raises jsonschema.exceptions.RefResolutionError: if there is trouble resolving the reference | [
"Resolve",
"a",
"JSON",
"Pointer",
"object",
"reference",
"to",
"the",
"object",
"itself",
"."
] | 34cfb24a40f18ea40f672c1ea9a0734ee1816b7d | https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L25-L34 | train |
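resolve_reference delegates to self.resolver, which behaves like jsonschema's RefResolver (an assumption based on the two-value return of resolve). A minimal sketch of that resolution step in isolation:

from jsonschema import RefResolver  # deprecated in recent jsonschema releases, but matches this API

doc = {'definitions': {'Pet': {'type': 'object'}}}
resolver = RefResolver(base_uri='', referrer=doc)
url, resolved = resolver.resolve('#/definitions/Pet')
print(resolved)  # -> {'type': 'object'}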
akx/lepo | lepo/apidef/doc.py | APIDefinition.get_path | def get_path(self, path):
"""
Construct a Path object from a path string.
The Path string must be declared in the API.
:type path: str
:rtype: lepo.path.Path
"""
mapping = self.get_path_mapping(path)
return self.path_class(api=self, path=path, mapping=mapping) | python | def get_path(self, path):
"""
Construct a Path object from a path string.
The Path string must be declared in the API.
:type path: str
:rtype: lepo.path.Path
"""
mapping = self.get_path_mapping(path)
return self.path_class(api=self, path=path, mapping=mapping) | [
"def",
"get_path",
"(",
"self",
",",
"path",
")",
":",
"mapping",
"=",
"self",
".",
"get_path_mapping",
"(",
"path",
")",
"return",
"self",
".",
"path_class",
"(",
"api",
"=",
"self",
",",
"path",
"=",
"path",
",",
"mapping",
"=",
"mapping",
")"
] | Construct a Path object from a path string.
The Path string must be declared in the API.
:type path: str
:rtype: lepo.path.Path | [
"Construct",
"a",
"Path",
"object",
"from",
"a",
"path",
"string",
"."
] | 34cfb24a40f18ea40f672c1ea9a0734ee1816b7d | https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L43-L53 | train |
akx/lepo | lepo/apidef/doc.py | APIDefinition.from_file | def from_file(cls, filename):
"""
Construct an APIDefinition by parsing the given `filename`.
If PyYAML is installed, YAML files are supported.
JSON files are always supported.
:param filename: The filename to read.
:rtype: APIDefinition
"""
with open(filename) as infp:
if filename.endswith('.yaml') or filename.endswith('.yml'):
import yaml
data = yaml.safe_load(infp)
else:
import json
data = json.load(infp)
return cls.from_data(data) | python | def from_file(cls, filename):
"""
Construct an APIDefinition by parsing the given `filename`.
If PyYAML is installed, YAML files are supported.
JSON files are always supported.
:param filename: The filename to read.
:rtype: APIDefinition
"""
with open(filename) as infp:
if filename.endswith('.yaml') or filename.endswith('.yml'):
import yaml
data = yaml.safe_load(infp)
else:
import json
data = json.load(infp)
return cls.from_data(data) | [
"def",
"from_file",
"(",
"cls",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"infp",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.yaml'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'.yml'",
")",
":",
"import",
"yaml",
"data",
"=",
"yaml",
".",
"safe_load",
"(",
"infp",
")",
"else",
":",
"import",
"json",
"data",
"=",
"json",
".",
"load",
"(",
"infp",
")",
"return",
"cls",
".",
"from_data",
"(",
"data",
")"
] | Construct an APIDefinition by parsing the given `filename`.
If PyYAML is installed, YAML files are supported.
JSON files are always supported.
:param filename: The filename to read.
:rtype: APIDefinition | [
"Construct",
"an",
"APIDefinition",
"by",
"parsing",
"the",
"given",
"filename",
"."
] | 34cfb24a40f18ea40f672c1ea9a0734ee1816b7d | https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L65-L82 | train |
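The from_file classmethod dispatches on the file extension and defers the PyYAML import so YAML support stays optional. A standalone sketch of the same pattern (the helper name is mine):

import json

def load_definition(filename):
    """Load a YAML or JSON API definition from disk."""
    with open(filename) as infp:
        if filename.endswith(('.yaml', '.yml')):
            import yaml  # deferred: only needed for YAML input
            return yaml.safe_load(infp)
        return json.load(infp)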
juju/theblues | theblues/utils.py | _server_error_message | def _server_error_message(url, message):
"""Log and return a server error message."""
msg = _error_message.format(url=url, message=message)
log.error(msg)
return msg | python | def _server_error_message(url, message):
"""Log and return a server error message."""
msg = _error_message.format(url=url, message=message)
log.error(msg)
return msg | [
"def",
"_server_error_message",
"(",
"url",
",",
"message",
")",
":",
"msg",
"=",
"_error_message",
".",
"format",
"(",
"url",
"=",
"url",
",",
"message",
"=",
"message",
")",
"log",
".",
"error",
"(",
"msg",
")",
"return",
"msg"
] | Log and return a server error message. | [
"Log",
"and",
"return",
"a",
"server",
"error",
"message",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/utils.py#L23-L27 | train |
juju/theblues | theblues/utils.py | make_request | def make_request(
url, method='GET', query=None, body=None, auth=None, timeout=10,
client=None, macaroons=None):
"""Make a request with the provided data.
@param url The url to make the request to.
@param method The HTTP request method (defaulting to "GET").
@param query A dict of the query key and values.
@param body The optional body as a string or as a JSON decoded dict.
@param auth The optional username and password as a tuple,
not used if client is not None
@param timeout The request timeout in seconds, defaulting to 10 seconds.
@param client (httpbakery.Client) holds a context for making http
requests with macaroons.
@param macaroons Optional JSON serialized, base64 encoded macaroons to be
included in the request header.
POST/PUT request bodies are assumed to be in JSON format.
Return the response content as a JSON decoded object, or an empty dict.
Raise a ServerError if a problem occurs in the request/response process.
Raise a ValueError if invalid parameters are provided.
"""
headers = {}
kwargs = {'timeout': timeout, 'headers': headers}
# Handle the request body.
if body is not None:
if isinstance(body, collections.Mapping):
body = json.dumps(body)
kwargs['data'] = body
# Handle request methods.
if method in ('GET', 'HEAD'):
if query:
url = '{}?{}'.format(url, urlencode(query, True))
elif method in ('DELETE', 'PATCH', 'POST', 'PUT'):
headers['Content-Type'] = 'application/json'
else:
raise ValueError('invalid method {}'.format(method))
if macaroons is not None:
headers['Macaroons'] = macaroons
kwargs['auth'] = auth if client is None else client.auth()
api_method = getattr(requests, method.lower())
# Perform the request.
try:
response = api_method(url, **kwargs)
except requests.exceptions.Timeout:
raise timeout_error(url, timeout)
except Exception as err:
msg = _server_error_message(url, err)
raise ServerError(msg)
# Handle error responses.
try:
response.raise_for_status()
except HTTPError as err:
msg = _server_error_message(url, err.response.text)
raise ServerError(err.response.status_code, msg)
except requests.exceptions.RequestException as err:
msg = _server_error_message(url, str(err))
raise ServerError(msg)
# Some requests just result in a status with no response body.
if not response.content:
return {}
# Assume the response body is a JSON encoded string.
try:
return response.json()
except Exception as err:
msg = 'Error decoding JSON response: {} message: {}'.format(url, err)
log.error(msg)
raise ServerError(msg) | python | def make_request(
url, method='GET', query=None, body=None, auth=None, timeout=10,
client=None, macaroons=None):
"""Make a request with the provided data.
@param url The url to make the request to.
@param method The HTTP request method (defaulting to "GET").
@param query A dict of the query key and values.
@param body The optional body as a string or as a JSON decoded dict.
@param auth The optional username and password as a tuple,
not used if client is not None
@param timeout The request timeout in seconds, defaulting to 10 seconds.
@param client (httpbakery.Client) holds a context for making http
requests with macaroons.
@param macaroons Optional JSON serialized, base64 encoded macaroons to be
included in the request header.
POST/PUT request bodies are assumed to be in JSON format.
Return the response content as a JSON decoded object, or an empty dict.
Raise a ServerError if a problem occurs in the request/response process.
Raise a ValueError if invalid parameters are provided.
"""
headers = {}
kwargs = {'timeout': timeout, 'headers': headers}
# Handle the request body.
if body is not None:
if isinstance(body, collections.Mapping):
body = json.dumps(body)
kwargs['data'] = body
# Handle request methods.
if method in ('GET', 'HEAD'):
if query:
url = '{}?{}'.format(url, urlencode(query, True))
elif method in ('DELETE', 'PATCH', 'POST', 'PUT'):
headers['Content-Type'] = 'application/json'
else:
raise ValueError('invalid method {}'.format(method))
if macaroons is not None:
headers['Macaroons'] = macaroons
kwargs['auth'] = auth if client is None else client.auth()
api_method = getattr(requests, method.lower())
# Perform the request.
try:
response = api_method(url, **kwargs)
except requests.exceptions.Timeout:
raise timeout_error(url, timeout)
except Exception as err:
msg = _server_error_message(url, err)
raise ServerError(msg)
# Handle error responses.
try:
response.raise_for_status()
except HTTPError as err:
msg = _server_error_message(url, err.response.text)
raise ServerError(err.response.status_code, msg)
except requests.exceptions.RequestException as err:
msg = _server_error_message(url, str(err))
raise ServerError(msg)
# Some requests just result in a status with no response body.
if not response.content:
return {}
# Assume the response body is a JSON encoded string.
try:
return response.json()
except Exception as err:
msg = 'Error decoding JSON response: {} message: {}'.format(url, err)
log.error(msg)
raise ServerError(msg) | [
"def",
"make_request",
"(",
"url",
",",
"method",
"=",
"'GET'",
",",
"query",
"=",
"None",
",",
"body",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"client",
"=",
"None",
",",
"macaroons",
"=",
"None",
")",
":",
"headers",
"=",
"{",
"}",
"kwargs",
"=",
"{",
"'timeout'",
":",
"timeout",
",",
"'headers'",
":",
"headers",
"}",
"# Handle the request body.",
"if",
"body",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"body",
",",
"collections",
".",
"Mapping",
")",
":",
"body",
"=",
"json",
".",
"dumps",
"(",
"body",
")",
"kwargs",
"[",
"'data'",
"]",
"=",
"body",
"# Handle request methods.",
"if",
"method",
"in",
"(",
"'GET'",
",",
"'HEAD'",
")",
":",
"if",
"query",
":",
"url",
"=",
"'{}?{}'",
".",
"format",
"(",
"url",
",",
"urlencode",
"(",
"query",
",",
"True",
")",
")",
"elif",
"method",
"in",
"(",
"'DELETE'",
",",
"'PATCH'",
",",
"'POST'",
",",
"'PUT'",
")",
":",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/json'",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid method {}'",
".",
"format",
"(",
"method",
")",
")",
"if",
"macaroons",
"is",
"not",
"None",
":",
"headers",
"[",
"'Macaroons'",
"]",
"=",
"macaroons",
"kwargs",
"[",
"'auth'",
"]",
"=",
"auth",
"if",
"client",
"is",
"None",
"else",
"client",
".",
"auth",
"(",
")",
"api_method",
"=",
"getattr",
"(",
"requests",
",",
"method",
".",
"lower",
"(",
")",
")",
"# Perform the request.",
"try",
":",
"response",
"=",
"api_method",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
":",
"raise",
"timeout_error",
"(",
"url",
",",
"timeout",
")",
"except",
"Exception",
"as",
"err",
":",
"msg",
"=",
"_server_error_message",
"(",
"url",
",",
"err",
")",
"raise",
"ServerError",
"(",
"msg",
")",
"# Handle error responses.",
"try",
":",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"HTTPError",
"as",
"err",
":",
"msg",
"=",
"_server_error_message",
"(",
"url",
",",
"err",
".",
"response",
".",
"text",
")",
"raise",
"ServerError",
"(",
"err",
".",
"response",
".",
"status_code",
",",
"msg",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"err",
":",
"msg",
"=",
"_server_error_message",
"(",
"url",
",",
"err",
".",
"message",
")",
"raise",
"ServerError",
"(",
"msg",
")",
"# Some requests just result in a status with no response body.",
"if",
"not",
"response",
".",
"content",
":",
"return",
"{",
"}",
"# Assume the response body is a JSON encoded string.",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"Exception",
"as",
"err",
":",
"msg",
"=",
"'Error decoding JSON response: {} message: {}'",
".",
"format",
"(",
"url",
",",
"err",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"ServerError",
"(",
"msg",
")"
] | Make a request with the provided data.
@param url The url to make the request to.
@param method The HTTP request method (defaulting to "GET").
@param query A dict of the query key and values.
@param body The optional body as a string or as a JSON decoded dict.
@param auth The optional username and password as a tuple,
not used if client is not None
@param timeout The request timeout in seconds, defaulting to 10 seconds.
@param client (httpbakery.Client) holds a context for making http
requests with macaroons.
@param macaroons Optional JSON serialized, base64 encoded macaroons to be
included in the request header.
POST/PUT request bodies are assumed to be in JSON format.
Return the response content as a JSON decoded object, or an empty dict.
Raise a ServerError if a problem occurs in the request/response process.
Raise a ValueError if invalid parameters are provided. | [
"Make",
"a",
"request",
"with",
"the",
"provided",
"data",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/utils.py#L30-L99 | train |
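A hedged usage sketch for make_request; the endpoint is hypothetical, and auth/client/macaroons are left at their defaults:

# POST a dict body: it is JSON-encoded and Content-Type is set for us.
created = make_request(
    'https://api.example.com/v1/items',   # hypothetical endpoint
    method='POST',
    body={'name': 'widget', 'count': 3},
    timeout=5,
)

# GET with a query dict: ?page=2 is appended to the URL.
items = make_request('https://api.example.com/v1/items', query={'page': 2})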
juju/theblues | theblues/plans.py | Plans.get_plans | def get_plans(self, reference):
"""Get the plans for a given charm.
@param the Reference to a charm.
@return a tuple of plans or an empty tuple if no plans.
@raise ServerError
"""
response = make_request(
'{}charm?charm-url={}'.format(self.url,
'cs:' + reference.path()),
timeout=self.timeout, client=self._client)
try:
return tuple(map(lambda plan: Plan(
url=plan['url'], plan=plan['plan'],
created_on=datetime.datetime.strptime(
plan['created-on'],
"%Y-%m-%dT%H:%M:%SZ"
),
description=plan.get('description'),
price=plan.get('price')), response))
except Exception as err:
log.error(
'cannot process plans: invalid JSON response: {!r}'.format(
response))
raise ServerError(
'unable to get list of plans for {}: {}'.format(
reference.path(), err)) | python | def get_plans(self, reference):
"""Get the plans for a given charm.
@param the Reference to a charm.
@return a tuple of plans or an empty tuple if no plans.
@raise ServerError
"""
response = make_request(
'{}charm?charm-url={}'.format(self.url,
'cs:' + reference.path()),
timeout=self.timeout, client=self._client)
try:
return tuple(map(lambda plan: Plan(
url=plan['url'], plan=plan['plan'],
created_on=datetime.datetime.strptime(
plan['created-on'],
"%Y-%m-%dT%H:%M:%SZ"
),
description=plan.get('description'),
price=plan.get('price')), response))
except Exception as err:
log.error(
'cannot process plans: invalid JSON response: {!r}'.format(
response))
raise ServerError(
'unable to get list of plans for {}: {}'.format(
reference.path(), err)) | [
"def",
"get_plans",
"(",
"self",
",",
"reference",
")",
":",
"response",
"=",
"make_request",
"(",
"'{}charm?charm-url={}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"'cs:'",
"+",
"reference",
".",
"path",
"(",
")",
")",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")",
"try",
":",
"return",
"tuple",
"(",
"map",
"(",
"lambda",
"plan",
":",
"Plan",
"(",
"url",
"=",
"plan",
"[",
"'url'",
"]",
",",
"plan",
"=",
"plan",
"[",
"'plan'",
"]",
",",
"created_on",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"plan",
"[",
"'created-on'",
"]",
",",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
",",
"description",
"=",
"plan",
".",
"get",
"(",
"'description'",
")",
",",
"price",
"=",
"plan",
".",
"get",
"(",
"'price'",
")",
")",
",",
"response",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'cannot process plans: invalid JSON response: {!r}'",
".",
"format",
"(",
"response",
")",
")",
"raise",
"ServerError",
"(",
"'unable to get list of plans for {}: {}'",
".",
"format",
"(",
"reference",
".",
"path",
"(",
")",
",",
"err",
")",
")"
] | Get the plans for a given charm.
@param the Reference to a charm.
@return a tuple of plans or an empty tuple if no plans.
@raise ServerError | [
"Get",
"the",
"plans",
"for",
"a",
"given",
"charm",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L46-L72 | train |
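get_plans parses the 'created-on' field with a fixed Zulu-time format; this is what that strptime call does in isolation (the timestamp value is made up):

import datetime

created_on = datetime.datetime.strptime('2017-03-01T15:04:05Z',
                                        '%Y-%m-%dT%H:%M:%SZ')
print(created_on)  # 2017-03-01 15:04:05 -- a naive datetime, implicitly UTC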
juju/theblues | theblues/plans.py | Plans.list_wallets | def list_wallets(self):
"""Get the list of wallets.
@return a dict containing a list of wallets, a total, and available
credit.
@raise ServerError
"""
response = make_request(
'{}wallet'.format(self.url),
timeout=self.timeout,
client=self._client)
try:
total = response['total']
return {
'credit': response['credit'],
'total': WalletTotal(
limit=total['limit'],
budgeted=total['budgeted'],
available=total['available'],
unallocated=total['unallocated'],
usage=total['usage'],
consumed=total['consumed']),
'wallets': tuple(Wallet(
owner=wallet['owner'],
wallet=wallet['wallet'],
limit=wallet['limit'],
budgeted=wallet['budgeted'],
unallocated=wallet['unallocated'],
available=wallet['available'],
consumed=wallet['consumed'],
default='default' in wallet)
for wallet in response['wallets']),
}
except Exception as err:
log.error(
'cannot process wallets: invalid JSON response: {!r}'.format(
response))
raise ServerError(
'unable to get list of wallets: {!r}'.format(err)) | python | def list_wallets(self):
"""Get the list of wallets.
@return a dict containing a list of wallets, a total, and available
credit.
@raise ServerError
"""
response = make_request(
'{}wallet'.format(self.url),
timeout=self.timeout,
client=self._client)
try:
total = response['total']
return {
'credit': response['credit'],
'total': WalletTotal(
limit=total['limit'],
budgeted=total['budgeted'],
available=total['available'],
unallocated=total['unallocated'],
usage=total['usage'],
consumed=total['consumed']),
'wallets': tuple(Wallet(
owner=wallet['owner'],
wallet=wallet['wallet'],
limit=wallet['limit'],
budgeted=wallet['budgeted'],
unallocated=wallet['unallocated'],
available=wallet['available'],
consumed=wallet['consumed'],
default='default' in wallet)
for wallet in response['wallets']),
}
except Exception as err:
log.error(
'cannot process wallets: invalid JSON response: {!r}'.format(
response))
raise ServerError(
'unable to get list of wallets: {!r}'.format(err)) | [
"def",
"list_wallets",
"(",
"self",
")",
":",
"response",
"=",
"make_request",
"(",
"'{}wallet'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")",
"try",
":",
"total",
"=",
"response",
"[",
"'total'",
"]",
"return",
"{",
"'credit'",
":",
"response",
"[",
"'credit'",
"]",
",",
"'total'",
":",
"WalletTotal",
"(",
"limit",
"=",
"total",
"[",
"'limit'",
"]",
",",
"budgeted",
"=",
"total",
"[",
"'budgeted'",
"]",
",",
"available",
"=",
"total",
"[",
"'available'",
"]",
",",
"unallocated",
"=",
"total",
"[",
"'unallocated'",
"]",
",",
"usage",
"=",
"total",
"[",
"'usage'",
"]",
",",
"consumed",
"=",
"total",
"[",
"'consumed'",
"]",
")",
",",
"'wallets'",
":",
"tuple",
"(",
"Wallet",
"(",
"owner",
"=",
"wallet",
"[",
"'owner'",
"]",
",",
"wallet",
"=",
"wallet",
"[",
"'wallet'",
"]",
",",
"limit",
"=",
"wallet",
"[",
"'limit'",
"]",
",",
"budgeted",
"=",
"wallet",
"[",
"'budgeted'",
"]",
",",
"unallocated",
"=",
"wallet",
"[",
"'unallocated'",
"]",
",",
"available",
"=",
"wallet",
"[",
"'available'",
"]",
",",
"consumed",
"=",
"wallet",
"[",
"'consumed'",
"]",
",",
"default",
"=",
"'default'",
"in",
"wallet",
")",
"for",
"wallet",
"in",
"response",
"[",
"'wallets'",
"]",
")",
",",
"}",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'cannot process wallets: invalid JSON response: {!r}'",
".",
"format",
"(",
"response",
")",
")",
"raise",
"ServerError",
"(",
"'unable to get list of wallets: {!r}'",
".",
"format",
"(",
"err",
")",
")"
] | Get the list of wallets.
@return a dict containing a list of wallets, a total, and available
credit.
@raise ServerError | [
"Get",
"the",
"list",
"of",
"wallets",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L74-L112 | train |
juju/theblues | theblues/plans.py | Plans.get_wallet | def get_wallet(self, wallet_name):
"""Get a single wallet.
@param the name of the wallet.
@return the wallet's total.
@raise ServerError
"""
response = make_request(
'{}wallet/{}'.format(self.url, wallet_name),
timeout=self.timeout,
client=self._client)
try:
total = response['total']
return {
'credit': response['credit'],
'limit': response['limit'],
'total': WalletTotal(
limit=total['limit'],
budgeted=total['budgeted'],
available=total['available'],
unallocated=total['unallocated'],
usage=total['usage'],
consumed=total['consumed'])
}
except Exception as exc:
log.error(
'cannot get wallet from server: {!r}'.format(exc))
raise ServerError(
'unable to get wallet: {!r}'.format(exc)) | python | def get_wallet(self, wallet_name):
"""Get a single wallet.
@param the name of the wallet.
@return the wallet's total.
@raise ServerError
"""
response = make_request(
'{}wallet/{}'.format(self.url, wallet_name),
timeout=self.timeout,
client=self._client)
try:
total = response['total']
return {
'credit': response['credit'],
'limit': response['limit'],
'total': WalletTotal(
limit=total['limit'],
budgeted=total['budgeted'],
available=total['available'],
unallocated=total['unallocated'],
usage=total['usage'],
consumed=total['consumed'])
}
except Exception as exc:
log.error(
'cannot get wallet from server: {!r}'.format(exc))
raise ServerError(
'unable to get wallet: {!r}'.format(exc)) | [
"def",
"get_wallet",
"(",
"self",
",",
"wallet_name",
")",
":",
"response",
"=",
"make_request",
"(",
"'{}wallet/{}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"wallet_name",
")",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")",
"try",
":",
"total",
"=",
"response",
"[",
"'total'",
"]",
"return",
"{",
"'credit'",
":",
"response",
"[",
"'credit'",
"]",
",",
"'limit'",
":",
"response",
"[",
"'limit'",
"]",
",",
"'total'",
":",
"WalletTotal",
"(",
"limit",
"=",
"total",
"[",
"'limit'",
"]",
",",
"budgeted",
"=",
"total",
"[",
"'budgeted'",
"]",
",",
"available",
"=",
"total",
"[",
"'available'",
"]",
",",
"unallocated",
"=",
"total",
"[",
"'unallocated'",
"]",
",",
"usage",
"=",
"total",
"[",
"'usage'",
"]",
",",
"consumed",
"=",
"total",
"[",
"'consumed'",
"]",
")",
"}",
"except",
"Exception",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'cannot get wallet from server: {!r}'",
".",
"format",
"(",
"exc",
")",
")",
"raise",
"ServerError",
"(",
"'unable to get list of wallets: {!r}'",
".",
"format",
"(",
"exc",
")",
")"
] | Get a single wallet.
@param the name of the wallet.
@return the wallet's total.
@raise ServerError | [
"Get",
"a",
"single",
"wallet",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L114-L142 | train |
juju/theblues | theblues/plans.py | Plans.update_wallet | def update_wallet(self, wallet_name, limit):
"""Update a wallet with a new limit.
@param the name of the wallet.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
request = {
'update': {
'limit': str(limit),
}
}
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='PATCH',
body=request,
timeout=self.timeout,
client=self._client) | python | def update_wallet(self, wallet_name, limit):
"""Update a wallet with a new limit.
@param the name of the wallet.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
request = {
'update': {
'limit': str(limit),
}
}
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='PATCH',
body=request,
timeout=self.timeout,
client=self._client) | [
"def",
"update_wallet",
"(",
"self",
",",
"wallet_name",
",",
"limit",
")",
":",
"request",
"=",
"{",
"'update'",
":",
"{",
"'limit'",
":",
"str",
"(",
"limit",
")",
",",
"}",
"}",
"return",
"make_request",
"(",
"'{}wallet/{}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"wallet_name",
")",
",",
"method",
"=",
"'PATCH'",
",",
"body",
"=",
"request",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Update a wallet with a new limit.
@param the name of the wallet.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request. | [
"Update",
"a",
"wallet",
"with",
"a",
"new",
"limit",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L144-L162 | train |
juju/theblues | theblues/plans.py | Plans.delete_wallet | def delete_wallet(self, wallet_name):
"""Delete a wallet.
@param the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='DELETE',
timeout=self.timeout,
client=self._client) | python | def delete_wallet(self, wallet_name):
"""Delete a wallet.
@param the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='DELETE',
timeout=self.timeout,
client=self._client) | [
"def",
"delete_wallet",
"(",
"self",
",",
"wallet_name",
")",
":",
"return",
"make_request",
"(",
"'{}wallet/{}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"wallet_name",
")",
",",
"method",
"=",
"'DELETE'",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Delete a wallet.
@param the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request. | [
"Delete",
"a",
"wallet",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L183-L194 | train |
juju/theblues | theblues/plans.py | Plans.create_budget | def create_budget(self, wallet_name, model_uuid, limit):
"""Create a new budget for a model and wallet.
@param the name of the wallet.
@param the model UUID.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
request = {
'model': model_uuid,
'limit': limit,
}
return make_request(
'{}wallet/{}/budget'.format(self.url, wallet_name),
method='POST',
body=request,
timeout=self.timeout,
client=self._client) | python | def create_budget(self, wallet_name, model_uuid, limit):
"""Create a new budget for a model and wallet.
@param the name of the wallet.
@param the model UUID.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
request = {
'model': model_uuid,
'limit': limit,
}
return make_request(
'{}wallet/{}/budget'.format(self.url, wallet_name),
method='POST',
body=request,
timeout=self.timeout,
client=self._client) | [
"def",
"create_budget",
"(",
"self",
",",
"wallet_name",
",",
"model_uuid",
",",
"limit",
")",
":",
"request",
"=",
"{",
"'model'",
":",
"model_uuid",
",",
"'limit'",
":",
"limit",
",",
"}",
"return",
"make_request",
"(",
"'{}wallet/{}/budget'",
".",
"format",
"(",
"self",
".",
"url",
",",
"wallet_name",
")",
",",
"method",
"=",
"'POST'",
",",
"body",
"=",
"request",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Create a new budget for a model and wallet.
@param the name of the wallet.
@param the model UUID.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request. | [
"Create",
"a",
"new",
"budget",
"for",
"a",
"model",
"and",
"wallet",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L196-L214 | train |
juju/theblues | theblues/plans.py | Plans.delete_budget | def delete_budget(self, model_uuid):
"""Delete a budget.
@param the model UUID.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
return make_request(
'{}model/{}/budget'.format(self.url, model_uuid),
method='DELETE',
timeout=self.timeout,
client=self._client) | python | def delete_budget(self, model_uuid):
"""Delete a budget.
@param the model UUID.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
return make_request(
'{}model/{}/budget'.format(self.url, model_uuid),
method='DELETE',
timeout=self.timeout,
client=self._client) | [
"def",
"delete_budget",
"(",
"self",
",",
"model_uuid",
")",
":",
"return",
"make_request",
"(",
"'{}model/{}/budget'",
".",
"format",
"(",
"self",
".",
"url",
",",
"model_uuid",
")",
",",
"method",
"=",
"'DELETE'",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Delete a budget.
@param the model UUID.
@return a success string from the plans server.
@raise ServerError via make_request. | [
"Delete",
"a",
"budget",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L238-L250 | train |
totalgood/pugnlp | src/pugnlp/stats.py | confusion | def confusion(df, labels=['neg', 'pos']):
""" Binary classification confusion """
c = pd.DataFrame(np.zeros((2, 2)), dtype=int)
a, b = df.columns[:2] # labels[df.columns[:2]]
c.columns = sorted(set(df[a]))[:2]
c.columns.name = a
c.index = list(c.columns)
c.index.name = b
c1, c2 = c.columns
c[c1][c1] = ((df[a] == c1) & (df[b] == c1)).sum()
c[c1][c2] = ((df[a] == c1) & (df[b] == c2)).sum()
c[c2][c2] = ((df[a] == c2) & (df[b] == c2)).sum()
c[c2][c1] = ((df[a] == c2) & (df[b] == c1)).sum()
return c | python | def confusion(df, labels=['neg', 'pos']):
""" Binary classification confusion """
c = pd.DataFrame(np.zeros((2, 2)), dtype=int)
a, b = df.columns[:2] # labels[df.columns[:2]]
c.columns = sorted(set(df[a]))[:2]
c.columns.name = a
c.index = list(c.columns)
c.index.name = b
c1, c2 = c.columns
c[c1][c1] = ((df[a] == c1) & (df[b] == c1)).sum()
c[c1][c2] = ((df[a] == c1) & (df[b] == c2)).sum()
c[c2][c2] = ((df[a] == c2) & (df[b] == c2)).sum()
c[c2][c1] = ((df[a] == c2) & (df[b] == c1)).sum()
return c | [
"def",
"confusion",
"(",
"df",
",",
"labels",
"=",
"[",
"'neg'",
",",
"'pos'",
"]",
")",
":",
"c",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"2",
")",
")",
",",
"dtype",
"=",
"int",
")",
"a",
",",
"b",
"=",
"df",
".",
"columns",
"[",
":",
"2",
"]",
"# labels[df.columns[:2]]",
"c",
".",
"columns",
"=",
"sorted",
"(",
"set",
"(",
"df",
"[",
"a",
"]",
")",
")",
"[",
":",
"2",
"]",
"c",
".",
"columns",
".",
"name",
"=",
"a",
"c",
".",
"index",
"=",
"list",
"(",
"c",
".",
"columns",
")",
"c",
".",
"index",
".",
"name",
"=",
"b",
"c1",
",",
"c2",
"=",
"c",
".",
"columns",
"c",
"[",
"c1",
"]",
"[",
"c1",
"]",
"=",
"(",
"(",
"df",
"[",
"a",
"]",
"==",
"c1",
")",
"&",
"(",
"df",
"[",
"b",
"]",
"==",
"c1",
")",
")",
".",
"sum",
"(",
")",
"c",
"[",
"c1",
"]",
"[",
"c2",
"]",
"=",
"(",
"(",
"df",
"[",
"a",
"]",
"==",
"c1",
")",
"&",
"(",
"df",
"[",
"b",
"]",
"==",
"c2",
")",
")",
".",
"sum",
"(",
")",
"c",
"[",
"c2",
"]",
"[",
"c2",
"]",
"=",
"(",
"(",
"df",
"[",
"a",
"]",
"==",
"c2",
")",
"&",
"(",
"df",
"[",
"b",
"]",
"==",
"c2",
")",
")",
".",
"sum",
"(",
")",
"c",
"[",
"c2",
"]",
"[",
"c1",
"]",
"=",
"(",
"(",
"df",
"[",
"a",
"]",
"==",
"c2",
")",
"&",
"(",
"df",
"[",
"b",
"]",
"==",
"c1",
")",
")",
".",
"sum",
"(",
")",
"return",
"c"
] | Binary classification confusion | [
"Binary",
"classification",
"confusion"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L227-L240 | train |
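A usage sketch for confusion; the column names are hypothetical, and the function treats the first column as truth and the second as prediction:

import pandas as pd

df = pd.DataFrame({'actual':    ['neg', 'neg', 'pos', 'pos', 'pos'],
                   'predicted': ['neg', 'pos', 'pos', 'pos', 'neg']})
print(confusion(df))
# Expect a 2x2 frame: 1 true neg, 1 false pos, 1 false neg, 2 true pos.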
totalgood/pugnlp | src/pugnlp/stats.py | thresh_from_spec | def thresh_from_spec(spec, labels, scores, **kwargs):
r"""Find the threshold level that accomplishes the desired specificity"""
cost_fun.verbose = kwargs.pop('verbose', cost_fun.verbose)
cost_fun.target = spec
return minimize(cost_fun,
x0=[.5],
args=(labels, scores),
method='SLSQP',
constraints=({'type': 'ineq',
'fun': lambda x: np.array([x[0]]),
'jac': lambda x: np.array([1.])},
),
**kwargs
) | python | def thresh_from_spec(spec, labels, scores, **kwargs):
r"""Find the threshold level that accomplishes the desired specificity"""
cost_fun.verbose = kwargs.pop('verbose', cost_fun.verbose)
cost_fun.target = spec
return minimize(cost_fun,
x0=[.5],
args=(labels, scores),
method='SLSQP',
constraints=({'type': 'ineq',
'fun': lambda x: np.array([x[0]]),
'jac': lambda x: np.array([1.])},
),
**kwargs
) | [
"def",
"thresh_from_spec",
"(",
"spec",
",",
"labels",
",",
"scores",
",",
"*",
"*",
"kwargs",
")",
":",
"cost_fun",
".",
"verbose",
"=",
"kwargs",
".",
"pop",
"(",
"'verbose'",
",",
"cost_fun",
".",
"verbose",
")",
"cost_fun",
".",
"target",
"=",
"spec",
"return",
"minimize",
"(",
"cost_fun",
",",
"x0",
"=",
"[",
".5",
"]",
",",
"args",
"=",
"(",
"labels",
",",
"scores",
")",
",",
"method",
"=",
"'SLSQP'",
",",
"constraints",
"=",
"(",
"{",
"'type'",
":",
"'ineq'",
",",
"'fun'",
":",
"lambda",
"x",
":",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"0",
"]",
"]",
")",
",",
"'jac'",
":",
"lambda",
"x",
":",
"np",
".",
"array",
"(",
"[",
"1.",
"]",
")",
"}",
",",
")",
",",
"*",
"*",
"kwargs",
")"
] | r"""Find the threshold level that accomplishes the desired specificity | [
"r",
"Find",
"the",
"threshold",
"level",
"that",
"accomplishes",
"the",
"desired",
"specificity"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L938-L951 | train |
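thresh_from_spec leans on a module-level cost_fun (with .verbose and .target attributes) defined elsewhere in stats.py, so this usage sketch assumes the module is imported whole; the labels and scores are made up:

import numpy as np

labels = np.array([0, 0, 0, 1, 1, 1])           # hypothetical ground truth
scores = np.array([.1, .4, .6, .35, .7, .9])    # hypothetical classifier scores
res = thresh_from_spec(0.9, labels, scores)     # aim for 0.9 specificity
print(res.x[0])                                 # SLSQP's candidate threshold

The return value is a scipy OptimizeResult, so the fitted threshold lives in res.x.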
stevearc/dynamo3 | dynamo3/result.py | add_dicts | def add_dicts(d1, d2):
""" Merge two dicts of addable values """
if d1 is None:
return d2
if d2 is None:
return d1
keys = set(d1)
keys.update(set(d2))
ret = {}
for key in keys:
v1 = d1.get(key)
v2 = d2.get(key)
if v1 is None:
ret[key] = v2
elif v2 is None:
ret[key] = v1
else:
ret[key] = v1 + v2
return ret | python | def add_dicts(d1, d2):
""" Merge two dicts of addable values """
if d1 is None:
return d2
if d2 is None:
return d1
keys = set(d1)
keys.update(set(d2))
ret = {}
for key in keys:
v1 = d1.get(key)
v2 = d2.get(key)
if v1 is None:
ret[key] = v2
elif v2 is None:
ret[key] = v1
else:
ret[key] = v1 + v2
return ret | [
"def",
"add_dicts",
"(",
"d1",
",",
"d2",
")",
":",
"if",
"d1",
"is",
"None",
":",
"return",
"d2",
"if",
"d2",
"is",
"None",
":",
"return",
"d1",
"keys",
"=",
"set",
"(",
"d1",
")",
"keys",
".",
"update",
"(",
"set",
"(",
"d2",
")",
")",
"ret",
"=",
"{",
"}",
"for",
"key",
"in",
"keys",
":",
"v1",
"=",
"d1",
".",
"get",
"(",
"key",
")",
"v2",
"=",
"d2",
".",
"get",
"(",
"key",
")",
"if",
"v1",
"is",
"None",
":",
"ret",
"[",
"key",
"]",
"=",
"v2",
"elif",
"v2",
"is",
"None",
":",
"ret",
"[",
"key",
"]",
"=",
"v1",
"else",
":",
"ret",
"[",
"key",
"]",
"=",
"v1",
"+",
"v2",
"return",
"ret"
] | Merge two dicts of addable values | [
"Merge",
"two",
"dicts",
"of",
"addable",
"values"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L7-L25 | train |
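add_dicts behaves like an elementwise merge-with-addition in which None acts as the identity:

print(add_dicts({'a': 1, 'b': 2}, {'b': 3, 'c': 4}))  # {'a': 1, 'b': 5, 'c': 4}
print(add_dicts(None, {'x': 1}))                      # {'x': 1}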
stevearc/dynamo3 | dynamo3/result.py | PagedIterator._update_capacity | def _update_capacity(self, data):
""" Update the consumed capacity metrics """
if 'ConsumedCapacity' in data:
# This is all for backwards compatibility
consumed = data['ConsumedCapacity']
if not isinstance(consumed, list):
consumed = [consumed]
for cap in consumed:
self.capacity += cap.get('CapacityUnits', 0)
self.table_capacity += cap.get('Table',
{}).get('CapacityUnits', 0)
local_indexes = cap.get('LocalSecondaryIndexes', {})
for k, v in six.iteritems(local_indexes):
self.indexes.setdefault(k, 0)
self.indexes[k] += v['CapacityUnits']
global_indexes = cap.get('GlobalSecondaryIndexes', {})
for k, v in six.iteritems(global_indexes):
self.global_indexes.setdefault(k, 0)
self.global_indexes[k] += v['CapacityUnits'] | python | def _update_capacity(self, data):
""" Update the consumed capacity metrics """
if 'ConsumedCapacity' in data:
# This is all for backwards compatibility
consumed = data['ConsumedCapacity']
if not isinstance(consumed, list):
consumed = [consumed]
for cap in consumed:
self.capacity += cap.get('CapacityUnits', 0)
self.table_capacity += cap.get('Table',
{}).get('CapacityUnits', 0)
local_indexes = cap.get('LocalSecondaryIndexes', {})
for k, v in six.iteritems(local_indexes):
self.indexes.setdefault(k, 0)
self.indexes[k] += v['CapacityUnits']
global_indexes = cap.get('GlobalSecondaryIndexes', {})
for k, v in six.iteritems(global_indexes):
self.global_indexes.setdefault(k, 0)
self.global_indexes[k] += v['CapacityUnits'] | [
"def",
"_update_capacity",
"(",
"self",
",",
"data",
")",
":",
"if",
"'ConsumedCapacity'",
"in",
"data",
":",
"# This is all for backwards compatibility",
"consumed",
"=",
"data",
"[",
"'ConsumedCapacity'",
"]",
"if",
"not",
"isinstance",
"(",
"consumed",
",",
"list",
")",
":",
"consumed",
"=",
"[",
"consumed",
"]",
"for",
"cap",
"in",
"consumed",
":",
"self",
".",
"capacity",
"+=",
"cap",
".",
"get",
"(",
"'CapacityUnits'",
",",
"0",
")",
"self",
".",
"table_capacity",
"+=",
"cap",
".",
"get",
"(",
"'Table'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'CapacityUnits'",
",",
"0",
")",
"local_indexes",
"=",
"cap",
".",
"get",
"(",
"'LocalSecondaryIndexes'",
",",
"{",
"}",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"local_indexes",
")",
":",
"self",
".",
"indexes",
".",
"setdefault",
"(",
"k",
",",
"0",
")",
"self",
".",
"indexes",
"[",
"k",
"]",
"+=",
"v",
"[",
"'CapacityUnits'",
"]",
"global_indexes",
"=",
"cap",
".",
"get",
"(",
"'GlobalSecondaryIndexes'",
",",
"{",
"}",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"global_indexes",
")",
":",
"self",
".",
"global_indexes",
".",
"setdefault",
"(",
"k",
",",
"0",
")",
"self",
".",
"global_indexes",
"[",
"k",
"]",
"+=",
"v",
"[",
"'CapacityUnits'",
"]"
] | Update the consumed capacity metrics | [
"Update",
"the",
"consumed",
"capacity",
"metrics"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L233-L251 | train |
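_update_capacity walks the ConsumedCapacity structure DynamoDB returns. The shape below is illustrative (the table/index names and values are invented) and shows which keys feed which counters:

data = {
    'ConsumedCapacity': {              # may also arrive as a list of these
        'TableName': 'mytable',        # hypothetical table
        'CapacityUnits': 6.0,          # -> self.capacity
        'Table': {'CapacityUnits': 3.0},                                  # -> self.table_capacity
        'LocalSecondaryIndexes': {'ts-index': {'CapacityUnits': 1.0}},    # -> self.indexes
        'GlobalSecondaryIndexes': {'all-index': {'CapacityUnits': 2.0}},  # -> self.global_indexes
    }
}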
stevearc/dynamo3 | dynamo3/result.py | ResultSet.fetch | def fetch(self):
""" Fetch more results from Dynamo """
self.limit.set_request_args(self.kwargs)
data = self.connection.call(*self.args, **self.kwargs)
self.limit.post_fetch(data)
self.last_evaluated_key = data.get('LastEvaluatedKey')
if self.last_evaluated_key is None:
self.kwargs.pop('ExclusiveStartKey', None)
else:
self.kwargs['ExclusiveStartKey'] = self.last_evaluated_key
self._update_capacity(data)
if 'consumed_capacity' in data:
self.consumed_capacity += data['consumed_capacity']
for raw_item in data['Items']:
item = self.connection.dynamizer.decode_keys(raw_item)
if self.limit.accept(item):
yield item | python | def fetch(self):
""" Fetch more results from Dynamo """
self.limit.set_request_args(self.kwargs)
data = self.connection.call(*self.args, **self.kwargs)
self.limit.post_fetch(data)
self.last_evaluated_key = data.get('LastEvaluatedKey')
if self.last_evaluated_key is None:
self.kwargs.pop('ExclusiveStartKey', None)
else:
self.kwargs['ExclusiveStartKey'] = self.last_evaluated_key
self._update_capacity(data)
if 'consumed_capacity' in data:
self.consumed_capacity += data['consumed_capacity']
for raw_item in data['Items']:
item = self.connection.dynamizer.decode_keys(raw_item)
if self.limit.accept(item):
yield item | [
"def",
"fetch",
"(",
"self",
")",
":",
"self",
".",
"limit",
".",
"set_request_args",
"(",
"self",
".",
"kwargs",
")",
"data",
"=",
"self",
".",
"connection",
".",
"call",
"(",
"*",
"self",
".",
"args",
",",
"*",
"*",
"self",
".",
"kwargs",
")",
"self",
".",
"limit",
".",
"post_fetch",
"(",
"data",
")",
"self",
".",
"last_evaluated_key",
"=",
"data",
".",
"get",
"(",
"'LastEvaluatedKey'",
")",
"if",
"self",
".",
"last_evaluated_key",
"is",
"None",
":",
"self",
".",
"kwargs",
".",
"pop",
"(",
"'ExclusiveStartKey'",
",",
"None",
")",
"else",
":",
"self",
".",
"kwargs",
"[",
"'ExclusiveStartKey'",
"]",
"=",
"self",
".",
"last_evaluated_key",
"self",
".",
"_update_capacity",
"(",
"data",
")",
"if",
"'consumed_capacity'",
"in",
"data",
":",
"self",
".",
"consumed_capacity",
"+=",
"data",
"[",
"'consumed_capacity'",
"]",
"for",
"raw_item",
"in",
"data",
"[",
"'Items'",
"]",
":",
"item",
"=",
"self",
".",
"connection",
".",
"dynamizer",
".",
"decode_keys",
"(",
"raw_item",
")",
"if",
"self",
".",
"limit",
".",
"accept",
"(",
"item",
")",
":",
"yield",
"item"
] | Fetch more results from Dynamo | [
"Fetch",
"more",
"results",
"from",
"Dynamo"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L288-L304 | train |
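fetch implements the standard DynamoDB pagination contract: feed LastEvaluatedKey back as ExclusiveStartKey until it stops coming back. The same loop written directly against a boto3 client (table name hypothetical):

import boto3

client = boto3.client('dynamodb')
kwargs = {'TableName': 'mytable'}
while True:
    page = client.scan(**kwargs)
    for raw_item in page['Items']:
        pass  # decode/process each raw item here
    last_key = page.get('LastEvaluatedKey')
    if last_key is None:
        break                          # no more pages
    kwargs['ExclusiveStartKey'] = last_key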
stevearc/dynamo3 | dynamo3/result.py | GetResultSet.build_kwargs | def build_kwargs(self):
""" Construct the kwargs to pass to batch_get_item """
keys, self.keys = self.keys[:MAX_GET_BATCH], self.keys[MAX_GET_BATCH:]
query = {'ConsistentRead': self.consistent}
if self.attributes is not None:
query['ProjectionExpression'] = self.attributes
if self.alias:
query['ExpressionAttributeNames'] = self.alias
query['Keys'] = keys
return {
'RequestItems': {
self.tablename: query,
},
'ReturnConsumedCapacity': self.return_capacity,
} | python | def build_kwargs(self):
""" Construct the kwargs to pass to batch_get_item """
keys, self.keys = self.keys[:MAX_GET_BATCH], self.keys[MAX_GET_BATCH:]
query = {'ConsistentRead': self.consistent}
if self.attributes is not None:
query['ProjectionExpression'] = self.attributes
if self.alias:
query['ExpressionAttributeNames'] = self.alias
query['Keys'] = keys
return {
'RequestItems': {
self.tablename: query,
},
'ReturnConsumedCapacity': self.return_capacity,
} | [
"def",
"build_kwargs",
"(",
"self",
")",
":",
"keys",
",",
"self",
".",
"keys",
"=",
"self",
".",
"keys",
"[",
":",
"MAX_GET_BATCH",
"]",
",",
"self",
".",
"keys",
"[",
"MAX_GET_BATCH",
":",
"]",
"query",
"=",
"{",
"'ConsistentRead'",
":",
"self",
".",
"consistent",
"}",
"if",
"self",
".",
"attributes",
"is",
"not",
"None",
":",
"query",
"[",
"'ProjectionExpression'",
"]",
"=",
"self",
".",
"attributes",
"if",
"self",
".",
"alias",
":",
"query",
"[",
"'ExpressionAttributeNames'",
"]",
"=",
"self",
".",
"alias",
"query",
"[",
"'Keys'",
"]",
"=",
"keys",
"return",
"{",
"'RequestItems'",
":",
"{",
"self",
".",
"tablename",
":",
"query",
",",
"}",
",",
"'ReturnConsumedCapacity'",
":",
"self",
".",
"return_capacity",
",",
"}"
] | Construct the kwargs to pass to batch_get_item | [
"Construct",
"the",
"kwargs",
"to",
"pass",
"to",
"batch_get_item"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L331-L345 | train |
stevearc/dynamo3 | dynamo3/result.py | GetResultSet.fetch | def fetch(self):
""" Fetch a set of items from their keys """
kwargs = self.build_kwargs()
data = self.connection.call('batch_get_item', **kwargs)
if 'UnprocessedKeys' in data:
for items in six.itervalues(data['UnprocessedKeys']):
self.keys.extend(items['Keys'])
# Getting UnprocessedKeys indicates that we are exceeding our
# throughput. So sleep for a bit.
self._attempt += 1
self.connection.exponential_sleep(self._attempt)
else:
# No UnprocessedKeys means our request rate is fine, so we can
# reset the attempt number.
self._attempt = 0
self._update_capacity(data)
if 'consumed_capacity' in data:
# Comes back as a list from BatchWriteItem
self.consumed_capacity = \
sum(data['consumed_capacity'], self.consumed_capacity)
return iter(data['Responses'][self.tablename]) | python | def fetch(self):
""" Fetch a set of items from their keys """
kwargs = self.build_kwargs()
data = self.connection.call('batch_get_item', **kwargs)
if 'UnprocessedKeys' in data:
for items in six.itervalues(data['UnprocessedKeys']):
self.keys.extend(items['Keys'])
# Getting UnprocessedKeys indicates that we are exceeding our
# throughput. So sleep for a bit.
self._attempt += 1
self.connection.exponential_sleep(self._attempt)
else:
# No UnprocessedKeys means our request rate is fine, so we can
# reset the attempt number.
self._attempt = 0
self._update_capacity(data)
if 'consumed_capacity' in data:
# Comes back as a list from BatchWriteItem
self.consumed_capacity = \
sum(data['consumed_capacity'], self.consumed_capacity)
return iter(data['Responses'][self.tablename]) | [
"def",
"fetch",
"(",
"self",
")",
":",
"kwargs",
"=",
"self",
".",
"build_kwargs",
"(",
")",
"data",
"=",
"self",
".",
"connection",
".",
"call",
"(",
"'batch_get_item'",
",",
"*",
"*",
"kwargs",
")",
"if",
"'UnprocessedKeys'",
"in",
"data",
":",
"for",
"items",
"in",
"six",
".",
"itervalues",
"(",
"data",
"[",
"'UnprocessedKeys'",
"]",
")",
":",
"self",
".",
"keys",
".",
"extend",
"(",
"items",
"[",
"'Keys'",
"]",
")",
"# Getting UnprocessedKeys indicates that we are exceeding our",
"# throughput. So sleep for a bit.",
"self",
".",
"_attempt",
"+=",
"1",
"self",
".",
"connection",
".",
"exponential_sleep",
"(",
"self",
".",
"_attempt",
")",
"else",
":",
"# No UnprocessedKeys means our request rate is fine, so we can",
"# reset the attempt number.",
"self",
".",
"_attempt",
"=",
"0",
"self",
".",
"_update_capacity",
"(",
"data",
")",
"if",
"'consumed_capacity'",
"in",
"data",
":",
"# Comes back as a list from BatchWriteItem",
"self",
".",
"consumed_capacity",
"=",
"sum",
"(",
"data",
"[",
"'consumed_capacity'",
"]",
",",
"self",
".",
"consumed_capacity",
")",
"return",
"iter",
"(",
"data",
"[",
"'Responses'",
"]",
"[",
"self",
".",
"tablename",
"]",
")"
] | Fetch a set of items from their keys | [
"Fetch",
"a",
"set",
"of",
"items",
"from",
"their",
"keys"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L347-L367 | train |
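The UnprocessedKeys handling above backs off through connection.exponential_sleep, whose body is not shown in this excerpt; a plausible sketch (the base and cap values are guesses, not the library's):

import time

def exponential_sleep(attempt, base=0.5, cap=30.0):
    # Sleep base * 2**attempt seconds, capped; attempt 0 means no sleep.
    if attempt > 0:
        time.sleep(min(cap, base * (2 ** attempt)))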
stevearc/dynamo3 | dynamo3/result.py | Limit.copy | def copy(self):
""" Return a copy of the limit """
return Limit(self.scan_limit, self.item_limit, self.min_scan_limit,
self.strict, self.filter) | python | def copy(self):
""" Return a copy of the limit """
return Limit(self.scan_limit, self.item_limit, self.min_scan_limit,
self.strict, self.filter) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"Limit",
"(",
"self",
".",
"scan_limit",
",",
"self",
".",
"item_limit",
",",
"self",
".",
"min_scan_limit",
",",
"self",
".",
"strict",
",",
"self",
".",
"filter",
")"
] | Return a copy of the limit | [
"Return",
"a",
"copy",
"of",
"the",
"limit"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L494-L497 | train |
stevearc/dynamo3 | dynamo3/result.py | Limit.set_request_args | def set_request_args(self, args):
""" Set the Limit parameter into the request args """
if self.scan_limit is not None:
args['Limit'] = self.scan_limit
elif self.item_limit is not None:
args['Limit'] = max(self.item_limit, self.min_scan_limit)
else:
args.pop('Limit', None) | python | def set_request_args(self, args):
""" Set the Limit parameter into the request args """
if self.scan_limit is not None:
args['Limit'] = self.scan_limit
elif self.item_limit is not None:
args['Limit'] = max(self.item_limit, self.min_scan_limit)
else:
args.pop('Limit', None) | [
"def",
"set_request_args",
"(",
"self",
",",
"args",
")",
":",
"if",
"self",
".",
"scan_limit",
"is",
"not",
"None",
":",
"args",
"[",
"'Limit'",
"]",
"=",
"self",
".",
"scan_limit",
"elif",
"self",
".",
"item_limit",
"is",
"not",
"None",
":",
"args",
"[",
"'Limit'",
"]",
"=",
"max",
"(",
"self",
".",
"item_limit",
",",
"self",
".",
"min_scan_limit",
")",
"else",
":",
"args",
".",
"pop",
"(",
"'Limit'",
",",
"None",
")"
] | Set the Limit parameter into the request args | [
"Set",
"the",
"Limit",
"parameter",
"into",
"the",
"request",
"args"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L499-L506 | train |
stevearc/dynamo3 | dynamo3/result.py | Limit.complete | def complete(self):
""" Return True if the limit has been reached """
if self.scan_limit is not None and self.scan_limit == 0:
return True
if self.item_limit is not None and self.item_limit == 0:
return True
return False | python | def complete(self):
""" Return True if the limit has been reached """
if self.scan_limit is not None and self.scan_limit == 0:
return True
if self.item_limit is not None and self.item_limit == 0:
return True
return False | [
"def",
"complete",
"(",
"self",
")",
":",
"if",
"self",
".",
"scan_limit",
"is",
"not",
"None",
"and",
"self",
".",
"scan_limit",
"==",
"0",
":",
"return",
"True",
"if",
"self",
".",
"item_limit",
"is",
"not",
"None",
"and",
"self",
".",
"item_limit",
"==",
"0",
":",
"return",
"True",
"return",
"False"
] | Return True if the limit has been reached | [
"Return",
"True",
"if",
"the",
"limit",
"has",
"been",
"reached"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L509-L515 | train |
stevearc/dynamo3 | dynamo3/result.py | Limit.accept | def accept(self, item):
""" Apply the filter and item_limit, and return True to accept """
accept = self.filter(item)
if accept and self.item_limit is not None:
if self.item_limit > 0:
self.item_limit -= 1
elif self.strict:
return False
return accept | python | def accept(self, item):
""" Apply the filter and item_limit, and return True to accept """
accept = self.filter(item)
if accept and self.item_limit is not None:
if self.item_limit > 0:
self.item_limit -= 1
elif self.strict:
return False
return accept | [
"def",
"accept",
"(",
"self",
",",
"item",
")",
":",
"accept",
"=",
"self",
".",
"filter",
"(",
"item",
")",
"if",
"accept",
"and",
"self",
".",
"item_limit",
"is",
"not",
"None",
":",
"if",
"self",
".",
"item_limit",
">",
"0",
":",
"self",
".",
"item_limit",
"-=",
"1",
"elif",
"self",
".",
"strict",
":",
"return",
"False",
"return",
"accept"
] | Apply the filter and item_limit, and return True to accept | [
"Apply",
"the",
"filter",
"and",
"item_limit",
"and",
"return",
"True",
"to",
"accept"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L522-L530 | train |
aht/stream.py | example/randwalk.py | returned | def returned(n):
"""Generate a random walk and return True if the walker has returned to
the origin after taking `n` steps.
"""
## `takei` yields lazily so we can short-circuit and avoid computing the rest of the walk
for pos in randwalk() >> drop(1) >> takei(xrange(n-1)):
if pos == Origin:
return True
return False | python | def returned(n):
"""Generate a random walk and return True if the walker has returned to
the origin after taking `n` steps.
"""
## `takei` yields lazily so we can short-circuit and avoid computing the rest of the walk
for pos in randwalk() >> drop(1) >> takei(xrange(n-1)):
if pos == Origin:
return True
return False | [
"def",
"returned",
"(",
"n",
")",
":",
"## `takei` yield lazily so we can short-circuit and avoid computing the rest of the walk",
"for",
"pos",
"in",
"randwalk",
"(",
")",
">>",
"drop",
"(",
"1",
")",
">>",
"takei",
"(",
"xrange",
"(",
"n",
"-",
"1",
")",
")",
":",
"if",
"pos",
"==",
"Origin",
":",
"return",
"True",
"return",
"False"
] | Generate a random walk and return True if the walker has returned to
the origin after taking `n` steps. | [
"Generate",
"a",
"random",
"walk",
"and",
"return",
"True",
"if",
"the",
"walker",
"has",
"returned",
"to",
"the",
"origin",
"after",
"taking",
"n",
"steps",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/example/randwalk.py#L16-L24 | train |
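Because returned short-circuits as soon as the walker is back at the origin, it is cheap enough to Monte-Carlo the return probability; this sketch assumes the surrounding randwalk example module (randwalk, Origin, and the stream.py operators) is importable:

trials = 1000
p_return = sum(returned(10) for _ in range(trials)) / float(trials)
print(p_return)  # empirical probability of returning to Origin within 10 steps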
aht/stream.py | example/randwalk.py | first_return | def first_return():
"""Generate a random walk and return its length upto the moment
that the walker first returns to the origin.
It is mathematically provable that the walker will eventually return,
meaning that the function call will halt, although it may take
a *very* long time and your computer may run out of memory!
Thus, try this interactively only.
"""
walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
return len(walk) | python | def first_return():
"""Generate a random walk and return its length upto the moment
that the walker first returns to the origin.
It is mathematically provable that the walker will eventually return,
meaning that the function call will halt, although it may take
a *very* long time and your computer may run out of memory!
Thus, try this interactively only.
"""
walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
return len(walk) | [
"def",
"first_return",
"(",
")",
":",
"walk",
"=",
"randwalk",
"(",
")",
">>",
"drop",
"(",
"1",
")",
">>",
"takewhile",
"(",
"lambda",
"v",
":",
"v",
"!=",
"Origin",
")",
">>",
"list",
"return",
"len",
"(",
"walk",
")"
] | Generate a random walk and return its length up to the moment
that the walker first returns to the origin.
It is mathematically provable that the walker will eventually return,
meaning that the function call will halt, although it may take
a *very* long time and your computer may run out of memory!
Thus, try this interactively only. | [
"Generate",
"a",
"random",
"walk",
"and",
"return",
"its",
"length",
"upto",
"the",
"moment",
"that",
"the",
"walker",
"first",
"returns",
"to",
"the",
"origin",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/example/randwalk.py#L26-L36 | train |
aht/stream.py | stream.py | seq | def seq(start=0, step=1):
"""An arithmetic sequence generator. Works with any type with + defined.
>>> seq(1, 0.25) >> item[:10]
[1, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25]
"""
def seq(a, d):
while 1:
yield a
a += d
return seq(start, step) | python | def seq(start=0, step=1):
"""An arithmetic sequence generator. Works with any type with + defined.
>>> seq(1, 0.25) >> item[:10]
[1, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25]
"""
def seq(a, d):
while 1:
yield a
a += d
return seq(start, step) | [
"def",
"seq",
"(",
"start",
"=",
"0",
",",
"step",
"=",
"1",
")",
":",
"def",
"seq",
"(",
"a",
",",
"d",
")",
":",
"while",
"1",
":",
"yield",
"a",
"a",
"+=",
"d",
"return",
"seq",
"(",
"start",
",",
"step",
")"
] | An arithmetic sequence generator. Works with any type with + defined.
>>> seq(1, 0.25) >> item[:10]
[1, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25] | [
"An",
"arithmetic",
"sequence",
"generator",
".",
"Works",
"with",
"any",
"type",
"with",
"+",
"defined",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L1194-L1204 | train |
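Because `seq` returns a plain generator, stdlib consumption works alongside the module's `>>` operators. A minimal sketch (assuming `seq` is imported from this module):

# Assumes: from stream import seq
from itertools import islice
print(list(islice(seq(10, 5), 4)))  # [10, 15, 20, 25]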
aht/stream.py | stream.py | Stream.pipe | def pipe(inpipe, outpipe):
"""Connect inpipe and outpipe. If outpipe is not a Stream instance,
it should be a function callable on an iterable.
"""
if hasattr(outpipe, '__pipe__'):
return outpipe.__pipe__(inpipe)
elif hasattr(outpipe, '__call__'):
return outpipe(inpipe)
else:
raise BrokenPipe('No connection mechanism defined') | python | def pipe(inpipe, outpipe):
"""Connect inpipe and outpipe. If outpipe is not a Stream instance,
it should be a function callable on an iterable.
"""
if hasattr(outpipe, '__pipe__'):
return outpipe.__pipe__(inpipe)
elif hasattr(outpipe, '__call__'):
return outpipe(inpipe)
else:
raise BrokenPipe('No connection mechanism defined') | [
"def",
"pipe",
"(",
"inpipe",
",",
"outpipe",
")",
":",
"if",
"hasattr",
"(",
"outpipe",
",",
"'__pipe__'",
")",
":",
"return",
"outpipe",
".",
"__pipe__",
"(",
"inpipe",
")",
"elif",
"hasattr",
"(",
"outpipe",
",",
"'__call__'",
")",
":",
"return",
"outpipe",
"(",
"inpipe",
")",
"else",
":",
"raise",
"BrokenPipe",
"(",
"'No connection mechanism defined'",
")"
] | Connect inpipe and outpipe. If outpipe is not a Stream instance,
it should be a function callable on an iterable. | [
"Connect",
"inpipe",
"and",
"outpipe",
".",
"If",
"outpipe",
"is",
"not",
"a",
"Stream",
"instance",
"it",
"should",
"be",
"an",
"function",
"callable",
"on",
"an",
"iterable",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L170-L179 | train |
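The dispatch rule: an outpipe exposing `__pipe__` is handed the inpipe; any other callable is simply applied to it. A sketch with assumed stand-ins (only `Stream` comes from this module, and calling `Stream.pipe` directly assumes it is exposed as a static method):

# Assumes: from stream import Stream
class Doubler:
    def __pipe__(self, inpipe):  # Stream-like: consumes the incoming iterable
        return (x * 2 for x in inpipe)

doubled = Stream.pipe([1, 2, 3], Doubler())  # dispatched via __pipe__
print(Stream.pipe(doubled, sum))             # plain callable outpipe -> 12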
aht/stream.py | stream.py | Executor.submit | def submit(self, *items):
"""Return job ids assigned to the submitted items."""
with self.lock:
if self.closed:
raise BrokenPipe('Job submission has been closed.')
id = self.jobcount
self._status += ['SUBMITTED'] * len(items)
self.jobcount += len(items)
for item in items:
self.waitqueue.put((id, item))
id += 1
if len(items) == 1:
return id - 1
else:
return range(id - len(items), id) | python | def submit(self, *items):
"""Return job ids assigned to the submitted items."""
with self.lock:
if self.closed:
raise BrokenPipe('Job submission has been closed.')
id = self.jobcount
self._status += ['SUBMITTED'] * len(items)
self.jobcount += len(items)
for item in items:
self.waitqueue.put((id, item))
id += 1
if len(items) == 1:
return id - 1
else:
return range(id - len(items), id) | [
"def",
"submit",
"(",
"self",
",",
"*",
"items",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"BrokenPipe",
"(",
"'Job submission has been closed.'",
")",
"id",
"=",
"self",
".",
"jobcount",
"self",
".",
"_status",
"+=",
"[",
"'SUBMITTED'",
"]",
"*",
"len",
"(",
"items",
")",
"self",
".",
"jobcount",
"+=",
"len",
"(",
"items",
")",
"for",
"item",
"in",
"items",
":",
"self",
".",
"waitqueue",
".",
"put",
"(",
"(",
"id",
",",
"item",
")",
")",
"id",
"+=",
"1",
"if",
"len",
"(",
"items",
")",
"==",
"1",
":",
"return",
"id",
"-",
"1",
"else",
":",
"return",
"range",
"(",
"id",
"-",
"len",
"(",
"items",
")",
",",
"id",
")"
] | Return job ids assigned to the submitted items. | [
"Return",
"job",
"ids",
"assigned",
"to",
"the",
"submitted",
"items",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L980-L994 | train |
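The return value is just arithmetic over `jobcount`; a standalone illustration of the id bookkeeping (no real Executor involved):

# With 2 jobs already submitted, a 3-item batch is assigned ids 2, 3 and 4.
jobcount = 2
items = ('a', 'b', 'c')
print(list(range(jobcount, jobcount + len(items))))  # [2, 3, 4]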
aht/stream.py | stream.py | Executor.cancel | def cancel(self, *ids):
"""Try to cancel jobs with associated ids.
Return the actual number of jobs cancelled.
"""
ncancelled = 0
with self.lock:
for id in ids:
try:
if self._status[id] == 'SUBMITTED':
self._status[id] = 'CANCELLED'
ncancelled += 1
except IndexError:
pass
return ncancelled | python | def cancel(self, *ids):
"""Try to cancel jobs with associated ids.
Return the actual number of jobs cancelled.
"""
ncancelled = 0
with self.lock:
for id in ids:
try:
if self._status[id] == 'SUBMITTED':
self._status[id] = 'CANCELLED'
ncancelled += 1
except IndexError:
pass
return ncancelled | [
"def",
"cancel",
"(",
"self",
",",
"*",
"ids",
")",
":",
"ncancelled",
"=",
"0",
"with",
"self",
".",
"lock",
":",
"for",
"id",
"in",
"ids",
":",
"try",
":",
"if",
"self",
".",
"_status",
"[",
"id",
"]",
"==",
"'SUBMITTED'",
":",
"self",
".",
"_status",
"[",
"id",
"]",
"=",
"'CANCELLED'",
"ncancelled",
"+=",
"1",
"except",
"IndexError",
":",
"pass",
"return",
"ncancelled"
] | Try to cancel jobs with associated ids.
Return the actual number of jobs cancelled. | [
"Try",
"to",
"cancel",
"jobs",
"with",
"associated",
"ids",
".",
"Return",
"the",
"actual",
"number",
"of",
"jobs",
"cancelled",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L996-L1010 | train |
aht/stream.py | stream.py | Executor.shutdown | def shutdown(self):
"""Shut down the Executor. Suspend all waiting jobs.
Running workers will terminate after finishing their current job items.
The call will block until all workers are terminated.
"""
with self.lock:
self.pool.inqueue.put(StopIteration) # Stop the pool workers
self.waitqueue.put(StopIteration) # Stop the input_feeder
_iterqueue(self.waitqueue) >> item[-1] # Exhaust the waitqueue
self.closed = True
self.join() | python | def shutdown(self):
"""Shut down the Executor. Suspend all waiting jobs.
Running workers will terminate after finishing their current job items.
The call will block until all workers are terminated.
"""
with self.lock:
self.pool.inqueue.put(StopIteration) # Stop the pool workers
self.waitqueue.put(StopIteration) # Stop the input_feeder
_iterqueue(self.waitqueue) >> item[-1] # Exhaust the waitqueue
self.closed = True
self.join() | [
"def",
"shutdown",
"(",
"self",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"pool",
".",
"inqueue",
".",
"put",
"(",
"StopIteration",
")",
"# Stop the pool workers",
"self",
".",
"waitqueue",
".",
"put",
"(",
"StopIteration",
")",
"# Stop the input_feeder",
"_iterqueue",
"(",
"self",
".",
"waitqueue",
")",
">>",
"item",
"[",
"-",
"1",
"]",
"# Exhaust the waitqueue",
"self",
".",
"closed",
"=",
"True",
"self",
".",
"join",
"(",
")"
] | Shut down the Executor. Suspend all waiting jobs.
Running workers will terminate after finishing their current job items.
The call will block until all workers are terminated. | [
"Shut",
"down",
"the",
"Executor",
".",
"Suspend",
"all",
"waiting",
"jobs",
".",
"Running",
"workers",
"will",
"terminate",
"after",
"finishing",
"their",
"current",
"job",
"items",
".",
"The",
"call",
"will",
"block",
"until",
"all",
"workers",
"are",
"terminated",
"."
] | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L1045-L1056 | train |
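`shutdown` relies on a sentinel-and-drain pattern over queues; a minimal re-statement in Python 3 spelling (the original codebase is Python 2):

from queue import Queue

q = Queue()
for job in ('a', 'b'):
    q.put(job)
q.put(StopIteration)  # sentinel marks the end of work

while True:
    job = q.get()
    if job is StopIteration:
        break  # consumer terminates once it sees the sentinel
    print('processing', job)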
Zephrys/monica | monica/monica.py | main | def main():
'''monica helps you order food from the timeline'''
arguments = docopt(__doc__, version=__version__)
if arguments['configure'] and flag:
configure()
if arguments['cuisine']:
if arguments['list']:
cuisine('list')
else:
cuisine(arguments['<cuisine-id>'])
elif arguments['surprise']:
surprise()
elif arguments['reviews']:
reviews(arguments['<restaurant-id>'])
elif arguments['search']:
search(arguments['QUERY'])
elif arguments['budget']:
try:
money = arguments['<budget>']
money = float(money)
budget(money)
except:
print 'Budget should be a number!'
elif arguments['restaurant']:
restaurant(arguments['<restaurant-id>'])
else:
print (__doc__) | python | def main():
'''monica helps you order food from the timeline'''
arguments = docopt(__doc__, version=__version__)
if arguments['configure'] and flag:
configure()
if arguments['cuisine']:
if arguments['list']:
cuisine('list')
else:
cuisine(arguments['<cuisine-id>'])
elif arguments['surprise']:
surprise()
elif arguments['reviews']:
reviews(arguments['<restaurant-id>'])
elif arguments['search']:
search(arguments['QUERY'])
elif arguments['budget']:
try:
money = arguments['<budget>']
money = float(money)
budget(money)
except:
print 'Budget should be a number!'
elif arguments['restaurant']:
restaurant(arguments['<restaurant-id>'])
else:
print (__doc__) | [
"def",
"main",
"(",
")",
":",
"arguments",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"__version__",
")",
"if",
"arguments",
"[",
"'configure'",
"]",
"and",
"flag",
":",
"configure",
"(",
")",
"if",
"arguments",
"[",
"'cuisine'",
"]",
":",
"if",
"arguments",
"[",
"'list'",
"]",
":",
"cuisine",
"(",
"'list'",
")",
"else",
":",
"cuisine",
"(",
"arguments",
"[",
"'<cuisine-id>'",
"]",
")",
"elif",
"arguments",
"[",
"'surprise'",
"]",
":",
"surprise",
"(",
")",
"elif",
"arguments",
"[",
"'reviews'",
"]",
":",
"reviews",
"(",
"arguments",
"[",
"'<restaurant-id>'",
"]",
")",
"elif",
"arguments",
"[",
"'search'",
"]",
":",
"search",
"(",
"arguments",
"[",
"'QUERY'",
"]",
")",
"elif",
"arguments",
"[",
"'budget'",
"]",
":",
"try",
":",
"money",
"=",
"arguments",
"[",
"'<budget>'",
"]",
"money",
"=",
"float",
"(",
"money",
")",
"budget",
"(",
"money",
")",
"except",
":",
"print",
"'Budget should be a number!'",
"elif",
"arguments",
"[",
"'restaurant'",
"]",
":",
"restaurant",
"(",
"arguments",
"[",
"'<restaurant-id>'",
"]",
")",
"else",
":",
"print",
"(",
"__doc__",
")"
] | monica helps you order food from the timeline | [
"monica",
"helps",
"you",
"order",
"food",
"from",
"the",
"timeline"
] | ff0bc7df18d86ad66af6c655cdd292ddceb84fd7 | https://github.com/Zephrys/monica/blob/ff0bc7df18d86ad66af6c655cdd292ddceb84fd7/monica/monica.py#L214-L241 | train |
alphagov/performanceplatform-collector | setup.py | _get_requirements | def _get_requirements(fname):
"""
Create a list of requirements from the output of the pip freeze command
saved in a text file.
"""
packages = _read(fname).split('\n')
packages = (p.strip() for p in packages)
packages = (p for p in packages if p and not p.startswith('#'))
return list(packages) | python | def _get_requirements(fname):
"""
Create a list of requirements from the output of the pip freeze command
saved in a text file.
"""
packages = _read(fname).split('\n')
packages = (p.strip() for p in packages)
packages = (p for p in packages if p and not p.startswith('#'))
return list(packages) | [
"def",
"_get_requirements",
"(",
"fname",
")",
":",
"packages",
"=",
"_read",
"(",
"fname",
")",
".",
"split",
"(",
"'\\n'",
")",
"packages",
"=",
"(",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"packages",
")",
"packages",
"=",
"(",
"p",
"for",
"p",
"in",
"packages",
"if",
"p",
"and",
"not",
"p",
".",
"startswith",
"(",
"'#'",
")",
")",
"return",
"list",
"(",
"packages",
")"
] | Create a list of requirements from the output of the pip freeze command
saved in a text file. | [
"Create",
"a",
"list",
"of",
"requirements",
"from",
"the",
"output",
"of",
"the",
"pip",
"freeze",
"command",
"saved",
"in",
"a",
"text",
"file",
"."
] | de68ab4aa500c31e436e050fa1268fa928c522a5 | https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/setup.py#L41-L49 | train |
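A standalone re-run of the same filtering, with `_read()` replaced by a literal string:

text = "requests==2.0\n\n# a comment\n flask \n"
packages = (p.strip() for p in text.split('\n'))
print([p for p in packages if p and not p.startswith('#')])
# -> ['requests==2.0', 'flask']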
BertrandBordage/django-terms | terms/cms_plugin_processors.py | TermsProcessor | def TermsProcessor(instance, placeholder, rendered_content, original_context):
"""
Adds links to all placeholder plugins except django-terms plugins
"""
if 'terms' in original_context:
return rendered_content
return mark_safe(replace_terms(rendered_content)) | python | def TermsProcessor(instance, placeholder, rendered_content, original_context):
"""
Adds links to all placeholder plugins except django-terms plugins
"""
if 'terms' in original_context:
return rendered_content
return mark_safe(replace_terms(rendered_content)) | [
"def",
"TermsProcessor",
"(",
"instance",
",",
"placeholder",
",",
"rendered_content",
",",
"original_context",
")",
":",
"if",
"'terms'",
"in",
"original_context",
":",
"return",
"rendered_content",
"return",
"mark_safe",
"(",
"replace_terms",
"(",
"rendered_content",
")",
")"
] | Adds links to all placeholder plugins except django-terms plugins | [
"Adds",
"links",
"all",
"placeholders",
"plugins",
"except",
"django",
"-",
"terms",
"plugins"
] | 2555c2cf5abf14adef9a8e2dd22c4a9076396a10 | https://github.com/BertrandBordage/django-terms/blob/2555c2cf5abf14adef9a8e2dd22c4a9076396a10/terms/cms_plugin_processors.py#L7-L14 | train |
consbio/ncdjango | ncdjango/models.py | Variable.time_stops | def time_stops(self):
""" Valid time steps for this service as a list of datetime objects. """
if not self.supports_time:
return []
if self.service.calendar == 'standard':
units = self.service.time_interval_units
interval = self.service.time_interval
steps = [self.time_start]
if units in ('years', 'decades', 'centuries'):
if units == 'years':
years = interval
elif units == 'decades':
years = 10 * interval
else:
years = 100 * interval
next_value = lambda x: x.replace(year=x.year + years)
elif units == 'months':
def _fn(x):
year = x.year + (x.month+interval-1) // 12
month = (x.month+interval) % 12 or 12
day = min(x.day, calendar.monthrange(year, month)[1])
return x.replace(year=year, month=month, day=day)
next_value = _fn
else:
if units == 'milliseconds':
delta = timedelta(milliseconds=interval)
elif units == 'seconds':
delta = timedelta(seconds=interval)
elif units == 'minutes':
delta = timedelta(minutes=interval)
elif units == 'hours':
delta = timedelta(hours=interval)
elif units == 'days':
delta = timedelta(days=interval)
elif units == 'weeks':
delta = timedelta(weeks=interval)
else:
raise ValidationError(
"Service has an invalid time_interval_units: {}".format(self.service.time_interval_units)
)
next_value = lambda x: x + delta
while steps[-1] < self.time_end:
value = next_value(steps[-1])
if value > self.time_end:
break
steps.append(value)
return steps
else:
# TODO
raise NotImplementedError | python | def time_stops(self):
""" Valid time steps for this service as a list of datetime objects. """
if not self.supports_time:
return []
if self.service.calendar == 'standard':
units = self.service.time_interval_units
interval = self.service.time_interval
steps = [self.time_start]
if units in ('years', 'decades', 'centuries'):
if units == 'years':
years = interval
elif units == 'decades':
years = 10 * interval
else:
years = 100 * interval
next_value = lambda x: x.replace(year=x.year + years)
elif units == 'months':
def _fn(x):
year = x.year + (x.month+interval-1) // 12
month = (x.month+interval) % 12 or 12
day = min(x.day, calendar.monthrange(year, month)[1])
return x.replace(year=year, month=month, day=day)
next_value = _fn
else:
if units == 'milliseconds':
delta = timedelta(milliseconds=interval)
elif units == 'seconds':
delta = timedelta(seconds=interval)
elif units == 'minutes':
delta = timedelta(minutes=interval)
elif units == 'hours':
delta = timedelta(hours=interval)
elif units == 'days':
delta = timedelta(days=interval)
elif units == 'weeks':
delta = timedelta(weeks=interval)
else:
raise ValidationError(
"Service has an invalid time_interval_units: {}".format(self.service.time_interval_units)
)
next_value = lambda x: x + delta
while steps[-1] < self.time_end:
value = next_value(steps[-1])
if value > self.time_end:
break
steps.append(value)
return steps
else:
# TODO
raise NotImplementedError | [
"def",
"time_stops",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_time",
":",
"return",
"[",
"]",
"if",
"self",
".",
"service",
".",
"calendar",
"==",
"'standard'",
":",
"units",
"=",
"self",
".",
"service",
".",
"time_interval_units",
"interval",
"=",
"self",
".",
"service",
".",
"time_interval",
"steps",
"=",
"[",
"self",
".",
"time_start",
"]",
"if",
"units",
"in",
"(",
"'years'",
",",
"'decades'",
",",
"'centuries'",
")",
":",
"if",
"units",
"==",
"'years'",
":",
"years",
"=",
"interval",
"elif",
"units",
"==",
"'decades'",
":",
"years",
"=",
"10",
"*",
"interval",
"else",
":",
"years",
"=",
"100",
"*",
"interval",
"next_value",
"=",
"lambda",
"x",
":",
"x",
".",
"replace",
"(",
"year",
"=",
"x",
".",
"year",
"+",
"years",
")",
"elif",
"units",
"==",
"'months'",
":",
"def",
"_fn",
"(",
"x",
")",
":",
"year",
"=",
"x",
".",
"year",
"+",
"(",
"x",
".",
"month",
"+",
"interval",
"-",
"1",
")",
"//",
"12",
"month",
"=",
"(",
"x",
".",
"month",
"+",
"interval",
")",
"%",
"12",
"or",
"12",
"day",
"=",
"min",
"(",
"x",
".",
"day",
",",
"calendar",
".",
"monthrange",
"(",
"year",
",",
"month",
")",
"[",
"1",
"]",
")",
"return",
"x",
".",
"replace",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"day",
"=",
"day",
")",
"next_value",
"=",
"_fn",
"else",
":",
"if",
"units",
"==",
"'milliseconds'",
":",
"delta",
"=",
"timedelta",
"(",
"milliseconds",
"=",
"interval",
")",
"elif",
"units",
"==",
"'seconds'",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"interval",
")",
"elif",
"units",
"==",
"'minutes'",
":",
"delta",
"=",
"timedelta",
"(",
"minutes",
"=",
"interval",
")",
"elif",
"units",
"==",
"'hours'",
":",
"delta",
"=",
"timedelta",
"(",
"hours",
"=",
"interval",
")",
"elif",
"units",
"==",
"'days'",
":",
"delta",
"=",
"timedelta",
"(",
"days",
"=",
"interval",
")",
"elif",
"units",
"==",
"'weeks'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"interval",
")",
"else",
":",
"raise",
"ValidationError",
"(",
"\"Service has an invalid time_interval_units: {}\"",
".",
"format",
"(",
"self",
".",
"service",
".",
"time_interval_units",
")",
")",
"next_value",
"=",
"lambda",
"x",
":",
"x",
"+",
"delta",
"while",
"steps",
"[",
"-",
"1",
"]",
"<",
"self",
".",
"time_end",
":",
"value",
"=",
"next_value",
"(",
"steps",
"[",
"-",
"1",
"]",
")",
"if",
"value",
">",
"self",
".",
"time_end",
":",
"break",
"steps",
".",
"append",
"(",
"value",
")",
"return",
"steps",
"else",
":",
"# TODO",
"raise",
"NotImplementedError"
] | Valid time steps for this service as a list of datetime objects. | [
"Valid",
"time",
"steps",
"for",
"this",
"service",
"as",
"a",
"list",
"of",
"datetime",
"objects",
"."
] | f807bfd1e4083ab29fbc3c4d4418be108383a710 | https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/models.py#L98-L155 | train |
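The month-stepping branch is the subtle part (day clamping and year rollover); a standalone re-statement of that helper, with the name `next_month` and `interval=1` chosen for the example:

import calendar
from datetime import datetime

def next_month(x, interval=1):
    year = x.year + (x.month + interval - 1) // 12
    month = (x.month + interval) % 12 or 12
    day = min(x.day, calendar.monthrange(year, month)[1])
    return x.replace(year=year, month=month, day=day)

print(next_month(datetime(2020, 1, 31)))   # 2020-02-29: day clamped to leap February
print(next_month(datetime(2020, 12, 15)))  # 2021-01-15: year rolls over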
DarkEnergySurvey/ugali | ugali/utils/parser.py | Parser._parse_coords | def _parse_coords(self,opts):
""" Parse target coordinates in various ways...
"""
# The coordinates are mutually exclusive, so
# shouldn't have to worry about over-writing them.
if 'coords' in vars(opts): return
radius = vars(opts).get('radius',0)
gal = None
if vars(opts).get('gal') is not None:
gal = opts.gal
elif vars(opts).get('cel') is not None:
gal = cel2gal(*opts.cel)
elif vars(opts).get('hpx') is not None:
gal = pix2ang(*opts.hpx)
if gal is not None:
opts.coords = [(gal[0],gal[1],radius)]
opts.names = [vars(opts).get('name','')]
else:
opts.coords = None
opts.names = None
if vars(opts).get('targets') is not None:
opts.names,opts.coords = self.parse_targets(opts.targets)
if vars(opts).get('radius') is not None:
opts.coords['radius'] = vars(opts).get('radius') | python | def _parse_coords(self,opts):
""" Parse target coordinates in various ways...
"""
# The coordinates are mutually exclusive, so
# shouldn't have to worry about over-writing them.
if 'coords' in vars(opts): return
radius = vars(opts).get('radius',0)
gal = None
if vars(opts).get('gal') is not None:
gal = opts.gal
elif vars(opts).get('cel') is not None:
gal = cel2gal(*opts.cel)
elif vars(opts).get('hpx') is not None:
gal = pix2ang(*opts.hpx)
if gal is not None:
opts.coords = [(gal[0],gal[1],radius)]
opts.names = [vars(opts).get('name','')]
else:
opts.coords = None
opts.names = None
if vars(opts).get('targets') is not None:
opts.names,opts.coords = self.parse_targets(opts.targets)
if vars(opts).get('radius') is not None:
opts.coords['radius'] = vars(opts).get('radius') | [
"def",
"_parse_coords",
"(",
"self",
",",
"opts",
")",
":",
"# The coordinates are mutually exclusive, so",
"# shouldn't have to worry about over-writing them.",
"if",
"'coords'",
"in",
"vars",
"(",
"opts",
")",
":",
"return",
"radius",
"=",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'radius'",
",",
"0",
")",
"gal",
"=",
"None",
"if",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'gal'",
")",
"is",
"not",
"None",
":",
"gal",
"=",
"opts",
".",
"gal",
"elif",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'cel'",
")",
"is",
"not",
"None",
":",
"gal",
"=",
"cel2gal",
"(",
"*",
"opts",
".",
"cel",
")",
"elif",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'hpx'",
")",
"is",
"not",
"None",
":",
"gal",
"=",
"pix2ang",
"(",
"*",
"opts",
".",
"hpx",
")",
"if",
"gal",
"is",
"not",
"None",
":",
"opts",
".",
"coords",
"=",
"[",
"(",
"gal",
"[",
"0",
"]",
",",
"gal",
"[",
"1",
"]",
",",
"radius",
")",
"]",
"opts",
".",
"names",
"=",
"[",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"]",
"else",
":",
"opts",
".",
"coords",
"=",
"None",
"opts",
".",
"names",
"=",
"None",
"if",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'targets'",
")",
"is",
"not",
"None",
":",
"opts",
".",
"names",
",",
"opts",
".",
"coords",
"=",
"self",
".",
"parse_targets",
"(",
"opts",
".",
"targets",
")",
"if",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'radius'",
")",
"is",
"not",
"None",
":",
"opts",
".",
"coords",
"[",
"'radius'",
"]",
"=",
"vars",
"(",
"opts",
")",
".",
"get",
"(",
"'radius'",
")"
] | Parse target coordinates in various ways... | [
"Parse",
"target",
"coordinates",
"in",
"various",
"ways",
"..."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/parser.py#L87-L112 | train |
Genida/django-appsettings | src/appsettings/settings.py | Setting.default_value | def default_value(self):
"""
Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value.
"""
if callable(self.default) and self.call_default:
return self.default()
return self.default | python | def default_value(self):
"""
Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value.
"""
if callable(self.default) and self.call_default:
return self.default()
return self.default | [
"def",
"default_value",
"(",
"self",
")",
":",
"if",
"callable",
"(",
"self",
".",
"default",
")",
"and",
"self",
".",
"call_default",
":",
"return",
"self",
".",
"default",
"(",
")",
"return",
"self",
".",
"default"
] | Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value. | [
"Property",
"to",
"return",
"the",
"default",
"value",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L446-L458 | train |
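The `call_default` contract in isolation, using a hypothetical stand-in class (not the real Setting):

class Demo(object):
    def __init__(self, default, call_default=True):
        self.default = default
        self.call_default = call_default
    @property
    def default_value(self):
        if callable(self.default) and self.call_default:
            return self.default()
        return self.default

print(Demo(list).default_value)                      # [] -- a fresh list per access
print(Demo(list, call_default=False).default_value)  # the list class itself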
Genida/django-appsettings | src/appsettings/settings.py | Setting.raw_value | def raw_value(self):
"""
Property to return the variable defined in ``django.conf.settings``.
Returns:
object: the variable defined in ``django.conf.settings``.
Raises:
AttributeError: if the variable is missing.
KeyError: if the item is missing from nested setting.
"""
if self.parent_setting is not None:
return self.parent_setting.raw_value[self.full_name]
else:
return getattr(settings, self.full_name) | python | def raw_value(self):
"""
Property to return the variable defined in ``django.conf.settings``.
Returns:
object: the variable defined in ``django.conf.settings``.
Raises:
AttributeError: if the variable is missing.
KeyError: if the item is missing from nested setting.
"""
if self.parent_setting is not None:
return self.parent_setting.raw_value[self.full_name]
else:
return getattr(settings, self.full_name) | [
"def",
"raw_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent_setting",
"is",
"not",
"None",
":",
"return",
"self",
".",
"parent_setting",
".",
"raw_value",
"[",
"self",
".",
"full_name",
"]",
"else",
":",
"return",
"getattr",
"(",
"settings",
",",
"self",
".",
"full_name",
")"
] | Property to return the variable defined in ``django.conf.settings``.
Returns:
object: the variable defined in ``django.conf.settings``.
Raises:
AttributeError: if the variable is missing.
KeyError: if the item is missing from nested setting. | [
"Property",
"to",
"return",
"the",
"variable",
"defined",
"in",
"django",
".",
"conf",
".",
"settings",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L461-L475 | train |
Genida/django-appsettings | src/appsettings/settings.py | Setting.get_value | def get_value(self):
"""
Return the transformed raw or default value.
If the variable is missing from the project settings, and the setting
is required, re-raise an AttributeError. If it is not required,
return the (optionally transformed) default value.
Returns:
object: the transformed raw value.
"""
try:
value = self.raw_value
except (AttributeError, KeyError) as err:
self._reraise_if_required(err)
default_value = self.default_value
if self.transform_default:
return self.transform(default_value)
return default_value
else:
return self.transform(value) | python | def get_value(self):
"""
Return the transformed raw or default value.
If the variable is missing from the project settings, and the setting
is required, re-raise an AttributeError. If it is not required,
return the (optionally transformed) default value.
Returns:
object: the transformed raw value.
"""
try:
value = self.raw_value
except (AttributeError, KeyError) as err:
self._reraise_if_required(err)
default_value = self.default_value
if self.transform_default:
return self.transform(default_value)
return default_value
else:
return self.transform(value) | [
"def",
"get_value",
"(",
"self",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"raw_value",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
"as",
"err",
":",
"self",
".",
"_reraise_if_required",
"(",
"err",
")",
"default_value",
"=",
"self",
".",
"default_value",
"if",
"self",
".",
"transform_default",
":",
"return",
"self",
".",
"transform",
"(",
"default_value",
")",
"return",
"default_value",
"else",
":",
"return",
"self",
".",
"transform",
"(",
"value",
")"
] | Return the transformed raw or default value.
If the variable is missing from the project settings, and the setting
is required, re-raise an AttributeError. If it is not required,
return the (optionally transformed) default value.
Returns:
object: the transformed raw value. | [
"Return",
"the",
"transformed",
"raw",
"or",
"default",
"value",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L489-L509 | train |
Genida/django-appsettings | src/appsettings/settings.py | Setting.run_validators | def run_validators(self, value):
"""Run the validators on the setting value."""
errors = []
for validator in self.validators:
try:
validator(value)
except ValidationError as error:
errors.extend(error.messages)
if errors:
raise ValidationError(errors) | python | def run_validators(self, value):
"""Run the validators on the setting value."""
errors = []
for validator in self.validators:
try:
validator(value)
except ValidationError as error:
errors.extend(error.messages)
if errors:
raise ValidationError(errors) | [
"def",
"run_validators",
"(",
"self",
",",
"value",
")",
":",
"errors",
"=",
"[",
"]",
"for",
"validator",
"in",
"self",
".",
"validators",
":",
"try",
":",
"validator",
"(",
"value",
")",
"except",
"ValidationError",
"as",
"error",
":",
"errors",
".",
"extend",
"(",
"error",
".",
"messages",
")",
"if",
"errors",
":",
"raise",
"ValidationError",
"(",
"errors",
")"
] | Run the validators on the setting value. | [
"Run",
"the",
"validators",
"on",
"the",
"setting",
"value",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L522-L531 | train |
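The aggregate-and-reraise pattern in isolation, with hypothetical stand-in validators (Django is assumed installed for `ValidationError`):

from django.core.exceptions import ValidationError

def not_negative(value):
    if value < 0:
        raise ValidationError('must be non-negative')

def small_enough(value):
    if value > 10:
        raise ValidationError('must be <= 10')

errors = []
for validator in (not_negative, small_enough):
    try:
        validator(42)
    except ValidationError as error:
        errors.extend(error.messages)
if errors:
    raise ValidationError(errors)  # one exception carrying every message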
Genida/django-appsettings | src/appsettings/settings.py | ObjectSetting.transform | def transform(self, path):
"""
Transform a path into an actual Python object.
The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object.
"""
if path is None or not path:
return None
obj_parent_modules = path.split(".")
objects = [obj_parent_modules.pop(-1)]
while True:
try:
parent_module_path = ".".join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if len(obj_parent_modules) == 1:
raise ImportError("No module named '%s'" % obj_parent_modules[0])
objects.insert(0, obj_parent_modules.pop(-1))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
return current_object | python | def transform(self, path):
"""
Transform a path into an actual Python object.
The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object.
"""
if path is None or not path:
return None
obj_parent_modules = path.split(".")
objects = [obj_parent_modules.pop(-1)]
while True:
try:
parent_module_path = ".".join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if len(obj_parent_modules) == 1:
raise ImportError("No module named '%s'" % obj_parent_modules[0])
objects.insert(0, obj_parent_modules.pop(-1))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
return current_object | [
"def",
"transform",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"is",
"None",
"or",
"not",
"path",
":",
"return",
"None",
"obj_parent_modules",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"objects",
"=",
"[",
"obj_parent_modules",
".",
"pop",
"(",
"-",
"1",
")",
"]",
"while",
"True",
":",
"try",
":",
"parent_module_path",
"=",
"\".\"",
".",
"join",
"(",
"obj_parent_modules",
")",
"parent_module",
"=",
"importlib",
".",
"import_module",
"(",
"parent_module_path",
")",
"break",
"except",
"ImportError",
":",
"if",
"len",
"(",
"obj_parent_modules",
")",
"==",
"1",
":",
"raise",
"ImportError",
"(",
"\"No module named '%s'\"",
"%",
"obj_parent_modules",
"[",
"0",
"]",
")",
"objects",
".",
"insert",
"(",
"0",
",",
"obj_parent_modules",
".",
"pop",
"(",
"-",
"1",
")",
")",
"current_object",
"=",
"parent_module",
"for",
"obj",
"in",
"objects",
":",
"current_object",
"=",
"getattr",
"(",
"current_object",
",",
"obj",
")",
"return",
"current_object"
] | Transform a path into an actual Python object.
The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object. | [
"Transform",
"a",
"path",
"into",
"an",
"actual",
"Python",
"object",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L1085-L1120 | train |
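A standalone re-run of the same resolution strategy on a stdlib path (the class's guard for a single unimportable module is omitted for brevity):

import importlib

path = 'os.path.join'
parts = path.split('.')
objects = [parts.pop(-1)]
while True:
    try:
        module = importlib.import_module('.'.join(parts))
        break
    except ImportError:
        objects.insert(0, parts.pop(-1))
obj = module
for name in objects:
    obj = getattr(obj, name)  # walk attributes below the deepest importable module
print(obj)  # <function join ...>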
Genida/django-appsettings | src/appsettings/settings.py | NestedSetting.get_value | def get_value(self):
"""
Return dictionary with values of subsettings.
Returns:
dict: values of subsettings.
"""
try:
self.raw_value
except (AttributeError, KeyError) as err:
self._reraise_if_required(err)
default_value = self.default_value
if self.transform_default:
return self.transform(default_value)
return default_value
else:
# If setting is defined, load values of all subsettings.
value = {}
for key, subsetting in self.settings.items():
value[key] = subsetting.get_value()
return value | python | def get_value(self):
"""
Return dictionary with values of subsettings.
Returns:
dict: values of subsettings.
"""
try:
self.raw_value
except (AttributeError, KeyError) as err:
self._reraise_if_required(err)
default_value = self.default_value
if self.transform_default:
return self.transform(default_value)
return default_value
else:
# If setting is defined, load values of all subsettings.
value = {}
for key, subsetting in self.settings.items():
value[key] = subsetting.get_value()
return value | [
"def",
"get_value",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"raw_value",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
"as",
"err",
":",
"self",
".",
"_reraise_if_required",
"(",
"err",
")",
"default_value",
"=",
"self",
".",
"default_value",
"if",
"self",
".",
"transform_default",
":",
"return",
"self",
".",
"transform",
"(",
"default_value",
")",
"return",
"default_value",
"else",
":",
"# If setting is defined, load values of all subsettings.",
"value",
"=",
"{",
"}",
"for",
"key",
",",
"subsetting",
"in",
"self",
".",
"settings",
".",
"items",
"(",
")",
":",
"value",
"[",
"key",
"]",
"=",
"subsetting",
".",
"get_value",
"(",
")",
"return",
"value"
] | Return dictionary with values of subsettings.
Returns:
dict: values of subsettings. | [
"Return",
"dictionary",
"with",
"values",
"of",
"subsettings",
"."
] | f98867d133558af7dc067f12b44fc1ee4edd4239 | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L1154-L1174 | train |
DarkEnergySurvey/ugali | ugali/isochrone/model.py | sum_mags | def sum_mags(mags, weights=None):
"""
Sum an array of magnitudes in flux space.
Parameters:
-----------
mags : array of magnitudes
weights : array of weights for each magnitude (i.e. from a pdf)
Returns:
--------
sum_mag : the summed magnitude of all the stars
"""
flux = 10**(-np.asarray(mags) / 2.5)
if weights is None:
return -2.5 * np.log10(np.sum(flux))
else:
return -2.5 * np.log10(np.sum(weights*flux)) | python | def sum_mags(mags, weights=None):
"""
Sum an array of magnitudes in flux space.
Parameters:
-----------
mags : array of magnitudes
weights : array of weights for each magnitude (i.e. from a pdf)
Returns:
--------
sum_mag : the summed magnitude of all the stars
"""
flux = 10**(-np.asarray(mags) / 2.5)
if weights is None:
return -2.5 * np.log10(np.sum(flux))
else:
return -2.5 * np.log10(np.sum(weights*flux)) | [
"def",
"sum_mags",
"(",
"mags",
",",
"weights",
"=",
"None",
")",
":",
"flux",
"=",
"10",
"**",
"(",
"-",
"np",
".",
"asarray",
"(",
"mags",
")",
"/",
"2.5",
")",
"if",
"weights",
"is",
"None",
":",
"return",
"-",
"2.5",
"*",
"np",
".",
"log10",
"(",
"np",
".",
"sum",
"(",
"flux",
")",
")",
"else",
":",
"return",
"-",
"2.5",
"*",
"np",
".",
"log10",
"(",
"np",
".",
"sum",
"(",
"weights",
"*",
"flux",
")",
")"
] | Sum an array of magnitudes in flux space.
Parameters:
-----------
mags : array of magnitudes
weights : array of weights for each magnitude (i.e. from a pdf)
Returns:
--------
sum_mag : the summed magnitude of all the stars | [
"Sum",
"an",
"array",
"of",
"magnitudes",
"in",
"flux",
"space",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L57-L74 | train |
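A worked check of the flux-space arithmetic: two equal stars have twice the flux, i.e. are 2.5*log10(2) ~ 0.753 mag brighter than one:

import numpy as np

mags = [20.0, 20.0]
flux = 10 ** (-np.asarray(mags) / 2.5)
print(-2.5 * np.log10(flux.sum()))  # ~19.247 == 20 - 2.5*log10(2)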
DarkEnergySurvey/ugali | ugali/isochrone/model.py | absolute_magnitude | def absolute_magnitude(distance_modulus,g,r,prob=None):
""" Calculate the absolute magnitude from a set of bands """
V = g - 0.487*(g - r) - 0.0249
flux = np.sum(10**(-(V-distance_modulus)/2.5))
Mv = -2.5*np.log10(flux)
return Mv | python | def absolute_magnitude(distance_modulus,g,r,prob=None):
""" Calculate the absolute magnitude from a set of bands """
V = g - 0.487*(g - r) - 0.0249
flux = np.sum(10**(-(V-distance_modulus)/2.5))
Mv = -2.5*np.log10(flux)
return Mv | [
"def",
"absolute_magnitude",
"(",
"distance_modulus",
",",
"g",
",",
"r",
",",
"prob",
"=",
"None",
")",
":",
"V",
"=",
"g",
"-",
"0.487",
"*",
"(",
"g",
"-",
"r",
")",
"-",
"0.0249",
"flux",
"=",
"np",
".",
"sum",
"(",
"10",
"**",
"(",
"-",
"(",
"V",
"-",
"distance_modulus",
")",
"/",
"2.5",
")",
")",
"Mv",
"=",
"-",
"2.5",
"*",
"np",
".",
"log10",
"(",
"flux",
")",
"return",
"Mv"
] | Calculate the absolute magnitude from a set of bands | [
"Calculate",
"the",
"absolute",
"magnitude",
"from",
"a",
"set",
"of",
"bands"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L1208-L1214 | train |
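A single-star sanity check of the same g,r -> V transformation (input values are arbitrary):

import numpy as np

g, r, dm = np.array([20.0]), np.array([19.5]), 16.0
V = g - 0.487 * (g - r) - 0.0249
Mv = -2.5 * np.log10(np.sum(10 ** (-(V - dm) / 2.5)))
print(Mv)  # equals V - dm (~3.732) when only one star contributes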
DarkEnergySurvey/ugali | ugali/isochrone/model.py | IsochroneModel.observableFractionCDF | def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask. Incorporates simplistic
photometric errors.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: This function is currently a rate-limiting step in the likelihood
calculation. Could it be faster?
"""
method = 'step'
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
mag_1 = mag_1+distance_modulus
mag_2 = mag_2+distance_modulus
mask_1,mask_2 = mask.mask_roi_unique.T
mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
# "upper" bound set by maglim
delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2
# "lower" bound set by bins_mag (maglim shouldn't be 0)
delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2
cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
cdf = cdf_1*cdf_2
if method is None or method == 'none':
comp_cdf = cdf
elif self.band_1_detection == True:
comp = mask.mask_1.completeness(mag_1, method=method)
comp_cdf = comp*cdf
elif self.band_1_detection == False:
comp = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp*cdf
else:
comp_1 = mask.mask_1.completeness(mag_1, method=method)
comp_2 = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp_1*comp_2*cdf
observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]] | python | def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask. Incorporates simplistic
photometric errors.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: This function is currently a rate-limiting step in the likelihood
calculation. Could it be faster?
"""
method = 'step'
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
mag_1 = mag_1+distance_modulus
mag_2 = mag_2+distance_modulus
mask_1,mask_2 = mask.mask_roi_unique.T
mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
# "upper" bound set by maglim
delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2
# "lower" bound set by bins_mag (maglim shouldn't be 0)
delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2
cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
cdf = cdf_1*cdf_2
if method is None or method == 'none':
comp_cdf = cdf
elif self.band_1_detection == True:
comp = mask.mask_1.completeness(mag_1, method=method)
comp_cdf = comp*cdf
elif self.band_1_detection == False:
comp = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp*cdf
else:
comp_1 = mask.mask_1.completeness(mag_1, method=method)
comp_2 = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp_1*comp_2*cdf
observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]] | [
"def",
"observableFractionCDF",
"(",
"self",
",",
"mask",
",",
"distance_modulus",
",",
"mass_min",
"=",
"0.1",
")",
":",
"method",
"=",
"'step'",
"mass_init",
",",
"mass_pdf",
",",
"mass_act",
",",
"mag_1",
",",
"mag_2",
"=",
"self",
".",
"sample",
"(",
"mass_min",
"=",
"mass_min",
",",
"full_data_range",
"=",
"False",
")",
"mag_1",
"=",
"mag_1",
"+",
"distance_modulus",
"mag_2",
"=",
"mag_2",
"+",
"distance_modulus",
"mask_1",
",",
"mask_2",
"=",
"mask",
".",
"mask_roi_unique",
".",
"T",
"mag_err_1",
"=",
"mask",
".",
"photo_err_1",
"(",
"mask_1",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"mag_1",
")",
"mag_err_2",
"=",
"mask",
".",
"photo_err_2",
"(",
"mask_2",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"mag_2",
")",
"# \"upper\" bound set by maglim",
"delta_hi_1",
"=",
"(",
"mask_1",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"mag_1",
")",
"/",
"mag_err_1",
"delta_hi_2",
"=",
"(",
"mask_2",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"mag_2",
")",
"/",
"mag_err_2",
"# \"lower\" bound set by bins_mag (maglim shouldn't be 0)",
"delta_lo_1",
"=",
"(",
"mask",
".",
"roi",
".",
"bins_mag",
"[",
"0",
"]",
"-",
"mag_1",
")",
"/",
"mag_err_1",
"delta_lo_2",
"=",
"(",
"mask",
".",
"roi",
".",
"bins_mag",
"[",
"0",
"]",
"-",
"mag_2",
")",
"/",
"mag_err_2",
"cdf_1",
"=",
"norm_cdf",
"(",
"delta_hi_1",
")",
"-",
"norm_cdf",
"(",
"delta_lo_1",
")",
"cdf_2",
"=",
"norm_cdf",
"(",
"delta_hi_2",
")",
"-",
"norm_cdf",
"(",
"delta_lo_2",
")",
"cdf",
"=",
"cdf_1",
"*",
"cdf_2",
"if",
"method",
"is",
"None",
"or",
"method",
"==",
"'none'",
":",
"comp_cdf",
"=",
"cdf",
"elif",
"self",
".",
"band_1_detection",
"==",
"True",
":",
"comp",
"=",
"mask",
".",
"mask_1",
".",
"completeness",
"(",
"mag_1",
",",
"method",
"=",
"method",
")",
"comp_cdf",
"=",
"comp",
"*",
"cdf",
"elif",
"self",
".",
"band_1_detection",
"==",
"False",
":",
"comp",
"=",
"mask",
".",
"mask_2",
".",
"completeness",
"(",
"mag_2",
",",
"method",
"=",
"method",
")",
"comp_cdf",
"=",
"comp",
"*",
"cdf",
"else",
":",
"comp_1",
"=",
"mask",
".",
"mask_1",
".",
"completeness",
"(",
"mag_1",
",",
"method",
"=",
"method",
")",
"comp_2",
"=",
"mask",
".",
"mask_2",
".",
"completeness",
"(",
"mag_2",
",",
"method",
"=",
"method",
")",
"comp_cdf",
"=",
"comp_1",
"*",
"comp_2",
"*",
"cdf",
"observable_fraction",
"=",
"(",
"mass_pdf",
"[",
"np",
".",
"newaxis",
"]",
"*",
"comp_cdf",
")",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
"return",
"observable_fraction",
"[",
"mask",
".",
"mask_roi_digi",
"[",
"mask",
".",
"roi",
".",
"pixel_interior_cut",
"]",
"]"
] | Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask. Incorporates simplistic
photometric errors.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: This function is currently a rate-limiting step in the likelihood
calculation. Could it be faster? | [
"Compute",
"observable",
"fraction",
"of",
"stars",
"with",
"masses",
"greater",
"than",
"mass_min",
"in",
"each",
"pixel",
"in",
"the",
"interior",
"region",
"of",
"the",
"mask",
".",
"Incorporates",
"simplistic",
"photometric",
"errors",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L544-L595 | train |
DarkEnergySurvey/ugali | ugali/isochrone/model.py | IsochroneModel.histogram2d | def histogram2d(self,distance_modulus=None,delta_mag=0.03,steps=10000):
"""
Return a 2D histogram of the isochrone in mag-mag space.
Parameters:
-----------
distance_modulus : distance modulus to calculate histogram at
delta_mag : magnitude bin size
steps : number of steps to sample isochrone at
Returns:
--------
isochrone_pdf : weighted pdf of isochrone in each bin
bins_mag_1 : bin edges for first magnitude
bins_mag_2 : bin edges for second magnitude
"""
if distance_modulus is not None:
self.distance_modulus = distance_modulus
# Isochrone will be binned, so might as well sample lots of points
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=steps)
#logger.warning("Fudging intrinisic dispersion in isochrone.")
#mag_1 += np.random.normal(scale=0.02,size=len(mag_1))
#mag_2 += np.random.normal(scale=0.02,size=len(mag_2))
# We cast to np.float32 to save memory
bins_mag_1 = np.arange(self.mod+mag_1.min() - (0.5*delta_mag),
self.mod+mag_1.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
bins_mag_2 = np.arange(self.mod+mag_2.min() - (0.5*delta_mag),
self.mod+mag_2.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
# ADW: Completeness needs to go in mass_pdf here...
isochrone_pdf = np.histogram2d(self.mod + mag_1,
self.mod + mag_2,
bins=[bins_mag_1, bins_mag_2],
weights=mass_pdf)[0].astype(np.float32)
return isochrone_pdf, bins_mag_1, bins_mag_2 | python | def histogram2d(self,distance_modulus=None,delta_mag=0.03,steps=10000):
"""
Return a 2D histogram of the isochrone in mag-mag space.
Parameters:
-----------
distance_modulus : distance modulus to calculate histogram at
delta_mag : magnitude bin size
steps : number of steps to sample isochrone at
Returns:
--------
isochrone_pdf : weighted pdf of isochrone in each bin
bins_mag_1 : bin edges for first magnitude
bins_mag_2 : bin edges for second magnitude
"""
if distance_modulus is not None:
self.distance_modulus = distance_modulus
# Isochrone will be binned, so might as well sample lots of points
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=steps)
#logger.warning("Fudging intrinisic dispersion in isochrone.")
#mag_1 += np.random.normal(scale=0.02,size=len(mag_1))
#mag_2 += np.random.normal(scale=0.02,size=len(mag_2))
# We cast to np.float32 to save memory
bins_mag_1 = np.arange(self.mod+mag_1.min() - (0.5*delta_mag),
self.mod+mag_1.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
bins_mag_2 = np.arange(self.mod+mag_2.min() - (0.5*delta_mag),
self.mod+mag_2.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
# ADW: Completeness needs to go in mass_pdf here...
isochrone_pdf = np.histogram2d(self.mod + mag_1,
self.mod + mag_2,
bins=[bins_mag_1, bins_mag_2],
weights=mass_pdf)[0].astype(np.float32)
return isochrone_pdf, bins_mag_1, bins_mag_2 | [
"def",
"histogram2d",
"(",
"self",
",",
"distance_modulus",
"=",
"None",
",",
"delta_mag",
"=",
"0.03",
",",
"steps",
"=",
"10000",
")",
":",
"if",
"distance_modulus",
"is",
"not",
"None",
":",
"self",
".",
"distance_modulus",
"=",
"distance_modulus",
"# Isochrone will be binned, so might as well sample lots of points",
"mass_init",
",",
"mass_pdf",
",",
"mass_act",
",",
"mag_1",
",",
"mag_2",
"=",
"self",
".",
"sample",
"(",
"mass_steps",
"=",
"steps",
")",
"#logger.warning(\"Fudging intrinisic dispersion in isochrone.\")",
"#mag_1 += np.random.normal(scale=0.02,size=len(mag_1))",
"#mag_2 += np.random.normal(scale=0.02,size=len(mag_2))",
"# We cast to np.float32 to save memory",
"bins_mag_1",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"mod",
"+",
"mag_1",
".",
"min",
"(",
")",
"-",
"(",
"0.5",
"*",
"delta_mag",
")",
",",
"self",
".",
"mod",
"+",
"mag_1",
".",
"max",
"(",
")",
"+",
"(",
"0.5",
"*",
"delta_mag",
")",
",",
"delta_mag",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"bins_mag_2",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"mod",
"+",
"mag_2",
".",
"min",
"(",
")",
"-",
"(",
"0.5",
"*",
"delta_mag",
")",
",",
"self",
".",
"mod",
"+",
"mag_2",
".",
"max",
"(",
")",
"+",
"(",
"0.5",
"*",
"delta_mag",
")",
",",
"delta_mag",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# ADW: Completeness needs to go in mass_pdf here...",
"isochrone_pdf",
"=",
"np",
".",
"histogram2d",
"(",
"self",
".",
"mod",
"+",
"mag_1",
",",
"self",
".",
"mod",
"+",
"mag_2",
",",
"bins",
"=",
"[",
"bins_mag_1",
",",
"bins_mag_2",
"]",
",",
"weights",
"=",
"mass_pdf",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"return",
"isochrone_pdf",
",",
"bins_mag_1",
",",
"bins_mag_2"
] | Return a 2D histogram of the isochrone in mag-mag space.
Parameters:
-----------
distance_modulus : distance modulus to calculate histogram at
delta_mag : magnitude bin size
steps : number of steps to sample isochrone at
Returns:
--------
isochrone_pdf : weighted pdf of isochrone in each bin
bins_mag_1 : bin edges for first magnitude
bins_mag_2 : bin edges for second magnitude | [
"Return",
"a",
"2D",
"histogram",
"the",
"isochrone",
"in",
"mag",
"-",
"mag",
"space",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L746-L786 | train |
DarkEnergySurvey/ugali | ugali/isochrone/model.py | IsochroneModel.pdf_mmd | def pdf_mmd(self, lon, lat, mag_1, mag_2, distance_modulus, mask, delta_mag=0.03, steps=1000):
"""
Ok, now here comes the beauty of having the signal MMD.
"""
logger.info('Running MMD pdf')
roi = mask.roi
mmd = self.signalMMD(mask,distance_modulus,delta_mag=delta_mag,mass_steps=steps)
# This is fragile, store this information somewhere else...
nedges = np.rint((roi.bins_mag[-1]-roi.bins_mag[0])/delta_mag)+1
edges_mag,delta_mag = np.linspace(roi.bins_mag[0],roi.bins_mag[-1],nedges,retstep=True)
idx_mag_1 = np.searchsorted(edges_mag,mag_1)
idx_mag_2 = np.searchsorted(edges_mag,mag_2)
if np.any(idx_mag_1 > nedges) or np.any(idx_mag_1 == 0):
msg = "Magnitude out of range..."
raise Exception(msg)
if np.any(idx_mag_2 > nedges) or np.any(idx_mag_2 == 0):
msg = "Magnitude out of range..."
raise Exception(msg)
idx = mask.roi.indexROI(lon,lat)
u_color = mmd[(mask.mask_roi_digi[idx],idx_mag_1,idx_mag_2)]
# Remove the bin size to convert the pdf to units of mag^-2
u_color /= delta_mag**2
return u_color | python | def pdf_mmd(self, lon, lat, mag_1, mag_2, distance_modulus, mask, delta_mag=0.03, steps=1000):
"""
Ok, now here comes the beauty of having the signal MMD.
"""
logger.info('Running MMD pdf')
roi = mask.roi
mmd = self.signalMMD(mask,distance_modulus,delta_mag=delta_mag,mass_steps=steps)
# This is fragile, store this information somewhere else...
nedges = np.rint((roi.bins_mag[-1]-roi.bins_mag[0])/delta_mag)+1
edges_mag,delta_mag = np.linspace(roi.bins_mag[0],roi.bins_mag[-1],nedges,retstep=True)
idx_mag_1 = np.searchsorted(edges_mag,mag_1)
idx_mag_2 = np.searchsorted(edges_mag,mag_2)
if np.any(idx_mag_1 > nedges) or np.any(idx_mag_1 == 0):
msg = "Magnitude out of range..."
raise Exception(msg)
if np.any(idx_mag_2 > nedges) or np.any(idx_mag_2 == 0):
msg = "Magnitude out of range..."
raise Exception(msg)
idx = mask.roi.indexROI(lon,lat)
u_color = mmd[(mask.mask_roi_digi[idx],idx_mag_1,idx_mag_2)]
# Remove the bin size to convert the pdf to units of mag^-2
u_color /= delta_mag**2
return u_color | [
"def",
"pdf_mmd",
"(",
"self",
",",
"lon",
",",
"lat",
",",
"mag_1",
",",
"mag_2",
",",
"distance_modulus",
",",
"mask",
",",
"delta_mag",
"=",
"0.03",
",",
"steps",
"=",
"1000",
")",
":",
"logger",
".",
"info",
"(",
"'Running MMD pdf'",
")",
"roi",
"=",
"mask",
".",
"roi",
"mmd",
"=",
"self",
".",
"signalMMD",
"(",
"mask",
",",
"distance_modulus",
",",
"delta_mag",
"=",
"delta_mag",
",",
"mass_steps",
"=",
"steps",
")",
"# This is fragile, store this information somewhere else...",
"nedges",
"=",
"np",
".",
"rint",
"(",
"(",
"roi",
".",
"bins_mag",
"[",
"-",
"1",
"]",
"-",
"roi",
".",
"bins_mag",
"[",
"0",
"]",
")",
"/",
"delta_mag",
")",
"+",
"1",
"edges_mag",
",",
"delta_mag",
"=",
"np",
".",
"linspace",
"(",
"roi",
".",
"bins_mag",
"[",
"0",
"]",
",",
"roi",
".",
"bins_mag",
"[",
"-",
"1",
"]",
",",
"nedges",
",",
"retstep",
"=",
"True",
")",
"idx_mag_1",
"=",
"np",
".",
"searchsorted",
"(",
"edges_mag",
",",
"mag_1",
")",
"idx_mag_2",
"=",
"np",
".",
"searchsorted",
"(",
"edges_mag",
",",
"mag_2",
")",
"if",
"np",
".",
"any",
"(",
"idx_mag_1",
">",
"nedges",
")",
"or",
"np",
".",
"any",
"(",
"idx_mag_1",
"==",
"0",
")",
":",
"msg",
"=",
"\"Magnitude out of range...\"",
"raise",
"Exception",
"(",
"msg",
")",
"if",
"np",
".",
"any",
"(",
"idx_mag_2",
">",
"nedges",
")",
"or",
"np",
".",
"any",
"(",
"idx_mag_2",
"==",
"0",
")",
":",
"msg",
"=",
"\"Magnitude out of range...\"",
"raise",
"Exception",
"(",
"msg",
")",
"idx",
"=",
"mask",
".",
"roi",
".",
"indexROI",
"(",
"lon",
",",
"lat",
")",
"u_color",
"=",
"mmd",
"[",
"(",
"mask",
".",
"mask_roi_digi",
"[",
"idx",
"]",
",",
"idx_mag_1",
",",
"idx_mag_2",
")",
"]",
"# Remove the bin size to convert the pdf to units of mag^-2",
"u_color",
"/=",
"delta_mag",
"**",
"2",
"return",
"u_color"
] | Ok, now here comes the beauty of having the signal MMD. | [
"Ok",
"now",
"here",
"comes",
"the",
"beauty",
"of",
"having",
"the",
"signal",
"MMD",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L788-L817 | train |
DarkEnergySurvey/ugali | ugali/isochrone/model.py | IsochroneModel.raw_separation | def raw_separation(self,mag_1,mag_2,steps=10000):
"""
Calculate the separation in magnitude-magnitude space between points and the isochrone. Uses a dense sampling of the isochrone and calculates the minimum metric distance to any isochrone sample point.
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
steps : Number of steps to sample the isochrone
Returns:
--------
sep : Minimum separation between test points and isochrone sample
"""
# http://stackoverflow.com/q/12653120/
mag_1 = np.array(mag_1,copy=False,ndmin=1)
mag_2 = np.array(mag_2,copy=False,ndmin=1)
init,pdf,act,iso_mag_1,iso_mag_2 = self.sample(mass_steps=steps)
iso_mag_1+=self.distance_modulus
iso_mag_2+=self.distance_modulus
iso_cut = (iso_mag_1<np.max(mag_1))&(iso_mag_1>np.min(mag_1)) | \
(iso_mag_2<np.max(mag_2))&(iso_mag_2>np.min(mag_2))
iso_mag_1 = iso_mag_1[iso_cut]
iso_mag_2 = iso_mag_2[iso_cut]
dist_mag_1 = mag_1[:,np.newaxis]-iso_mag_1
dist_mag_2 = mag_2[:,np.newaxis]-iso_mag_2
return np.min(np.sqrt(dist_mag_1**2 + dist_mag_2**2),axis=1) | python | def raw_separation(self,mag_1,mag_2,steps=10000):
"""
Calculate the separation in magnitude-magnitude space between points and the isochrone. Uses a dense sampling of the isochrone and calculates the minimum metric distance to any isochrone sample point.
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
steps : Number of steps to sample the isochrone
Returns:
--------
sep : Minimum separation between test points and isochrone sample
"""
# http://stackoverflow.com/q/12653120/
mag_1 = np.array(mag_1,copy=False,ndmin=1)
mag_2 = np.array(mag_2,copy=False,ndmin=1)
init,pdf,act,iso_mag_1,iso_mag_2 = self.sample(mass_steps=steps)
iso_mag_1+=self.distance_modulus
iso_mag_2+=self.distance_modulus
iso_cut = (iso_mag_1<np.max(mag_1))&(iso_mag_1>np.min(mag_1)) | \
(iso_mag_2<np.max(mag_2))&(iso_mag_2>np.min(mag_2))
iso_mag_1 = iso_mag_1[iso_cut]
iso_mag_2 = iso_mag_2[iso_cut]
dist_mag_1 = mag_1[:,np.newaxis]-iso_mag_1
dist_mag_2 = mag_2[:,np.newaxis]-iso_mag_2
return np.min(np.sqrt(dist_mag_1**2 + dist_mag_2**2),axis=1) | [
"def",
"raw_separation",
"(",
"self",
",",
"mag_1",
",",
"mag_2",
",",
"steps",
"=",
"10000",
")",
":",
"# http://stackoverflow.com/q/12653120/",
"mag_1",
"=",
"np",
".",
"array",
"(",
"mag_1",
",",
"copy",
"=",
"False",
",",
"ndmin",
"=",
"1",
")",
"mag_2",
"=",
"np",
".",
"array",
"(",
"mag_2",
",",
"copy",
"=",
"False",
",",
"ndmin",
"=",
"1",
")",
"init",
",",
"pdf",
",",
"act",
",",
"iso_mag_1",
",",
"iso_mag_2",
"=",
"self",
".",
"sample",
"(",
"mass_steps",
"=",
"steps",
")",
"iso_mag_1",
"+=",
"self",
".",
"distance_modulus",
"iso_mag_2",
"+=",
"self",
".",
"distance_modulus",
"iso_cut",
"=",
"(",
"iso_mag_1",
"<",
"np",
".",
"max",
"(",
"mag_1",
")",
")",
"&",
"(",
"iso_mag_1",
">",
"np",
".",
"min",
"(",
"mag_1",
")",
")",
"|",
"(",
"iso_mag_2",
"<",
"np",
".",
"max",
"(",
"mag_2",
")",
")",
"&",
"(",
"iso_mag_2",
">",
"np",
".",
"min",
"(",
"mag_2",
")",
")",
"iso_mag_1",
"=",
"iso_mag_1",
"[",
"iso_cut",
"]",
"iso_mag_2",
"=",
"iso_mag_2",
"[",
"iso_cut",
"]",
"dist_mag_1",
"=",
"mag_1",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"iso_mag_1",
"dist_mag_2",
"=",
"mag_2",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"-",
"iso_mag_2",
"return",
"np",
".",
"min",
"(",
"np",
".",
"sqrt",
"(",
"dist_mag_1",
"**",
"2",
"+",
"dist_mag_2",
"**",
"2",
")",
",",
"axis",
"=",
"1",
")"
] | Calculate the separation in magnitude-magnitude space between points and isochrone. Uses a dense sampling of the isochrone and calculates the metric distance from any isochrone sample point.
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
steps : Number of steps to sample the isochrone
Returns:
--------
sep : Minimum separation between test points and isochrone sample | [
"Calculate",
"the",
"separation",
"in",
"magnitude",
"-",
"magnitude",
"space",
"between",
"points",
"and",
"isochrone",
".",
"Uses",
"a",
"dense",
"sampling",
"of",
"the",
"isochrone",
"and",
"calculates",
"the",
"metric",
"distance",
"from",
"any",
"isochrone",
"sample",
"point",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L929-L960 | train |
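A self-contained sketch of the broadcasting step at the heart of raw_separation; the catalog magnitudes and the toy color locus below are invented stand-ins for the ugali inputs:

import numpy as np

# Made-up catalog magnitudes and a dense curve standing in for the
# isochrone samples returned by self.sample(); not the ugali API itself.
mag_1 = np.array([18.2, 19.5, 20.1])
mag_2 = np.array([17.9, 19.0, 19.8])
iso_mag_1 = np.linspace(17.0, 21.0, 1000)
iso_mag_2 = iso_mag_1 - 0.4  # toy color locus

# Broadcasting: (N,1) minus (M,) gives the full (N,M) offset matrix.
d1 = mag_1[:, np.newaxis] - iso_mag_1
d2 = mag_2[:, np.newaxis] - iso_mag_2
sep = np.min(np.hypot(d1, d2), axis=1)  # nearest sample point per star
print(sep)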
DarkEnergySurvey/ugali | ugali/isochrone/model.py | IsochroneModel.separation | def separation(self, mag_1, mag_2):
"""
Calculate the separation between a specific point and the
isochrone in magnitude-magnitude space. Uses an interpolation
ADW: Could speed this up...
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
Returns:
--------
sep : Minimum separation between test points and isochrone interpolation
"""
iso_mag_1 = self.mag_1 + self.distance_modulus
iso_mag_2 = self.mag_2 + self.distance_modulus
def interp_iso(iso_mag_1,iso_mag_2,mag_1,mag_2):
interp_1 = scipy.interpolate.interp1d(iso_mag_1,iso_mag_2,bounds_error=False)
interp_2 = scipy.interpolate.interp1d(iso_mag_2,iso_mag_1,bounds_error=False)
dy = interp_1(mag_1) - mag_2
dx = interp_2(mag_2) - mag_1
dmag_1 = np.fabs(dx*dy) / (dx**2 + dy**2) * dy
dmag_2 = np.fabs(dx*dy) / (dx**2 + dy**2) * dx
return dmag_1, dmag_2
# Separate the various stellar evolution stages
if np.issubdtype(self.stage.dtype,np.number):
sel = (self.stage < self.hb_stage)
else:
sel = (self.stage != self.hb_stage)
# First do the MS/RGB
rgb_mag_1 = iso_mag_1[sel]
rgb_mag_2 = iso_mag_2[sel]
dmag_1,dmag_2 = interp_iso(rgb_mag_1,rgb_mag_2,mag_1,mag_2)
# Then do the HB (if it exists)
if not np.all(sel):
hb_mag_1 = iso_mag_1[~sel]
hb_mag_2 = iso_mag_2[~sel]
hb_dmag_1,hb_dmag_2 = interp_iso(hb_mag_1,hb_mag_2,mag_1,mag_2)
dmag_1 = np.nanmin([dmag_1,hb_dmag_1],axis=0)
dmag_2 = np.nanmin([dmag_2,hb_dmag_2],axis=0)
#return dmag_1,dmag_2
return np.sqrt(dmag_1**2 + dmag_2**2) | python | def separation(self, mag_1, mag_2):
"""
Calculate the separation between a specific point and the
isochrone in magnitude-magnitude space. Uses an interpolation
ADW: Could speed this up...
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
Returns:
--------
sep : Minimum separation between test points and isochrone interpolation
"""
iso_mag_1 = self.mag_1 + self.distance_modulus
iso_mag_2 = self.mag_2 + self.distance_modulus
def interp_iso(iso_mag_1,iso_mag_2,mag_1,mag_2):
interp_1 = scipy.interpolate.interp1d(iso_mag_1,iso_mag_2,bounds_error=False)
interp_2 = scipy.interpolate.interp1d(iso_mag_2,iso_mag_1,bounds_error=False)
dy = interp_1(mag_1) - mag_2
dx = interp_2(mag_2) - mag_1
dmag_1 = np.fabs(dx*dy) / (dx**2 + dy**2) * dy
dmag_2 = np.fabs(dx*dy) / (dx**2 + dy**2) * dx
return dmag_1, dmag_2
# Separate the various stellar evolution stages
if np.issubdtype(self.stage.dtype,np.number):
sel = (self.stage < self.hb_stage)
else:
sel = (self.stage != self.hb_stage)
# First do the MS/RGB
rgb_mag_1 = iso_mag_1[sel]
rgb_mag_2 = iso_mag_2[sel]
dmag_1,dmag_2 = interp_iso(rgb_mag_1,rgb_mag_2,mag_1,mag_2)
# Then do the HB (if it exists)
if not np.all(sel):
hb_mag_1 = iso_mag_1[~sel]
hb_mag_2 = iso_mag_2[~sel]
hb_dmag_1,hb_dmag_2 = interp_iso(hb_mag_1,hb_mag_2,mag_1,mag_2)
dmag_1 = np.nanmin([dmag_1,hb_dmag_1],axis=0)
dmag_2 = np.nanmin([dmag_2,hb_dmag_2],axis=0)
#return dmag_1,dmag_2
return np.sqrt(dmag_1**2 + dmag_2**2) | [
"def",
"separation",
"(",
"self",
",",
"mag_1",
",",
"mag_2",
")",
":",
"iso_mag_1",
"=",
"self",
".",
"mag_1",
"+",
"self",
".",
"distance_modulus",
"iso_mag_2",
"=",
"self",
".",
"mag_2",
"+",
"self",
".",
"distance_modulus",
"def",
"interp_iso",
"(",
"iso_mag_1",
",",
"iso_mag_2",
",",
"mag_1",
",",
"mag_2",
")",
":",
"interp_1",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"iso_mag_1",
",",
"iso_mag_2",
",",
"bounds_error",
"=",
"False",
")",
"interp_2",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"iso_mag_2",
",",
"iso_mag_1",
",",
"bounds_error",
"=",
"False",
")",
"dy",
"=",
"interp_1",
"(",
"mag_1",
")",
"-",
"mag_2",
"dx",
"=",
"interp_2",
"(",
"mag_2",
")",
"-",
"mag_1",
"dmag_1",
"=",
"np",
".",
"fabs",
"(",
"dx",
"*",
"dy",
")",
"/",
"(",
"dx",
"**",
"2",
"+",
"dy",
"**",
"2",
")",
"*",
"dy",
"dmag_2",
"=",
"np",
".",
"fabs",
"(",
"dx",
"*",
"dy",
")",
"/",
"(",
"dx",
"**",
"2",
"+",
"dy",
"**",
"2",
")",
"*",
"dx",
"return",
"dmag_1",
",",
"dmag_2",
"# Separate the various stellar evolution stages",
"if",
"np",
".",
"issubdtype",
"(",
"self",
".",
"stage",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"sel",
"=",
"(",
"self",
".",
"stage",
"<",
"self",
".",
"hb_stage",
")",
"else",
":",
"sel",
"=",
"(",
"self",
".",
"stage",
"!=",
"self",
".",
"hb_stage",
")",
"# First do the MS/RGB",
"rgb_mag_1",
"=",
"iso_mag_1",
"[",
"sel",
"]",
"rgb_mag_2",
"=",
"iso_mag_2",
"[",
"sel",
"]",
"dmag_1",
",",
"dmag_2",
"=",
"interp_iso",
"(",
"rgb_mag_1",
",",
"rgb_mag_2",
",",
"mag_1",
",",
"mag_2",
")",
"# Then do the HB (if it exists)",
"if",
"not",
"np",
".",
"all",
"(",
"sel",
")",
":",
"hb_mag_1",
"=",
"iso_mag_1",
"[",
"~",
"sel",
"]",
"hb_mag_2",
"=",
"iso_mag_2",
"[",
"~",
"sel",
"]",
"hb_dmag_1",
",",
"hb_dmag_2",
"=",
"interp_iso",
"(",
"hb_mag_1",
",",
"hb_mag_2",
",",
"mag_1",
",",
"mag_2",
")",
"dmag_1",
"=",
"np",
".",
"nanmin",
"(",
"[",
"dmag_1",
",",
"hb_dmag_1",
"]",
",",
"axis",
"=",
"0",
")",
"dmag_2",
"=",
"np",
".",
"nanmin",
"(",
"[",
"dmag_2",
",",
"hb_dmag_2",
"]",
",",
"axis",
"=",
"0",
")",
"#return dmag_1,dmag_2",
"return",
"np",
".",
"sqrt",
"(",
"dmag_1",
"**",
"2",
"+",
"dmag_2",
"**",
"2",
")"
] | Calculate the separation between a specific point and the
isochrone in magnitude-magnitude space. Uses an interpolation
ADW: Could speed this up...
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
Returns:
--------
sep : Minimum separation between test points and isochrone interpolation | [
"Calculate",
"the",
"separation",
"between",
"a",
"specific",
"point",
"and",
"the",
"isochrone",
"in",
"magnitude",
"-",
"magnitude",
"space",
".",
"Uses",
"an",
"interpolation"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L963-L1017 | train |
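The dmag_1/dmag_2 algebra in interp_iso projects the horizontal and vertical offsets onto the curve normal. A sketch against a straight line, where the result can be checked analytically (the curve and test point are made up):

import numpy as np
from scipy import interpolate

# Toy monotone curve standing in for one evolutionary stage of the isochrone.
curve_x = np.linspace(16.0, 22.0, 50)
curve_y = 0.5 * curve_x - 2.0

f_xy = interpolate.interp1d(curve_x, curve_y, bounds_error=False)
f_yx = interpolate.interp1d(curve_y, curve_x, bounds_error=False)

x, y = 19.0, 7.9
dy = f_xy(x) - y  # vertical offset to the curve
dx = f_yx(y) - x  # horizontal offset to the curve

# Project (dx, dy) onto the local normal, exactly as interp_iso does:
dmag_1 = np.abs(dx * dy) / (dx**2 + dy**2) * dy
dmag_2 = np.abs(dx * dy) / (dx**2 + dy**2) * dx
print(np.hypot(dmag_1, dmag_2))  # ~0.358, the exact point-to-line distance here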
akx/lepo | lepo/router.py | Router.get_handler | def get_handler(self, operation_id):
"""
Get the handler function for a given operation.
To remain Pythonic, both the original and the snake_cased version of the operation ID are
supported.
This could be overridden in a subclass.
:param operation_id: Operation ID.
:return: Handler function
:rtype: function
"""
handler = (
self.handlers.get(operation_id)
or self.handlers.get(snake_case(operation_id))
)
if handler:
return handler
raise MissingHandler(
'Missing handler for operation %s (tried %s too)' % (operation_id, snake_case(operation_id))
) | python | def get_handler(self, operation_id):
"""
Get the handler function for a given operation.
To remain Pythonic, both the original and the snake_cased version of the operation ID are
supported.
This could be overridden in a subclass.
:param operation_id: Operation ID.
:return: Handler function
:rtype: function
"""
handler = (
self.handlers.get(operation_id)
or self.handlers.get(snake_case(operation_id))
)
if handler:
return handler
raise MissingHandler(
'Missing handler for operation %s (tried %s too)' % (operation_id, snake_case(operation_id))
) | [
"def",
"get_handler",
"(",
"self",
",",
"operation_id",
")",
":",
"handler",
"=",
"(",
"self",
".",
"handlers",
".",
"get",
"(",
"operation_id",
")",
"or",
"self",
".",
"handlers",
".",
"get",
"(",
"snake_case",
"(",
"operation_id",
")",
")",
")",
"if",
"handler",
":",
"return",
"handler",
"raise",
"MissingHandler",
"(",
"'Missing handler for operation %s (tried %s too)'",
"%",
"(",
"operation_id",
",",
"snake_case",
"(",
"operation_id",
")",
")",
")"
] | Get the handler function for a given operation.
To remain Pythonic, both the original and the snake_cased version of the operation ID are
supported.
This could be overridden in a subclass.
:param operation_id: Operation ID.
:return: Handler function
:rtype: function | [
"Get",
"the",
"handler",
"function",
"for",
"a",
"given",
"operation",
"."
] | 34cfb24a40f18ea40f672c1ea9a0734ee1816b7d | https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/router.py#L105-L126 | train |
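A dependency-free sketch of the two-step lookup get_handler performs; snake_case below is a simplified stand-in for lepo's helper, and the handler table is invented:

import re

def snake_case(name):
    # Simplified stand-in for lepo's snake_case helper.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

handlers = {'get_pet': lambda: 'pet'}

def get_handler(operation_id):
    handler = handlers.get(operation_id) or handlers.get(snake_case(operation_id))
    if handler:
        return handler
    raise KeyError('Missing handler for operation %s (tried %s too)'
                   % (operation_id, snake_case(operation_id)))

print(get_handler('getPet')())  # resolves via the snake_cased fallback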
akx/lepo | lepo/router.py | Router.add_handlers | def add_handlers(self, namespace):
"""
Add handler functions from the given `namespace`, for instance a module.
The namespace may be a string, in which case it is expected to be a name of a module.
It may also be a dictionary mapping names to functions.
Only non-underscore-prefixed functions and methods are imported.
:param namespace: Namespace object.
:type namespace: str|module|dict[str, function]
"""
if isinstance(namespace, str):
namespace = import_module(namespace)
if isinstance(namespace, dict):
namespace = namespace.items()
else:
namespace = vars(namespace).items()
for name, value in namespace:
if name.startswith('_'):
continue
if isfunction(value) or ismethod(value):
self.handlers[name] = value | python | def add_handlers(self, namespace):
"""
Add handler functions from the given `namespace`, for instance a module.
The namespace may be a string, in which case it is expected to be a name of a module.
It may also be a dictionary mapping names to functions.
Only non-underscore-prefixed functions and methods are imported.
:param namespace: Namespace object.
:type namespace: str|module|dict[str, function]
"""
if isinstance(namespace, str):
namespace = import_module(namespace)
if isinstance(namespace, dict):
namespace = namespace.items()
else:
namespace = vars(namespace).items()
for name, value in namespace:
if name.startswith('_'):
continue
if isfunction(value) or ismethod(value):
self.handlers[name] = value | [
"def",
"add_handlers",
"(",
"self",
",",
"namespace",
")",
":",
"if",
"isinstance",
"(",
"namespace",
",",
"str",
")",
":",
"namespace",
"=",
"import_module",
"(",
"namespace",
")",
"if",
"isinstance",
"(",
"namespace",
",",
"dict",
")",
":",
"namespace",
"=",
"namespace",
".",
"items",
"(",
")",
"else",
":",
"namespace",
"=",
"vars",
"(",
"namespace",
")",
".",
"items",
"(",
")",
"for",
"name",
",",
"value",
"in",
"namespace",
":",
"if",
"name",
".",
"startswith",
"(",
"'_'",
")",
":",
"continue",
"if",
"isfunction",
"(",
"value",
")",
"or",
"ismethod",
"(",
"value",
")",
":",
"self",
".",
"handlers",
"[",
"name",
"]",
"=",
"value"
] | Add handler functions from the given `namespace`, for instance a module.
The namespace may be a string, in which case it is expected to be a name of a module.
It may also be a dictionary mapping names to functions.
Only non-underscore-prefixed functions and methods are imported.
:param namespace: Namespace object.
:type namespace: str|module|dict[str, function] | [
"Add",
"handler",
"functions",
"from",
"the",
"given",
"namespace",
"for",
"instance",
"a",
"module",
"."
] | 34cfb24a40f18ea40f672c1ea9a0734ee1816b7d | https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/router.py#L128-L152 | train |
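The same filtering logic as add_handlers, condensed into a function that returns the table instead of mutating self.handlers; the json module is scanned only as a convenient namespace:

from importlib import import_module
from inspect import isfunction, ismethod

def collect_handlers(namespace):
    # Accepts a dotted module path, a module object, or a plain dict.
    if isinstance(namespace, str):
        namespace = import_module(namespace)
    items = namespace.items() if isinstance(namespace, dict) else vars(namespace).items()
    return {name: value for name, value in items
            if not name.startswith('_') and (isfunction(value) or ismethod(value))}

print(sorted(collect_handlers('json')))  # e.g. ['detect_encoding', 'dump', 'dumps', ...]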
consbio/ncdjango | ncdjango/geoprocessing/tasks/raster.py | SingleArrayExpressionBase.get_context | def get_context(self, arr, expr, context):
"""
Returns a context dictionary for use in evaluating the expression.
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context.
"""
expression_names = [x for x in self.get_expression_names(expr) if x not in set(context.keys()).union(['i'])]
if len(expression_names) != 1:
raise ValueError('The expression must have exactly one variable.')
return {expression_names[0]: arr} | python | def get_context(self, arr, expr, context):
"""
Returns a context dictionary for use in evaluating the expression.
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context.
"""
expression_names = [x for x in self.get_expression_names(expr) if x not in set(context.keys()).union(['i'])]
if len(expression_names) != 1:
raise ValueError('The expression must have exactly one variable.')
return {expression_names[0]: arr} | [
"def",
"get_context",
"(",
"self",
",",
"arr",
",",
"expr",
",",
"context",
")",
":",
"expression_names",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"get_expression_names",
"(",
"expr",
")",
"if",
"x",
"not",
"in",
"set",
"(",
"context",
".",
"keys",
"(",
")",
")",
".",
"union",
"(",
"[",
"'i'",
"]",
")",
"]",
"if",
"len",
"(",
"expression_names",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'The expression must have exactly one variable.'",
")",
"return",
"{",
"expression_names",
"[",
"0",
"]",
":",
"arr",
"}"
] | Returns a context dictionary for use in evaluating the expression.
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context. | [
"Returns",
"a",
"context",
"dictionary",
"for",
"use",
"in",
"evaluating",
"the",
"expression",
"."
] | f807bfd1e4083ab29fbc3c4d4418be108383a710 | https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/tasks/raster.py#L82-L96 | train |
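get_expression_names is defined elsewhere in the task base class; a rough stand-in using ast shows the free-variable check this method relies on (the expression and context are made up):

import ast

def expression_names(expr):
    # Rough stand-in for get_expression_names(): every bare name in the expression.
    return {node.id for node in ast.walk(ast.parse(expr, mode='eval'))
            if isinstance(node, ast.Name)}

context = {'offset': 2}
free = [n for n in expression_names('x + offset')
        if n not in set(context).union(['i'])]
assert free == ['x']  # exactly one variable left to bind to the input array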
consbio/ncdjango | ncdjango/geoprocessing/tasks/raster.py | MaskByExpression.execute | def execute(self, array_in, expression, **kwargs):
"""Creates and returns a masked view of the input array."""
context = self.get_context(array_in, expression, kwargs)
context.update(kwargs)
return ma.masked_where(self.evaluate_expression(expression, context), array_in) | python | def execute(self, array_in, expression, **kwargs):
"""Creates and returns a masked view of the input array."""
context = self.get_context(array_in, expression, kwargs)
context.update(kwargs)
return ma.masked_where(self.evaluate_expression(expression, context), array_in) | [
"def",
"execute",
"(",
"self",
",",
"array_in",
",",
"expression",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"self",
".",
"get_context",
"(",
"array_in",
",",
"expression",
",",
"kwargs",
")",
"context",
".",
"update",
"(",
"kwargs",
")",
"return",
"ma",
".",
"masked_where",
"(",
"self",
".",
"evaluate_expression",
"(",
"expression",
",",
"context",
")",
",",
"array_in",
")"
] | Creates and returns a masked view of the input array. | [
"Creates",
"and",
"returns",
"a",
"masked",
"view",
"of",
"the",
"input",
"array",
"."
] | f807bfd1e4083ab29fbc3c4d4418be108383a710 | https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/tasks/raster.py#L104-L109 | train |
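ma.masked_where keeps the data and records a boolean mask rather than dropping elements; a minimal demonstration:

import numpy as np
import numpy.ma as ma

arr = np.array([1.0, 5.0, 9.0, 3.0])
masked = ma.masked_where(arr > 4, arr)  # mask wherever the condition is True
print(masked)               # [1.0 -- -- 3.0]
print(masked.compressed())  # surviving values only: [1. 3.]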
juju/theblues | theblues/errors.py | timeout_error | def timeout_error(url, timeout):
"""Raise a server error indicating a request timeout to the given URL."""
msg = 'Request timed out: {} timeout: {}s'.format(url, timeout)
log.warning(msg)
return ServerError(msg) | python | def timeout_error(url, timeout):
"""Raise a server error indicating a request timeout to the given URL."""
msg = 'Request timed out: {} timeout: {}s'.format(url, timeout)
log.warning(msg)
return ServerError(msg) | [
"def",
"timeout_error",
"(",
"url",
",",
"timeout",
")",
":",
"msg",
"=",
"'Request timed out: {} timeout: {}s'",
".",
"format",
"(",
"url",
",",
"timeout",
")",
"log",
".",
"warning",
"(",
"msg",
")",
"return",
"ServerError",
"(",
"msg",
")"
] | Return a server error indicating a request timeout to the given URL. | [
"Raise",
"a",
"server",
"error",
"indicating",
"a",
"request",
"timeout",
"to",
"the",
"given",
"URL",
"."
] | f4431f29e43d04fc32f38f4f86cea45cd4e6ae98 | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/errors.py#L19-L23 | train |
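Since the helper returns the ServerError rather than raising it, callers raise the result themselves. A usage sketch, assuming the requests library is available and using a hypothetical endpoint:

import requests
from requests.exceptions import Timeout
from theblues.errors import timeout_error

url = 'https://api.example.com/charms'  # hypothetical endpoint
timeout = 3.05
try:
    requests.get(url, timeout=timeout)
except Timeout:
    raise timeout_error(url, timeout)  # raise the ServerError the helper returns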
DarkEnergySurvey/ugali | ugali/utils/plotting.py | histogram | def histogram(title, title_x, title_y,
x, bins_x):
"""
Plot a basic histogram.
"""
plt.figure()
plt.hist(x, bins_x)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title) | python | def histogram(title, title_x, title_y,
x, bins_x):
"""
Plot a basic histogram.
"""
plt.figure()
plt.hist(x, bins_x)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title) | [
"def",
"histogram",
"(",
"title",
",",
"title_x",
",",
"title_y",
",",
"x",
",",
"bins_x",
")",
":",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"hist",
"(",
"x",
",",
"bins_x",
")",
"plt",
".",
"xlabel",
"(",
"title_x",
")",
"plt",
".",
"ylabel",
"(",
"title_y",
")",
"plt",
".",
"title",
"(",
"title",
")"
] | Plot a basic histogram. | [
"Plot",
"a",
"basic",
"histogram",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L61-L70 | train |
DarkEnergySurvey/ugali | ugali/utils/plotting.py | twoDimensionalHistogram | def twoDimensionalHistogram(title, title_x, title_y,
z, bins_x, bins_y,
lim_x=None, lim_y=None,
vmin=None, vmax=None):
"""
Create a two-dimensional histogram plot or binned map.
If using the outputs of np.histogram2d, remember to transpose the histogram.
INPUTS
"""
plt.figure()
mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)
if vmin is not None and vmin == vmax:
plt.pcolor(mesh_x, mesh_y, z)
else:
plt.pcolor(mesh_x, mesh_y, z, vmin=vmin, vmax=vmax)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | python | def twoDimensionalHistogram(title, title_x, title_y,
z, bins_x, bins_y,
lim_x=None, lim_y=None,
vmin=None, vmax=None):
"""
Create a two-dimensional histogram plot or binned map.
If using the outputs of np.histogram2d, remember to transpose the histogram.
INPUTS
"""
plt.figure()
mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)
if vmin is not None and vmin == vmax:
plt.pcolor(mesh_x, mesh_y, z)
else:
plt.pcolor(mesh_x, mesh_y, z, vmin=vmin, vmax=vmax)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | [
"def",
"twoDimensionalHistogram",
"(",
"title",
",",
"title_x",
",",
"title_y",
",",
"z",
",",
"bins_x",
",",
"bins_y",
",",
"lim_x",
"=",
"None",
",",
"lim_y",
"=",
"None",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
")",
":",
"plt",
".",
"figure",
"(",
")",
"mesh_x",
",",
"mesh_y",
"=",
"np",
".",
"meshgrid",
"(",
"bins_x",
",",
"bins_y",
")",
"if",
"vmin",
"!=",
"None",
"and",
"vmin",
"==",
"vmax",
":",
"plt",
".",
"pcolor",
"(",
"mesh_x",
",",
"mesh_y",
",",
"z",
")",
"else",
":",
"plt",
".",
"pcolor",
"(",
"mesh_x",
",",
"mesh_y",
",",
"z",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"plt",
".",
"xlabel",
"(",
"title_x",
")",
"plt",
".",
"ylabel",
"(",
"title_y",
")",
"plt",
".",
"title",
"(",
"title",
")",
"plt",
".",
"colorbar",
"(",
")",
"if",
"lim_x",
":",
"plt",
".",
"xlim",
"(",
"lim_x",
"[",
"0",
"]",
",",
"lim_x",
"[",
"1",
"]",
")",
"if",
"lim_y",
":",
"plt",
".",
"ylim",
"(",
"lim_y",
"[",
"0",
"]",
",",
"lim_y",
"[",
"1",
"]",
")"
] | Create a two-dimensional histogram plot or binned map.
If using the outputs of np.histogram2d, remember to transpose the histogram.
INPUTS | [
"Create",
"a",
"two",
"-",
"dimension",
"histogram",
"plot",
"or",
"binned",
"map",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L74-L101 | train |
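The transposition caveat in the docstring, made concrete with random toy data:

import numpy as np
import matplotlib.pyplot as plt

x = np.random.normal(size=1000)
y = np.random.normal(size=1000)
z, bins_x, bins_y = np.histogram2d(x, y, bins=(20, 25))

# histogram2d puts x along axis 0, so transpose before pcolor,
# as the docstring above warns.
mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)
plt.pcolor(mesh_x, mesh_y, z.T)
plt.colorbar()
plt.show()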
DarkEnergySurvey/ugali | ugali/utils/plotting.py | twoDimensionalScatter | def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
INPUTS
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | python | def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
INPUTS
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | [
"def",
"twoDimensionalScatter",
"(",
"title",
",",
"title_x",
",",
"title_y",
",",
"x",
",",
"y",
",",
"lim_x",
"=",
"None",
",",
"lim_y",
"=",
"None",
",",
"color",
"=",
"'b'",
",",
"size",
"=",
"20",
",",
"alpha",
"=",
"None",
")",
":",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"c",
"=",
"color",
",",
"s",
"=",
"size",
",",
"alpha",
"=",
"alpha",
",",
"edgecolors",
"=",
"'none'",
")",
"plt",
".",
"xlabel",
"(",
"title_x",
")",
"plt",
".",
"ylabel",
"(",
"title_y",
")",
"plt",
".",
"title",
"(",
"title",
")",
"if",
"type",
"(",
"color",
")",
"is",
"not",
"str",
":",
"plt",
".",
"colorbar",
"(",
")",
"if",
"lim_x",
":",
"plt",
".",
"xlim",
"(",
"lim_x",
"[",
"0",
"]",
",",
"lim_x",
"[",
"1",
"]",
")",
"if",
"lim_y",
":",
"plt",
".",
"ylim",
"(",
"lim_y",
"[",
"0",
"]",
",",
"lim_y",
"[",
"1",
"]",
")"
] | Create a two-dimensional scatter plot.
INPUTS | [
"Create",
"a",
"two",
"-",
"dimensional",
"scatter",
"plot",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L105-L127 | train |
DarkEnergySurvey/ugali | ugali/utils/plotting.py | drawHealpixMap | def drawHealpixMap(hpxmap, lon, lat, size=1.0, xsize=501, coord='GC', **kwargs):
"""
Draw local projection of healpix map.
"""
ax = plt.gca()
x = np.linspace(-size,size,xsize)
y = np.linspace(-size,size,xsize)
xx, yy = np.meshgrid(x,y)
coord = coord.upper()
if coord == 'GC':
#Assumes map and (lon,lat) are Galactic, but plotting celestial
llon, llat = image2sphere(*gal2cel(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*cel2gal(llon,llat))
elif coord == 'CG':
#Assumes map and (lon,lat) are celestial, but plotting Galactic
llon, llat = image2sphere(*cel2gal(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*gal2cel(llon,llat))
else:
#Assumes plotting the native coordinates
llon, llat = image2sphere(lon,lat,xx.flat,yy.flat)
pix = ang2pix(get_nside(hpxmap),llon,llat)
values = hpxmap[pix].reshape(xx.shape)
zz = np.ma.array(values,mask=(values==hp.UNSEEN),fill_value=np.nan)
return drawProjImage(xx,yy,zz,coord=coord,**kwargs) | python | def drawHealpixMap(hpxmap, lon, lat, size=1.0, xsize=501, coord='GC', **kwargs):
"""
Draw local projection of healpix map.
"""
ax = plt.gca()
x = np.linspace(-size,size,xsize)
y = np.linspace(-size,size,xsize)
xx, yy = np.meshgrid(x,y)
coord = coord.upper()
if coord == 'GC':
#Assumes map and (lon,lat) are Galactic, but plotting celestial
llon, llat = image2sphere(*gal2cel(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*cel2gal(llon,llat))
elif coord == 'CG':
#Assumes map and (lon,lat) are celestial, but plotting Galactic
llon, llat = image2sphere(*cel2gal(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*gal2cel(llon,llat))
else:
#Assumes plotting the native coordinates
llon, llat = image2sphere(lon,lat,xx.flat,yy.flat)
pix = ang2pix(get_nside(hpxmap),llon,llat)
values = hpxmap[pix].reshape(xx.shape)
zz = np.ma.array(values,mask=(values==hp.UNSEEN),fill_value=np.nan)
return drawProjImage(xx,yy,zz,coord=coord,**kwargs) | [
"def",
"drawHealpixMap",
"(",
"hpxmap",
",",
"lon",
",",
"lat",
",",
"size",
"=",
"1.0",
",",
"xsize",
"=",
"501",
",",
"coord",
"=",
"'GC'",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"x",
"=",
"np",
".",
"linspace",
"(",
"-",
"size",
",",
"size",
",",
"xsize",
")",
"y",
"=",
"np",
".",
"linspace",
"(",
"-",
"size",
",",
"size",
",",
"xsize",
")",
"xx",
",",
"yy",
"=",
"np",
".",
"meshgrid",
"(",
"x",
",",
"y",
")",
"coord",
"=",
"coord",
".",
"upper",
"(",
")",
"if",
"coord",
"==",
"'GC'",
":",
"#Assumes map and (lon,lat) are Galactic, but plotting celestial",
"llon",
",",
"llat",
"=",
"image2sphere",
"(",
"*",
"gal2cel",
"(",
"lon",
",",
"lat",
")",
",",
"x",
"=",
"xx",
".",
"flat",
",",
"y",
"=",
"yy",
".",
"flat",
")",
"pix",
"=",
"ang2pix",
"(",
"get_nside",
"(",
"hpxmap",
")",
",",
"*",
"cel2gal",
"(",
"llon",
",",
"llat",
")",
")",
"elif",
"coord",
"==",
"'CG'",
":",
"#Assumes map and (lon,lat) are celestial, but plotting Galactic",
"llon",
",",
"llat",
"=",
"image2sphere",
"(",
"*",
"cel2gal",
"(",
"lon",
",",
"lat",
")",
",",
"x",
"=",
"xx",
".",
"flat",
",",
"y",
"=",
"yy",
".",
"flat",
")",
"pix",
"=",
"ang2pix",
"(",
"get_nside",
"(",
"hpxmap",
")",
",",
"*",
"gal2cel",
"(",
"llon",
",",
"llat",
")",
")",
"else",
":",
"#Assumes plotting the native coordinates",
"llon",
",",
"llat",
"=",
"image2sphere",
"(",
"lon",
",",
"lat",
",",
"xx",
".",
"flat",
",",
"yy",
".",
"flat",
")",
"pix",
"=",
"ang2pix",
"(",
"get_nside",
"(",
"hpxmap",
")",
",",
"llon",
",",
"llat",
")",
"values",
"=",
"hpxmap",
"[",
"pix",
"]",
".",
"reshape",
"(",
"xx",
".",
"shape",
")",
"zz",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"values",
",",
"mask",
"=",
"(",
"values",
"==",
"hp",
".",
"UNSEEN",
")",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
"return",
"drawProjImage",
"(",
"xx",
",",
"yy",
",",
"zz",
",",
"coord",
"=",
"coord",
",",
"*",
"*",
"kwargs",
")"
] | Draw local projection of healpix map. | [
"Draw",
"local",
"projection",
"of",
"healpix",
"map",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L162-L189 | train |
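drawHealpixMap ultimately reduces to an angle-to-pixel lookup; a direct healpy sketch of that step with a synthetic map and made-up coordinates (ugali's own ang2pix wrapper is bypassed here):

import numpy as np
import healpy as hp

nside = 64
hpxmap = np.arange(hp.nside2npix(nside), dtype=float)

# Sample the map at (lon, lat) in degrees, the core lookup drawHealpixMap
# performs after projecting its tangent-plane grid back onto the sphere.
lon = np.array([45.0, 45.1])
lat = np.array([30.0, 30.1])
pix = hp.ang2pix(nside, lon, lat, lonlat=True)
print(hpxmap[pix])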
DarkEnergySurvey/ugali | ugali/utils/plotting.py | getDSSImage | def getDSSImage(ra,dec,radius=1.0,xsize=800,**kwargs):
"""
Download Digitized Sky Survey images
https://archive.stsci.edu/cgi-bin/dss_form
https://archive.stsci.edu/cgi-bin/dss_search
Image is in celestial orientation (RA increases to the right)
https://archive.stsci.edu/dss/script_usage.html
ra (r) - right ascension
dec (d) - declination
equinox (e) - equinox (B1950 or J2000; default: J2000)
height (h) - height of image (arcminutes; default: 15.0)
width (w) - width of image (arcminutes; default: 15.0)
format (f) - image format (FITS or GIF; default: FITS)
compression (c) - compression (UNIX, GZIP, or NONE; default: NONE; compression
applies to FITS only)
version (v) - Which version of the survey to use:
1 - First Generation survey (garden variety)
2 - Second generation survey (incomplete)
3 - Check the 2nd generation; if no image is available,
then go to the 1st generation.
4 - The Quick V survey (whence came the Guide Stars Catalog;
used mostly for Phase II proposal submission)
save (s) - Save the file to disk instead of trying to display.
(ON (or anything) or not defined; default: not defined.)
For the skyview service, see:
https://skyview.gsfc.nasa.gov/current/docs/batchpage.html
"""
import subprocess
import tempfile
service = 'skyview'
if service == 'stsci':
url="https://archive.stsci.edu/cgi-bin/dss_search?"
scale = 2.0 * radius * 60.
params=dict(ra='%.3f'%ra,dec='%.3f'%dec,width=scale,height=scale,
format='gif',version=1)
#v='poss2ukstu_red'
elif service == 'skyview':
url="https://skyview.gsfc.nasa.gov/cgi-bin/images?"
params=dict(survey='DSS',position='%.3f,%.3f'%(ra,dec),scaling='Linear',
Return='GIF',size=2*radius,projection='Car',pixels=xsize)
else:
raise Exception("Unrecognized service.")
query='&'.join("%s=%s"%(k,v) for k,v in params.items())
tmp = tempfile.NamedTemporaryFile(suffix='.gif')
cmd='wget --progress=dot:mega -O %s "%s"'%(tmp.name,url+query)
subprocess.call(cmd,shell=True)
im = plt.imread(tmp.name)
tmp.close()
if service == 'stsci' and xsize:
im = scipy.misc.imresize(im,size=(xsize,xsize))
return im | python | def getDSSImage(ra,dec,radius=1.0,xsize=800,**kwargs):
"""
Download Digitized Sky Survey images
https://archive.stsci.edu/cgi-bin/dss_form
https://archive.stsci.edu/cgi-bin/dss_search
Image is in celestial orientation (RA increases to the right)
https://archive.stsci.edu/dss/script_usage.html
ra (r) - right ascension
dec (d) - declination
equinox (e) - equinox (B1950 or J2000; default: J2000)
height (h) - height of image (arcminutes; default: 15.0)
width (w) - width of image (arcminutes; default: 15.0)
format (f) - image format (FITS or GIF; default: FITS)
compression (c) - compression (UNIX, GZIP, or NONE; default: NONE; compression
applies to FITS only)
version (v) - Which version of the survey to use:
1 - First Generation survey (garden variety)
2 - Second generation survey (incomplete)
3 - Check the 2nd generation; if no image is available,
then go to the 1st generation.
4 - The Quick V survey (whence came the Guide Stars Catalog;
used mostly for Phase II proposal submission)
save (s) - Save the file to disk instead of trying to display.
(ON (or anything) or not defined; default: not defined.)
For the skyview service, see:
https://skyview.gsfc.nasa.gov/current/docs/batchpage.html
"""
import subprocess
import tempfile
service = 'skyview'
if service == 'stsci':
url="https://archive.stsci.edu/cgi-bin/dss_search?"
scale = 2.0 * radius * 60.
params=dict(ra='%.3f'%ra,dec='%.3f'%dec,width=scale,height=scale,
format='gif',version=1)
#v='poss2ukstu_red'
elif service == 'skyview':
url="https://skyview.gsfc.nasa.gov/cgi-bin/images?"
params=dict(survey='DSS',position='%.3f,%.3f'%(ra,dec),scaling='Linear',
Return='GIF',size=2*radius,projection='Car',pixels=xsize)
else:
raise Exception("Unrecognized service.")
query='&'.join("%s=%s"%(k,v) for k,v in params.items())
tmp = tempfile.NamedTemporaryFile(suffix='.gif')
cmd='wget --progress=dot:mega -O %s "%s"'%(tmp.name,url+query)
subprocess.call(cmd,shell=True)
im = plt.imread(tmp.name)
tmp.close()
if service == 'stsci' and xsize:
im = scipy.misc.imresize(im,size=(xsize,xsize))
return im | [
"def",
"getDSSImage",
"(",
"ra",
",",
"dec",
",",
"radius",
"=",
"1.0",
",",
"xsize",
"=",
"800",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"subprocess",
"import",
"tempfile",
"service",
"=",
"'skyview'",
"if",
"service",
"==",
"'stsci'",
":",
"url",
"=",
"\"https://archive.stsci.edu/cgi-bin/dss_search?\"",
"scale",
"=",
"2.0",
"*",
"radius",
"*",
"60.",
"params",
"=",
"dict",
"(",
"ra",
"=",
"'%.3f'",
"%",
"ra",
",",
"dec",
"=",
"'%.3f'",
"%",
"dec",
",",
"width",
"=",
"scale",
",",
"height",
"=",
"scale",
",",
"format",
"=",
"'gif'",
",",
"version",
"=",
"1",
")",
"#v='poss2ukstu_red'",
"elif",
"service",
"==",
"'skyview'",
":",
"url",
"=",
"\"https://skyview.gsfc.nasa.gov/cgi-bin/images?\"",
"params",
"=",
"dict",
"(",
"survey",
"=",
"'DSS'",
",",
"position",
"=",
"'%.3f,%.3f'",
"%",
"(",
"ra",
",",
"dec",
")",
",",
"scaling",
"=",
"'Linear'",
",",
"Return",
"=",
"'GIF'",
",",
"size",
"=",
"2",
"*",
"radius",
",",
"projection",
"=",
"'Car'",
",",
"pixels",
"=",
"xsize",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unrecognized service.\"",
")",
"query",
"=",
"'&'",
".",
"join",
"(",
"\"%s=%s\"",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
")",
"tmp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.gif'",
")",
"cmd",
"=",
"'wget --progress=dot:mega -O %s \"%s\"'",
"%",
"(",
"tmp",
".",
"name",
",",
"url",
"+",
"query",
")",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"im",
"=",
"plt",
".",
"imread",
"(",
"tmp",
".",
"name",
")",
"tmp",
".",
"close",
"(",
")",
"if",
"service",
"==",
"'stsci'",
"and",
"xsize",
":",
"im",
"=",
"scipy",
".",
"misc",
".",
"imresize",
"(",
"im",
",",
"size",
"=",
"(",
"xsize",
",",
"xsize",
")",
")",
"return",
"im"
] | Download Digitized Sky Survey images
https://archive.stsci.edu/cgi-bin/dss_form
https://archive.stsci.edu/cgi-bin/dss_search
Image is in celestial orientation (RA increases to the right)
https://archive.stsci.edu/dss/script_usage.html
ra (r) - right ascension
dec (d) - declination
equinox (e) - equinox (B1950 or J2000; default: J2000)
height (h) - height of image (arcminutes; default: 15.0)
width (w) - width of image (arcminutes; default: 15.0)
format (f) - image format (FITS or GIF; default: FITS)
compression (c) - compression (UNIX, GZIP, or NONE; default: NONE; compression
applies to FITS only)
version (v) - Which version of the survey to use:
1 - First Generation survey (garden variety)
2 - Second generation survey (incomplete)
3 - Check the 2nd generation; if no image is available,
then go to the 1st generation.
4 - The Quick V survey (whence came the Guide Stars Catalog;
used mostly for Phase II proposal submission)
save (s) - Save the file to disk instead of trying to display.
(ON (or anything) or not defined; default: not defined.)
For the skyview service, see:
https://skyview.gsfc.nasa.gov/current/docs/batchpage.html | [
"Download",
"Digitized",
"Sky",
"Survey",
"images"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L241-L298 | train |
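The query string above can also be built and fetched with the standard library instead of shelling out to wget; a sketch using the same skyview parameters:

from urllib.parse import urlencode

ra, dec, radius, xsize = 56.75, -33.7, 1.0, 800
params = dict(survey='DSS', position='%.3f,%.3f' % (ra, dec), scaling='Linear',
              Return='GIF', size=2 * radius, projection='Car', pixels=xsize)
url = 'https://skyview.gsfc.nasa.gov/cgi-bin/images?' + urlencode(params)
print(url)
# urllib.request.urlretrieve(url, 'cutout.gif') would fetch it without wget.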
DarkEnergySurvey/ugali | ugali/utils/plotting.py | draw_slices | def draw_slices(hist, func=np.sum, **kwargs):
""" Draw horizontal and vertical slices through histogram """
from mpl_toolkits.axes_grid1 import make_axes_locatable
kwargs.setdefault('ls','-')
ax = plt.gca()
data = hist
# Slices
vslice = func(data,axis=0)
hslice = func(data,axis=1)
npix = np.array(data.shape)
#xlim,ylim = plt.array(zip([0,0],npix-1))
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#extent = ax.get_extent()
#xlim =extent[:2]
#ylim = extent[2:]
# Bin centers
xbin = np.linspace(xlim[0],xlim[1],len(vslice))#+0.5
ybin = np.linspace(ylim[0],ylim[1],len(hslice))#+0.5
divider = make_axes_locatable(ax)
#gh2 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[2, 1])
hax = divider.append_axes("right", size=1.2, pad=0.05,sharey=ax,
axes_class=axes_divider.LocatableAxes)
hax.axis["left"].toggle(label=False, ticklabels=False)
#hax.plot(hslice, plt.arange(*ylim)+0.5,'-') # Bin center
hax.plot(hslice, ybin, **kwargs) # Bin center
hax.xaxis.set_major_locator(MaxNLocator(4,prune='both'))
hax.set_ylim(*ylim)
#gh1 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[0, 2])
vax = divider.append_axes("top", size=1.2, pad=0.05, sharex=ax,
axes_class=axes_divider.LocatableAxes)
vax.axis["bottom"].toggle(label=False, ticklabels=False)
vax.plot(xbin, vslice, **kwargs)
vax.yaxis.set_major_locator(MaxNLocator(4,prune='lower'))
vax.set_xlim(*xlim)
return vax,hax | python | def draw_slices(hist, func=np.sum, **kwargs):
""" Draw horizontal and vertical slices through histogram """
from mpl_toolkits.axes_grid1 import make_axes_locatable
kwargs.setdefault('ls','-')
ax = plt.gca()
data = hist
# Slices
vslice = func(data,axis=0)
hslice = func(data,axis=1)
npix = np.array(data.shape)
#xlim,ylim = plt.array(zip([0,0],npix-1))
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#extent = ax.get_extent()
#xlim =extent[:2]
#ylim = extent[2:]
# Bin centers
xbin = np.linspace(xlim[0],xlim[1],len(vslice))#+0.5
ybin = np.linspace(ylim[0],ylim[1],len(hslice))#+0.5
divider = make_axes_locatable(ax)
#gh2 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[2, 1])
hax = divider.append_axes("right", size=1.2, pad=0.05,sharey=ax,
axes_class=axes_divider.LocatableAxes)
hax.axis["left"].toggle(label=False, ticklabels=False)
#hax.plot(hslice, plt.arange(*ylim)+0.5,'-') # Bin center
hax.plot(hslice, ybin, **kwargs) # Bin center
hax.xaxis.set_major_locator(MaxNLocator(4,prune='both'))
hax.set_ylim(*ylim)
#gh1 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[0, 2])
vax = divider.append_axes("top", size=1.2, pad=0.05, sharex=ax,
axes_class=axes_divider.LocatableAxes)
vax.axis["bottom"].toggle(label=False, ticklabels=False)
vax.plot(xbin, vslice, **kwargs)
vax.yaxis.set_major_locator(MaxNLocator(4,prune='lower'))
vax.set_xlim(*xlim)
return vax,hax | [
"def",
"draw_slices",
"(",
"hist",
",",
"func",
"=",
"np",
".",
"sum",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"mpl_toolkits",
".",
"axes_grid1",
"import",
"make_axes_locatable",
"kwargs",
".",
"setdefault",
"(",
"'ls'",
",",
"'-'",
")",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"data",
"=",
"hist",
"# Slices",
"vslice",
"=",
"func",
"(",
"data",
",",
"axis",
"=",
"0",
")",
"hslice",
"=",
"func",
"(",
"data",
",",
"axis",
"=",
"1",
")",
"npix",
"=",
"np",
".",
"array",
"(",
"data",
".",
"shape",
")",
"#xlim,ylim = plt.array(zip([0,0],npix-1))",
"xlim",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"ylim",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"#extent = ax.get_extent()",
"#xlim =extent[:2]",
"#ylim = extent[2:]",
"# Bin centers",
"xbin",
"=",
"np",
".",
"linspace",
"(",
"xlim",
"[",
"0",
"]",
",",
"xlim",
"[",
"1",
"]",
",",
"len",
"(",
"vslice",
")",
")",
"#+0.5 ",
"ybin",
"=",
"np",
".",
"linspace",
"(",
"ylim",
"[",
"0",
"]",
",",
"ylim",
"[",
"1",
"]",
",",
"len",
"(",
"hslice",
")",
")",
"#+0.5",
"divider",
"=",
"make_axes_locatable",
"(",
"ax",
")",
"#gh2 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[2, 1])",
"hax",
"=",
"divider",
".",
"append_axes",
"(",
"\"right\"",
",",
"size",
"=",
"1.2",
",",
"pad",
"=",
"0.05",
",",
"sharey",
"=",
"ax",
",",
"axes_class",
"=",
"axes_divider",
".",
"LocatableAxes",
")",
"hax",
".",
"axis",
"[",
"\"left\"",
"]",
".",
"toggle",
"(",
"label",
"=",
"False",
",",
"ticklabels",
"=",
"False",
")",
"#hax.plot(hslice, plt.arange(*ylim)+0.5,'-') # Bin center",
"hax",
".",
"plot",
"(",
"hslice",
",",
"ybin",
",",
"*",
"*",
"kwargs",
")",
"# Bin center",
"hax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"4",
",",
"prune",
"=",
"'both'",
")",
")",
"hax",
".",
"set_ylim",
"(",
"*",
"ylim",
")",
"#gh1 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[0, 2])",
"vax",
"=",
"divider",
".",
"append_axes",
"(",
"\"top\"",
",",
"size",
"=",
"1.2",
",",
"pad",
"=",
"0.05",
",",
"sharex",
"=",
"ax",
",",
"axes_class",
"=",
"axes_divider",
".",
"LocatableAxes",
")",
"vax",
".",
"axis",
"[",
"\"bottom\"",
"]",
".",
"toggle",
"(",
"label",
"=",
"False",
",",
"ticklabels",
"=",
"False",
")",
"vax",
".",
"plot",
"(",
"xbin",
",",
"vslice",
",",
"*",
"*",
"kwargs",
")",
"vax",
".",
"yaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"4",
",",
"prune",
"=",
"'lower'",
")",
")",
"vax",
".",
"set_xlim",
"(",
"*",
"xlim",
")",
"return",
"vax",
",",
"hax"
] | Draw horizontal and vertical slices through histogram | [
"Draw",
"horizontal",
"and",
"vertical",
"slices",
"through",
"histogram"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1035-L1077 | train |
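axes_divider.LocatableAxes used above comes from an older mpl_toolkits API that recent Matplotlib versions have removed; a sketch of the same slice-panel layout with the current append_axes defaults (toy data):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

data = np.random.poisson(5.0, size=(40, 60))
fig, ax = plt.subplots()
ax.imshow(data, origin='lower')

divider = make_axes_locatable(ax)
hax = divider.append_axes('right', size='25%', pad=0.05, sharey=ax)
vax = divider.append_axes('top', size='25%', pad=0.05, sharex=ax)
hax.plot(data.sum(axis=1), np.arange(data.shape[0]))  # row sums beside the image
vax.plot(np.arange(data.shape[1]), data.sum(axis=0))  # column sums above it
hax.tick_params(labelleft=False)
vax.tick_params(labelbottom=False)
plt.show()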
DarkEnergySurvey/ugali | ugali/utils/plotting.py | plotSkymapCatalog | def plotSkymapCatalog(lon,lat,**kwargs):
"""
Plot a catalog of coordinates on a full-sky map.
"""
fig = plt.figure()
ax = plt.subplot(111,projection=projection)
drawSkymapCatalog(ax,lon,lat,**kwargs) | python | def plotSkymapCatalog(lon,lat,**kwargs):
"""
Plot a catalog of coordinates on a full-sky map.
"""
fig = plt.figure()
ax = plt.subplot(111,projection=projection)
drawSkymapCatalog(ax,lon,lat,**kwargs) | [
"def",
"plotSkymapCatalog",
"(",
"lon",
",",
"lat",
",",
"*",
"*",
"kwargs",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"111",
",",
"projection",
"=",
"projection",
")",
"drawSkymapCatalog",
"(",
"ax",
",",
"lon",
",",
"lat",
",",
"*",
"*",
"kwargs",
")"
] | Plot a catalog of coordinates on a full-sky map. | [
"Plot",
"a",
"catalog",
"of",
"coordinates",
"on",
"a",
"full",
"-",
"sky",
"map",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1409-L1415 | train |
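Note that projection is neither a parameter of plotSkymapCatalog nor pulled from kwargs, so the subplot call raises NameError unless a module-level projection happens to exist. A hedged fix as a standalone sketch (the 'aitoff' default is an assumption, and drawSkymapCatalog is replaced by a plain scatter):

import numpy as np
import matplotlib.pyplot as plt

def plot_skymap_catalog(lon, lat, projection='aitoff', **kwargs):
    # Explicit keyword instead of an ambient global; Matplotlib's geographic
    # projections ('aitoff', 'mollweide', ...) expect radians in [-pi, pi].
    fig = plt.figure()
    ax = plt.subplot(111, projection=projection)
    lon = np.asarray(lon)
    x = np.radians(np.where(lon > 180, lon - 360, lon))
    ax.scatter(x, np.radians(lat), **kwargs)  # stand-in for drawSkymapCatalog
    return ax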
DarkEnergySurvey/ugali | ugali/utils/plotting.py | makePath | def makePath(x_path, y_path, epsilon=1.e-10):
"""
Create a closed path.
"""
x_path_closed = np.concatenate([x_path, x_path[::-1]])
y_path_closed = np.concatenate([y_path, epsilon + y_path[::-1]])
path = matplotlib.path.Path(list(zip(x_path_closed, y_path_closed)))
return path | python | def makePath(x_path, y_path, epsilon=1.e-10):
"""
Create a closed path.
"""
x_path_closed = np.concatenate([x_path, x_path[::-1]])
y_path_closed = np.concatenate([y_path, epsilon + y_path[::-1]])
path = matplotlib.path.Path(list(zip(x_path_closed, y_path_closed)))
return path | [
"def",
"makePath",
"(",
"x_path",
",",
"y_path",
",",
"epsilon",
"=",
"1.e-10",
")",
":",
"x_path_closed",
"=",
"np",
".",
"concatenate",
"(",
"[",
"x_path",
",",
"x_path",
"[",
":",
":",
"-",
"1",
"]",
"]",
")",
"y_path_closed",
"=",
"np",
".",
"concatenate",
"(",
"[",
"y_path",
",",
"epsilon",
"+",
"y_path",
"[",
":",
":",
"-",
"1",
"]",
"]",
")",
"path",
"=",
"matplotlib",
".",
"path",
".",
"Path",
"(",
"list",
"(",
"zip",
"(",
"x_path_closed",
",",
"y_path_closed",
")",
")",
")",
"return",
"path"
] | Create a closed path. | [
"Create",
"closed",
"path",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1499-L1506 | train |
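A usage sketch: the epsilon offset presumably keeps the return leg from exactly retracing the outbound vertices, so the result is a well-formed closed outline that can back a PathPatch:

import numpy as np
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from ugali.utils.plotting import makePath

x = np.linspace(0.0, 1.0, 50)
y = x**2
path = makePath(x, y)  # out along the curve, back along a copy offset by epsilon

fig, ax = plt.subplots()
ax.add_patch(mpatches.PathPatch(path, fill=False))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.show()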
DarkEnergySurvey/ugali | ugali/utils/plotting.py | BasePlotter.drawMask | def drawMask(self,ax=None, mask=None, mtype='maglim'):
""" Draw the maglim from the mask. """
if not ax: ax = plt.gca()
if mask is None:
mask = ugali.analysis.loglike.createMask(self.config,roi=self.roi)
mask_map = hp.UNSEEN*np.ones(hp.nside2npix(self.nside))
if mtype.lower() == 'maglim':
mask_map[mask.roi.pixels] = mask.mask_1.mask_roi_sparse
elif mtype.lower() == 'fracdet':
mask_map[mask.roi.pixels] = mask.mask_1.frac_roi_sparse
else:
raise Exception("Unrecognized type: %s"%mtype)
masked = (mask_map==hp.UNSEEN) | (mask_map==0)
mask_map = np.ma.array(mask_map,mask=masked,fill_value=np.nan)
im = drawHealpixMap(mask_map,self.lon,self.lat,self.radius,coord=self.coord)
try: cbar = ax.cax.colorbar(im)
except: cbar = plt.colorbar(im)
cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(),rotation=90)
ax.annotate(mtype,**self.label_kwargs)
return im | python | def drawMask(self,ax=None, mask=None, mtype='maglim'):
""" Draw the maglim from the mask. """
if not ax: ax = plt.gca()
if mask is None:
mask = ugali.analysis.loglike.createMask(self.config,roi=self.roi)
mask_map = hp.UNSEEN*np.ones(hp.nside2npix(self.nside))
if mtype.lower() == 'maglim':
mask_map[mask.roi.pixels] = mask.mask_1.mask_roi_sparse
elif mtype.lower() == 'fracdet':
mask_map[mask.roi.pixels] = mask.mask_1.frac_roi_sparse
else:
raise Exception("Unrecognized type: %s"%mtype)
masked = (mask_map==hp.UNSEEN) | (mask_map==0)
mask_map = np.ma.array(mask_map,mask=masked,fill_value=np.nan)
im = drawHealpixMap(mask_map,self.lon,self.lat,self.radius,coord=self.coord)
try: cbar = ax.cax.colorbar(im)
except: cbar = plt.colorbar(im)
cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(),rotation=90)
ax.annotate(mtype,**self.label_kwargs)
return im | [
"def",
"drawMask",
"(",
"self",
",",
"ax",
"=",
"None",
",",
"mask",
"=",
"None",
",",
"mtype",
"=",
"'maglim'",
")",
":",
"if",
"not",
"ax",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"mask",
"is",
"None",
":",
"mask",
"=",
"ugali",
".",
"analysis",
".",
"loglike",
".",
"createMask",
"(",
"self",
".",
"config",
",",
"roi",
"=",
"self",
".",
"roi",
")",
"mask_map",
"=",
"hp",
".",
"UNSEEN",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"nside",
")",
")",
"if",
"mtype",
".",
"lower",
"(",
")",
"==",
"'maglim'",
":",
"mask_map",
"[",
"mask",
".",
"roi",
".",
"pixels",
"]",
"=",
"mask",
".",
"mask_1",
".",
"mask_roi_sparse",
"elif",
"mtype",
".",
"lower",
"(",
")",
"==",
"'fracdet'",
":",
"mask_map",
"[",
"mask",
".",
"roi",
".",
"pixels",
"]",
"=",
"mask",
".",
"mask_1",
".",
"frac_roi_sparse",
"else",
":",
"raise",
"Exception",
"(",
"\"Unrecognized type: %s\"",
"%",
"mtype",
")",
"masked",
"=",
"(",
"mask_map",
"==",
"hp",
".",
"UNSEEN",
")",
"|",
"(",
"mask_map",
"==",
"0",
")",
"mask_map",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"mask_map",
",",
"mask",
"=",
"masked",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
"im",
"=",
"drawHealpixMap",
"(",
"mask_map",
",",
"self",
".",
"lon",
",",
"self",
".",
"lat",
",",
"self",
".",
"radius",
",",
"coord",
"=",
"self",
".",
"coord",
")",
"try",
":",
"cbar",
"=",
"ax",
".",
"cax",
".",
"colorbar",
"(",
"im",
")",
"except",
":",
"cbar",
"=",
"plt",
".",
"colorbar",
"(",
"im",
")",
"cbar",
".",
"ax",
".",
"set_xticklabels",
"(",
"cbar",
".",
"ax",
".",
"get_xticklabels",
"(",
")",
",",
"rotation",
"=",
"90",
")",
"ax",
".",
"annotate",
"(",
"mtype",
",",
"*",
"*",
"self",
".",
"label_kwargs",
")",
"return",
"im"
] | Draw the maglim from the mask. | [
"Draw",
"the",
"maglim",
"from",
"the",
"mask",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L458-L480 | train |
warrenspe/hconf | hconf/subparsers/ini.py | INI.parse | def parse(self, configManager, config):
"""
Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
parser = ConfigParser.RawConfigParser()
configOptions = dict()
configFile = self._getConfigFile(config)
if configFile:
parser.readfp(configFile)
for section in parser.sections():
if self.sections is None or section in self.sections:
configOptions.update(parser.items(section))
return configOptions | python | def parse(self, configManager, config):
"""
Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
parser = ConfigParser.RawConfigParser()
configOptions = dict()
configFile = self._getConfigFile(config)
if configFile:
parser.readfp(configFile)
for section in parser.sections():
if self.sections is None or section in self.sections:
configOptions.update(parser.items(section))
return configOptions | [
"def",
"parse",
"(",
"self",
",",
"configManager",
",",
"config",
")",
":",
"parser",
"=",
"ConfigParser",
".",
"RawConfigParser",
"(",
")",
"configOptions",
"=",
"dict",
"(",
")",
"configFile",
"=",
"self",
".",
"_getConfigFile",
"(",
"config",
")",
"if",
"configFile",
":",
"parser",
".",
"readfp",
"(",
"configFile",
")",
"for",
"section",
"in",
"parser",
".",
"sections",
"(",
")",
":",
"if",
"self",
".",
"sections",
"is",
"None",
"or",
"section",
"in",
"self",
".",
"sections",
":",
"configOptions",
".",
"update",
"(",
"parser",
".",
"items",
"(",
"section",
")",
")",
"return",
"configOptions"
] | Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object. | [
"Parse",
"configuration",
"options",
"out",
"of",
"an",
".",
"ini",
"configuration",
"file",
"."
] | 12074d15dc3641d3903488c95d89a507386a32d5 | https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/ini.py#L46-L67 | train |
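A self-contained run of the same ConfigParser pattern; note read_file() is the Python 3 spelling of the readfp() call used above:

import configparser
from io import StringIO

ini = StringIO("""
[server]
host = 0.0.0.0
port = 8080

[logging]
level = debug
""")

parser = configparser.RawConfigParser()
parser.read_file(ini)

options = {}
for section in parser.sections():
    if section in ('server',):  # honor a section whitelist, as INI.parse does
        options.update(parser.items(section))
print(options)  # {'host': '0.0.0.0', 'port': '8080'}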
accraze/python-markov-novel | src/markov_novel/chapter.py | Chapter.write_chapter | def write_chapter(self):
"""
Create a chapter
that contains a random number
of paragraphs
"""
self.paragraphs = []
self.paragraphs.append('\n')
for x in range(randint(0, 50)):
p = Paragraph(self.model)
self.paragraphs.append(p.get_paragraph())
self.paragraphs.append('\n')
return self.paragraphs | python | def write_chapter(self):
"""
Create a chapter
that contains a random number
of paragraphs
"""
self.paragraphs = []
self.paragraphs.append('\n')
for x in range(randint(0, 50)):
p = Paragraph(self.model)
self.paragraphs.append(p.get_paragraph())
self.paragraphs.append('\n')
return self.paragraphs | [
"def",
"write_chapter",
"(",
"self",
")",
":",
"self",
".",
"paragraphs",
"=",
"[",
"]",
"self",
".",
"paragraphs",
".",
"append",
"(",
"'\\n'",
")",
"for",
"x",
"in",
"range",
"(",
"randint",
"(",
"0",
",",
"50",
")",
")",
":",
"p",
"=",
"Paragraph",
"(",
"self",
".",
"model",
")",
"self",
".",
"paragraphs",
".",
"append",
"(",
"p",
".",
"get_paragraph",
"(",
")",
")",
"self",
".",
"paragraphs",
".",
"append",
"(",
"'\\n'",
")",
"return",
"self",
".",
"paragraphs"
] | Create a chapter
that contains a random number
of paragraphs | [
"Create",
"a",
"chapter",
"that",
"contains",
"a",
"random",
"number",
"of",
"paragraphs"
] | ff451639e93a3ac11fb0268b92bc0cffc00bfdbe | https://github.com/accraze/python-markov-novel/blob/ff451639e93a3ac11fb0268b92bc0cffc00bfdbe/src/markov_novel/chapter.py#L20-L32 | train |
kallimachos/sphinxmark | sphinxmark/__init__.py | buildcss | def buildcss(app, buildpath, imagefile):
"""Create CSS file."""
# set default values
div = 'body'
repeat = 'repeat-y'
position = 'center'
attachment = 'scroll'
if app.config.sphinxmark_div != 'default':
div = app.config.sphinxmark_div
if app.config.sphinxmark_repeat is False:
repeat = 'no-repeat'
if app.config.sphinxmark_fixed is True:
attachment = 'fixed'
border = app.config.sphinxmark_border
if border == 'left' or border == 'right':
css = template('border', div=div, image=imagefile, side=border)
else:
css = template('watermark', div=div, image=imagefile, repeat=repeat,
position=position, attachment=attachment)
LOG.debug('[sphinxmark] Template: ' + css)
cssname = 'sphinxmark.css'
cssfile = os.path.join(buildpath, cssname)
with open(cssfile, 'w') as f:
f.write(css)
return(cssname) | python | def buildcss(app, buildpath, imagefile):
"""Create CSS file."""
# set default values
div = 'body'
repeat = 'repeat-y'
position = 'center'
attachment = 'scroll'
if app.config.sphinxmark_div != 'default':
div = app.config.sphinxmark_div
if app.config.sphinxmark_repeat is False:
repeat = 'no-repeat'
if app.config.sphinxmark_fixed is True:
attachment = 'fixed'
border = app.config.sphinxmark_border
if border == 'left' or border == 'right':
css = template('border', div=div, image=imagefile, side=border)
else:
css = template('watermark', div=div, image=imagefile, repeat=repeat,
position=position, attachment=attachment)
LOG.debug('[sphinxmark] Template: ' + css)
cssname = 'sphinxmark.css'
cssfile = os.path.join(buildpath, cssname)
with open(cssfile, 'w') as f:
f.write(css)
return(cssname) | [
"def",
"buildcss",
"(",
"app",
",",
"buildpath",
",",
"imagefile",
")",
":",
"# set default values",
"div",
"=",
"'body'",
"repeat",
"=",
"'repeat-y'",
"position",
"=",
"'center'",
"attachment",
"=",
"'scroll'",
"if",
"app",
".",
"config",
".",
"sphinxmark_div",
"!=",
"'default'",
":",
"div",
"=",
"app",
".",
"config",
".",
"sphinxmark_div",
"if",
"app",
".",
"config",
".",
"sphinxmark_repeat",
"is",
"False",
":",
"repeat",
"=",
"'no-repeat'",
"if",
"app",
".",
"config",
".",
"sphinxmark_fixed",
"is",
"True",
":",
"attachment",
"=",
"'fixed'",
"border",
"=",
"app",
".",
"config",
".",
"sphinxmark_border",
"if",
"border",
"==",
"'left'",
"or",
"border",
"==",
"'right'",
":",
"css",
"=",
"template",
"(",
"'border'",
",",
"div",
"=",
"div",
",",
"image",
"=",
"imagefile",
",",
"side",
"=",
"border",
")",
"else",
":",
"css",
"=",
"template",
"(",
"'watermark'",
",",
"div",
"=",
"div",
",",
"image",
"=",
"imagefile",
",",
"repeat",
"=",
"repeat",
",",
"position",
"=",
"position",
",",
"attachment",
"=",
"attachment",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] Template: '",
"+",
"css",
")",
"cssname",
"=",
"'sphinxmark.css'",
"cssfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"buildpath",
",",
"cssname",
")",
"with",
"open",
"(",
"cssfile",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"css",
")",
"return",
"(",
"cssname",
")"
] | Create CSS file. | [
"Create",
"CSS",
"file",
"."
] | f7b17d9dabf1fff448bb38d90474498f0d203990 | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L34-L64 | train |
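template() here is almost certainly bottle's (getimage below appends to bottle's TEMPLATE_PATH); a dependency-free sketch of the watermark template with str.format — the exact CSS the real template emits is an assumption:

WATERMARK_CSS = """\
{div} {{
    background-image: url("{image}");
    background-repeat: {repeat};
    background-position: {position};
    background-attachment: {attachment};
}}
"""

css = WATERMARK_CSS.format(div='body', image='watermark-draft.png',
                           repeat='repeat-y', position='center',
                           attachment='scroll')
with open('sphinxmark.css', 'w') as f:
    f.write(css)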
kallimachos/sphinxmark | sphinxmark/__init__.py | createimage | def createimage(app, srcdir, buildpath):
"""Create PNG image from string."""
text = app.config.sphinxmark_text
# draw transparent background
width = app.config.sphinxmark_text_width
height = app.config.sphinxmark_text_spacing
img = Image.new('RGBA', (width, height), (255, 255, 255, 0))
d = ImageDraw.Draw(img)
# set font
fontfile = os.path.join(srcdir, 'arial.ttf')
font = ImageFont.truetype(fontfile, app.config.sphinxmark_text_size)
# set x y location for text
xsize, ysize = d.textsize(text, font)
LOG.debug('[sphinxmark] x = ' + str(xsize) + '\ny = ' + str(ysize))
x = (width / 2) - (xsize / 2)
y = (height / 2) - (ysize / 2)
# add text to image
color = app.config.sphinxmark_text_color
d.text((x, y), text, font=font, fill=color)
# set opacity
img.putalpha(app.config.sphinxmark_text_opacity)
# rotate image
img = img.rotate(app.config.sphinxmark_text_rotation)
# save image
imagefile = 'textmark_' + text + '.png'
imagepath = os.path.join(buildpath, imagefile)
img.save(imagepath, 'PNG')
LOG.debug('[sphinxmark] Image saved to: ' + imagepath)
return(imagefile) | python | def createimage(app, srcdir, buildpath):
"""Create PNG image from string."""
text = app.config.sphinxmark_text
# draw transparent background
width = app.config.sphinxmark_text_width
height = app.config.sphinxmark_text_spacing
img = Image.new('RGBA', (width, height), (255, 255, 255, 0))
d = ImageDraw.Draw(img)
# set font
fontfile = os.path.join(srcdir, 'arial.ttf')
font = ImageFont.truetype(fontfile, app.config.sphinxmark_text_size)
# set x y location for text
xsize, ysize = d.textsize(text, font)
LOG.debug('[sphinxmark] x = ' + str(xsize) + '\ny = ' + str(ysize))
x = (width / 2) - (xsize / 2)
y = (height / 2) - (ysize / 2)
# add text to image
color = app.config.sphinxmark_text_color
d.text((x, y), text, font=font, fill=color)
# set opacity
img.putalpha(app.config.sphinxmark_text_opacity)
# rotate image
img = img.rotate(app.config.sphinxmark_text_rotation)
# save image
imagefile = 'textmark_' + text + '.png'
imagepath = os.path.join(buildpath, imagefile)
img.save(imagepath, 'PNG')
LOG.debug('[sphinxmark] Image saved to: ' + imagepath)
return(imagefile) | [
"def",
"createimage",
"(",
"app",
",",
"srcdir",
",",
"buildpath",
")",
":",
"text",
"=",
"app",
".",
"config",
".",
"sphinxmark_text",
"# draw transparent background",
"width",
"=",
"app",
".",
"config",
".",
"sphinxmark_text_width",
"height",
"=",
"app",
".",
"config",
".",
"sphinxmark_text_spacing",
"img",
"=",
"Image",
".",
"new",
"(",
"'RGBA'",
",",
"(",
"width",
",",
"height",
")",
",",
"(",
"255",
",",
"255",
",",
"255",
",",
"0",
")",
")",
"d",
"=",
"ImageDraw",
".",
"Draw",
"(",
"img",
")",
"# set font",
"fontfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"srcdir",
",",
"'arial.ttf'",
")",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"fontfile",
",",
"app",
".",
"config",
".",
"sphinxmark_text_size",
")",
"# set x y location for text",
"xsize",
",",
"ysize",
"=",
"d",
".",
"textsize",
"(",
"text",
",",
"font",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] x = '",
"+",
"str",
"(",
"xsize",
")",
"+",
"'\\ny = '",
"+",
"str",
"(",
"ysize",
")",
")",
"x",
"=",
"(",
"width",
"/",
"2",
")",
"-",
"(",
"xsize",
"/",
"2",
")",
"y",
"=",
"(",
"height",
"/",
"2",
")",
"-",
"(",
"ysize",
"/",
"2",
")",
"# add text to image",
"color",
"=",
"app",
".",
"config",
".",
"sphinxmark_text_color",
"d",
".",
"text",
"(",
"(",
"x",
",",
"y",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"color",
")",
"# set opacity",
"img",
".",
"putalpha",
"(",
"app",
".",
"config",
".",
"sphinxmark_text_opacity",
")",
"# rotate image",
"img",
"=",
"img",
".",
"rotate",
"(",
"app",
".",
"config",
".",
"sphinxmark_text_rotation",
")",
"# save image",
"imagefile",
"=",
"'textmark_'",
"+",
"text",
"+",
"'.png'",
"imagepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"buildpath",
",",
"imagefile",
")",
"img",
".",
"save",
"(",
"imagepath",
",",
"'PNG'",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] Image saved to: '",
"+",
"imagepath",
")",
"return",
"(",
"imagefile",
")"
] | Create PNG image from string. | [
"Create",
"PNG",
"image",
"from",
"string",
"."
] | f7b17d9dabf1fff448bb38d90474498f0d203990 | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L67-L103 | train |
kallimachos/sphinxmark | sphinxmark/__init__.py | getimage | def getimage(app):
"""Get image file."""
# append source directory to TEMPLATE_PATH so template is found
srcdir = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_PATH.append(srcdir)
staticbase = '_static'
buildpath = os.path.join(app.outdir, staticbase)
try:
os.makedirs(buildpath)
except OSError:
if not os.path.isdir(buildpath):
raise
if app.config.sphinxmark_image == 'default':
imagefile = 'watermark-draft.png'
imagepath = os.path.join(srcdir, imagefile)
copy(imagepath, buildpath)
LOG.debug('[sphinxmark] Using default image: ' + imagefile)
elif app.config.sphinxmark_image == 'text':
imagefile = createimage(app, srcdir, buildpath)
LOG.debug('[sphinxmark] Image: ' + imagefile)
else:
imagefile = app.config.sphinxmark_image
if app.config.html_static_path:
staticpath = app.config.html_static_path[0]
else:
staticpath = '_static'
LOG.debug('[sphinxmark] static path: ' + staticpath)
imagepath = os.path.join(app.confdir, staticpath, imagefile)
LOG.debug('[sphinxmark] Imagepath: ' + imagepath)
try:
copy(imagepath, buildpath)
except Exception:
message = ("Cannot find '%s'. Put watermark images in the "
"'_static' directory or specify the location using "
"'html_static_path'." % imagefile)
LOG.warning(message)
LOG.warning('Failed to add watermark.')
return
return(buildpath, imagefile) | python | def getimage(app):
"""Get image file."""
# append source directory to TEMPLATE_PATH so template is found
srcdir = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_PATH.append(srcdir)
staticbase = '_static'
buildpath = os.path.join(app.outdir, staticbase)
try:
os.makedirs(buildpath)
except OSError:
if not os.path.isdir(buildpath):
raise
if app.config.sphinxmark_image == 'default':
imagefile = 'watermark-draft.png'
imagepath = os.path.join(srcdir, imagefile)
copy(imagepath, buildpath)
LOG.debug('[sphinxmark] Using default image: ' + imagefile)
elif app.config.sphinxmark_image == 'text':
imagefile = createimage(app, srcdir, buildpath)
LOG.debug('[sphinxmark] Image: ' + imagefile)
else:
imagefile = app.config.sphinxmark_image
if app.config.html_static_path:
staticpath = app.config.html_static_path[0]
else:
staticpath = '_static'
LOG.debug('[sphinxmark] static path: ' + staticpath)
imagepath = os.path.join(app.confdir, staticpath, imagefile)
LOG.debug('[sphinxmark] Imagepath: ' + imagepath)
try:
copy(imagepath, buildpath)
except Exception:
message = ("Cannot find '%s'. Put watermark images in the "
"'_static' directory or specify the location using "
"'html_static_path'." % imagefile)
LOG.warning(message)
LOG.warning('Failed to add watermark.')
return
return(buildpath, imagefile) | [
"def",
"getimage",
"(",
"app",
")",
":",
"# append source directory to TEMPLATE_PATH so template is found",
"srcdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"TEMPLATE_PATH",
".",
"append",
"(",
"srcdir",
")",
"staticbase",
"=",
"'_static'",
"buildpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"outdir",
",",
"staticbase",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"buildpath",
")",
"except",
"OSError",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"buildpath",
")",
":",
"raise",
"if",
"app",
".",
"config",
".",
"sphinxmark_image",
"==",
"'default'",
":",
"imagefile",
"=",
"'watermark-draft.png'",
"imagepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"srcdir",
",",
"imagefile",
")",
"copy",
"(",
"imagepath",
",",
"buildpath",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] Using default image: '",
"+",
"imagefile",
")",
"elif",
"app",
".",
"config",
".",
"sphinxmark_image",
"==",
"'text'",
":",
"imagefile",
"=",
"createimage",
"(",
"app",
",",
"srcdir",
",",
"buildpath",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] Image: '",
"+",
"imagefile",
")",
"else",
":",
"imagefile",
"=",
"app",
".",
"config",
".",
"sphinxmark_image",
"if",
"app",
".",
"config",
".",
"html_static_path",
":",
"staticpath",
"=",
"app",
".",
"config",
".",
"html_static_path",
"[",
"0",
"]",
"else",
":",
"staticpath",
"=",
"'_static'",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] static path: '",
"+",
"staticpath",
")",
"imagepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"confdir",
",",
"staticpath",
",",
"imagefile",
")",
"LOG",
".",
"debug",
"(",
"'[sphinxmark] Imagepath: '",
"+",
"imagepath",
")",
"try",
":",
"copy",
"(",
"imagepath",
",",
"buildpath",
")",
"except",
"Exception",
":",
"message",
"=",
"(",
"\"Cannot find '%s'. Put watermark images in the \"",
"\"'_static' directory or specify the location using \"",
"\"'html_static_path'.\"",
"%",
"imagefile",
")",
"LOG",
".",
"warning",
"(",
"message",
")",
"LOG",
".",
"warning",
"(",
"'Failed to add watermark.'",
")",
"return",
"return",
"(",
"buildpath",
",",
"imagefile",
")"
] | Get image file. | [
"Get",
"image",
"file",
"."
] | f7b17d9dabf1fff448bb38d90474498f0d203990 | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L106-L149 | train |
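Note that a custom image is resolved against only the first entry of html_static_path (falling back to '_static'). A minimal conf.py that satisfies this lookup might read (file names are illustrative):

```python
# conf.py (sketch) -- custom watermark image, resolved by getimage()
extensions = ['sphinxmark']
html_static_path = ['_static']         # getimage() uses only the first entry
sphinxmark_enable = True
sphinxmark_image = 'confidential.png'  # expected at _static/confidential.png
```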
kallimachos/sphinxmark | sphinxmark/__init__.py | watermark | def watermark(app, env):
"""Add watermark."""
if app.config.sphinxmark_enable is True:
LOG.info('adding watermark...', nonl=True)
buildpath, imagefile = getimage(app)
cssname = buildcss(app, buildpath, imagefile)
app.add_css_file(cssname)
LOG.info(' done') | python | def watermark(app, env):
"""Add watermark."""
if app.config.sphinxmark_enable is True:
LOG.info('adding watermark...', nonl=True)
buildpath, imagefile = getimage(app)
cssname = buildcss(app, buildpath, imagefile)
app.add_css_file(cssname)
LOG.info(' done') | [
"def",
"watermark",
"(",
"app",
",",
"env",
")",
":",
"if",
"app",
".",
"config",
".",
"sphinxmark_enable",
"is",
"True",
":",
"LOG",
".",
"info",
"(",
"'adding watermark...'",
",",
"nonl",
"=",
"True",
")",
"buildpath",
",",
"imagefile",
"=",
"getimage",
"(",
"app",
")",
"cssname",
"=",
"buildcss",
"(",
"app",
",",
"buildpath",
",",
"imagefile",
")",
"app",
".",
"add_css_file",
"(",
"cssname",
")",
"LOG",
".",
"info",
"(",
"' done'",
")"
] | Add watermark. | [
"Add",
"watermark",
"."
] | f7b17d9dabf1fff448bb38d90474498f0d203990 | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L152-L159 | train |
kallimachos/sphinxmark | sphinxmark/__init__.py | setup | def setup(app):
"""
Configure setup for Sphinx extension.
:param app: Sphinx application context.
"""
app.add_config_value('sphinxmark_enable', False, 'html')
app.add_config_value('sphinxmark_div', 'default', 'html')
app.add_config_value('sphinxmark_border', None, 'html')
app.add_config_value('sphinxmark_repeat', True, 'html')
app.add_config_value('sphinxmark_fixed', False, 'html')
app.add_config_value('sphinxmark_image', 'default', 'html')
app.add_config_value('sphinxmark_text', 'default', 'html')
app.add_config_value('sphinxmark_text_color', (255, 0, 0), 'html')
app.add_config_value('sphinxmark_text_size', 100, 'html')
app.add_config_value('sphinxmark_text_width', 1000, 'html')
app.add_config_value('sphinxmark_text_opacity', 20, 'html')
app.add_config_value('sphinxmark_text_spacing', 400, 'html')
app.add_config_value('sphinxmark_text_rotation', 0, 'html')
app.connect('env-updated', watermark)
return {
'version': '0.1.18',
'parallel_read_safe': True,
'parallel_write_safe': True,
} | python | def setup(app):
"""
Configure setup for Sphinx extension.
:param app: Sphinx application context.
"""
app.add_config_value('sphinxmark_enable', False, 'html')
app.add_config_value('sphinxmark_div', 'default', 'html')
app.add_config_value('sphinxmark_border', None, 'html')
app.add_config_value('sphinxmark_repeat', True, 'html')
app.add_config_value('sphinxmark_fixed', False, 'html')
app.add_config_value('sphinxmark_image', 'default', 'html')
app.add_config_value('sphinxmark_text', 'default', 'html')
app.add_config_value('sphinxmark_text_color', (255, 0, 0), 'html')
app.add_config_value('sphinxmark_text_size', 100, 'html')
app.add_config_value('sphinxmark_text_width', 1000, 'html')
app.add_config_value('sphinxmark_text_opacity', 20, 'html')
app.add_config_value('sphinxmark_text_spacing', 400, 'html')
app.add_config_value('sphinxmark_text_rotation', 0, 'html')
app.connect('env-updated', watermark)
return {
'version': '0.1.18',
'parallel_read_safe': True,
'parallel_write_safe': True,
} | [
"def",
"setup",
"(",
"app",
")",
":",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_enable'",
",",
"False",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_div'",
",",
"'default'",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_border'",
",",
"None",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_repeat'",
",",
"True",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_fixed'",
",",
"False",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_image'",
",",
"'default'",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text'",
",",
"'default'",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_color'",
",",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_size'",
",",
"100",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_width'",
",",
"1000",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_opacity'",
",",
"20",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_spacing'",
",",
"400",
",",
"'html'",
")",
"app",
".",
"add_config_value",
"(",
"'sphinxmark_text_rotation'",
",",
"0",
",",
"'html'",
")",
"app",
".",
"connect",
"(",
"'env-updated'",
",",
"watermark",
")",
"return",
"{",
"'version'",
":",
"'0.1.18'",
",",
"'parallel_read_safe'",
":",
"True",
",",
"'parallel_write_safe'",
":",
"True",
",",
"}"
] | Configure setup for Sphinx extension.
:param app: Sphinx application context. | [
"Configure",
"setup",
"for",
"Sphinx",
"extension",
"."
] | f7b17d9dabf1fff448bb38d90474498f0d203990 | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L162-L187 | train |
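Every value registered above can be overridden from a project's conf.py. A sketch using the generated-text mode (values are illustrative, not recommendations):

```python
# conf.py (sketch) -- text watermark generated by createimage()
extensions = ['sphinxmark']
sphinxmark_enable = True
sphinxmark_image = 'text'          # 'text' routes getimage() to createimage()
sphinxmark_text = 'Draft'
sphinxmark_text_color = (0, 0, 255)
sphinxmark_text_size = 80
sphinxmark_text_rotation = 30
```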
DarkEnergySurvey/ugali | ugali/utils/bayesian_efficiency.py | gammalnStirling | def gammalnStirling(z):
"""
Uses Stirling's approximation for the log-gamma function suitable for large arguments.
"""
return (0.5 * (np.log(2. * np.pi) - np.log(z))) \
+ (z * (np.log(z + (1. / ((12. * z) - (1. / (10. * z))))) - 1.)) | python | def gammalnStirling(z):
"""
Uses Stirling's approximation for the log-gamma function suitable for large arguments.
"""
return (0.5 * (np.log(2. * np.pi) - np.log(z))) \
+ (z * (np.log(z + (1. / ((12. * z) - (1. / (10. * z))))) - 1.)) | [
"def",
"gammalnStirling",
"(",
"z",
")",
":",
"return",
"(",
"0.5",
"*",
"(",
"np",
".",
"log",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
"-",
"np",
".",
"log",
"(",
"z",
")",
")",
")",
"+",
"(",
"z",
"*",
"(",
"np",
".",
"log",
"(",
"z",
"+",
"(",
"1.",
"/",
"(",
"(",
"12.",
"*",
"z",
")",
"-",
"(",
"1.",
"/",
"(",
"10.",
"*",
"z",
")",
")",
")",
")",
")",
"-",
"1.",
")",
")"
] | Uses Stirling's approximation for the log-gamma function suitable for large arguments. | [
"Uses",
"Stirling",
"s",
"approximation",
"for",
"the",
"log",
"-",
"gamma",
"function",
"suitable",
"for",
"large",
"arguments",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/bayesian_efficiency.py#L15-L20 | train |
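A quick numerical check against SciPy's exact log-gamma makes the "large arguments" claim concrete (scipy is assumed here; the module itself does not import it):

```python
import numpy as np
from scipy.special import gammaln          # exact reference implementation

def gammalnStirling(z):
    return (0.5 * (np.log(2. * np.pi) - np.log(z))) \
        + (z * (np.log(z + (1. / ((12. * z) - (1. / (10. * z))))) - 1.))

for z in [5., 50., 500.]:
    print(z, abs(gammalnStirling(z) - gammaln(z)))
# The absolute error falls rapidly with z, which is why the approximation
# is only recommended for large arguments.
```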
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | satellite | def satellite(isochrone, kernel, stellar_mass, distance_modulus,**kwargs):
"""
Wrapping the isochrone and kernel simulate functions.
"""
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
return mag_1, mag_2, lon, lat | python | def satellite(isochrone, kernel, stellar_mass, distance_modulus,**kwargs):
"""
Wrapping the isochrone and kernel simulate functions.
"""
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
return mag_1, mag_2, lon, lat | [
"def",
"satellite",
"(",
"isochrone",
",",
"kernel",
",",
"stellar_mass",
",",
"distance_modulus",
",",
"*",
"*",
"kwargs",
")",
":",
"mag_1",
",",
"mag_2",
"=",
"isochrone",
".",
"simulate",
"(",
"stellar_mass",
",",
"distance_modulus",
")",
"lon",
",",
"lat",
"=",
"kernel",
".",
"simulate",
"(",
"len",
"(",
"mag_1",
")",
")",
"return",
"mag_1",
",",
"mag_2",
",",
"lon",
",",
"lat"
] | Wrapping the isochrone and kernel simulate functions. | [
"Wrapping",
"the",
"isochrone",
"and",
"kernel",
"simulate",
"functions",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L811-L818 | train |
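With the wrapper above in scope, any objects exposing the two simulate() signatures will do; the toy stand-ins below are assumptions for illustration, not ugali classes:

```python
import numpy as np

class ToyIsochrone:
    """Duck-typed stand-in matching isochrone.simulate() above."""
    def simulate(self, stellar_mass, distance_modulus):
        n = int(stellar_mass)                     # toy: one star per Msun
        mag_1 = np.random.uniform(16, 24, n) + (distance_modulus - 18.)
        return mag_1, mag_1 - np.random.uniform(0., 1., n)

class ToyKernel:
    """Duck-typed spatial kernel: a Gaussian blob of width sigma (deg)."""
    def __init__(self, lon=54.0, lat=-54.0, sigma=0.1):
        self.lon, self.lat, self.sigma = lon, lat, sigma
    def simulate(self, n):
        return (self.lon + self.sigma * np.random.randn(n),
                self.lat + self.sigma * np.random.randn(n))

mag_1, mag_2, lon, lat = satellite(ToyIsochrone(), ToyKernel(),
                                   stellar_mass=500, distance_modulus=18.)
print(len(mag_1), 'toy satellite stars')
```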
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Generator.detectability | def detectability(self,**kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10**3/mod2dist(18)**2
detect = stellar_mass / distance**2
detect /= norm
return detect | python | def detectability(self,**kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10**3/mod2dist(18)**2
detect = stellar_mass / distance**2
detect /= norm
return detect | [
"def",
"detectability",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"distance_modulus",
"=",
"kwargs",
".",
"get",
"(",
"'distance_modulus'",
")",
"distance",
"=",
"mod2dist",
"(",
"distance_modulus",
")",
"stellar_mass",
"=",
"kwargs",
".",
"get",
"(",
"'stellar_mass'",
")",
"extension",
"=",
"kwargs",
".",
"get",
"(",
"'extension'",
")",
"# Normalized to 10^3 Msolar at mod=18",
"norm",
"=",
"10",
"**",
"3",
"/",
"mod2dist",
"(",
"18",
")",
"**",
"2",
"detect",
"=",
"stellar_mass",
"/",
"distance",
"**",
"2",
"detect",
"/=",
"norm"
] | An a priori detectability proxy. | [
"An",
"a",
"priori",
"detectability",
"proxy",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L76-L88 | train |
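With the missing return restored, the proxy is easy to verify by hand: it equals 1 for a 10^3 Msun satellite at distance modulus 18 and scales as stellar_mass/distance^2. A standalone check (the inline mod2dist assumes ugali's usual kpc convention, d = 10**(mu/5 - 2)):

```python
def mod2dist(mu):
    # distance in kpc for distance modulus mu = 5*log10(d / 10 pc)
    return 10 ** (mu / 5. - 2.)

def detectability(stellar_mass, distance_modulus):
    norm = 1e3 / mod2dist(18) ** 2        # 10^3 Msun at mod=18 -> 1.0
    return (stellar_mass / mod2dist(distance_modulus) ** 2) / norm

print(detectability(1e3, 18.))  # 1.0 by construction
print(detectability(1e3, 23.))  # 0.01: +5 mag is 10x the distance, 100x in d^2
```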
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator._create_catalog | def _create_catalog(self,catalog=None):
"""
Bundle it.
"""
if catalog is None:
catalog = ugali.analysis.loglike.createCatalog(self.config,self.roi)
cut = self.mask.restrictCatalogToObservableSpace(catalog)
self.catalog = catalog.applyCut(cut) | python | def _create_catalog(self,catalog=None):
"""
Bundle it.
"""
if catalog is None:
catalog = ugali.analysis.loglike.createCatalog(self.config,self.roi)
cut = self.mask.restrictCatalogToObservableSpace(catalog)
self.catalog = catalog.applyCut(cut) | [
"def",
"_create_catalog",
"(",
"self",
",",
"catalog",
"=",
"None",
")",
":",
"if",
"catalog",
"is",
"None",
":",
"catalog",
"=",
"ugali",
".",
"analysis",
".",
"loglike",
".",
"createCatalog",
"(",
"self",
".",
"config",
",",
"self",
".",
"roi",
")",
"cut",
"=",
"self",
".",
"mask",
".",
"restrictCatalogToObservableSpace",
"(",
"catalog",
")",
"self",
".",
"catalog",
"=",
"catalog",
".",
"applyCut",
"(",
"cut",
")"
] | Bundle it. | [
"Bundle",
"it",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L228-L235 | train |
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator._setup_subpix | def _setup_subpix(self,nside=2**16):
"""
Subpixels for random position generation.
"""
# Only setup once...
if hasattr(self,'subpix'): return
# Simulate over full ROI
self.roi_radius = self.config['coords']['roi_radius']
# Setup background spatial stuff
logger.info("Setup subpixels...")
self.nside_pixel = self.config['coords']['nside_pixel']
self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter
epsilon = np.degrees(hp.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix
subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
self.subpix = subpix[np.in1d(superpix,self.roi.pixels)] | python | def _setup_subpix(self,nside=2**16):
"""
Subpixels for random position generation.
"""
# Only setup once...
if hasattr(self,'subpix'): return
# Simulate over full ROI
self.roi_radius = self.config['coords']['roi_radius']
# Setup background spatial stuff
logger.info("Setup subpixels...")
self.nside_pixel = self.config['coords']['nside_pixel']
self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter
epsilon = np.degrees(hp.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix
subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
self.subpix = subpix[np.in1d(superpix,self.roi.pixels)] | [
"def",
"_setup_subpix",
"(",
"self",
",",
"nside",
"=",
"2",
"**",
"16",
")",
":",
"# Only setup once...",
"if",
"hasattr",
"(",
"self",
",",
"'subpix'",
")",
":",
"return",
"# Simulate over full ROI",
"self",
".",
"roi_radius",
"=",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'roi_radius'",
"]",
"# Setup background spatial stuff",
"logger",
".",
"info",
"(",
"\"Setup subpixels...\"",
")",
"self",
".",
"nside_pixel",
"=",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'nside_pixel'",
"]",
"self",
".",
"nside_subpixel",
"=",
"self",
".",
"nside_pixel",
"*",
"2",
"**",
"4",
"# Could be config parameter",
"epsilon",
"=",
"np",
".",
"degrees",
"(",
"hp",
".",
"max_pixrad",
"(",
"self",
".",
"nside_pixel",
")",
")",
"# Pad roi radius to cover edge healpix",
"subpix",
"=",
"ugali",
".",
"utils",
".",
"healpix",
".",
"query_disc",
"(",
"self",
".",
"nside_subpixel",
",",
"self",
".",
"roi",
".",
"vec",
",",
"self",
".",
"roi_radius",
"+",
"epsilon",
")",
"superpix",
"=",
"ugali",
".",
"utils",
".",
"healpix",
".",
"superpixel",
"(",
"subpix",
",",
"self",
".",
"nside_subpixel",
",",
"self",
".",
"nside_pixel",
")",
"self",
".",
"subpix",
"=",
"subpix",
"[",
"np",
".",
"in1d",
"(",
"superpix",
",",
"self",
".",
"roi",
".",
"pixels",
")",
"]"
] | Subpixels for random position generation. | [
"Subpixels",
"for",
"random",
"position",
"generation",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L289-L306 | train |
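Two remarks on the method above: the nside argument in its signature is unused (the subpixel resolution is derived from nside_pixel), and the parent/child pixel bookkeeping is easiest to see in NESTED ordering, where a fine pixel's superpixel is an integer division. A self-contained healpy sketch of the same selection (ugali's query_disc/superpixel helpers wrap equivalent logic; ordering conventions may differ):

```python
import numpy as np
import healpy as hp

nside_pixel = 256
nside_subpixel = nside_pixel * 2**4
lon, lat, radius = 54.0, -54.0, 2.0            # illustrative ROI (deg)

vec = hp.ang2vec(lon, lat, lonlat=True)
# pad the radius so subpixels cover the edges of the coarse healpixels
epsilon = np.degrees(hp.max_pixrad(nside_pixel))
subpix = hp.query_disc(nside_subpixel, vec, np.radians(radius + epsilon),
                       nest=True)
superpix = subpix // (nside_subpixel // nside_pixel) ** 2   # NESTED parents
roi_pixels = hp.query_disc(nside_pixel, vec, np.radians(radius), nest=True)
subpix = subpix[np.in1d(superpix, roi_pixels)]
print(len(subpix), 'subpixels inside', len(roi_pixels), 'ROI pixels')
```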
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator._setup_cmd | def _setup_cmd(self,mode='cloud-in-cells'):
"""
The purpose here is to create a more finely binned
background CMD to sample from.
"""
# Only setup once...
if hasattr(self,'bkg_lambda'): return
logger.info("Setup color...")
# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2
# (Remember to convert from sr to deg^2)
#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))
solid_angle_roi = self.roi.area_pixel*len(self.roi.pixels)
# Large CMD bins cause problems when simulating
config = Config(self.config)
config['color']['n_bins'] *= 5 #10
config['mag']['n_bins'] *= 1 #2
#config['mask']['minimum_solid_angle'] = 0
roi = ugali.analysis.loglike.createROI(config,self.roi.lon,self.roi.lat)
mask = ugali.analysis.loglike.createMask(config,roi)
self.bkg_centers_color = roi.centers_color
self.bkg_centers_mag = roi.centers_mag
# Background CMD has units: [objs / deg^2 / mag^2]
cmd_background = mask.backgroundCMD(self.catalog,mode)
self.bkg_lambda=cmd_background*solid_angle_roi*roi.delta_color*roi.delta_mag
np.sum(self.bkg_lambda)
# Clean up
del config, roi, mask | python | def _setup_cmd(self,mode='cloud-in-cells'):
"""
The purpose here is to create a more finely binned
background CMD to sample from.
"""
# Only setup once...
if hasattr(self,'bkg_lambda'): return
logger.info("Setup color...")
# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2
# (Remember to convert from sr to deg^2)
#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))
solid_angle_roi = self.roi.area_pixel*len(self.roi.pixels)
# Large CMD bins cause problems when simulating
config = Config(self.config)
config['color']['n_bins'] *= 5 #10
config['mag']['n_bins'] *= 1 #2
#config['mask']['minimum_solid_angle'] = 0
roi = ugali.analysis.loglike.createROI(config,self.roi.lon,self.roi.lat)
mask = ugali.analysis.loglike.createMask(config,roi)
self.bkg_centers_color = roi.centers_color
self.bkg_centers_mag = roi.centers_mag
# Background CMD has units: [objs / deg^2 / mag^2]
cmd_background = mask.backgroundCMD(self.catalog,mode)
self.bkg_lambda=cmd_background*solid_angle_roi*roi.delta_color*roi.delta_mag
np.sum(self.bkg_lambda)
# Clean up
del config, roi, mask | [
"def",
"_setup_cmd",
"(",
"self",
",",
"mode",
"=",
"'cloud-in-cells'",
")",
":",
"# Only setup once...",
"if",
"hasattr",
"(",
"self",
",",
"'bkg_lambda'",
")",
":",
"return",
"logger",
".",
"info",
"(",
"\"Setup color...\"",
")",
"# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2",
"# (Remember to convert from sr to deg^2) ",
"#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))",
"solid_angle_roi",
"=",
"self",
".",
"roi",
".",
"area_pixel",
"*",
"len",
"(",
"self",
".",
"roi",
".",
"pixels",
")",
"# Large CMD bins cause problems when simulating",
"config",
"=",
"Config",
"(",
"self",
".",
"config",
")",
"config",
"[",
"'color'",
"]",
"[",
"'n_bins'",
"]",
"*=",
"5",
"#10",
"config",
"[",
"'mag'",
"]",
"[",
"'n_bins'",
"]",
"*=",
"1",
"#2",
"#config['mask']['minimum_solid_angle'] = 0",
"roi",
"=",
"ugali",
".",
"analysis",
".",
"loglike",
".",
"createROI",
"(",
"config",
",",
"self",
".",
"roi",
".",
"lon",
",",
"self",
".",
"roi",
".",
"lat",
")",
"mask",
"=",
"ugali",
".",
"analysis",
".",
"loglike",
".",
"createMask",
"(",
"config",
",",
"roi",
")",
"self",
".",
"bkg_centers_color",
"=",
"roi",
".",
"centers_color",
"self",
".",
"bkg_centers_mag",
"=",
"roi",
".",
"centers_mag",
"# Background CMD has units: [objs / deg^2 / mag^2]",
"cmd_background",
"=",
"mask",
".",
"backgroundCMD",
"(",
"self",
".",
"catalog",
",",
"mode",
")",
"self",
".",
"bkg_lambda",
"=",
"cmd_background",
"*",
"solid_angle_roi",
"*",
"roi",
".",
"delta_color",
"*",
"roi",
".",
"delta_mag",
"np",
".",
"sum",
"(",
"self",
".",
"bkg_lambda",
")",
"# Clean up ",
"del",
"config",
",",
"roi",
",",
"mask"
] | The purpose here is to create a more finely binned
background CMD to sample from. | [
"The",
"purpose",
"here",
"is",
"to",
"create",
"a",
"more",
"finely",
"binned",
"background",
"CMD",
"to",
"sample",
"from",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L308-L340 | train |
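The ROI solid angle used above is just the healpix pixel area times the number of ROI pixels; the commented small-angle formula pi*theta^2 is its disc limit (and the bare np.sum(self.bkg_lambda) just before the cleanup is a no-op whose result is discarded). With healpy directly:

```python
import healpy as hp

nside_pixel, n_roi_pixels = 4096, 5000                    # illustrative
area_pixel = hp.nside2pixarea(nside_pixel, degrees=True)  # deg^2 per pixel
print(area_pixel * n_roi_pixels, 'deg^2 of ROI solid angle')
```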
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator.toy_background | def toy_background(self,mc_source_id=2,seed=None):
"""
Quick uniform background generation.
"""
logger.info("Running toy background simulation...")
size = 20000
nstar = np.random.poisson(size)
#np.random.seed(0)
logger.info("Simulating %i background stars..."%nstar)
### # Random points from roi pixels
### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar)
### pix = self.roi.pixels[idx]
# Random points drawn from subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
pix = ang2pix(self.nside_pixel, lon, lat)
lon,lat = pix2ang(self.nside_pixel,pix)
# Single color
#mag_1 = 19.05*np.ones(len(pix))
#mag_2 = 19.10*np.ones(len(pix))
# Uniform in color
logger.info("Generating uniform CMD...")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
#mag_err_1 = 1.0*np.ones(len(pix))
#mag_err_2 = 1.0*np.ones(len(pix))
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog | python | def toy_background(self,mc_source_id=2,seed=None):
"""
Quick uniform background generation.
"""
logger.info("Running toy background simulation...")
size = 20000
nstar = np.random.poisson(size)
#np.random.seed(0)
logger.info("Simulating %i background stars..."%nstar)
### # Random points from roi pixels
### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar)
### pix = self.roi.pixels[idx]
# Random points drawn from subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
pix = ang2pix(self.nside_pixel, lon, lat)
lon,lat = pix2ang(self.nside_pixel,pix)
# Single color
#mag_1 = 19.05*np.ones(len(pix))
#mag_2 = 19.10*np.ones(len(pix))
# Uniform in color
logger.info("Generating uniform CMD...")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
#mag_err_1 = 1.0*np.ones(len(pix))
#mag_err_2 = 1.0*np.ones(len(pix))
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog | [
"def",
"toy_background",
"(",
"self",
",",
"mc_source_id",
"=",
"2",
",",
"seed",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Running toy background simulation...\"",
")",
"size",
"=",
"20000",
"nstar",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"size",
")",
"#np.random.seed(0)",
"logger",
".",
"info",
"(",
"\"Simulating %i background stars...\"",
"%",
"nstar",
")",
"### # Random points from roi pixels",
"### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar)",
"### pix = self.roi.pixels[idx]",
"# Random points drawn from subpixels",
"logger",
".",
"info",
"(",
"\"Generating uniform positions...\"",
")",
"idx",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"subpix",
")",
"-",
"1",
",",
"size",
"=",
"nstar",
")",
"lon",
",",
"lat",
"=",
"pix2ang",
"(",
"self",
".",
"nside_subpixel",
",",
"self",
".",
"subpix",
"[",
"idx",
"]",
")",
"pix",
"=",
"ang2pix",
"(",
"self",
".",
"nside_pixel",
",",
"lon",
",",
"lat",
")",
"lon",
",",
"lat",
"=",
"pix2ang",
"(",
"self",
".",
"nside_pixel",
",",
"pix",
")",
"# Single color",
"#mag_1 = 19.05*np.ones(len(pix))",
"#mag_2 = 19.10*np.ones(len(pix))",
"# Uniform in color",
"logger",
".",
"info",
"(",
"\"Generating uniform CMD...\"",
")",
"mag_1",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"self",
".",
"config",
"[",
"'mag'",
"]",
"[",
"'min'",
"]",
",",
"self",
".",
"config",
"[",
"'mag'",
"]",
"[",
"'max'",
"]",
",",
"size",
"=",
"nstar",
")",
"color",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"self",
".",
"config",
"[",
"'color'",
"]",
"[",
"'min'",
"]",
",",
"self",
".",
"config",
"[",
"'color'",
"]",
"[",
"'max'",
"]",
",",
"size",
"=",
"nstar",
")",
"mag_2",
"=",
"mag_1",
"-",
"color",
"# There is probably a better way to do this step without creating the full HEALPix map",
"mask",
"=",
"-",
"1.",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"nside_pixel",
")",
")",
"mask",
"[",
"self",
".",
"roi",
".",
"pixels",
"]",
"=",
"self",
".",
"mask",
".",
"mask_1",
".",
"mask_roi_sparse",
"mag_lim_1",
"=",
"mask",
"[",
"pix",
"]",
"mask",
"=",
"-",
"1.",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"nside_pixel",
")",
")",
"mask",
"[",
"self",
".",
"roi",
".",
"pixels",
"]",
"=",
"self",
".",
"mask",
".",
"mask_2",
".",
"mask_roi_sparse",
"mag_lim_2",
"=",
"mask",
"[",
"pix",
"]",
"#mag_err_1 = 1.0*np.ones(len(pix))",
"#mag_err_2 = 1.0*np.ones(len(pix))",
"mag_err_1",
"=",
"self",
".",
"photo_err_1",
"(",
"mag_lim_1",
"-",
"mag_1",
")",
"mag_err_2",
"=",
"self",
".",
"photo_err_2",
"(",
"mag_lim_2",
"-",
"mag_2",
")",
"mc_source_id",
"=",
"mc_source_id",
"*",
"np",
".",
"ones",
"(",
"len",
"(",
"mag_1",
")",
")",
"select",
"=",
"(",
"mag_lim_1",
">",
"mag_1",
")",
"&",
"(",
"mag_lim_2",
">",
"mag_2",
")",
"hdu",
"=",
"ugali",
".",
"observation",
".",
"catalog",
".",
"makeHDU",
"(",
"self",
".",
"config",
",",
"mag_1",
"[",
"select",
"]",
",",
"mag_err_1",
"[",
"select",
"]",
",",
"mag_2",
"[",
"select",
"]",
",",
"mag_err_2",
"[",
"select",
"]",
",",
"lon",
"[",
"select",
"]",
",",
"lat",
"[",
"select",
"]",
",",
"mc_source_id",
"[",
"select",
"]",
")",
"catalog",
"=",
"ugali",
".",
"observation",
".",
"catalog",
".",
"Catalog",
"(",
"self",
".",
"config",
",",
"data",
"=",
"hdu",
".",
"data",
")",
"return",
"catalog"
] | Quick uniform background generation. | [
"Quick",
"uniform",
"background",
"generation",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L343-L397 | train |
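One caveat when adapting the position sampler above: np.random.randint draws from the half-open interval [low, high), so high=len(self.subpix)-1 can never return the final subpixel (the commented ROI-pixel draw has the same issue). The fix is to drop the -1:

```python
import numpy as np

subpix = np.arange(10000)                  # stand-in for self.subpix
nstar = np.random.poisson(20000)
idx = np.random.randint(0, len(subpix), size=nstar)   # high is exclusive
# every subpixel, including the last, is now a possible draw
```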
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator.satellite | def satellite(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
# Randomize magnitudes by their errors
mag_obs_1 = mag_1+np.random.normal(size=len(mag_1))*mag_err_1
mag_obs_2 = mag_2+np.random.normal(size=len(mag_2))*mag_err_2
#mag_obs_1 = mag_1
#mag_obs_2 = mag_2
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)
# Make sure objects lie within the original cmd (should also be done later...)
#select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog | python | def satellite(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
# Randomize magnitudes by their errors
mag_obs_1 = mag_1+np.random.normal(size=len(mag_1))*mag_err_1
mag_obs_2 = mag_2+np.random.normal(size=len(mag_2))*mag_err_2
#mag_obs_1 = mag_1
#mag_obs_2 = mag_2
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)
# Make sure objects lie within the original cmd (should also be done later...)
#select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog | [
"def",
"satellite",
"(",
"self",
",",
"stellar_mass",
",",
"distance_modulus",
",",
"mc_source_id",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"isochrone",
"=",
"kwargs",
".",
"pop",
"(",
"'isochrone'",
",",
"self",
".",
"isochrone",
")",
"kernel",
"=",
"kwargs",
".",
"pop",
"(",
"'kernel'",
",",
"self",
".",
"kernel",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"kernel",
".",
"params",
".",
"keys",
"(",
")",
":",
"setattr",
"(",
"kernel",
",",
"k",
",",
"v",
")",
"mag_1",
",",
"mag_2",
"=",
"isochrone",
".",
"simulate",
"(",
"stellar_mass",
",",
"distance_modulus",
")",
"lon",
",",
"lat",
"=",
"kernel",
".",
"simulate",
"(",
"len",
"(",
"mag_1",
")",
")",
"logger",
".",
"info",
"(",
"\"Simulating %i satellite stars...\"",
"%",
"len",
"(",
"mag_1",
")",
")",
"pix",
"=",
"ang2pix",
"(",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'nside_pixel'",
"]",
",",
"lon",
",",
"lat",
")",
"# There is probably a better way to do this step without creating the full HEALPix map",
"mask",
"=",
"-",
"1.",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'nside_pixel'",
"]",
")",
")",
"mask",
"[",
"self",
".",
"roi",
".",
"pixels",
"]",
"=",
"self",
".",
"mask",
".",
"mask_1",
".",
"mask_roi_sparse",
"mag_lim_1",
"=",
"mask",
"[",
"pix",
"]",
"mask",
"=",
"-",
"1.",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'nside_pixel'",
"]",
")",
")",
"mask",
"[",
"self",
".",
"roi",
".",
"pixels",
"]",
"=",
"self",
".",
"mask",
".",
"mask_2",
".",
"mask_roi_sparse",
"mag_lim_2",
"=",
"mask",
"[",
"pix",
"]",
"mag_err_1",
"=",
"self",
".",
"photo_err_1",
"(",
"mag_lim_1",
"-",
"mag_1",
")",
"mag_err_2",
"=",
"self",
".",
"photo_err_2",
"(",
"mag_lim_2",
"-",
"mag_2",
")",
"# Randomize magnitudes by their errors",
"mag_obs_1",
"=",
"mag_1",
"+",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"len",
"(",
"mag_1",
")",
")",
"*",
"mag_err_1",
"mag_obs_2",
"=",
"mag_2",
"+",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"len",
"(",
"mag_2",
")",
")",
"*",
"mag_err_2",
"#mag_obs_1 = mag_1",
"#mag_obs_2 = mag_2",
"#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)",
"select",
"=",
"(",
"mag_lim_1",
">",
"mag_obs_1",
")",
"&",
"(",
"mag_lim_2",
">",
"mag_obs_2",
")",
"# Make sure objects lie within the original cmd (should also be done later...)",
"#select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)",
"#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]",
"logger",
".",
"info",
"(",
"\"Clipping %i simulated satellite stars...\"",
"%",
"(",
"~",
"select",
")",
".",
"sum",
"(",
")",
")",
"mc_source_id",
"=",
"mc_source_id",
"*",
"np",
".",
"ones",
"(",
"len",
"(",
"mag_1",
")",
")",
"hdu",
"=",
"ugali",
".",
"observation",
".",
"catalog",
".",
"makeHDU",
"(",
"self",
".",
"config",
",",
"mag_obs_1",
"[",
"select",
"]",
",",
"mag_err_1",
"[",
"select",
"]",
",",
"mag_obs_2",
"[",
"select",
"]",
",",
"mag_err_2",
"[",
"select",
"]",
",",
"lon",
"[",
"select",
"]",
",",
"lat",
"[",
"select",
"]",
",",
"mc_source_id",
"[",
"select",
"]",
")",
"catalog",
"=",
"ugali",
".",
"observation",
".",
"catalog",
".",
"Catalog",
"(",
"self",
".",
"config",
",",
"data",
"=",
"hdu",
".",
"data",
")",
"return",
"catalog"
] | Create a simulated satellite. Returns a catalog object. | [
"Create",
"a",
"simulated",
"satellite",
".",
"Returns",
"a",
"catalog",
"object",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L495-L544 | train |
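A hedged usage sketch; constructing a Simulator needs a ugali config and target coordinates that are not shown in this excerpt, so the constructor call is an assumption:

```python
# sim = Simulator(config='config.yaml', lon=54.0, lat=-54.0)   # assumed ctor
catalog = sim.satellite(stellar_mass=1e3,        # Msun
                        distance_modulus=18.,
                        mc_source_id=1, seed=0)
# Extra keyword arguments whose names match kernel.params (for example an
# 'extension') are forwarded to the spatial kernel by the setattr loop above.
```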
DarkEnergySurvey/ugali | ugali/simulation/simulator.py | Simulator.makeHDU | def makeHDU(self, mag_1, mag_err_1, mag_2, mag_err_2, lon, lat, mc_source_id):
"""
Create a catalog fits file object based on input data.
ADW: This should be combined with the write_membership
function of loglike.
"""
if self.config['catalog']['coordsys'].lower() == 'cel' \
and self.config['coords']['coordsys'].lower() == 'gal':
lon, lat = ugali.utils.projector.gal2cel(lon, lat)
elif self.config['catalog']['coordsys'].lower() == 'gal' \
and self.config['coords']['coordsys'].lower() == 'cel':
lon, lat = ugali.utils.projector.cel2gal(lon, lat)
columns = [
pyfits.Column(name=self.config['catalog']['objid_field'],
format = 'D',array = np.arange(len(lon))),
pyfits.Column(name=self.config['catalog']['lon_field'],
format = 'D',array = lon),
pyfits.Column(name = self.config['catalog']['lat_field'],
format = 'D',array = lat),
pyfits.Column(name = self.config['catalog']['mag_1_field'],
format = 'E',array = mag_1),
pyfits.Column(name = self.config['catalog']['mag_err_1_field'],
format = 'E',array = mag_err_1),
pyfits.Column(name = self.config['catalog']['mag_2_field'],
format = 'E',array = mag_2),
pyfits.Column(name = self.config['catalog']['mag_err_2_field'],
format = 'E',array = mag_err_2),
pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
format = 'I',array = mc_source_id),
]
hdu = pyfits.new_table(columns)
return hdu | python | def makeHDU(self, mag_1, mag_err_1, mag_2, mag_err_2, lon, lat, mc_source_id):
"""
Create a catalog fits file object based on input data.
ADW: This should be combined with the write_membership
function of loglike.
"""
if self.config['catalog']['coordsys'].lower() == 'cel' \
and self.config['coords']['coordsys'].lower() == 'gal':
lon, lat = ugali.utils.projector.gal2cel(lon, lat)
elif self.config['catalog']['coordsys'].lower() == 'gal' \
and self.config['coords']['coordsys'].lower() == 'cel':
lon, lat = ugali.utils.projector.cel2gal(lon, lat)
columns = [
pyfits.Column(name=self.config['catalog']['objid_field'],
format = 'D',array = np.arange(len(lon))),
pyfits.Column(name=self.config['catalog']['lon_field'],
format = 'D',array = lon),
pyfits.Column(name = self.config['catalog']['lat_field'],
format = 'D',array = lat),
pyfits.Column(name = self.config['catalog']['mag_1_field'],
format = 'E',array = mag_1),
pyfits.Column(name = self.config['catalog']['mag_err_1_field'],
format = 'E',array = mag_err_1),
pyfits.Column(name = self.config['catalog']['mag_2_field'],
format = 'E',array = mag_2),
pyfits.Column(name = self.config['catalog']['mag_err_2_field'],
format = 'E',array = mag_err_2),
pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
format = 'I',array = mc_source_id),
]
hdu = pyfits.new_table(columns)
return hdu | [
"def",
"makeHDU",
"(",
"self",
",",
"mag_1",
",",
"mag_err_1",
",",
"mag_2",
",",
"mag_err_2",
",",
"lon",
",",
"lat",
",",
"mc_source_id",
")",
":",
"if",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'coordsys'",
"]",
".",
"lower",
"(",
")",
"==",
"'cel'",
"and",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'coordsys'",
"]",
".",
"lower",
"(",
")",
"==",
"'gal'",
":",
"lon",
",",
"lat",
"=",
"ugali",
".",
"utils",
".",
"projector",
".",
"gal2cel",
"(",
"lon",
",",
"lat",
")",
"elif",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'coordsys'",
"]",
".",
"lower",
"(",
")",
"==",
"'gal'",
"and",
"self",
".",
"config",
"[",
"'coords'",
"]",
"[",
"'coordsys'",
"]",
".",
"lower",
"(",
")",
"==",
"'cel'",
":",
"lon",
",",
"lat",
"=",
"ugali",
".",
"utils",
".",
"projector",
".",
"cel2gal",
"(",
"lon",
",",
"lat",
")",
"columns",
"=",
"[",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'objid_field'",
"]",
",",
"format",
"=",
"'D'",
",",
"array",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"lon",
")",
")",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'lon_field'",
"]",
",",
"format",
"=",
"'D'",
",",
"array",
"=",
"lon",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'lat_field'",
"]",
",",
"format",
"=",
"'D'",
",",
"array",
"=",
"lat",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'mag_1_field'",
"]",
",",
"format",
"=",
"'E'",
",",
"array",
"=",
"mag_1",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'mag_err_1_field'",
"]",
",",
"format",
"=",
"'E'",
",",
"array",
"=",
"mag_err_1",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'mag_2_field'",
"]",
",",
"format",
"=",
"'E'",
",",
"array",
"=",
"mag_2",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'mag_err_2_field'",
"]",
",",
"format",
"=",
"'E'",
",",
"array",
"=",
"mag_err_2",
")",
",",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"self",
".",
"config",
"[",
"'catalog'",
"]",
"[",
"'mc_source_id_field'",
"]",
",",
"format",
"=",
"'I'",
",",
"array",
"=",
"mc_source_id",
")",
",",
"]",
"hdu",
"=",
"pyfits",
".",
"new_table",
"(",
"columns",
")",
"return",
"hdu"
] | Create a catalog fits file object based on input data.
ADW: This should be combined with the write_membership
function of loglike. | [
"Create",
"a",
"catalog",
"fits",
"file",
"object",
"based",
"on",
"input",
"data",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L628-L662 | train |
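pyfits.new_table has long been removed from astropy; the modern equivalent assembles a BinTableHDU. A hedged sketch of the same table construction, with illustrative column names in place of the config lookups:

```python
import numpy as np
from astropy.io import fits

def make_hdu(mag_1, mag_err_1, mag_2, mag_err_2, lon, lat, mc_source_id):
    """Modern astropy equivalent of the makeHDU() above."""
    columns = [
        fits.Column(name='OBJID',        format='D', array=np.arange(len(lon))),
        fits.Column(name='RA',           format='D', array=lon),
        fits.Column(name='DEC',          format='D', array=lat),
        fits.Column(name='MAG_1',        format='E', array=mag_1),
        fits.Column(name='MAGERR_1',     format='E', array=mag_err_1),
        fits.Column(name='MAG_2',        format='E', array=mag_2),
        fits.Column(name='MAGERR_2',     format='E', array=mag_err_2),
        fits.Column(name='MC_SOURCE_ID', format='I', array=mc_source_id),
    ]
    return fits.BinTableHDU.from_columns(columns)
```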
totalgood/pugnlp | src/pugnlp/util.py | inverted_dict | def inverted_dict(d):
"""Return a dict with swapped keys and values
>>> inverted_dict({0: ('a', 'b'), 1: 'cd'}) == {'cd': 1, ('a', 'b'): 0}
True
"""
return dict((force_hashable(v), k) for (k, v) in viewitems(dict(d))) | python | def inverted_dict(d):
"""Return a dict with swapped keys and values
>>> inverted_dict({0: ('a', 'b'), 1: 'cd'}) == {'cd': 1, ('a', 'b'): 0}
True
"""
return dict((force_hashable(v), k) for (k, v) in viewitems(dict(d))) | [
"def",
"inverted_dict",
"(",
"d",
")",
":",
"return",
"dict",
"(",
"(",
"force_hashable",
"(",
"v",
")",
",",
"k",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"viewitems",
"(",
"dict",
"(",
"d",
")",
")",
")"
] | Return a dict with swapped keys and values
>>> inverted_dict({0: ('a', 'b'), 1: 'cd'}) == {'cd': 1, ('a', 'b'): 0}
True | [
"Return",
"a",
"dict",
"with",
"swapped",
"keys",
"and",
"values"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L167-L173 | train |
totalgood/pugnlp | src/pugnlp/util.py | inverted_dict_of_lists | def inverted_dict_of_lists(d):
"""Return a dict where the keys are all the values listed in the values of the original dict
>>> inverted_dict_of_lists({0: ['a', 'b'], 1: 'cd'}) == {'a': 0, 'b': 0, 'cd': 1}
True
"""
new_dict = {}
for (old_key, old_value_list) in viewitems(dict(d)):
for new_key in listify(old_value_list):
new_dict[new_key] = old_key
return new_dict | python | def inverted_dict_of_lists(d):
"""Return a dict where the keys are all the values listed in the values of the original dict
>>> inverted_dict_of_lists({0: ['a', 'b'], 1: 'cd'}) == {'a': 0, 'b': 0, 'cd': 1}
True
"""
new_dict = {}
for (old_key, old_value_list) in viewitems(dict(d)):
for new_key in listify(old_value_list):
new_dict[new_key] = old_key
return new_dict | [
"def",
"inverted_dict_of_lists",
"(",
"d",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"(",
"old_key",
",",
"old_value_list",
")",
"in",
"viewitems",
"(",
"dict",
"(",
"d",
")",
")",
":",
"for",
"new_key",
"in",
"listify",
"(",
"old_value_list",
")",
":",
"new_dict",
"[",
"new_key",
"]",
"=",
"old_key",
"return",
"new_dict"
] | Return a dict where the keys are all the values listed in the values of the original dict
>>> inverted_dict_of_lists({0: ['a', 'b'], 1: 'cd'}) == {'a': 0, 'b': 0, 'cd': 1}
True | [
"Return",
"a",
"dict",
"where",
"the",
"keys",
"are",
"all",
"the",
"values",
"listed",
"in",
"the",
"values",
"of",
"the",
"original",
"dict"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L176-L186 | train |