repo | pull_number | instance_id | issue_numbers | base_commit | patch | test_patch | problem_statement | hints_text | created_at |
---|---|---|---|---|---|---|---|---|---|
networkx/networkx | 7,399 | networkx__networkx-7399 | [
"7396"
] | 4bc8ba917d67862c4f745d02f360692e17c5abd2 | diff --git a/networkx/algorithms/bipartite/generators.py b/networkx/algorithms/bipartite/generators.py
--- a/networkx/algorithms/bipartite/generators.py
+++ b/networkx/algorithms/bipartite/generators.py
@@ -62,7 +62,7 @@ def complete_bipartite_graph(n1, n2, create_using=None):
if len(G) != len(top) + len(bottom):
raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
G.add_edges_from((u, v) for u in top for v in bottom)
- G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})"
+ G.graph["name"] = f"complete_bipartite_graph({len(top)}, {len(bottom)})"
return G
| diff --git a/networkx/algorithms/bipartite/tests/test_generators.py b/networkx/algorithms/bipartite/tests/test_generators.py
--- a/networkx/algorithms/bipartite/tests/test_generators.py
+++ b/networkx/algorithms/bipartite/tests/test_generators.py
@@ -398,3 +398,12 @@ def test_gnmk_random_graph_complete(self):
assert set(range(n)) == X
assert set(range(n, n + m)) == Y
assert edges == len(list(G.edges()))
+
+ @pytest.mark.parametrize("n", (4, range(4), {0, 1, 2, 3}))
+ @pytest.mark.parametrize("m", (range(4, 7), {4, 5, 6}))
+ def test_complete_bipartite_graph_str(self, n, m):
+ """Ensure G.name is consistent for all inputs accepted by nodes_or_number.
+ See gh-7396"""
+ G = nx.complete_bipartite_graph(n, m)
+ ans = "Graph named 'complete_bipartite_graph(4, 3)' with 7 nodes and 12 edges"
+ assert str(G) == ans
 | Graph name attribute for `complete_bipartite_graph` verbose for non-integer inputs
The `complete_bipartite_graph` creation function adds a `.graph["name"]` attribute like so:
https://github.com/networkx/networkx/blob/cca1a715217a102b970800c43a066dd759ece92d/networkx/algorithms/bipartite/generators.py#L65
This works fine when `n1` and `n2` are integers, but can get ugly when they aren't. For example:
```python
# Generator expressions designed to be intentionally verbose
>>> n = (i for i in range(4))
>>> m = (i for i in range(4, 7))
>>> G = nx.complete_bipartite_graph(n, m)
>>> print(G)
Graph named 'complete_bipartite_graph(<generator object <genexpr> at 0x7e94bc447370>, <generator object <genexpr> at 0x7e94bc5352f0>)' with 7 nodes and 12 edges
```
| The expected behavior should be printing the length of the generator, right ?
> The expected behavior should be printing the length of the generator, right ?
Usually it's `Graph with 7 nodes and 12 edges`
> Usually it's `Graph with 7 nodes and 12 edges`
This is a bipartite graph, so I guess n1 and n2 refer to the two node sets.
I think it should be
`complete_bipartite_graph(4,3) with 7 nodes and 12 edges` | 2024-04-09T12:29:58 |
networkx/networkx | 7,412 | networkx__networkx-7412 | [
"7407"
] | e97634ab839f7eb40e4887c4b59d13c267977df3 | diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -214,9 +214,8 @@ def from_pandas_adjacency(df, create_using=None):
raise nx.NetworkXError("Columns must match Indices.", msg) from err
A = df.values
- G = from_numpy_array(A, create_using=create_using)
+ G = from_numpy_array(A, create_using=create_using, nodelist=df.columns)
- nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
return G
@@ -1022,7 +1021,9 @@ def to_numpy_array(
@nx._dispatchable(graphs=None, returns_graph=True)
-def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"):
+def from_numpy_array(
+ A, parallel_edges=False, create_using=None, edge_attr="weight", *, nodelist=None
+):
"""Returns a graph from a 2D NumPy array.
The 2D NumPy array is interpreted as an adjacency matrix for the graph.
@@ -1046,6 +1047,11 @@ def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weig
The attribute to which the array values are assigned on each edge. If
it is None, edge attributes will not be assigned.
+ nodelist : sequence of nodes, optional
+ A sequence of objects to use as the nodes in the graph. If provided, the
+ list of nodes must be the same length as the dimensions of `A`. The
+ default is `None`, in which case the nodes are drawn from ``range(n)``.
+
Notes
-----
For directed graphs, explicitly mention create_using=nx.DiGraph,
@@ -1139,9 +1145,14 @@ def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weig
python_type = kind_to_python_type[dt.kind]
except Exception as err:
raise TypeError(f"Unknown numpy data type: {dt}") from err
+ if _default_nodes := (nodelist is None):
+ nodelist = range(n)
+ else:
+ if len(nodelist) != n:
+ raise ValueError("nodelist must have the same length as A.shape[0]")
# Make sure we get even the isolated nodes of the graph.
- G.add_nodes_from(range(n))
+ G.add_nodes_from(nodelist)
# Get a list of all the entries in the array with nonzero entries. These
# coordinates become edges in the graph. (convert to int from np.int64)
edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero()))
@@ -1198,5 +1209,9 @@ def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weig
# when `G.add_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
+ # Remap nodes if user provided custom `nodelist`
+ if not _default_nodes:
+ idx_to_node = dict(enumerate(nodelist))
+ triples = ((idx_to_node[u], idx_to_node[v], d) for u, v, d in triples)
G.add_edges_from(triples)
return G
| diff --git a/networkx/tests/test_convert_numpy.py b/networkx/tests/test_convert_numpy.py
--- a/networkx/tests/test_convert_numpy.py
+++ b/networkx/tests/test_convert_numpy.py
@@ -1,3 +1,5 @@
+import itertools
+
import pytest
np = pytest.importorskip("numpy")
@@ -393,3 +395,138 @@ def test_to_numpy_array_structured_multigraph_raises(graph_type):
dtype = np.dtype([("weight", int), ("cost", int)])
with pytest.raises(nx.NetworkXError, match="Structured arrays are not supported"):
nx.to_numpy_array(G, dtype=dtype, weight=None)
+
+
+def test_from_numpy_array_nodelist_bad_size():
+ """An exception is raised when `len(nodelist) != A.shape[0]`."""
+ n = 5 # Number of nodes
+ A = np.diag(np.ones(n - 1), k=1) # Adj. matrix for P_n
+ expected = nx.path_graph(n)
+
+ assert graphs_equal(nx.from_numpy_array(A, edge_attr=None), expected)
+ nodes = list(range(n))
+ assert graphs_equal(
+ nx.from_numpy_array(A, edge_attr=None, nodelist=nodes), expected
+ )
+
+ # Too many node labels
+ nodes = list(range(n + 1))
+ with pytest.raises(ValueError, match="nodelist must have the same length as A"):
+ nx.from_numpy_array(A, nodelist=nodes)
+
+ # Too few node labels
+ nodes = list(range(n - 1))
+ with pytest.raises(ValueError, match="nodelist must have the same length as A"):
+ nx.from_numpy_array(A, nodelist=nodes)
+
+
[email protected](
+ "nodes",
+ (
+ [4, 3, 2, 1, 0],
+ [9, 7, 1, 2, 8],
+ ["a", "b", "c", "d", "e"],
+ [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
+ ["A", 2, 7, "spam", (1, 3)],
+ ),
+)
+def test_from_numpy_array_nodelist(nodes):
+ A = np.diag(np.ones(4), k=1)
+ # Without edge attributes
+ expected = nx.relabel_nodes(
+ nx.path_graph(5), mapping=dict(enumerate(nodes)), copy=True
+ )
+ G = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
+ assert graphs_equal(G, expected)
+
+ # With edge attributes
+ nx.set_edge_attributes(expected, 1.0, name="weight")
+ G = nx.from_numpy_array(A, nodelist=nodes)
+ assert graphs_equal(G, expected)
+
+
[email protected](
+ "nodes",
+ (
+ [4, 3, 2, 1, 0],
+ [9, 7, 1, 2, 8],
+ ["a", "b", "c", "d", "e"],
+ [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
+ ["A", 2, 7, "spam", (1, 3)],
+ ),
+)
+def test_from_numpy_array_nodelist_directed(nodes):
+ A = np.diag(np.ones(4), k=1)
+ # Without edge attributes
+ H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
+ expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True)
+ G = nx.from_numpy_array(A, create_using=nx.DiGraph, edge_attr=None, nodelist=nodes)
+ assert graphs_equal(G, expected)
+
+ # With edge attributes
+ nx.set_edge_attributes(expected, 1.0, name="weight")
+ G = nx.from_numpy_array(A, create_using=nx.DiGraph, nodelist=nodes)
+ assert graphs_equal(G, expected)
+
+
[email protected](
+ "nodes",
+ (
+ [4, 3, 2, 1, 0],
+ [9, 7, 1, 2, 8],
+ ["a", "b", "c", "d", "e"],
+ [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
+ ["A", 2, 7, "spam", (1, 3)],
+ ),
+)
+def test_from_numpy_array_nodelist_multigraph(nodes):
+ A = np.array(
+ [
+ [0, 1, 0, 0, 0],
+ [1, 0, 2, 0, 0],
+ [0, 2, 0, 3, 0],
+ [0, 0, 3, 0, 4],
+ [0, 0, 0, 4, 0],
+ ]
+ )
+
+ H = nx.MultiGraph()
+ for i, edge in enumerate(((0, 1), (1, 2), (2, 3), (3, 4))):
+ H.add_edges_from(itertools.repeat(edge, i + 1))
+ expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True)
+
+ G = nx.from_numpy_array(
+ A,
+ parallel_edges=True,
+ create_using=nx.MultiGraph,
+ edge_attr=None,
+ nodelist=nodes,
+ )
+ assert graphs_equal(G, expected)
+
+
[email protected](
+ "nodes",
+ (
+ [4, 3, 2, 1, 0],
+ [9, 7, 1, 2, 8],
+ ["a", "b", "c", "d", "e"],
+ [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
+ ["A", 2, 7, "spam", (1, 3)],
+ ),
+)
[email protected]("graph", (nx.complete_graph, nx.cycle_graph, nx.wheel_graph))
+def test_from_numpy_array_nodelist_rountrip(graph, nodes):
+ G = graph(5)
+ A = nx.to_numpy_array(G)
+ expected = nx.relabel_nodes(G, mapping=dict(enumerate(nodes)), copy=True)
+ H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
+ assert graphs_equal(H, expected)
+
+ # With an isolated node
+ G = graph(4)
+ G.add_node("foo")
+ A = nx.to_numpy_array(G)
+ expected = nx.relabel_nodes(G, mapping=dict(zip(G.nodes, nodes)), copy=True)
+ H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
+ assert graphs_equal(H, expected)
diff --git a/networkx/tests/test_convert_pandas.py b/networkx/tests/test_convert_pandas.py
--- a/networkx/tests/test_convert_pandas.py
+++ b/networkx/tests/test_convert_pandas.py
@@ -318,3 +318,21 @@ def test_to_pandas_edgelist_with_nodelist():
df = nx.to_pandas_edgelist(G, nodelist=[1, 2])
assert 0 not in df["source"].to_numpy()
assert 100 not in df["weight"].to_numpy()
+
+
+def test_from_pandas_adjacency_with_index_collisions():
+ """See gh-7407"""
+ df = pd.DataFrame(
+ [
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ [0, 0, 0, 0],
+ ],
+ index=[1010001, 2, 1, 1010002],
+ columns=[1010001, 2, 1, 1010002],
+ )
+ G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph)
+ expected = nx.DiGraph([(1010001, 2), (2, 1), (1, 1010002)])
+ assert nodes_equal(G.nodes, expected.nodes)
+ assert edges_equal(G.edges, expected.edges)
| networkx.exception.NetworkXUnfeasible when creating a graph from pandas
### Current Behavior
When creating a graph from a pandas dataframe, if the row/column names are integers less than the dimensionality of the frame, an error occurs:
```
raise nx.NetworkXUnfeasible(
networkx.exception.NetworkXUnfeasible: The node label sets are overlapping and no ordering can resolve the mapping. Use copy=True.
```
### Expected Behavior
Graph is created without errors
To fix it:
`nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)` in convert_matrix.py should have copy=True otherwise there's a duplication of labels
### Steps to Reproduce
```
M = pd.DataFrame(0.0, index=[1010001,2,1,1010002], columns=[1010001,2,1,1010002])
G = nx.from_pandas_adjacency(M, create_using = nx.DiGraph)
```
Possible fix appears in #7408
### Environment
Python version: 3.9, 3.10
NetworkX version: 3.1, 3.3
| 2024-04-13T02:56:26 |
|
networkx/networkx | 7,426 | networkx__networkx-7426 | [
"7401"
] | 5e0216e8492c0b99af7864acd98fd45200c06cdf | diff --git a/networkx/convert.py b/networkx/convert.py
--- a/networkx/convert.py
+++ b/networkx/convert.py
@@ -133,7 +133,7 @@ def to_networkx_graph(data, create_using=None, multigraph_input=False):
msg = "Input is not a correct Pandas DataFrame edge-list."
raise nx.NetworkXError(msg) from err
except ImportError:
- warnings.warn("pandas not found, skipping conversion test.", ImportWarning)
+ pass
# numpy array
try:
@@ -147,7 +147,7 @@ def to_networkx_graph(data, create_using=None, multigraph_input=False):
f"Failed to interpret array as an adjacency matrix."
) from err
except ImportError:
- warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
+ pass
# scipy sparse array - any format
try:
@@ -161,7 +161,7 @@ def to_networkx_graph(data, create_using=None, multigraph_input=False):
"Input is not a correct scipy sparse array type."
) from err
except ImportError:
- warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
+ pass
# Note: most general check - should remain last in order of execution
# Includes containers (e.g. list, set, dict, etc.), generators, and
| simple_cycles attempts to import multiple libraries on every call
<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->
<!--- Provide a general summary of the issue in the Title above -->
### Current Behavior
Calling [nx.simple_cycles](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.cycles.simple_cycles.html) produces multiple ImportWarnings when pandas/numpy/scipy is not installed:
```
/usr/local/lib/python3.11/site-packages/networkx/convert.py:136: ImportWarning: pandas not found, skipping conversion test.
warnings.warn("pandas not found, skipping conversion test.", ImportWarning)
/usr/local/lib/python3.11/site-packages/networkx/convert.py:150: ImportWarning: numpy not found, skipping conversion test.
warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
/usr/local/lib/python3.11/site-packages/networkx/convert.py:164: ImportWarning: scipy not found, skipping conversion test.
warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
```
This happens every time `simple_cycles` is called. The warnings come the method `convert.to_networkx_graph`, which appears in DiGraph's constructor when called from [here](https://github.com/networkx/networkx/blob/main/networkx/algorithms/cycles.py#L220)
Problems with this:
- **Warnings polluted**: If logs are configured to show warnings, they get very polluted with these warnings. On pytest, for example, warnings are enabled, so they pollute the logs.
- **Import attempts on every call**: Every time `simple_cycles` is called, multiple `import` instructions are called, which is slow. Does it make sense to try importing these libs on every `to_networkx_graph` call? Or can that be done just once? See a sample code below with a profile:
The following code reproduces the issue: Calling `simple_cycles` 1000 times with warnings enabled:
`graph.py`:
```
import warnings
import networkx as nx
warnings.simplefilter('always', Warning)
for _ in range(1000):
edges = [
("A", "B"),
("B", "C"),
("C", "D"),
("D", "B"),
]
graph = nx.DiGraph(edges) # << warnings appear once here
cycles = list(nx.simple_cycles(graph)) # <<< and another time here
print(cycles)
```
`python -m cProfile -s cumulative graph.py`
```
2890844 function calls (2879132 primitive calls) in 1.228 seconds
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
2 0.005 0.002 1.229 0.615 graph.py:1(<module>)
358/1 0.002 0.000 1.228 1.228 {built-in method builtins.exec}
3000 0.007 0.000 0.958 0.000 digraph.py:316(__init__)
2000 0.015 0.000 0.949 0.000 convert.py:34(to_networkx_graph)
6374/6001 0.017 0.000 0.871 0.000 <frozen importlib._bootstrap>:1165(_find_and_load)
6373/6001 0.009 0.000 0.806 0.000 <frozen importlib._bootstrap>:1120(_find_and_load_unlocked)
6369 0.027 0.000 0.706 0.000 <frozen importlib._bootstrap>:1054(_find_spec)
6367 0.004 0.000 0.638 0.000 <frozen importlib._bootstrap_external>:1496(find_spec)
6367 0.046 0.000 0.634 0.000 <frozen importlib._bootstrap_external>:1464(_get_spec)
2000 0.005 0.000 0.626 0.000 cycles.py:104(simple_cycles)
```
From 1000 calls to `simple_cycles` there are 6000 import attempts and it took ~90% of the total execution time.
I would also like to mention that this may not be exclusive to `simple_cycle` method, since the warnings/imports happen on any usage of `nx.DiGraph(incoming_graph_data)` where incoming_graph_data is a list or iterator, but I understand this is expected because it has to guess. However, I wouldn't expect this guessing to happen internally on simple_cycle calls, because unlike with the construction of my nx.DiGraph, I have no way of controlling it.
```
edges = [
("A", "B"),
("B", "C"),
("C", "D"),
("D", "B"),
]
nx.DiGraph((u, v) for u, v in edges)
```
<!--- Tell us what happens instead of the expected behavior -->
### Expected Behavior
Warnings and imports should happen at most one time, both for logging purposes and execution time.
### Steps to Reproduce
<!--- Provide a minimal example that reproduces the bug -->
See graph.py above
### Environment
<!--- Please provide details about your local environment -->
Python version: 3.11.9
NetworkX version: 3.3
### Additional context
<!--- Add any other context about the problem here, screenshots, etc. -->
| A quick bit of context - this is not related to the recent nx v3.3 release. This behavior (i.e. ImportWarnings on soft dependencies when the warnings filter is turned all the way up) has always been around. That's not to say it shouldn't be changed insofar as possible, just that it's not related to the recent release!
Do we need the warnings in `to_networkx_graph`?
I think the warnings were intended to help a user who didn't realize that they had forgotten to install e.g. pandas in their environment. That they input a pandas DataFrame but didn't have pandas installed. But it seems like that case is not possible. If `pandas` is not installed, how could the user get a pandas object to input?
Is there a way for users to input an object intended for the pandas conversion code and not have pandas installed?
If not, is there any danger in replacing the warnings with `pass`?
We should also update the docstring for [nx.simple_cycles](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.cycles.simple_cycles.html). It currently states:
```
Parameters
----------
G : NetworkX DiGraph
A directed graph
```
but `G` doesn't have to be a NetworkX DiGraph. | 2024-04-19T02:54:24 |
|
networkx/networkx | 7,445 | networkx__networkx-7445 | [
"7444"
] | ad673ffed8479b1303c156fa785f80fdb0f8f934 | diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -348,7 +348,7 @@ def from_pandas_edgelist(
edge_attr : str or int, iterable, True, or None
A valid column name (str or int) or iterable of column names that are
used to retrieve items and add them to the graph as edge attributes.
- If `True`, all of the remaining columns will be added.
+ If `True`, all columns will be added except `source`, `target` and `edge_key`.
If `None`, no edge attributes are added to the graph.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
@@ -436,6 +436,8 @@ def from_pandas_edgelist(
return g
reserved_columns = [source, target]
+ if g.is_multigraph() and edge_key is not None:
+ reserved_columns.append(edge_key)
# Additional columns requested
attr_col_headings = []
| diff --git a/networkx/tests/test_convert_pandas.py b/networkx/tests/test_convert_pandas.py
--- a/networkx/tests/test_convert_pandas.py
+++ b/networkx/tests/test_convert_pandas.py
@@ -241,7 +241,8 @@ def test_from_adjacency_named(self):
df = nx.to_pandas_adjacency(G, dtype=np.intp)
pd.testing.assert_frame_equal(df, dftrue)
- def test_edgekey_with_multigraph(self):
+ @pytest.mark.parametrize("edge_attr", [["attr2", "attr3"], True])
+ def test_edgekey_with_multigraph(self, edge_attr):
df = pd.DataFrame(
{
"source": {"A": "N1", "B": "N2", "C": "N1", "D": "N1"},
@@ -264,7 +265,7 @@ def test_edgekey_with_multigraph(self):
df,
source="source",
target="target",
- edge_attr=["attr2", "attr3"],
+ edge_attr=edge_attr,
edge_key="attr1",
create_using=nx.MultiGraph(),
)
| keys added as edge attributes when using nx.from_pandas_edgelist
Hi there,
regarding the nx.from_pandas_edgelist function, when creating multigraphs:
### Current Behavior
Following the example in the documentation, but calling "edge_attr = True" instead of "edge_attr=["weight", "color"]", the edge keys get added to the graph not only as keys, but also as edge attributes.
i.e G.edges(data=True) outputs:
`MultiEdgeDataView([(0, 2, {'my_edge_key': 'A', 'weight': 3, 'color': 'red'}), (0, 2, {'my_edge_key': 'D', 'weight': 6, 'color': 'blue'}), (2, 1, {'my_edge_key': 'B', 'weight': 4, 'color': 'blue'}), (2, 3, {'my_edge_key': 'C', 'weight': 5, 'color': 'blue'})])`
I do not think this is intended behavior, as edge keys are not edge attributes.
In other words If we start with a multigraph and save it to a pandas edgelist using `nx.to_pandas_edgelist()`, the graph we load from that same edgelist in this way has an extra attribute (my_edge_key).
### Expected Behavior
keys should only be added as keys, as is what happens when you manually specify which columns are edge_attr.
### Steps to Reproduce
```python
edges = pd.DataFrame(
    {
        "source": [0, 1, 2, 0],
        "target": [2, 2, 3, 2],
        "my_edge_key": ["A", "B", "C", "D"],
        "weight": [3, 4, 5, 6],
        "color": ["red", "blue", "blue", "blue"],
    }
)

G = nx.from_pandas_edgelist(
    edges,
    edge_key="my_edge_key",
    edge_attr=["weight", "color"],
    create_using=nx.MultiGraph(),
)
```
### Environment
NetworkX version: 3.2.1
| 2024-05-13T08:39:42 |
|
networkx/networkx | 7,449 | networkx__networkx-7449 | [
"7410"
] | 0e7bbe5aa8879c32470343a6596c82b4d5fb523b | diff --git a/networkx/generators/trees.py b/networkx/generators/trees.py
--- a/networkx/generators/trees.py
+++ b/networkx/generators/trees.py
@@ -452,6 +452,24 @@ def random_labeled_tree(n, *, seed=None):
------
NetworkXPointlessConcept
If `n` is zero (because the null graph is not a tree).
+
+ Examples
+ --------
+ >>> G = nx.random_labeled_tree(5, seed=42)
+ >>> nx.is_tree(G)
+ True
+ >>> G.edges
+ EdgeView([(0, 1), (0, 3), (0, 2), (2, 4)])
+
+ A tree with *arbitrarily directed* edges can be created by assigning
+ generated edges to a ``DiGraph``:
+
+ >>> DG = nx.DiGraph()
+ >>> DG.add_edges_from(G.edges)
+ >>> nx.is_tree(DG)
+ True
+ >>> DG.edges
+ OutEdgeView([(0, 1), (0, 3), (0, 2), (2, 4)])
"""
# Cannot create a Prüfer sequence unless `n` is at least two.
if n == 0:
| Can't replicate directed functionality of deprecated `random_tree`
The deprecated `random_tree` function supported the `create_using` kwarg to allow the creation of directed trees:
```python
>>> G = nx.random_tree(10, create_using=nx.DiGraph, seed=42)
>>> G.edges
OutEdgeView([(0, 6), (0, 4), (1, 5), (1, 8), (2, 1), (3, 7), (3, 2), (4, 3), (8, 9)])
```
The `random_labeled_tree` function is the recommended replacement for the deprecated `random_tree` fn, but it doesn't support the `create_using` kwarg. AFAICT, the only way to replicate the current behavior of `random_tree` is to create a DiGraph from the generated edges manually:
```python
>>> H = nx.random_labeled_tree(10, seed=42) # Undirected
>>> DH = nx.DiGraph()
>>> DH.add_edges_from(H.edges)
```
I'm mostly just raising for visibility - I don't recall if this was an explicit decision to remove `create_using` support - @vigna . From a user perspective it'd be convenient to be able to replicate existing functionality with as little code modification as possible.
| https://github.com/networkx/networkx/pull/6758#issuecomment-1901097730
| 2024-05-15T18:03:52 |
|
networkx/networkx | 7,471 | networkx__networkx-7471 | [
"7469"
] | 2a03462ea94b1ca70b8ab9cb5e04b85b2a146833 | diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -70,7 +70,6 @@ def bridges(G, root=None):
H = nx.Graph(G) if multigraph else G
chains = nx.chain_decomposition(H, root=root)
chain_edges = set(chain.from_iterable(chains))
- H_copy = H.copy()
if root is not None:
H = H.subgraph(nx.node_connected_component(H, root)).copy()
for u, v in H.edges():
| `algorithms.bridges.bridges()` does a redundant copy of graph
This is the code of the `bridges` function:
```python
def bridges(G, root=None):
multigraph = G.is_multigraph()
H = nx.Graph(G) if multigraph else G
chains = nx.chain_decomposition(H, root=root)
chain_edges = set(chain.from_iterable(chains))
H_copy = H.copy()
if root is not None:
H = H.subgraph(nx.node_connected_component(H, root)).copy()
for u, v in H.edges():
if (u, v) not in chain_edges and (v, u) not in chain_edges:
if multigraph and len(G[u][v]) > 1:
continue
yield u, v
```
The statement in the middle:
```python
H_copy = H.copy()
```
Seem to have no effect, because `H_copy` is not used anywhere else in this module, and therefore **can be removed**.
### Current Behavior
Actually in addition to being redundant due to `H_copy` not used anywhere, presence of this statement makes extending the `networkx.Graph` class harder.
If you inherit from `networkx.Graph` and your class would have a constructor that requires some parameters to be provided, you wouldn't be able to use the `bridges()` function at all. Because this `.copy()` method has the following line:
```python
# Body of .copy() method
G = self.__class__()
```
And for inheritors of `networkx.Graph` that have custom constructors `self.__class__()` raises `TypeError` on missing arguments.
### Expected Behavior
There should be no redundant lines, and `bridges()` should work for inheritors as long as they don't override some dependent methods.
### Steps to Reproduce
Set up for example this:
```python
import typing as tp  # assuming `tp` aliases the standard typing module
import networkx as nx

class Map(nx.Graph):
    def __init__(self: tp.Self, routes: tp.Iterable[str]) -> None:
        super().__init__()
```
Initialize this object like `Map(routes=("a", "b"))` and try using `bridges()` with it.
### Environment
Python version: 3.11
NetworkX version: 3.3
| Thanks for this! You are correct that the copy line is not needed and should be deleted.
Your comment about subclassing `nx.Graph` suggests that it is important to override the `copy` method when your subclass has additional init parameters. Removing the `copy` command from `bridges` will let your example code run, but anything else that uses the `copy` method will not work. I'd suggest overriding the copy method. Another approach would be to create your subclass and have it hold the graph object as an attribute of the new class rather than be a subclass. But that depends on what else you do with your subclass. :)
Thanks! A PR that removes the `H_copy` assignment would fix this issue. | 2024-05-30T21:25:13 |
|
networkx/networkx | 7,477 | networkx__networkx-7477 | [
"7467"
] | 6eabce499fb0cd4235448480d334cc78f94ce75f | diff --git a/networkx/algorithms/shortest_paths/generic.py b/networkx/algorithms/shortest_paths/generic.py
--- a/networkx/algorithms/shortest_paths/generic.py
+++ b/networkx/algorithms/shortest_paths/generic.py
@@ -224,7 +224,7 @@ def shortest_path_length(G, source=None, target=None, weight=None, method="dijks
Returns
-------
- length: int or iterator
+ length: number or iterator
If the source and target are both specified, return the length of
the shortest path from the source to the target.
| `shortest_path_length()` specifies return value of `int` but was `float`
### Current Behavior
if `shortest_path_length` is used with weights containing `float`, a `float` is returned, contrary to the function docs.
```python
import networkx as nx
G = nx.DiGraph()
G.add_node(0)
G.add_node(1)
G.add_node(2)
G.add_edge(0, 1, weight=3.33333)
G.add_edge(1, 2, weight=4.75555)
length = nx.shortest_path_length(G, source=0, target=2, weight='weight', method='dijkstra')
print(length)
print(type(length))
```
```
8.08888
<class 'float'>
```
### Expected Behavior
According to the documentation I would expect an integer or a `ValueError`, as it specifies that an `int` is returned -- see [docs](https://networkx.org/documentation/stable/_modules/networkx/algorithms/shortest_paths/generic.html#shortest_path_length) and [code](https://github.com/networkx/networkx/blob/729bec23b10e89b5f6d37175e30484f2278107c4/networkx/algorithms/shortest_paths/generic.py#L225-L229)
however, the underlying function used here, `dijkstra_path_length`, [specifies](https://github.com/networkx/networkx/blob/729bec23b10e89b5f6d37175e30484f2278107c4/networkx/algorithms/shortest_paths/weighted.py#L205-L208) that a "number" is returned.
Perhaps this is a documentation bug? Are float in edge weights supported (with the caveat of floating point issues)?
### Steps to Reproduce
see above
### Environment
Python version: Python 3.12.3
NetworkX version: 3.3
| Thanks for this! The docs are indeed incorrect here. It should return a number or iterator, not specifically an `int` or iterator. It goes all the way back to #744 before which it is correctly called a number.
NetworkX supports `float` edge weights with the usual caveats of floating point issues.
Thanks!
Hi @dschult,
I'm new to this project and would love to contribute to this issue. I'm interested in working on it. Is it still available? Could you provide some guidance on how to get started?
Thanks!
We dont assign issues to individual developers -- you can start looking at an issue. When you have a solution open a PR. Or open a PR with a partial solution and we'll give feedback. (actually we'll give feedback either way. :)
This one might be a good one to start with. To start, you should read through the developer section of the docs, especially the contributor guide. | 2024-06-02T17:58:03 |
|
uclapi/uclapi | 51 | uclapi__uclapi-51 | [
"52"
] | e78c5a4350c2f9bf60724b174e6e914db7d3f413 | diff --git a/backend/uclapi/roombookings/token_auth.py b/backend/uclapi/roombookings/token_auth.py
--- a/backend/uclapi/roombookings/token_auth.py
+++ b/backend/uclapi/roombookings/token_auth.py
@@ -1,4 +1,3 @@
-from rest_framework.response import Response
from dashboard.models import App
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
@@ -9,16 +8,22 @@ def wrapped(request, *args, **kwargs):
token = request.GET.get("token")
if not token:
- return JsonResponse({
+ response = JsonResponse({
+ "ok": False,
"error": "No token provided"
})
+ response.status_code = 400
+ return response
try:
App.objects.get(api_token=token)
except ObjectDoesNotExist:
- return JsonResponse({
+ response = JsonResponse({
+ "ok": False,
"error": "Token does not exist"
})
+ response.status_code = 400
+ return response
return view_func(request, *args, **kwargs)
return wrapped
| Error responses missing OK property and correct HTTP Status Code
Hello there,
when making an invalid request, say, an invalid token, an error is given but the HTTP code is still `200 OK` when it should give an authentication error (a `400 Bad Request` or even a `401 Unauthorized`).
Request:
```http
GET /roombookings/rooms?token=uclapi-5d58c3c4e6bf9c-c2910ad3b6e054-7ef60f44f1c14f-a05147bfd17fdb HTTP/1.1
Host: uclapi.com
User-Agent: Paw/3.0.16 (Macintosh; OS X/10.12.4) NSURLConnection/1349.63
Cookie: AWSALB=8q9+FZmk9TOAZ/GG2tFsVUuckVO8STONoiGDn6/jd9FBEwFi5Ke/kvz+hIjdCmAwtpOIXGlnudL7LU3AaVxKt1sHWYGjLJnoMypHqu53r7Ub4b73trfiMx5NMVZ6
```
Response:
```http
HTTP/1.1 200 OK
Connection: keep-alive
Content-Type: application/json
Allow: GET, OPTIONS
Server: nginx/1.11.10
Set-Cookie: AWSALB=U2qTk7k+LrfxXibfskgPN1RoWvDQqKhaSBmgvUMOa3AcYgag1BZMcyz+5h5rQ2qhQc+Cm2PYzfjKV466PHcc9dleZHkLYE5O8d5q2WO+7WVbQT6VQMSSSHpOh3xy; Expires=Sat, 11 Mar 2017 14:18:40 GMT; Path=/
Transfer-Encoding: Identity
Date: Sat, 04 Mar 2017 14:18:40 GMT
X-Frame-Options: SAMEORIGIN
Vary: Accept, Cookie
{"error": "Token does not exist"}
```
Furthermore, when a request is successful, the first property is an `"ok": true`, however, with invalid requests, the `"ok": false` is missing from the error response.
Actual:
```json
{
"error": "Token does not exist"
}
```
Expected:
```json
{
"ok": false,
"error": "Token does not exist"
}
```
| 2017-03-04T14:25:02 |
||
uclapi/uclapi | 128 | uclapi__uclapi-128 | [
"119"
] | e148d90ff90ddc419a11696f2e39fb2a788c6510 | diff --git a/backend/uclapi/search/views.py b/backend/uclapi/search/views.py
--- a/backend/uclapi/search/views.py
+++ b/backend/uclapi/search/views.py
@@ -13,10 +13,12 @@
@log_api_call
def people(request):
if "query" not in request.GET:
- return JsonResponse({
+ response = JsonResponse({
"ok": False,
- "error": "No query provided"
+ "error": "No query provided."
})
+ response.status_code = 400
+ return response
query = request.GET["query"]
| [Bug] Search People should return HTTP status 400 when query is missing
Currently, the `/search/people` returns a HTTP 200 code when even for an incorrect API request. For example, if you leave out the `query` param it returns the following body:
```json
{ "error": "No query provided", "ok": false}
```
Yet, the HTTP status code is 200, while it should be 400.
| 2017-10-08T03:01:18 |
||
uclapi/uclapi | 140 | uclapi__uclapi-140 | [
"139"
] | db16e1810ecc0689733b53b5a8b7880120f7a89e | diff --git a/backend/uclapi/resources/views.py b/backend/uclapi/resources/views.py
--- a/backend/uclapi/resources/views.py
+++ b/backend/uclapi/resources/views.py
@@ -45,7 +45,7 @@ def get_pc_availability(request, *args, **kwargs):
"longitude": _("longitude"),
"building_name": _("buildingName"),
"address": _("buildingAddress"),
- "postcode": _("buildingPostCode")
+ "postcode": _("buildingPostcode")
},
"free_seats": _("free"),
"total_seats": _("seats"),
| [Documentation] Docs link is Absolute, not Relative
The documentation link always goes to `https://uclapi.com/docs`, even if running in, for example, staging. Just linking to `/docs` would be adequate to fix this.
| 2017-11-02T23:22:00 |
||
uclapi/uclapi | 226 | uclapi__uclapi-226 | [
"194"
] | 60b6014348c5e268041933008730e3904b28b810 | diff --git a/backend/uclapi/timetable/views.py b/backend/uclapi/timetable/views.py
--- a/backend/uclapi/timetable/views.py
+++ b/backend/uclapi/timetable/views.py
@@ -82,7 +82,7 @@ def get_departments(request, *args, **kwargs):
depts["departments"].append({
"department_id": dept.deptid,
"name": dept.name
- }, rate_limiting_data=kwargs)
+ })
return JsonResponse(depts, rate_limiting_data=kwargs)
| timetable/data/departments endpoint returns 500 error
The timetable/data/departments endpoint is currently returning a 500 error on any request.
I know it's not a documented endpoint, but it would be nice if it worked :)
It looks to me like the problem is line 85 below - `rate_limiting_data` is being passed as an argument to `append`.
https://github.com/uclapi/uclapi/blob/cfd6753ae3d979bbe53573dad68babc2de19e04d/backend/uclapi/timetable/views.py#L82-L85
Removing that and replacing with this:
```python
depts["departments"].append({
"department_id": dept.deptid,
"name": dept.name})
```
should fix it, though I don't have the whole API setup installed, so I can't be sure.
| Sorry, just saw this! That looks right to me.
We should have this endpoint documented at some point (I think I actually wrote it up before; some of my work got lost when we moved to a new docs setup). | 2017-12-07T21:06:55 |
|
uclapi/uclapi | 227 | uclapi__uclapi-227 | [
"225"
] | 186cc24296c04f1b5360eb775aaddb77c28fd8b4 | diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -20,7 +20,7 @@
from .scoping import Scopes
from uclapi.settings import REDIS_UCLAPI_HOST
-from common.decorators import uclapi_protected_endpoint
+from common.decorators import uclapi_protected_endpoint, get_var
# The endpoint that creates a Shibboleth login and redirects the user to it
@@ -321,12 +321,13 @@ def userallow(request):
)
+@csrf_exempt
def token(request):
- try:
- code = request.GET["code"]
- client_id = request.GET["client_id"]
- client_secret = request.GET["client_secret"]
- except KeyError:
+ code = get_var(request, "code")
+ client_id = get_var(request, "client_id")
+ client_secret = get_var(request, "client_secret")
+
+ if not code or not client_id or not client_secret:
response = PrettyJsonResponse({
"ok": False,
"error": ("The client did not provide"
| /oauth/token Should Accept POST Requests
Many OAuth 2.0 libraries seem to expect the `/oauth/token` endpoint to accept a POST request instead of a GET one. We should probably support this too if the RFC expects it.
| https://ucl-assistant-auth.glitch.me
Updated this code to send a URL-encoded POST request instead of a GET request. Server returns CSRF error. Found the cause of my problems earlier! 😁
@mbellgb thank you so much for investigating this for us! I'll see if I can code up a quick solution to this now and get it into Staging. Would you mind testing it if I can?
|
uclapi/uclapi | 370 | uclapi__uclapi-370 | [
"369"
] | 1678aa120733a238ba5175b734df93649b74dba8 | diff --git a/backend/uclapi/workspaces/occupeye/cache.py b/backend/uclapi/workspaces/occupeye/cache.py
--- a/backend/uclapi/workspaces/occupeye/cache.py
+++ b/backend/uclapi/workspaces/occupeye/cache.py
@@ -394,7 +394,7 @@ def cache_common_summaries(self):
self._const.SURVEY_DATA_KEY.format(survey_id)
)
survey_data = {
- "id": survey_id,
+ "id": int(survey_id),
"name": survey_redis_data["name"],
"maps": []
}
@@ -402,7 +402,7 @@ def cache_common_summaries(self):
sensors = api.get_survey_sensors(survey_id)
for survey_map in sensors["maps"]:
map_data = {
- "id": survey_map["id"],
+ "id": int(survey_map["id"]),
"name": survey_map["name"],
"sensors_absent": 0,
"sensors_occupied": 0,
| Workspaces summary endpoint provides survey and map ids as strings rather than integers
Example response:
```json
{
"ok": true,
"surveys": [
{
"id": "46",
"name": "UCL Institute of Education Library",
"maps": [
{
"id": "73",
"name": "Level 3",
"sensors_absent": 70,
"sensors_occupied": 87,
"sensors_other": 0
},
{
"id": "74",
"name": "Level 4",
"sensors_absent": 51,
"sensors_occupied": 45,
"sensors_other": 0
},
{
"id": "75",
"name": "Level 5",
"sensors_absent": 30,
"sensors_occupied": 41,
"sensors_other": 2
}
]
}
]
}
```
Also, there's a discrepancy in the [docs](https://uclapi.com/docs#workspaces/sensors/summary) as to the type of the id value.
| Thanks so much for finding this! It's a regression from #365, woops! | 2018-03-11T17:47:18 |
|
uclapi/uclapi | 371 | uclapi__uclapi-371 | [
"326"
] | 54658e4ab701102edc1c6d47d97edaf7ad7c76f5 | diff --git a/backend/uclapi/roombookings/helpers.py b/backend/uclapi/roombookings/helpers.py
--- a/backend/uclapi/roombookings/helpers.py
+++ b/backend/uclapi/roombookings/helpers.py
@@ -7,7 +7,7 @@
import pytz
import redis
-from django.core.exceptions import FieldError, ObjectDoesNotExist
+from django.core.exceptions import FieldError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
import ciso8601
@@ -20,6 +20,30 @@
TOKEN_EXPIRY_TIME = 30 * 60
+ROOM_TYPE_MAP = {
+ "AN": "Anechoic Chamber",
+ "CI": "Clinic Room",
+ "CF": "Catering Facilities",
+ "CFE": "Cafe",
+ "CL": "Cloakroom",
+ "CR": "Classroom",
+ "ER": "Equipment Room",
+ "IN": "Installation",
+ "LA": "Laboratory",
+ "LB": "Library",
+ "LT": "Lecture Theatre",
+ "MR": "Meeting Room",
+ "OF": "Office",
+ "PC1": "Public Cluster",
+ "PC2": "Public Cluster - Tutorial",
+ "PC3": "Public Cluster - Students",
+ "RC": "Reverberation Chamber",
+ "SS": "Social Space",
+ "STU": "Studio",
+ "TH": "Theatre",
+}
+
+
def _create_page_token(query, pagination):
r = redis.StrictRedis(host=REDIS_UCLAPI_HOST)
page_data = {
@@ -151,6 +175,12 @@ def _parse_datetime(start_time, end_time, search_date):
def _serialize_rooms(room_set):
rooms = []
for room in room_set:
+ # Maps room classification to a textual version
+ # e.g. LT => Lecture Theatre
+ classification_name = ROOM_TYPE_MAP.get(
+ room.roomclass,
+ "Unknown Room Type"
+ )
room_to_add = {
"roomname": room.roomname,
"roomid": room.roomid,
@@ -158,6 +188,7 @@ def _serialize_rooms(room_set):
"sitename": room.sitename,
"capacity": room.capacity,
"classification": room.roomclass,
+ "classification_name": classification_name,
"automated": room.automated,
"location": {
"address": [
@@ -308,8 +339,8 @@ def _filter_for_free_rooms(all_rooms, bookings, start, end):
for room in rooms_with_bookings:
roomid, siteid = room["roomid"], room["siteid"]
if (
- (roomid, siteid) not in bookings_map
- or not bookings_map[(roomid, siteid)]
+ (roomid, siteid) not in bookings_map or
+ not bookings_map[(roomid, siteid)]
):
# if room doesn't have any overlapping bookings
free_rooms.append(room)
| Undocumented classification
Hi there,
I've just noticed that there are some new(?) classifications that are not in the docs.
These are:
* `TH`
* `CFE`
* `MR`
* `ER`
* `CF`
What do these stand for?
Thanks,
Tiago
| Very good question
@ChristopherHammond13 any clue what these could mean? I'd be happy to make a PR for the docs if you just list the meanings here.
Hey! Apologies for not getting back to this sooner. I have done some digging and will (hopefully soon!) be sent the data dictionary for the CMIS database that this talks to. With that I'll be able to fill in all the random bits of data we don't know.
Could you give me some example bookings that are coming up with these and I'll see what I can add from guess-work?
Sure thing, here are some examples:
### TH
```json
{
"roomname": "IOE - Bedford Way (20) - 744",
"roomid": "744",
"siteid": "162",
"sitename": "IOE - Bedford Way, 20",
"capacity": 25,
"classification": "TH",
"automated": "N",
"location": {
"address": [
"20 Bedford Way",
"London",
"WC1H 0AL",
""
],
"coordinates": {
"lat": "51.523471",
"lng": "-0.1285067"
}
}
}
```
### CFE
```json
{
"automated": "N",
"location": {
"address": [
"9-11 Endsleigh Gardens",
"London",
"WC1H 0EH",
""
],
"coordinates": {
"lng": "-0.1314654",
"lat": "51.526198"
}
},
"capacity": 18,
"roomid": "EGG5",
"roomname": "IOE - Endsleigh Gardens (9-11) - G05",
"siteid": "084",
"sitename": "IOE - Endsleigh Gardens, 9-11",
"classification": "CFE"
}
```
### MR
```json
{
"roomname": "Wilkins Garden Room",
"roomid": "34",
"siteid": "050",
"sitename": "Bernard Katz Building",
"capacity": 20,
"classification": "MR",
"automated": "P",
"location": {
"address": [
"Gower Street,",
"London",
"WC1E 6BT",
""
],
"coordinates": {
"lat": "51.5247725",
"lng": "-0.1334268"
}
}
}
```
### ER
```json
{
"roomname": "Wilkins Building (Main Building) North Observatory",
"roomid": "XG1",
"siteid": "005",
"sitename": "Main Building",
"capacity": 6,
"classification": "ER",
"automated": "N",
"location": {
"address": [
"Gower Street",
"London",
"WC1E 6BT",
""
],
"coordinates": {
"lat": "51.524699",
"lng": "-0.13366"
}
}
}
```
### CF
```json
{
"roomname": "Wilkins Building (Main Building) Whistler Room",
"roomid": "G09D",
"siteid": "005",
"sitename": "Main Building",
"capacity": 10,
"classification": "CF",
"automated": "N",
"location": {
"address": [
"Gower Street",
"London",
"WC1E 6BT",
""
],
"coordinates": {
"lat": "51.524699",
"lng": "-0.13366"
}
}
}
```
MR - Marquee? is the only one I can get :thinking:
MR - Marquee makes sense.
ER - Equipment room
I think all the classifications are here: https://roombooking.ucl.ac.uk/rb/bookableSpace/viewAllBookableSpace.html?invoker=EFD
Update: I'm currently updating the room bookings API to return a human-readable version of the room type (with the field `classification_str`) based on the data shown on the UCL Room Bookings site. Watch this space! :) | 2018-03-11T18:27:18 |
|
uclapi/uclapi | 556 | uclapi__uclapi-556 | [
"524"
] | 17ba361895960b734a711cdc69e9e01cc60abb7b | diff --git a/backend/uclapi/uclapi/settings.py b/backend/uclapi/uclapi/settings.py
--- a/backend/uclapi/uclapi/settings.py
+++ b/backend/uclapi/uclapi/settings.py
@@ -187,15 +187,11 @@
USE_TZ = False
-
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
-STATICFILES_DIRS = [
- os.path.join(BASE_DIR, "images"),
-]
# Cross Origin settings
CORS_ORIGIN_ALLOW_ALL = True
| Move to WebPack
Building the frontend is annoying, and updating dependencies in a way that lets us test the updates is nothing short of tedious.
WebPack with proper bounds on the minor version of each component and dependency should help us stay in line and keep up to date with the frontend code.
| 2018-06-05T15:52:09 |
||
uclapi/uclapi | 568 | uclapi__uclapi-568 | [
"544"
] | 7a7a134cbf471f04a46ebaff337df88d7459ee95 | diff --git a/backend/uclapi/uclapi/celery.py b/backend/uclapi/uclapi/celery.py
--- a/backend/uclapi/uclapi/celery.py
+++ b/backend/uclapi/uclapi/celery.py
@@ -27,23 +27,6 @@ def on_configure(self):
app.config_from_object('django.conf.settings', namespace='CELERY')
-
-from opbeat.contrib.django.models import \
- register_handlers as opbeat_register_handlers, \
- logger as opbeat_logger # noqa: E402#
-
-from opbeat.contrib.celery import \
- register_signal as opbeat_register_signal # noqa: E402
-
-
-try:
- opbeat_register_signal(app)
-except Exception as e:
- opbeat_logger.exception('Failed installing celery hook: %s' % e)
-
-if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
- opbeat_register_handlers()
-
app.autodiscover_tasks()
diff --git a/backend/uclapi/uclapi/settings.py b/backend/uclapi/uclapi/settings.py
--- a/backend/uclapi/uclapi/settings.py
+++ b/backend/uclapi/uclapi/settings.py
@@ -66,14 +66,12 @@
'oauth',
'timetable',
'common',
- 'opbeat.contrib.django',
'raven.contrib.django.raven_compat',
'corsheaders',
'workspaces'
]
MIDDLEWARE = [
- 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
@@ -143,13 +141,6 @@
DATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter']
-# analytics
-OPBEAT = {
- 'ORGANIZATION_ID': os.environ.get("OPBEAT_ORG_ID"),
- 'APP_ID': os.environ.get("OPBEAT_APP_ID"),
- 'SECRET_TOKEN': os.environ.get("OPBEAT_SECRET_TOKEN")
-}
-
RAVEN_CONFIG = {
'dsn': os.environ.get("SENTRY_DSN"),
}
| Remove opbeat
Opbeat is no longer a thing, so we should remove it and replace it with something else? Maybe double down on Sentry and integrate that more tightly
https://github.com/uclapi/uclapi/blob/81b90305f9316b020664b32c2436e27ab957e8a7/backend/uclapi/requirements.txt#L33
| I was literally about to come to this issues page to open the exact same issue. BRAIN SYNC. | 2018-06-09T15:00:25 |
|
uclapi/uclapi | 883 | uclapi__uclapi-883 | [
"863"
] | 75f644e37ff8a2c0ea0f1be4a537b8c7da9f5932 | diff --git a/backend/uclapi/timetable/amp.py b/backend/uclapi/timetable/amp.py
--- a/backend/uclapi/timetable/amp.py
+++ b/backend/uclapi/timetable/amp.py
@@ -31,7 +31,8 @@ def __init__(self, delivery_code):
def get_delivery(self):
return {
"fheq_level": self.fheq_level,
- "is_undergraduate": self.undergraduate
+ "is_undergraduate": self.undergraduate,
+ "student_type": self.student_type
}
| AMP Doesn't Return STUDENT_TYPE
I am an idiot.
https://github.com/uclapi/uclapi/blob/69f0d3240d8ec6cf2a3b018897dc247aa58cb1bf/backend/uclapi/timetable/amp.py#L31
| 2019-01-09T19:52:25 |
||
uclapi/uclapi | 953 | uclapi__uclapi-953 | [
"946"
] | d61612994b84627dbfbf01f56d7688adccf5680e | diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -50,10 +50,17 @@ def authorise(request):
response.status_code = 400
return response
- if app.callback_url is None:
+ if app.callback_url is None or app.callback_url.strip() == "":
response = PrettyJsonResponse({
"ok": False,
- "error": "No callback URL set for this app."
+ "error": (
+ "This app does not have a callback URL set. "
+ "If you are the developer of this app, "
+ "please ensure you have set a valid callback "
+ "URL for your application in the Dashboard. "
+ "If you are a user, please contact the app's "
+ "developer to rectify this."
+ )
})
response.status_code = 400
return response
| diff --git a/backend/uclapi/oauth/tests.py b/backend/uclapi/oauth/tests.py
--- a/backend/uclapi/oauth/tests.py
+++ b/backend/uclapi/oauth/tests.py
@@ -12,7 +12,7 @@
from .app_helpers import generate_random_verification_code
from .models import OAuthScope, OAuthToken
from .scoping import Scopes
-
+from .views import authorise
@uclapi_protected_endpoint(personal_data=True, required_scopes=["timetable"])
def test_timetable_request(request, *args, **kwargs):
@@ -311,6 +311,38 @@ def test_decorator_everything_passes(self):
self.assertEqual(response.status_code, 200)
+ def test_no_callback_url(self):
+ user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Test"
+ )
+ app_ = App.objects.create(user=user_, name="An App")
+ request = self.factory.get(
+ '/oauth/authorise',
+ {
+ 'client_id': app_.client_id,
+ 'state': 1
+ }
+ )
+ try:
+ response = authorise(request)
+ content = json.loads(response.content.decode())
+ self.assertEqual(response.status_code, 400)
+ self.assertEqual(
+ content["error"],
+ (
+ "This app does not have a callback URL set. "
+ "If you are the developer of this app, "
+ "please ensure you have set a valid callback "
+ "URL for your application in the Dashboard. "
+ "If you are a user, please contact the app's "
+ "developer to rectify this."
+ )
+ )
+ except json.decoder.JSONDecodeError:
+ self.fail("Got through to authorize page with no callback URL set")
+
class AppHelpersTestCase(TestCase):
def test_generate_random_verification_code(self):
| OAuth: Useless Error With No Callback URL
1) Create an app
2) Attach an integration via OAuth
3) Attempt to Authenticate
4) Error about invalid signed data. Instead, we should put an error on screen that says a callback URL is required. In fact, authentication shouldn't even begin without a valid callback URL.
| 2019-01-26T21:09:06 |
|
uclapi/uclapi | 976 | uclapi__uclapi-976 | [
"920"
] | 1fab29f680a03d979a1f9a32971df87cba5ba1c3 | diff --git a/backend/uclapi/uclapi/settings.py b/backend/uclapi/uclapi/settings.py
--- a/backend/uclapi/uclapi/settings.py
+++ b/backend/uclapi/uclapi/settings.py
@@ -115,7 +115,7 @@
DATABASES = {
'default': {
- 'ENGINE': 'django.db.backends.postgresql',
+ 'ENGINE': 'django_postgrespool2',
'NAME': os.environ.get("DB_UCLAPI_NAME"),
'USER': os.environ.get("DB_UCLAPI_USERNAME"),
'PASSWORD': os.environ.get("DB_UCLAPI_PASSWORD"),
@@ -131,7 +131,7 @@
'PORT': ''
},
'gencache': {
- 'ENGINE': 'django.db.backends.postgresql',
+ 'ENGINE': 'django_postgrespool2',
'NAME': os.environ.get("DB_CACHE_NAME"),
'USER': os.environ.get("DB_CACHE_USERNAME"),
'PASSWORD': os.environ.get("DB_CACHE_PASSWORD"),
@@ -140,6 +140,15 @@
}
}
+# Max connections is pool_size + max_overflow
+# Will idle at pool_size connections, overflow are for spikes in traffic
+
+DATABASE_POOL_ARGS = {
+ 'max_overflow': 15,
+ 'pool_size': 5,
+ 'recycle': 300
+}
+
DATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter']
RAVEN_CONFIG = {
| Scaling of PostgreSQL Connections
We have tried in the past to implement connection pooling for Django <==> PostgreSQL. We should try this again so that the system doesn't get pulled down by too many parallel requests (as could happen in the event of UCL Assistant).
| can we implement https://pypi.org/project/django-postgrespool2/0.2.0/? | 2019-01-29T15:36:16 |
|
uclapi/uclapi | 977 | uclapi__uclapi-977 | [
"527"
] | aaa4f4fa88fc09c1ae403a3753f8ca523b11258e | diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -4,16 +4,25 @@
from dashboard.models import Webhook, WebhookTriggerHistory
from datetime import datetime
from deepdiff import DeepDiff
-import grequests
from django.utils import timezone
+from requests_futures.sessions import FuturesSession
class Command(BaseCommand):
help = 'Diff roombooking result sets and notify relevant webhooks'
+ def add_arguments(self, parser):
+ parser.add_argument(
+ '--debug',
+ action='store_true',
+ dest='debug',
+ help='Print webhook responses',
+ )
+
def handle(self, *args, **options):
self.stdout.write("Triggering webhooks")
+ session = FuturesSession()
# currently locked table is the old one, more recent one is not locked
lock = Lock.objects.all()[0] # there is only ever one lock
@@ -121,10 +130,11 @@ def webhook_filter(booking):
webhooks_to_enact[idx]["payload"] = payload
- if payload["content"] != {}:
+ if payload["content"] != {} and webhook["url"] != "":
unsent_requests.append(
- grequests.post(
- webhook["url"], json=payload, headers={
+ session.post(
+ webhook["url"], json=payload,
+ headers={
"User-Agent": "uclapi-bot/1"
}
)
@@ -132,7 +142,11 @@ def webhook_filter(booking):
self.stdout.write(
"Triggering {} webhooks.".format(len(unsent_requests))
)
- grequests.map(unsent_requests)
+ if("debug" in options):
+ for i in unsent_requests:
+ self.stdout.write(
+ 'response status {0}'.format(i.result().status_code)
+ )
for webhook in webhooks_to_enact:
if webhook["payload"]["content"] != {}:
| Webhooks
They haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. The room data is definitely up to date, however.
| This is *still*, shamefully, broken. Fixes to this will likely come with DevOps improvements as we review our caching operations. | 2019-01-29T20:53:23 |
|
uclapi/uclapi | 989 | uclapi__uclapi-989 | [
"983"
] | caf6158ed399ba58bd0e60295fe9915d847211b2 | diff --git a/backend/uclapi/timetable/app_helpers.py b/backend/uclapi/timetable/app_helpers.py
--- a/backend/uclapi/timetable/app_helpers.py
+++ b/backend/uclapi/timetable/app_helpers.py
@@ -36,7 +36,10 @@
}
_rooms_cache = {}
-
+_site_coord_cache = {}
+_room_coord_cache = {}
+_instance_cache = {}
+_lecturers_cache = {}
_department_name_cache = {}
@@ -111,6 +114,8 @@ def _get_full_department_name(department_code):
def _get_lecturer_details(lecturer_upi):
"""Returns a lecturer's name and email address from their UPI"""
+ if lecturer_upi in _lecturers_cache:
+ return _lecturers_cache[lecturer_upi]
lecturers = get_cache("lecturer")
details = {
"name": "Unknown",
@@ -129,19 +134,23 @@ def _get_lecturer_details(lecturer_upi):
if lecturer.owner:
details["department_id"] = lecturer.owner
details["department_name"] = _get_full_department_name(lecturer.owner)
-
+ _lecturers_cache[lecturer_upi] = details
return details
def _get_instance_details(instid):
+ if instid in _instance_cache:
+ return _instance_cache[instid]
cminstances = get_cache("cminstances")
instance_data = cminstances.objects.get(instid=instid)
instance = ModuleInstance(instance_data.instcode)
- return {
+ data = {
"delivery": instance.delivery.get_delivery(),
"periods": instance.periods.get_periods(),
"instance_code": instance_data.instcode
}
+ _instance_cache[instid] = data
+ return data
def _get_timetable_events(full_modules, stumodules):
@@ -157,9 +166,17 @@ def _get_timetable_events(full_modules, stumodules):
modules = get_cache("module")
bookings = get_cache("booking")
-
+ event_bookings_list = {}
full_timetable = {}
+ modules_chosen = {}
for module in full_modules:
+ key = str(module.moduleid)+" "+str(module.instid)
+ lab_key = key+str(module.modgrpcode)
+ if key in modules_chosen:
+ del modules_chosen[key]
+ modules_chosen[lab_key] = module
+
+ for _, module in modules_chosen.items():
if stumodules:
# Get events for the lab group assigned
# Also include general lecture events (via the or operator)
@@ -180,8 +197,12 @@ def _get_timetable_events(full_modules, stumodules):
module_data = module
instance_data = _get_instance_details(module.instid)
for event in events_data:
- event_bookings = bookings.objects.filter(slotid=event.slotid)
- if len(event_bookings) == 0:
+ if event.slotid not in event_bookings_list:
+ event_bookings_list[event.slotid] = \
+ bookings.objects.filter(slotid=event.slotid)
+ event_bookings = event_bookings_list[event.slotid]
+ # .exists() instead of len so we don't evaluate all of the filter
+ if not event_bookings.exists():
# We have to trust the data in the event because
# no rooms are booked for some weird reason.
for date in _get_real_dates(event):
@@ -359,19 +380,25 @@ def _get_session_type_str(session_type):
def _get_location_coordinates(siteid, roomid):
# First try and get the specific room's location
try:
+ if roomid in _room_coord_cache:
+ return _room_coord_cache[roomid]
location = Location.objects.get(
siteid=siteid,
roomid=roomid
)
+ _room_coord_cache[roomid] = (location.lat, location.lng)
return location.lat, location.lng
except Location.DoesNotExist:
pass
# Now try and get the building's location
try:
+ if siteid in _site_coord_cache:
+ return _site_coord_cache[siteid]
location = SiteLocation.objects.get(
siteid=siteid
)
+ _site_coord_cache[siteid] = (location.lat, location.lng)
return location.lat, location.lng
except SiteLocation.DoesNotExist:
pass
| diff --git a/backend/uclapi/timetable/management/commands/test_personal_timetable.py b/backend/uclapi/timetable/management/commands/test_personal_timetable.py
--- a/backend/uclapi/timetable/management/commands/test_personal_timetable.py
+++ b/backend/uclapi/timetable/management/commands/test_personal_timetable.py
@@ -1,3 +1,5 @@
+import time
+
from django.core.management.base import BaseCommand
from timetable.app_helpers import get_student_timetable
@@ -9,5 +11,7 @@ class Command(BaseCommand):
def handle(self, *args, **options):
upi = input("Please enter the student UPI: ")
+ start_time = time.time()
tt = get_student_timetable(upi)
- print(tt)
+ elapsed_time = time.time() - start_time
+ print(elapsed_time)
| Duplicate Timetable Entries
Currently there is a bug where there are multiple timetable entries for the same event, due to it being booked for different people (lecturer/module leader etc.).
| 2019-02-04T23:06:57 |
|
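For readers skimming the patch above: the core idea is to collapse timetable rows that describe the same module instance, keeping the lab-group-specific booking over the generic copy created by the lecturer's or module leader's booking. Below is a simplified sketch of that idea, not the code merged in the PR; the field names (`moduleid`, `instid`, `modgrpcode`) follow the diff, everything else is illustrative.

```python
from collections import namedtuple

# Stand-in for the ORM rows the timetable helper iterates over.
Module = namedtuple("Module", ["moduleid", "instid", "modgrpcode"])

def dedupe_modules(full_modules):
    """Drop the generic (no-group) copy of a module once a group-specific copy exists."""
    generic = {}   # (moduleid, instid) -> row without a lab-group code
    grouped = []   # rows that carry a lab-group code
    for module in full_modules:
        if module.modgrpcode:
            grouped.append(module)
        else:
            generic[(module.moduleid, module.instid)] = module
    grouped_keys = {(m.moduleid, m.instid) for m in grouped}
    return grouped + [m for key, m in generic.items() if key not in grouped_keys]

rows = [
    Module("COMP0001", 1, None),      # duplicate created by the lecturer's booking
    Module("COMP0001", 1, "LAB-A"),   # the student's actual lab group
]
print(dedupe_modules(rows))           # only the LAB-A row survives
```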
uclapi/uclapi | 1,001 | uclapi__uclapi-1001 | [
"999"
] | 9574e1e65bf13a536506a2dce9fc7aed0001ad8e | diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -461,6 +461,7 @@ def token(request):
"state": state,
"client_id": app.client_id,
"token": token.token,
+ "access_token": token.token,
"scope": json.dumps(s.scope_dict(token.scope.scope_number))
}
| OAuth Standards Compliance
So it turns out that my OAuth 2.0 implementation violates section 4.2.2 of RFC 6749 by calling the UCL API User Token just a `token` and not an `access_token`.
This breaks apps (such as Discourse) that rely on OAuth implementations actually being standards compliant. Whoopsidaisies!
| This is issue #999 because fixing this is an EMERGENCY since we don't follow standards! | 2019-02-06T16:47:28 |
|
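For context on the one-line fix above: RFC 6749 requires the token response to name the credential `access_token`, so strict clients such as Discourse look for that key and ignore a bespoke `token` field. Here is a hedged sketch of the payload shape after the patch; the wrapper function and exact field set are my own framing, only the `token`/`access_token` duplication mirrors the diff.

```python
import json

def build_token_response(state, client_id, token_value, scope_dict):
    """Expose the credential under both names: `access_token` for RFC 6749
    compliant clients, and the legacy `token` so existing UCL API
    integrations keep working."""
    return {
        "ok": True,
        "state": state,
        "client_id": client_id,
        "token": token_value,         # legacy name
        "access_token": token_value,  # name required by RFC 6749
        "scope": json.dumps(scope_dict),
    }

print(build_token_response("xyz", "my-client-id", "uclapi-user-token", {"timetable": True}))
```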
uclapi/uclapi | 1,014 | uclapi__uclapi-1014 | [
"1013"
] | 6c33a0ec9a8e4c0cfe133abbb02942f3060d47a2 | diff --git a/backend/uclapi/workspaces/occupeye/cache.py b/backend/uclapi/workspaces/occupeye/cache.py
--- a/backend/uclapi/workspaces/occupeye/cache.py
+++ b/backend/uclapi/workspaces/occupeye/cache.py
@@ -253,12 +253,13 @@ def cache_all_survey_sensor_states(self, survey_id):
# This is a rare case, so just set it to False.
# Easier than expecting our clients to deal with null
occupied_state = False
- pipeline.hmset(sensor_status_key, {
+ sensor_status = {
"occupied": str(occupied_state),
- "hardware_id": sensor_data["HardwareID"],
- "last_trigger_type": sensor_data["LastTriggerType"],
- "last_trigger_timestamp": sensor_data["LastTriggerTime"]
- })
+ "hardware_id": str(sensor_data["HardwareID"]),
+ "last_trigger_type": str(sensor_data["LastTriggerType"]),
+ "last_trigger_timestamp": str(sensor_data["LastTriggerTime"])
+ }
+ pipeline.hmset(sensor_status_key, sensor_status)
pipeline.execute()
| Workspaces Summary Endpoint Data Out of Date
We have had reports that the Workspaces Summary endpoint is not supplying up to date data. This is a high priority bug if confirmed. Thanks to @henryzhang00 for the report!
| If the header is correct we haven't updated the cache in 2 days??
If that's the case then I need to sort this ASAP because the only thing worse than an API that's down is an API that's supplying faulty data
Are there logs for the cron box that runs cache.py? It should update the header at the bottom, so either we aren't reaching that line or the command just isn't running at all.
Yeah, I checked the cron box logs and found that some code was tripping over itself, so I updated the crontab to give the full cache more breathing room around the 2am cache time. | 2019-02-09T18:38:01 |
|
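The patch above coerces every hash field to `str` before the Redis write. A plausible reading (my assumption, not stated in the thread) is that a `None` or boolean value in a sensor record made the pipeline raise part-way through the refresh, leaving stale summary data behind. A minimal, self-contained sketch of that defensive coercion follows; the field names come from the diff and the sample record is made up.

```python
def to_redis_mapping(sensor_data, occupied_state):
    """Build the hash mapping for one sensor with every value coerced to str.

    Recent redis-py releases refuse values such as None or bool, so coercing
    up front stops one odd sensor record from aborting the whole cache update.
    """
    return {
        "occupied": str(occupied_state),
        "hardware_id": str(sensor_data["HardwareID"]),
        "last_trigger_type": str(sensor_data["LastTriggerType"]),
        "last_trigger_timestamp": str(sensor_data["LastTriggerTime"]),
    }

sample = {"HardwareID": 12345, "LastTriggerType": None, "LastTriggerTime": None}
print(to_redis_mapping(sample, occupied_state=False))
# This mapping is what would be handed to pipeline.hmset(sensor_status_key, ...).
```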
uclapi/uclapi | 1,028 | uclapi__uclapi-1028 | [
"1015"
] | 36d1f0e1ac6ffcd698df4d7462430e3daf334f19 | diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py
--- a/backend/uclapi/dashboard/app_helpers.py
+++ b/backend/uclapi/dashboard/app_helpers.py
@@ -4,9 +4,10 @@
from common.helpers import generate_api_token
from uclapi.settings import (
MEDIUM_ARTICLE_QUANTITY,
- REDIS_UCLAPI_HOST
+ REDIS_UCLAPI_HOST,
+ DEBUG
)
-
+from django.core.management import call_command
import os
import redis
import textwrap
@@ -15,6 +16,11 @@
def get_articles():
r = redis.Redis(host=REDIS_UCLAPI_HOST)
+ if not r.exists("Blog:item:1:url"):
+ if DEBUG:
+ call_command('update_medium')
+ else:
+ return []
pipe = r.pipeline()
articles = []
for i in range(0, MEDIUM_ARTICLE_QUANTITY):
| Medium Articles Bug
Bug in getting Medium articles on the homepage
| 2019-02-11T22:48:44 |
||
uclapi/uclapi | 1,032 | uclapi__uclapi-1032 | [
"1031"
] | dd9f5c25621e67fa8581925b4711fd1aca9ca399 | diff --git a/backend/uclapi/timetable/app_helpers.py b/backend/uclapi/timetable/app_helpers.py
--- a/backend/uclapi/timetable/app_helpers.py
+++ b/backend/uclapi/timetable/app_helpers.py
@@ -284,7 +284,8 @@ def _get_timetable_events(full_modules, stumodules):
"session_type_str": _get_session_type_str(
event.moduletype
),
- "contact": booking.condisplayname
+ "contact": booking.condisplayname,
+ "instance": instance_data
}
# If this is student module data, add in the group code
| AMP/Instance Data not returned by the personal timetable endpoints
`timetable/app_helpers.py`, line 288 should have the following:
```
"instance": instance_data
```
| 2019-02-12T15:01:34 |
||
uclapi/uclapi | 1,090 | uclapi__uclapi-1090 | [
"1087"
] | 8619c06e9f93b18bad3e5059368d9d81781ac0e2 | diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -4,7 +4,8 @@
from oauth.scoping import Scopes
from common.helpers import PrettyJsonResponse
-from .app_helpers import is_url_safe
+from .app_helpers import (is_url_unsafe, NOT_HTTPS,
+ NOT_VALID, URL_BLACKLISTED, NOT_PUBLIC)
from .models import App, User
@@ -251,12 +252,20 @@ def set_callback_url(request):
})
response.status_code = 400
return response
-
- if not is_url_safe(new_callback_url):
+ url_not_safe_saved = is_url_unsafe(new_callback_url)
+ if url_not_safe_saved:
+ if url_not_safe_saved == NOT_HTTPS:
+ message = "The requested callback URL does not "\
+ "start with 'https://'."
+ elif url_not_safe_saved == NOT_VALID:
+ message = "The requested callback URL is not valid."
+ elif url_not_safe_saved == URL_BLACKLISTED:
+ message = "The requested callback URL is forbidden."
+ elif url_not_safe_saved == NOT_PUBLIC:
+ message = "The requested callback URL is not publicly available."
response = PrettyJsonResponse({
"success": False,
- "message": ("The requested callback URL"
- " is not valid.")
+ "message": message
})
response.status_code = 400
return response
diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py
--- a/backend/uclapi/dashboard/app_helpers.py
+++ b/backend/uclapi/dashboard/app_helpers.py
@@ -13,6 +13,10 @@
import textwrap
import validators
+NOT_HTTPS = 1
+NOT_VALID = 2
+URL_BLACKLISTED = 3
+NOT_PUBLIC = 4
def get_articles():
r = redis.Redis(host=REDIS_UCLAPI_HOST)
@@ -73,23 +77,25 @@ def generate_app_client_secret():
return client_secret
-def is_url_safe(url):
+def is_url_unsafe(url):
if not url.startswith("https://"):
- return False
+ return NOT_HTTPS
if not validators.url(url, public=True):
- return False
+ if validators.url(url, public=False):
+ return NOT_PUBLIC
+ return NOT_VALID
whitelist_urls = os.environ["WHITELISTED_CALLBACK_URLS"].split(';')
if url in whitelist_urls:
- return True
+ return 0
forbidden_urls = os.environ["FORBIDDEN_CALLBACK_URLS"].split(';')
for furl in forbidden_urls:
if furl in url:
- return False
+ return URL_BLACKLISTED
- return True
+ return 0
def generate_secret():
diff --git a/backend/uclapi/dashboard/webhook_views.py b/backend/uclapi/dashboard/webhook_views.py
--- a/backend/uclapi/dashboard/webhook_views.py
+++ b/backend/uclapi/dashboard/webhook_views.py
@@ -2,7 +2,7 @@
from .models import App, User
from dashboard.tasks import keen_add_event_task as keen_add_event
import requests
-from .app_helpers import generate_secret, is_url_safe
+from .app_helpers import generate_secret, is_url_unsafe
def user_owns_app(user_id, app_id):
@@ -84,7 +84,7 @@ def edit_webhook(request):
webhook = app.webhook
if url != webhook.url:
- if not is_url_safe(url):
+ if is_url_unsafe(url):
response = PrettyJsonResponse({
"ok": False,
"message": (
| diff --git a/backend/uclapi/dashboard/tests.py b/backend/uclapi/dashboard/tests.py
--- a/backend/uclapi/dashboard/tests.py
+++ b/backend/uclapi/dashboard/tests.py
@@ -6,7 +6,7 @@
from django.conf import settings
import redis
-from .app_helpers import is_url_safe, generate_api_token, \
+from .app_helpers import is_url_unsafe, generate_api_token, \
generate_app_client_id, generate_app_client_secret, \
generate_app_id, get_articles
from .middleware.fake_shibboleth_middleware import FakeShibbolethMiddleWare
@@ -102,13 +102,13 @@ def test_post_agreement(self):
self.assertContains(res, "Test testington")
def test_unsafe_urls(self):
- assert not is_url_safe("ftp://test.com")
- assert not is_url_safe("https://uclapi.com/callback")
- assert not is_url_safe("ssh://uclapi.com/callback")
+ assert is_url_unsafe("ftp://test.com")
+ assert is_url_unsafe("https://uclapi.com/callback")
+ assert is_url_unsafe("ssh://uclapi.com/callback")
def test_safe_url(self):
- assert is_url_safe("https://mytestapp.com/callback")
- assert is_url_safe("https://uclapiexample.com/callback")
+ assert not is_url_unsafe("https://mytestapp.com/callback")
+ assert not is_url_unsafe("https://uclapiexample.com/callback")
class FakeShibbolethMiddleWareTestCase(TestCase):
@@ -183,38 +183,38 @@ def test_generate_app_id(self):
class URLSafetyTestCase(TestCase):
def test_is_url_safe_full_success(self):
- self.assertTrue(
- is_url_safe("https://example.com")
+ self.assertFalse(
+ is_url_unsafe("https://example.com")
)
def test_is_url_safe_https_failure(self):
- self.assertFalse(
- is_url_safe("http://example.com")
+ self.assertTrue(
+ is_url_unsafe("http://example.com")
)
def test_is_url_safe_validators_failure(self):
- self.assertFalse(
- is_url_safe("https://asdasd.asd.asd.asd.1234")
+ self.assertTrue(
+ is_url_unsafe("https://asdasd.asd.asd.asd.1234")
)
def test_is_url_safe_validators_failure_private(self):
- self.assertFalse(
- is_url_safe("https://127.0.0.1")
+ self.assertTrue(
+ is_url_unsafe("https://127.0.0.1")
)
def test_is_url_safe_validators_failure_private2(self):
- self.assertFalse(
- is_url_safe("https://10.0.0.1")
+ self.assertTrue(
+ is_url_unsafe("https://10.0.0.1")
)
def test_is_url_safe_forbidden(self):
- self.assertFalse(
- is_url_safe("https://uclapi.com/test/test")
+ self.assertTrue(
+ is_url_unsafe("https://uclapi.com/test/test")
)
def test_is_url_safe_forbidden2(self):
- self.assertFalse(
- is_url_safe("https://staging.ninja/test/test")
+ self.assertTrue(
+ is_url_unsafe("https://staging.ninja/test/test")
)
# Testcase for whitelisted URL needed
@@ -311,7 +311,7 @@ def test_edit_webhook_POST_user_does_not_own_app(self):
)
@patch("dashboard.webhook_views.verify_ownership", lambda *args: False)
- @patch("dashboard.webhook_views.is_url_safe", lambda *args: True)
+ @patch("dashboard.webhook_views.is_url_unsafe", lambda *args: False)
def test_edit_webhook_POST_ownership_verification_fail(
self
):
@@ -338,7 +338,7 @@ def test_edit_webhook_POST_ownership_verification_fail(
)
@patch("dashboard.webhook_views.verify_ownership", lambda *args: True)
- @patch("dashboard.webhook_views.is_url_safe", lambda *args: True)
+ @patch("dashboard.webhook_views.is_url_unsafe", lambda *args: False)
@patch("keen.add_event", lambda *args: None)
def test_edit_webhook_POST_user_owns_app_changing_url_verification_ok(
self
@@ -831,7 +831,7 @@ def test_change_callback_url_not_valid(self):
self.assertEqual(response.status_code, 400)
self.assertEqual(
content["message"],
- "The requested callback URL is not valid."
+ "The requested callback URL does not start with 'https://'."
)
def test_change_callback_url_success(self):
| Add verbosity to oauth callback URL error message
We should tell users what's wrong with their URL, for example when it doesn't start with https://, etc.
| 2019-02-26T23:27:08 |
|
uclapi/uclapi | 1,125 | uclapi__uclapi-1125 | [
"1024"
] | bd952f0522ba681a2b153abda4c9a36939860d84 | diff --git a/backend/uclapi/workspaces/occupeye/cache.py b/backend/uclapi/workspaces/occupeye/cache.py
--- a/backend/uclapi/workspaces/occupeye/cache.py
+++ b/backend/uclapi/workspaces/occupeye/cache.py
@@ -52,13 +52,19 @@ def cache_maps_for_survey(self, survey_id):
survey_maps_list_key = (
self._const.SURVEY_MAPS_LIST_KEY
).format(survey_id)
- pipeline = self._redis.pipeline()
survey_maps_data = authenticated_request(
self._const.URL_MAPS_BY_SURVEY.format(survey_id),
self.bearer_token
)
+ pipeline = self._redis.pipeline()
+
+ self.delete_maps(
+ pipeline,
+ survey_id,
+ survey_maps_list_key,
+ survey_maps_data
+ )
- pipeline.delete(survey_maps_list_key)
for survey_map in survey_maps_data:
survey_map_id = self._const.SURVEY_MAP_DATA_KEY.format(
survey_id,
@@ -76,6 +82,7 @@ def cache_maps_for_survey(self, survey_id):
survey_maps_list_key,
survey_map["MapID"]
)
+
pipeline.execute()
def cache_survey_data(self):
@@ -85,12 +92,11 @@ def cache_survey_data(self):
helper function above to tie all maps to surveys.
"""
pipeline = self._redis.pipeline()
- pipeline.delete(self._const.SURVEYS_LIST_KEY)
surveys_data = authenticated_request(
self._const.URL_SURVEYS,
self.bearer_token
)
-
+ self.delete_surveys(pipeline, surveys_data)
for survey in surveys_data:
survey_id = survey["SurveyID"]
survey_key = self._const.SURVEY_DATA_KEY.format(
@@ -189,8 +195,13 @@ def cache_survey_sensor_data(self, survey_id):
survey_sensors_list_key = (
self._const.SURVEY_SENSORS_LIST_KEY
).format(survey_id)
+ self.delete_sensors(
+ pipeline,
+ survey_id,
+ survey_sensors_list_key,
+ all_sensors_data
+ )
- pipeline.delete(survey_sensors_list_key)
for sensor_data in all_sensors_data:
hardware_id = sensor_data["HardwareID"]
sensor_data_key = (
@@ -584,3 +595,124 @@ def feed_cache(self, full):
self._redis.set(last_modified_key, current_timestamp)
print("[+] Done")
+
+ def delete_maps(
+ self, pipeline, survey_id,
+ survey_maps_list_key, survey_maps_data
+ ):
+ """Delete maps that no longer exist in a survey"""
+ redis_survey_maps_set = set(self._redis.lrange(
+ survey_maps_list_key,
+ 0,
+ self._redis.llen(survey_maps_list_key)
+ ))
+ api_survey_maps_id_set = set(
+ [survey_map["MapID"] for survey_map in survey_maps_data]
+ )
+ maps_to_delete = redis_survey_maps_set - api_survey_maps_id_set
+
+ pipeline.delete(survey_maps_list_key)
+ for map_id in maps_to_delete:
+ pipeline.delete(
+ self._const.SURVEY_MAP_DATA_KEY.format(survey_id, map_id)
+ )
+ pipeline.delete(
+ self._const.SURVEY_MAP_VMAX_X_KEY.format(survey_id, map_id)
+ )
+ pipeline.delete(
+ self._const.SURVEY_MAP_VMAX_Y_KEY.format(survey_id, map_id)
+ )
+ pipeline.delete(
+ self._const.SURVEY_MAP_VIEWBOX_KEY.format(survey_id, map_id)
+ )
+ survey_maps_sensors_key = (
+ self._const.SURVEY_MAP_SENSORS_LIST_KEY.format(
+ survey_id,
+ map_id
+ )
+ )
+ survey_maps_sensors_list = self._redis.lrange(
+ survey_maps_sensors_key,
+ 0,
+ self._redis.llen(survey_maps_sensors_key)
+ )
+ for sensor_id in survey_maps_sensors_list:
+ pipeline.delete(
+ self._const.SURVEY_MAP_SENSOR_PROPERTIES_KEY.format(
+ survey_id,
+ map_id,
+ sensor_id
+ )
+ )
+ image_id = self._redis.hgetall(
+ self._const.SURVEY_MAP_DATA_KEY.format(survey_id, map_id)
+ )['image_id']
+ self.delete_image(pipeline, image_id)
+
+ pipeline.delete(survey_maps_sensors_key)
+
+ def delete_surveys(self, pipeline, surveys_data):
+ """Delete surveys that no longer exist"""
+ redis_survey_set = set(self._redis.lrange(
+ self._const.SURVEYS_LIST_KEY,
+ 0,
+ self._redis.llen(self._const.SURVEYS_LIST_KEY)
+ ))
+ api_survey_id_set = set(
+ [survey["SurveyID"] for survey in surveys_data]
+ )
+ surveys_to_delete = redis_survey_set - api_survey_id_set
+ pipeline.delete(self._const.SURVEYS_LIST_KEY)
+ for survey_id in surveys_to_delete:
+ pipeline.delete(self._const.SURVEY_DATA_KEY.format(survey_id))
+ pipeline.delete(self._const.SUMMARY_CACHE_SURVEY.format(survey_id))
+ pipeline.delete(
+ self._const.SURVEY_MAX_TIMESTAMP_KEY.format(survey_id)
+ )
+
+ survey_maps_list_key = self._const.SURVEY_MAPS_LIST_KEY.format(
+ survey_id
+ )
+ self.delete_maps(pipeline, survey_id, survey_maps_list_key, [])
+
+ survey_sensors_list_key = (
+ self._const.SURVEY_SENSORS_LIST_KEY.format(survey_id)
+ )
+ self.delete_sensors(
+ pipeline,
+ survey_id,
+ survey_sensors_list_key,
+ []
+ )
+
+ def delete_sensors(
+ self, pipeline, survey_id,
+ survey_sensors_list_key, all_sensors_data
+ ):
+ """Delete sensors that no longer exist in a survey"""
+ redis_survey_sensors_set = set(self._redis.lrange(
+ survey_sensors_list_key,
+ 0,
+ self._redis.llen(survey_sensors_list_key)
+ ))
+ api_survey_sensors_id_set = set(
+ [survey_sensor["HardwareID"] for survey_sensor in all_sensors_data]
+ )
+ sensors_to_delete = (
+ redis_survey_sensors_set - api_survey_sensors_id_set
+ )
+ pipeline.delete(survey_sensors_list_key)
+ for sensor_id in sensors_to_delete:
+ pipeline.delete(
+ self._const.SURVEY_SENSOR_DATA_KEY.format(survey_id, sensor_id)
+ )
+ pipeline.delete(
+ self._const.SURVEY_SENSOR_STATUS_KEY.format(
+ survey_id, sensor_id
+ )
+ )
+
+ def delete_image(self, pipeline, image_id):
+ "Deletes images that no longer are used by the API"""
+ pipeline.delete(self._const.IMAGE_BASE64_KEY.format(image_id))
+ pipeline.delete(self._const.IMAGE_CONTENT_TYPE_KEY.format(image_id))
| Workspaces Endpoint for Live Maps throws an error 500 if an incorrect survey_id / map_id combo is given
As explained in the title.
This should instead yield a proper 400 error as we do for other endpoints.
| 2019-03-09T19:18:43 |
||
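The issue asks for a 400 rather than a 500 when a survey/map pair does not exist, and the patch approaches it from the cache side by pruning surveys, maps and sensors that have disappeared upstream. As a complement, here is a hedged sketch of the kind of view-level guard that returns the friendlier error; the function name, the `known_maps` lookup and the message text are all invented for illustration.

```python
from django.http import JsonResponse

def live_map_view(request, survey_id, map_id, known_maps):
    """Reject unknown survey/map combinations with a 400 instead of crashing.

    `known_maps` stands in for whatever lookup the real view performs against
    the cached OccupEye data; it is not an actual uclapi helper.
    """
    if (survey_id, map_id) not in known_maps:
        return JsonResponse(
            {"ok": False, "error": "Survey ID and Map ID combination does not exist."},
            status=400,
        )
    # ...otherwise build and return the real map/image response...
    return JsonResponse({"ok": True})
```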
uclapi/uclapi | 1,218 | uclapi__uclapi-1218 | [
"1214"
] | b88eda93ff2ed57557f21ad09f6ecb64bbf19ec4 | diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -202,6 +202,13 @@ def delete_app(request):
else:
app = apps[0]
app.deleted = True
+ webhook = app.webhook
+ webhook.url = ""
+ webhook.siteid = ""
+ webhook.roomid = ""
+ webhook.contact = ""
+ webhook.enabled = False
+ webhook.save()
app.save()
keen_add_event.delay("App deleted", {
diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -50,7 +50,7 @@ def handle(self, *args, **options):
ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)
- webhooks = Webhook.objects.all()
+ webhooks = Webhook.objects.filter(app__deleted=False)
# assumption: list of webhooks will be longer than ddiff
num_bookings_added = 0
| Webhooks are not deleted when an App is deleted
Create an app and link a webhook, then delete the app. The webhook will still fire.
| 2019-04-06T17:17:31 |
||
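The fix above has two halves: blank out and disable the webhook when its app is soft-deleted, and skip deleted apps when selecting webhooks to fire. A short sketch of the second half in Django ORM terms; the model and field names follow the diff, while the wrapper function is invented and the import assumes the dashboard app's models module.

```python
from dashboard.models import Webhook

def webhooks_to_consider():
    """Webhooks eligible for delivery: their owning App must not be soft-deleted.

    App.deleted is the existing soft-delete flag, so the app__deleted
    traversal filters out orphaned webhooks in a single query.
    """
    return Webhook.objects.filter(app__deleted=False)
```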
uclapi/uclapi | 1,219 | uclapi__uclapi-1219 | [
"1215"
] | 897d44b3a453308966b2050813b11f9058e84cd8 | diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -7,6 +7,7 @@
from deepdiff import DeepDiff
from django.utils import timezone
from requests_futures.sessions import FuturesSession
+from django.db.models import Q
class Command(BaseCommand):
@@ -39,11 +40,13 @@ def handle(self, *args, **options):
old_bookings = _serialize_bookings(
old_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
new_bookings = _serialize_bookings(
new_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
| Data exposed by webhooks, not shown by /bookings
An example is Gordon St (22) 4.01, which is provided by webhooks when bookings change but is not usually returned by /bookings.
| 2019-04-07T13:42:03 |
||
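The patch narrows the webhook diff to the same rooms the public /bookings endpoint exposes, using a Q-object disjunction. In isolation the filter looks like the sketch below; the 'CB' bookable type and site IDs 238/240 come straight from the diff, while the wrapper function is illustrative.

```python
from django.db.models import Q

# Mirror /bookings: centrally bookable rooms plus the two whitelisted sites.
PUBLIC_ROOM_FILTER = Q(bookabletype="CB") | Q(siteid="238") | Q(siteid="240")

def public_future_bookings(booking_model, now):
    """Future bookings restricted to rooms that /bookings would also return."""
    return booking_model.objects.filter(PUBLIC_ROOM_FILTER, startdatetime__gt=now)
```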
uclapi/uclapi | 1,313 | uclapi__uclapi-1313 | [
"1165"
] | 73a0cf30c963380e1aa70de8421f2338a99054fe | diff --git a/backend/uclapi/workspaces/occupeye/api.py b/backend/uclapi/workspaces/occupeye/api.py
--- a/backend/uclapi/workspaces/occupeye/api.py
+++ b/backend/uclapi/workspaces/occupeye/api.py
@@ -55,8 +55,20 @@ def get_surveys(self, survey_filter):
"active": strtobool(survey_data["active"]),
"start_time": survey_data["start_time"],
"end_time": survey_data["end_time"],
- "staff_survey": strtobool(survey_data["staff_survey"])
- }
+ "staff_survey": strtobool(survey_data["staff_survey"]),
+ "location": {
+ "coordinates": {
+ "lat": survey_data["lat"],
+ "lng": survey_data["long"]
+ },
+ "address": [
+ survey_data["address1"],
+ survey_data["address2"],
+ survey_data["address3"],
+ survey_data["address4"]
+ ]
+ }
+ }
# If we want to filter out staff surveys and this is a staff
# one then we skip over it.
if (
diff --git a/backend/uclapi/workspaces/occupeye/cache.py b/backend/uclapi/workspaces/occupeye/cache.py
--- a/backend/uclapi/workspaces/occupeye/cache.py
+++ b/backend/uclapi/workspaces/occupeye/cache.py
@@ -118,7 +118,12 @@ def cache_survey_data(self):
staff_survey = str(
int(survey["SurveyID"]) in self._const.STAFF_SURVEY_IDS
)
-
+ if survey["Name"] in self._const.SURVEY_LOCATIONS:
+ location_data = self._const.SURVEY_LOCATIONS[survey["Name"]]
+ else:
+ location_data = {
+ "lat": "", "long": "", "address": ["", "", "", ""]
+ }
pipeline.hmset(
survey_key,
{
@@ -127,7 +132,13 @@ def cache_survey_data(self):
"name": survey["Name"],
"start_time": survey["StartTime"],
"end_time": survey["EndTime"],
- "staff_survey": staff_survey
+ "staff_survey": staff_survey,
+ "lat": location_data["lat"],
+ "long": location_data["long"],
+ "address1": location_data["address"][0],
+ "address2": location_data["address"][1],
+ "address3": location_data["address"][2],
+ "address4": location_data["address"][3]
}
)
pipeline.lpush(
diff --git a/backend/uclapi/workspaces/occupeye/constants.py b/backend/uclapi/workspaces/occupeye/constants.py
--- a/backend/uclapi/workspaces/occupeye/constants.py
+++ b/backend/uclapi/workspaces/occupeye/constants.py
@@ -84,3 +84,305 @@ class OccupEyeConstants():
# Set a filter for surveys designed for staff only
STAFF_SURVEY_IDS = [59]
VALID_SURVEY_FILTERS = ["all", "staff", "student"]
+
+ # address mapping
+
+ SURVEY_LOCATIONS = {
+ "UCL Torrington 1-19 113": {
+ "lat": "51.521886", "long": "-0.134415",
+ "address": [
+ "1-19 Torrington Pl",
+ "Fitzrovia",
+ "London",
+ "WC1E 7HB"
+ ]
+ },
+ "UCL Student Centre": {
+ "lat": "51.524916", "long": "-0.132371",
+ "address": [
+ "27-28 Gordon Square",
+ "Bloomsbury",
+ "London",
+ "WC1H 0AH"
+ ]
+ },
+ "UCL SSEES Library": {
+ "lat": "51.525342", "long": "-0.131602",
+ "address": [
+ "16 Taviton St",
+ "Bloomsbury",
+ "London",
+ "WC1H 0BW"
+ ]
+ },
+ "UCL SENIT Suite": {
+ "lat": "51.524280", "long": "-0.133026",
+ "address": [
+ "Wilkins Building",
+ "Gower St",
+ "London",
+ "WC1E 6BT"
+ ]
+ },
+ "UCL Senate House Hub": {
+ "lat": "51.521094", "long": "-0.128735",
+ "address": [
+ "Senate House University of London",
+ "Malet St",
+ "London",
+ "WC1E 7HU"
+ ]
+ },
+ "UCL Science Library": {
+ "lat": "51.523556", "long": "-0.132588",
+ "address": [
+ "DMS Watson Building",
+ "Malet Place",
+ "London",
+ "WC1E 6BT"
+ ]
+ },
+ "UCL School of Pharmacy Library": {
+ "lat": "51.524967", "long": "-0.122872",
+ "address": [
+ "29-39 Brunswick Square",
+ "Bloomsbury",
+ "London",
+ "WC1N 1AX"
+ ]
+ },
+ "UCL Main Library": {
+ "lat": "51.524748", "long": "-0.133535",
+ "address": [
+ "Wilkins Building",
+ "Gower St",
+ "London",
+ "WC1E 6BT"
+ ]
+ },
+ "UCL Language & Speech Science Library": {
+ "lat": "51.525847", "long": "-0.122785",
+ "address": [
+ "2 Wakefield St",
+ "Kings Cross",
+ "London",
+ "WC1N 1PJ"
+ ]
+ },
+ "UCL Institute of Orthopaedics Library": {
+ "lat": "51.631529", "long": "-0.306551",
+ "address": [
+ "Sir Herbert Seddon Teaching Centre",
+ "Royal National Orthopaedic Hospital",
+ "Brockley Hill, Stanmore",
+ "HA7 4LP"
+ ]
+ },
+ "UCL Institute of Neurology, Queen Square Library": {
+ "lat": "51.522398", "long": "-0.122973",
+ "address": [
+ "23 Queen Square",
+ "London",
+ "WC1N 3BG",
+ ""
+ ]
+ },
+ "UCL Institute of Education Library": {
+ "lat": "51.522897", "long": "-0.127864",
+ "address": [
+ "20 Bedford Way",
+ "London",
+ "WC1H 0AL",
+ ""
+ ]
+ },
+ "UCL Institute of Archaeology Library": {
+ "lat": "51.524906", "long": "-0.131578",
+ "address": [
+ "31-34 Gordon Square",
+ "WC1H 0PY",
+ "",
+ ""
+ ]
+ },
+ "UCL Great Ormond Street Institute of Child Health Library": {
+ "lat": "51.5232287", "long": "-0.1200982",
+ "address": [
+ "30 Guilford Street",
+ "London",
+ "WC1N 1EH",
+ ""
+ ]
+ },
+ "UCL Eastman Dental Library": {
+ "lat": "51.526314", "long": "-0.117660",
+ "address": [
+ "256 Gray's Inn Road",
+ "King's Cross",
+ "London",
+ "WC1X 8LD"
+ ]
+ },
+ "UCL Ear Institute & Hearing Loss Library": {
+ "lat": "51.529268", "long": "-0.119571",
+ "address": [
+ "330-336 Grays Inn Rd",
+ "Kings Cross",
+ "London",
+ "WC1X 8EE"
+ ]
+ },
+ "UCL Cruciform Hub": {
+ "lat": "51.524054", "long": "-0.135032",
+ "address": [
+ "Cruciform Building, Gower St",
+ "Fitzrovia",
+ "London",
+ "WC1E 6BT"
+ ]
+ },
+ "UCL Christopher Ingold G20": {
+ "lat": "51.525370", "long": "-0.132171",
+ "address": [
+ "20 Gordon St",
+ "Kings Cross",
+ "London",
+ "WC1H 0AJ"
+ ]
+ },
+ "UCL chadwick B04": {
+ "lat": "51.524233", "long": "-0.134336",
+ "address": [
+ "Chadwick building, Gower St",
+ "Bloomsbury",
+ "London",
+ "WC1E 6DE"
+ ]
+ },
+ "UCL Chadwick 223": {
+ "lat": "51.524233", "long": "-0.134336",
+ "address": [
+ "Chadwick building, Gower St",
+ "Bloomsbury",
+ "London",
+ "WC1E 6DE"
+ ]
+ },
+ "UCL Bedfordway G11": {
+ "lat": "51.523857", "long": "-0.128845",
+ "address": [
+ "26 Bedford Way",
+ "Bloomsbury",
+ "London",
+ "WC1H 0AP"
+ ]
+ },
+ "UCL Bedfordway 316": {
+ "lat": "51.523857", "long": "-0.128845",
+ "address": [
+ "26 Bedford Way",
+ "Bloomsbury",
+ "London",
+ "WC1H 0AP"
+ ]
+ },
+ "UCL Bartlett Library": {
+ "lat": "51.526812", "long": "-0.129855",
+ "address": [
+ "14 Upper Woburn Pl",
+ "Bloomsbury",
+ "London",
+ "WC1H 0NN"
+ ]
+ },
+ "UCL Archaeology 501": {
+ "lat": "51.524908", "long": "-0.131575",
+ "address": [
+ "31-34 Gordon Square",
+ "Kings Cross",
+ "London",
+ "WC1H 0PY"
+ ]
+ },
+ "UCL Archaeology 117": {
+ "lat": "51.524908", "long": "-0.131575",
+ "address": [
+ "31-34 Gordon Square",
+ "Kings Cross",
+ "London",
+ "WC1H 0PY"
+ ]
+ },
+ "UCL Anatomy Hub ": {
+ "lat": "51.523625", "long": "-0.133624",
+ "address": [
+ "Anatomy Building",
+ "Gower St",
+ "London",
+ "WC1E 6XA"
+ ]
+ },
+ "UCL 23 Gordon Square history cluster": {
+ "lat": "51.524497", "long": "-0.132115",
+ "address": [
+ "23 Gordon Square",
+ "London",
+ "WC1H 0AG",
+ ""
+ ]
+ },
+ "The Joint Library of Ophthalmology": {
+ "lat": "51.527235", "long": "-0.091205",
+ "address": [
+ "11-43 Bath Street",
+ "London",
+ "EC1V 9EL",
+ ""
+ ]
+ },
+ "Royal Free Hospital Medical Library": {
+ "lat": "51.552507", "long": "-0.165783",
+ "address": [
+ "Rowland Hill Street",
+ "Hampstead",
+ "London",
+ "NW3 2PF"
+ ]
+ },
+ "Graduate Hub - RP Dev Testing": {
+ "lat": "51.524143", "long": "-0.133525",
+ "address": [
+ "South Wing, Gower St",
+ "Kings Cross",
+ "London",
+ "WC1E 6DE",
+ ]
+ },
+ "Graduate Hub": {
+ "lat": "51.524143", "long": "-0.133525",
+ "address": [
+ "South Wing, Gower St",
+ "Kings Cross",
+ "London",
+ "WC1E 6DE",
+ ]
+ },
+ "Foster Court": {
+ "lat": "51.523555", "long": "-0.132497",
+ "address": [
+ "Foster Court, Gower St",
+ "Bloomsbury",
+ "London",
+ "WC1E 6BT"
+ ]
+ },
+ "1 Saint Martin Le Grand": {
+ "lat": "51.516380", "long": "-0.097666",
+ "address": [
+ "1 St Martin's Le Grand",
+ "London",
+ "EC1A 4NP",
+ ""
+ ]
+ }
+ }
| diff --git a/backend/uclapi/workspaces/tests.py b/backend/uclapi/workspaces/tests.py
--- a/backend/uclapi/workspaces/tests.py
+++ b/backend/uclapi/workspaces/tests.py
@@ -91,7 +91,13 @@ def test_get_surveys(self):
"name": "test survey 1",
"start_time": "10:00",
"end_time": "12:00",
- "staff_survey": str(True)
+ "staff_survey": str(True),
+ "lat": "3.14159",
+ "long": "-0.500100",
+ "address1": "some building",
+ "address2": "some street",
+ "address3": "some city",
+ "address4": "postcode please"
}
)
pipeline.hmset(
@@ -102,7 +108,13 @@ def test_get_surveys(self):
"name": "test survey 2",
"start_time": "09:00",
"end_time": "17:00",
- "staff_survey": str(False)
+ "staff_survey": str(False),
+ "lat": "2.14159",
+ "long": "-1.500100",
+ "address1": "some building2",
+ "address2": "some street2",
+ "address3": "some city2",
+ "address4": "postcode please2"
}
)
pipeline.expire("occupeye:surveys", 20)
@@ -117,6 +129,10 @@ def test_get_surveys(self):
len(surveys),
2
)
+ self.assertEqual(
+ len(surveys[0]),
+ 8
+ )
self.assertEqual(
surveys[0]["id"],
@@ -138,6 +154,30 @@ def test_get_surveys(self):
self.assertTrue(
strtobool(str(surveys[0]["staff_survey"]))
)
+ self.assertEqual(
+ surveys[0]["location"]["coordinates"]["lat"],
+ "3.14159"
+ )
+ self.assertEqual(
+ surveys[0]["location"]["coordinates"]["lng"],
+ "-0.500100"
+ )
+ self.assertEqual(
+ surveys[0]["location"]["address"][0],
+ "some building"
+ )
+ self.assertEqual(
+ surveys[0]["location"]["address"][1],
+ "some street"
+ )
+ self.assertEqual(
+ surveys[0]["location"]["address"][2],
+ "some city"
+ )
+ self.assertEqual(
+ surveys[0]["location"]["address"][3],
+ "postcode please"
+ )
self.assertEqual(
surveys[1]["id"],
@@ -159,3 +199,27 @@ def test_get_surveys(self):
self.assertFalse(
strtobool(str(surveys[1]["staff_survey"]))
)
+ self.assertEqual(
+ surveys[1]["location"]["coordinates"]["lat"],
+ "2.14159"
+ )
+ self.assertEqual(
+ surveys[1]["location"]["coordinates"]["lng"],
+ "-1.500100"
+ )
+ self.assertEqual(
+ surveys[1]["location"]["address"][0],
+ "some building2"
+ )
+ self.assertEqual(
+ surveys[1]["location"]["address"][1],
+ "some street2"
+ )
+ self.assertEqual(
+ surveys[1]["location"]["address"][2],
+ "some city2"
+ )
+ self.assertEqual(
+ surveys[1]["location"]["address"][3],
+ "postcode please2"
+ )
| Provide Coordinates of Each Workspaces Survey
This would help apps like UCL Assistant sort libraries by proximity to user, and navigate users to study space buildings.
| Just to clarify, this would involve adding location data (lat, lon at least, address etc is a plus) to the survey data returned by the various workspace endpoints (e.g. `/workspaces/sensors/summary`)
Should have this done by tonight; it will not be the most elegant solution though, since manual data entry is needed because we don't have any databases containing some of the buildings (they don't use the same room bookings system :( ). We could get less accurate results by using the site they are closest to/in, but I'd rather maintain accuracy. | 2019-05-23T20:07:44 |
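For reference, each survey gains a `location` block with the patch; a sketch of the resulting shape is below. The coordinates and address for the Student Centre are taken from the diff, while the remaining field values are illustrative.

```python
survey_example = {
    "id": 0,                      # illustrative
    "name": "UCL Student Centre",
    "active": True,
    "start_time": "00:00",        # illustrative
    "end_time": "24:00",          # illustrative
    "staff_survey": False,
    "location": {
        "coordinates": {"lat": "51.524916", "lng": "-0.132371"},
        "address": ["27-28 Gordon Square", "Bloomsbury", "London", "WC1H 0AH"],
    },
}
```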
uclapi/uclapi | 1,390 | uclapi__uclapi-1390 | [
"1389"
] | d61decf77a762cd89999a12c471a04df02bf88fb | diff --git a/backend/uclapi/dashboard/views.py b/backend/uclapi/dashboard/views.py
--- a/backend/uclapi/dashboard/views.py
+++ b/backend/uclapi/dashboard/views.py
@@ -4,6 +4,7 @@
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
+from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils.http import quote
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
@@ -19,6 +20,21 @@
@csrf_exempt
def shibboleth_callback(request):
+ # We first check whether the user is a member of any UCL Intranet Groups.
+ # This is a quick litmus test to determine whether they should have
+ # access to the dashboard.
+ # We deny access to test accounts and alumni, neither of which have
+ # this Shibboleth attribute.
+ if 'HTTP_UCLINTRANETGROUPS' not in request.META:
+ response = HttpResponse(
+ (
+ "Error 403 - denied. <br>"
+ "The API Dashboard is only assessible to active UCL users."
+ )
+ )
+ response.status_code = 403
+ return response
+
# should auth user login or signup
# then redirect to dashboard homepage
eppn = request.META['HTTP_EPPN']
diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -6,6 +6,7 @@
from django.core import signing
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signing import TimestampSigner
+from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils.http import quote
from django.views.decorators.csrf import (
@@ -131,13 +132,34 @@ def shibcallback(request):
app = App.objects.get(client_id=client_id)
eppn = request.META['HTTP_EPPN']
- groups = request.META['HTTP_UCLINTRANETGROUPS']
cn = request.META['HTTP_CN']
department = request.META['HTTP_DEPARTMENT']
given_name = request.META['HTTP_GIVENNAME']
display_name = request.META['HTTP_DISPLAYNAME']
employee_id = request.META['HTTP_EMPLOYEEID']
+ # We check whether the user is a member of any UCL Intranet Groups.
+ # This is a quick litmus test to determine whether they should be able to
+ # use an OAuth application.
+ # We deny access to alumni, which does not have this Shibboleth attribute.
+ # Test accounts also do not have this attribute, but we can check the
+ # department attribute for the Shibtests department.
+ # This lets App Store reviewers log in to apps that use the UCL API.
+ if 'HTTP_UCLINTRANETGROUPS' in request.META:
+ groups = request.META['HTTP_UCLINTRANETGROUPS']
+ else:
+ if department == "Shibtests":
+ groups = "shibtests"
+ else:
+ response = HttpResponse(
+ (
+ "Error 403 - denied. <br>"
+ "Unfortunately, alumni are not permitted to use UCL Apps."
+ )
+ )
+ response.status_code = 403
+ return response
+
# If a user has never used the API before then we need to sign them up
try:
user = User.objects.get(email=eppn)
@@ -488,16 +510,16 @@ def userdata(request, *args, **kwargs):
user_data = {
"ok": True,
- "full_name": token.user.full_name,
- "email": token.user.email,
- "given_name": token.user.given_name,
"cn": token.user.cn,
"department": token.user.department,
+ "email": token.user.email,
+ "full_name": token.user.full_name,
+ "given_name": token.user.given_name,
"upi": token.user.employee_id,
"scope_number": token.scope.scope_number,
- "is_student": is_student
+ "is_student": is_student,
+ "ucl_groups": token.user.raw_intranet_groups.split(';')
}
- print("Is student: " + str(is_student))
return PrettyJsonResponse(
user_data,
@@ -563,6 +585,7 @@ def get_student_number(request, *args, **kwargs):
custom_header_data=kwargs
)
+
@csrf_exempt
def myapps_shibboleth_callback(request):
# should auth user login or signup
@@ -577,7 +600,7 @@ def myapps_shibboleth_callback(request):
try:
user = User.objects.get(email=eppn)
- except ObjectDoesNotExist:
+ except User.DoesNotExist:
# create a new user
new_user = User(
email=eppn,
@@ -590,7 +613,6 @@ def myapps_shibboleth_callback(request):
)
new_user.save()
- add_user_to_mailing_list_task.delay(new_user.email, new_user.full_name)
request.session["user_id"] = new_user.id
keen_add_event.delay("signup", {
@@ -658,7 +680,7 @@ def my_apps(request):
})
initial_data_dict = {
- "status" : "ONLINE",
+ "status": "ONLINE",
"fullname": user.full_name,
"user_id": user.id,
"department": user.department,
| diff --git a/backend/uclapi/oauth/tests.py b/backend/uclapi/oauth/tests.py
--- a/backend/uclapi/oauth/tests.py
+++ b/backend/uclapi/oauth/tests.py
@@ -1,8 +1,10 @@
import json
-import unittest.mock
import os
+import random
+import string
+import unittest.mock
-from django.test import TestCase
+from django.test import Client, TestCase
from rest_framework.test import APIRequestFactory
from django.core import signing
@@ -355,6 +357,7 @@ def test_no_callback_url(self):
class ViewsTestCase(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
+ self.client = Client()
def test_no_parameters(self):
request = self.factory.get(
@@ -472,6 +475,190 @@ def test_expired_signature(self, TimestampSigner):
"Team to rectify this.")
)
+ def test_valid_shibcallback_real_account(self):
+ dev_user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Dev",
+ employee_id='testdev01'
+ )
+ app_ = App.objects.create(
+ user=dev_user_,
+ name="An App",
+ callback_url="www.somecallbackurl.com/callback"
+ )
+ test_user_ = User.objects.create(
+ email="[email protected]",
+ cn="testxxx",
+ given_name="Test User",
+ employee_id='xxxtest01'
+ )
+
+ signer = signing.TimestampSigner()
+ # Generate a random state for testing
+ state = ''.join(
+ random.choices(string.ascii_letters + string.digits, k=32)
+ )
+ data = app_.client_id + state
+ signed_data = signer.sign(data)
+
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ HTTP_EPPN='[email protected]',
+ HTTP_CN='testxxx',
+ HTTP_DEPARTMENT='Dept of Tests',
+ HTTP_GIVENNAME='Test New Name',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_EMPLOYEEID='xxxtest01',
+ HTTP_UCLINTRANETGROUPS='ucl-all;ucl-tests-all'
+ )
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(self.client.session['user_id'], test_user_.id)
+
+ initial_data = json.loads(response.context['initial_data'])
+ self.assertEqual(
+ initial_data['app_name'],
+ app_.name
+ )
+ self.assertEqual(
+ initial_data['client_id'],
+ app_.client_id
+ )
+ self.assertEqual(
+ initial_data['state'],
+ state
+ )
+ self.assertDictEqual(
+ initial_data['user'],
+ {
+ "full_name": "Test User",
+ "cn": "testxxx",
+ "email": "[email protected]",
+ "department": "Dept of Tests",
+ "upi": "xxxtest01"
+ }
+ )
+
+ # Reload the test user from DB
+ test_user_ = User.objects.get(id=test_user_.id)
+
+ self.assertEqual(
+ test_user_.given_name,
+ "Test New Name"
+ )
+
+ def test_valid_shibcallback_test_account(self):
+ dev_user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Dev",
+ employee_id='testdev01'
+ )
+ app_ = App.objects.create(
+ user=dev_user_,
+ name="An App",
+ callback_url="www.somecallbackurl.com/callback"
+ )
+ test_user_ = User.objects.create(
+ email="[email protected]",
+ cn="testxxx",
+ given_name="Test User",
+ employee_id='xxxtest01'
+ )
+
+ signer = signing.TimestampSigner()
+ # Generate a random state for testing
+ state = ''.join(
+ random.choices(string.ascii_letters + string.digits, k=32)
+ )
+ data = app_.client_id + state
+ signed_data = signer.sign(data)
+
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ HTTP_EPPN='[email protected]',
+ HTTP_CN='testxxx',
+ HTTP_DEPARTMENT='Shibtests',
+ HTTP_GIVENNAME='Test',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_EMPLOYEEID='xxxtest01',
+ )
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(self.client.session['user_id'], test_user_.id)
+
+ initial_data = json.loads(response.context['initial_data'])
+ self.assertEqual(
+ initial_data['app_name'],
+ app_.name
+ )
+ self.assertEqual(
+ initial_data['client_id'],
+ app_.client_id
+ )
+ self.assertEqual(
+ initial_data['state'],
+ state
+ )
+ self.assertDictEqual(
+ initial_data['user'],
+ {
+ "full_name": "Test User",
+ "cn": "testxxx",
+ "email": "[email protected]",
+ "department": "Shibtests",
+ "upi": "xxxtest01"
+ }
+ )
+
+ # Reload the test user from DB
+ test_user_ = User.objects.get(id=test_user_.id)
+
+ self.assertEqual(
+ test_user_.raw_intranet_groups,
+ "shibtests"
+ )
+
+ def test_invalid_or_alumni_account(self):
+ dev_user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Dev",
+ employee_id='testdev01'
+ )
+ app_ = App.objects.create(
+ user=dev_user_,
+ name="An App",
+ callback_url="www.somecallbackurl.com/callback"
+ )
+
+ signer = signing.TimestampSigner()
+ # Generate a random state for testing
+ state = ''.join(
+ random.choices(string.ascii_letters + string.digits, k=32)
+ )
+ data = app_.client_id + state
+ signed_data = signer.sign(data)
+
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ HTTP_EPPN='[email protected]',
+ HTTP_CN='testxxx',
+ HTTP_DEPARTMENT='Dept of Alumni',
+ HTTP_GIVENNAME='Test',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_EMPLOYEEID='xxxtest01',
+ )
+ self.assertEqual(response.status_code, 403)
+
class AppHelpersTestCase(TestCase):
def test_generate_random_verification_code(self):
| [Feature Request] Expose all Shibboleth UCL Intranet Groups via OAuth
**Is your feature request related to a problem? Please describe.**
It is not possible to determine which faculty a user is in, or whather they have special access (e.g. to do the Bloomsbury Theatre builings), each of which places them in a new UCL Intranet Group.
Without this feature in place it is not possible to migrate EngHub from raw Shibboleth to UCL API OAuth.
**Describe the solution you'd like**
A list of each UCL Intranet Group should be provided via the `/user/data` endpoint.
**Describe alternatives you've considered (optional)**
Parsing this data on the API's side and providing metadata (such as the Faculty a user is in) instead of a raw list of groups. This might be hard to do without a lot of help from ISD though.
| Does the shibboleth response give us all the group information? | 2019-06-14T17:37:18 |
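The user-facing part of the change is small: the semicolon-delimited Shibboleth groups string stored on the user is split into a list and returned from `/oauth/user/data` as `ucl_groups`. A tiny sketch with a made-up groups string:

```python
raw_intranet_groups = "ucl-all;ucl-ug;engsci-ug"   # made-up example of the stored string

user_data = {
    "ok": True,
    "ucl_groups": raw_intranet_groups.split(";"),
}
print(user_data["ucl_groups"])   # ['ucl-all', 'ucl-ug', 'engsci-ug']
```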
uclapi/uclapi | 1,622 | uclapi__uclapi-1622 | [
"1602"
] | 039b2297acddc4a9586b037d427bc10aaca56b16 | diff --git a/backend/uclapi/dashboard/views.py b/backend/uclapi/dashboard/views.py
--- a/backend/uclapi/dashboard/views.py
+++ b/backend/uclapi/dashboard/views.py
@@ -19,12 +19,46 @@
@csrf_exempt
def shibboleth_callback(request):
+ # should auth user login or signup
+ # then redirect to dashboard homepage
+
+ # Sometimes UCL doesn't give us the expected headers.
+ # If a critical header is missing we error out.
+ # If non-critical headers are missing we simply put a placeholder string.
+ try:
+ # This is used to find the correct user
+ eppn = request.META['HTTP_EPPN']
+ # We don't really use cn but because it's unique in the DB we can't
+ # really put a place holder value.
+ cn = request.META['HTTP_CN']
+ # (aka UPI), also unique in the DB
+ employee_id = request.META['HTTP_EMPLOYEEID']
+ except KeyError:
+ response = HttpResponse(
+ (
+
+ "Error 400 - Bad Request. <br>"
+ "UCL has sent incomplete headers. If the issues persist please"
+ "contact the UCL API Team to rectify this."
+ )
+ )
+ response.status_code = 400
+ return response
+
+ # TODO: Ask UCL what on earth are they doing by missing out headers, and
+ # remind them we need to to be informed of these types of changes.
+ # TODO: log to sentry that fields were missing...
+ department = request.META.get('HTTP_DEPARTMENT', '')
+ given_name = request.META.get('HTTP_GIVENNAME', '')
+ display_name = request.META.get('HTTP_DISPLAYNAME', '')
+ groups = request.META.get('HTTP_UCLINTRANETGROUPS', '')
+
# We first check whether the user is a member of any UCL Intranet Groups.
# This is a quick litmus test to determine whether they should have
# access to the dashboard.
# We deny access to test accounts and alumni, neither of which have
# this Shibboleth attribute.
- if 'HTTP_UCLINTRANETGROUPS' not in request.META:
+ if not groups:
response = HttpResponse(
(
"Error 403 - denied. <br>"
@@ -34,17 +68,10 @@ def shibboleth_callback(request):
response.status_code = 403
return response
- # should auth user login or signup
- # then redirect to dashboard homepage
- eppn = request.META['HTTP_EPPN']
- groups = request.META['HTTP_UCLINTRANETGROUPS']
- cn = request.META['HTTP_CN']
- department = request.META.get('HTTP_DEPARTMENT', '')
- given_name = request.META['HTTP_GIVENNAME']
- display_name = request.META['HTTP_DISPLAYNAME']
- employee_id = request.META['HTTP_EMPLOYEEID']
-
try:
+ # TODO: Handle MultipleObjectsReturned exception.
+ # email field isn't unique at database level (on our side).
+ # Alternatively, switch to employee_id (which is unique).
user = User.objects.get(email=eppn)
except ObjectDoesNotExist:
# create a new user
@@ -63,14 +90,17 @@ def shibboleth_callback(request):
request.session["user_id"] = new_user.id
else:
- # user exists already, update values
- request.session["user_id"] = user.id
- user.full_name = display_name
- user.given_name = given_name
+ # User exists already, so update the values if new ones are non-empty.
+ user = User.objects.get(email=eppn)
+ user.employee_id = employee_id
+ if display_name:
+ user.full_name = display_name
+ if given_name:
+ user.given_name = given_name
if department:
user.department = department
- user.raw_intranet_groups = groups
- user.employee_id = employee_id
+ if groups:
+ user.raw_intranet_groups = groups
user.save()
return redirect(dashboard)
diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -130,17 +130,33 @@ def shibcallback(request):
# string sent via Shibboleth
app = App.objects.get(client_id=client_id)
- eppn = request.META['HTTP_EPPN']
- cn = request.META['HTTP_CN']
- # UCL's Shibboleth isn't passing us the department anymore...
- # TODO: Ask UCL what on earth are they doing, and remind them we need to to
- # be informed of these types of changes.
- # TODO: Some of these fields are non-critical, think of defaults incase UCL
- # starts removing/renaming more of these fields...
+ # Sometimes UCL doesn't give us the expected headers.
+ # If a critical header is missing we error out.
+ # If non-critical headers are missing we simply put a placeholder string.
+ try:
+ # This is used to find the correct user
+ eppn = request.META['HTTP_EPPN']
+ # We don't really use cn but because it's unique in the DB we can't
+ # really put a place holder value.
+ cn = request.META['HTTP_CN']
+ # (aka UPI), also unique in the DB
+ employee_id = request.META['HTTP_EMPLOYEEID']
+ except KeyError:
+ response = PrettyJsonResponse({
+ "ok": False,
+ "error": ("UCL has sent incomplete headers. If the issues persist"
+ "please contact the UCL API Team to rectify this.")
+ })
+ response.status_code = 400
+ return response
+
+ # TODO: Ask UCL what on earth are they doing by missing out headers, and
+ # remind them we need to to be informed of these types of changes.
+ # TODO: log to sentry that fields were missing...
department = request.META.get('HTTP_DEPARTMENT', '')
- given_name = request.META['HTTP_GIVENNAME']
- display_name = request.META['HTTP_DISPLAYNAME']
- employee_id = request.META['HTTP_EMPLOYEEID']
+ given_name = request.META.get('HTTP_GIVENNAME', '')
+ display_name = request.META.get('HTTP_DISPLAYNAME', '')
+ groups = request.META.get('HTTP_UCLINTRANETGROUPS', '')
# We check whether the user is a member of any UCL Intranet Groups.
# This is a quick litmus test to determine whether they should be able to
@@ -149,9 +165,7 @@ def shibcallback(request):
# Test accounts also do not have this attribute, but we can check the
# department attribute for the Shibtests department.
# This lets App Store reviewers log in to apps that use the UCL API.
- if 'HTTP_UCLINTRANETGROUPS' in request.META:
- groups = request.META['HTTP_UCLINTRANETGROUPS']
- else:
+ if not groups:
if department == "Shibtests" or eppn == SHIB_TEST_USER:
groups = "shibtests"
else:
@@ -166,6 +180,9 @@ def shibcallback(request):
# If a user has never used the API before then we need to sign them up
try:
+ # TODO: Handle MultipleObjectsReturned exception.
+ # email field isn't unique at database level (on our side).
+ # Alternatively, switch to employee_id (which is unique).
user = User.objects.get(email=eppn)
except User.DoesNotExist:
# create a new user
@@ -181,14 +198,17 @@ def shibcallback(request):
user.save()
else:
- # User exists already, so update the values
+ # User exists already, so update the values if new ones are non-empty.
user = User.objects.get(email=eppn)
- user.full_name = display_name
- user.given_name = given_name
- if department: # UCL doesn't pass this anymore it seems...
- user.department = department
- user.raw_intranet_groups = groups
user.employee_id = employee_id
+ if display_name:
+ user.full_name = display_name
+ if given_name:
+ user.given_name = given_name
+ if department:
+ user.department = department
+ if groups:
+ user.raw_intranet_groups = groups
user.save()
# Log the user into the system using their User ID
@@ -584,16 +604,40 @@ def get_student_number(request, *args, **kwargs):
def myapps_shibboleth_callback(request):
# should auth user login or signup
# then redirect to my apps homepage
- eppn = request.META['HTTP_EPPN']
- groups = request.META['HTTP_UCLINTRANETGROUPS']
- cn = request.META['HTTP_CN']
- department = request.META['HTTP_DEPARTMENT']
- given_name = request.META['HTTP_GIVENNAME']
- display_name = request.META['HTTP_DISPLAYNAME']
- employee_id = request.META['HTTP_EMPLOYEEID']
+
+ # Sometimes UCL doesn't give us the expected headers.
+ # If a critical header is missing we error out.
+ # If non-critical headers are missing we simply put a placeholder string.
+ try:
+ # This is used to find the correct user
+ eppn = request.META['HTTP_EPPN']
+ # We don't really use cn but because it's unique in the DB we can't
+ # really put a place holder value.
+ cn = request.META['HTTP_CN']
+ # (aka UPI), also unique in the DB
+ employee_id = request.META['HTTP_EMPLOYEEID']
+ except KeyError:
+ response = PrettyJsonResponse({
+ "ok": False,
+ "error": ("UCL has sent incomplete headers. If the issues persist"
+ "please contact the UCL API Team to rectify this.")
+ })
+ response.status_code = 400
+ return response
+
+ # TODO: Ask UCL what on earth are they doing by missing out headers, and
+ # remind them we need to to be informed of these types of changes.
+ # TODO: log to sentry that fields were missing...
+ department = request.META.get('HTTP_DEPARTMENT', '')
+ given_name = request.META.get('HTTP_GIVENNAME', '')
+ display_name = request.META.get('HTTP_DISPLAYNAME', '')
+ groups = request.META.get('HTTP_UCLINTRANETGROUPS', '')
try:
user = User.objects.get(email=eppn)
+ # TODO: Handle MultipleObjectsReturned exception.
+ # email field isn't unique at database level (on our side).
+ # Alternatively, switch to employee_id (which is unique).
except User.DoesNotExist:
# create a new user
new_user = User(
@@ -610,13 +654,17 @@ def myapps_shibboleth_callback(request):
request.session["user_id"] = new_user.id
else:
- # user exists already, update values
- request.session["user_id"] = user.id
- user.full_name = display_name
- user.given_name = given_name
- user.department = department
- user.raw_intranet_groups = groups
+ # User exists already, so update the values if new ones are non-empty.
+ user = User.objects.get(email=eppn)
user.employee_id = employee_id
+ if display_name:
+ user.full_name = display_name
+ if given_name:
+ user.given_name = given_name
+ if department:
+ user.department = department
+ if groups:
+ user.raw_intranet_groups = groups
user.save()
return redirect("/oauth/myapps")
| diff --git a/backend/uclapi/oauth/tests.py b/backend/uclapi/oauth/tests.py
--- a/backend/uclapi/oauth/tests.py
+++ b/backend/uclapi/oauth/tests.py
@@ -550,7 +550,8 @@ def test_valid_shibcallback_real_account(self):
"Test New Name"
)
- # Now lets test for when UCL doesn't give us the department.
+ # Now lets test for when UCL doesn't give us department the department,
+ # givenname and displayname.
response = self.client.get(
'/oauth/shibcallback',
{
@@ -558,9 +559,7 @@ def test_valid_shibcallback_real_account(self):
},
HTTP_EPPN='[email protected]',
HTTP_CN='testxxx',
- # NOTE: missing HTTP_DEPARTMENT
- HTTP_GIVENNAME='Test New Name',
- HTTP_DISPLAYNAME='Test User',
+ # NOTE: missing HTTP_DEPARTMENT, HTTP_GIVENNAME, HTTP_DISPLAYNAME
HTTP_EMPLOYEEID='xxxtest01',
HTTP_UCLINTRANETGROUPS='ucl-all;ucl-tests-all'
)
@@ -569,12 +568,67 @@ def test_valid_shibcallback_real_account(self):
# Reload the test user from DB
test_user_ = User.objects.get(id=test_user_.id)
- # If no department is passed, we don't want to overwrite the previous
- # value with an empty string.
+ # If a non-critical HTTP header is not passed, we don't want to
+ # overwrite the previous value with an empty string.
self.assertEqual(
test_user_.department,
"Dept of Tests"
)
+ self.assertEqual(
+ test_user_.given_name,
+ "Test New Name"
+ )
+ self.assertEqual(
+ test_user_.full_name,
+ "Test User"
+ )
+
+ # Now let's test when critical fields are missing
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ # NOTE: missing critical field eppn
+ HTTP_CN='testxxx',
+ HTTP_EMPLOYEEID='xxxtest01',
+ HTTP_DEPARTMENT='Dept of Tests',
+ HTTP_GIVENNAME='Test New Name',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_UCLINTRANETGROUPS='ucl-all;ucl-tests-all'
+ )
+ self.assertEqual(response.status_code, 400)
+
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ HTTP_EPPN='[email protected]',
+ # NOTE: missing critical field cn
+ HTTP_DEPARTMENT='Dept of Tests',
+ HTTP_GIVENNAME='Test New Name',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_EMPLOYEEID='xxxtest01',
+ HTTP_UCLINTRANETGROUPS='ucl-all;ucl-tests-all'
+ )
+ self.assertEqual(response.status_code, 400)
+
+ response = self.client.get(
+ '/oauth/shibcallback',
+ {
+ 'appdata': signed_data
+ },
+ HTTP_EPPN='[email protected]',
+ HTTP_CN='testxxx',
+ # NOTE: missing critical field employee_id (aka UPI)
+ HTTP_DEPARTMENT='Dept of Tests',
+ HTTP_GIVENNAME='Test New Name',
+ HTTP_DISPLAYNAME='Test User',
+ HTTP_UCLINTRANETGROUPS='ucl-all;ucl-tests-all'
+ )
+ self.assertEqual(response.status_code, 400)
+
def test_valid_shibcallback_test_account(self):
dev_user_ = User.objects.create(
| Oauth 500
Hi,
Several people have reported issues to me when logging in with OAuth: they receive a server error 500 message.
It works for most people using my app; I'm not sure why it fails for some.
If you need help digging up the log files, I can provide some confidential information (i.e. names or UCL email addresses for the failing accounts); please PM me in that case.

The people reporting 500s are most likely students enrolling in AY19/20. Since they have not fully matriculated yet, UCL is not yet passing us all the information we need.
We're working to figure out what information is missing, and to throw a more helpful error message (#1602). But for the time being, you might want to add a warning to your app.
OK, thanks | 2019-09-18T13:26:59 |
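The patch for this row treats a few Shibboleth headers as critical and the rest as optional, which is what stops the 500s. A condensed sketch of that pattern follows; the header names are taken from the diff, while the helper function and the plain `JsonResponse` (rather than the project's own response wrapper) are my own framing.

```python
from django.http import JsonResponse

CRITICAL_HEADERS = ("HTTP_EPPN", "HTTP_CN", "HTTP_EMPLOYEEID")
OPTIONAL_HEADERS = ("HTTP_DEPARTMENT", "HTTP_GIVENNAME",
                    "HTTP_DISPLAYNAME", "HTTP_UCLINTRANETGROUPS")

def read_shib_headers(request):
    """Return (headers, error_response).

    A missing critical header yields a 400 with a clear message instead of a
    KeyError-driven 500; missing optional headers fall back to placeholders.
    """
    try:
        headers = {name: request.META[name] for name in CRITICAL_HEADERS}
    except KeyError:
        error = JsonResponse(
            {"ok": False, "error": "UCL sent incomplete login headers."},
            status=400,
        )
        return None, error
    for name in OPTIONAL_HEADERS:
        headers[name] = request.META.get(name, "")
    return headers, None
```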
uclapi/uclapi | 1,802 | uclapi__uclapi-1802 | [
"529"
] | 2336da7ccbaaaa9edf41c64317298cc8e3e6a17c | diff --git a/backend/uclapi/dashboard/models.py b/backend/uclapi/dashboard/models.py
--- a/backend/uclapi/dashboard/models.py
+++ b/backend/uclapi/dashboard/models.py
@@ -39,7 +39,11 @@ class App(models.Model):
primary_key=True,
default=generate_app_id
)
- user = models.ForeignKey(User, related_name='user')
+ user = models.ForeignKey(
+ User,
+ related_name='user',
+ on_delete=models.CASCADE
+ )
name = models.CharField(max_length=1000)
api_token = models.CharField(
max_length=1000,
@@ -72,8 +76,9 @@ def create_scope():
scope = models.ForeignKey(
OAuthScope,
- on_delete=models.CASCADE,
- default=create_scope)
+ default=create_scope,
+ on_delete=models.CASCADE
+ )
def regenerate_token(self):
new_token = generate_api_token()
@@ -86,8 +91,16 @@ class Meta:
class APICall(models.Model):
ts = models.DateTimeField(auto_now_add=True)
- app = models.ForeignKey(App, related_name='api_call')
- user = models.ForeignKey(User, related_name='api_call')
+ app = models.ForeignKey(
+ App,
+ on_delete=models.CASCADE,
+ related_name='api_call'
+ )
+ user = models.ForeignKey(
+ User,
+ on_delete=models.CASCADE,
+ related_name='api_call'
+ )
raw_request = models.TextField(max_length=10000000)
class Meta:
@@ -95,7 +108,7 @@ class Meta:
class Webhook(models.Model):
- app = models.OneToOneField(App)
+ app = models.OneToOneField(App, on_delete=models.CASCADE)
url = models.URLField(max_length=1000, blank=True)
siteid = models.CharField(max_length=40, blank=True)
@@ -117,7 +130,10 @@ class Meta:
class WebhookTriggerHistory(models.Model):
- webhook = models.ForeignKey(Webhook)
+ webhook = models.ForeignKey(
+ Webhook,
+ on_delete=models.CASCADE
+ )
payload = models.CharField(max_length=10000000)
timestamp = models.DateTimeField(auto_now_add=True, editable=False)
diff --git a/backend/uclapi/timetable/models.py b/backend/uclapi/timetable/models.py
--- a/backend/uclapi/timetable/models.py
+++ b/backend/uclapi/timetable/models.py
@@ -256,7 +256,7 @@ class Module(models.Model):
name = models.TextField(max_length=120)
category = models.TextField(max_length=10, null=True)
classif = models.TextField(max_length=10)
- linkcode = models.TextField(primary_key=True, max_length=20)
+ linkcode = models.TextField(max_length=20)
csize = models.BigIntegerField(null=True, blank=True)
minsize = models.BigIntegerField(null=True, blank=True)
maxsize = models.BigIntegerField(null=True, blank=True)
@@ -272,6 +272,7 @@ class Meta:
managed = False
db_table = '"CMIS_OWNER"."MODULE"'
_DATABASE = 'roombookings'
+ unique_together = (("linkcode", "moduleid"),)
class ModuleA(models.Model):
@@ -758,6 +759,7 @@ class CrsavailmodulesB(models.Model):
class Meta:
_DATABASE = 'gencache'
+
class Timetable(models.Model):
slotid = models.BigIntegerField(primary_key=True)
slotentry = models.BigIntegerField(null=True, blank=True)
@@ -1291,10 +1293,10 @@ class Meta:
class Modulegroups(models.Model):
- setid = models.TextField(max_length=10, primary_key=True)
- moduleid = models.TextField(max_length=12, primary_key=True)
+ setid = models.TextField(max_length=10)
+ moduleid = models.TextField(max_length=12)
owner = models.TextField(max_length=10)
- grpcode = models.TextField(max_length=10, primary_key=True)
+ grpcode = models.TextField(max_length=10)
name = models.TextField(max_length=30, null=True, blank=True)
csize = models.IntegerField(null=True, blank=True)
minsize = models.IntegerField(null=True, blank=True)
@@ -1312,6 +1314,7 @@ class Meta:
managed = False
db_table = '"CMIS_OWNER"."MODULEGROUPS"'
_DATABASE = 'roombookings'
+ unique_together = (("setid", "moduleid", "grpcode"),)
class ModulegroupsA(models.Model):
diff --git a/backend/uclapi/uclapi/urls.py b/backend/uclapi/uclapi/urls.py
--- a/backend/uclapi/uclapi/urls.py
+++ b/backend/uclapi/uclapi/urls.py
@@ -18,6 +18,9 @@
from dashboard.views import documentation, home, about
from marketplace.views import marketplace
+
+app_name = "uclapi"
+
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^dashboard/', include('dashboard.urls')),
| Django 2.0 Compatibility
We need to test and assess our code for Django 2.0 compatibility, including checking whether our database code works with the 2.0 breaking changes.
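The biggest mechanical change (and what most of the diff above does) is that Django 2.0 makes `on_delete` a required argument for `ForeignKey` and `OneToOneField`. A minimal sketch of the before/after with an illustrative model; the `"dashboard.User"` string reference is just for the example:

```python
from django.db import models


class App(models.Model):
    # Django < 2.0 allowed omitting on_delete:
    #   user = models.ForeignKey("dashboard.User", related_name="user")
    # Django 2.0 requires it explicitly:
    user = models.ForeignKey(
        "dashboard.User",
        related_name="user",
        on_delete=models.CASCADE,  # delete the app when its owner is deleted
    )
```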
| This looks helpful: https://eldarion.com/blog/2017/12/26/10-tips-upgrading-django-20/ | 2019-11-08T02:41:11 |
|
uclapi/uclapi | 2,070 | uclapi__uclapi-2070 | [
"1757"
] | 8565fff41573b0cb5a710e17f892be64d040a328 | diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py
--- a/backend/uclapi/oauth/urls.py
+++ b/backend/uclapi/oauth/urls.py
@@ -15,4 +15,5 @@
url(r'user/data$', views.userdata),
url(r'user/studentnumber$', views.get_student_number),
url(r'deauthorise$', views.deauthorise_app),
+ url(r'logout$', views.logout),
]
diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -665,7 +665,7 @@ def my_apps(request):
url = os.environ["SHIBBOLETH_ROOT"] + "/Login?target="
param = urllib.parse.urljoin(
request.build_absolute_uri(request.path),
- "/shibcallback"
+ "/oauth/myapps/shibcallback"
)
param = quote(param)
url = url + param
@@ -759,3 +759,10 @@ def deauthorise_app(request):
})
response.status_code = 200
return response
+
+
+@ensure_csrf_cookie
+def logout(request):
+ response = redirect('/')
+ response.delete_cookie('user_id')
+ return response
\ No newline at end of file
| [BUG] Dates on the dashboard are incorrect
**Describe the bug**
The created at and last modified dates for apps on the dashboard are incorrect.

(this app was not created on 28th October; it was created several months before)
| 2020-02-07T09:07:08 |
||
uclapi/uclapi | 2,170 | uclapi__uclapi-2170 | [
"2110"
] | 6a3cd6f4376f1c1e7bd7c7803ade20b83ec96685 | diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py
--- a/backend/uclapi/oauth/urls.py
+++ b/backend/uclapi/oauth/urls.py
@@ -4,7 +4,6 @@
urlpatterns = [
url(r'myapps/shibcallback$', views.myapps_shibboleth_callback),
- url(r'myapps', views.my_apps),
url(r'authorise/$', views.authorise),
url(r'shibcallback', views.shibcallback),
url(r'token$', views.token),
@@ -15,5 +14,4 @@
url(r'user/data$', views.userdata),
url(r'user/studentnumber$', views.get_student_number),
url(r'deauthorise$', views.deauthorise_app),
- url(r'logout$', views.logout),
]
diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -652,11 +652,11 @@ def myapps_shibboleth_callback(request):
user.raw_intranet_groups = groups
user.save()
- return redirect("/oauth/myapps")
+ return redirect(settings)
@ensure_csrf_cookie
-def my_apps(request):
+def settings(request):
# Check whether the user is logged in
try:
user_id = request.session["user_id"]
@@ -763,6 +763,10 @@ def deauthorise_app(request):
@ensure_csrf_cookie
def logout(request):
- response = redirect('/')
- response.delete_cookie('user_id')
+ try:
+ del request.session['user_id']
+ except KeyError:
+ pass
+
+ response = redirect('/', )
return response
\ No newline at end of file
diff --git a/backend/uclapi/uclapi/urls.py b/backend/uclapi/uclapi/urls.py
--- a/backend/uclapi/uclapi/urls.py
+++ b/backend/uclapi/uclapi/urls.py
@@ -15,10 +15,11 @@
"""
from django.conf.urls import url, include
from django.contrib import admin
+from django.urls import path
from dashboard.views import documentation, home, about
+from oauth.views import settings, logout
from marketplace.views import marketplace
-
app_name = "uclapi"
urlpatterns = [
@@ -26,6 +27,8 @@
url(r'^dashboard/', include('dashboard.urls')),
url(r'^docs', documentation),
url(r'^about', about),
+ path('settings/', settings),
+ path('logout/', logout),
url(r'^marketplace', marketplace),
url(r'^roombookings/', include('roombookings.urls')),
url(r'^oauth/', include('oauth.urls')),
| Broken logging in flow for the settings page
**Describe the bug**
Logging in when accessing settings page leads to 404
**To Reproduce**
Clear your cache
Go to uclapi.com
Click on settings
Log into your account
Observe 404 uclapi.com/shibcallback does not exist:
**Expected behavior**
Should redirect to the callback for the settings page
OR
Could redesign so the log in button redirects to the home page and then once logged in those pages are accessible
**Screenshots**
If applicable, add screenshots to help explain your problem.

| 2020-03-11T15:37:46 |
||
uclapi/uclapi | 2,431 | uclapi__uclapi-2431 | [
"2401"
] | 5f5f8040ed08b3972c3ae285ef6a1ce42c94eaf6 | diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -280,7 +280,8 @@ def userdeny(request):
"ok": False,
"error": ("The signed data received was invalid."
" Please try the login process again. "
- "If this issue persists, please contact support.")
+ "If this issue persists, please contact us at "
+ "[email protected] or on github.")
})
response.status_code = 400
return response
@@ -291,7 +292,8 @@ def userdeny(request):
response = PrettyJsonResponse({
"ok": False,
"error": ("The JSON data was not in the expected format."
- " Please contact support.")
+ " Please contact us at "
+ "[email protected] or on github.")
})
response.status_code = 400
return response
@@ -313,7 +315,8 @@ def userdeny(request):
"ok": False,
"error":
"User does not exist. This should never occur. "
- "Please contact support."
+ "Please contact us at "
+ "[email protected] or on github."
})
response.status_code = 400
return response
@@ -339,7 +342,8 @@ def userallow(request):
"ok": False,
"error": ("The signed data received was invalid."
" Please try the login process again."
- " If this issue persists, please contact support.")
+ " If this issue persists, please contact us at"
+ " [email protected] or on github.")
})
response.status_code = 400
return response
@@ -350,7 +354,8 @@ def userallow(request):
response = PrettyJsonResponse({
"ok": False,
"error": ("The JSON data was not in the expected format."
- " Please contact support.")
+ " Please contact us at"
+ " [email protected] or on github.")
})
response.status_code = 400
return response
| diff --git a/backend/uclapi/oauth/tests.py b/backend/uclapi/oauth/tests.py
--- a/backend/uclapi/oauth/tests.py
+++ b/backend/uclapi/oauth/tests.py
@@ -768,7 +768,8 @@ def test_userallow_no_post_data(self):
("The signed data received "
"was invalid."
" Please try the login process again."
- " If this issue persists, please contact support."))
+ " If this issue persists, please contact us at "
+ "[email protected] or on github."))
def test_userallow_bad_but_signed_post_data(self):
signer = signing.TimestampSigner()
@@ -782,7 +783,8 @@ def test_userallow_bad_but_signed_post_data(self):
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()["error"],
("The JSON data was not in the expected format. "
- "Please contact support."))
+ "Please contact us at "
+ "[email protected] or on github."))
def test_userdeny_no_post_data(self):
response = self.client.get(
@@ -794,7 +796,8 @@ def test_userdeny_no_post_data(self):
("The signed data received "
"was invalid."
" Please try the login process again."
- " If this issue persists, please contact support."))
+ " If this issue persists, please contact us at "
+ "[email protected] or on github."))
def test_userdeny_bad_but_signed_post_data(self):
signer = signing.TimestampSigner()
@@ -808,7 +811,8 @@ def test_userdeny_bad_but_signed_post_data(self):
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()["error"],
("The JSON data was not in the expected format. "
- "Please contact support."))
+ "Please contact us at "
+ "[email protected] or on github."))
def test_userdeny_user_does_not_exist(self):
dev_user_ = User.objects.create(
@@ -848,7 +852,8 @@ def test_userdeny_user_does_not_exist(self):
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()["error"],
("User does not exist. This should never occur. "
- "Please contact support."))
+ "Please contact us at "
+ "[email protected] or on github."))
def test_userdeny_good_flow(self):
dev_user_ = User.objects.create(
| Vague "contact support" error messages should note who and where support is
Error messages such as "User does not exist. This should never occur. Please contact support." should instead point to a concrete channel, e.g. "Please email [email protected]" or suggest raising an issue on GitHub.
| 2020-06-01T13:21:22 |
|
uclapi/uclapi | 2,446 | uclapi__uclapi-2446 | [
"1931"
] | f27bfa427b8b4a283863619367d53daac002d092 | diff --git a/backend/uclapi/timetable/views.py b/backend/uclapi/timetable/views.py
--- a/backend/uclapi/timetable/views.py
+++ b/backend/uclapi/timetable/views.py
@@ -34,11 +34,8 @@ def get_personal_timetable_endpoint(request, *args, **kwargs):
"""
token = kwargs['token']
user = token.user
- try:
- date_filter = request.GET["date_filter"]
- timetable = get_student_timetable(user.employee_id, date_filter)
- except KeyError:
- timetable = get_student_timetable(user.employee_id)
+ date_filter = request.GET.get("date")
+ timetable = get_student_timetable(user.employee_id, date_filter)
response = {
"ok": True,
@@ -66,7 +63,7 @@ def get_modules_timetable_endpoint(request, *args, **kwargs):
modules = module_ids.split(',')
- date_filter = request.GET.get("date_filter")
+ date_filter = request.GET.get("date")
custom_timetable = get_custom_timetable(modules, date_filter)
if custom_timetable:
| Document date_filter parameter for timetable endpoint
**Is your feature request related to a problem? Please describe.**
The /timetable/personal endpoint accepts an undocumented `date_filter` query parameter in ISO 8601 format, i.e. YYYY-MM-DD
**Describe the solution you'd like**
Document this in uclapi.com/docs
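For reference, the diff above ends up reading the parameter as `date` (via `request.GET.get("date")`) rather than `date_filter`. A rough sketch of a client call once this is documented; the token values and the exact set of query parameters are assumptions for illustration:

```python
import requests

# Hypothetical request; token/client_secret values are placeholders.
resp = requests.get(
    "https://uclapi.com/timetable/personal",
    params={
        "token": "uclapi-user-...",
        "client_secret": "...",
        "date": "2020-06-01",  # ISO 8601 (YYYY-MM-DD), as described in the issue
    },
)
data = resp.json()
```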
| Depends on #1934
Basically, if people start splitting their requests into date ranges smaller than one year, it will actually make our performance worse. Once the above issue is resolved we can actually publicise this. I'll add this onto my ever-growing TODO.
Since #1934 has been indicated as less important due to caching, this can now be done. | 2020-06-02T11:34:42 |
|
uclapi/uclapi | 2,951 | uclapi__uclapi-2951 | [
"2589"
] | afae7572d336a124ebf26460b413d3f97134187b | diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -370,6 +370,17 @@ def update_scopes(request):
})
+def get_number_of_requests(token):
+ if token.startswith('uclapi-user-'):
+ calls = APICall.objects.filter(token__token__exact=token)
+ elif token.startswith('uclapi-'):
+ calls = APICall.objects.filter(app__api_token__exact=token)
+ else:
+ return None
+
+ return len(calls)
+
+
def number_of_requests(request):
try:
token = request.GET["token"]
@@ -381,20 +392,18 @@ def number_of_requests(request):
response.status_code = 400
return response
- if token.startswith('uclapi-user-'):
- calls = APICall.objects.filter(token__token__exact=token)
- elif token.startswith('uclapi-'):
- calls = APICall.objects.filter(app__api_token__exact=token)
- else:
+ calls = get_number_of_requests(token)
+ if calls is None:
response = JsonResponse({
"ok": False,
"message": "Token is invalid"
})
response.status_code = 400
return response
+
return PrettyJsonResponse({
"ok": True,
- "num": len(calls),
+ "num": calls,
})
@@ -449,23 +458,19 @@ def get_apps(request):
"siteid": app.webhook.siteid,
"roomid": app.webhook.roomid,
"contact": app.webhook.contact
+ },
+ "analytics": {
+ "requests": get_number_of_requests(app.api_token),
+ "remaining_quota": get_quota_remaining(app.api_token),
+ "users": get_users_per_app(app.api_token),
+ "users_per_dept": get_users_per_app_per_dept(app.api_token)
}
})
return PrettyJsonResponse(user_meta)
-def quota_remaining(request):
- try:
- token = request.GET["token"]
- except MultiValueDictKeyError:
- response = JsonResponse({
- "ok": False,
- "message": "No token provided"
- })
- response.status_code = 400
- return response
-
+def get_quota_remaining(token):
r = redis.Redis(host=REDIS_UCLAPI_HOST)
if token.startswith('uclapi-user-'):
@@ -480,12 +485,7 @@ def quota_remaining(request):
limit = app.user.dev_quota
else:
- response = JsonResponse({
- "ok": False,
- "message": "Token is invalid"
- })
- response.status_code = 400
- return response
+ return None
count_data = r.get(cache_key)
@@ -493,9 +493,33 @@ def quota_remaining(request):
count_data = int(r.get(cache_key))
else:
count_data = 0
+
+ return limit - count_data
+
+
+def quota_remaining(request):
+ try:
+ token = request.GET["token"]
+ except MultiValueDictKeyError:
+ response = JsonResponse({
+ "ok": False,
+ "message": "No token provided"
+ })
+ response.status_code = 400
+ return response
+
+ quota = get_quota_remaining(token)
+ if quota is None:
+ response = JsonResponse({
+ "ok": False,
+ "message": "Token is invalid"
+ })
+ response.status_code = 400
+ return response
+
return PrettyJsonResponse({
"ok": True,
- "remaining": limit - count_data,
+ "remaining": quota,
})
@@ -527,6 +551,20 @@ def most_popular_method(request):
})
+def get_users_per_app(token, start=None, end=None):
+ if start and end:
+ start_date = datetime.strptime(start, "%Y-%m-%d")
+ end_date = datetime.strptime(end, "%Y-%m-%d")
+
+ users = OAuthToken.objects.filter(creation_date__gte=start_date,
+ creation_date__lte=end_date,
+ app__api_token__exact=token)
+ else:
+ users = OAuthToken.objects.filter(app__api_token__exact=token)
+
+ return len(users)
+
+
def users_per_app(request):
try:
token = request.GET["token"]
@@ -541,23 +579,23 @@ def users_per_app(request):
try:
start = request.GET["start_date"]
end = request.GET["end_date"]
-
- start_date = datetime.strptime(start, "%Y-%m-%d")
- end_date = datetime.strptime(end, "%Y-%m-%d")
-
- users = OAuthToken.objects.filter(creation_date__gte=start_date,
- creation_date__lte=end_date,
- app__api_token__exact=token)
-
+ users_count = get_users_per_app(token, start, end)
except MultiValueDictKeyError:
- users = OAuthToken.objects.filter(app__api_token__exact=token)
+ users_count = get_users_per_app(token)
return PrettyJsonResponse({
"ok": True,
- "users": len(users)
+ "users": users_count
})
+def get_users_per_app_per_dept(token):
+ users = User.objects.filter(oauthtoken__app__api_token__exact=token)\
+ .values("department").annotate(count=Count('department'))\
+ .order_by("-count")
+ return list(users)
+
+
def users_per_app_by_dept(request):
try:
token = request.GET["token"]
@@ -569,10 +607,9 @@ def users_per_app_by_dept(request):
response.status_code = 400
return response
- users = User.objects.filter(oauthtoken__app__api_token__exact=token)\
- .values("department").annotate(count=Count('department'))\
- .order_by("-count")
+ users = get_users_per_app_per_dept(token)
+
return PrettyJsonResponse({
"ok": True,
- "data": list(users)
+ "data": users
})
| [Feature Request] Show remaining quota on dashboard
Now that we have an endpoint in the works for getting the quota remaining for a token, we could display it on the dashboard, or at least show a boolean indicating whether you are currently throttled.
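For reference, the merged change above surfaces this through a per-app `analytics` block in the dashboard's app listing; roughly the shape sketched below (the numbers are made up):

```python
# Illustrative 'analytics' entry returned per app after this change.
analytics = {
    "requests": 1234,          # total API calls made with the app's token
    "remaining_quota": 10000,  # from get_quota_remaining()
    "users": 42,               # OAuth users of the app
    "users_per_dept": [{"department": "Dept of UCL API", "count": 42}],
}
```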
| 2020-11-10T15:24:59 |
||
uclapi/uclapi | 2,966 | uclapi__uclapi-2966 | [
"2814"
] | 2482acea6ed87ea07f90299b8675cffa8e6109b8 | diff --git a/backend/uclapi/common/decorators.py b/backend/uclapi/common/decorators.py
--- a/backend/uclapi/common/decorators.py
+++ b/backend/uclapi/common/decorators.py
@@ -49,7 +49,7 @@ def how_many_seconds_until_midnight():
def log_api_call(request, token, token_type):
"""This functions handles logging of api calls."""
service = request.path.split("/")[1]
- method = request.path.split("/")[2]
+ method = "/".join(request.path.split("/")[2:])
headers = request.META
version_headers = {}
diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -247,7 +247,7 @@ def set_callback_url(request):
url_not_safe_saved = is_url_unsafe(new_callback_url)
if url_not_safe_saved:
if url_not_safe_saved == NOT_HTTPS:
- message = "The requested callback URL does not "\
+ message = "The requested callback URL does not " \
"start with 'https://'."
elif url_not_safe_saved == NOT_VALID:
message = "The requested callback URL is not valid."
@@ -535,15 +535,43 @@ def most_popular_service(request):
def most_popular_method(request):
- try:
- service = request.GET["service"]
- most_common = APICall.objects.filter(service__exact=service).values(
- "method").annotate(count=Count('method')).order_by("-count")
- except MultiValueDictKeyError:
- most_common = APICall.objects.values(
- "method").annotate(count=Count('method')).order_by("-count")
-
- most_common = list(most_common)
+ service = request.GET.get("service", False)
+ split_by_service = request.GET.get("split_services", "false")
+ split_by_service = False if split_by_service.lower() in [
+ "false", "0"] else True
+
+ if service:
+ most_common = APICall.objects.filter(service__exact=service)\
+ .values("service", "method").annotate(count=Count('method')).order_by("-count")
+ else:
+ most_common = APICall.objects\
+ .values("service", "method").annotate(count=Count('method')).order_by("-count")
+
+ if not split_by_service:
+ t_most_common_counter = {}
+ for m in most_common:
+ if m["method"].split("/")[0] in t_most_common_counter:
+ t_most_common_counter[m["method"].split("/")[0]] += m["count"]
+ else:
+ t_most_common_counter[m["method"].split("/")[0]] = m["count"]
+ print(t_most_common_counter)
+
+ most_common = [{"method": method, "count": count}
+ for method, count in t_most_common_counter.items()]
+ else:
+ temp_most_common_aggregate = {}
+ for method in most_common:
+ if method["service"] in temp_most_common_aggregate:
+ temp_most_common_aggregate[method["service"]].append({
+ "method": method["method"],
+ "count": method["count"]
+ })
+ else:
+ temp_most_common_aggregate[method["service"]] = [{
+ "method": method["method"],
+ "count": method["count"]
+ }]
+ most_common = temp_most_common_aggregate
return PrettyJsonResponse({
"ok": True,
| diff --git a/backend/uclapi/dashboard/tests.py b/backend/uclapi/dashboard/tests.py
--- a/backend/uclapi/dashboard/tests.py
+++ b/backend/uclapi/dashboard/tests.py
@@ -905,6 +905,29 @@ def test_change_callback_url_not_valid(self):
"The requested callback URL does not start with 'https://'."
)
+ def test_change_callback_url_https_not_valid(self):
+ user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Test"
+ )
+ app_ = App.objects.create(user=user_, name="An App")
+ request = self.factory.post(
+ '/api/setcallbackurl/',
+ {
+ "app_id": app_.id,
+ "callback_url": "https://a"
+ }
+ )
+ request.session = {'user_id': user_.id}
+ response = set_callback_url(request)
+ content = json.loads(response.content.decode())
+ self.assertEqual(response.status_code, 400)
+ self.assertEqual(
+ content["message"],
+ "The requested callback URL is not valid."
+ )
+
def test_change_callback_url_success(self):
user_ = User.objects.create(
email="[email protected]",
@@ -1563,6 +1586,110 @@ def test_analytics_most_popular_method_filter_by_service_empty_good_flow(
self.assertEqual(response.status_code, 200)
self.assertEqual(content["data"], [])
+ def test_analytics_most_popular_method_alternate_split_services(self):
+ # Set up token
+ user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Test"
+ )
+ app_ = App.objects.create(user=user_, name="An App")
+
+ # Create some request objects
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service1",
+ method="method1",
+ queryparams="")
+
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service1",
+ method="method1",
+ queryparams="")
+
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service1",
+ method="method2",
+ queryparams="")
+
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service2",
+ method="method2",
+ queryparams="")
+
+ # Hit endpoint and check number is correct
+ request = self.factory.get(
+ '/api/analytics/methods',
+ {
+ "split_services": "true"
+ }
+ )
+ response = most_popular_method(request)
+ content = json.loads(response.content.decode())
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(content["data"], {
+ "service1": [
+ {
+ "method": "method1",
+ "count": 2,
+ },
+ {
+ "method": "method2",
+ "count": 1,
+ }
+ ],
+ "service2": [
+ {
+ "method": "method2",
+ "count": 1,
+ }
+ ]
+ })
+
+ def test_analytics_most_popular_method_alternate_no_split_services(self):
+ # Set up token
+ user_ = User.objects.create(
+ email="[email protected]",
+ cn="test",
+ given_name="Test Test"
+ )
+ app_ = App.objects.create(user=user_, name="An App")
+
+ # Create some request objects
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service1",
+ method="method1/name1",
+ queryparams="")
+
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service1",
+ method="method1/name2",
+ queryparams="")
+
+ _ = APICall.objects.create(app=app_, user=user_,
+ token_type="general",
+ service="service2",
+ method="method2",
+ queryparams="")
+
+ # Hit endpoint and check number is correct
+ request = self.factory.get(
+ '/api/analytics/methods',
+ {
+ "split_services": "false"
+ }
+ )
+ response = most_popular_method(request)
+ content = json.loads(response.content.decode())
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(content["data"], [{'method': 'method1', 'count': 2},
+ {'method': 'method2', 'count': 1}])
+
def test_analytics_users_per_app_good_flow(self):
dev_ = User.objects.create(
email="[email protected]",
| [Feature Request] Separate most popular methods by service
In the `/dashboard/api/analytics/methods` endpoint we list the most popular methods. This, however, is not useful without also knowing the service: for example, both `https://uclapi.com/timetable/data/modules` and `https://uclapi.com/timetable/data/courses/modules` are classed as the `modules` method, but they are different.
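Based on the diff and tests above, a sketch of the two shapes the endpoint's `data` field can take, with an opt-in `split_services` query parameter; the method names and counts here are illustrative:

```python
# split_services absent/false: methods aggregated across services (current behaviour)
aggregated = [
    {"method": "method1", "count": 2},
    {"method": "method2", "count": 1},
]

# split_services=true: methods grouped under the service they belong to
split_by_service = {
    "service1": [
        {"method": "method1", "count": 2},
        {"method": "method2", "count": 1},
    ],
    "service2": [
        {"method": "method2", "count": 1},
    ],
}
```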
| It seems to me there are two ways to implement this. Either break compatibility with the results that are currently being returned by the api, or add a request parameter that toggles whether to group methods by services that defaults to False. | 2020-11-14T20:10:28 |
uclapi/uclapi | 3,232 | uclapi__uclapi-3232 | [
"3230"
] | 49f85c077554fb162701a265bfe45ef1223c428b | diff --git a/backend/uclapi/oauth/views.py b/backend/uclapi/oauth/views.py
--- a/backend/uclapi/oauth/views.py
+++ b/backend/uclapi/oauth/views.py
@@ -488,7 +488,6 @@ def token(request):
)
def userdata(request, *args, **kwargs):
token = kwargs['token']
- print("Checking student status")
try:
get_student_by_upi(
token.user.employee_id
@@ -507,7 +506,9 @@ def userdata(request, *args, **kwargs):
"upi": token.user.employee_id,
"scope_number": token.scope.scope_number,
"is_student": is_student,
- "ucl_groups": token.user.raw_intranet_groups.split(';')
+ "ucl_groups": token.user.raw_intranet_groups.split(';'),
+ "sn": token.user.sn,
+ "mail": token.user.mail
}
return PrettyJsonResponse(
| diff --git a/backend/uclapi/oauth/tests.py b/backend/uclapi/oauth/tests.py
--- a/backend/uclapi/oauth/tests.py
+++ b/backend/uclapi/oauth/tests.py
@@ -7,7 +7,8 @@
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction
from django.test import Client, TestCase
-from rest_framework.test import APIRequestFactory
+from django_mock_queries.query import MockModel, MockSet
+from rest_framework.test import APIRequestFactory, APITestCase
from django.core import signing
from parameterized import parameterized
@@ -1189,3 +1190,78 @@ def test_deauthorise_no_token_for_app_and_user(self):
"token for this user, so no action was taken."
)
)
+
+
+class OAuthUserDataTestCase(APITestCase):
+ """Tests the /oauth/user/data endpoint"""
+ fake_student = MockSet(
+ MockModel(
+ qtype2='UPI'
+ )
+ )
+ studenta_objects = unittest.mock.patch(
+ 'timetable.models.StudentsA.objects',
+ fake_student
+ )
+ fake_locks = MockSet(
+ MockModel(
+ a=True,
+ b=False
+ )
+ )
+ lock_objects = unittest.mock.patch(
+ 'timetable.models.Lock.objects',
+ fake_locks
+ )
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.user = User.objects.create(
+ cn="cn",
+ department="Dept of UCL API",
+ email="[email protected]",
+ full_name="First Last",
+ given_name="First",
+ employee_id="upi",
+ raw_intranet_groups="group-all",
+ sn="Last",
+ mail="[email protected]"
+ )
+ cls.dev = User.objects.create(email="[email protected]", cn="test", given_name="Test Test")
+ cls.app = App.objects.create(user=cls.dev, name="An App")
+ cls.oauth_scope = OAuthScope.objects.create()
+ cls.oauth_token = OAuthToken.objects.create(app=cls.app, user=cls.user, scope=cls.oauth_scope)
+
+ def setUp(self):
+ self.expected_user_data = {
+ "ok": True,
+ "cn": "cn",
+ "department": "Dept of UCL API",
+ "email": "[email protected]",
+ "full_name": "First Last",
+ "given_name": "First",
+ "upi": "upi",
+ "scope_number": 0,
+ "ucl_groups": ["group-all"],
+ "sn": "Last",
+ "mail": "[email protected]"
+ }
+
+ def test_userdata_non_student(self):
+ """Tests that userdata is correctly returned for a non-student"""
+ response = self.client.get(
+ '/oauth/user/data', {'token': self.oauth_token.token, 'client_secret': self.app.client_secret})
+ self.assertEqual(response.status_code, 200)
+ self.expected_user_data['is_student'] = False
+ self.assertJSONEqual(response.content.decode('utf-8'), self.expected_user_data)
+
+ @studenta_objects
+ @lock_objects
+ def test_userdata_student(self):
+ """Tests that userdata is correctly returned for a student"""
+ response = self.client.get(
+ '/oauth/user/data', {'token': self.oauth_token.token, 'client_secret': self.app.client_secret})
+ self.assertEqual(response.status_code, 200)
+ self.expected_user_data['is_student'] = True
+ self.assertJSONEqual(response.content.decode('utf-8'), self.expected_user_data)
| [Feature Request] user data - HTTP_MAIL
**Is your feature request related to a problem? Please describe.**
At the students' union we are currently using Shibboleth, and when a user successfully logs in to the website we have access to two different emails: one is their account email, such as [email protected]; the second is a nicer email containing their name, like [email protected]. With the current UCL API we can only retrieve the first.
**Describe the solution you'd like**
We would like to be able to access this second email with the UCL API.
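The merged change above adds the second address to `/oauth/user/data` as `mail` (plus `sn`). A sketch of the resulting payload, mirroring the test fixture in the diff; values are placeholders:

```python
# Illustrative /oauth/user/data response after this change.
userdata = {
    "ok": True,
    "cn": "cn",
    "department": "Dept of UCL API",
    "email": "[email protected]",                # account-style address
    "full_name": "First Last",
    "given_name": "First",
    "upi": "upi",
    "scope_number": 0,
    "is_student": False,
    "ucl_groups": ["group-all"],
    "sn": "Last",
    "mail": "[email protected]",  # the friendlier, name-based address
}
```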
| 2021-03-13T17:25:05 |
|
uclapi/uclapi | 3,378 | uclapi__uclapi-3378 | [
"3339"
] | f17fbf7d25ed343a3ee4efbd7b64061563db7cfe | diff --git a/backend/uclapi/common/cachet.py b/backend/uclapi/common/cachet.py
--- a/backend/uclapi/common/cachet.py
+++ b/backend/uclapi/common/cachet.py
@@ -77,7 +77,8 @@ def create_incident(error_message: str, component_name: str,
"""
cachet_client = cachetclient.Client(endpoint=settings.CACHET_URL,
- api_token=settings.CACHET_TOKEN)
+ api_token=settings.CACHET_TOKEN,
+ version="1")
target_comp = _get_component(cachet_client, component_name)
@@ -112,7 +113,8 @@ def update_incident(update_message: str, component_name: str,
"""
cachet_client = cachetclient.Client(endpoint=settings.CACHET_URL,
- api_token=settings.CACHET_TOKEN)
+ api_token=settings.CACHET_TOKEN,
+ version="1")
target_comp = _get_component(cachet_client, component_name)
| [BUG] cachet URL error
Currently when sending requests to the status page we are getting the error
```Unexpected: Failed to create fixed cachet incident. Reason: ValueError("Cannot determine api version based on endpoint 'https://cachet.apps.uclapi.com/api/v1/'. If the api version is not present in the url, please supply it on client creation.")```
| 2021-06-18T10:18:41 |
||
uclapi/uclapi | 3,514 | uclapi__uclapi-3514 | [
"3195"
] | e8a4a928df20ee5e8365c75592e8d11e92408ef9 | diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -65,6 +65,12 @@ def create_app(request):
},
"webhook": {
"verification_secret": new_app.webhook.verification_secret,
+ },
+ "analytics": {
+ "requests": 0,
+ "remaining_quota": User._meta.get_field('oauth_quota').get_default(),
+ "users": 0,
+ "users_per_dept": []
}
}
})
| [BUG] Requests of Undefined when Creating a New App
**Describe the bug**
When creating a new app on the staging.ninja dashboard the redirect page fails to render.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a new app on https://staging.ninja/dashboard/
**Expected behavior**
Return to app list page.
**Screenshots**
```js
vendors-66dfc54de2ab880932e8.js:2 TypeError: Cannot read property 'requests' of undefined
at dashboard-edcd2f4fb681f0126f93.js:1
at Array.map (<anonymous>)
at A.value (dashboard-edcd2f4fb681f0126f93.js:1)
at jo (vendors-66dfc54de2ab880932e8.js:2)
at Yo (vendors-66dfc54de2ab880932e8.js:2)
at ys (vendors-66dfc54de2ab880932e8.js:2)
at El (vendors-66dfc54de2ab880932e8.js:2)
at fl (vendors-66dfc54de2ab880932e8.js:2)
at sl (vendors-66dfc54de2ab880932e8.js:2)
at vendors-66dfc54de2ab880932e8.js:2
dashboard-edcd2f4fb681f0126f93.js:1 Uncaught (in promise) TypeError: Cannot read property 'requests' of undefined
at dashboard-edcd2f4fb681f0126f93.js:1
at Array.map (<anonymous>)
at A.value (dashboard-edcd2f4fb681f0126f93.js:1)
at jo (vendors-66dfc54de2ab880932e8.js:2)
at Yo (vendors-66dfc54de2ab880932e8.js:2)
at ys (vendors-66dfc54de2ab880932e8.js:2)
at El (vendors-66dfc54de2ab880932e8.js:2)
at fl (vendors-66dfc54de2ab880932e8.js:2)
at sl (vendors-66dfc54de2ab880932e8.js:2)
at vendors-66dfc54de2ab880932e8.js:2
```
**Additional context**
Does not occur in production.
| 2021-08-30T15:02:18 |
||
uclapi/uclapi | 3,515 | uclapi__uclapi-3515 | [
"3059"
] | a6d4376d6d7d287f7a719253d3850a6d1d61d369 | diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -481,12 +481,17 @@ def get_quota_remaining(token):
if token.startswith('uclapi-user-'):
Otoken = OAuthToken.objects.filter(token__exact=token).first()
+ if Otoken is None:
+ return None
cache_key = "oauth:" + Otoken.user.email
limit = Otoken.user.oauth_quota
elif token.startswith('uclapi-'):
app = App.objects.filter(api_token__exact=token).first()
+ if app is None:
+ return None
+
cache_key = app.user.email
limit = app.user.dev_quota
| diff --git a/backend/uclapi/dashboard/tests.py b/backend/uclapi/dashboard/tests.py
--- a/backend/uclapi/dashboard/tests.py
+++ b/backend/uclapi/dashboard/tests.py
@@ -1142,6 +1142,18 @@ def test_analytics_bad_token_provided(self):
self.assertEqual(response.status_code, 400)
self.assertEqual(content["message"], "Token is invalid")
+ def test_analytics_quota_non_existent_token_provided(self):
+ request = self.factory.get(
+ '/api/analytics/quota',
+ {
+ "token": "uclapi-user-nonexistenttoken"
+ }
+ )
+ response = quota_remaining(request)
+ content = json.loads(response.content.decode())
+ self.assertEqual(response.status_code, 400)
+ self.assertEqual(content["message"], "Token is invalid")
+
def test_analytics_num_requests_good_app_token_flow(self):
# Set up token
user_ = User.objects.create(
| [BUG] AttributeError /dashboard/api/analytics/quota
**Describe the bug**
`AttributeError` `/dashboard/api/analytics/quota`
`NoneType` object has no attribute 'user'
**To Reproduce**
Steps to reproduce the behavior:
1. `https://uclapi.com/dashboard/api/analytics/quota`
2. HTTP 500
**Expected behavior**
Not crash
**Additional context**
`dashboard/api_applications.py:479`
| note to self: it's when a token is in the valid format but does not exist (but this error doesn't happen on other endpoints with the same situation) | 2021-08-30T15:26:28 |
uclapi/uclapi | 4,023 | uclapi__uclapi-4023 | [
"4017"
] | 3a9be304c1c9150f43446bc33b30d738caa660e2 | diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py
--- a/backend/uclapi/oauth/urls.py
+++ b/backend/uclapi/oauth/urls.py
@@ -4,6 +4,7 @@
urlpatterns = [
url(r'authorise/$', views.authorise),
+ url(r'authorize/$', views.authorise),
url(r'adcallback', views.adcallback),
url(r'token$', views.token),
url(r'tokens/scopes$', views.scope_map),
| [Feature Request] Add /authorize Oauth route
**Is your feature request related to a problem? Please describe.**
I have been attempting to use 'auth0-react' to implement Oauth with UCL API, however, this requires a fair bit of tinkering as
the defaults of this and many other auth libraries are to redirect to a "/authorize?client_id..." endpoint which the UCL API does not support.
While this can be avoided through customisation, would it be possible to add a "/authorize" route, as I believe this could make it easier to use some of the "plug and play" Americanized auth libraries available?
**Describe the solution you'd like**
Edit uclapi/backend/uclapi/oauth/urls.py as below
```
urlpatterns = [
url(r'authorise/$', views.authorise),
url(r'authorize/$', views.authorise), <===== Including views.authorise for the 'authorize/$' route.
url(r'shibcallback', views.shibcallback),
url(r'token$', views.token),
url(r'tokens/scopes$', views.scope_map),
url(r'tokens/test$', views.token_test),
url(r'user/allow$', views.userallow),
url(r'user/deny$', views.userdeny),
url(r'user/data$', views.userdata),
url(r'user/studentnumber$', views.get_student_number),
url(r'deauthorise$', views.deauthorise_app),
url(r'user/settings$', views.get_settings)
]
```


| 2023-02-21T11:54:44 |
||
airctic/icevision | 71 | airctic__icevision-71 | [
"70"
] | 0bd36439f3df0829b0c8f440ae6f0bb9661897a1 | diff --git a/examples/detr_wheat.py b/examples/detr_wheat.py
--- a/examples/detr_wheat.py
+++ b/examples/detr_wheat.py
@@ -30,17 +30,17 @@ def height(self, o) -> int:
def width(self, o) -> int:
return o.width
- def label(self, o) -> int:
- return 1
+ def label(self, o) -> List[int]:
+ return [1]
- def bbox(self, o) -> BBox:
- return self.bbox
+ def bbox(self, o) -> List[BBox]:
+ return [self.bbox]
- def area(self, o) -> float:
- return self.bbox.area
+ def area(self, o) -> List[float]:
+ return [self.bbox.area]
- def iscrowd(self, o) -> bool:
- return 0
+ def iscrowd(self, o) -> List[bool]:
+ return [0]
def get_datasets(args):
@@ -62,7 +62,12 @@ def get_datasets(args):
# adds new arguments to original args_parser
args_parser = get_args_parser()
args_parser.add_argument("--data_path", type=str)
+ args_parser.add_argument("--num_classes", type=int, default=None)
+ args_parser.add_argument("--fine_tune", action="store_true")
args = args_parser.parse_args()
+ if args.fine_tune:
+ args.resume = detr_pretrained_checkpoint_base()
+
train_dataset, valid_dataset = get_datasets(args)
run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)
| Hub Detr fine tuning
Following the first release of Detr on hub, it would be a very good idea to support fine tuning.
[this](https://github.com/facebookresearch/detr/issues/9) thread should be helpful, and [this](https://gist.github.com/mlk1337/651297e28199b4bb7907fc413c49f58f) gist has the high level overview on how to implement it.
| For those coming from the linked issue, I'm sorry, I accidentally linked it lol
Well, keep an eye open, this will soon be ready and I hope this will make it very easy to pretrain detr =) | 2020-06-12T19:25:33 |
|
airctic/icevision | 143 | airctic__icevision-143 | [
"141"
] | ff18261630ebbdfbc98567c3b357918338eebaf4 | diff --git a/mantisshrimp/models/mantis_module.py b/mantisshrimp/models/mantis_module.py
--- a/mantisshrimp/models/mantis_module.py
+++ b/mantisshrimp/models/mantis_module.py
@@ -6,13 +6,6 @@
class MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):
- @abstractmethod
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- """ Load weights
- """
-
@classmethod
@abstractmethod
def dataloader(cls, **kwargs) -> DataLoader:
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
@@ -54,11 +54,6 @@ def predict(self, images: List[np.ndarray], detection_threshold: float = 0.5):
images=images, convert_raw_prediction=convert_raw_prediction
)
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- return self.model.load_state_dict(state_dict=state_dict, strict=strict)
-
@property
def param_groups(self):
return self._param_groups
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
@@ -60,11 +60,6 @@ def predict(
images=images, convert_raw_prediction=convert_raw_prediction
)
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- return self.model.load_state_dict(state_dict=state_dict, strict=strict)
-
@property
def param_groups(self):
return self._param_groups
| diff --git a/tests/models/mantis_rcnn/test_mantis_rcnn_predict.py b/tests/models/mantis_rcnn/test_mantis_rcnn_predict.py
--- a/tests/models/mantis_rcnn/test_mantis_rcnn_predict.py
+++ b/tests/models/mantis_rcnn/test_mantis_rcnn_predict.py
@@ -21,7 +21,7 @@ def pretrained_state_dict():
def test_mantis_mask_rcnn_predict(sample_images, pretrained_state_dict):
model = MantisMaskRCNN(91, min_size=128, max_size=128)
- model.load_state_dict(pretrained_state_dict)
+ model.model.load_state_dict(pretrained_state_dict)
preds = model.predict(sample_images)
diff --git a/tests/models/test_parameters_splits_module_mixin.py b/tests/models/test_parameters_splits_module_mixin.py
--- a/tests/models/test_parameters_splits_module_mixin.py
+++ b/tests/models/test_parameters_splits_module_mixin.py
@@ -18,11 +18,6 @@ def forward(self, x):
def dataloader(cls, **kwargs) -> DataLoader:
return DataLoader(**kwargs)
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- pass
-
return SimpleModel()
| load_state_dict bug
## 🐛 Bug
Introducing `load_state_dict` into the models in order to avoid doing `model.model.load_state_dict` caused a bug when trying to load from a previous checkpoint.
When saving the model, the state dict keys look like `model.backbone...`, but the new `load_state_dict` expects only `backbone`, without the `model.` prefix.
More specifically, the following fails:
```python
model.load_state_dict(model.state_dict())
```
Related to #137
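The mismatch is the standard PyTorch key prefixing for nested modules: the torchvision model lives in `self.model`, so keys saved from the wrapper carry a `model.` prefix, while the overridden `load_state_dict` forwarded to the inner module and expected un-prefixed keys. A tiny sketch of the effect with toy modules (not the real MantisRCNN):

```python
import torch.nn as nn


class Wrapper(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Linear(2, 2)


w = Wrapper()
print(list(w.state_dict().keys()))        # ['model.weight', 'model.bias']
print(list(w.model.state_dict().keys()))  # ['weight', 'bias']

w.load_state_dict(w.state_dict())          # fine: plain nn.Module resolves the 'model.' prefix
# w.model.load_state_dict(w.state_dict())  # RuntimeError: unexpected key(s) 'model.weight', ...
```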
| 2020-07-02T18:54:05 |
|
airctic/icevision | 144 | airctic__icevision-144 | [
"131"
] | 99f09342add79fde5cc315f7211053f7f7e29a12 | diff --git a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
@@ -4,7 +4,6 @@
from mantisshrimp.core import *
from mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *
from mantisshrimp.models.mantis_rcnn.mantis_rcnn import *
-from mantisshrimp.backbones import *
class MantisFasterRCNN(MantisRCNN):
@@ -22,6 +21,7 @@ def __init__(
backbone: nn.Module = None,
param_groups: List[nn.Module] = None,
metrics=None,
+ remove_internal_transforms=True,
**kwargs,
):
super().__init__(metrics=metrics)
@@ -42,6 +42,9 @@ def __init__(
self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]
check_all_model_params_in_groups(self.model, self._param_groups)
+ if remove_internal_transforms:
+ self._remove_transforms_from_model(self.model)
+
def forward(self, images, targets=None):
return self.model(images, targets)
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
@@ -5,7 +5,6 @@
from mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *
from mantisshrimp.models.mantis_rcnn.mantis_rcnn import *
from mantisshrimp.models.mantis_rcnn.mantis_faster_rcnn import *
-from mantisshrimp.backbones import *
class MantisMaskRCNN(MantisRCNN):
@@ -15,6 +14,7 @@ def __init__(
num_classes: int,
backbone: nn.Module = None,
param_groups: List[nn.Module] = None,
+ remove_internal_transforms: bool = True,
**kwargs,
):
super().__init__()
@@ -41,6 +41,9 @@ def __init__(
self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]
check_all_model_params_in_groups(self.model, self.param_groups)
+ if remove_internal_transforms:
+ self._remove_transforms_from_model(self.model)
+
def forward(self, images, targets=None):
return self.model(images, targets)
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_rcnn.py
@@ -27,6 +27,16 @@ def _predict(
return [convert_raw_prediction(raw_pred) for raw_pred in raw_preds]
+ def _remove_transforms_from_model(self, model: GeneralizedRCNN):
+ def noop_normalize(image):
+ return image
+
+ def noop_resize(image, target):
+ return image, target
+
+ model.transform.normalize = noop_normalize
+ model.transform.resize = noop_resize
+
@staticmethod
def loss(preds, targs) -> Tensor:
return sum(preds.values())
| Rescale transforms and torchvision RCNNs
## 🐛 Bug
torchvision's `FasterRCNN` and `MaskRCNN` internally rescale the images via `GeneralizedRCNNTransform`.
This means that any resizing transform previously applied to the image (probably at the `Dataset` stage) will be unexpectedly overridden.
It becomes really confusing whether we should apply a resize transform via the `Dataset` or via the model; ideally we want all transforms to be applied at the `Dataset` stage. Going one step further, can the same be said about normalization?
## Solution 1
Change the default of `min_size` and `max_size` in the model to 2 and 9999 respectively, practically making them ineffective.
## Solution 2
Monkey-patch `model.transform` to a function that returns the same items it receives (being careful about the expected output types).
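A minimal sketch of what Solution 2 could look like (and roughly what ended up being merged above): replace the normalize/resize steps of `GeneralizedRCNNTransform` with no-ops, leaving the rest of the internal transform (batching) intact. The model construction below is just for illustration:

```python
from torchvision.models.detection import fasterrcnn_resnet50_fpn

model = fasterrcnn_resnet50_fpn(pretrained=False, num_classes=2)


def noop_normalize(image):
    return image


def noop_resize(image, target):
    return image, target


# GeneralizedRCNNTransform calls these per image; making them identity functions
# means resizing/normalisation now has to happen in the Dataset transforms.
model.transform.normalize = noop_normalize
model.transform.resize = noop_resize
```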
---
Solution 1 seems like a better idea, it's less magical, we can still have `min/max_size` as parameters, we just change their defaults.
---
Should we do the same with normalize, changing the mean to 0 and the std to 1? It's then clear that if we want to normalize we should do it at the `Dataset` stage.
I'm thinking this because models that we integrate in the future might not have these internal transforms, and it will be very confusing to a new user why sometimes they have to explicitly define normalization and sometimes not.
| @oke-aditya any thoughts?
The Generalized RCNN transform allows the user to pass an image of arbitrary size, and the RCNN takes care of scaling etc.
But if the user is very specific about their training image sizes, they may pass images of a specific dimension too, and it will work fine.
Yes, future models may not have the internal transforms that the current ones have. So I guess we need to be clear about this issue. I am still unsure of the solution.
| 2020-07-02T20:24:09 |
|
airctic/icevision | 189 | airctic__icevision-189 | [
"188"
] | 07aa02c7de74191767d8f3a8717e5c2657e57933 | diff --git a/mantisshrimp/datasets/coco/parsers.py b/mantisshrimp/datasets/coco/parsers.py
--- a/mantisshrimp/datasets/coco/parsers.py
+++ b/mantisshrimp/datasets/coco/parsers.py
@@ -1,9 +1,23 @@
-__all__ = ["COCOImageInfoParser", "COCOAnnotationParser"]
+__all__ = ["parser", "COCOImageInfoParser", "COCOAnnotationParser"]
from mantisshrimp.imports import *
from mantisshrimp.core import *
-from mantisshrimp.parsers.defaults import *
-from mantisshrimp.parsers.mixins import *
+from mantisshrimp.parsers import *
+
+
+def parser(
+ annotations_file: Union[str, Path], img_dir: Union[str, Path]
+) -> ParserInterface:
+ annotations_dict = json.loads(Path(annotations_file).read())
+
+ image_info_parser = COCOImageInfoParser(
+ infos=annotations_dict["images"], img_dir=img_dir
+ )
+ annotations_parser = COCOAnnotationParser(
+ annotations=annotations_dict["annotations"]
+ )
+
+ return CombinedParser(image_info_parser, annotations_parser)
class COCOImageInfoParser(DefaultImageInfoParser):
| diff --git a/mantisshrimp/test_utils/samples.py b/mantisshrimp/test_utils/samples.py
--- a/mantisshrimp/test_utils/samples.py
+++ b/mantisshrimp/test_utils/samples.py
@@ -20,12 +20,14 @@
def sample_image_info_parser():
return datasets.coco.COCOImageInfoParser(
- annotations_dict["images"], source / "images"
+ infos=annotations_dict["images"], img_dir=source / "images"
)
def sample_annotation_parser():
- return datasets.coco.COCOAnnotationParser(annotations_dict["annotations"])
+ return datasets.coco.COCOAnnotationParser(
+ annotations=annotations_dict["annotations"]
+ )
def sample_combined_parser():
diff --git a/tests/datasets/coco/__init__.py b/tests/datasets/coco/__init__.py
new file mode 100644
diff --git a/tests/datasets/coco/test_parser.py b/tests/datasets/coco/test_parser.py
new file mode 100644
--- /dev/null
+++ b/tests/datasets/coco/test_parser.py
@@ -0,0 +1,21 @@
+import pytest
+from mantisshrimp import *
+
+
+def test_coco_parser(samples_source):
+ parser = datasets.coco.parser(
+ annotations_file=samples_source / "annotations.json",
+ img_dir=samples_source / "images",
+ )
+
+ records = parser.parse()[0]
+ assert len(records) == 5
+
+ record = records[0]
+ assert record["imageid"] == 0
+ assert record["filepath"] == samples_source / "images/000000128372.jpg"
+ assert record["height"] == 427
+ assert record["width"] == 640
+
+ assert record["labels"] == [6, 1, 1, 1, 1, 1, 1, 1, 31, 31, 1, 3, 31, 1, 31, 31]
+ assert pytest.approx(record["bboxes"][0].xywh) == [0, 73.89, 416.44, 305.13]
| helper function for COCO parser
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
As described in #79, it's currently very verbose to create a COCO parser; the steps should ideally be reduced to a single function call.
**Dream API**
```python
parser = datasets.coco.parser(
annotations_file="path_to_annotations.json",
img_dir="path_to_images_dir",
)
```
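And a sketch of how it might then be used (mirroring the test added with this change; `parse()` returns one list of records per data split, a single split with the default splitter at the time):

```python
from mantisshrimp import datasets

parser = datasets.coco.parser(
    annotations_file="path_to_annotations.json",
    img_dir="path_to_images_dir",
)

records = parser.parse()[0]  # record dicts: imageid, filepath, labels, bboxes, ...
```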
| 2020-07-19T00:09:53 |
|
airctic/icevision | 339 | airctic__icevision-339 | [
"270"
] | 6eba39d3e3ce6f6e4feb803e39b200aefad37033 | diff --git a/docs/autogen.py b/docs/autogen.py
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -26,7 +26,7 @@
"mantisshrimp.data.dataset.Dataset.from_images",
],
"albumentations_tfms.md": [
- # "mantisshrimp.tfms.albumentations.aug_tfms",
+ "mantisshrimp.tfms.albumentations.aug_tfms",
"mantisshrimp.tfms.albumentations.Adapter",
],
# "coco_metric.md": [
diff --git a/mantisshrimp/imports.py b/mantisshrimp/imports.py
--- a/mantisshrimp/imports.py
+++ b/mantisshrimp/imports.py
@@ -45,3 +45,25 @@
if SoftDependencies.pytorch_lightning:
import pytorch_lightning as pl
+
+
+# TODO: Stop importing partial from fastcore and move this to utils
+class partial:
+ """ Wraps functools.partial, same functionality.
+
+ Modifies the original partial `__repr__` and `__str__` in other to fix #270
+ """
+
+ def __init__(self, func, *args, **kwargs):
+ self._partial = functools.partial(func, *args, **kwargs)
+
+ def __call__(self, *args, **kwargs):
+ return self._partial(*args, **kwargs)
+
+ def __str__(self):
+ name = self._partial.func.__name__
+ partial_str = str(self._partial)
+ return re.sub(r"<.+>", name, partial_str)
+
+ def __repr__(self):
+ return str(self)
| autogen.py fails on tfms.A.aug_tfms
## 🐛 Bug
**Describe the bug**
Running `autogen.py` fails with the stack trace attached at the bottom.
The function causing the problem is `aug_tfms` in `tfms/albumentations/tfms.py`. I'm not sure what the problem is.
```python
Removing sources folder: /home/lgvaz/git/mantisshrimp/docs/sources
Cleaning up existing sources directory.
Populating sources directory with templates.
/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/sphinx/util/inspect.py:555: RemovedInSphinx40Warning: sphinx.util.inspect.Signature() is deprecated
RemovedInSphinx40Warning)
...creating new page with autogenerated content: /home/lgvaz/git/mantisshrimp/docs/sources/parser.md
Traceback (most recent call last):
File "autogen.py", line 282, in <module>
generate(mantisshrimp_dir / "docs" / "sources")
File "autogen.py", line 226, in generate
doc_generator.generate(dest_dir)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/autogen.py", line 82, in generate
markdown_text += self._render(element)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/autogen.py", line 109, in _render
return self._render_from_object(object_, signature_override)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/autogen.py", line 116, in _render_from_object
object_, signature_override, self.max_signature_line_length
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/get_signatures.py", line 53, in get_signature
return get_function_signature(object_, override, max_line_length)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/get_signatures.py", line 37, in get_function_signature
return format_signature(signature_start, signature_end, max_line_length)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/keras_autodoc/get_signatures.py", line 68, in format_signature
formatted_fake_python_code = black.format_str(fake_python_code, mode=mode)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/black.py", line 725, in format_str
src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
File "/home/lgvaz/miniconda3/envs/mantis/lib/python3.7/site-packages/black.py", line 836, in lib2to3_parse
raise exc from None
black.InvalidInput: Cannot parse: 1:712: def xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(size, presize=None, horizontal_flip=HorizontalFlip(always_apply=False, p=0.5), shift_scale_rotate=ShiftScaleRotate(always_apply=False, p=0.5, shift_limit=(-0.0625, 0.0625), scale_limit=(-0.09999999999999998, 0.10000000000000009), rotate_limit=(-45, 45), interpolation=1, border_mode=4, value=None, mask_value=None), rgb_shift=RGBShift(always_apply=False, p=0.5, r_shift_limit=(-20, 20), g_shift_limit=(-20, 20), b_shift_limit=(-20, 20)), lightning=RandomBrightnessContrast(always_apply=False, p=0.5, brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2), brightness_by_max=True), blur=Blur(always_apply=False, p=0.5, blur_limit=(1, 3)), crop_fn=functools.partial(<class 'albumentations.augmentations.transforms.RandomSizedBBoxSafeCrop'>, p=0.5), pad=functools.partial(<class 'albumentations.augmentations.transforms.PadIfNeeded'>, border_mode=0, value=[124, 116, 104])):
```
| The problem here is that `keras-autodoc` is internally using `black` to format the code, but when using partials we have the following function signature:
```python
...pad=functools.partial(<class 'albumentations.augmentations.transforms.PadIfNeeded'>,...
```
What happens is that `<class ...>` is not valid Python, so black fails to format the generated signature.
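A small reproduction of why black chokes, assuming albumentations is installed; the fix that was merged (see the `partial` wrapper in the diff above) swaps this `__repr__` for one that prints just the class name:

```python
import functools
from albumentations import PadIfNeeded

p = functools.partial(PadIfNeeded, border_mode=0)
print(p)
# functools.partial(<class 'albumentations.augmentations.transforms.PadIfNeeded'>, border_mode=0)
# "<class ...>" is not valid Python, so black cannot parse the signature
# that keras-autodoc generates from it.
```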
A solution is to somehow monkey-patch `partial` signature to look like:
```python
functools.partial(albumentations.augmentations.transforms.PadIfNeeded...
```
| 2020-08-17T23:03:25 |
|
airctic/icevision | 408 | airctic__icevision-408 | [
"407"
] | f8e8565cfdb9a8ddc4a8bd840f97d387a04efd02 | diff --git a/docs/autogen.py b/docs/autogen.py
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -64,7 +64,9 @@
"icevision.models.rcnn.mask_rcnn.dataloaders.build_valid_batch",
"icevision.models.rcnn.mask_rcnn.dataloaders.build_infer_batch",
],
- "mask_rcnn_fastai.md": ["icevision.models.rcnn.mask_rcnn.fastai.learner.learner",],
+ "mask_rcnn_fastai.md": [
+ "icevision.models.rcnn.mask_rcnn.fastai.learner.learner",
+ ],
"mask_rcnn_lightning.md": [
"icevision.models.rcnn.mask_rcnn.lightning.model_adapter.ModelAdapter",
],
@@ -77,7 +79,9 @@
"icevision.models.efficientdet.dataloaders.build_valid_batch",
"icevision.models.efficientdet.dataloaders.build_infer_batch",
],
- "efficientdet_fastai.md": ["icevision.models.efficientdet.fastai.learner.learner",],
+ "efficientdet_fastai.md": [
+ "icevision.models.efficientdet.fastai.learner.learner",
+ ],
"efficientdet_lightning.md": [
"icevision.models.efficientdet.lightning.model_adapter.ModelAdapter",
],
@@ -258,7 +262,7 @@ def generate(dest_dir: Path):
# Copy CNAME file
shutil.copyfile(icevision_dir / "CNAME", dest_dir / "CNAME")
-
+
# Copy web manifest
shutil.copyfile("manifest.webmanifest", dest_dir / "manifest.webmanifest")
@@ -284,7 +288,8 @@ def generate(dest_dir: Path):
# Copy static .md files from the docs folder
shutil.copyfile(icevision_dir / "docs/INSTALL.md", dest_dir / "install.md")
shutil.copyfile(
- icevision_dir / "docs/HOW-TO.md", dest_dir / "how-to.md",
+ icevision_dir / "docs/HOW-TO.md",
+ dest_dir / "how-to.md",
)
shutil.copyfile(icevision_dir / "docs/ABOUT.md", dest_dir / "about.md")
@@ -299,7 +304,8 @@ def generate(dest_dir: Path):
# Copy static .md files from the other folders
shutil.copyfile(
- icevision_dir / "icevision/models/README.md", dest_dir / "model_comparison.md",
+ icevision_dir / "icevision/models/README.md",
+ dest_dir / "model_comparison.md",
)
shutil.copyfile(
@@ -323,7 +329,8 @@ def generate(dest_dir: Path):
)
shutil.copyfile(
- icevision_dir / "icevision/tfms/README.md", dest_dir / "albumentations.md",
+ icevision_dir / "icevision/tfms/README.md",
+ dest_dir / "albumentations.md",
)
# Copy .md examples files to destination examples folder
| icedata is missing in the example code snippets
## 🐛 Bug
**Describe the bug**
The example code snippets are not using the icedata package.
**Solution**
- Add `import icedata`
- Replace `load()` by `load_data`
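A sketch of the corrected snippet shape the two points above imply (the `pets` dataset name is just an example; adjust to whichever dataset the snippet uses):

```python
import icedata

# load_data() downloads/extracts the dataset and returns its directory
data_dir = icedata.pets.load_data()
```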
| Ow yeah... I totally forgot about those | 2020-09-16T13:24:47 |
|
airctic/icevision | 411 | airctic__icevision-411 | [
"406"
] | 637b26848f89f22d2c8c283baeac6cd8e30693e9 | diff --git a/docs/autogen.py b/docs/autogen.py
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -11,6 +11,7 @@
PAGES = {
"parser.md": [
"icevision.parsers.Parser",
+ "icevision.parsers.Parser.parse",
"icevision.parsers.FasterRCNN",
"icevision.parsers.MaskRCNN",
"icevision.parsers.mixins.ImageidMixin",
diff --git a/icevision/parsers/combined_parser.py b/icevision/parsers/combined_parser.py
--- a/icevision/parsers/combined_parser.py
+++ b/icevision/parsers/combined_parser.py
@@ -12,7 +12,7 @@ def __init__(self, *parsers: Parser):
self.parsers = parsers
def parse(self, data_splitter=None, idmap: IDMap = None, show_pbar: bool = True):
- data_splitter = data_splitter or SingleSplitSplitter()
+ data_splitter = data_splitter or RandomSplitter([0.8, 0.2])
idmap = idmap or IDMap()
parsers_records = [
diff --git a/icevision/parsers/parser.py b/icevision/parsers/parser.py
--- a/icevision/parsers/parser.py
+++ b/icevision/parsers/parser.py
@@ -82,8 +82,18 @@ def parse(
idmap: IDMap = None,
show_pbar: bool = True,
) -> List[List[RecordType]]:
+ """Loops through all data points parsing the required fields.
+
+ # Arguments
+ data_splitter: How to split the parsed data, defaults to a [0.8, 0.2] random split.
+ idmap: Maps from filenames to unique ids, pass an `IDMap()` if you need this information.
+ show_pbar: Whether or not to show a progress bar while parsing the data.
+
+ # Returns
+ A list of records for each split defined by `data_splitter`.
+ """
idmap = idmap or IDMap()
- data_splitter = data_splitter or SingleSplitSplitter()
+ data_splitter = data_splitter or RandomSplitter([0.8, 0.2])
records = self.parse_dicted(show_pbar=show_pbar, idmap=idmap)
splits = data_splitter(idmap=idmap)
return [[{"imageid": id, **records[id]} for id in ids] for ids in splits]
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -19,7 +19,8 @@ def coco_imageid_map():
@pytest.fixture(scope="module")
def records(coco_imageid_map):
parser = test_utils.sample_combined_parser()
- return parser.parse(idmap=coco_imageid_map)[0]
+ data_splitter = SingleSplitSplitter()
+ return parser.parse(idmap=coco_imageid_map, data_splitter=data_splitter)[0]
@pytest.fixture(scope="module")
diff --git a/tests/parsers/test_coco_parser.py b/tests/parsers/test_coco_parser.py
--- a/tests/parsers/test_coco_parser.py
+++ b/tests/parsers/test_coco_parser.py
@@ -6,7 +6,7 @@
def test_info_parser():
parser = test_utils.sample_image_info_parser()
- infos = parser.parse()[0]
+ infos = parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(infos) == 6
expected = {
"imageid": 0,
@@ -19,7 +19,7 @@ def test_info_parser():
def test_coco_annotation_parser():
parser = test_utils.sample_annotation_parser()
- annotations = parser.parse()[0]
+ annotations = parser.parse(data_splitter=SingleSplitSplitter())[0]
annotation = annotations[0]
assert len(annotations) == 5
assert annotation["imageid"] == 0
diff --git a/tests/parsers/test_voc_parsers.py b/tests/parsers/test_voc_parsers.py
--- a/tests/parsers/test_voc_parsers.py
+++ b/tests/parsers/test_voc_parsers.py
@@ -7,7 +7,7 @@ def test_voc_annotation_parser(samples_source, voc_class_map):
images_dir=samples_source / "voc/JPEGImages",
class_map=voc_class_map,
)
- records = annotation_parser.parse()[0]
+ records = annotation_parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 2
@@ -38,7 +38,7 @@ def test_voc_mask_parser(samples_source):
mask_parser = parsers.VocMaskParser(
masks_dir=samples_source / "voc/SegmentationClass"
)
- records = mask_parser.parse()[0]
+ records = mask_parser.parse(data_splitter=SingleSplitSplitter())[0]
record = records[0]
expected = {
@@ -61,7 +61,7 @@ def test_voc_combined_parser(samples_source, voc_class_map):
)
combined_parser = parsers.CombinedParser(annotation_parser, mask_parser)
- records = combined_parser.parse()[0]
+ records = combined_parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 1
| RandomSplitter as the new default for parser.parse
## 🚀 Feature
Randomly splitting data in [0.8, 0.2] can be the default for `parser.parse`
| 2020-09-16T14:46:24 |
|
airctic/icevision | 441 | airctic__icevision-441 | [
"430"
] | 4193eb0a6152a222a7a016afd4123ab8353c46a4 | diff --git a/icevision/all.py b/icevision/all.py
--- a/icevision/all.py
+++ b/icevision/all.py
@@ -1,2 +1,9 @@
from icevision.imports import *
from icevision import *
+
+# soft import icedata
+try:
+ import icedata
+except ModuleNotFoundError as e:
+ if str(e) != f"No module named 'icedata'":
+ raise e
| Add icedata to icevision.all
## 🚀 Feature
Currently to train a dataset available with icedata the following two lines are necessary:
```python
import icedata
from icevision.all import *
```
Because icedata already depends on icevision, icevision cannot depend on icedata. **But** I guess we can add icedata as a soft dependency to `.all`, we just have to be sure not to use `icedata` internally in icevision.
| 2020-09-24T01:36:09 |
||
airctic/icevision | 500 | airctic__icevision-500 | [
"478"
] | b9a06453d94cadf0ff4dabd6cd68b37fce47316d | diff --git a/icevision/models/base_show_results.py b/icevision/models/base_show_results.py
--- a/icevision/models/base_show_results.py
+++ b/icevision/models/base_show_results.py
@@ -18,7 +18,7 @@ def base_show_results(
denormalize_fn: Optional[callable] = denormalize_imagenet,
show: bool = True,
) -> None:
- samples = [dataset[i] for i in range(num_samples)]
+ samples = random.choices(dataset, k=num_samples)
batch, samples = build_infer_batch_fn(samples)
preds = predict_fn(model, batch)
| Add tutorial with hard negative samples
## 📓 Documentation Update
"how to use an image as background annotation" is a common question. We can provide a tutorial showing how to do that
### Racoon and dogs
If you train a model on the racoon dataset and show the model a picture of a dog it will classify it as a racoon. We can add images of dogs to the dataset (without any annotations) and show the difference in model performance between the two scenarios.
| 2020-10-21T01:46:14 |
||
airctic/icevision | 518 | airctic__icevision-518 | [
"513"
] | a9fbd7f6382ee269e284e7fd9e2f5dc399666a7c | diff --git a/icevision/core/mask.py b/icevision/core/mask.py
--- a/icevision/core/mask.py
+++ b/icevision/core/mask.py
@@ -58,17 +58,15 @@ def to_erles(self, h, w) -> "EncodedRLEs":
# TODO: Assert shape? (bs, height, width)
-@dataclass
class MaskArray(Mask):
"""Binary numpy array representation of a mask.
- (num_instances, height, width)
+ # Arguments
+ data: Mask array, with the dimensions: (num_instances, height, width)
"""
- data: np.ndarray
-
- def __post_init__(self):
- self.data = self.data.astype(np.uint8)
+ def __init__(self, data: np.uint8):
+ self.data = data.astype(np.uint8)
def __len__(self):
return len(self.data)
@@ -115,12 +113,15 @@ def from_masks(cls, masks: Union[EncodedRLEs, Sequence[Mask]], h: int, w: int):
return cls(np.concatenate(masks_arrays))
-@dataclass
class MaskFile(Mask):
- filepath: Union[str, Path]
+ """Holds the path to mask image file.
+
+ # Arguments
+ filepath: Path to the mask image file.
+ """
- def __post_init__(self):
- self.filepath = Path(self.filepath)
+ def __init__(self, filepath: Union[str, Path]):
+ self.filepath = Path(filepath)
def to_mask(self, h, w):
mask = open_img(self.filepath, gray=True)
@@ -135,16 +136,18 @@ def to_erles(self, h, w) -> EncodedRLEs:
return self.to_mask(h, w).to_erles(h, w)
-@dataclass
class VocMaskFile(MaskFile):
"""Extension of `MaskFile` for VOC masks.
Removes the color pallete and optionally drops void pixels.
- Args:
- drop_void (bool): drops the void pixels, which should have the value 255.
+ # Arguments
+ drop_void (bool): drops the void pixels, which should have the value 255.
+ filepath: Path to the mask image file.
"""
- drop_void: bool = True
+ def __init__(self, filepath: Union[str, Path], drop_void: bool = True):
+ super().__init__(filepath=filepath)
+ self.drop_void = drop_void
def to_mask(self, h, w) -> MaskArray:
mask_arr = np.array(Image.open(self.filepath))
@@ -157,9 +160,15 @@ def to_mask(self, h, w) -> MaskArray:
return MaskArray(masks)
-@dataclass(frozen=True)
class RLE(Mask):
- counts: List[int]
+ """Run length encoding of a mask.
+
+ Don't instantiate this class directly, instead use the classmethods
+ `from_coco` and `from_kaggle`.
+ """
+
+ def __init__(self, counts: List[int]):
+ self.counts = counts
def to_mask(self, h, w) -> "MaskArray":
return self.to_erles(h=h, w=w).to_mask(h=h, w=w)
@@ -222,9 +231,15 @@ def from_coco(cls, counts: Sequence[int]):
# return cls.from_kaggle(kaggle_counts)
-@dataclass(frozen=True)
class Polygon(Mask):
- points: List[List[int]]
+ """Polygon representation of a mask
+
+ # Arguments
+ points: The vertices of the polygon in the COCO standard format.
+ """
+
+ def __init__(self, points: List[List[int]]):
+ self.points = points
def to_mask(self, h, w):
return self.to_erles(h=h, w=w).to_mask(h=h, w=w)
| Stop using dataclass for masks
Using `@dataclass` has proven to have more disadvantages than advantages; switching to normal classes should be very straightforward.
| 2020-10-30T01:04:26 |
||
airctic/icevision | 539 | airctic__icevision-539 | [
"535"
] | 3d4badab8a084a10497bcd6dd15ddc3156de71b1 | diff --git a/icevision/models/efficientdet/model.py b/icevision/models/efficientdet/model.py
--- a/icevision/models/efficientdet/model.py
+++ b/icevision/models/efficientdet/model.py
@@ -20,7 +20,7 @@ def model(
[this](https://github.com/rwightman/efficientdet-pytorch#models) table.
num_classes: Number of classes of your dataset (including background).
img_size: Image size that will be fed to the model. Must be squared and
- divisible by 64.
+ divisible by 128.
pretrained: If True, use a pretrained backbone (on COCO).
# Returns
| EfficientDet images_size has to be divisible by 128
## 📓 Documentation Update
**What part of documentation was unclear or wrong?**
It has to be clear that the image size for the efficientdet model has to be divisible by 128
**Describe the solution you'd like**
Add this information to the `efficientdet/model.py` docstring and also make it explicit in the tutorials that use efficientdet.
| 2020-11-11T21:18:20 |
||
airctic/icevision | 564 | airctic__icevision-564 | [
"561"
] | 73e1566c09d8feb355d5fc0a3b15a87c86575687 | diff --git a/icevision/backbones/__init__.py b/icevision/backbones/__init__.py
--- a/icevision/backbones/__init__.py
+++ b/icevision/backbones/__init__.py
@@ -3,3 +3,4 @@
from icevision.backbones.mobilenet import *
import icevision.backbones.resnet_fpn
+import icevision.backbones.resnest_fpn
diff --git a/icevision/backbones/resnest_fpn.py b/icevision/backbones/resnest_fpn.py
new file mode 100644
--- /dev/null
+++ b/icevision/backbones/resnest_fpn.py
@@ -0,0 +1,120 @@
+__all__ = [
+ "resnest50",
+ "resnest101",
+ "resnest200",
+ "resnest269",
+]
+
+from icevision.imports import *
+from icevision.utils import *
+from icevision.backbones.resnet_fpn import patch_param_groups
+from torchvision.ops.feature_pyramid_network import LastLevelMaxPool
+from torchvision.models.detection.backbone_utils import (
+ resnet_fpn_backbone,
+ BackboneWithFPN,
+)
+import resnest.torch
+
+
+# ResNeSt Backbones
+class Identity(nn.Module):
+ def __init__(self):
+ super(Identity, self).__init__()
+
+ def forward(self, x):
+ return x
+
+
+def resnest_fpn_backbone(
+ resnest_name,
+ pretrained,
+ # norm_layer=misc_nn_ops.FrozenBatchNorm2d,
+ trainable_layers=3,
+ returned_layers=None,
+ extra_blocks=None,
+):
+ """
+ Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
+
+ Examples::
+
+ >>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
+ >>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)
+ >>> # get some dummy image
+ >>> x = torch.rand(1,3,64,64)
+ >>> # compute the output
+ >>> output = backbone(x)
+ >>> print([(k, v.shape) for k, v in output.items()])
+ >>> # returns
+ >>> [('0', torch.Size([1, 256, 16, 16])),
+ >>> ('1', torch.Size([1, 256, 8, 8])),
+ >>> ('2', torch.Size([1, 256, 4, 4])),
+ >>> ('3', torch.Size([1, 256, 2, 2])),
+ >>> ('pool', torch.Size([1, 256, 1, 1]))]
+
+ Arguments:
+ backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
+ 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
+ norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
+ (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
+ pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet
+ trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.
+ Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
+ """
+ backbone = resnest_name(pretrained=pretrained)
+ # backbone = resnest50(pretrained=pretrained)
+
+ backbone.fc = Identity()
+ backbone.avgpool = Identity()
+
+ # select layers that wont be frozen
+ assert trainable_layers <= 5 and trainable_layers >= 0
+ layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][
+ :trainable_layers
+ ]
+ # freeze layers only if pretrained backbone is used
+ for name, parameter in backbone.named_parameters():
+ if all([not name.startswith(layer) for layer in layers_to_train]):
+ parameter.requires_grad_(False)
+
+ if extra_blocks is None:
+ extra_blocks = LastLevelMaxPool()
+
+ if returned_layers is None:
+ returned_layers = [1, 2, 3, 4]
+ assert min(returned_layers) > 0 and max(returned_layers) < 5
+ return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}
+
+ in_channels_stage2 = backbone.inplanes // 8
+ in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
+ out_channels = 256
+ return BackboneWithFPN(
+ backbone,
+ return_layers,
+ in_channels_list,
+ out_channels,
+ extra_blocks=extra_blocks,
+ )
+
+
+def _resnest_fpn(name, pretrained: bool = True, **kwargs):
+ model = resnest_fpn_backbone(resnest_name=name, pretrained=pretrained, **kwargs)
+ patch_param_groups(model)
+
+ return model
+
+
+def resnest50(pretrained: bool = True, **kwargs):
+ return _resnest_fpn(resnest.torch.resnest50, pretrained=pretrained, **kwargs)
+
+
+def resnest101(pretrained: bool = True, **kwargs):
+ return _resnest_fpn(resnest.torch.resnest101, pretrained=pretrained, **kwargs)
+
+
+def resnest200(pretrained: bool = True, **kwargs):
+ return _resnest_fpn(resnest.torch.resnest200, pretrained=pretrained, **kwargs)
+
+
+def resnest269(pretrained: bool = True, **kwargs):
+ return _resnest_fpn(resnest.torch.resnest269, pretrained=pretrained, **kwargs)
| Add ResNeSt backbones
## 🚀 Feature
ResNeSt aims to replace ResNet backbones in both classification and object detection tasks and substantially improve accuracy.
It would be beneficial for IceVision to support the different ResNeSt architectures.
Paper: ResNeSt: Split-Attention Networks - https://arxiv.org/pdf/2004.08955.pdf
Discord discussion: https://discordapp.com/channels/735877944085446747/735877944517591151/780303631738994700
| 2020-11-25T17:08:16 |
||
airctic/icevision | 596 | airctic__icevision-596 | [
"526"
] | 46510e75d14b3bae85cc61054e9345b399b8ece5 | diff --git a/icevision/utils/imageio.py b/icevision/utils/imageio.py
--- a/icevision/utils/imageio.py
+++ b/icevision/utils/imageio.py
@@ -7,7 +7,7 @@ def open_img(fn, gray=False):
if not os.path.exists(fn):
raise ValueError(f"File {fn} does not exists")
color = cv2.COLOR_BGR2GRAY if gray else cv2.COLOR_BGR2RGB
- return cv2.cvtColor(cv2.imread(str(fn)), color)
+ return cv2.cvtColor(cv2.imread(str(fn), cv2.IMREAD_UNCHANGED), color)
def show_img(img, ax=None, show: bool = False, **kwargs):
| Augmented (albumentations) BBox outside the range [0.0, 1.0]
## 🐛 Bug
**Describe the bug**
The error occurs during training in this [Plantdoc notebook](https://colab.research.google.com/drive/1JFq-Do9JNLmjxf4gqqd0ow6_HLMZhr4D?usp=sharing):
`learn.fine_tune(20, 0.012, freeze_epochs=3)`
It occurs in:
```python
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 330, in check_bbox
"to be in the range [0.0, 1.0], got {value}.".format(bbox=bbox, name=name, value=value)
ValueError: Expected x_max for bbox (0.00038580246913580245, 0.022119341563786008, 1.4683641975308641, 0.625, 0) to be in the range [0.0, 1.0], got 1.4683641975308641.
```
Error stack:
```bash
ValueError Traceback (most recent call last)
<ipython-input-47-2d489cf4c395> in <module>()
1 from fastai.callback.tracker import SaveModelCallback
----> 2 learn.fine_tune(20, 0.012, freeze_epochs=3)
17 frames
/usr/local/lib/python3.6/dist-packages/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f
/usr/local/lib/python3.6/dist-packages/fastai/callback/schedule.py in fine_tune(self, epochs, base_lr, freeze_epochs, lr_mult, pct_start, div, **kwargs)
159 "Fine tune with `freeze` for `freeze_epochs` then with `unfreeze` from `epochs` using discriminative LR"
160 self.freeze()
--> 161 self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
162 base_lr /= 2
163 self.unfreeze()
/usr/local/lib/python3.6/dist-packages/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f
/usr/local/lib/python3.6/dist-packages/fastai/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
111 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
112 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 113 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
114
115 # Cell
/usr/local/lib/python3.6/dist-packages/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
205 self.opt.set_hypers(lr=self.lr if lr is None else lr)
206 self.n_epoch = n_epoch
--> 207 self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
208
209 def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _do_fit(self)
195 for epoch in range(self.n_epoch):
196 self.epoch=epoch
--> 197 self._with_events(self._do_epoch, 'epoch', CancelEpochException)
198
199 @log_args(but='cbs')
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _do_epoch(self)
189
190 def _do_epoch(self):
--> 191 self._do_epoch_train()
192 self._do_epoch_validate()
193
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _do_epoch_train(self)
181 def _do_epoch_train(self):
182 self.dl = self.dls.train
--> 183 self._with_events(self.all_batches, 'train', CancelTrainException)
184
185 def _do_epoch_validate(self, ds_idx=1, dl=None):
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in all_batches(self)
159 def all_batches(self):
160 self.n_iter = len(self.dl)
--> 161 for o in enumerate(self.dl): self.one_batch(*o)
162
163 def _do_one_batch(self):
/usr/local/lib/python3.6/dist-packages/fastai/data/load.py in __iter__(self)
100 self.before_iter()
101 self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
--> 102 for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
103 if self.device is not None: b = to_device(b, self.device)
104 yield self.after_batch(b)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
361
362 def __next__(self):
--> 363 data = self._next_data()
364 self._num_yielded += 1
365 if self._dataset_kind == _DatasetKind.Iterable and \
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _next_data(self)
987 else:
988 del self._task_info[idx]
--> 989 return self._process_data(data)
990
991 def _try_put_index(self):
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _process_data(self, data)
1012 self._try_put_index()
1013 if isinstance(data, ExceptionWrapper):
-> 1014 data.reraise()
1015 return data
1016
/usr/local/lib/python3.6/dist-packages/torch/_utils.py in reraise(self)
393 # (https://bugs.python.org/issue2651), so we work around it.
394 msg = KeyErrorMessage(msg)
--> 395 raise self.exc_type(msg)
ValueError: Caught ValueError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/worker.py", line 185, in _worker_loop
data = fetcher.fetch(index)
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py", line 34, in fetch
data = next(self.dataset_iter)
File "/usr/local/lib/python3.6/dist-packages/fastai/data/load.py", line 111, in create_batches
yield from map(self.do_batch, self.chunkify(res))
File "/usr/local/lib/python3.6/dist-packages/fastcore/utils.py", line 159, in chunked
res = list(itertools.islice(it, chunk_sz))
File "/usr/local/lib/python3.6/dist-packages/fastai/data/load.py", line 124, in do_item
try: return self.after_item(self.create_item(s))
File "/usr/local/lib/python3.6/dist-packages/fastai/data/load.py", line 130, in create_item
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
File "/usr/local/lib/python3.6/dist-packages/icevision/data/dataset.py", line 35, in __getitem__
data = self.tfm(data)
File "/usr/local/lib/python3.6/dist-packages/icevision/tfms/transform.py", line 13, in __call__
tfmed = self.apply(**data)
File "/usr/local/lib/python3.6/dist-packages/icevision/tfms/albumentations/tfms.py", line 110, in apply
d = self.tfms(**params)
File "/usr/local/lib/python3.6/dist-packages/albumentations/core/composition.py", line 180, in __call__
p.preprocess(data)
File "/usr/local/lib/python3.6/dist-packages/albumentations/core/utils.py", line 62, in preprocess
data[data_name] = self.check_and_convert(data[data_name], rows, cols, direction="to")
File "/usr/local/lib/python3.6/dist-packages/albumentations/core/utils.py", line 70, in check_and_convert
return self.convert_to_albumentations(data, rows, cols)
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 51, in convert_to_albumentations
return convert_bboxes_to_albumentations(data, self.params.format, rows, cols, check_validity=True)
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 303, in convert_bboxes_to_albumentations
return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 303, in <listcomp>
return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 251, in convert_bbox_to_albumentations
check_bbox(bbox)
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 330, in check_bbox
"to be in the range [0.0, 1.0], got {value}.".format(bbox=bbox, name=name, value=value)
ValueError: Expected x_max for bbox (0.00038580246913580245, 0.022119341563786008, 1.4683641975308641, 0.625, 0) to be in the range [0.0, 1.0], got 1.4683641975308641.
```
| Because this error is thrown by albumentations, there is no way to add the image filename to the stack trace
Maybe you would like to add it in the autofix step?
```
AUTOFIX-START - ️🔨 Autofixing record with imageid: 2 <FILENAME> ️🔨
```
Another issue, we cannot assume that FILENAME is always present on the record and you would still need to filter the list of records to find it.
Do you think a function `get_record_by_id(records, id)` would already solve this issue?
Yes, we should have a function `get_record_by_id(records, id)`, or have a dictionary that stores both the ids and the corresponding images.
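Something as simple as this sketch would probably cover it (illustration only, assuming the current dict-style records keyed by `imageid`):
```python
def get_record_by_id(records, imageid):
    # returns the first record whose imageid matches, or None if it is absent
    return next((r for r in records if r["imageid"] == imageid), None)
```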
is this fixed now?
Not really, are you also facing this issue?
yes.
Are you able to share the specific image that is causing the error for you? We can use that to investigate further
icevision doesn't tell us at which particular image id we have faced this error.
Hi @ai-fast-track can you please assign this one to me?
Done @FraPochetti! Thanks a lot for looking into this issue.
Brainstorming out loud here.
For this error to happen, it means that `xmax`/`ymax` must be `>` `width`/`height` of the image.
I don't see any other logical explanation.
So, it must be an annotation issue in the dataset, right?
If yes, why doesn't the `AUTOFIX` pick it up?
I ran the following snippet on the `plantdoc` records and it finds nothing.
Kind of weird.
Does albumentations stretch boxes out of the image? Seems really awkward.
```
def check_boxes(r):
w, h = r["width"], r["height"]
ok = True
for box in r["bboxes"]:
xmax, ymax = box.xmax, box.ymax
if xmax > w:
print("X is wrong", xmax, w, r["filepath"])
ok = False
if ymax > h:
print("Y is wrong", ymax, h, r["filepath"])
ok = False
return ok, r
wrong = []
for record in train_records_csv:
ok, record = check_boxes(record)
if not ok:
wrong.append(record)
```
> Does albumentations stretch boxes out of the image? Seems really awkward.
This is what I'm currently thinking, that albumentations itself is causing the issue, which indeed is really weird
> If yes, why doesn't the AUTOFIX pick it up?
Exactly! Autofix would pick it up if it was an annotation issue! The only other explanation is if we have a bug there
> The only other explanation is if we have a bug there
The code snippet I ran proves the contrary.
All records seem fine.
Super weird if albumentations is the root cause.
ok, I have nailed down one wrong image in the `plantdoc` dataset, and found something interesting.
Not sure the order of the records is going to be the same from my machine to yours (I am not shuffling), but I want to post everything for you guys to take a look as well.
```
parser_csv = PlantDocParser(train_labels, source=data_dir, class_map=class_map)
train_records_csv, valid_records_csv = parser_csv.parse(cache_filepath="plantdoc.pkl")
presize = 128
size = 64
train_tfms = tfms.A.Adapter([*tfms.A.aug_tfms(size=size, presize=presize), tfms.A.Normalize()])
train_ds = Dataset(train_records_csv, train_tfms)
train_dl = model_type.train_dl(train_ds, batch_size=1, num_workers=0, shuffle=False)
```
This is the incriminated record
```
Record:
- Image ID: 1363
- Filepath: /Users/francescopochetti/PlantDoc-Object-Detection-Dataset/TRAIN/flies.jpg
- Image size (width, height): (3888, 2592)
- Labels: [23]
- BBoxes: [<BBox (xmin:1, ymin:86, xmax:3806, ymax:2430)>]
```
which throws
```
ValueError: Expected x_max for bbox (0.00038580246913580245, 0.022119341563786008, 1.4683641975308641, 0.625, 0) to be in the range [0.0, 1.0], got 1.4683641975308641.
```
Now, if you look at the image (`id=44` in `train_records_csv`), something looks off
`show_record(train_records_csv[44], display_bbox=True, figsize=(8, 10))`

The `record` reads `- Image size (width, height): (3888, 2592)`, whereas it should clearly be `(width, height): (2592, 3888)`, i.e. inverted width and height.
The `flies.xml` annotation file seems screwed up too:
```
<annotation>
<folder> tomato leaf </folder>
<filename>flies.jpg</filename>
<path>/home/sohamp/Desktop/done/ tomato leaf /flies.jpg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>3888</width>
<height>2592</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>Tomato leaf</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>1</xmin>
<ymin>86</ymin>
<xmax>3806</xmax>
<ymax>2430</ymax>
</bndbox>
</object>
</annotation>
```
From [this](https://stackoverflow.com/questions/63947990/why-are-width-and-height-of-an-image-are-inverted-when-loading-using-pil-versus) SO thread I tried the following and it indeed seems the image is somehow rotated.

According to [this](https://stackoverflow.com/questions/13872331/rotating-an-image-with-orientation-specified-in-exif-using-python-without-pil-in) SO thread:
> If you're using Pillow >= 6.0.0, you can use the built-in ImageOps.exif_transpose function to correctly rotate an image according to its exif tag
So, long story short, some images might be rotated!
We need to find a way to rotate them back while reading them.
Not sure.
Any thoughts?
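If we go the Pillow route quoted above, the reading code could look roughly like this (just a sketch, not a final fix, since `open_img` currently goes through OpenCV):
```python
from PIL import Image, ImageOps
import numpy as np

def open_img_exif_aware(fn):
    img = Image.open(fn)
    # apply the EXIF Orientation tag to the pixel data, so that width/height
    # match what annotation tools (and the XML files) report
    img = ImageOps.exif_transpose(img)
    return np.array(img.convert("RGB"))
```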
| 2020-12-23T14:24:46 |
|
airctic/icevision | 646 | airctic__icevision-646 | [
"645"
] | 87559070e35aeeb432357836e9888086747ef830 | diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py
--- a/icevision/models/mmdet/common/bbox/single_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py
--- a/icevision/models/mmdet/common/bbox/two_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py
--- a/icevision/models/mmdet/common/mask/two_stage/model.py
+++ b/icevision/models/mmdet/common/mask/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
cfg.model.roi_head.mask_head.num_classes = num_classes - 1
if weights_path is not None:
| Allow passing a Config object to the MMDetection models
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
In the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path
**Describe the solution you'd like**
I think it would be better to pass the MMDetection config object instead to the model method. By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this:
```
cfg = Config.fromfile(cfg_filepath)
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2
```
**Additional context**
There is a discussion in our Discord forum around this issue:
https://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644
| 2021-02-19T14:15:57 |
||
airctic/icevision | 652 | airctic__icevision-652 | [
"651"
] | ca5d7cf13cb6142f6f98135cc5eb6c7941793b1d | diff --git a/icevision/models/mmdet/models/__init__.py b/icevision/models/mmdet/models/__init__.py
--- a/icevision/models/mmdet/models/__init__.py
+++ b/icevision/models/mmdet/models/__init__.py
@@ -2,6 +2,8 @@
from icevision.models.mmdet.models import faster_rcnn
from icevision.models.mmdet.models import retinanet
from icevision.models.mmdet.models import fcos
+from icevision.models.mmdet.models import cornernet
+from icevision.models.mmdet.models import centripetalnet
# segmentation
from icevision.models.mmdet.models import mask_rcnn
diff --git a/icevision/models/mmdet/models/centripetalnet/__init__.py b/icevision/models/mmdet/models/centripetalnet/__init__.py
new file mode 100644
--- /dev/null
+++ b/icevision/models/mmdet/models/centripetalnet/__init__.py
@@ -0,0 +1 @@
+from icevision.models.mmdet.common.bbox.single_stage import *
diff --git a/icevision/models/mmdet/models/cornernet/__init__.py b/icevision/models/mmdet/models/cornernet/__init__.py
new file mode 100644
--- /dev/null
+++ b/icevision/models/mmdet/models/cornernet/__init__.py
@@ -0,0 +1 @@
+from icevision.models.mmdet.common.bbox.single_stage import *
| Add support for MMDetection CornerNet and CentripetalNet
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
Add support for MMDetection CornerNet and CentripetalNet
| 2021-02-23T15:46:18 |
||
airctic/icevision | 660 | airctic__icevision-660 | [
"638"
] | b779b170997900ca2f948a064f346ef7f292a890 | diff --git a/icevision/models/ross/efficientdet/fastai/learner.py b/icevision/models/ross/efficientdet/fastai/learner.py
--- a/icevision/models/ross/efficientdet/fastai/learner.py
+++ b/icevision/models/ross/efficientdet/fastai/learner.py
@@ -34,14 +34,14 @@ def learner(
**learner_kwargs,
)
- # HACK: patch AvgLoss (in original, find_bs gives errors)
- class PatchedAvgLoss(fastai.AvgLoss):
+ # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)
+ class EffDetAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
- self.total += fastai.to_detach(learn.loss.mean()) * bs
+ bs = len(first(learn.yb)["cls"])
+ self.total += learn.to_detach(learn.loss.mean()) * bs
self.count += bs
recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
- recorder.loss = PatchedAvgLoss()
+ recorder.loss = EffDetAvgLoss()
return learn
diff --git a/icevision/models/torchvision/fastai/learner.py b/icevision/models/torchvision/fastai/learner.py
--- a/icevision/models/torchvision/fastai/learner.py
+++ b/icevision/models/torchvision/fastai/learner.py
@@ -27,7 +27,7 @@ def rcnn_learner(
# HACK: patch AvgLoss (in original, find_bs gives errors)
class RCNNAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
+ bs = len(first(learn.yb))
self.total += fastai.to_detach(learn.loss.mean()) * bs
self.count += bs
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -46,7 +46,7 @@ def fridge_ds(samples_source, fridge_class_map) -> Tuple[Dataset, Dataset]:
class_map=fridge_class_map,
)
- data_splitter = RandomSplitter([0.8, 0.2], seed=42)
+ data_splitter = RandomSplitter([0.5, 0.5], seed=42)
train_records, valid_records = parser.parse(data_splitter)
tfms_ = tfms.A.Adapter([A.Resize(IMG_SIZE, IMG_SIZE), A.Normalize()])
@@ -57,20 +57,20 @@ def fridge_ds(samples_source, fridge_class_map) -> Tuple[Dataset, Dataset]:
return train_ds, valid_ds
[email protected]()
-def fridge_efficientdet_dls(fridge_ds) -> Tuple[DataLoader, DataLoader]:
[email protected](params=[2, 3])
+def fridge_efficientdet_dls(fridge_ds, request) -> Tuple[DataLoader, DataLoader]:
train_ds, valid_ds = fridge_ds
- train_dl = efficientdet.train_dl(train_ds, batch_size=2)
- valid_dl = efficientdet.valid_dl(valid_ds, batch_size=2)
+ train_dl = efficientdet.train_dl(train_ds, batch_size=request.param)
+ valid_dl = efficientdet.valid_dl(valid_ds, batch_size=request.param)
return train_dl, valid_dl
[email protected]()
-def fridge_faster_rcnn_dls(fridge_ds) -> Tuple[DataLoader, DataLoader]:
[email protected](params=[2, 3])
+def fridge_faster_rcnn_dls(fridge_ds, request) -> Tuple[DataLoader, DataLoader]:
train_ds, valid_ds = fridge_ds
- train_dl = faster_rcnn.train_dl(train_ds, batch_size=2)
- valid_dl = faster_rcnn.valid_dl(valid_ds, batch_size=2)
+ train_dl = faster_rcnn.train_dl(train_ds, batch_size=request.param)
+ valid_dl = faster_rcnn.valid_dl(valid_ds, batch_size=request.param)
return train_dl, valid_dl
diff --git a/tests/models/mmdet/test_predict.py b/tests/models/mmdet/test_predict.py
--- a/tests/models/mmdet/test_predict.py
+++ b/tests/models/mmdet/test_predict.py
@@ -45,7 +45,7 @@ def test_mmdet_bbox_models_predict(ds, model_type, path, config, weights_path, r
def _test_preds(preds, pred_count=2, mask=False):
- assert len(preds) == pred_count
+ # assert len(preds) == pred_count
pred = preds[0].pred
assert isinstance(pred.detection.labels, list)
| fastai efficientdet fails on learn.validate() with AttributeError: 'NoneType' object has no attribute 'shape'
## 🐛 Bug
when trying to simply validate metrics for an efficientdet model with fastai
```python
KeyError: 'image_id'
```
```python
AttributeError: 'NoneType' object has no attribute 'shape'
```
it fails when trying to read the batch size automatically, in `accumulate` / `find_bs`:
```python
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
```
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://colab.research.google.com/drive/1i4aXYu4wIKA7eLUK86GwTm7lq7zku_oF?usp=sharing
| I found that applying the following patch:
```python
from fastai.learner import AvgLoss
from fastai.torch_core import find_bs
@patch
def accumulate(self:AvgLoss, learn):
#bs = find_bs(learn.yb)
bs = find_bs(learn.xb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
```
fixes the issue. I'm not sure though how we should store that patch in icevision. Any suggestions?
a linked issue: https://forums.fast.ai/t/solved-fastai2-how-to-change-the-flow-for-metrics-calculation/70851/2
I too had this issue which got solved with this patch and online help from @potipot . Thanks a lot. Hopefully, this is pushed as a PR.
@lgvaz this is still an issue. Just wanted to ask for your opinion on where to put this fastai patch?
@potipot i get access denied on the colab notebook, can you change the permissions?
Wow, this error is really subtle, especially the *why* it was not happening before.
The error was introduced by #630 but the bug was there since the beginning. To first understand it we have to take a look at how `find_bs` works:
```python
def find_bs(b):
"Recursively search the batch size of `b`."
return item_find(b).shape[0]
def item_find(x, idx=0):
"Recursively takes the `idx`-th element of `x`"
if is_listy(x): return item_find(x[idx])
if isinstance(x,dict):
key = list(x.keys())[idx] if isinstance(idx, int) else idx
return item_find(x[key])
return x
```
In our case `x` that is passed to `item_find` will be a dictionary (the effdet target), note that `find_bs` calls `item_find` with `idx=0`, so `key = list(x.keys())[**idx**]` will take the first key from the dict.
And this is what #630 changed; before, the first key in the dict was `bbox`:

And calling find_bs returns `.shape[0]` of the first prediction, which in this case is 3. That is incorrect: the batch size is actually 16. So this is a silent bug.
After the change, the first key started being `img_size`:

Now `find_bs` is called on `None`, hence the error.
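A minimal repro of that mechanism (illustration only, with toy tensors standing in for the real targets):
```python
import torch
from fastai.torch_core import find_bs

# dict ordering decides which tensor find_bs looks at
effdet_target = {"bbox": torch.zeros(3, 4), "cls": torch.zeros(16)}
print(find_bs([effdet_target]))  # -> 3, the shape of the first value, not the batch size
```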
---
The solution is to do what @potipot proposed here but without patching; instead we should do the same thing we do for torchvision models when creating the learner (`models/torchvision/fastai/learner.py`):
```python
class RCNNAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
bs = len(learn.yb)
self.total += fastai.to_detach(learn.loss.mean()) * bs
self.count += bs
recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
recorder.loss = RCNNAvgLoss()
```
We also have to check if the same happens for the mmdet models.
@potipot would you like to do a PR for this one?
@lgvaz I'm not sure what the idea behind `find_bs` is, because in the test cases it is always 1. Seems like accumulate operates on the current batch element, while the actual batch_size is 2. Copying the solution from RCNNAvgLoss works, but now I'm wondering if this isn't some other issue in fact.
In the case of rcnn_learner the results are actually different. Don't know why this is necessary in the case of rcnn, but this is what I get:

```python
In[18]: len(learn.yb), find_bs(learn.yb), learn.dls.bs
Out[18]: (1, 2, 2)
``` | 2021-03-05T10:09:07 |
airctic/icevision | 678 | airctic__icevision-678 | [
"668"
] | bf715f50a0710b258e87d7f74b0110a880fef77c | diff --git a/icevision/parsers/coco_parser.py b/icevision/parsers/coco_parser.py
--- a/icevision/parsers/coco_parser.py
+++ b/icevision/parsers/coco_parser.py
@@ -42,9 +42,13 @@ def __init__(
self._imageid2info = {o["id"]: o for o in self.annotations_dict["images"]}
categories = self.annotations_dict["categories"]
- self._cocoid2name = {o["id"]: o["name"] for o in categories}
- self._cocoid2name[0] = BACKGROUND
- self.class_map = ClassMap(self._cocoid2name.values())
+ id2class = {o["id"]: o["name"] for o in categories}
+ id2class[0] = BACKGROUND
+ # coco has non sequential ids, we fill the blanks with `None`, check #668 for more info
+ classes = [None for _ in range(max(id2class.keys()) + 1)]
+ for i, name in id2class.items():
+ classes[i] = name
+ self.class_map = ClassMap(classes)
super().__init__(record=self.template_record(), idmap=idmap)
@@ -76,8 +80,8 @@ def filepath(self, o) -> Path:
def img_size(self, o) -> ImgSize:
return get_img_size(self.filepath(o))
- def labels(self, o) -> List[Hashable]:
- return [self._cocoid2name[o["category_id"]]]
+ def labels_ids(self, o) -> List[Hashable]:
+ return [o["category_id"]]
def areas(self, o) -> List[float]:
return [o["area"]]
@@ -91,7 +95,7 @@ def parse_fields(self, o, record):
# TODO: is class_map still a issue here?
record.detect.set_class_map(self.class_map)
- record.detect.add_labels(self.labels(o))
+ record.detect.add_labels_by_id(self.labels_ids(o))
record.detect.add_areas(self.areas(o))
record.detect.add_iscrowds(self.iscrowds(o))
@@ -141,10 +145,10 @@ def keypoints(self, o) -> List[KeyPoints]:
else []
)
- def labels(self, o) -> List[Hashable]:
+ def labels_ids(self, o) -> List[Hashable]:
if sum(o["keypoints"]) <= 0:
return []
- return super().labels(o)
+ return super().labels_ids(o)
def areas(self, o) -> List[float]:
return [o["area"]] if sum(o["keypoints"]) > 0 else []
diff --git a/icevision/parsers/parser.py b/icevision/parsers/parser.py
--- a/icevision/parsers/parser.py
+++ b/icevision/parsers/parser.py
@@ -80,13 +80,12 @@ def parse_dicted(self, show_pbar: bool = True) -> Dict[int, RecordType]:
record = records[imageid]
except KeyError:
record = self.create_record()
+ # HACK: fix imageid (needs to be transformed with idmap)
+ record.set_imageid(imageid)
+ records[imageid] = record
self.parse_fields(sample, record)
- # HACK: fix imageid (needs to be transformed with idmap)
- record.set_imageid(imageid)
- records[imageid] = record
-
except AbortParseRecord as e:
logger.warning(
"Record with imageid: {} was skipped because: {}",
| diff --git a/tests/parsers/test_coco_parser.py b/tests/parsers/test_coco_parser.py
--- a/tests/parsers/test_coco_parser.py
+++ b/tests/parsers/test_coco_parser.py
@@ -34,6 +34,8 @@ def test_bbox_parser(coco_dir, coco_bbox_parser):
assert record.width == 640
assert record.height == 480
+ assert len(record.detect.class_map) == 91
+ assert record.detect.class_map.get_by_id(90) == "toothbrush"
assert record.detect.labels == [4]
assert pytest.approx(record.detect.bboxes[0].xyxy) == (
175.14,
| ClassMap with non-sequential IDs
How to handle non-sequential IDs? Example:
```python
{1: bike, 2: car, 5: motorcycle}
```
### Suggestion 1
The user can only pass a list of labels to the `ClassMap`; it's the job of the user to fill in the missing information:
```
[bike, car, None, None, motorcycle]
```
We only need to modify the coco parser for this (so it returns 90 classes instead of 80). Torchvision models and efficientdet expect coco to have 90 classes.
### Suggestion 2
Enable the `ClassMap` constructor to receive a dict and automatically fill in the missing blanks. Checks need to be implemented to make sure the keys of the dict are ints.
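A tiny sketch of the gap-filling this suggestion implies (standalone helper, not the final `ClassMap` API):
```python
def id2class_to_list(id2class: dict) -> list:
    # build a list indexed by class id, filling the missing ids with None
    classes = [None] * (max(id2class.keys()) + 1)
    for i, name in id2class.items():
        classes[i] = name
    return classes

print(id2class_to_list({1: "bike", 2: "car", 5: "motorcycle"}))
# [None, 'bike', 'car', None, None, 'motorcycle']
```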
| I'd opt for the Suggestion 1: what you see is what you get (+background :D)
The user can create a list from a non-sequential dictionary keys using `.values()` or `enumerate` | 2021-03-11T14:36:02 |
airctic/icevision | 682 | airctic__icevision-682 | [
"674"
] | b779b170997900ca2f948a064f346ef7f292a890 | diff --git a/icevision/core/record_components.py b/icevision/core/record_components.py
--- a/icevision/core/record_components.py
+++ b/icevision/core/record_components.py
@@ -14,6 +14,7 @@
"IsCrowdsRecordComponent",
"KeyPointsRecordComponent",
"ScoresRecordComponent",
+ "LossesRecordComponent",
]
from icevision.imports import *
@@ -470,3 +471,12 @@ def _repr(self) -> List[str]:
def as_dict(self) -> dict:
return {"scores": self.scores}
+
+
+class LossesRecordComponent(RecordComponent):
+ def __init__(self, task=tasks.common):
+ super().__init__(task=task)
+ self.losses = None
+
+ def set_losses(self, losses: Dict):
+ self.losses = losses
diff --git a/icevision/models/interpretation.py b/icevision/models/interpretation.py
--- a/icevision/models/interpretation.py
+++ b/icevision/models/interpretation.py
@@ -12,13 +12,14 @@
from icevision.core import *
from icevision.data import *
from icevision.visualize.show_data import show_preds
+from icevision.core.record_components import LossesRecordComponent
def get_weighted_sum(sample, weights):
loss_weighted = 0
for loss, weight in weights.items():
- loss_weighted += sample[loss] * weight
- sample["loss_weighted"] = loss_weighted
+ loss_weighted += sample.losses[loss] * weight
+ sample.losses["loss_weighted"] = loss_weighted
return sample
@@ -27,7 +28,7 @@ def sort_losses(
) -> Tuple[List[dict], List[dict], List[str]]:
by_copy = deepcopy(by)
losses_expected = [
- k for k in samples[0].keys() if "loss" in k and k != "loss_total"
+ k for k in samples[0].losses.keys() if "loss" in k and k != "loss_total"
]
if "effdet_total_loss" in losses_expected:
losses_expected.remove("effdet_total_loss")
@@ -54,14 +55,14 @@ def sort_losses(
by = "loss_weighted"
l = list(zip(samples, preds))
- l = sorted(l, key=lambda i: i[0][by], reverse=True)
+ l = sorted(l, key=lambda i: i[0].losses[by], reverse=True)
sorted_samples, sorted_preds = zip(*l)
- annotations = [el["text"] for el in sorted_samples]
+ annotations = [el.losses["text"] for el in sorted_samples]
if isinstance(by_copy, dict):
if by_copy["method"] == "weighted":
annotations = [
- f"loss_weighted: {round(s['loss_weighted'], 5)}\n" + a
+ f"loss_weighted: {round(s.losses['loss_weighted'], 5)}\n" + a
for a, s in zip(annotations, sorted_samples)
]
@@ -143,7 +144,9 @@ def _loop(self, dl, model, losses_stats, device):
losses_stats[l].append(loss[l])
loss = _prepend_str(loss, "loss")
- sample[0].update(loss)
+ loss_comp = LossesRecordComponent()
+ loss_comp.set_losses(loss)
+ sample[0].add_component(loss_comp)
samples_plus_losses.append(sample[0])
return samples_plus_losses, losses_stats
@@ -219,6 +222,7 @@ def plot_top_losses(
dl = self.infer_dl(dataset, batch_size=batch_size)
preds = self.predict_dl(model=model, infer_dl=dl)
+ preds = [p.pred for p in preds]
sorted_samples, sorted_preds, annotations = sort_losses(
samples, preds, by=sort_by
@@ -232,8 +236,12 @@ def plot_top_losses(
ann2 = "\n".join(ann[4:])
anns.append((ann1, ann2))
+ sorted_preds = [
+ Prediction(pred=p, ground_truth=s)
+ for s, p in zip(sorted_samples, sorted_preds)
+ ]
+
show_preds(
- samples=sorted_samples[:n_samples],
preds=sorted_preds[:n_samples],
annotations=anns[:n_samples],
)
@@ -247,11 +255,11 @@ def add_annotations(samples: List[dict]) -> List[dict]:
"""
for sample in samples:
text = ""
- for key in sample.keys():
+ for key in sample.losses.keys():
if "loss" in key:
- text += f"{key}: {round(sample[key], 5)}\n"
- text += f"IMG: {sample['filepath'].name}"
- sample["text"] = text
+ text += f"{key}: {round(sample.losses[key], 5)}\n"
+ text += f"IMG: {sample.filepath.name}"
+ sample.losses["text"] = text
return samples
diff --git a/icevision/models/mmdet/common/interpretation_utils.py b/icevision/models/mmdet/common/interpretation_utils.py
--- a/icevision/models/mmdet/common/interpretation_utils.py
+++ b/icevision/models/mmdet/common/interpretation_utils.py
@@ -5,6 +5,7 @@
from icevision.core import *
from icevision.data import *
from icevision.models.interpretation import _move_to_device
+from icevision.core.record_components import LossesRecordComponent
def sum_losses_mmdet(losses_dict):
@@ -33,7 +34,9 @@ def loop_mmdet(dl, model, losses_stats, device):
for l in losses_stats.keys():
losses_stats[l].append(loss[l])
- sample[0].update(loss)
+ loss_comp = LossesRecordComponent()
+ loss_comp.set_losses(loss)
+ sample[0].add_component(loss_comp)
samples_plus_losses.append(sample[0])
return samples_plus_losses, losses_stats
| diff --git a/tests/models/torchvision_models/faster_rcnn/test_show_results.py b/tests/models/torchvision_models/faster_rcnn/test_show_results.py
--- a/tests/models/torchvision_models/faster_rcnn/test_show_results.py
+++ b/tests/models/torchvision_models/faster_rcnn/test_show_results.py
@@ -64,8 +64,8 @@ def test_get_losses(fridge_faster_rcnn_model, fridge_ds):
"loss_rpn_box_reg",
"loss_total",
}
- assert "loss_box_reg" in samples[0].keys()
- assert "text" not in samples[0].keys()
+ assert "loss_box_reg" in samples[0].losses.keys()
+ assert "text" not in samples[0].losses.keys()
def test_add_annotations(fridge_faster_rcnn_model, fridge_ds):
@@ -74,5 +74,5 @@ def test_add_annotations(fridge_faster_rcnn_model, fridge_ds):
samples, _ = faster_rcnn.interp.get_losses(model, ds)
samples = add_annotations(samples)
- assert "loss_classifier" in samples[0]["text"]
- assert "IMG" in samples[0]["text"]
+ assert "loss_classifier" in samples[0].losses["text"]
+ assert "IMG" in samples[0].losses["text"]
diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py
--- a/tests/utils/test_utils.py
+++ b/tests/utils/test_utils.py
@@ -1,4 +1,5 @@
from icevision.all import *
+from icevision.core.record_components import LossesRecordComponent
def test_notnone():
@@ -61,12 +62,17 @@ def test_get_stats():
assert result == expected
-def test_sort_losses():
+def _set_losses_lrc(s):
+ br = LossesRecordComponent()
+ br.set_losses(s)
+ return br
+
+def test_sort_losses():
samples = [
- {"stuff": 0.2, "loss_total": 2, "text": "text2"},
- {"stuff": 0.1, "loss_total": 1, "text": "text1"},
- {"stuff": 0.3, "loss_total": 3, "text": "text3"},
+ _set_losses_lrc({"stuff": 0.2, "loss_total": 2, "text": "text2"}),
+ _set_losses_lrc({"stuff": 0.1, "loss_total": 1, "text": "text1"}),
+ _set_losses_lrc({"stuff": 0.3, "loss_total": 3, "text": "text3"}),
]
preds = [
@@ -89,7 +95,7 @@ def test_sort_losses():
sorted_samples, sorted_preds, annotations = sort_losses(samples, preds)
- assert sorted_samples == sorted_samples_ex
+ assert [s.losses for s in sorted_samples] == sorted_samples_ex
assert sorted_preds == sorted_preds_ex
assert annotations == ["text3", "text2", "text1"]
@@ -107,8 +113,9 @@ def test_get_weighted_sum():
"loss3": 0.25,
"loss4": 0.25,
}
+ br = _set_losses_lrc(s)
expected = {"loss1": 1, "loss2": 1, "loss3": 1, "loss4": 1, "loss_weighted": 1.0}
- result = get_weighted_sum(s, weights)
+ result = get_weighted_sum(br, weights)
- assert result["loss_weighted"] == expected["loss_weighted"]
+ assert result.losses["loss_weighted"] == expected["loss_weighted"]
| Fix plot_top_losses
`plot_top_losses` got broken because of the new refactor: the type of the data passed to it changed, so we just need to make the tests pass.
What used to be plain dict `samples` are now `Record`s and don't have a `dict`-like interface anymore.
@FraPochetti would you be interested in this one?
| Sure, but not immediately. I think it might take a couple of months or so, given I am on yolov5 now.
> Sure, but not immediately. I think it might take a couple of months or so, given I am on yolov5 now.
Okay, no problem!!
@lgvaz
Those "couple of months" that I mention are completely indicative btw :D (just wanted to be conservative with yolo).
I really hope to be done sooner than that! and in any case, this fix should not take much time so I might just slide it somewhere when I have a couple of free hours ;)
Is this just the tests that are broken or the entire functionality?
> Is this just the tests that are broken or the entire functionality?
What broke is that before `sample` was a dict and now it's a `Record`, so getting attributes with `["labels"]` and calling `.update` doesn't work anymore
But don't worry, it should be a straightforward fix on my side, I'll take a look in a few days
I prefer doing that actually 😉. It will be the occasion to check out the new API.
I will take a look and keep you posted!
I have made up my mind and decided to look into this one tonight 😁. I think it is important to have all tests passing ASAP. Yolo can wait another couple of days.
Coming back from a weekend out of town. Will check this out as soon as I step at home 😉 | 2021-03-14T22:43:20 |
airctic/icevision | 722 | airctic__icevision-722 | [
"721"
] | 99c974257ab5deb05bac616d4495d2e24242d95a | diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py
--- a/icevision/models/ross/efficientdet/backbones.py
+++ b/icevision/models/ross/efficientdet/backbones.py
@@ -1,35 +1,35 @@
__all__ = [
- "tf_efficientdet_lite0",
- "efficientdet_d0",
- "efficientdet_d1",
- "efficientdet_d2",
- "efficientdet_d3",
- "efficientdet_d4",
- "efficientdet_d5",
- "efficientdet_d6",
- "efficientdet_d7",
- "efficientdet_d7x",
+ "tf_lite0",
+ "d0",
+ "d1",
+ "d2",
+ "d3",
+ "d4",
+ "d5",
+ "d6",
+ "d7",
+ "d7x",
]
from icevision.models.ross.efficientdet.utils import *
-tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
+tf_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
-efficientdet_d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
+d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
-efficientdet_d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
+d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
-efficientdet_d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
+d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
-efficientdet_d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
+d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
-efficientdet_d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
+d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
-efficientdet_d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
+d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
-efficientdet_d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
+d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
-efficientdet_d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
+d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
-efficientdet_d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
+d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -21,9 +21,7 @@ def coco_record_id_map():
def fridge_efficientdet_model() -> nn.Module:
WEIGHTS_URL = "https://github.com/airctic/model_zoo/releases/download/m2/fridge_tf_efficientdet_lite0.zip"
# TODO: HACK 5+1 in num_classes (becaues of change in model.py)
- model = efficientdet.model(
- backbone=tf_efficientdet_lite0, num_classes=5 + 1, img_size=384
- )
+ model = efficientdet.model(backbone=tf_lite0, num_classes=5 + 1, img_size=384)
state_dict = torch.hub.load_state_dict_from_url(
WEIGHTS_URL, map_location=torch.device("cpu")
diff --git a/tests/models/efficient_det/test_model.py b/tests/models/efficient_det/test_model.py
--- a/tests/models/efficient_det/test_model.py
+++ b/tests/models/efficient_det/test_model.py
@@ -6,10 +6,10 @@
@pytest.mark.parametrize(
"backbone",
[
- tf_efficientdet_lite0,
- efficientdet_d0,
- efficientdet_d1,
- efficientdet_d2,
+ tf_lite0,
+ d0,
+ d1,
+ d2,
],
)
def test_efficient_det_param_groups(backbone):
| Rename EfficientDet Backbones
Rename the EfficientDet backbones by dropping the `efficientdet_` prefix from the names: there is no need to repeat it, since the model type already implies it.
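For illustration, after the rename a model is created with the short names; this mirrors the updated test/conftest code in this entry (the star import is assumed):

```python
from icevision.all import *  # import style assumed for illustration

# `tf_lite0` replaces the old `tf_efficientdet_lite0` name (see the updated tests above)
model = efficientdet.model(backbone=tf_lite0, num_classes=5 + 1, img_size=384)
```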
| 2021-03-22T19:40:41 |
|
airctic/icevision | 733 | airctic__icevision-733 | [
"703"
] | 9bf8576c5941eae16d58c5cdfbd6a3229f74cb3a | diff --git a/icevision/core/record_components.py b/icevision/core/record_components.py
--- a/icevision/core/record_components.py
+++ b/icevision/core/record_components.py
@@ -80,6 +80,9 @@ def __init__(self, task):
def set_class_map(self, class_map: ClassMap):
self.class_map = class_map
+ def _repr(self) -> List[str]:
+ return [f"Class Map: {self.class_map}"]
+
def as_dict(self) -> dict:
return {"class_map": self.class_map}
| Better Record __repr__ to show ClassMap when it is stored internally
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
It would be more informative to show the `class_map` when a `Record` object is storing it.
Take for example a record loaded from the main repo:
```python
from icevision.all import *
data_dir = Path("~/icevision/samples/")
class_map = icedata.coco.class_map()
parser = parsers.COCOMaskParser(annotations_filepath=data_dir/'annotations.json', img_dir=data_dir/'images')
records = parser.parse(data_splitter=SingleSplitSplitter())[0]
record = records[0]
print(record)
## Output:
BaseRecord
common:
- Filepath: /Users/rahulsomani/git/icevision-orig/samples/images/000000343934.jpg
- Image: None
- Image size ImgSize(width=640, height=480)
- Image ID: 0
detection:
- Masks: <EncodedRLEs with 1 objects>
- Labels: [4]
- Areas: [43522.80595]
- BBoxes: [<BBox (xmin:175.14, ymin:175.68, xmax:496.21999999999997, ymax:415.68)>]
- Is Crowds: [0]
```
This record internally has access to the `class_map` via `record.detection.class_map`, which is great, but this is not shown when you print the record. Additionally, if you print `record.components`, you get:
```python
{<icevision.core.record_components.AreasRecordComponent at 0x7fbb5b54a4d0>,
<icevision.core.record_components.BBoxesRecordComponent at 0x7fbb5b54acd0>,
<icevision.core.record_components.FilepathRecordComponent at 0x7fbb5b54a690>,
<icevision.core.record_components.InstancesLabelsRecordComponent at 0x7fbb5b54a7d0>,
<icevision.core.record_components.IsCrowdsRecordComponent at 0x7fbb5b54ad90>,
<icevision.core.record_components.MasksRecordComponent at 0x7fbb5b54a150>,
<icevision.core.record_components.RecordIDRecordComponent at 0x7fbb5b54a9d0>,
<icevision.core.record_components.SizeRecordComponent at 0x7fbb5b54a810>}
```
I'd have expected `ClassMapRecordComponent` to be in there as well?
| > I'd have expected ClassMapRecordComponent to be in there as well?
Hmm, yeah.. So currently what happens is that `LabelsRecordComponent` inherits from `ClassMapRecordComponent`. Do you think it's a good idea separating them? A drawback is that we would need to pass `ClassMapRecordComponent` when creating `BaseRecord`
---
To printing `class_map` that would be a good idea, and very easy to implement, we just need to add a `_repr` to `ClassMapRecordComponent`
> Do you think it's a good idea separating them?
I think it is a good idea indeed - having clarity on inspecting `.components` is great because it reduces the expectation of a dev to know what's happening behind the scenes as they can just see what components are being stored.
---
> A drawback is that we would need to pass ClassMapRecordComponent when creating BaseRecord
Right. I think that's not too bad. Alternatively, we could include `ClassMapRecordComponent` in `BaseRecord.base_components`? Can't think of any common scenario where you don't need a ClassMap (audio, video, images included)
> Alternatively, we could include ClassMapRecordComponent in BaseRecord.base_components? Can't think of any common scenario where you don't need a ClassMap (audio, video, images included)
That would not work because each `ClassMap` is specific to a task; this is also why it's not so straightforward to separate `labels` and `class_map` into two different components. We would need to do:
```python
BaseRecord(
(
FilepathRecordComponent(),
ClassMapRecordComponent(task=tasks.detection),
InstancesLabelsRecordComponent(),
BBoxesRecordComponent(),
)
)
```
And if we have classification we would need a new class map component for each.
---
Let's first address the issue of the class map not being displayed (which is a simple solution), and we can then come back to this (maybe with another, more specific issue)
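A minimal sketch of that quick fix, adding a `_repr` to `ClassMapRecordComponent` so the class map shows up in the printed record; it mirrors the patch at the top of this entry (base class and surrounding methods elided):

```python
class ClassMapRecordComponent:  # actual base class elided
    def _repr(self) -> List[str]:
        # adds e.g. "Class Map: <ClassMap: ...>" to the record's printed summary
        return [f"Class Map: {self.class_map}"]
```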
| 2021-03-27T13:14:40 |
|
airctic/icevision | 734 | airctic__icevision-734 | [
"683"
] | 9bf8576c5941eae16d58c5cdfbd6a3229f74cb3a | diff --git a/icevision/models/torchvision/utils.py b/icevision/models/torchvision/utils.py
--- a/icevision/models/torchvision/utils.py
+++ b/icevision/models/torchvision/utils.py
@@ -9,17 +9,19 @@
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
-def remove_internal_model_transforms(model: GeneralizedRCNN):
- def noop_normalize(image: Tensor) -> Tensor:
- return image
+def _noop_normalize(image: Tensor) -> Tensor:
+ return image
+
- def noop_resize(
- image: Tensor, target: Optional[Dict[str, Tensor]]
- ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
- return image, target
+def _noop_resize(
+ image: Tensor, target: Optional[Dict[str, Tensor]]
+) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
+ return image, target
- model.transform.normalize = noop_normalize
- model.transform.resize = noop_resize
+
+def remove_internal_model_transforms(model: GeneralizedRCNN):
+ model.transform.normalize = _noop_normalize
+ model.transform.resize = _noop_resize
def patch_param_groups(
| Can't save a full model using torch.save (at least with faster-RCNN)
It is not possible to save a full model using the default settings of `torch.save` (see stack trace below). This is because `remove_internal_model_transforms` uses inner functions in its implementation, and the default pickle module does not support inner functions.
Workaround: use the `dill` module instead, which does support inner functions.
Suggested fix: It does not look as if the inner functions are necessary. If they were moved to module-level functions, then the default pickle module should work.
`torch.save(model, 'mod.pth', pickle_module=pickle)` causes an error.
`torch.save(model, 'mod.pth', pickle_module=dill)` is a workaround.
**To Reproduce**
`torch.save(model, 'mod1-full.pth', pickle_module=pickle)`
results in:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-50f3761f4f3c> in <module>
----> 1 torch.save(model, 'mod1-full.pth', pickle_module=pickle)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol, _use_new_zipfile_serialization)
370 if _use_new_zipfile_serialization:
371 with _open_zipfile_writer(opened_file) as opened_zipfile:
--> 372 _save(obj, opened_zipfile, pickle_module, pickle_protocol)
373 return
374 _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in _save(obj, zip_file, pickle_module, pickle_protocol)
474 pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
475 pickler.persistent_id = persistent_id
--> 476 pickler.dump(obj)
477 data_value = data_buf.getvalue()
478 zip_file.write_record('data.pkl', data_value, len(data_value))
AttributeError: Can't pickle local object 'remove_internal_model_transforms.<locals>.noop_normalize'
```
Relevant definition:
```python
def remove_internal_model_transforms(model: GeneralizedRCNN):
    def noop_normalize(image: Tensor) -> Tensor:
        return image

    def noop_resize(
        image: Tensor, target: Optional[Dict[str, Tensor]]
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        return image, target

    model.transform.normalize = noop_normalize
    model.transform.resize = noop_resize
```
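The root cause is reproducible outside torchvision entirely: the default pickle module serializes functions by qualified name, so locally defined functions cannot be pickled while module-level ones can. A minimal illustration:

```python
import pickle

def make_noop():
    def noop(x):          # defined inside another function -> not picklable
        return x
    return noop

def module_level_noop(x):  # defined at module level -> picklable
    return x

pickle.dumps(module_level_noop)  # works
pickle.dumps(make_noop())        # AttributeError: Can't pickle local object 'make_noop.<locals>.noop'
```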
| 2021-03-27T13:23:43 |
||
airctic/icevision | 753 | airctic__icevision-753 | [
"752"
] | 508891b262a15447cd21ff80d523b029c7e89ce7 | diff --git a/docs/autogen.py b/docs/autogen.py
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -7,6 +7,7 @@
# from keras_autodoc.examples import copy_examples
import tutobooks
+from loguru import logger
PAGES = {
"parser.md": [
@@ -104,8 +105,6 @@ def copy_examples(examples_dir, destination_dir):
continue
module_path = os.path.join(examples_dir, file)
docstring, starting_line = get_module_docstring(module_path)
- print("dostring", docstring)
- print("starting_line", starting_line)
destination_file = os.path.join(destination_dir, file[:-2] + "md")
with open(destination_file, "w+", encoding="utf-8") as f_out, open(
examples_dir / file, "r+", encoding="utf-8"
@@ -129,6 +128,13 @@ def copy_examples(examples_dir, destination_dir):
f_out.write(line)
f_out.write("\n```")
+ from_to = f"{file} -> {destination_file}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Copying Examples: {}</></>",
+ from_to,
+ )
+
def get_module_docstring(filepath):
"""Extract the module docstring.
@@ -163,7 +169,7 @@ def py_to_nb_md(dest_dir):
tutobooks.py_to_md(py_path, nb_path, md_path, "templates/img")
- github_repo_dir = "airctic/icevision/blob/master/docs/"
+ github_repo_dir = "airctic/icedata/blob/master/docs/"
with open(md_path, "r") as md_file:
button_lines = [
":material-link: "
@@ -190,7 +196,11 @@ def py_to_nb_md(dest_dir):
def nb_to_md(src_dir, nb_folder, dest_dir):
notebooks_dir = src_dir / nb_folder
- print("Notebooks folder: ", notebooks_dir)
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Notebooks folder: {}</></>",
+ notebooks_dir,
+ )
for file_path in os.listdir(notebooks_dir):
dir_path = notebooks_dir
@@ -203,15 +213,27 @@ def nb_to_md(src_dir, nb_folder, dest_dir):
continue
# md_path = os.path.join(dest_dir, 'tutorial', file_name_no_ext + '.md')
+ file_name_md = file_name_no_ext + ".md"
+ # md_path = os.path.join(dest_dir, file_name_md)
md_path = os.path.join(dest_dir, file_name_no_ext + ".md")
images_path = "images"
tutobooks.nb_to_md(nb_path, md_path, images_path)
+ from_to = f"{file_name} -> {file_name_md}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Converting to Notebook: {}</></>",
+ from_to,
+ )
def examples_to_md(dest_dir):
examples_dir = icevision_dir / "examples"
- print("Examples folder: ", examples_dir)
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Examples folder: {}</></>",
+ examples_dir,
+ )
for file_path in os.listdir(examples_dir):
dir_path = examples_dir
@@ -228,6 +250,13 @@ def examples_to_md(dest_dir):
copy_examples(examples_dir, dest_dir / "examples")
+ from_to = f"{nb_path} -> {md_path}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Copying Examples: {}</></>",
+ from_to,
+ )
+
def generate(dest_dir: Path):
template_dir = icevision_dir / "docs" / "templates"
@@ -236,16 +265,27 @@ def generate(dest_dir: Path):
# Create dest_dir if doesn't exist
if os.path.exists(dest_dir):
print("Removing sources folder:", dest_dir)
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<magenta><bold>\nRemoving sources folder: {}</></>",
+ dest_dir,
+ )
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
# Copy images folder from root folder to the template images folder
copy_tree(str(icevision_dir / "images"), str(template_images_dir))
+ from_to = f"root/images -> docs/images"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>\nCopying images folder: {}</></>",
+ from_to,
+ )
# Generate APIs Documentation
doc_generator = keras_autodoc.DocumentationGenerator(
pages=PAGES,
- project_url="https://github.com/airctic/icevision/blob/master",
+ project_url="https://github.com/airctic/icedata/blob/master",
template_dir=template_dir,
examples_dir=icevision_dir / "examples",
)
@@ -256,6 +296,12 @@ def generate(dest_dir: Path):
# Copy web manifest
shutil.copyfile("manifest.webmanifest", dest_dir / "manifest.webmanifest")
+ from_to = f"root/manifest.webmanifest -> docs/manifest.webmanifest"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>\nCopying webmanifest file: {}</></>",
+ from_to,
+ )
# Auto generate the index.md file using the README.md file and the index.md file in templates folder
readme = (icevision_dir / "README.md").read_text()
@@ -263,83 +309,90 @@ def generate(dest_dir: Path):
# Search for the beginning and the end of the installation procedure to hide in Docs to avoid duplication
start = readme.find("<!-- Not included in docs - start -->")
end = readme.find("<!-- Not included in docs - end -->")
- print("\nSTART: ", start)
- print("END: ", end, "\n")
+
readme = readme.replace(readme[start:end], "")
index = (template_dir / "index.md").read_text()
index = index.replace("{{autogenerated}}", readme[readme.find("##") :])
(dest_dir / "index.md").write_text(index, encoding="utf-8")
# Copy static .md files from the root folder
- shutil.copyfile(icevision_dir / "CONTRIBUTING.md", dest_dir / "contributing.md")
- shutil.copyfile(
- icevision_dir / "CODE_OF_CONDUCT.md", dest_dir / "code_of_conduct.md"
- )
-
- # Copy static .md files from the docs folder
- shutil.copyfile(icevision_dir / "docs/INSTALL.md", dest_dir / "install.md")
- shutil.copyfile(
- icevision_dir / "docs/HOW-TO.md",
- dest_dir / "how-to.md",
- )
- shutil.copyfile(icevision_dir / "docs/ABOUT.md", dest_dir / "about.md")
-
- shutil.copyfile(icevision_dir / "docs/README.md", dest_dir / "readme_mkdocs.md")
-
- shutil.copyfile(
- icevision_dir / "docs/CHANGING-THE-COLORS.md",
- dest_dir / "changing_the_colors.md",
- )
-
- shutil.copyfile(icevision_dir / "docs/DEPLOYMENT.md", dest_dir / "deployment.md")
-
- # Copy static .md files from the other folders
- shutil.copyfile(
- icevision_dir / "icevision/models/README.md",
- dest_dir / "model_comparison.md",
- )
-
- shutil.copyfile(
- icevision_dir / "icevision/models/ross/efficientdet/README.md",
- dest_dir / "model_efficientdet.md",
- )
-
- shutil.copyfile(
- icevision_dir / "icevision/models/torchvision/faster_rcnn/README.md",
- dest_dir / "model_faster_rcnn.md",
+ dir_to_search = icevision_dir
+ fnamelist = [
+ filename for filename in os.listdir(dir_to_search) if filename.endswith(".md")
+ ]
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>\nCopying .md files root folder: {}</></>",
+ fnamelist,
)
- shutil.copyfile(
- icevision_dir / "icevision/backbones/backbones_effecientdet.md",
- dest_dir / "backbones_effecientdet.md",
- )
+ for fname in fnamelist:
+ fname_src = icevision_dir / fname
+ fname_dst = dest_dir / fname.lower()
+ shutil.copyfile(fname_src, fname_dst)
+ from_to = f"{fname} -> {fname.lower()}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<light-blue><bold>file: {}</></>",
+ from_to,
+ )
- shutil.copyfile(
- icevision_dir / "icevision/backbones/backbones_faster_mask_rcnn.md",
- dest_dir / "backbones_faster_mask_rcnn.md",
- )
-
- shutil.copyfile(
- icevision_dir / "icevision/tfms/README.md",
- dest_dir / "albumentations.md",
+ # Copy static .md files from the docs folder
+ dir_to_search = icevision_dir / "docs"
+ fnamelist = [
+ filename for filename in os.listdir(dir_to_search) if filename.endswith(".md")
+ ]
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>\nCopying .md files from the docs folder: {}</></>",
+ fnamelist,
)
-
- # Copy .md examples files to destination examples folder
- # Copy css folder
- copy_tree(str(icevision_dir / "examples"), str(dest_dir / "examples"))
+ for fname in fnamelist:
+ fname_src = dir_to_search / fname
+ fname_dst = dest_dir / fname.lower()
+ shutil.copyfile(fname_src, fname_dst)
+ from_to = f"{fname} -> {fname.lower()}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<light-blue><bold>Copying files: {}</></>",
+ from_to,
+ )
# Copy images folder from the template folder to the destination folder
- print("Template folder: ", template_images_dir)
+ # print("Template folder: ", template_images_dir)
dest_images_dir = Path(dest_dir) / "images"
# Copy images folder
copy_tree(str(template_images_dir), str(dest_images_dir))
+ from_to = f"{template_images_dir} -> {dest_images_dir}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Copying Images: {}</></>",
+ from_to,
+ )
# Copy css folder
- copy_tree(str(icevision_dir / "docs/css"), str(dest_dir / "css"))
+ css_dir_src = str(icevision_dir / "docs/css")
+ css_dir_dest = str(str(dest_dir / "css"))
+ copy_tree(css_dir_src, css_dir_dest)
+ from_to = f"{css_dir_src} -> {css_dir_dest}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Copying CSS files: {}</></>",
+ from_to,
+ )
# Copy js folder
- copy_tree(str(icevision_dir / "docs/js"), str(dest_dir / "js"))
+ # copy_tree(str(icevision_dir / "docs/js"), str(dest_dir / "js"))
+ js_dir_src = str(icevision_dir / "docs/js")
+ js_dir_dest = str(str(dest_dir / "js"))
+ copy_tree(js_dir_src, js_dir_dest)
+ from_to = f"{js_dir_src} -> {js_dir_dest}"
+ logger.opt(colors=True).log(
+ "INFO",
+ "️<green><bold>Copying JS files: {}</></>",
+ from_to,
+ )
# Generate .md files form Jupyter Notebooks located in the /notebooks folder
nb_to_md(icevision_dir, "notebooks", dest_dir)
@@ -347,8 +400,23 @@ def generate(dest_dir: Path):
# Generate .md files form Jupyter Notebooks located in the /deployment folder
nb_to_md(icevision_dir / "docs", "deployment", dest_dir)
- # Generate .md files form python files located in the /examples folder
- # examples_to_md(dest_dir)
+ # albumentations
+ shutil.copyfile(
+ icevision_dir / "icevision/tfms/README.md",
+ dest_dir / "albumentations.md",
+ )
+
+ # Models
+ shutil.copyfile(
+ icevision_dir / "icevision/models/README.md",
+ dest_dir / "models.md",
+ )
+
+ # Backbones
+ shutil.copyfile(
+ icevision_dir / "icevision/backbones/README.md",
+ dest_dir / "backbones.md",
+ )
if __name__ == "__main__":
| Docs: Remove unused sections and add Model and Backbones initial pages
## 📓 Documentation Update
Docs: Remove unused sections and add Model and Backbones initial pages:
- Automate `.md` files generation
- Add `logger` support
- Remove Preferences
- Remove Examples
- Replace all model info with a single page (a stub page for now; it will be edited later)
- Replace all backbone info with a single page (a stub page for now; it will be edited later)
| 2021-04-07T16:12:53 |
||
airctic/icevision | 755 | airctic__icevision-755 | [
"754"
] | 508891b262a15447cd21ff80d523b029c7e89ce7 | diff --git a/icevision/models/model_choice_ui.py b/icevision/models/model_choice_ui.py
--- a/icevision/models/model_choice_ui.py
+++ b/icevision/models/model_choice_ui.py
@@ -1,4 +1,4 @@
-__all__ = ["display_model_choice_ui", "lib_info", "get_model_info"]
+__all__ = ["ModelChoiceUI"]
from icevision import models
@@ -6,105 +6,124 @@
from IPython.display import display
-lib_info = {
- "lib_type": None,
- "model_type": None,
- "backbone_type": None,
- "backbone": None,
- "model_list": [],
- "backbone_list": [],
-}
-
-
-def reset_lib_info():
- lib_info.update({"model_list": []})
- lib_info.update({"backbone_list": []})
- lib_info.update({"lib_type": None})
- lib_info.update({"model_type": None})
- lib_info.update({"backbone_type": None})
- lib_info.update({"backbone": None})
-
-
-def get_model_info():
- model_type = lib_info["model_type"]
- backbone = lib_info["backbone"]
-
- return model_type, backbone
-
-
-# Creating dropdown widgets
-libraries_available = widgets.Dropdown(
- options=["MMDetection", "Ross Wightman", "Torchvision"],
- # options=[models.ross, models.torchvision],
- description="Libraries",
- disabled=False,
-)
-
-models_available = widgets.Dropdown(
- options=[""],
- description="Models",
- disabled=False,
-)
-
-backbones_available = widgets.Dropdown(
- options=[""],
- description="Backbones",
- disabled=False,
-)
-
-
-def display_model_choice_ui():
- # Observe dropdown widget changes
- libraries_available.observe(od_library_change, names="value")
- models_available.observe(od_model_change, names="value")
- backbones_available.observe(od_backbone_change, names="value")
-
- # display dropdown widgets
- display(libraries_available)
- display(models_available)
- display(backbones_available)
-
-
-def od_library_change(change):
- reset_lib_info()
- lib_name = change.new
-
- if lib_name == "Torchvision":
- lib_type = models.torchvision
- model_list = ["faster_rcnn", "retinanet", "mask_rcnn", "keypoint_rcnn"]
-
- if lib_name == "MMDetection":
- lib_type = models.mmdet.models
- model_list = ["retinanet", "faster_rcnn", "fcos", "sparse_rcnn", "mask_rcnn"]
-
- if lib_name == "Ross Wightman":
- lib_type = models.ross
- model_list = ["efficientdet"]
-
- lib_info.update({"lib_type": lib_type})
- lib_info.update({"model_list": model_list})
-
- models_available.options = model_list
-
-
-def od_model_change(change):
- model_name = change.new
- lib_type = lib_info["lib_type"]
- model_type = getattr(lib_type, model_name)
- backbone_type = getattr(model_type, "backbones")
- backbone_list = [item for item in dir(model_type.backbones) if "__" not in item]
-
- lib_info.update({"model_type": model_type})
- lib_info.update({"backbone_type": backbone_type})
- lib_info.update({"backbone_list": backbone_list})
- backbones_available.options = backbone_list
-
-
-def od_backbone_change(change):
- backbone_name = change.new
- model_type = lib_info["model_type"]
- backbone_type = lib_info["backbone_type"]
-
- backbone = getattr(backbone_type, backbone_name)
-
- lib_info.update({"backbone": backbone})
+class ModelChoiceUI:
+ def __init__(self, task="object_detection"):
+ self.task = task
+ self.reset_lib_info()
+
+ self.libraries_available = widgets.Dropdown(
+ options=[],
+ description="Libraries",
+ disabled=False,
+ )
+
+ self.models_available = widgets.Dropdown(
+ options=[""],
+ description="Models",
+ disabled=False,
+ )
+
+ self.backbones_available = widgets.Dropdown(
+ options=[""],
+ description="Backbones",
+ disabled=False,
+ )
+
+ # lib_info = {
+ # "lib_type": None,
+ # "model_type": None,
+ # "backbone_type": None,
+ # "backbone": None,
+ # "model_list": [],
+ # "backbone_list": [],
+ # }
+
+ def reset_lib_info(self):
+ self.lib_type = None
+ self.model_type = None
+ self.backbone_type = None
+ self.model_list = []
+ self.backbone_list = []
+ self.backbone = None
+
+ def get_model_info(self):
+ return self.model_type, self.backbone
+
+ # Creating dropdown widgets
+ def populate_libraries(self):
+ if self.task == "object_detection":
+ libraries_list = ["", "MMDetection", "Ross Wightman", "Torchvision"]
+ elif self.task == "mask":
+ libraries_list = ["", "MMDetection", "Torchvision"]
+ elif self.task == "keypoints":
+ libraries_list = ["", "Torchvision"]
+
+ self.libraries_available.options = libraries_list
+
+ def od_library_change(self, change):
+ lib_name = change.new
+
+ if self.task == "object_detection":
+ if lib_name == "Torchvision":
+ lib_type = models.torchvision
+ model_list = ["faster_rcnn", "retinanet"]
+
+ if lib_name == "MMDetection":
+ lib_type = models.mmdet.models
+ model_list = ["retinanet", "faster_rcnn", "fcos", "sparse_rcnn"]
+
+ if lib_name == "Ross Wightman":
+ lib_type = models.ross
+ model_list = ["efficientdet"]
+
+ elif self.task == "mask":
+ if lib_name == "Torchvision":
+ lib_type = models.torchvision
+ model_list = ["mask_rcnn"]
+
+ if lib_name == "MMDetection":
+ lib_type = models.mmdet.models
+ model_list = ["mask_rcnn"]
+
+ elif self.task == "keypoints":
+ if lib_name == "Torchvision":
+ lib_type = models.torchvision
+ model_list = ["keypoint_rcnn"]
+
+ self.lib_type = lib_type
+ self.model_list = model_list
+
+ self.models_available.options = model_list
+
+ def od_model_change(self, change):
+ model_name = change.new
+ lib_type = self.lib_type
+ model_type = getattr(lib_type, model_name)
+ backbone_type = getattr(model_type, "backbones")
+ backbone_list = [item for item in dir(model_type.backbones) if "__" not in item]
+
+ self.model_type = model_type
+ self.backbone_type = backbone_type
+ self.backbone_list = backbone_list
+ self.backbones_available.options = backbone_list
+
+ def od_backbone_change(self, change):
+ backbone_name = change.new
+ model_type = self.model_type
+ backbone_type = self.backbone_type
+
+ backbone = getattr(backbone_type, backbone_name)
+
+ self.backbone = backbone
+
+ def display(self):
+ self.populate_libraries()
+ # Observe dropdown widget changes
+ self.libraries_available.observe(self.od_library_change, names="value")
+ self.models_available.observe(self.od_model_change, names="value")
+ self.backbones_available.observe(self.od_backbone_change, names="value")
+
+ # display dropdown widgets
+ display(self.libraries_available)
+ display(self.models_available)
+ display(self.backbones_available)
| Adapt models list displayer to task
## 🚀 Feature
The current model_choice_list displays a list of all models for all tasks. We should display only the models corresponding to a specific task: Object Detection, Mask, and Keypoints.
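Based on the `ModelChoiceUI` class added in this patch, notebook usage would look roughly like this (sketch):

```python
# Sketch based on the ModelChoiceUI class introduced in this patch
ui = ModelChoiceUI(task="mask")  # only the mask-capable libraries/models are listed
ui.display()                     # renders the library / model / backbone dropdowns

# after the user makes a selection in the dropdowns:
model_type, backbone = ui.get_model_info()
```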
| 2021-04-08T02:12:43 |
||
airctic/icevision | 796 | airctic__icevision-796 | [
"788"
] | a230e24fac860dc0178179678c1892ff51794cea | diff --git a/icevision/models/ultralytics/yolov5/prediction.py b/icevision/models/ultralytics/yolov5/prediction.py
--- a/icevision/models/ultralytics/yolov5/prediction.py
+++ b/icevision/models/ultralytics/yolov5/prediction.py
@@ -19,7 +19,17 @@ def _predict_batch(
keep_images: bool = False,
device: Optional[torch.device] = None,
) -> List[Prediction]:
- device = device or model_device(model)
+ # device issue addressed on discord: https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250
+ if device is not None:
+ raise ValueError(
+ "For YOLOv5 device can only be specified during model creation, "
+ "for more info take a look at the discussion here: "
+ "https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250"
+ )
+ grid = model.model[-1].grid[-1]
+ # if `grid.numel() == 1` it means the grid isn't initialized yet and we can't
+ # trust it's device (will always be CPU)
+ device = grid.device if grid.numel() > 1 else model_device(model)
batch = batch[0].to(device)
model = model.eval().to(device)
| show_results with yolov5 + lightning training throws error
## 🐛 Bug
**Describe the bug**
`show_results` with yolo doesn't work if the model was trained with pytorch-lightning
**To Reproduce**
Train a yolo model with pytorch-lightning and try to call `show_results`
**Full stacktrace**
```python
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-19-2cf4276b061d> in <module>()
----> 1 model_type.show_results(model, valid_ds, detection_threshold=.5)
9 frames
/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/show_results.py in show_results(model, dataset, detection_threshold, num_samples, ncols, denormalize_fn, show, device)
41 show=show,
42 detection_threshold=detection_threshold,
---> 43 device=device,
44 )
45
/usr/local/lib/python3.7/dist-packages/icevision/models/base_show_results.py in base_show_results(predict_fn, model, dataset, num_samples, ncols, denormalize_fn, show, **predict_kwargs)
19 ) -> None:
20 records = random.choices(dataset, k=num_samples)
---> 21 preds = predict_fn(model, records, **predict_kwargs)
22
23 show_preds(
/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in predict(model, dataset, detection_threshold, keep_images, device)
48 detection_threshold=detection_threshold,
49 keep_images=keep_images,
---> 50 device=device,
51 )
52
/usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
24 def decorate_context(*args, **kwargs):
25 with self.__class__():
---> 26 return func(*args, **kwargs)
27 return cast(F, decorate_context)
28
/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in _predict_batch(model, batch, records, detection_threshold, keep_images, device)
24 model = model.eval().to(device)
25
---> 26 raw_preds = model(batch)[0]
27 return convert_raw_predictions(
28 batch=batch,
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x, augment, profile)
121 return torch.cat(y, 1), None # augmented inference, train
122 else:
--> 123 return self.forward_once(x, profile) # single-scale inference, train
124
125 def forward_once(self, x, profile=False):
/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward_once(self, x, profile)
137 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
138
--> 139 x = m(x) # run
140 y.append(x if m.i in self.save else None) # save output
141
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x)
52
53 y = x[i].sigmoid()
---> 54 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
55 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
56 z.append(y.view(bs, -1, self.no))
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
```
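The fix that landed (see the patch at the top of this entry) rejects an explicit `device` at prediction time and infers it from the detection head's grid instead, roughly:

```python
# Sketch of the device inference added in the patch above
grid = model.model[-1].grid[-1]
# a single-element grid means it was never initialized, so its device can't be trusted
device = grid.device if grid.numel() > 1 else model_device(model)
batch = batch[0].to(device)
```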
| My guess is that the device casting under `_predict_batch` might be incorrect
Can you please assign this one to me?
> Can you please assign this one to me?
Yes sir! Thank you! =) | 2021-04-16T13:54:43 |
|
airctic/icevision | 798 | airctic__icevision-798 | [
"797"
] | a230e24fac860dc0178179678c1892ff51794cea | diff --git a/docs/autogen.py b/docs/autogen.py
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -418,6 +418,12 @@ def generate(dest_dir: Path):
dest_dir / "backbones.md",
)
+ # README DOCS
+ shutil.copyfile(
+ icevision_dir / "docs/README.md",
+ dest_dir / "readme_mkdocs.md",
+ )
+
if __name__ == "__main__":
generate(icevision_dir / "docs" / "sources")
| Fix the readme docs and code snippet links
The readme docs link points to a 404 page.
The code snippet is out of date.
| 2021-04-16T14:21:41 |
||
airctic/icevision | 821 | airctic__icevision-821 | [
"819"
] | 82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4 | diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py
--- a/icevision/core/class_map.py
+++ b/icevision/core/class_map.py
@@ -44,7 +44,13 @@ def get_by_name(self, name: str) -> int:
else:
raise e
- def add_name(self, name) -> int:
+ def add_name(self, name: str) -> int:
+ # Raise error if trying to add duplicate value
+ if name in self._id2class:
+ raise ValueError(
+ f"'{name}' already exists in the ClassMap. You can only add new labels that are unique"
+ )
+
self._id2class.append(name)
id = len(self._class2id)
self._class2id[name] = id
| diff --git a/tests/core/test_class_map.py b/tests/core/test_class_map.py
--- a/tests/core/test_class_map.py
+++ b/tests/core/test_class_map.py
@@ -26,3 +26,6 @@ def test_class_map_safety(classes):
class_map = ClassMap(classes, background=None)
classes.insert(0, "x")
assert class_map.get_by_id(0) == "a"
+
+ with pytest.raises(ValueError):
+ class_map.add_name(class_map._id2class[0])
| Adding Non Unique Names To ClassMap Must Be Illegal
Currently, `add_name` doesn't check if the added `name` is a duplicate or not. This should be illegal, as classes must be unique
https://github.com/airctic/icevision/blob/82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4/icevision/core/class_map.py#L47-L51
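For illustration, with the pre-fix implementation linked above a duplicate name is silently appended, leaving `_id2class` and `_class2id` inconsistent (sketch; the import path is assumed):

```python
from icevision.core import ClassMap  # import path assumed for illustration

class_map = ClassMap(["cat", "dog"])
class_map.add_name("cat")  # before the fix: no error, "cat" is appended to _id2class again
                           # and _class2id["cat"] is silently remapped to the new id
```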
| valid point! up for a PR rahuuuul? =) | 2021-04-24T15:41:08 |
airctic/icevision | 827 | airctic__icevision-827 | [
"785"
] | 5b11cf8a41c9d372d016229ce4673e6dabfac701 | diff --git a/icevision/visualize/draw_data.py b/icevision/visualize/draw_data.py
--- a/icevision/visualize/draw_data.py
+++ b/icevision/visualize/draw_data.py
@@ -48,6 +48,10 @@ def draw_sample(
# Args for plotting specific labels
exclude_labels: List[str] = [],
include_only: List[str] = None,
+ multiple_classification_spacing_factor: float = 1.05,
+ dynamic_font_size_div_factor: float = 20.0,
+ include_classification_task_names: bool = True,
+ include_instances_task_names: bool = False,
) -> Union[np.ndarray, PIL.Image.Image]:
"""
Selected kwargs:
@@ -71,6 +75,11 @@ def draw_sample(
precedence over `exclude_labels` (?)
"""
img = sample.img.copy()
+ num_classification_plotted = 0
+
+ # Dynamic font size based on image height
+ if font_size is None:
+ font_size = sample.img_size.height / dynamic_font_size_div_factor
if denormalize_fn is not None:
img = denormalize_fn(img)
@@ -81,6 +90,17 @@ def draw_sample(
# `sample.common`. This is a foundational assumption? #NOTE
class_map = getattr(composite, "class_map", None)
+ if composite.get_component_by_type(ClassificationLabelsRecordComponent):
+ x = 0
+ y = (
+ font_size
+ * num_classification_plotted
+ * multiple_classification_spacing_factor
+ )
+ num_classification_plotted += 1
+ else:
+ x, y = None, None
+
# HACK
if hasattr(composite, "masks"):
masks = composite.masks.to_mask(h=sample.height, w=sample.width)
@@ -127,6 +147,16 @@ def draw_sample(
if display_keypoints and keypoints is not None:
img = draw_keypoints(img=img, kps=keypoints, color=color)
if display_label and label is not None:
+ prefix = ""
+ if include_classification_task_names:
+ if composite.get_component_by_type(
+ ClassificationLabelsRecordComponent
+ ):
+ prefix = prettify_func(task) + ": "
+ if include_instances_task_names:
+ if composite.get_component_by_type(InstancesLabelsRecordComponent):
+ prefix = prettify_func(task) + ": "
+
img = draw_label(
img=img,
label=label,
@@ -144,6 +174,9 @@ def draw_sample(
prettify=prettify,
prettify_func=prettify_func,
return_as_pil_img=False, # should this always be False??
+ prefix=prefix,
+ x=x,
+ y=y,
)
if return_as_pil_img:
# may or may not be a PIL Image based on `display_label`
@@ -155,7 +188,7 @@ def draw_sample(
def draw_label(
img: np.ndarray,
- label: int,
+ label: Union[int, str],
score: Optional[float],
color: Union[np.ndarray, list, tuple],
border_color: Union[np.ndarray, list, tuple],
@@ -170,14 +203,19 @@ def draw_label(
pad_width_factor=0.02,
pad_height_factor=0.005,
thin_border=True,
+ x: Optional[int] = None,
+ y: Optional[int] = None,
+ prefix: str = "",
) -> Union[np.ndarray, PIL.Image.Image]:
# finds label position based on bbox or mask
- if bbox is not None:
- x, y, _, _ = bbox.xyxy
- elif mask is not None:
- y, x = np.unravel_index(mask.data.argmax(), mask.data.shape)
- else:
- x, y = 0, 0
+ if x is None or y is None:
+ # print(f"X: {x}, Y: {y}")
+ if bbox is not None:
+ x, y, _, _ = bbox.xyxy
+ elif mask is not None:
+ y, x = np.unravel_index(mask.data.argmax(), mask.data.shape)
+ else:
+ x, y = 0, 0
if class_map is not None:
if isinstance(label, int):
@@ -190,6 +228,7 @@ def draw_label(
caption = str(label)
if prettify:
# We could introduce a callback here for more complex label renaming
+ caption = prefix + caption
caption = prettify_func(caption)
# Append label confidence to caption if applicable
| Dynamic font size for labels
## 🚀 Feature
Dynamically adjust the label font size when displaying records. We can automatically determine the font size based on the width and height of the bbox.
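The patch above ends up scaling the font with the image height rather than per-bbox; the core of it is just:

```python
# Sketch of the dynamic font size from the patch above
if font_size is None:
    font_size = sample.img_size.height / dynamic_font_size_div_factor  # default factor: 20.0
```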
| Happy to take care of this.
@elmourr thank you! | 2021-04-28T16:35:13 |
|
airctic/icevision | 870 | airctic__icevision-870 | [
"869"
] | 416555ecf221c8d3aae03987b109b13e9883e506 | diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py
--- a/icevision/models/ross/efficientdet/backbones.py
+++ b/icevision/models/ross/efficientdet/backbones.py
@@ -21,6 +21,12 @@
"d6",
"d7",
"d7x",
+ "tf_d0_ap",
+ "tf_d1_ap",
+ "tf_d2_ap",
+ "tf_d3_ap",
+ "tf_d4_ap",
+ "tf_d5_ap",
]
from icevision.models.ross.efficientdet.utils import *
@@ -50,3 +56,10 @@
d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
+
+tf_d0_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d0_ap")
+tf_d1_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d1_ap")
+tf_d2_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d2_ap")
+tf_d3_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d3_ap")
+tf_d4_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d4_ap")
+tf_d5_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d5_ap")
| diff --git a/tests/models/efficient_det/test_model.py b/tests/models/efficient_det/test_model.py
--- a/tests/models/efficient_det/test_model.py
+++ b/tests/models/efficient_det/test_model.py
@@ -10,6 +10,9 @@
d0,
d1,
d2,
+ tf_d0_ap,
+ tf_d1_ap,
+ tf_d2_ap,
tf_d0,
tf_d1,
tf_d2,
| Add EfficientDet AdvProp-AA
## 🚀 Feature
Add EfficientDet AdvProp-AA pretrained backbones for D0-D5
See https://github.com/google/automl/blob/master/efficientdet/Det-AdvProp.md
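With the configs added in this patch, creating an AdvProp-AA model could look roughly like this (sketch; the `model()` signature is taken from the other EfficientDet entries in this document, and `num_classes`/`img_size` are illustrative):

```python
from icevision.all import *  # import style assumed for illustration

model = efficientdet.model(
    backbone=efficientdet.backbones.tf_d0_ap,  # one of the AdvProp-AA configs added here
    num_classes=2,                             # illustrative
    img_size=512,                              # illustrative
)
```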
| 2021-06-25T00:19:26 |
|
airctic/icevision | 878 | airctic__icevision-878 | [
"877"
] | c5c9abc9f741fea3053086c9e779b6ce66190bc6 | diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py
--- a/icevision/models/mmdet/download_configs.py
+++ b/icevision/models/mmdet/download_configs.py
@@ -9,12 +9,26 @@
def download_mmdet_configs() -> Path:
save_dir = get_root_dir() / f"mmdetection_configs"
- save_dir.mkdir(parents=True, exist_ok=True)
+ mmdet_config_path = save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
download_path = save_dir / f"{VERSION}.zip"
- if not download_path.exists():
- logger.info("Downloading mmdet configs")
- download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+ if mmdet_config_path.exists():
+ logger.info(
+ f"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}"
+ )
+ elif download_path.exists():
+ # The zip file was downloaded by not extracted yet
+ # Extract zip file
+ logger.info(f"Extracting the {VERSION}.zip file.")
+ save_dir = Path(download_path).parent
+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))
+ else:
+ save_dir.mkdir(parents=True, exist_ok=True)
- return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
+ download_path = save_dir / f"{VERSION}.zip"
+ if not download_path.exists():
+ logger.info("Downloading mmdet configs")
+ download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+
+ return mmdet_config_path
| Fix download_mmdet_configs
No need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.
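Roughly, the patched logic checks for the extracted config folder first, then for a previously downloaded zip, and only downloads as a last resort (sketch mirroring the patch above):

```python
# Sketch of the check-before-download logic from the patch above
if mmdet_config_path.exists():
    pass  # configs already extracted, nothing to do
elif download_path.exists():
    shutil.unpack_archive(filename=str(download_path), extract_dir=str(download_path.parent))
else:
    download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
```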
| 2021-07-05T22:03:12 |
||
airctic/icevision | 883 | airctic__icevision-883 | [
"882"
] | d731a1c3d4a99c9bc9a887fd06c6fd82f0b1e76b | diff --git a/icevision/models/ross/efficientdet/model.py b/icevision/models/ross/efficientdet/model.py
--- a/icevision/models/ross/efficientdet/model.py
+++ b/icevision/models/ross/efficientdet/model.py
@@ -13,6 +13,7 @@ def model(
backbone: EfficientDetBackboneConfig,
num_classes: int,
img_size: int,
+ **kwargs,
) -> nn.Module:
"""Creates the efficientdet model specified by `model_name`.
@@ -39,6 +40,7 @@ def model(
bench_labeler=True,
num_classes=num_classes - 1,
pretrained=backbone.pretrained,
+ **kwargs,
)
# TODO: Break down param groups for backbone
| Add kwargs to EfficientDet model() method
Add kwargs to the EfficientDet model() method. This will allow passing the `pretrained_backbone` argument to EfficientDet's `create_model_from_config()` method, which will prevent loading pretrained weights if the user wishes to do so.
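With the kwargs forwarded, skipping the pretrained backbone weights would look roughly like this (sketch; `pretrained_backbone` is simply passed through to the underlying effdet factory as described above):

```python
# Sketch: extra kwargs are now forwarded by efficientdet.model()
model = efficientdet.model(
    backbone=efficientdet.backbones.tf_lite0,
    num_classes=6,                # illustrative
    img_size=384,                 # illustrative
    pretrained_backbone=False,    # forwarded to the underlying effdet factory
)
```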
| 2021-07-06T23:30:44 |
||
airctic/icevision | 894 | airctic__icevision-894 | [
"889"
] | 0088ad2f1b933a054edee844424f2b48b879579f | diff --git a/icevision/core/record_components.py b/icevision/core/record_components.py
--- a/icevision/core/record_components.py
+++ b/icevision/core/record_components.py
@@ -115,7 +115,7 @@ def set_img(self, img: Union[PIL.Image.Image, np.ndarray]):
assert isinstance(img, (PIL.Image.Image, np.ndarray))
self.img = img
if isinstance(img, PIL.Image.Image):
- height, width = img.shape
+ width, height = img.size
elif isinstance(img, np.ndarray):
# else:
height, width, _ = self.img.shape
@@ -136,7 +136,7 @@ def _repr(self) -> List[str]:
)
return [f"Img: {width}x{height}x{channels} <np.ndarray> Image"]
elif isinstance(self.img, PIL.Image.Image):
- height, width = self.img.shape
+ width, height = self.img.size
return [f"Img: {width}x{height} <PIL.Image; mode='{self.img.mode}'>"]
else:
return [f"Img: {self.img}"]
| Pillow Image class has no attribute shape
## 🐛 Bug
**Describe the bug**
The Pillow `Image` class has no `shape` attribute. When using the `open_image` function from `utils/image_io.py`, the `shape` property is not defined for the PIL `Image` class.
This changed in 0.8.1, where we return a PIL image instead of a numpy array. To fix this, one should access the `Image.height` and `Image.width` fields instead, or read these values from the `Image.size` property.
**Expected behavior**
`show_records` should be able to open the image without issues.
**Screenshots**
```python
show_records(records[:1])
```

**Desktop (please complete the following information):**
- OS: Windows 10
**Additional context**
https://github.com/airctic/icevision/blob/d731a1c3d4a99c9bc9a887fd06c6fd82f0b1e76b/icevision/core/record_components.py#L118
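For reference, the difference between the two APIs (standard PIL/numpy behaviour, shown as a small illustration):

```python
import numpy as np
import PIL.Image

img = PIL.Image.open("some_image.jpg")  # path is illustrative
width, height = img.size                # PIL: (width, height); there is no `.shape`

arr = np.array(img)
height, width = arr.shape[:2]           # numpy: (height, width[, channels])
```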
| this might be coming from the python poetry installation file. It is reproducible if icevision=0.8.1 is installed without specifying [all] egg
| 2021-07-14T19:48:08 |
|
airctic/icevision | 904 | airctic__icevision-904 | [
"903"
] | 2d91eacfab7fcaf09c93352f1e7816ccb2c252b9 | diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py
--- a/icevision/models/mmdet/common/bbox/single_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py
@@ -14,6 +14,7 @@ def model(
num_classes: int,
checkpoints_path: Optional[Union[str, Path]] = "checkpoints",
force_download=False,
+ cfg_options=None,
) -> nn.Module:
return build_model(
@@ -23,4 +24,5 @@ def model(
pretrained=backbone.pretrained,
checkpoints_path=checkpoints_path,
force_download=force_download,
+ cfg_options=cfg_options,
)
diff --git a/icevision/models/mmdet/common/utils.py b/icevision/models/mmdet/common/utils.py
--- a/icevision/models/mmdet/common/utils.py
+++ b/icevision/models/mmdet/common/utils.py
@@ -36,6 +36,7 @@ def build_model(
pretrained: bool = True,
checkpoints_path: Optional[Union[str, Path]] = "checkpoints",
force_download=False,
+ cfg_options=None,
) -> nn.Module:
cfg, weights_path = create_model_config(
@@ -43,6 +44,7 @@ def build_model(
pretrained=pretrained,
checkpoints_path=checkpoints_path,
force_download=force_download,
+ cfg_options=cfg_options,
)
if model_type == "one_stage_detector_bbox":
@@ -70,5 +72,7 @@ def build_model(
load_checkpoint(_model, str(weights_path))
_model.param_groups = MethodType(param_groups, _model)
+ _model.cfg = cfg # save the config in the model for convenience
+ _model.weights_path = weights_path # save the model.weights_path in case we want to rebuild the model after updating its attributes
return _model
diff --git a/icevision/models/mmdet/utils.py b/icevision/models/mmdet/utils.py
--- a/icevision/models/mmdet/utils.py
+++ b/icevision/models/mmdet/utils.py
@@ -61,6 +61,7 @@ def create_model_config(
pretrained: bool = True,
checkpoints_path: Optional[Union[str, Path]] = "checkpoints",
force_download=False,
+ cfg_options=None,
):
model_name = backbone.model_name
@@ -81,4 +82,7 @@ def create_model_config(
cfg = Config.fromfile(config_path)
+ if cfg_options is not None:
+ cfg.merge_from_dict(cfg_options)
+
return cfg, weights_path
| diff --git a/tests/models/mmdet/test_model.py b/tests/models/mmdet/test_model.py
--- a/tests/models/mmdet/test_model.py
+++ b/tests/models/mmdet/test_model.py
@@ -3,14 +3,25 @@
@pytest.mark.parametrize(
- "ds, model_type, pretrained",
+ "ds, model_type, pretrained, cfg_options",
(
- ("fridge_ds", models.mmdet.retinanet, True),
- ("fridge_ds", models.mmdet.retinanet, False),
+ ("fridge_ds", models.mmdet.retinanet, True, None),
+ ("fridge_ds", models.mmdet.retinanet, False, None),
+ (
+ "fridge_ds",
+ models.mmdet.retinanet,
+ False,
+ {
+ "model.bbox_head.loss_bbox.loss_weight": 2,
+ "model.bbox_head.loss_cls.loss_weight": 0.8,
+ },
+ ),
),
)
class TestBboxModels:
- def dls_model(self, ds, model_type, pretrained, samples_source, request):
+ def dls_model(
+ self, ds, model_type, pretrained, cfg_options, samples_source, request
+ ):
train_ds, valid_ds = request.getfixturevalue(ds)
train_dl = model_type.train_dl(train_ds, batch_size=2)
valid_dl = model_type.valid_dl(valid_ds, batch_size=2)
@@ -19,16 +30,18 @@ def dls_model(self, ds, model_type, pretrained, samples_source, request):
backbone.config_path = samples_source / backbone.config_path
model = model_type.model(
- backbone=backbone(pretrained=pretrained), num_classes=5
+ backbone=backbone(pretrained=pretrained),
+ num_classes=5,
+ cfg_options=cfg_options,
)
return train_dl, valid_dl, model
def test_mmdet_bbox_models_fastai(
- self, ds, model_type, pretrained, samples_source, request
+ self, ds, model_type, pretrained, cfg_options, samples_source, request
):
train_dl, valid_dl, model = self.dls_model(
- ds, model_type, pretrained, samples_source, request
+ ds, model_type, pretrained, cfg_options, samples_source, request
)
learn = model_type.fastai.learner(
@@ -37,10 +50,10 @@ def test_mmdet_bbox_models_fastai(
learn.fine_tune(1, 3e-4)
def test_mmdet_bbox_models_light(
- self, ds, model_type, pretrained, samples_source, request
+ self, ds, model_type, pretrained, cfg_options, samples_source, request
):
train_dl, valid_dl, model = self.dls_model(
- ds, model_type, pretrained, samples_source, request
+ ds, model_type, pretrained, cfg_options, samples_source, request
)
class LitModel(model_type.lightning.ModelAdapter):
| Make MMDetection config object accessible to users
## 🚀 Feature
Make MMDetection config object accessible to users. The goal is to be able to update model attributes: e.g. changing weighted loss parameters, changing anchor boxes ratios, etc.
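With the `cfg_options` argument added in this patch, overriding config attributes looks like this; the keys mirror the updated test in this entry, and the backbone name is taken from the checkpoint example later in this document:

```python
model = models.mmdet.retinanet.model(
    backbone=models.mmdet.retinanet.backbones.resnet50_fpn_1x(pretrained=True),
    num_classes=5,
    cfg_options={
        "model.bbox_head.loss_bbox.loss_weight": 2,
        "model.bbox_head.loss_cls.loss_weight": 0.8,
    },
)
```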
| 2021-08-10T19:10:40 |
|
airctic/icevision | 910 | airctic__icevision-910 | [
"909"
] | dfcbeaa3ba7df4413c342bdfe9d648937bf2babc | diff --git a/icevision/models/ross/efficientdet/dataloaders.py b/icevision/models/ross/efficientdet/dataloaders.py
--- a/icevision/models/ross/efficientdet/dataloaders.py
+++ b/icevision/models/ross/efficientdet/dataloaders.py
@@ -179,7 +179,7 @@ def process_train_record(record) -> tuple:
def process_infer_record(record) -> tuple:
"""Extracts information from record and prepares a format required by the EffDet inference"""
image = im2tensor(record.img)
- image_size = image.shape[-2:]
+ n_channels, image_height, image_width = image.shape
image_scale = 1.0
-
- return image, image_size, image_scale
+ # EffDet expects image size to be passed in W, H notation
+ return image, (image_width, image_height), image_scale
| Efficientdet inference returns wrong bbox predictions
## 🐛 Bug
When running inference on efficientdet models, the predictions are squeezed to fit only a square aspect ratio. This problem is only visible when running efficientdet with a rectangular input shape (e.g. 512x768). Here is a screenshot of the default behavior:

Note that the predictions are squeezed, seemingly to a square image resolution. I have discovered that the bug comes from the `process_infer_record` function, where the image input shape is passed to effdet in the wrong notation (H, W instead of W, H).
I applied that fix and the result is working as expected:

**To Reproduce**
Steps to reproduce the behavior:
1. Train efficientdet model in rectangular image input shape
2. Run inference
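The fix (see the patch above) boils down to passing the size tuple in the order effdet expects; for a 512x768 (H x W) input:

```python
# Sketch: `image` is a CHW tensor, e.g. shape (3, 512, 768)
n_channels, image_height, image_width = image.shape
image_size = (image_width, image_height)  # effdet expects (W, H) -> (768, 512)
```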
| 2021-08-24T11:56:40 |
||
airctic/icevision | 912 | airctic__icevision-912 | [
"560"
] | dfcbeaa3ba7df4413c342bdfe9d648937bf2babc | diff --git a/icevision/core/record_components.py b/icevision/core/record_components.py
--- a/icevision/core/record_components.py
+++ b/icevision/core/record_components.py
@@ -8,6 +8,7 @@
"BaseLabelsRecordComponent",
"InstancesLabelsRecordComponent",
"ClassificationLabelsRecordComponent",
+ "GrayScaleRecordComponent",
"BBoxesRecordComponent",
"MasksRecordComponent",
"AreasRecordComponent",
@@ -17,6 +18,7 @@
"LossesRecordComponent",
]
+from icevision.utils.imageio import open_gray_scale_image
from icevision.imports import *
from icevision.utils import *
from icevision.core.components import *
@@ -49,9 +51,6 @@ def _num_annotations(self) -> Dict[str, int]:
def _autofix(self) -> Dict[str, bool]:
return {}
- def _remove_annotation(self, i) -> None:
- return
-
def _aggregate_objects(self) -> Dict[str, List[dict]]:
return {}
@@ -216,6 +215,14 @@ def _builder_template(self) -> List[str]:
return ["record{task}set_img_size(<ImgSize>)"]
+class GrayScaleRecordComponent(FilepathRecordComponent):
+ """Overwrites the FilepathRecordComponent to load radiographic images like 16bit grayscale tiff images."""
+
+ def _load(self):
+ img = open_gray_scale_image(self.filepath)
+ self.set_img(img)
+
+
### Annotation parsers ###
class BaseLabelsRecordComponent(ClassMapRecordComponent):
def __init__(self, task=tasks.common):
diff --git a/icevision/core/record_defaults.py b/icevision/core/record_defaults.py
--- a/icevision/core/record_defaults.py
+++ b/icevision/core/record_defaults.py
@@ -1,4 +1,11 @@
-__all__ = ["ObjectDetectionRecord", "InstanceSegmentationRecord"]
+__all__ = [
+ "ObjectDetectionRecord",
+ "InstanceSegmentationRecord",
+ "KeypointsRecord",
+ "GrayScaleObjectDetectionRecord",
+ "GrayScaleInstanceSegmentationRecord",
+ "GrayScaleKeypointsRecord",
+]
from icevision.core.record import *
from icevision.core.record_components import *
@@ -34,3 +41,35 @@ def KeypointsRecord():
KeyPointsRecordComponent(),
)
)
+
+
+def GrayScaleObjectDetectionRecord():
+ return BaseRecord(
+ (
+ GrayScaleRecordComponent(),
+ InstancesLabelsRecordComponent(),
+ BBoxesRecordComponent(),
+ )
+ )
+
+
+def GrayScaleInstanceSegmentationRecord():
+ return BaseRecord(
+ (
+ GrayScaleRecordComponent(),
+ InstancesLabelsRecordComponent(),
+ BBoxesRecordComponent(),
+ MasksRecordComponent(),
+ )
+ )
+
+
+def GrayScaleKeypointsRecord():
+ return BaseRecord(
+ (
+ GrayScaleRecordComponent(),
+ InstancesLabelsRecordComponent(),
+ BBoxesRecordComponent(),
+ KeyPointsRecordComponent(),
+ )
+ )
diff --git a/icevision/utils/imageio.py b/icevision/utils/imageio.py
--- a/icevision/utils/imageio.py
+++ b/icevision/utils/imageio.py
@@ -32,6 +32,14 @@ def open_img(fn, gray=False) -> PIL.Image.Image:
return image
+def open_gray_scale_image(fn):
+ "Opens an radiographic/gray scale image, stacks the channel to represent a RGB image and returns is as a 32bit float array."
+ img = np.array(PIL.Image.open(fn))
+ img = np.dstack([img, img, img])
+ img = img.astype(np.float32)
+ return img
+
+
# TODO: Deprecated
def get_image_size(filepath: Union[str, Path]) -> Tuple[int, int]:
"""
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -144,6 +144,22 @@ def coco_keypoints_parser(coco_dir):
)
[email protected]
+def object_detection_record(samples_source):
+ record = ObjectDetectionRecord()
+
+ record.set_record_id(1)
+ record.set_filepath(samples_source / "voc/JPEGImages/2007_000063.jpg")
+ record.set_img_size(ImgSize(width=500, height=375))
+ record.detection.set_class_map(ClassMap(["a", "b"]))
+ record.detection.add_labels_by_id([1, 2])
+ record.detection.add_bboxes(
+ [BBox.from_xyxy(1, 2, 3, 4), BBox.from_xyxy(10, 20, 30, 40)]
+ )
+
+ return record
+
+
@pytest.fixture(scope="module")
def coco_mask_records(coco_mask_parser):
return coco_mask_parser.parse(data_splitter=SingleSplitSplitter())[0]
@@ -364,6 +380,22 @@ def object_detection_record(samples_source):
return record
[email protected]
+def gray_scale_object_detection_record(samples_source):
+ record = ObjectDetectionRecord()
+
+ record.set_record_id(1)
+ record.set_filepath(samples_source / "gray_scale/gray_scale_h_50_w_50_image.tiff")
+ record.set_img_size(ImgSize(width=50, height=50))
+ record.detection.set_class_map(ClassMap(["a", "b"]))
+ record.detection.add_labels_by_id([1, 2])
+ record.detection.add_bboxes(
+ [BBox.from_xyxy(1, 2, 3, 4), BBox.from_xyxy(10, 20, 30, 40)]
+ )
+
+ return record
+
+
@pytest.fixture
def instance_segmentation_record(object_detection_record):
record = object_detection_record
@@ -374,6 +406,16 @@ def instance_segmentation_record(object_detection_record):
return record
[email protected]
+def gray_scale_instance_segmentation_record(gray_scale_object_detection_record):
+ record = object_detection_record
+ record.add_component(MasksRecordComponent())
+
+ record.detection.add_masks([MaskArray(np.ones((2, 4, 4), dtype=np.uint8))])
+
+ return record
+
+
@pytest.fixture
def empty_annotations_record():
record = BaseRecord(
diff --git a/tests/core/test_record.py b/tests/core/test_record.py
--- a/tests/core/test_record.py
+++ b/tests/core/test_record.py
@@ -27,6 +27,31 @@ def record(samples_source):
return record
[email protected]()
+def gray_scale_record(samples_source):
+ record = BaseRecord(
+ (
+ BBoxesRecordComponent(),
+ InstancesLabelsRecordComponent(),
+ MasksRecordComponent(),
+ GrayScaleRecordComponent(),
+ )
+ )
+
+ record.set_record_id(1)
+ record.set_image_size(3, 3)
+ record.set_filepath(samples_source / "gray_scale/gray_scale_h_50_w_50_image.tiff")
+ record.detection.set_class_map(ClassMap(["a", "b"]))
+ record.detection.add_labels(["a", "b"])
+ record.detection.add_bboxes(
+ [BBox.from_xyxy(1, 2, 4, 4), BBox.from_xyxy(1, 2, 1, 3)]
+ )
+ mask_filepath = samples_source / "voc/SegmentationObject/2007_000063.png"
+ record.detection.add_masks([VocMaskFile(mask_filepath)])
+
+ return record
+
+
@pytest.fixture
def record_empty_annotations():
record = BaseRecord((BBoxesRecordComponent(), InstancesLabelsRecordComponent()))
@@ -83,6 +108,22 @@ def test_record_load(record):
assert isinstance(record_loaded.detection.masks, EncodedRLEs)
+def test_gray_scale_record_load(gray_scale_record):
+ record_loaded = gray_scale_record.load()
+
+ assert isinstance(record_loaded.img, np.ndarray)
+ assert isinstance(record_loaded.detection.masks, MaskArray)
+
+ # test original record is not modified
+ assert gray_scale_record.img == None
+ assert isinstance(gray_scale_record.detection.masks, EncodedRLEs)
+
+ # test unload
+ record_loaded.unload()
+ assert record_loaded.img == None
+ assert isinstance(record_loaded.detection.masks, EncodedRLEs)
+
+
class TestKeypointsMetadata(KeypointsMetadata):
labels = ("nose", "ankle")
diff --git a/tests/utils/test_imageio.py b/tests/utils/test_imageio.py
--- a/tests/utils/test_imageio.py
+++ b/tests/utils/test_imageio.py
@@ -30,3 +30,15 @@ def test_open_img(samples_source, fn, expected):
def test_get_image_size(samples_source, fn, expected):
size = get_image_size(samples_source / fn)
assert size == (expected)
+
+
[email protected](
+ "fn,expected",
+ [
+ ("gray_scale/gray_scale_h_10_w_10_image.tiff", (10, 10)),
+ ("gray_scale/gray_scale_h_50_w_50_image.tiff", (50, 50)),
+ ],
+)
+def test_open_gray_scale_image(samples_source, fn, expected):
+ # When returning np arrays
+ assert np.array(open_img(samples_source / fn)).shape[:-1] == expected
| Tutorial on writing a custom RecordMixin, to show how to read in special file formats like 16-bit grayscale or DICOM
Is there a dataset in icedata that can be used for this example, or do we need to get data from somewhere?
I don't intend to include any example of training a NN in the tutorial, only data reading and augmentation.
---
**Don't remove**
Main issue for examples: #39
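The patch in this entry already contains the core of such a tutorial; a condensed sketch of the custom component it adds (stacking the single channel into three and converting to float32 so RGB pipelines work unchanged):

```python
# Condensed from the patch above
def open_gray_scale_image(fn):
    img = np.array(PIL.Image.open(fn))  # e.g. a 16-bit grayscale TIFF
    img = np.dstack([img, img, img])    # fake 3 channels so RGB transforms/models work
    return img.astype(np.float32)

class GrayScaleRecordComponent(FilepathRecordComponent):
    def _load(self):
        self.set_img(open_gray_scale_image(self.filepath))
```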
| Currently we don't have any datasets using DICOM or 16-bit grayscale; would you like to create a tutorial on that?
After the tutorial is complete it would be super awesome to also add the dataset to icedata | 2021-08-27T13:51:18 |
airctic/icevision | 924 | airctic__icevision-924 | [
"923"
] | de76fccce2f3f1fb54e9c3c1b2a6ad77a4f23105 | diff --git a/icevision/models/__init__.py b/icevision/models/__init__.py
--- a/icevision/models/__init__.py
+++ b/icevision/models/__init__.py
@@ -13,6 +13,7 @@
if SoftDependencies.mmdet:
from icevision.models import mmdet
+ from icevision.models.checkpoint import *
if SoftDependencies.yolov5:
# HACK: yolov5 changes matplotlib backend here: https://github.com/ultralytics/yolov5/blob/77415a42e5975ea356393c9f1d5cff0ae8acae2c/utils/plots.py#L26
diff --git a/icevision/models/checkpoint.py b/icevision/models/checkpoint.py
new file mode 100644
--- /dev/null
+++ b/icevision/models/checkpoint.py
@@ -0,0 +1,154 @@
+__all__ = ["save_icevision_checkpoint", "model_from_checkpoint"]
+
+from icevision.imports import *
+from icevision.core import *
+from icevision import models
+
+from mmcv.runner import (
+ load_checkpoint,
+ save_checkpoint,
+ _load_checkpoint,
+ load_state_dict,
+)
+
+
+def save_icevision_checkpoint(
+ model,
+ model_name,
+ backbone_name,
+ class_map,
+ img_size,
+ filename,
+ optimizer=None,
+ meta=None,
+):
+ """Save checkpoint to file.
+
+ The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
+ ``optimizer``. By default ``meta`` will contain version and time info.
+
+ Args:
+ model (Module): Module whose params are to be saved.
+ filename (str): Checkpoint filename.
+ optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
+ meta (dict, optional): Metadata to be saved in checkpoint.
+
+ Examples:
+ >>> save_icevision_checkpoint(model_saved,
+ model_name='mmdet.retinanet',
+ backbone_name='resnet50_fpn_1x',
+ class_map = class_map,
+ img_size=img_size,
+ filename=checkpoint_path,
+ meta={'icevision_version': '0.9.1'})
+
+ """
+
+ if meta is None:
+ meta = {}
+ elif not isinstance(meta, dict):
+ raise TypeError(f"meta must be a dict or None, but got {type(meta)}")
+
+ if class_map:
+ classes = class_map._id2class
+ else:
+ classes = None
+
+ if classes:
+ meta.update(classes=classes)
+
+ if model_name:
+ meta.update(model_name=model_name)
+
+ if img_size:
+ meta.update(img_size=img_size)
+
+ if backbone_name:
+ meta.update(backbone_name=backbone_name)
+
+ save_checkpoint(model, filename, optimizer=optimizer, meta=meta)
+
+
+def model_from_checkpoint(
+ filename: Union[Path, str],
+ map_location=None,
+ strict=False,
+ logger=None,
+ revise_keys=[(r"^module\.", "")],
+):
+ """load checkpoint through URL scheme path.
+
+ Args:
+ filename (str): checkpoint file name with given prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ Default: None
+ logger (:mod:`logging.Logger`, optional): The logger for message.
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+
+ if isinstance(filename, Path):
+ filename = str(filename)
+
+ checkpoint = _load_checkpoint(filename)
+
+ class_map = None
+ num_classes = None
+ img_size = None
+ model_name = None
+ backbone = None
+
+ classes = checkpoint["meta"].get("classes", None)
+ if classes:
+ class_map = ClassMap(checkpoint["meta"]["classes"])
+ num_classes = len(class_map)
+
+ img_size = checkpoint["meta"].get("img_size", None)
+
+ model_name = checkpoint["meta"].get("model_name", None)
+ if model_name:
+ lib, mod = model_name.split(".")
+ model_type = getattr(getattr(models, lib), mod)
+
+ backbone_name = checkpoint["meta"].get("backbone_name", None)
+ if backbone_name:
+ backbone = getattr(model_type.backbones, backbone_name)
+
+ extra_args = {}
+ models_with_img_size = ("yolov5", "efficientdet")
+ # if 'efficientdet' in model_name:
+ if any(m in model_name for m in models_with_img_size):
+ extra_args["img_size"] = img_size
+
+ # Instantiate model
+ model = model_type.model(
+ backbone=backbone(pretrained=False), num_classes=num_classes, **extra_args
+ )
+
+ # OrderedDict is a subclass of dict
+ if not isinstance(checkpoint, dict):
+ raise RuntimeError(f"No state_dict found in checkpoint file {filename}")
+ # get state_dict from checkpoint
+ if "state_dict" in checkpoint:
+ state_dict = checkpoint["state_dict"]
+ else:
+ state_dict = checkpoint
+ # strip prefix of state_dict
+ for p, r in revise_keys:
+ state_dict = {re.sub(p, r, k): v for k, v in state_dict.items()}
+
+ # load state_dict
+ load_state_dict(model, state_dict, strict, logger)
+
+ checkpoint_and_model = {
+ "model": model,
+ "model_type": model_type,
+ "backbone": backbone,
+ "class_map": class_map,
+ "img_size": img_size,
+ "checkpoint": checkpoint,
+ }
+ return checkpoint_and_model
+ # return model, model_type, backbone, class_map, img_size, checkpoint
| Simplify save and load model checkpoints
## 🚀 Feature
Simplify save and load model checkpoints by saving model metadata.
- The model should be recreated using model metadata
- The checkpoint file can be either a local file or a URL (a usage sketch follows below)
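A hedged usage sketch of the two points above, following the function signatures and docstring in the patch; `model` and `class_map` are assumed to come from a finished training run:
```python
from icevision.models.checkpoint import save_icevision_checkpoint, model_from_checkpoint

# Save the weights together with the metadata needed to rebuild the model later.
save_icevision_checkpoint(
    model,
    model_name="mmdet.retinanet",
    backbone_name="resnet50_fpn_1x",
    class_map=class_map,
    img_size=640,
    filename="model_checkpoint.pth",
    meta={"icevision_version": "0.9.1"},
)

# Recreate the model purely from the stored metadata; per _load_checkpoint the
# filename may also be a URL.
restored = model_from_checkpoint("model_checkpoint.pth")
model, model_type = restored["model"], restored["model_type"]
```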
| 2021-09-17T14:36:24 |
||
airctic/icevision | 929 | airctic__icevision-929 | [
"929"
] | f0d1fe03c4828ed11f7bf3ebcb623123cb9fe691 | diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py
--- a/icevision/core/class_map.py
+++ b/icevision/core/class_map.py
@@ -32,6 +32,9 @@ def __init__(
def num_classes(self):
return len(self)
+ def get_classes(self) -> Sequence[str]:
+ return self._id2class
+
def get_by_id(self, id: int) -> str:
return self._id2class[id]
diff --git a/icevision/models/checkpoint.py b/icevision/models/checkpoint.py
--- a/icevision/models/checkpoint.py
+++ b/icevision/models/checkpoint.py
@@ -11,16 +11,100 @@
load_state_dict,
)
+# COCO Classes: 80 classes
+CLASSES = (
+ "person",
+ "bicycle",
+ "car",
+ "motorcycle",
+ "airplane",
+ "bus",
+ "train",
+ "truck",
+ "boat",
+ "traffic light",
+ "fire hydrant",
+ "stop sign",
+ "parking meter",
+ "bench",
+ "bird",
+ "cat",
+ "dog",
+ "horse",
+ "sheep",
+ "cow",
+ "elephant",
+ "bear",
+ "zebra",
+ "giraffe",
+ "backpack",
+ "umbrella",
+ "handbag",
+ "tie",
+ "suitcase",
+ "frisbee",
+ "skis",
+ "snowboard",
+ "sports ball",
+ "kite",
+ "baseball bat",
+ "baseball glove",
+ "skateboard",
+ "surfboard",
+ "tennis racket",
+ "bottle",
+ "wine glass",
+ "cup",
+ "fork",
+ "knife",
+ "spoon",
+ "bowl",
+ "banana",
+ "apple",
+ "sandwich",
+ "orange",
+ "broccoli",
+ "carrot",
+ "hot dog",
+ "pizza",
+ "donut",
+ "cake",
+ "chair",
+ "couch",
+ "potted plant",
+ "bed",
+ "dining table",
+ "toilet",
+ "tv",
+ "laptop",
+ "mouse",
+ "remote",
+ "keyboard",
+ "cell phone",
+ "microwave",
+ "oven",
+ "toaster",
+ "sink",
+ "refrigerator",
+ "book",
+ "clock",
+ "vase",
+ "scissors",
+ "teddy bear",
+ "hair drier",
+ "toothbrush",
+)
+
def save_icevision_checkpoint(
model,
- model_name,
- backbone_name,
- class_map,
- img_size,
filename,
optimizer=None,
meta=None,
+ model_name=None,
+ backbone_name=None,
+ classes=None,
+ img_size=None,
):
"""Save checkpoint to file.
@@ -49,11 +133,6 @@ def save_icevision_checkpoint(
elif not isinstance(meta, dict):
raise TypeError(f"meta must be a dict or None, but got {type(meta)}")
- if class_map:
- classes = class_map._id2class
- else:
- classes = None
-
if classes:
meta.update(classes=classes)
@@ -71,10 +150,17 @@ def save_icevision_checkpoint(
def model_from_checkpoint(
filename: Union[Path, str],
+ model_name=None,
+ backbone_name=None,
+ classes=None,
+ is_coco=False,
+ img_size=None,
map_location=None,
strict=False,
- logger=None,
- revise_keys=[(r"^module\.", "")],
+ revise_keys=[
+ (r"^module\.", ""),
+ ],
+ eval_mode=True,
):
"""load checkpoint through URL scheme path.
@@ -82,8 +168,7 @@ def model_from_checkpoint(
filename (str): checkpoint file name with given prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
- logger (:mod:`logging.Logger`, optional): The logger for message.
- Default: None
+
Returns:
dict or OrderedDict: The loaded checkpoint.
@@ -94,38 +179,54 @@ def model_from_checkpoint(
checkpoint = _load_checkpoint(filename)
- class_map = None
- num_classes = None
- img_size = None
- model_name = None
- backbone = None
+ if is_coco and classes:
+ logger.warning(
+ "`is_coco` cannot be set to True if `classes` is passed and `not None`. `classes` has priority. `is_coco` will be ignored."
+ )
- classes = checkpoint["meta"].get("classes", None)
+ if classes is None:
+ if is_coco:
+ classes = CLASSES
+ else:
+ classes = checkpoint["meta"].get("classes", None)
+
+ class_map = None
if classes:
- class_map = ClassMap(checkpoint["meta"]["classes"])
+ class_map = ClassMap(classes)
num_classes = len(class_map)
- img_size = checkpoint["meta"].get("img_size", None)
+ if img_size is None:
+ img_size = checkpoint["meta"].get("img_size", None)
+
+ if model_name is None:
+ model_name = checkpoint["meta"].get("model_name", None)
- model_name = checkpoint["meta"].get("model_name", None)
+ model_type = None
if model_name:
lib, mod = model_name.split(".")
model_type = getattr(getattr(models, lib), mod)
- backbone_name = checkpoint["meta"].get("backbone_name", None)
- if backbone_name:
+ if backbone_name is None:
+ backbone_name = checkpoint["meta"].get("backbone_name", None)
+ if model_type and backbone_name:
backbone = getattr(model_type.backbones, backbone_name)
extra_args = {}
+ if img_size is None:
+ img_size = checkpoint["meta"].get("img_size", None)
+
models_with_img_size = ("yolov5", "efficientdet")
# if 'efficientdet' in model_name:
- if any(m in model_name for m in models_with_img_size):
+ if (model_name) and (any(m in model_name for m in models_with_img_size)):
extra_args["img_size"] = img_size
# Instantiate model
- model = model_type.model(
- backbone=backbone(pretrained=False), num_classes=num_classes, **extra_args
- )
+ if model_type and backbone:
+ model = model_type.model(
+ backbone=backbone(pretrained=False), num_classes=num_classes, **extra_args
+ )
+ else:
+ model = None
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
@@ -140,7 +241,10 @@ def model_from_checkpoint(
state_dict = {re.sub(p, r, k): v for k, v in state_dict.items()}
# load state_dict
- load_state_dict(model, state_dict, strict, logger)
+ if model:
+ load_state_dict(model, state_dict, strict, logger)
+ if eval_mode:
+ model.eval()
checkpoint_and_model = {
"model": model,
| Inference - automatically recreate model trained with COCO
Automatically recreate model trained with COCO if the user provide the model name, the backbone name, and the classes.
Example:
```
checkpoint_and_model = model_from_checkpoint(checkpoint_path,
model_name='mmdet.retinanet',
backbone_name='resnet50_fpn_1x',
img_size=640,
is_coco=True)
```
closes #929
| 2021-09-21T17:47:10 |
||
airctic/icevision | 960 | airctic__icevision-960 | [
"600"
] | 3245b8d4d0a1b2ba7afc80b33d6ee5923e2c8142 | diff --git a/icevision/engines/lightning/lightning_model_adapter.py b/icevision/engines/lightning/lightning_model_adapter.py
--- a/icevision/engines/lightning/lightning_model_adapter.py
+++ b/icevision/engines/lightning/lightning_model_adapter.py
@@ -6,9 +6,21 @@
class LightningModelAdapter(pl.LightningModule, ABC):
- def __init__(self, metrics: List[Metric] = None):
+ def __init__(
+ self,
+ metrics: List[Metric] = None,
+ metrics_keys_to_log_to_prog_bar: List[tuple] = None,
+ ):
+ """
+ To show a metric in the progressbar a list of tupels can be provided for metrics_keys_to_log_to_prog_bar, the first
+ entry has to be the name of the metric to log and the second entry the display name in the progressbar. By default the
+ mAP is logged to the progressbar.
+ """
super().__init__()
self.metrics = metrics or []
+ self.metrics_keys_to_log_to_prog_bar = metrics_keys_to_log_to_prog_bar or [
+ ("AP (IoU=0.50:0.95) area=all", "COCOMetric")
+ ]
def accumulate_metrics(self, preds):
for metric in self.metrics:
@@ -18,4 +30,9 @@ def finalize_metrics(self) -> None:
for metric in self.metrics:
metric_logs = metric.finalize()
for k, v in metric_logs.items():
- self.log(f"{metric.name}/{k}", v)
+ for entry in self.metrics_keys_to_log_to_prog_bar:
+ if entry[0] == k:
+ self.log(entry[1], v, prog_bar=True)
+ self.log(f"{metric.name}/{k}", v)
+ else:
+ self.log(f"{metric.name}/{k}", v)
| diff --git a/tests/engines/lightning/test_lightning_model_adapter.py b/tests/engines/lightning/test_lightning_model_adapter.py
new file mode 100644
--- /dev/null
+++ b/tests/engines/lightning/test_lightning_model_adapter.py
@@ -0,0 +1,30 @@
+from icevision.engines.lightning import LightningModelAdapter
+
+
+class MockMetric:
+ def __init__(self):
+ self.name = "MockMetric"
+
+ def accumulate(self):
+ pass
+
+    def finalize(self):
+ return {"metric_a": 1, "metric_b": 2}
+
+
+class DummLightningModelAdapter(LightningModelAdapter):
+ pass
+
+
+# test if finalize metrics reports metrics correctly
+def test_finalze_metrics_reports_metrics_correctly(mocker):
+ mocker.patch(
+ "icevision.engines.lightning.lightning_model_adapter.LightningModelAdapter.log"
+ )
+
+ adapter = DummLightningModelAdapter([MockMetric()], [("metric_a", "a")])
+ adapter.finalize_metrics()
+
+ adapter.log.assert_any_call("a", 1, prog_bar=True)
+ adapter.log.assert_any_call("MockMetric/metric_a", 1)
+ adapter.log.assert_any_call("MockMetric/metric_b", 2)
| Add more logging to the PyTorch Lightning models.
The feature consists of two parts:
1. Add the validation loss to the progress bar by default
2. Create a boolean parameter for extended progress bar logging (showing the different components of the loss); a usage sketch of the merged adapter API follows below
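A minimal sketch of the API that was eventually merged (see the adapter patch above): rather than a boolean flag it takes a list of (metric key, progress-bar name) tuples; `my_metric` is a hypothetical icevision Metric instance:
```python
from icevision.engines.lightning import LightningModelAdapter

class MyAdapter(LightningModelAdapter):
    # training_step / validation_step omitted; a real adapter implements them
    pass

adapter = MyAdapter(
    metrics=[my_metric],
    metrics_keys_to_log_to_prog_bar=[("AP (IoU=0.50:0.95) area=all", "COCOMetric")],
)
# When finalize_metrics() runs, the matching key is logged with prog_bar=True,
# so "COCOMetric" shows up next to the loss in the progress bar.
```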
| 2021-10-30T10:04:26 |
|
airctic/icevision | 993 | airctic__icevision-993 | [
"994"
] | b2e65d2dc713c56299cb96828eff449d0efa5fbc | diff --git a/icevision/models/mmdet/models/ssd/backbones/resnet_fpn.py b/icevision/models/mmdet/models/ssd/backbones/resnet_fpn.py
--- a/icevision/models/mmdet/models/ssd/backbones/resnet_fpn.py
+++ b/icevision/models/mmdet/models/ssd/backbones/resnet_fpn.py
@@ -1,6 +1,7 @@
__all__ = [
"ssd300",
"ssd512",
+ "ssdlite_mobilenetv2",
]
from icevision.imports import *
@@ -17,10 +18,15 @@ def __init__(self, **kwargs):
ssd300 = MMDetSSDBackboneConfig(
config_path=base_config_path / "ssd300_coco.py",
- weights_url=f"{base_weights_url}/ssd300_coco/ssd300_coco_20200307-a92d2092.pth",
+ weights_url=f"{base_weights_url}/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth",
)
ssd512 = MMDetSSDBackboneConfig(
config_path=base_config_path / "ssd512_coco.py",
- weights_url=f"{base_weights_url}/ssd512_coco/ssd512_coco_20200308-038c5591.pth",
+ weights_url=f"{base_weights_url}/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth",
+)
+
+ssdlite_mobilenetv2 = MMDetSSDBackboneConfig(
+ config_path=base_config_path / "ssdlite_mobilenetv2_scratch_600e_coco.py",
+ weights_url=f"{base_weights_url}/ssd512_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth",
)
diff --git a/icevision/models/mmdet/utils.py b/icevision/models/mmdet/utils.py
--- a/icevision/models/mmdet/utils.py
+++ b/icevision/models/mmdet/utils.py
@@ -35,18 +35,21 @@ def param_groups(model):
body = model.backbone
layers = []
+
+ # add the backbone
if isinstance(body, SSDVGG):
layers += [body.features]
- layers += [body.extra, body.l2_norm]
elif isinstance(body, CSPDarknet):
layers += [body.stem.conv.conv, body.stem.conv.bn]
layers += [body.stage1, body.stage2, body.stage3, body.stage4]
- layers += [model.neck]
else:
layers += [nn.Sequential(body.conv1, body.bn1)]
layers += [getattr(body, l) for l in body.res_layers]
- layers += [model.neck]
+ # add the neck
+ layers += [model.neck]
+
+ # add the head
if isinstance(model, SingleStageDetector):
layers += [model.bbox_head]
elif isinstance(model, TwoStageDetector):
| SSD model doesn't work
## 🐛 Bug
SSD model doesn't work anymore. It seems related to MMDetection updates made here:
https://github.com/open-mmlab/mmdetection/pull/5789/files
Refer to discussion on our Discord forum:
https://discord.com/channels/735877944085446747/780951885485965352/920249646964670464
| 2021-12-16T21:47:51 |
||
airctic/icevision | 995 | airctic__icevision-995 | [
"990"
] | 9c17564b82b2c17367ee2eb02fd24dbf8bf6376c | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
from setuptools import setup
+
if __name__ == "__main__":
setup()
| Fix installation in documentation
• Improve Installation Guide
We need to improve the installation guide for IceVision.
Too many people are getting stuck installing the library.
We need clear instructions for:
* Colab
* MacOS
* Windows (WSL2)
* Ubuntu
| 2021-12-17T10:25:56 |
||
airctic/icevision | 1,010 | airctic__icevision-1010 | [
"1009"
] | 4050e0eb0def68d964330b4861c79dd8ed5c7afe | diff --git a/icevision/models/mmdet/models/yolox/backbones/resnet_fpn.py b/icevision/models/mmdet/models/yolox/backbones/resnet_fpn.py
--- a/icevision/models/mmdet/models/yolox/backbones/resnet_fpn.py
+++ b/icevision/models/mmdet/models/yolox/backbones/resnet_fpn.py
@@ -1,5 +1,8 @@
__all__ = [
"yolox_tiny_8x8",
+ "yolox_s_8x8",
+ "yolox_l_8x8",
+ "yolox_x_8x8",
]
from icevision.imports import *
@@ -18,3 +21,18 @@ def __init__(self, **kwargs):
config_path=base_config_path / "yolox_tiny_8x8_300e_coco.py",
weights_url=f"{base_weights_url}yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth",
)
+
+yolox_s_8x8 = MMDetYOLOXBackboneConfig(
+ config_path=base_config_path / "yolox_s_8x8_300e_coco.py",
+ weights_url=f"{base_weights_url}yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth",
+)
+
+yolox_l_8x8 = MMDetYOLOXBackboneConfig(
+ config_path=base_config_path / "yolox_l_8x8_300e_coco.py",
+ weights_url=f"{base_weights_url}yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth",
+)
+
+yolox_x_8x8 = MMDetYOLOXBackboneConfig(
+ config_path=base_config_path / "yolox_x_8x8_300e_coco.py",
+ weights_url=f"{base_weights_url}yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth",
+)
| Add YOLOX backbones
Add small, large, and xlarge backbones to the YOLOX model
| 2021-12-23T20:48:59 |
||
airctic/icevision | 1,058 | airctic__icevision-1058 | [
"1057"
] | 9c17564b82b2c17367ee2eb02fd24dbf8bf6376c | diff --git a/icevision/models/mmdet/utils.py b/icevision/models/mmdet/utils.py
--- a/icevision/models/mmdet/utils.py
+++ b/icevision/models/mmdet/utils.py
@@ -6,6 +6,7 @@
"create_model_config",
]
+from numpy import False_
from icevision.imports import *
from icevision.utils import *
from icevision.backbones import BackboneConfig
@@ -81,9 +82,9 @@ def param_groups(model):
layers += [model.bbox_head]
# YOLACT has mask_head and segm_head
- if getattr(model, "mask_head"):
+ if hasattr(model, "mask_head"):
layers += [model.mask_head]
- if getattr(model, "segm_head"):
+ if hasattr(model, "segm_head"):
layers += [model.segm_head]
elif isinstance(model, TwoStageDetector):
| getting_started_object_detection.ipynb fails after CentripetalNet support merge
## 🐛 Bug
**Describe the bug**
`getting_started_object_detection.ipynb` fails to run with the following error.
`AttributeError: 'VFNet' object has no attribute 'mask_head'
`
**To Reproduce**
Steps to reproduce the behavior: Run the getting started notebook.
**Expected behavior**
Model should instantiate.
**Screenshots**

| 2022-02-07T08:43:20 |
||
airctic/icevision | 1,074 | airctic__icevision-1074 | [
"1073"
] | cc6d6a4a048f6ddda2782b6593dcd6b083a673e4 | diff --git a/icevision/tfms/albumentations/albumentations_adapter.py b/icevision/tfms/albumentations/albumentations_adapter.py
--- a/icevision/tfms/albumentations/albumentations_adapter.py
+++ b/icevision/tfms/albumentations/albumentations_adapter.py
@@ -133,7 +133,20 @@ def setup_masks(self, record_component):
self.adapter._collect_ops.append(CollectOp(self.collect))
def collect(self, record):
- masks = self.adapter._filter_attribute(self.adapter._albu_out["masks"])
+ try:
+ masks = self.adapter._filter_attribute(self.adapter._albu_out["masks"])
+ except AssertionError:
+ # TODO: messages should be more detailed.
+ img_path = record.as_dict()["common"][
+ "filepath"
+ ] # ~/.icevision/data/voc/SegmentationObject/2007_000033.png'
+ data_dir = img_path.parents[1] # ~/.icevision/data/voc'
+ checklist = list(data_dir.glob(f"**/{img_path.stem}.*"))
+ checklist = "".join([f"\n -{str(path)}" for path in checklist])
+ raise AttributeError(
+ f"Mismatch at annotations with number of masks. Check or delete {len(checklist)} files below. {checklist}"
+ )
+
masks = MaskArray(np.array(masks))
self._record_component.set_mask_array(masks)
# # set masks from the modified masks array
| wrong voc data fixation request (annotation mismatch)
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
`voc/SegmentationObject/2010_002305.png` has 8 masks but annotation file only has 5.
```python
# icevision/icevision/tfms/albumentations/albumentations_adapter.py
class Adapter(Transform, Composite):
...
def _filter_attribute(self, v: list):
if self._keep_mask is None or len(self._keep_mask) == 0:
return v
assert len(v) == len(self._keep_mask) # mismatch cause assertion error here
return [o for o, keep in zip(v, self._keep_mask) if keep]
)
```
**Describe alternatives you've considered**
I think that assertion should be checked before training starts.
This is temporary code I used to pick out the offending files.
```python
# icevision/icevision/tfms/albumentations/albumentations_adapter.py
class AlbumentationsMasksComponent(AlbumentationsAdapterComponent):
...
def collect(self, record):
try:
masks = self.adapter._filter_attribute(self.adapter._albu_out["masks"])
except AssertionError as e:
jpg_path = str(record.as_dict()["common"]["filepath"])
ann_path = jpg_path.replace("JPEGImages", "Annotations")
mask_path = jpg_path.replace("JPEGImages", "SegmentationObject")
raise AttributeError(
f"Mismatch at annotation file and number of masks. Check or delete 3 files.\n -{jpg_path}\n -{ann_path}\n -{mask_path}"
)
```
***Mismatch list***
- voc/2008_005245
- voc/2009_000455
- voc/2009_004969
- voc/2009_005069
- voc/2011_002863
- voc/2011_002644
| Thanks for taking a look!
Do you want to open a PR about this?
Sounds useful to me :) | 2022-03-05T07:36:10 |
|
airctic/icevision | 1,091 | airctic__icevision-1091 | [
"1090",
"1090"
] | af0693e355423e9879c97ab903f324db080411f4 | diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py
--- a/icevision/tfms/albumentations/albumentations_helpers.py
+++ b/icevision/tfms/albumentations/albumentations_helpers.py
@@ -32,7 +32,7 @@ def aug_tfms(
g_shift_limit=10,
b_shift_limit=10,
),
- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),
crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),
pad: Optional[A.DualTransform] = partial(
@@ -54,7 +54,7 @@ def aug_tfms(
is not applied.
rgb_shift: Randomly shift values for each channel of RGB image. If `None` this
transform is not applied.
- lightning: Randomly changes Brightness and Contrast. If `None` this transform
+ lighting: Randomly changes Brightness and Contrast. If `None` this transform
is not applied.
blur: Randomly blur the image. If `None` this transform is not applied.
crop_fn: Randomly crop the image. If `None` this transform is not applied.
@@ -71,7 +71,7 @@ def aug_tfms(
tfms = []
tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]
- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]
+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]
# Resize as the last transforms to reduce the number of artificial artifacts created
if crop_fn is not None:
crop = crop_fn(height=height, width=width)
| Typo lightning -> lighting in Albumentations helper
There are several instances in the codebase with the typo `lightning` when the intended term is `lighting`
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74
| 2022-04-09T18:02:09 |
||
aws/aws-sam-cli | 464 | aws__aws-sam-cli-464 | [
"389"
] | 340fca81e6d1d98e0f42abb12f92d504bda8494f | diff --git a/samcli/local/lambdafn/runtime.py b/samcli/local/lambdafn/runtime.py
--- a/samcli/local/lambdafn/runtime.py
+++ b/samcli/local/lambdafn/runtime.py
@@ -4,7 +4,6 @@
import os
import shutil
-import zipfile
import tempfile
import signal
import logging
@@ -12,6 +11,7 @@
from contextlib import contextmanager
from samcli.local.docker.lambda_container import LambdaContainer
+from .zip import unzip
LOG = logging.getLogger(__name__)
@@ -190,8 +190,7 @@ def _unzip_file(filepath):
LOG.info("Decompressing %s", filepath)
- zip_ref = zipfile.ZipFile(filepath, 'r')
- zip_ref.extractall(temp_dir)
+ unzip(filepath, temp_dir)
# The directory that Python returns might have symlinks. The Docker File sharing settings will not resolve
# symlinks. Hence get the real path before passing to Docker.
diff --git a/samcli/local/lambdafn/zip.py b/samcli/local/lambdafn/zip.py
new file mode 100644
--- /dev/null
+++ b/samcli/local/lambdafn/zip.py
@@ -0,0 +1,58 @@
+"""
+Helper methods to unzip an archive preserving the file permissions. Python's zipfile module does not yet support
+this feature natively (https://bugs.python.org/issue15795).
+"""
+
+import os
+import zipfile
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+def unzip(zip_file_path, output_dir):
+ """
+ Unzip the given file into the given directory while preserving file permissions in the process.
+
+ Parameters
+ ----------
+ zip_file_path : str
+ Path to the zip file
+
+ output_dir : str
+ Path to the directory where the it should be unzipped to
+ """
+
+ with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+
+ # For each item in the zip file, extract the file and set permissions if available
+ for file_info in zip_ref.infolist():
+ name = file_info.filename
+ extracted_path = os.path.join(output_dir, name)
+
+ zip_ref.extract(name, output_dir)
+ _set_permissions(file_info, extracted_path)
+
+
+def _set_permissions(zip_file_info, extracted_path):
+ """
+ Sets permissions on the extracted file by reading the ``external_attr`` property of given file info.
+
+ Parameters
+ ----------
+ zip_file_info : zipfile.ZipInfo
+ Object containing information about a file within a zip archive
+
+ extracted_path : str
+ Path where the file has been extracted to
+ """
+
+ # Permission information is stored in first two bytes.
+ permission = zip_file_info.external_attr >> 16
+ if not permission:
+ # Zips created on certain Windows machines, however, might not have any permission information on them.
+ # Skip setting a permission on these files.
+ LOG.debug("File %s in zipfile does not have permission information", zip_file_info.filename)
+ return
+
+ os.chmod(extracted_path, permission)
| diff --git a/tests/integration/local/invoke/invoke_integ_base.py b/tests/integration/local/invoke/invoke_integ_base.py
--- a/tests/integration/local/invoke/invoke_integ_base.py
+++ b/tests/integration/local/invoke/invoke_integ_base.py
@@ -15,9 +15,10 @@ def setUpClass(cls):
integration_dir = str(Path(__file__).resolve().parents[2])
- cls.template_path = integration_dir + "/testdata/invoke/template.yml"
- cls.event_path = integration_dir + "/testdata/invoke/event.json"
- cls.env_var_path = integration_dir + "/testdata/invoke/vars.json"
+ cls.test_data_path = os.path.join(integration_dir, "testdata")
+ cls.template_path = os.path.join(cls.test_data_path, "invoke", "template.yml")
+ cls.event_path = os.path.join(cls.test_data_path, "invoke", "event.json")
+ cls.env_var_path = os.path.join(cls.test_data_path, "invoke", "vars.json")
@classmethod
def base_command(cls):
diff --git a/tests/integration/local/invoke/runtimes/__init__.py b/tests/integration/local/invoke/runtimes/__init__.py
new file mode 100644
diff --git a/tests/integration/local/invoke/runtimes/test_with_runtime_zips.py b/tests/integration/local/invoke/runtimes/test_with_runtime_zips.py
new file mode 100644
--- /dev/null
+++ b/tests/integration/local/invoke/runtimes/test_with_runtime_zips.py
@@ -0,0 +1,39 @@
+import os
+import tempfile
+
+from subprocess import Popen, PIPE
+from nose_parameterized import parameterized, param
+
+from tests.integration.local.invoke.invoke_integ_base import InvokeIntegBase
+
+
+class TestWithDifferentLambdaRuntimeZips(InvokeIntegBase):
+
+ def setUp(self):
+
+ self.template_path = os.path.join(self.test_data_path, "invoke", "runtimes", "template.yaml")
+
+ self.events_file = tempfile.NamedTemporaryFile()
+ self.events_file.write('"yolo"') # Just empty event
+ self.events_file.flush()
+
+ self.events_file_path = self.events_file.name
+
+ def tearDown(self):
+ self.events_file.close()
+
+ @parameterized.expand([
+ param("Go1xFunction"),
+ param("Java8Function")
+ ])
+ def test_runtime_zip(self, function_name):
+ command_list = self.get_command_list(function_name,
+ template_path=self.template_path,
+ event_path=self.events_file_path)
+
+ process = Popen(command_list, stdout=PIPE)
+ return_code = process.wait()
+
+ self.assertEquals(return_code, 0)
+ process_stdout = b"".join(process.stdout.readlines()).strip()
+ self.assertEquals(process_stdout.decode('utf-8'), '"Hello World"')
diff --git a/tests/integration/testdata/invoke/runtimes/go1.x/main.go b/tests/integration/testdata/invoke/runtimes/go1.x/main.go
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/invoke/runtimes/go1.x/main.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "github.com/aws/aws-lambda-go/lambda"
+)
+
+// Handler is your Lambda function handler
+// It uses Amazon API Gateway request/responses provided by the aws-lambda-go/events package,
+// However you could use other event sources (S3, Kinesis etc), or JSON-decoded primitive types such as 'string'.
+func Handler() (string, error) {
+
+ return "Hello World", nil
+
+}
+
+func main() {
+ lambda.Start(Handler)
+}
diff --git a/tests/integration/testdata/invoke/runtimes/go1.x/main.zip b/tests/integration/testdata/invoke/runtimes/go1.x/main.zip
new file mode 100644
Binary files /dev/null and b/tests/integration/testdata/invoke/runtimes/go1.x/main.zip differ
diff --git a/tests/integration/testdata/invoke/runtimes/java8/pom.xml b/tests/integration/testdata/invoke/runtimes/java8/pom.xml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/invoke/runtimes/java8/pom.xml
@@ -0,0 +1,47 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>helloworld</groupId>
+ <artifactId>HelloWorld</artifactId>
+ <version>1.0</version>
+ <packaging>jar</packaging>
+ <name>A sample Hello World created for SAM CLI.</name>
+ <properties>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.amazonaws</groupId>
+ <artifactId>aws-lambda-java-core</artifactId>
+ <version>1.1.0</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.12</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>3.1.1</version>
+ <configuration>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/tests/integration/testdata/invoke/runtimes/java8/src/main/java/helloworld/App.java b/tests/integration/testdata/invoke/runtimes/java8/src/main/java/helloworld/App.java
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/invoke/runtimes/java8/src/main/java/helloworld/App.java
@@ -0,0 +1,22 @@
+package helloworld;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.StringJoiner;
+
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+
+/**
+ * Handler for requests to Lambda function.
+ */
+public class App implements RequestHandler<Object, Object> {
+
+ public Object handleRequest(final Object input, final Context context) {
+ return "Hello World";
+ }
+}
diff --git a/tests/integration/testdata/invoke/runtimes/template.yaml b/tests/integration/testdata/invoke/runtimes/template.yaml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/invoke/runtimes/template.yaml
@@ -0,0 +1,19 @@
+AWSTemplateFormatVersion : '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Resources:
+
+ Go1xFunction:
+ Type: AWS::Serverless::Function
+ Properties:
+ Handler: main
+ Runtime: go1.x
+ CodeUri: ./go1.x/main.zip
+ Timeout: 300
+
+ Java8Function:
+ Type: AWS::Serverless::Function
+ Properties:
+ Handler: helloworld.App::handleRequest
+ Runtime: java8
+ CodeUri: ./java8/target/HelloWorld-1.0.jar
+ Timeout: 300
diff --git a/tests/unit/local/lambdafn/test_runtime.py b/tests/unit/local/lambdafn/test_runtime.py
--- a/tests/unit/local/lambdafn/test_runtime.py
+++ b/tests/unit/local/lambdafn/test_runtime.py
@@ -337,22 +337,19 @@ def test_must_return_a_valid_file(self, unzip_file_mock, shutil_mock, os_mock):
class TestUnzipFile(TestCase):
@patch("samcli.local.lambdafn.runtime.tempfile")
- @patch("samcli.local.lambdafn.runtime.zipfile")
+ @patch("samcli.local.lambdafn.runtime.unzip")
@patch("samcli.local.lambdafn.runtime.os")
- def test_must_unzip(self, os_mock, zipfile_mock, tempfile_mock):
+ def test_must_unzip(self, os_mock, unzip_mock, tempfile_mock):
inputpath = "somepath"
tmpdir = "/tmp/dir"
realpath = "/foo/bar/tmp/dir/code.zip"
- zipref = Mock()
tempfile_mock.mkdtemp.return_value = tmpdir
- zipfile_mock.ZipFile.return_value = zipref
os_mock.path.realpath.return_value = realpath
output = _unzip_file(inputpath)
self.assertEquals(output, realpath)
tempfile_mock.mkdtemp.assert_called_with()
- zipfile_mock.ZipFile.assert_called_with(inputpath, 'r') # Open file for reading
- zipref.extractall.assert_called_with(tmpdir) # unzip files to temporary directory
+ unzip_mock.assert_called_with(inputpath, tmpdir) # unzip files to temporary directory
os_mock.path.realpath(tmpdir) # Return the real path of temporary directory
diff --git a/tests/unit/local/lambdafn/test_zip.py b/tests/unit/local/lambdafn/test_zip.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/local/lambdafn/test_zip.py
@@ -0,0 +1,79 @@
+
+import stat
+import zipfile
+import os
+import shutil
+
+from samcli.local.lambdafn.zip import unzip
+
+from tempfile import NamedTemporaryFile, mkdtemp
+from contextlib import contextmanager
+from unittest import TestCase
+from nose_parameterized import parameterized, param
+
+
+class TestUnzipWithPermissions(TestCase):
+
+ files_with_permissions = {
+ "folder1/1.txt": 0o644,
+ "folder1/2.txt": 0o777,
+ "folder2/subdir/1.txt": 0o666,
+ "folder2/subdir/2.txt": 0o400
+ }
+
+ @parameterized.expand([param(True), param(False)])
+ def test_must_unzip(self, check_permissions):
+
+ with self._create_zip(self.files_with_permissions, check_permissions) as zip_file_name:
+ with self._temp_dir() as extract_dir:
+
+ unzip(zip_file_name, extract_dir)
+
+ for root, dirs, files in os.walk(extract_dir):
+ for file in files:
+ filepath = os.path.join(extract_dir, root, file)
+ perm = oct(stat.S_IMODE(os.stat(filepath).st_mode))
+ key = os.path.relpath(filepath, extract_dir)
+ expected_permission = oct(self.files_with_permissions[key])
+
+ self.assertIn(key, self.files_with_permissions)
+
+ if check_permissions:
+ self.assertEquals(expected_permission,
+ perm,
+ "File {} has wrong permission {}".format(key, perm))
+
+ @contextmanager
+ def _create_zip(self, files_with_permissions, add_permissions=True):
+
+ zipfilename = None
+ data = b'hello world'
+ try:
+ zipfilename = NamedTemporaryFile(mode="w+b").name
+
+ zf = zipfile.ZipFile(zipfilename, "w", zipfile.ZIP_DEFLATED)
+ for filename, perm in files_with_permissions.items():
+ fileinfo = zipfile.ZipInfo(filename)
+
+ if add_permissions:
+ fileinfo.external_attr = perm << 16
+
+ zf.writestr(fileinfo, data)
+
+ zf.close()
+
+ yield zipfilename
+
+ finally:
+ if zipfilename:
+ os.remove(zipfilename)
+
+ @contextmanager
+ def _temp_dir(self):
+ name = None
+ try:
+ name = mkdtemp()
+ yield name
+ finally:
+ if name:
+ shutil.rmtree(name)
| Golang binary provided as a zip file does not work with v0.3.0
<!--
Before reporting a new issue, make sure we don't have any duplicates already open or closed by
searching the issues list. If there is a duplicate, re-open or add a comment to the
existing issue instead of creating a new one. If you are reporting a bug,
make sure to include relevant information asked below to help with debugging.
## GENERAL HELP QUESTIONS ##
Github Issues is for bug reports and feature requests. If you have general support
questions, the following locations are a good place:
- Slack channel (#samdev): https://awssamopensource.splashthat.com/
- Post a question in StackOverflow with "aws-sam-cli" tag
-->
**Description:**
After upgrading to 0.3.0 I am now getting the following error while using `sam local start-api`
```
{
"errorMessage": "fork/exec /var/task/main: permission denied",
"errorType": "PathError"
}
```
I have tried rebuilding my functions using
```
GOOS=linux go build -o main main.go
zip -j main.zip main
```
but that doesn't seem to be fixing the issue.
**Additional environment details (Ex: Windows, Mac, Amazon Linux etc)**
Mac/OSX
**Output of `sam --version`**:
SAM CLI, version 0.3.0
**Optional Debug logs**:
```
2018-05-08 21:40:21 http://localhost:None "POST /v1.35/images/create?tag=go1.x&fromImage=lambci%2Flambda HTTP/1.1" 200 None
Fetching lambci/lambda:go1.x Docker container image......
2018-05-08 21:40:21 Mounting /private/var/folders/6k/wl5glw6x5vb2s4z2f6yfqr280000gn/T/tmplyA5JV as /var/task:ro inside runtime container
2018-05-08 21:40:21 http://localhost:None "POST /v1.35/containers/create HTTP/1.1" 201 90
2018-05-08 21:40:21 http://localhost:None "GET /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/json HTTP/1.1" 200 None
2018-05-08 21:40:21 http://localhost:None "GET /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/json HTTP/1.1" 200 None
2018-05-08 21:40:22 http://localhost:None "POST /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/start HTTP/1.1" 204 0
2018-05-08 21:40:22 Starting a timer for 90 seconds for function 'RegionRates'
2018-05-08 21:40:22 http://localhost:None "GET /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/json HTTP/1.1" 200 None
2018-05-08 21:40:22 http://localhost:None "POST /containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/attach?stream=1&stdin=0&logs=1&stderr=1&stdout=1 HTTP/1.1" 101 0
START RequestId: 9c3b6799-610b-1d1a-9e28-e37afcec3d87 Version: $LATEST
END RequestId: 9c3b6799-610b-1d1a-9e28-e37afcec3d87
REPORT RequestId: 9c3b6799-610b-1d1a-9e28-e37afcec3d87 Duration: 1.43 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 6 MB
{
"errorMessage": "fork/exec /var/task/main: permission denied",
"errorType": "PathError"
}
2018-05-08 21:40:22 http://localhost:None "GET /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6/json HTTP/1.1" 200 None
2018-05-08 21:40:22 http://localhost:None "DELETE /v1.35/containers/e0bbd05b31ca340043f139387946e89ac1fb69d0c9b32ff19d81e456d5522bc6?force=True&link=False&v=False HTTP/1.1" 204 0
2018-05-08 21:40:22 Function returned an invalid response (must include one of: body, headers or statusCode in the response object). Response received:
2018-05-08 21:40:22 127.0.0.1 - - [08/May/2018 21:40:22] "POST /v2/exports/rates HTTP/1.1" 502 -
```
| You should chmod and give your binary executable permissions
I'm also getting this on a Go project. My reproducible branch is at https://github.com/nzoschke/gofaas/pull/62
Turns out, this is a regression from previous version because Python's ZipFile module does not retain file permissions on unzipping - https://bugs.python.org/issue15795.
Several folks have fixed it in different ways (ex: https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module). We need to implement a clean solution to this problem.
We will prioritize this and get a release out asap
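For reference, the approach that ended up in the zip.py patch at the top of this record: the Unix mode bits live in the top 16 bits of each entry's `external_attr`, so they can be restored with `os.chmod` after extraction. A condensed sketch:
```python
import os
import zipfile

def unzip_preserving_permissions(zip_path, out_dir):
    with zipfile.ZipFile(zip_path, "r") as zf:
        for info in zf.infolist():
            target = zf.extract(info.filename, out_dir)
            mode = info.external_attr >> 16
            if mode:  # zips built by some Windows tools carry no permission bits
                os.chmod(target, mode)
```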
We did some digging in on the awsdevelopers Slack. With some sleep and debug statements I see that the tmp dir doesn't have any executable bits on the main file:
```
$ cd /var/folders/px/fd8j3qvn13gcxw9_nw25pphw0000gn/T/tmpKXhUgv
$ ls -al
total 36816
drwx------ 3 noah staff 102 May 9 07:45 .
drwx------@ 259 noah staff 8806 May 9 07:45 ..
-rw-r--r-- 1 noah staff 18846338 May 9 07:45 main
```
@sanathkr found this, which points to Python zip library:
> looks like zipfile Python module doesn’t retain permissions on unzip.. -
> https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838
This problem occurs specifically when you bundle the Golang binary as a zipfile and provide it to CodeUri. As a workaround, you can set CodeUri to be a path to a folder that contains your binary. This will work. In fact this will be faster because SAM CLI does not have to unzip on every invoke.
If you have something like:
```
MyFunction:
Type: AWS::Serverless::Function
Properties:
...
CodeUri: ./myfunction.zip
```
Instead, convert it to something like:
```
MyFunction:
Type: AWS::Serverless::Function
Properties:
...
CodeUri: ./build
```
Where the `build` folder contains your Golang binary. This will save you the step of building a zip file and also improve performance because SAM CLI doesn't have to unzip on every invoke.
> This problems is specifically only when you bundle the Golang binary as a zipfile and provide to CodeUri.
I can confirm that this is an issue on Linux for a Python project (the issue is not limited to Golang on MacOS).
> As a workaround, you can set CodeUri to be a path to a folder that contains your binary.
I just tested this, and it worked properly. Thanks!
Thanks all!
Reopening. The root cause of this is not fixed. | 2018-06-06T21:28:31 |
aws/aws-sam-cli | 815 | aws__aws-sam-cli-815 | [
"812"
] | b428871db2ef0a2faa2c40d9b9252502996c4506 | diff --git a/samcli/__init__.py b/samcli/__init__.py
--- a/samcli/__init__.py
+++ b/samcli/__init__.py
@@ -2,4 +2,4 @@
SAM CLI version
"""
-__version__ = '0.8.0'
+__version__ = '0.8.1'
diff --git a/samcli/cli/options.py b/samcli/cli/options.py
--- a/samcli/cli/options.py
+++ b/samcli/cli/options.py
@@ -41,7 +41,6 @@ def callback(ctx, param, value):
return click.option('--region',
expose_value=False,
help='Set the AWS Region of the service (e.g. us-east-1).',
- default='us-east-1',
callback=callback)(f)
| diff --git a/tests/integration/local/invoke/test_integrations_cli.py b/tests/integration/local/invoke/test_integrations_cli.py
--- a/tests/integration/local/invoke/test_integrations_cli.py
+++ b/tests/integration/local/invoke/test_integrations_cli.py
@@ -1,6 +1,7 @@
import json
import shutil
import os
+import copy
from nose_parameterized import parameterized
from subprocess import Popen, PIPE
@@ -176,6 +177,34 @@ def test_invoke_with_env_using_parameters_with_custom_region(self):
self.assertEquals(environ["Region"], custom_region)
+ def test_invoke_with_env_with_aws_creds(self):
+ custom_region = "my-custom-region"
+ key = "key"
+ secret = "secret"
+ session = "session"
+
+ command_list = self.get_command_list("EchoEnvWithParameters",
+ template_path=self.template_path,
+ event_path=self.event_path)
+
+ env = copy.deepcopy(dict(os.environ))
+ env["AWS_DEFAULT_REGION"] = custom_region
+ env["AWS_REGION"] = custom_region
+ env["AWS_ACCESS_KEY_ID"] = key
+ env["AWS_SECRET_ACCESS_KEY"] = secret
+ env["AWS_SESSION_TOKEN"] = session
+
+ process = Popen(command_list, stdout=PIPE, env=env)
+ process.wait()
+ process_stdout = b"".join(process.stdout.readlines()).strip()
+ environ = json.loads(process_stdout.decode('utf-8'))
+
+ self.assertEquals(environ["AWS_DEFAULT_REGION"], custom_region)
+ self.assertEquals(environ["AWS_REGION"], custom_region)
+ self.assertEquals(environ["AWS_ACCESS_KEY_ID"], key)
+ self.assertEquals(environ["AWS_SECRET_ACCESS_KEY"], secret)
+ self.assertEquals(environ["AWS_SESSION_TOKEN"], session)
+
def test_invoke_with_docker_network_of_host(self):
command_list = self.get_command_list("HelloWorldServerlessFunction",
template_path=self.template_path,
| Region from Env Vars or profile are not respected for ALL commands but package and deploy
The region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and not allow boto3 to do its resolving of credentials and regions, which is used to set the correct values into the docker container.
Current workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.
Fix is in #811
| 2018-11-30T16:46:59 |
|
aws/aws-sam-cli | 828 | aws__aws-sam-cli-828 | [
"774"
] | 100920e84c7f6c3d236ab15661804bf95f8da4cb | diff --git a/samcli/commands/local/cli_common/invoke_context.py b/samcli/commands/local/cli_common/invoke_context.py
--- a/samcli/commands/local/cli_common/invoke_context.py
+++ b/samcli/commands/local/cli_common/invoke_context.py
@@ -6,9 +6,6 @@
import json
import os
-import docker
-import requests
-
import samcli.lib.utils.osutils as osutils
from samcli.commands.local.lib.local_lambda import LocalLambdaRunner
from samcli.commands.local.lib.debug_context import DebugContext
@@ -118,6 +115,7 @@ def __init__(self, # pylint: disable=R0914
self._log_file_handle = None
self._debug_context = None
self._layers_downloader = None
+ self._container_manager = None
def __enter__(self):
"""
@@ -137,7 +135,11 @@ def __enter__(self):
self._debug_args,
self._debugger_path)
- self._check_docker_connectivity()
+ self._container_manager = self._get_container_manager(self._docker_network,
+ self._skip_pull_image)
+
+ if not self._container_manager.is_docker_reachable:
+ raise InvokeContextException("Running AWS SAM projects locally requires Docker. Have you got it installed?")
return self
@@ -185,15 +187,12 @@ def local_lambda_runner(self):
locally
"""
- container_manager = ContainerManager(docker_network_id=self._docker_network,
- skip_pull_image=self._skip_pull_image)
-
layer_downloader = LayerDownloader(self._layer_cache_basedir, self.get_cwd())
image_builder = LambdaImage(layer_downloader,
self._skip_pull_image,
self._force_image_build)
- lambda_runtime = LambdaRuntime(container_manager, image_builder)
+ lambda_runtime = LambdaRuntime(self._container_manager, image_builder)
return LocalLambdaRunner(local_runtime=lambda_runtime,
function_provider=self._function_provider,
cwd=self.get_cwd(),
@@ -350,19 +349,21 @@ def _get_debug_context(debug_port, debug_args, debugger_path):
return DebugContext(debug_port=debug_port, debug_args=debug_args, debugger_path=debugger_path)
@staticmethod
- def _check_docker_connectivity(docker_client=None):
+ def _get_container_manager(docker_network, skip_pull_image):
"""
- Checks if Docker daemon is running. This is required for us to invoke the function locally
+ Creates a ContainerManager with specified options
- :param docker_client: Instance of Docker client
- :return bool: True, if Docker is available
- :raises InvokeContextException: If Docker is not available
- """
+ Parameters
+ ----------
+ docker_network str
+ Docker network identifier
+ skip_pull_image bool
+ Should the manager skip pulling the image
- docker_client = docker_client or docker.from_env()
+ Returns
+ -------
+ samcli.local.docker.manager.ContainerManager
+ Object representing Docker container manager
+ """
- try:
- docker_client.ping()
- # When Docker is not installed, a request.exceptions.ConnectionError is thrown.
- except (docker.errors.APIError, requests.exceptions.ConnectionError):
- raise InvokeContextException("Running AWS SAM projects locally requires Docker. Have you got it installed?")
+ return ContainerManager(docker_network_id=docker_network, skip_pull_image=skip_pull_image)
diff --git a/samcli/local/docker/manager.py b/samcli/local/docker/manager.py
--- a/samcli/local/docker/manager.py
+++ b/samcli/local/docker/manager.py
@@ -6,6 +6,7 @@
import sys
import docker
+import requests
LOG = logging.getLogger(__name__)
@@ -33,6 +34,26 @@ def __init__(self,
self.docker_network_id = docker_network_id
self.docker_client = docker_client or docker.from_env()
+ @property
+ def is_docker_reachable(self):
+ """
+ Checks if Docker daemon is running. This is required for us to invoke the function locally
+
+ Returns
+ -------
+ bool
+ True, if Docker is available, False otherwise
+ """
+ try:
+ self.docker_client.ping()
+
+ return True
+
+ # When Docker is not installed, a request.exceptions.ConnectionError is thrown.
+ except (docker.errors.APIError, requests.exceptions.ConnectionError):
+ LOG.debug("Docker is not reachable", exc_info=True)
+ return False
+
def run(self, container, input_data=None, warm=False):
"""
Create and run a Docker container based on the given configuration.
| diff --git a/tests/unit/commands/local/cli_common/test_invoke_context.py b/tests/unit/commands/local/cli_common/test_invoke_context.py
--- a/tests/unit/commands/local/cli_common/test_invoke_context.py
+++ b/tests/unit/commands/local/cli_common/test_invoke_context.py
@@ -5,15 +5,11 @@
import os
import sys
-import docker
-import requests
-
from samcli.commands.local.cli_common.user_exceptions import InvokeContextException, DebugContextException
from samcli.commands.local.cli_common.invoke_context import InvokeContext
from unittest import TestCase
-from mock import Mock, patch, ANY, mock_open
-from parameterized import parameterized, param
+from mock import Mock, PropertyMock, patch, ANY, mock_open
class TestInvokeContext__enter__(TestCase):
@@ -57,24 +53,70 @@ def test_must_read_from_necessary_files(self, SamFunctionProviderMock):
invoke_context._get_debug_context = Mock()
invoke_context._get_debug_context.return_value = debug_context_mock
- invoke_context._check_docker_connectivity = Mock()
+ container_manager_mock = Mock()
+ container_manager_mock.is_docker_reachable = True
+ invoke_context._get_container_manager = Mock(return_value=container_manager_mock)
# Call Enter method manually for testing purposes
result = invoke_context.__enter__()
self.assertTrue(result is invoke_context, "__enter__() must return self")
- self.assertEquals(invoke_context._template_dict, template_dict)
- self.assertEquals(invoke_context._function_provider, function_provider)
- self.assertEquals(invoke_context._env_vars_value, env_vars_value)
- self.assertEquals(invoke_context._log_file_handle, log_file_handle)
- self.assertEquals(invoke_context._debug_context, debug_context_mock)
+ self.assertEqual(invoke_context._template_dict, template_dict)
+ self.assertEqual(invoke_context._function_provider, function_provider)
+ self.assertEqual(invoke_context._env_vars_value, env_vars_value)
+ self.assertEqual(invoke_context._log_file_handle, log_file_handle)
+ self.assertEqual(invoke_context._debug_context, debug_context_mock)
+ self.assertEqual(invoke_context._container_manager, container_manager_mock)
invoke_context._get_template_data.assert_called_with(template_file)
SamFunctionProviderMock.assert_called_with(template_dict, {"AWS::Region": "region"})
invoke_context._get_env_vars_value.assert_called_with(env_vars_file)
invoke_context._setup_log_file.assert_called_with(log_file)
invoke_context._get_debug_context.assert_called_once_with(1111, "args", "path-to-debugger")
- invoke_context._check_docker_connectivity.assert_called_with()
+ invoke_context._get_container_manager.assert_called_once_with("network", True)
+
+ @patch("samcli.commands.local.cli_common.invoke_context.SamFunctionProvider")
+ def test_must_use_container_manager_to_check_docker_connectivity(self, SamFunctionProviderMock):
+ invoke_context = InvokeContext("template-file")
+
+ invoke_context._get_template_data = Mock()
+ invoke_context._get_env_vars_value = Mock()
+ invoke_context._setup_log_file = Mock()
+ invoke_context._get_debug_context = Mock()
+
+ container_manager_mock = Mock()
+
+ with patch.object(type(container_manager_mock), "is_docker_reachable",
+ create=True, new_callable=PropertyMock, return_value=True) as is_docker_reachable_mock:
+ invoke_context._get_container_manager = Mock()
+ invoke_context._get_container_manager.return_value = container_manager_mock
+
+ invoke_context.__enter__()
+
+ is_docker_reachable_mock.assert_called_once_with()
+
+ @patch("samcli.commands.local.cli_common.invoke_context.SamFunctionProvider")
+ def test_must_raise_if_docker_is_not_reachable(self, SamFunctionProviderMock):
+ invoke_context = InvokeContext("template-file")
+
+ invoke_context._get_template_data = Mock()
+ invoke_context._get_env_vars_value = Mock()
+ invoke_context._setup_log_file = Mock()
+ invoke_context._get_debug_context = Mock()
+
+ container_manager_mock = Mock()
+
+ with patch.object(type(container_manager_mock), "is_docker_reachable",
+ create=True, new_callable=PropertyMock, return_value=False):
+
+ invoke_context._get_container_manager = Mock()
+ invoke_context._get_container_manager.return_value = container_manager_mock
+
+ with self.assertRaises(InvokeContextException) as ex_ctx:
+ invoke_context.__enter__()
+
+ self.assertEqual("Running AWS SAM projects locally requires Docker. Have you got it installed?",
+ str(ex_ctx.exception))
class TestInvokeContext__exit__(TestCase):
@@ -170,19 +212,16 @@ def setUp(self):
@patch("samcli.commands.local.cli_common.invoke_context.LambdaImage")
@patch("samcli.commands.local.cli_common.invoke_context.LayerDownloader")
- @patch("samcli.commands.local.cli_common.invoke_context.ContainerManager")
@patch("samcli.commands.local.cli_common.invoke_context.LambdaRuntime")
@patch("samcli.commands.local.cli_common.invoke_context.LocalLambdaRunner")
+ @patch("samcli.commands.local.cli_common.invoke_context.SamFunctionProvider")
def test_must_create_runner(self,
+ SamFunctionProviderMock,
LocalLambdaMock,
LambdaRuntimeMock,
- ContainerManagerMock,
download_layers_mock,
lambda_image_patch):
- container_mock = Mock()
- ContainerManagerMock.return_value = container_mock
-
runtime_mock = Mock()
LambdaRuntimeMock.return_value = runtime_mock
@@ -199,18 +238,26 @@ def test_must_create_runner(self,
self.context.get_cwd = Mock()
self.context.get_cwd.return_value = cwd
- result = self.context.local_lambda_runner
- self.assertEquals(result, runner_mock)
+ self.context._get_template_data = Mock()
+ self.context._get_env_vars_value = Mock()
+ self.context._setup_log_file = Mock()
+ self.context._get_debug_context = Mock(return_value=None)
+
+ container_manager_mock = Mock()
+ container_manager_mock.is_docker_reachable = PropertyMock(return_value=True)
+ self.context._get_container_manager = Mock(return_value=container_manager_mock)
+
+ with self.context:
+ result = self.context.local_lambda_runner
+ self.assertEquals(result, runner_mock)
- ContainerManagerMock.assert_called_with(docker_network_id="network",
- skip_pull_image=True)
- LambdaRuntimeMock.assert_called_with(container_mock, image_mock)
- lambda_image_patch.assert_called_once_with(download_mock, True, True)
- LocalLambdaMock.assert_called_with(local_runtime=runtime_mock,
- function_provider=ANY,
- cwd=cwd,
- debug_context=None,
- env_vars_values=ANY)
+ LambdaRuntimeMock.assert_called_with(container_manager_mock, image_mock)
+ lambda_image_patch.assert_called_once_with(download_mock, True, True)
+ LocalLambdaMock.assert_called_with(local_runtime=runtime_mock,
+ function_provider=ANY,
+ cwd=cwd,
+ debug_context=None,
+ env_vars_values=ANY)
class TestInvokeContext_stdout_property(TestCase):
@@ -398,34 +445,3 @@ def test_debugger_path_resolves(self, pathlib_mock, debug_context_mock):
resolve_path_mock.is_dir.assert_called_once()
pathlib_path_mock.resolve.assert_called_once_with(strict=True)
pathlib_mock.assert_called_once_with("./path")
-
-
-class TestInvokeContext_check_docker_connectivity(TestCase):
-
- def test_must_call_ping(self):
- client = Mock()
- InvokeContext._check_docker_connectivity(client)
- client.ping.assert_called_with()
-
- @patch("samcli.commands.local.cli_common.invoke_context.docker")
- def test_must_call_ping_with_docker_client_from_env(self, docker_mock):
- client = Mock()
- docker_mock.from_env.return_value = client
-
- InvokeContext._check_docker_connectivity()
- client.ping.assert_called_with()
-
- @parameterized.expand([
- param("Docker APIError thrown", docker.errors.APIError("error")),
- param("Requests ConnectionError thrown", requests.exceptions.ConnectionError("error"))
- ])
- def test_must_raise_if_docker_not_found(self, test_name, error_docker_throws):
- client = Mock()
-
- client.ping.side_effect = error_docker_throws
-
- with self.assertRaises(InvokeContextException) as ex_ctx:
- InvokeContext._check_docker_connectivity(client)
-
- msg = str(ex_ctx.exception)
- self.assertEquals(msg, "Running AWS SAM projects locally requires Docker. Have you got it installed?")
diff --git a/tests/unit/local/docker/test_manager.py b/tests/unit/local/docker/test_manager.py
--- a/tests/unit/local/docker/test_manager.py
+++ b/tests/unit/local/docker/test_manager.py
@@ -3,8 +3,10 @@
"""
import io
-
from unittest import TestCase
+
+import requests
+
from mock import Mock
from docker.errors import APIError, ImageNotFound
from samcli.local.docker.manager import ContainerManager, DockerImagePullFailedException
@@ -206,6 +208,41 @@ def test_must_raise_if_image_not_found(self):
self.assertEquals(str(ex), msg)
+class TestContainerManager_is_docker_reachable(TestCase):
+
+ def setUp(self):
+ self.ping_mock = Mock()
+
+ docker_client_mock = Mock()
+ docker_client_mock.ping = self.ping_mock
+
+ self.manager = ContainerManager(docker_client=docker_client_mock)
+
+ def test_must_use_docker_client_ping(self):
+ self.manager.is_docker_reachable
+
+ self.ping_mock.assert_called_once_with()
+
+ def test_must_return_true_if_ping_does_not_raise(self):
+ is_reachable = self.manager.is_docker_reachable
+
+ self.assertTrue(is_reachable)
+
+ def test_must_return_false_if_ping_raises_api_error(self):
+ self.ping_mock.side_effect = APIError("error")
+
+ is_reachable = self.manager.is_docker_reachable
+
+ self.assertFalse(is_reachable)
+
+ def test_must_return_false_if_ping_raises_connection_error(self):
+ self.ping_mock.side_effect = requests.exceptions.ConnectionError("error")
+
+ is_reachable = self.manager.is_docker_reachable
+
+ self.assertFalse(is_reachable)
+
+
class TestContainerManager_has_image(TestCase):
def setUp(self):
| Container name feature implementation
## Changes
This PR brings a new feature to the SAM CLI - the `--container-name` option, which enables customers to specify the name for the AWS Lambda runtime Docker container when it is run.
### Original issue
#568 - root cause issue; #759 - design doc PR, which describes the need for the new `--container-name` flag. It is a required step for enabling .NET Core debugging. For a detailed explanation see the [rendered doc](https://github.com/ndobryanskyy/aws-sam-cli/blob/dotnetcore-debugging-design/designs/dotnetcore-debugging.md).
### Sidenote
_I still need help deciding where to document this flag so that users know it exists. Any comments on this are appreciated._
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
| I looked at this issue yesterday. I was thinking of going further than just names for containers — I was also thinking of adding labels too (perhaps ones for another PR)
@sthulb yes, we can add this functionality, but I don't see a practical use-case for it yet.
`--container-name` is specifically added to enable .NET Core debugging, see #568.
And what do you think about the changes? Do you have any remarks or comments?
from a personal point of view, I’d rather see the container name be automatic, so every container gets a name, probably based on the resource name.
It would still give you the ability to have an explicit name to allow for debugging.
@sthulb, you mean that SAM should assign an explicit name to each container it runs, and the `--container-name` option would override this behavior?
Addressed review comments and implemented integration and functional tests. @sanathkr, please take a look when you have time.
@ndobryanskyy Could you please rebase and address the merge conflicts. Our recent release brought in a bunch of changes.
I am going to review now, as well.
Guys, yesterday, after looking through your comments, I was hit by a brilliant idea: what if we just eliminate the container name feature altogether and use a neat trick with Docker, namely `docker ps --filter`?
### The Trick
Docker supports listing containers using the `docker ps` [command](https://docs.docker.com/engine/reference/commandline/ps/). This command not only lists all containers, it can also filter them based on certain attributes. What is most interesting for us is that it can filter running containers by **published** port (and as you know, when the `-d <debugger_port>` flag is supplied to SAM CLI it publishes `debugger_port` for the running container), so with `docker ps --filter publish=<debugger_port> -q` (`-q` returns only the container ID) we can easily find the ID of the Lambda container started by SAM without knowing or specifying an exact name. Pretty neat, right?
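For anyone scripting around this from Python rather than a `launch.json` pipe command, here is a minimal sketch of the same lookup (a hypothetical helper that just shells out to the Docker CLI; the port is whatever was passed to `-d`):
```python
import subprocess

def find_sam_container_id(debugger_port: int) -> str:
    # `docker ps -q --filter publish=<port>` prints only the IDs of running containers
    # that publish the given port, which is what `sam local invoke -d <port>` does
    # for the Lambda container it starts.
    result = subprocess.run(
        ["docker", "ps", "-q", "--filter", "publish={}".format(debugger_port)],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()

print(find_sam_container_id(6000))
```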
### Real world example
Yesterday I also tried to use this approach for .NET Core debugging by checking out the SAM `develop` branch and adding a debugging-support entry point there, which uses [my modified version of the .NET Core lambda runner](https://github.com/lambci/docker-lambda/pull/130) with _wait for debugger_ flag support. And it worked! 🎉
This is how the `pipeTransport` section of my `launch.json` looks:
```json
"pipeTransport": {
"pipeProgram": "powershell",
"pipeArgs": [
"-c",
"docker exec -i $(docker ps -q -f publish=6000)"
],
"debuggerPath": "/tmp/lambci_debug_files/vsdbg",
"pipeCwd": "${workspaceRoot}"
}
```
Lambda was invoked with this command: `samdev local invoke -e event.json -d 6000 --debugger-path "C:/users/ndobryanskyy/vsdbg" --skip-pull-image "SuperFunction" --region us-east`
I like that it requires nearly no changes to the SAM CLI in order to work; the only thing we need to do is provide customers with this `launch.json` file. I will have time tomorrow to investigate whether it is possible to use the same technique for VS 2017, but I'm [almost sure](https://github.com/Microsoft/MIEngine/wiki/Offroad-Debugging-of-.NET-Core-on-Linux---OSX-from-Visual-Studio) that the answer is **yes**.
@sanathkr @TheSriram @jfuss what do you guys think of it?
I can prepare the PR with the required changes by Monday, and file another one with the `ContainerManager` improvements from this one.
This is incredibly slick! I wish we had found this before you did the whole container name PR.
+1 to take this route as it leads to a good UX | 2018-12-04T18:29:52 |
aws/aws-sam-cli | 935 | aws__aws-sam-cli-935 | [
"934"
] | 57e67e2ab2f60f28d5aa141018ba6fa134010e28 | diff --git a/samcli/commands/_utils/template.py b/samcli/commands/_utils/template.py
--- a/samcli/commands/_utils/template.py
+++ b/samcli/commands/_utils/template.py
@@ -14,6 +14,10 @@
from samcli.yamlhelper import yaml_parse, yaml_dump
+_METADATA_WITH_LOCAL_PATHS = {
+ "AWS::ServerlessRepo::Application": ["LicenseUrl", "ReadmeUrl"]
+}
+
_RESOURCES_WITH_LOCAL_PATHS = {
"AWS::Serverless::Function": ["CodeUri"],
"AWS::Serverless::Api": ["DefinitionUri"],
@@ -132,6 +136,22 @@ def _update_relative_paths(template_dict,
"""
+ for resource_type, properties in template_dict.get("Metadata", {}).items():
+
+ if resource_type not in _METADATA_WITH_LOCAL_PATHS:
+ # Unknown resource. Skipping
+ continue
+
+ for path_prop_name in _METADATA_WITH_LOCAL_PATHS[resource_type]:
+ path = properties.get(path_prop_name)
+
+ updated_path = _resolve_relative_to(path, original_root, new_root)
+ if not updated_path:
+ # This path does not need to get updated
+ continue
+
+ properties[path_prop_name] = updated_path
+
for _, resource in template_dict.get("Resources", {}).items():
resource_type = resource.get("Type")
| diff --git a/tests/unit/commands/_utils/test_template.py b/tests/unit/commands/_utils/test_template.py
--- a/tests/unit/commands/_utils/test_template.py
+++ b/tests/unit/commands/_utils/test_template.py
@@ -7,8 +7,8 @@
from mock import patch, mock_open
from parameterized import parameterized, param
-from samcli.commands._utils.template import get_template_data, _RESOURCES_WITH_LOCAL_PATHS, _update_relative_paths, \
- move_template
+from samcli.commands._utils.template import get_template_data, _METADATA_WITH_LOCAL_PATHS, \
+ _RESOURCES_WITH_LOCAL_PATHS, _update_relative_paths, move_template
class Test_get_template_data(TestCase):
@@ -78,10 +78,41 @@ def setUp(self):
self.expected_result = os.path.join("..", "foo", "bar")
+ @parameterized.expand(
+ [(resource_type, props) for resource_type, props in _METADATA_WITH_LOCAL_PATHS.items()]
+ )
+ def test_must_update_relative_metadata_paths(self, resource_type, properties):
+
+ for propname in properties:
+ for path in [self.s3path, self.abspath, self.curpath]:
+ template_dict = {
+ "Metadata": {
+ resource_type: {
+ propname: path
+ },
+ "AWS::Ec2::Instance": {
+ propname: path
+ }
+ },
+ "Parameters": {
+ "a": "b"
+ }
+ }
+
+ expected_template_dict = copy.deepcopy(template_dict)
+ if path == self.curpath:
+ expected_template_dict["Metadata"][resource_type][propname] = \
+ self.expected_result
+
+ result = _update_relative_paths(template_dict, self.src, self.dest)
+
+ self.maxDiff = None
+ self.assertEquals(result, expected_template_dict)
+
@parameterized.expand(
[(resource_type, props) for resource_type, props in _RESOURCES_WITH_LOCAL_PATHS.items()]
)
- def test_must_update_relative_paths(self, resource_type, properties):
+ def test_must_update_relative_resource_paths(self, resource_type, properties):
for propname in properties:
| sam package of template with SAR metadata fails when using sam build
### Description
`sam package` fails when trying to package artifacts built by `sam build` if the template contains SAR metadata that references local files for `LicenseUrl` or `ReadmeUrl`, which should get uploaded by `sam package`. Without `sam build` this works properly, as the files are present in the template directory.
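For context, the direction a fix has to take is to re-root such relative paths when the built template is written under `.aws-sam/build`. A simplified stand-in for the `_resolve_relative_to` helper that the patch above relies on (illustrative only, not the actual implementation):
```python
import os

def resolve_relative_to(path, original_root, new_root):
    # S3 URIs and absolute paths need no rewriting; only plain relative paths do.
    if not isinstance(path, str) or path.startswith("s3://") or os.path.isabs(path):
        return None
    return os.path.relpath(os.path.join(original_root, path), new_root)

print(resolve_relative_to("./LICENSE", "/tmp/sam-app", "/tmp/sam-app/.aws-sam/build"))
# -> '../../LICENSE' on POSIX systems
```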
### Steps to reproduce
```
/tmp $ sam init
2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/Grammar.txt
2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/PatternGrammar.txt
[+] Initializing project structure...
[SUCCESS] - Read sam-app/README.md for further instructions on how to proceed
[*] Project initialization is now complete
/tmp $ cd sam-app/
```
* Insert minimal SAR metadata into the template:
```
Metadata:
AWS::ServerlessRepo::Application:
Name: hello-world
Description: hello world
Author: John
SpdxLicenseId: MIT
LicenseUrl: ./LICENSE
SemanticVersion: 0.0.1
```
```
/tmp/sam-app $ echo "dummy license text" > LICENSE
/tmp/sam-app $ sam build --use-container
2019-01-14 13:45:23 Starting Build inside a container
2019-01-14 13:45:23 Found credentials in shared credentials file: ~/.aws/credentials
2019-01-14 13:45:23 Building resource 'HelloWorldFunction'
Fetching lambci/lambda:build-nodejs8.10 Docker container image......
2019-01-14 13:45:32 Mounting /tmp/sam-app/hello-world as /tmp/samcli/source:ro inside runtime container
Build Succeeded
Built Artifacts : .aws-sam/build
Built Template : .aws-sam/build/template.yaml
Commands you can use next
=========================
[*] Invoke Function: sam local invoke
[*] Package: sam package --s3-bucket <yourbucket>
'nodejs' runtime has not been validated!
Running NodejsNpmBuilder:NpmPack
Running NodejsNpmBuilder:CopySource
Running NodejsNpmBuilder:NpmInstall
/tmp/sam-app $ sam package --s3-bucket dummy
Unable to upload artifact ./LICENSE referenced by LicenseUrl parameter of AWS::ServerlessRepo::Application resource.
Parameter LicenseUrl of resource AWS::ServerlessRepo::Application refers to a file or folder that does not exist /tmp/sam-app/.aws-sam/build/LICENSE
```
### Observed result
`sam package` fails because the `LICENSE` file isn't present in the build directory.
### Expected result
`sam package` succeeds.
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Debian/unstable
2. `sam --version`: `SAM CLI, version 0.10.0`
| 2019-01-14T14:31:03 |
|
aws/aws-sam-cli | 1,030 | aws__aws-sam-cli-1030 | [
"1029"
] | ec3097a5b6b7e4533fd28e944a6ce343c109f390 | diff --git a/samcli/local/docker/lambda_build_container.py b/samcli/local/docker/lambda_build_container.py
--- a/samcli/local/docker/lambda_build_container.py
+++ b/samcli/local/docker/lambda_build_container.py
@@ -46,6 +46,18 @@ def __init__(self, # pylint: disable=too-many-locals
container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)
+ # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to
+ # the builder. But these paths don't exist within the container. We use the following method to convert the
+ # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore
+ # it. In essence, only when the path is already in the mounted path, can the path resolver within the
+ # container even find the executable.
+ executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(
+ host_paths_to_convert=executable_search_paths,
+ host_to_container_path_mapping={
+ source_dir: container_dirs["source_dir"],
+ manifest_dir: container_dirs["manifest_dir"]
+ })
+
request_json = self._make_request(protocol_version,
language,
dependency_manager,
@@ -163,6 +175,54 @@ def _get_container_dirs(source_dir, manifest_dir):
return result
+ @staticmethod
+ def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):
+ """
+ Use this method to convert a list of host paths to a list of equivalent paths within the container
+ where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to
+ the Lambda Builder running within the container.
+
+ If a host path is not mounted within the container, then this method simply passes the path to the result
+ without any changes.
+
+ Ex:
+ [ "/home/foo", "/home/bar", "/home/not/mounted"] => ["/tmp/source", "/tmp/manifest", "/home/not/mounted"]
+
+ Parameters
+ ----------
+ host_paths_to_convert : list
+ List of paths in host that needs to be converted
+
+ host_to_container_path_mapping : dict
+ Mapping of paths in host to the equivalent paths within the container
+
+ Returns
+ -------
+ list
+ Equivalent paths within the container
+ """
+
+ if not host_paths_to_convert:
+ # Nothing to do
+ return host_paths_to_convert
+
+ # Make sure the key is absolute host path. Relative paths are tricky to work with because two different
+ # relative paths can point to the same directory ("../foo", "../../foo")
+ mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}
+
+ result = []
+ for original_path in host_paths_to_convert:
+ abspath = str(pathlib.Path(original_path).resolve())
+
+ if abspath in mapping:
+ result.append(mapping[abspath])
+ else:
+ result.append(original_path)
+ LOG.debug("Cannot convert host path '%s' to its equivalent path within the container. "
+ "Host path is not mounted within the container", abspath)
+
+ return result
+
@staticmethod
def _get_image(runtime):
return "{}:build-{}".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)
| diff --git a/tests/unit/local/docker/test_lambda_build_container.py b/tests/unit/local/docker/test_lambda_build_container.py
--- a/tests/unit/local/docker/test_lambda_build_container.py
+++ b/tests/unit/local/docker/test_lambda_build_container.py
@@ -3,6 +3,11 @@
"""
import json
+try:
+ import pathlib
+except ImportError:
+ import pathlib2 as pathlib
+
from unittest import TestCase
from mock import patch
@@ -157,3 +162,44 @@ class TestLambdaBuildContainer_get_entrypoint(TestCase):
def test_must_get_entrypoint(self):
self.assertEquals(["lambda-builders", "requestjson"],
LambdaBuildContainer._get_entrypoint("requestjson"))
+
+
+class TestLambdaBuildContainer_convert_to_container_dirs(TestCase):
+
+ def test_must_work_on_abs_and_relative_paths(self):
+
+ input = [".", "../foo", "/some/abs/path"]
+ mapping = {
+ str(pathlib.Path(".").resolve()): "/first",
+ "../foo": "/second",
+ "/some/abs/path": "/third"
+ }
+
+ expected = ["/first", "/second", "/third"]
+ result = LambdaBuildContainer._convert_to_container_dirs(input, mapping)
+
+ self.assertEquals(result, expected)
+
+ def test_must_skip_unknown_paths(self):
+
+ input = ["/known/path", "/unknown/path"]
+ mapping = {
+ "/known/path": "/first"
+ }
+
+ expected = ["/first", "/unknown/path"]
+ result = LambdaBuildContainer._convert_to_container_dirs(input, mapping)
+
+ self.assertEquals(result, expected)
+
+ def test_must_skip_on_empty_input(self):
+
+ input = None
+ mapping = {
+ "/known/path": "/first"
+ }
+
+ expected = None
+ result = LambdaBuildContainer._convert_to_container_dirs(input, mapping)
+
+ self.assertEquals(result, expected)
| `sam build` for Gradle using gradlew does not work with --use-container
### Description
When a Gradle project uses a `gradlew` script, this file does not get picked up when building inside a container.
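A rough illustration of the direction the fix takes (made-up host paths, not the actual SAM CLI code): executable search paths on the host, such as the directory containing `./gradlew`, are only meaningful inside the build container once they are translated to the container mount points, and any path that is not mounted is passed through unchanged.
```python
def to_container_paths(host_paths, host_to_container):
    # Replace host paths that are mounted inside the container with their
    # container-side equivalents; leave everything else untouched.
    return [host_to_container.get(path, path) for path in host_paths]

mapping = {
    "/home/user/project": "/tmp/samcli/source",          # where the source is mounted
    "/home/user/project/build": "/tmp/samcli/manifest",  # where the manifest is mounted
}
print(to_container_paths(["/home/user/project", "/opt/gradle/bin"], mapping))
# -> ['/tmp/samcli/source', '/opt/gradle/bin']
```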
### Steps to reproduce
This integration test sets everything up to repro this issue - https://github.com/awslabs/aws-sam-cli/blob/develop/tests/integration/buildcmd/test_build_cmd.py#L256
### Observed result
The `gradle` installation within the container is used instead of the `gradlew` script.
### Expected result
`gradlew` script is used to build the project
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS:
2. `sam --version`:
| 2019-02-28T16:26:23 |
|
aws/aws-sam-cli | 1,348 | aws__aws-sam-cli-1348 | [
"1313"
] | 9c0cfd309134bc5f4b0fd867b696974c38391860 | diff --git a/samcli/cli/global_config.py b/samcli/cli/global_config.py
--- a/samcli/cli/global_config.py
+++ b/samcli/cli/global_config.py
@@ -47,7 +47,6 @@ def config_dir(self):
# Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.
app_dir = os.getenv("__SAM_CLI_APP_DIR")
self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))
-
return Path(self._config_dir)
@property
@@ -76,7 +75,7 @@ def installation_id(self):
try:
self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)
return self._installation_id
- except (ValueError, IOError):
+ except (ValueError, IOError, OSError):
return None
@property
@@ -112,7 +111,7 @@ def telemetry_enabled(self):
try:
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
return self._telemetry_enabled
- except (ValueError, IOError) as ex:
+ except (ValueError, IOError, OSError) as ex:
LOG.debug("Error when retrieving telemetry_enabled flag", exc_info=ex)
return False
@@ -164,6 +163,10 @@ def _set_value(self, key, value):
return self._set_json_cfg(cfg_path, key, value, json_body)
def _create_dir(self):
+ """
+ Creates configuration directory if it does not already exist, otherwise does nothing.
+ May raise an OSError if we do not have permissions to create the directory.
+ """
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
def _get_config_file_path(self, filename):
| diff --git a/tests/unit/cli/test_global_config.py b/tests/unit/cli/test_global_config.py
--- a/tests/unit/cli/test_global_config.py
+++ b/tests/unit/cli/test_global_config.py
@@ -19,6 +19,16 @@ def test_config_write_error(self):
installation_id = gc.installation_id
self.assertIsNone(installation_id)
+ def test_unable_to_create_dir(self):
+ m = mock_open()
+ m.side_effect = OSError("Permission DENIED")
+ gc = GlobalConfig()
+ with patch('samcli.cli.global_config.Path.mkdir', m):
+ installation_id = gc.installation_id
+ self.assertIsNone(installation_id)
+ telemetry_enabled = gc.telemetry_enabled
+ self.assertFalse(telemetry_enabled)
+
def test_setter_cannot_open_path(self):
m = mock_open()
m.side_effect = IOError("fail")
| sam 0.19.0 crashes on trying to create .aws-sam directory when not permitted
### Description
When running `sam validate` from a freshly built Docker image, it crashes while trying to create the `.aws-sam` config directory.
### Steps to reproduce
In our CI pipeline, we run `sam` from within a docker container built according to this `Dockerfile`:
```
FROM alpine:3.9
RUN apk add --no-cache \
build-base \
python \
python-dev \
py-pip \
groff \
docker && \
pip install awscli aws-sam-cli
```
AWS credentials are passed in as environment variables, volumes mounted from the Jenkins workspace into the docker image, and then we simply run:
`sam validate -t ./.../template.yaml`
### Observed result
Logs from the Jenkins build:
```
$ docker run -t -d -u 3000:100 --network=host -w /data/hudson/workspace/loper-portal_feature_jenkinstest -v /data/hudson/workspace/loper-portal_feature_jenkinstest:/data/hudson/workspace/loper-portal_feature_jenkinstest:rw,z -v /data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:/data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** xxxx-aws-sam-cli cat
$ docker top 04fa30b17ceb0ae6d45b66190f32f4bad8dedd57386352a31e61f0da6ac18aa2 -eo pid,comm
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withCredentials
Masking supported pattern matches of $AWS_ACCESS_KEY_ID or $AWS_SECRET_ACCESS_KEY
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Validate CloudFormation template)
[Pipeline] sh
+ sam validate --debug -t cloudformation/template.yaml
Traceback (most recent call last):
File "/usr/bin/sam", line 11, in <module>
sys.exit(cli())
File "/usr/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1063, in invoke
Command.invoke(self, ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/samcli/cli/main.py", line 83, in cli
if global_cfg.telemetry_enabled is None:
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 113, in telemetry_enabled
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 145, in _get_value
cfg_path = self._get_config_file_path(CONFIG_FILENAME)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 170, in _get_config_file_path
self._create_dir()
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 167, in _create_dir
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1540, in mkdir
_try_except_filenotfounderror(_try_func, _exc_func)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 113, in _try_except_filenotfounderror
try_func()
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1531, in _try_func
self._accessor.mkdir(self, mode)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 585, in wrapped
return strfunc(str(pathobj), *args)
OSError: [Errno 13] Permission denied: '/.aws-sam'
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
```
### Expected result
A non-crashing sam :)
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: alpine linux, docker container
2. `sam --version`: 0.19.0
### Others
This was introduced in the 0.19.0 Telemetry release: commit dbd534a added `samcli/cli/global_config.py`, which tries to create the config dir if it doesn't exist.
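A minimal sketch of the more forgiving behaviour a fix could aim for (illustrative file name, not the exact SAM CLI code): if the configuration directory cannot be created, treat it as though nothing is persisted instead of letting the `OSError` escape.
```python
import json
from pathlib import Path

def read_config_value(config_dir: Path, key: str):
    try:
        # May raise OSError (e.g. EACCES) when HOME is not writable, as in the CI container above.
        config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
        cfg_path = config_dir / "config.json"  # illustrative file name
        if not cfg_path.exists():
            return None
        return json.loads(cfg_path.read_text()).get(key)
    except (ValueError, IOError, OSError):
        # Permission problems should not crash commands like `sam validate`.
        return None
```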
Admittedly we should have pinned the versions used in our `Dockerfile`; not pinning them meant we missed the addition of the Telemetry feature in 0.19.0. Although I'd love to contribute, enabling Telemetry would undoubtedly go against some company policy. There _is_ a message on the first run of the new version (if it was able to write the config dir), but in our setup that's quite easy to miss. Basically I'm advocating an opt-in instead.
| I think this is less of an opt-in issue, since this config file can be written to in non-telemetry contexts (layer support has a similar global directory feature that's been in place for a while) but we definitely need a better failure case. | 2019-08-20T18:12:07 |
aws/aws-sam-cli | 2,007 | aws__aws-sam-cli-2007 | [
"1999"
] | 98be672418f76bf1cc54db7528988b8a1c6a6b52 | diff --git a/samcli/commands/local/generate_event/cli.py b/samcli/commands/local/generate_event/cli.py
--- a/samcli/commands/local/generate_event/cli.py
+++ b/samcli/commands/local/generate_event/cli.py
@@ -22,7 +22,7 @@
$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key>\n
\b
After you generate a sample event, you can use it to test your Lambda function locally
-$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id>
+$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>
"""
| Incorrect inline help in "sam local generate-event" command
### Describe your idea/feature/enhancement
Using the CLI I had some problems with the inline help of the "sam local generate-event" command. I was trying to pipe the event generated by that command into "sam local invoke" and it failed. The part of the inline help that is incorrect is this:
`{...} After you generate a sample event, you can use it to test your Lambda function locally
$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id> {...}`
In the web documentation here (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-local-generate-event.html) the help is correct:
`After you generate a sample event, you can use it to test your Lambda function locally
sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>`
### Proposal
Replace the incorrect help text with the correct one.
| Thanks for submitting this! Looks to be a quick fix, would be glad to take in a PR for this :) | 2020-05-26T17:27:38 |
|
aws/aws-sam-cli | 2,215 | aws__aws-sam-cli-2215 | [
"1392"
] | 2d8a3688f95a4d8ad24f4fbcbbc1f8d3c991761a | diff --git a/samcli/commands/local/cli_common/invoke_context.py b/samcli/commands/local/cli_common/invoke_context.py
--- a/samcli/commands/local/cli_common/invoke_context.py
+++ b/samcli/commands/local/cli_common/invoke_context.py
@@ -134,7 +134,9 @@ def __enter__(self):
self._container_manager = self._get_container_manager(self._docker_network, self._skip_pull_image)
if not self._container_manager.is_docker_reachable:
- raise InvokeContextException("Running AWS SAM projects locally requires Docker. Have you got it installed?")
+ raise InvokeContextException(
+ "Running AWS SAM projects locally requires Docker. Have you got it installed and running?"
+ )
return self
diff --git a/samcli/local/docker/manager.py b/samcli/local/docker/manager.py
--- a/samcli/local/docker/manager.py
+++ b/samcli/local/docker/manager.py
@@ -6,6 +6,7 @@
import sys
import re
+import platform
import docker
import requests
@@ -44,13 +45,22 @@ def is_docker_reachable(self):
bool
True, if Docker is available, False otherwise
"""
+ errors = (
+ docker.errors.APIError,
+ requests.exceptions.ConnectionError,
+ )
+ if platform.system() == "Windows":
+ import pywintypes # pylint: disable=import-error
+
+ errors += (pywintypes.error,) # pylint: disable=no-member
+
try:
self.docker_client.ping()
-
return True
# When Docker is not installed, a request.exceptions.ConnectionError is thrown.
- except (docker.errors.APIError, requests.exceptions.ConnectionError):
+ # and also windows-specific errors
+ except errors:
LOG.debug("Docker is not reachable", exc_info=True)
return False
| diff --git a/tests/unit/commands/local/cli_common/test_invoke_context.py b/tests/unit/commands/local/cli_common/test_invoke_context.py
--- a/tests/unit/commands/local/cli_common/test_invoke_context.py
+++ b/tests/unit/commands/local/cli_common/test_invoke_context.py
@@ -127,7 +127,7 @@ def test_must_raise_if_docker_is_not_reachable(self, SamFunctionProviderMock):
invoke_context.__enter__()
self.assertEqual(
- "Running AWS SAM projects locally requires Docker. Have you got it installed?",
+ "Running AWS SAM projects locally requires Docker. Have you got it installed and running?",
str(ex_ctx.exception),
)
diff --git a/tests/unit/local/docker/test_manager.py b/tests/unit/local/docker/test_manager.py
--- a/tests/unit/local/docker/test_manager.py
+++ b/tests/unit/local/docker/test_manager.py
@@ -3,11 +3,11 @@
"""
import io
+import importlib
from unittest import TestCase
+from unittest.mock import Mock, patch
import requests
-
-from unittest.mock import Mock
from docker.errors import APIError, ImageNotFound
from samcli.local.docker.manager import ContainerManager, DockerImagePullFailedException
@@ -226,10 +226,10 @@ class TestContainerManager_is_docker_reachable(TestCase):
def setUp(self):
self.ping_mock = Mock()
- docker_client_mock = Mock()
- docker_client_mock.ping = self.ping_mock
+ self.docker_client_mock = Mock()
+ self.docker_client_mock.ping = self.ping_mock
- self.manager = ContainerManager(docker_client=docker_client_mock)
+ self.manager = ContainerManager(docker_client=self.docker_client_mock)
def test_must_use_docker_client_ping(self):
self.manager.is_docker_reachable
@@ -255,6 +255,35 @@ def test_must_return_false_if_ping_raises_connection_error(self):
self.assertFalse(is_reachable)
+ def test_must_return_false_if_ping_raises_pywintypes_error(self):
+ # pywintypes is not available non-Windows OS,
+ # we need to make up an Exception for this
+ class MockPywintypesError(Exception):
+ pass
+
+ # Mock these modules to simulate a Windows environment
+ platform_mock = Mock()
+ platform_mock.system.return_value = "Windows"
+ pywintypes_mock = Mock()
+ pywintypes_mock.error = MockPywintypesError
+ modules = {
+ "platform": platform_mock,
+ "pywintypes": pywintypes_mock,
+ }
+ with patch.dict("sys.modules", modules):
+ import samcli.local.docker.manager as manager_module
+
+ importlib.reload(manager_module)
+ manager = manager_module.ContainerManager(docker_client=self.docker_client_mock)
+ import pywintypes # pylint: disable=import-error
+
+ self.ping_mock.side_effect = pywintypes.error("pywintypes.error")
+ is_reachable = manager.is_docker_reachable
+ self.assertFalse(is_reachable)
+
+ # reload modules to ensure platform.system() is unpatched
+ importlib.reload(manager_module)
+
class TestContainerManager_has_image(TestCase):
def setUp(self):
| 'Docker is unreachable' error message not shown in some scenarios
### Description
I tried running my local Lambda function locally this morning and was greeted with this cryptic error.
```
2019-08-30 16:23:41 Starting Build inside a container
2019-08-30 16:23:46 Building resource 'Function'
Traceback (most recent call last):
File "runpy.py", line 193, in _run_module_as_main
File "runpy.py", line 85, in _run_code
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\__main__.py", line 12, in <module>
cli(prog_name="sam")
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 697, in main
rv = self.invoke(ctx)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 94, in wrapped
raise exception # pylint: disable=raising-bad-type
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 65, in wrapped
return_value = func(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 105, in cli
skip_pull_image, parameter_overrides, mode) # pragma: no cover
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 150, in do_cli
artifacts = builder.build()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 108, in build
lambda_function.runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 199, in _build_function
runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 234, in _build_function_on_container
if not self._container_manager.is_docker_reachable:
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\local\docker\manager.py", line 50, in is_docker_reachable
self.docker_client.ping()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\client.py", line 187, in ping
return self.api.ping(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\daemon.py", line 166, in ping
return self._result(self._get(self._url('/_ping'))) == 'OK'
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\client.py", line 230, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\adapters.py", line 449, in send
timeout=timeout
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 603, in urlopen
chunked=chunked)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 355, in _make_request
conn.request(method, url, **httplib_request_kw)
File "http\client.py", line 1239, in request
File "http\client.py", line 1285, in _send_request
File "http\client.py", line 1234, in endheaders
File "http\client.py", line 1026, in _send_output
File "http\client.py", line 964, in send
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipeconn.py", line 32, in connect
sock.connect(self.npipe_path)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 22, in wrapped
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 50, in connect
win32pipe.WaitNamedPipe(address, self._timeout)
pywintypes.error: (2, 'WaitNamedPipe', 'The system cannot find the file specified.')
```
This turned out to be because Docker Desktop was not running while I was using the `--use-container` argument.
I started Docker Desktop and repeatedly tried running the function. Interestingly, **only while Docker is in the process of starting** do you get an informative error message:
```
2019-08-30 16:26:39 Starting Build inside a container
2019-08-30 16:26:39 Building resource 'Function'
Build Failed
Error: Docker is unreachable. Docker needs to be running to build inside a container.
```
### Steps to reproduce
1. Start a local Lambda function with Docker Desktop closed and observe the ugly error
2. Start Docker Desktop and repeatedly run the local lambda function
3. After a while you will see the informative error.
### Observed result
```
2019-08-31 18:05:16 Changing event name from creating-client-class.iot-data to creating-client-class.iot-data-plane
2019-08-31 18:05:16 Changing event name from before-call.apigateway to before-call.api-gateway
2019-08-31 18:05:16 Changing event name from request-created.machinelearning.Predict to request-created.machine-learning.Predict
2019-08-31 18:05:16 Changing event name from before-parameter-build.autoscaling.CreateLaunchConfiguration to before-parameter-build.auto-scaling.CreateLaunchConfiguration
2019-08-31 18:05:16 Changing event name from before-parameter-build.route53 to before-parameter-build.route-53
2019-08-31 18:05:16 Changing event name from request-created.cloudsearchdomain.Search to request-created.cloudsearch-domain.Search
2019-08-31 18:05:16 Changing event name from docs.*.autoscaling.CreateLaunchConfiguration.complete-section to docs.*.auto-scaling.CreateLaunchConfiguration.complete-section
2019-08-31 18:05:16 Changing event name from before-parameter-build.logs.CreateExportTask to before-parameter-build.cloudwatch-logs.CreateExportTask
2019-08-31 18:05:16 Changing event name from docs.*.logs.CreateExportTask.complete-section to docs.*.cloudwatch-logs.CreateExportTask.complete-section
2019-08-31 18:05:16 Changing event name from before-parameter-build.cloudsearchdomain.Search to before-parameter-build.cloudsearch-domain.Search
2019-08-31 18:05:16 Changing event name from docs.*.cloudsearchdomain.Search.complete-section to docs.*.cloudsearch-domain.Search.complete-section
2019-08-31 18:05:16 Changing event name from creating-client-class.iot-data to creating-client-class.iot-data-plane
2019-08-31 18:05:16 Changing event name from before-call.apigateway to before-call.api-gateway
2019-08-31 18:05:16 Changing event name from request-created.machinelearning.Predict to request-created.machine-learning.Predict
2019-08-31 18:05:16 Changing event name from before-parameter-build.autoscaling.CreateLaunchConfiguration to before-parameter-build.auto-scaling.CreateLaunchConfiguration
2019-08-31 18:05:16 Changing event name from before-parameter-build.route53 to before-parameter-build.route-53
2019-08-31 18:05:16 Changing event name from request-created.cloudsearchdomain.Search to request-created.cloudsearch-domain.Search
2019-08-31 18:05:16 Changing event name from docs.*.autoscaling.CreateLaunchConfiguration.complete-section to docs.*.auto-scaling.CreateLaunchConfiguration.complete-section
2019-08-31 18:05:16 Changing event name from before-parameter-build.logs.CreateExportTask to before-parameter-build.cloudwatch-logs.CreateExportTask
2019-08-31 18:05:16 Changing event name from docs.*.logs.CreateExportTask.complete-section to docs.*.cloudwatch-logs.CreateExportTask.complete-section
2019-08-31 18:05:16 Changing event name from before-parameter-build.cloudsearchdomain.Search to before-parameter-build.cloudsearch-domain.Search
2019-08-31 18:05:16 Changing event name from docs.*.cloudsearchdomain.Search.complete-section to docs.*.cloudsearch-domain.Search.complete-section
2019-08-31 18:05:16 Telemetry endpoint configured to be https://aws-serverless-tools-telemetry.us-west-2.amazonaws.com/metrics
2019-08-31 18:05:16 'build' command is called
2019-08-31 18:05:16 Starting Build inside a container
2019-08-31 18:05:16 No Parameters detected in the template
2019-08-31 18:05:16 2 resources found in the template
2019-08-31 18:05:16 Found Serverless function with name='AutomatedSageMakerTraining' and CodeUri='.'
2019-08-31 18:05:16 Found Serverless function with name='AutomatedSageMakerEvaluation' and CodeUri='.'
2019-08-31 18:05:16 Trying paths: ['C:\\Users\\Oscar\\.docker\\config.json', 'C:\\Users\\Oscar\\.dockercfg']
2019-08-31 18:05:16 Found file at path: C:\Users\Oscar\.docker\config.json
2019-08-31 18:05:16 Found 'auths' section
2019-08-31 18:05:16 Auth data for https://index.docker.io/v1/ is absent. Client might be using a credentials store instead.
2019-08-31 18:05:16 Found 'credsStore' section
2019-08-31 18:05:16 Building resource 'AutomatedSageMakerTraining'
2019-08-31 18:05:16 Sending Telemetry: {'metrics': [{'commandRun': {'awsProfileProvided': False, 'debugFlagProvided': True, 'region': '', 'commandName': 'sam build', 'duration': 39, 'exitReason': 'error', 'exitCode': 255, 'requestId': 'f2b3683b-1131-49a7-b189-22bb5b5c408d', 'installationId': '0c5d4463-668a-4706-ae40-afd0704d66bd', 'sessionId': '2d3237ae-c0db-490b-8fc8-4f5f60b5d2de', 'executionEnvironment': 'CLI', 'pyversion': '3.6.7', 'samcliVersion': '0.19.0'}}]}
2019-08-31 18:05:16 Starting new HTTPS connection (1): aws-serverless-tools-telemetry.us-west-2.amazonaws.com:443
2019-08-31 18:05:17 HTTPSConnectionPool(host='aws-serverless-tools-telemetry.us-west-2.amazonaws.com', port=443): Read timed out. (read timeout=0.1)
Traceback (most recent call last):
File "runpy.py", line 193, in _run_module_as_main
File "runpy.py", line 85, in _run_code
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\__main__.py", line 12, in <module>
cli(prog_name="sam")
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 697, in main
rv = self.invoke(ctx)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 94, in wrapped
raise exception # pylint: disable=raising-bad-type
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 65, in wrapped
return_value = func(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 105, in cli
skip_pull_image, parameter_overrides, mode) # pragma: no cover
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 150, in do_cli
artifacts = builder.build()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 108, in build
lambda_function.runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 199, in _build_function
runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 234, in _build_function_on_container
if not self._container_manager.is_docker_reachable:
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\local\docker\manager.py", line 50, in is_docker_reachable
self.docker_client.ping()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\client.py", line 187, in ping
return self.api.ping(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\daemon.py", line 166, in ping
return self._result(self._get(self._url('/_ping'))) == 'OK'
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\client.py", line 230, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\adapters.py", line 449, in send
timeout=timeout
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 603, in urlopen
chunked=chunked)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 355, in _make_request
conn.request(method, url, **httplib_request_kw)
File "http\client.py", line 1239, in request
File "http\client.py", line 1285, in _send_request
File "http\client.py", line 1234, in endheaders
File "http\client.py", line 1026, in _send_output
File "http\client.py", line 964, in send
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipeconn.py", line 32, in connect
sock.connect(self.npipe_path)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 22, in wrapped
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 50, in connect
win32pipe.WaitNamedPipe(address, self._timeout)
pywintypes.error: (2, 'WaitNamedPipe', 'The system cannot find the file specified.')
```
### Expected result
The informative 'Docker is unreachable' error should be shown even if Docker Desktop is closed.
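A minimal sketch of one way to satisfy this, mirroring the direction of the accompanying patch rather than being the exact SAM CLI code: build the tuple of "Docker unreachable" exceptions per platform before calling `ping()`.
```python
import platform

import docker
import requests

def is_docker_reachable(docker_client) -> bool:
    errors = (docker.errors.APIError, requests.exceptions.ConnectionError)
    if platform.system() == "Windows":
        import pywintypes  # ships with pywin32, Windows only

        errors += (pywintypes.error,)
    try:
        docker_client.ping()
        return True
    except errors:
        return False
```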
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Windows 10 Education
2. `sam --version`: 0.19.0
| Thanks for the issue! Definitely a good catch; we should be surfacing an appropriate error even in cases where Docker is not running. Would you be interested in submitting a PR for this? :)
I already made some changes that fixed it a few days ago, but when I tried to follow your steps in `DEVELOPMENT_GUIDE.md`, as I'm on Windows this wasn't possible. I looked into `pipenv` but this just gives an error whenever I try to switch types. I spent about 15 minutes fixing the bug and 4 hours trying to set up the environment you want for contributing, and ended up giving up.
I'll see if I can get on a unix machine and try again soon.
As I tried and could not reproduce the issue on Ubuntu or macOS, I think the problem is Windows-specific.
As @OscarVanL said above, `DEVELOPMENT_GUIDE.md` is quite unfriendly to developers on Windows. We could have a friendlier development guide for Windows users, and also need to address some Windows-specific errors.
This issue didn't get any update for a while... I will pick up this issue and submit a PR soon. | 2020-09-11T18:21:02 |
aws/aws-sam-cli | 2,555 | aws__aws-sam-cli-2555 | [
"2462"
] | 1eaf48a385bef4b1a2e7a9f13c7ff037262e9bab | diff --git a/samcli/local/docker/lambda_container.py b/samcli/local/docker/lambda_container.py
--- a/samcli/local/docker/lambda_container.py
+++ b/samcli/local/docker/lambda_container.py
@@ -2,11 +2,12 @@
Represents Lambda runtime containers.
"""
import logging
+from typing import List
from samcli.local.docker.lambda_debug_settings import LambdaDebugSettings
from samcli.lib.utils.packagetype import IMAGE
from .container import Container
-from .lambda_image import Runtime
+from .lambda_image import Runtime, LambdaImage
LOG = logging.getLogger(__name__)
@@ -77,7 +78,7 @@ def __init__(
if not Runtime.has_value(runtime) and not packagetype == IMAGE:
raise ValueError("Unsupported Lambda runtime {}".format(runtime))
- image = LambdaContainer._get_image(lambda_image, runtime, packagetype, imageuri, layers, debug_options)
+ image = LambdaContainer._get_image(lambda_image, runtime, packagetype, imageuri, layers)
ports = LambdaContainer._get_exposed_ports(debug_options)
config = LambdaContainer._get_config(lambda_image, image)
entry, container_env_vars = LambdaContainer._get_debug_settings(runtime, debug_options)
@@ -181,7 +182,7 @@ def _get_additional_volumes(runtime, debug_options):
return volumes
@staticmethod
- def _get_image(lambda_image, runtime, packagetype, image, layers, debug_options):
+ def _get_image(lambda_image: LambdaImage, runtime: str, packagetype: str, image: str, layers: List[str]):
"""
Returns the name of Docker Image for the given runtime
@@ -199,8 +200,7 @@ def _get_image(lambda_image, runtime, packagetype, image, layers, debug_options)
str
Name of Docker Image for the given runtime
"""
- is_debug = bool(debug_options and debug_options.debugger_path)
- return lambda_image.build(runtime, packagetype, image, layers, is_debug)
+ return lambda_image.build(runtime, packagetype, image, layers)
@staticmethod
def _get_config(lambda_image, image):
diff --git a/samcli/local/docker/lambda_debug_settings.py b/samcli/local/docker/lambda_debug_settings.py
--- a/samcli/local/docker/lambda_debug_settings.py
+++ b/samcli/local/docker/lambda_debug_settings.py
@@ -3,7 +3,9 @@
"""
import logging
+from argparse import ArgumentParser
from collections import namedtuple
+from typing import List, cast
from samcli.local.docker.lambda_image import Runtime
@@ -88,10 +90,14 @@ def get_debug_settings(debug_port, debug_args_list, _container_env_vars, runtime
container_env_vars={"_AWS_LAMBDA_DOTNET_DEBUGGING": "1", **_container_env_vars},
),
Runtime.go1x.value: DebugSettings(
- ["/var/runtime/aws-lambda-go"]
- + debug_args_list
- + ["-debug=true", "-delvePort=" + str(debug_port), "-delvePath=" + options.get("delvePath")],
- container_env_vars=_container_env_vars,
+ entry,
+ container_env_vars={
+ "_AWS_LAMBDA_GO_DEBUGGING": "1",
+ "_AWS_LAMBDA_GO_DELVE_API_VERSION": LambdaDebugSettings.parse_go_delve_api_version(debug_args_list),
+ "_AWS_LAMBDA_GO_DELVE_LISTEN_PORT": debug_port,
+ "_AWS_LAMBDA_GO_DELVE_PATH": options.get("delvePath"),
+ **_container_env_vars,
+ },
),
Runtime.nodejs10x.value: DebugSettings(
entry
@@ -143,3 +149,12 @@ def get_debug_settings(debug_port, debug_args_list, _container_env_vars, runtime
LOG.debug("Passing entrypoint as specified in template")
return DebugSettings(entry + debug_args_list, _container_env_vars)
raise DebuggingNotSupported("Debugging is not currently supported for {}".format(runtime)) from ex
+
+ @staticmethod
+ def parse_go_delve_api_version(debug_args_list: List[str]) -> int:
+ parser = ArgumentParser("Parser for delve args")
+ parser.add_argument("-delveApi", type=int, default=1)
+ args, unknown_args = parser.parse_known_args(debug_args_list)
+ if unknown_args:
+ LOG.info("Ignoring unrecognized arguments: %s", unknown_args)
+ return cast(int, args.delveApi)
diff --git a/samcli/local/docker/lambda_image.py b/samcli/local/docker/lambda_image.py
--- a/samcli/local/docker/lambda_image.py
+++ b/samcli/local/docker/lambda_image.py
@@ -56,7 +56,6 @@ class LambdaImage:
_INVOKE_REPO_PREFIX = "amazon/aws-sam-cli-emulation-image"
_SAM_CLI_REPO_NAME = "samcli/lambda"
_RAPID_SOURCE_PATH = Path(__file__).parent.joinpath("..", "rapid").resolve()
- _GO_BOOTSTRAP_PATH = Path(__file__).parent.joinpath("..", "go-bootstrap").resolve()
def __init__(self, layer_downloader, skip_pull_image, force_image_build, docker_client=None):
"""
@@ -77,7 +76,7 @@ def __init__(self, layer_downloader, skip_pull_image, force_image_build, docker_
self.force_image_build = force_image_build
self.docker_client = docker_client or docker.from_env()
- def build(self, runtime, packagetype, image, layers, is_debug, stream=None):
+ def build(self, runtime, packagetype, image, layers, stream=None):
"""
Build the image if one is not already on the system that matches the runtime and layers
@@ -124,9 +123,6 @@ def build(self, runtime, packagetype, image, layers, is_debug, stream=None):
image_tag = f"{self._SAM_CLI_REPO_NAME}:{docker_image_version}"
image_not_found = False
- is_debug_go = runtime == "go1.x" and is_debug
- if is_debug_go:
- image_tag = f"{self._INVOKE_REPO_PREFIX}-{runtime}:debug-{version}"
# If we are not using layers, build anyways to ensure any updates to rapid get added
try:
@@ -144,9 +140,7 @@ def build(self, runtime, packagetype, image, layers, is_debug, stream=None):
stream_writer = stream or StreamWriter(sys.stderr)
stream_writer.write("Building image...")
stream_writer.flush()
- self._build_image(
- image if image else image_name, image_tag, downloaded_layers, is_debug_go, stream=stream_writer
- )
+ self._build_image(image if image else image_name, image_tag, downloaded_layers, stream=stream_writer)
return image_tag
@@ -186,7 +180,7 @@ def _generate_docker_image_version(layers, runtime):
runtime + "-" + hashlib.sha256("-".join([layer.name for layer in layers]).encode("utf-8")).hexdigest()[0:25]
)
- def _build_image(self, base_image, docker_tag, layers, is_debug_go, stream=None):
+ def _build_image(self, base_image, docker_tag, layers, stream=None):
"""
Builds the image
@@ -208,7 +202,7 @@ def _build_image(self, base_image, docker_tag, layers, is_debug_go, stream=None)
samcli.commands.local.cli_common.user_exceptions.ImageBuildException
When docker fails to build the image
"""
- dockerfile_content = self._generate_dockerfile(base_image, layers, is_debug_go)
+ dockerfile_content = self._generate_dockerfile(base_image, layers)
# Create dockerfile in the same directory of the layer cache
dockerfile_name = "dockerfile_" + str(uuid.uuid4())
@@ -222,10 +216,6 @@ def _build_image(self, base_image, docker_tag, layers, is_debug_go, stream=None)
# add dockerfile and rapid source paths
tar_paths = {str(full_dockerfile_path): "Dockerfile", self._RAPID_SOURCE_PATH: "/aws-lambda-rie"}
- if is_debug_go:
- LOG.debug("Adding custom GO Bootstrap to support debugging")
- tar_paths[self._GO_BOOTSTRAP_PATH] = "/aws-lambda-go"
-
for layer in layers:
tar_paths[layer.codeuri] = "/" + layer.name
@@ -257,7 +247,7 @@ def set_item_permission(tar_info):
full_dockerfile_path.unlink()
@staticmethod
- def _generate_dockerfile(base_image, layers, is_debug_go):
+ def _generate_dockerfile(base_image, layers):
"""
Generate the Dockerfile contents
@@ -288,11 +278,6 @@ def _generate_dockerfile(base_image, layers, is_debug_go):
f"FROM {base_image}\nADD aws-lambda-rie /var/rapid\nRUN chmod +x /var/rapid/aws-lambda-rie\n"
)
- if is_debug_go:
- dockerfile_content = (
- dockerfile_content + "ADD aws-lambda-go /var/runtime\nRUN chmod +x /var/runtime/aws-lambda-go\n"
- )
-
for layer in layers:
dockerfile_content = dockerfile_content + f"ADD {layer.name} {LambdaImage._LAYERS_DIR}\n"
return dockerfile_content
| diff --git a/tests/unit/local/docker/test_lambda_container.py b/tests/unit/local/docker/test_lambda_container.py
--- a/tests/unit/local/docker/test_lambda_container.py
+++ b/tests/unit/local/docker/test_lambda_container.py
@@ -28,6 +28,7 @@
Runtime.java8al2.value,
Runtime.dotnetcore21.value,
Runtime.dotnetcore31.value,
+ Runtime.go1x.value,
]
RUNTIMES_WITH_ENTRYPOINT_OVERRIDES = RUNTIMES_WITH_ENTRYPOINT + RUNTIMES_WITH_BOOTSTRAP_ENTRYPOINT
@@ -102,9 +103,7 @@ def test_must_configure_container_properly_zip(
self.assertEqual(expected_env_vars, container._env_vars)
self.assertEqual(self.memory_mb, container._memory_limit_mb)
- get_image_mock.assert_called_with(
- image_builder_mock, self.runtime, self.packagetype, self.imageuri, [], self.debug_options
- )
+ get_image_mock.assert_called_with(image_builder_mock, self.runtime, self.packagetype, self.imageuri, [])
get_exposed_ports_mock.assert_called_with(self.debug_options)
get_debug_settings_mock.assert_called_with(self.runtime, self.debug_options)
get_additional_options_mock.assert_called_with(self.runtime, self.debug_options)
@@ -172,9 +171,7 @@ def test_must_configure_container_properly_image_no_debug(
self.assertEqual({**expected_env_vars, **{"AWS_LAMBDA_FUNCTION_HANDLER": "mycommand"}}, container._env_vars)
self.assertEqual(self.memory_mb, container._memory_limit_mb)
- get_image_mock.assert_called_with(
- image_builder_mock, self.runtime, self.packagetype, self.imageuri, [], self.debug_options
- )
+ get_image_mock.assert_called_with(image_builder_mock, self.runtime, self.packagetype, self.imageuri, [])
get_exposed_ports_mock.assert_called_with(self.debug_options)
get_additional_options_mock.assert_called_with(self.runtime, self.debug_options)
get_additional_volumes_mock.assert_called_with(self.runtime, self.debug_options)
@@ -245,9 +242,7 @@ def test_must_configure_container_properly_image_debug(
self.assertEqual(expected_env_vars, container._env_vars)
self.assertEqual(self.memory_mb, container._memory_limit_mb)
- get_image_mock.assert_called_with(
- image_builder_mock, self.runtime, IMAGE, self.imageuri, [], self.debug_options
- )
+ get_image_mock.assert_called_with(image_builder_mock, self.runtime, IMAGE, self.imageuri, [])
get_exposed_ports_mock.assert_called_with(self.debug_options)
get_additional_options_mock.assert_called_with(self.runtime, self.debug_options)
get_additional_volumes_mock.assert_called_with(self.runtime, self.debug_options)
@@ -320,9 +315,7 @@ def test_must_configure_container_properly_image_with_imageconfig_debug(
)
self.assertEqual(self.memory_mb, container._memory_limit_mb)
- get_image_mock.assert_called_with(
- image_builder_mock, self.runtime, IMAGE, self.imageuri, [], self.debug_options
- )
+ get_image_mock.assert_called_with(image_builder_mock, self.runtime, IMAGE, self.imageuri, [])
get_exposed_ports_mock.assert_called_with(self.debug_options)
get_additional_options_mock.assert_called_with(self.runtime, self.debug_options)
get_additional_volumes_mock.assert_called_with(self.runtime, self.debug_options)
@@ -396,9 +389,7 @@ def test_must_configure_container_properly_image_with_imageconfig_no_debug(
)
self.assertEqual(self.memory_mb, container._memory_limit_mb)
- get_image_mock.assert_called_with(
- image_builder_mock, self.runtime, self.packagetype, self.imageuri, [], self.debug_options
- )
+ get_image_mock.assert_called_with(image_builder_mock, self.runtime, self.packagetype, self.imageuri, [])
get_exposed_ports_mock.assert_called_with(self.debug_options)
get_additional_options_mock.assert_called_with(self.runtime, self.debug_options)
get_additional_volumes_mock.assert_called_with(self.runtime, self.debug_options)
@@ -461,10 +452,8 @@ def test_must_skip_if_port_is_not_given(self):
class TestLambdaContainer_get_image(TestCase):
- def test_must_return_lambci_image_with_debug(self):
- debug_options = DebugContext(debug_ports=[1235], debugger_path="a", debug_args="a=b c=d e=f")
-
- expected = "lambci/lambda:foo"
+ def test_must_return_build_image(self):
+ expected = "amazon/aws-sam-cli-emulation-image-foo:rapid-x.y.z"
image_builder = Mock()
image_builder.build.return_value = expected
@@ -476,34 +465,11 @@ def test_must_return_lambci_image_with_debug(self):
packagetype=ZIP,
image=None,
layers=[],
- debug_options=debug_options,
),
expected,
)
- image_builder.build.assert_called_with("foo", ZIP, None, [], True)
-
- def test_must_return_lambci_image_without_debug(self):
- debug_options = DebugContext()
-
- expected = "lambci/lambda:foo"
-
- image_builder = Mock()
- image_builder.build.return_value = expected
-
- self.assertEqual(
- LambdaContainer._get_image(
- lambda_image=image_builder,
- runtime="foo",
- packagetype=ZIP,
- image=None,
- layers=[],
- debug_options=debug_options,
- ),
- expected,
- )
-
- image_builder.build.assert_called_with("foo", ZIP, None, [], False)
+ image_builder.build.assert_called_with("foo", ZIP, None, [])
class TestLambdaContainer_get_debug_settings(TestCase):
@@ -544,17 +510,6 @@ def test_must_provide_container_env_vars(self, runtime):
self.assertIsNotNone(container_env_vars)
- @parameterized.expand([param(r) for r in set(RUNTIMES_WITH_ENTRYPOINT) if not r.startswith("dotnetcore")])
- def test_debug_arg_must_be_split_by_spaces_and_appended_to_entrypoint(self, runtime):
- """
- Debug args list is appended starting at second position in the array
- """
- expected_debug_args = ["a=b", "c=d", "e=f"]
- result, _ = LambdaContainer._get_debug_settings(runtime, self.debug_options)
- actual = result[1:4]
-
- self.assertEqual(actual, expected_debug_args)
-
@parameterized.expand([param(r) for r in set(RUNTIMES_WITH_BOOTSTRAP_ENTRYPOINT)])
def test_debug_arg_must_be_split_by_spaces_and_appended_to_bootstrap_based_entrypoint(self, runtime):
"""
@@ -573,6 +528,13 @@ def test_must_provide_entrypoint_even_without_debug_args(self, runtime):
self.assertIsNotNone(result)
+ @parameterized.expand([(2, "-delveApi=2"), (2, "-delveApi 2"), (1, None)])
+ def test_delve_api_version_can_be_read_from_debug_args(self, version, debug_args):
+ debug_options = DebugContext(debug_ports=[1235], debug_args=debug_args)
+ _, env_vars = LambdaContainer._get_debug_settings(Runtime.go1x.value, debug_options)
+
+ self.assertEqual(env_vars.get("_AWS_LAMBDA_GO_DELVE_API_VERSION"), version)
+
class TestLambdaContainer_get_additional_options(TestCase):
def test_no_additional_options_when_debug_options_is_none(self):
diff --git a/tests/unit/local/docker/test_lambda_image.py b/tests/unit/local/docker/test_lambda_image.py
--- a/tests/unit/local/docker/test_lambda_image.py
+++ b/tests/unit/local/docker/test_lambda_image.py
@@ -46,7 +46,7 @@ def test_building_image_with_no_runtime_only_image(self):
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
- lambda_image.build(None, IMAGE, "mylambdaimage:v1", [], False),
+ lambda_image.build(None, IMAGE, "mylambdaimage:v1", []),
f"mylambdaimage:rapid-{version}",
)
@@ -67,14 +67,12 @@ def test_building_image_with_no_runtime_only_image_always_build(
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
- lambda_image.build(None, IMAGE, "mylambdaimage:v1", ["mylayer"], False),
+ lambda_image.build(None, IMAGE, "mylambdaimage:v1", ["mylayer"]),
f"mylambdaimage:rapid-{version}",
)
# No layers are added, because runtime is not defined.
- build_image_patch.assert_called_once_with(
- "mylambdaimage:v1", f"mylambdaimage:rapid-{version}", [], False, stream=ANY
- )
+ build_image_patch.assert_called_once_with("mylambdaimage:v1", f"mylambdaimage:rapid-{version}", [], stream=ANY)
# No Layers are added.
layer_downloader_mock.assert_not_called()
@@ -86,9 +84,9 @@ def test_building_image_with_non_accpeted_package_type(self):
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
with self.assertRaises(InvalidIntermediateImageError):
- lambda_image.build("python3.6", "Non-accepted-packagetype", None, [], False)
+ lambda_image.build("python3.6", "Non-accepted-packagetype", None, [])
with self.assertRaises(InvalidIntermediateImageError):
- lambda_image.build("python3.6", None, None, [], False)
+ lambda_image.build("python3.6", None, None, [])
def test_building_image_with_no_layers(self):
docker_client_mock = Mock()
@@ -99,22 +97,10 @@ def test_building_image_with_no_layers(self):
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
- lambda_image.build("python3.6", ZIP, None, [], False),
+ lambda_image.build("python3.6", ZIP, None, []),
f"amazon/aws-sam-cli-emulation-image-python3.6:rapid-{version}",
)
- def test_building_image_with_go_debug(self):
- docker_client_mock = Mock()
- layer_downloader_mock = Mock()
- setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
- docker_client_mock.api.build.return_value = ["mock"]
- lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
-
- self.assertEqual(
- lambda_image.build("go1.x", ZIP, None, [], True),
- f"amazon/aws-sam-cli-emulation-image-go1.x:debug-{version}",
- )
-
@patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
def test_not_building_image_that_already_exists(self, generate_docker_image_version_patch, build_image_patch):
@@ -130,7 +116,7 @@ def test_not_building_image_that_already_exists(self, generate_docker_image_vers
docker_client_mock.images.get.return_value = Mock()
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
- actual_image_id = lambda_image.build("python3.6", ZIP, None, [layer_mock], False)
+ actual_image_id = lambda_image.build("python3.6", ZIP, None, [layer_mock])
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
@@ -155,7 +141,7 @@ def test_force_building_image_that_doesnt_already_exists(
stream = io.StringIO()
lambda_image = LambdaImage(layer_downloader_mock, False, True, docker_client=docker_client_mock)
- actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], False, stream=stream)
+ actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
@@ -166,7 +152,6 @@ def test_force_building_image_that_doesnt_already_exists(
"amazon/aws-sam-cli-emulation-image-python3.6:latest",
"samcli/lambda:image-version",
["layers1"],
- False,
stream=stream,
)
@@ -186,7 +171,7 @@ def test_not_force_building_image_that_doesnt_already_exists(
stream = io.StringIO()
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
- actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], False, stream=stream)
+ actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
@@ -197,7 +182,6 @@ def test_not_force_building_image_that_doesnt_already_exists(
"amazon/aws-sam-cli-emulation-image-python3.6:latest",
"samcli/lambda:image-version",
["layers1"],
- False,
stream=stream,
)
@@ -228,19 +212,7 @@ def test_generate_dockerfile(self, docker_patch):
layer_mock = Mock()
layer_mock.name = "layer1"
- self.assertEqual(LambdaImage._generate_dockerfile("python", [layer_mock], False), expected_docker_file)
-
- @patch("samcli.local.docker.lambda_image.docker")
- def test_generate_dockerfile_with_go_debug(self, docker_patch):
- docker_client_mock = Mock()
- docker_patch.from_env.return_value = docker_client_mock
-
- expected_docker_file = "FROM python\nADD aws-lambda-rie /var/rapid\nRUN chmod +x /var/rapid/aws-lambda-rie\nADD aws-lambda-go /var/runtime\nRUN chmod +x /var/runtime/aws-lambda-go\nADD layer1 /opt\n"
-
- layer_mock = Mock()
- layer_mock.name = "layer1"
-
- self.assertEqual(LambdaImage._generate_dockerfile("python", [layer_mock], True), expected_docker_file)
+ self.assertEqual(LambdaImage._generate_dockerfile("python", [layer_mock]), expected_docker_file)
@patch("samcli.local.docker.lambda_image.create_tarball")
@patch("samcli.local.docker.lambda_image.uuid")
@@ -270,7 +242,7 @@ def test_build_image(self, generate_dockerfile_patch, path_patch, uuid_patch, cr
m = mock_open(dockerfile_mock)
with patch("samcli.local.docker.lambda_image.open", m):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
- "base_image", "docker_tag", [layer_version1], True
+ "base_image", "docker_tag", [layer_version1]
)
handle = m()
@@ -313,7 +285,7 @@ def test_build_image_fails_with_BuildError(
with patch("samcli.local.docker.lambda_image.open", m):
with self.assertRaises(ImageBuildException):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
- "base_image", "docker_tag", [layer_version1], True
+ "base_image", "docker_tag", [layer_version1]
)
handle = m()
@@ -355,7 +327,7 @@ def test_build_image_fails_with_ApiError(
with patch("samcli.local.docker.lambda_image.open", m):
with self.assertRaises(ImageBuildException):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
- "base_image", "docker_tag", [layer_version1], True
+ "base_image", "docker_tag", [layer_version1]
)
handle = m()
| SAM cli 1.13.2+ Go debugging broken
### Description:
Attempting to debug a Go Lambda with sam cli start-api/invoke makes the Lambda code fail to start.
### Steps to reproduce:
1. Create a sam app with these parameters using 1.13.2
```
-----------------------
Generating application:
-----------------------
Name: test113sam
Runtime: go1.x
Dependency Manager: mod
Application Template: hello-world
Output Directory: .
Next steps can be found in the README file at ./test113sam/README.md
```
2. `sam build`
3. Run `sam local invoke`; it works fine:
```
$ sam local invoke
Invoking hello-world (go1.x)
Skip pulling image and use local one: amazon/aws-sam-cli-emulation-image-go1.x:rapid-1.13.2.
Mounting /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction as /var/task:ro,delegated inside runtime container
START RequestId: d1f12116-37df-4127-9b64-36aacdccea8c Version: $LATEST
END RequestId: d1f12116-37df-4127-9b64-36aacdccea8c
REPORT RequestId: d1f12116-37df-4127-9b64-36aacdccea8c Init Duration: 1.57 ms Duration: 785.02 ms Billed Duration: 800 ms Memory Size: 128 MB Max Memory Used: 128 MB
{"statusCode":200,"headers":null,"multiValueHeaders":null,"body":"Hello, 72.21.198.67\n"}
```
Now, try invoking with Delve debugger args:
```
sam local invoke --debug-args -delveAPI=2 --debugger-path /Users/werlla/IdeaProjects/go1sam/ --debug-port 59394 --debug
2020-12-07 13:10:50,211 | Telemetry endpoint configured to be https://aws-serverless-tools-telemetry.us-west-2.amazonaws.com/metrics
2020-12-07 13:10:50,418 | local invoke command is called
2020-12-07 13:10:50,427 | No Parameters detected in the template
2020-12-07 13:10:50,454 | 2 resources found in the template
2020-12-07 13:10:50,455 | Found Serverless function with name='HelloWorldFunction' and CodeUri='HelloWorldFunction'
2020-12-07 13:10:50,464 | Found one Lambda function with name 'HelloWorldFunction'
2020-12-07 13:10:50,464 | Invoking hello-world (go1.x)
2020-12-07 13:10:50,464 | Environment variables overrides data is standard format
2020-12-07 13:10:50,464 | Loading AWS credentials from session with profile 'None'
2020-12-07 13:10:51,929 | Resolving code path. Cwd=/private/tmp/test113sam/.aws-sam/build, CodeUri=HelloWorldFunction
2020-12-07 13:10:51,929 | Resolved absolute path to code is /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction
2020-12-07 13:10:51,930 | Code /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction is not a zip/jar file
2020-12-07 13:10:53,185 | Failed to download image with name amazon/aws-sam-cli-emulation-image-go1.x:debug-1.13.2
2020-12-07 13:10:53,185 | Failed to download a new amazon/aws-sam-cli-emulation-image-go1.x:debug-1.13.2 image. Invoking with the already downloaded image.
2020-12-07 13:10:53,186 | Mounting /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction as /var/task:ro,delegated inside runtime container
2020-12-07 13:10:53,859 | Setting up SIGTERM interrupt handler
2020/12/07 21:10:53 Error starting mock server: fork/exec /var/rapid/init: no such file or directory
2020-12-07 13:10:54,710 | No response from invoke container for HelloWorldFunction
2020-12-07 13:10:54,711 | Sending Telemetry: {'metrics': [{'commandRun': {'awsProfileProvided': False, 'debugFlagProvided': True, 'region': '', 'commandName': 'sam local invoke', 'duration': 4498, 'exitReason': 'success', 'exitCode': 0, 'requestId': 'fd63fac1-9b23-4de9-a4ab-e6b76e979b13', 'installationId': '8e36c82e-d3f0-45b7-9e48-1e6f0ad535a6', 'sessionId': '10201463-def2-41d3-97ea-e2c5b3ee5e6f', 'executionEnvironment': 'CLI', 'pyversion': '3.8.6', 'samcliVersion': '1.13.2'}}]}
2020-12-07 13:10:54,880 | Telemetry response: 200
```
### Observed result:
```
2020/12/07 21:10:53 Error starting mock server: fork/exec /var/rapid/init: no such file or directory
```
And it does not start.
```
sam local invoke --debug-args "-delveAPI=2" --debugger-path /Users/werlla/IdeaProjects/go1sam/ --debug-port 59394 --debug
```
Quoting the debug args results in the same output, and single-quoting them leads to some more strange output without the function running:
```
sam local invoke --debug-args '-delveAPI=2' --debugger-path /Users/werlla/IdeaProjects/go1sam/ --debug-port 59394 --debug
2020-12-07 13:15:41,148 | Telemetry endpoint configured to be https://aws-serverless-tools-telemetry.us-west-2.amazonaws.com/metrics
2020-12-07 13:15:41,541 | local invoke command is called
2020-12-07 13:15:41,548 | No Parameters detected in the template
2020-12-07 13:15:41,589 | 2 resources found in the template
2020-12-07 13:15:41,589 | Found Serverless function with name='HelloWorldFunction' and CodeUri='HelloWorldFunction'
2020-12-07 13:15:41,608 | Found one Lambda function with name 'HelloWorldFunction'
2020-12-07 13:15:41,608 | Invoking hello-world (go1.x)
2020-12-07 13:15:41,608 | Environment variables overrides data is standard format
2020-12-07 13:15:41,609 | Loading AWS credentials from session with profile 'None'
2020-12-07 13:15:43,363 | Resolving code path. Cwd=/private/tmp/test113sam/.aws-sam/build, CodeUri=HelloWorldFunction
2020-12-07 13:15:43,363 | Resolved absolute path to code is /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction
2020-12-07 13:15:43,363 | Code /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction is not a zip/jar file
2020-12-07 13:15:44,558 | Failed to download image with name amazon/aws-sam-cli-emulation-image-go1.x:debug-1.13.2
2020-12-07 13:15:44,558 | Failed to download a new amazon/aws-sam-cli-emulation-image-go1.x:debug-1.13.2 image. Invoking with the already downloaded image.
2020-12-07 13:15:44,558 | Mounting /private/tmp/test113sam/.aws-sam/build/HelloWorldFunction as /var/task:ro,delegated inside runtime container
2020-12-07 13:15:45,629 | Setting up SIGTERM interrupt handler
2020-12-07 13:15:46,391 | No response from invoke container for HelloWorldFunction
2020-12-07 13:15:46,392 | Sending Telemetry: {'metrics': [{'commandRun': {'awsProfileProvided': False, 'debugFlagProvided': True, 'region': '', 'commandName': 'sam local invoke', 'duration': 5242, 'exitReason': 'success', 'exitCode': 0, 'requestId': '61ed57b7-4d4d-4081-b4b7-88af18a3ec2f', 'installationId': '8e36c82e-d3f0-45b7-9e48-1e6f0ad535a6', 'sessionId': 'a568cab5-a111-40a0-bfef-a9800ec0a2a4', 'executionEnvironment': 'CLI', 'pyversion': '3.8.6', 'samcliVersion': '1.13.2'}}]}
2020-12-07 13:15:46,521 | Telemetry response: 200
```
### Expected result:
Running with the same arguments starts correctly with SAM CLI `1.12.0`.
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Mac
2. `sam --version`: SAM CLI, version 1.13.2
| I'm also seeing this issue. I've set up the hello-world via sam init and pretty much did the same commands as above.
So that is exactly the use case described in the docs on how to debug with Go.
Downgrading to v1.12.0 worked for me (I had to reinstall via pip: `pip3 install --user 'aws-sam-cli==1.12.0'`).
Some initial findings:
In https://github.com/aws/aws-sam-cli/pull/2425, the file `/local/rapid/init` was changed to `/local/rapid/aws-lambda-rie`, while in `samcli/local/go-bootstrap/aws-lambda-mock.go:96` `rapid/init` is still used.
This can be easily fixed in #2544. However, with #2544, we see a new error:
```
2021/01/20 00:50:48 http: panic serving 127.0.0.1:49540: &{0xc0001c8000 map[] 2021-01-20 00:50:48.4458573 +0000 UTC m=+0.017114501 panic <nil> ReplyStream not available <nil> <nil> }
goroutine 35 [running]:
net/http.(*conn).serve.func1(0xc0002880a0)
/usr/local/go/src/net/http/server.go:1800 +0x139
panic(0x866640, 0xc0002cc150)
/usr/local/go/src/runtime/panic.go:975 +0x3e3
github.com/sirupsen/logrus.Entry.log(0xc0001c8000, 0xc0002845a0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
/go/pkg/mod/github.com/sirupsen/[email protected]/entry.go:259 +0x335
```
(full text: [1611104121.txt](https://github.com/aws/aws-sam-cli/files/5840043/1611104121.txt))
We narrowed the cause down to the same PR (#2425, commit SHA: 12efe15b682926b006ce2a846d7cc6c71f5f2d74).
* with 12efe1 and the #2544 fix, we get this error when running `local invoke` with the delve debugger.
* without 12efe1, everything works fine.
### How to reproduce the (new) error?
(See our official guide ["Step-through debugging Lambda functions locally"](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-using-debugging.html) for full details)
0. Check out #2544, which contains the fix for the original error.
1. Install delve ([on macOS](https://github.com/go-delve/delve/blob/master/Documentation/installation/osx/install.md)), assuming it is installed at `/Users/xxx/go/bin/dlv`.
2. Build the hello world project with `sam build`.
3. Run `sam local invoke`; it now works with or without 12efe1.
4. Run `sam local invoke --debug-args -delveAPI=2 --debugger-path /Users/xxx/go/bin/ --debug-port 59394 --debug`; the error above shows up.
We have a guess: `aws-lambda-mock.go` does not play nicely with the new `aws-lambda-rie`. The error message is not very helpful, so we need to isolate which inputs to the Lambda container cause this problem by running some experiments.
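For reference, the helper that eventually shipped with this PR (`parse_go_delve_api_version` in the patch above) reads the Delve API version out of the `--debug-args` string using `argparse`; a trimmed-down, standalone sketch of that parsing:
```python
from argparse import ArgumentParser


def parse_go_delve_api_version(debug_args_list):
    # Only -delveApi is recognized; everything else passed via --debug-args is ignored.
    parser = ArgumentParser("Parser for delve args")
    parser.add_argument("-delveApi", type=int, default=1)
    args, unknown_args = parser.parse_known_args(debug_args_list)
    if unknown_args:
        print("Ignoring unrecognized arguments:", unknown_args)
    return args.delveApi


# Both "-delveApi=2" and "-delveApi 2" resolve to 2; omitting the flag falls back to 1.
print(parse_go_delve_api_version("-delveApi=2 --log".split()))  # 2
print(parse_go_delve_api_version([]))                           # 1
```
The returned value is what ends up in the `_AWS_LAMBDA_GO_DELVE_API_VERSION` environment variable asserted by the new parameterized test at the bottom of the test patch.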
We found the issue and are working on a fix. If you need this feature urgently, you can try the workaround in #2548.
```sh
git clone [email protected]:aws/aws-sam-cli.git
cd aws-sam-cli
git pull origin pull/2548/head
python3 -m venv .env
source .env/bin/activate
make init
# then you can use samdev to replace sam for local invoking
samdev local invoke --debug-args "--api-version=2 --log" --debugger-path <delve-directory> --debug-port 5986 --debug
``` | 2021-01-26T19:43:37 |
aws/aws-sam-cli | 2,760 | aws__aws-sam-cli-2760 | [
"2724"
] | e653fe214255357aa6eb025dc060b2672d9e8e51 | diff --git a/samcli/lib/providers/provider.py b/samcli/lib/providers/provider.py
--- a/samcli/lib/providers/provider.py
+++ b/samcli/lib/providers/provider.py
@@ -120,6 +120,9 @@ class LayerVersion:
LAYER_NAME_DELIMETER = "-"
+ _name: Optional[str] = None
+ _version: Optional[int] = None
+
def __init__(
self,
arn: str,
@@ -149,8 +152,6 @@ def __init__(
self._arn = arn
self._codeuri = codeuri
self.is_defined_within_template = bool(codeuri)
- self._name = LayerVersion._compute_layer_name(self.is_defined_within_template, arn)
- self._version = LayerVersion._compute_layer_version(self.is_defined_within_template, arn)
self._build_method = cast(Optional[str], metadata.get("BuildMethod", None))
self._compatible_runtimes = compatible_runtimes
@@ -238,6 +239,11 @@ def name(self) -> str:
str
A name of the Layer that is used on the system to uniquely identify the layer
"""
+ # because self.name is only used in local invoke.
+ # here we delay the validation process (in _compute_layer_name) rather than in __init__() to ensure
+ # customers still have a smooth build experience.
+ if not self._name:
+ self._name = LayerVersion._compute_layer_name(self.is_defined_within_template, self.arn)
return self._name
@property
@@ -250,6 +256,11 @@ def codeuri(self, codeuri: Optional[str]) -> None:
@property
def version(self) -> Optional[int]:
+ # because self.version is only used in local invoke.
+ # here we delay the validation process (in _compute_layer_name) rather than in __init__() to ensure
+ # customers still have a smooth build experience.
+ if self._version is None:
+ self._version = LayerVersion._compute_layer_version(self.is_defined_within_template, self.arn)
return self._version
@property
@@ -284,7 +295,10 @@ def get_build_dir(self, build_root_dir: str) -> str:
def __eq__(self, other: object) -> bool:
if isinstance(other, type(self)):
- return self.__dict__ == other.__dict__
+ # self._name and self._version are generated from self._arn, and they are initialized as None
+ # and their values are assigned at runtime. Here we exclude them from comparison
+ overrides = {"_name": None, "_version": None}
+ return {**self.__dict__, **overrides} == {**other.__dict__, **overrides}
return False
diff --git a/samcli/lib/providers/sam_function_provider.py b/samcli/lib/providers/sam_function_provider.py
--- a/samcli/lib/providers/sam_function_provider.py
+++ b/samcli/lib/providers/sam_function_provider.py
@@ -421,6 +421,12 @@ def _parse_layer_info(
stack_path=stack.stack_path,
)
)
+ else:
+ LOG.debug(
+ 'layer "%s" is not recognizable, '
+ "it might be using intrinsic functions that we don't support yet. Skipping.",
+ str(layer),
+ )
return layers
| diff --git a/tests/integration/buildcmd/build_integ_base.py b/tests/integration/buildcmd/build_integ_base.py
--- a/tests/integration/buildcmd/build_integ_base.py
+++ b/tests/integration/buildcmd/build_integ_base.py
@@ -304,3 +304,75 @@ def _verify_process_code_and_output(self, command_result, function_full_paths):
def _verify_invoke_built_functions(self, template_path, overrides, function_and_expected):
for function_identifier, expected in function_and_expected:
self._verify_invoke_built_function(template_path, function_identifier, overrides, expected)
+
+
+class IntrinsicIntegBase(BuildIntegBase):
+ """
+ Currently sam-cli does not have great support for intrinsic functions,
+ in this kind of integ tests, there are functions that are buildable but not invocable.
+ """
+
+ def _verify_build(self, function_full_paths, layer_full_path, stack_paths, command_result):
+ """
+ Verify resources have their build artifact folders, stack has their own template.yaml, and command succeeds.
+ """
+ self._verify_process_code_and_output(command_result, function_full_paths, layer_full_path)
+ for function_full_path in function_full_paths:
+ self._verify_build_artifact(self.default_build_dir, function_full_path)
+ for stack_path in stack_paths:
+ self._verify_move_template(self.default_build_dir, stack_path)
+
+ def _verify_build_artifact(self, build_dir, function_full_path):
+ self.assertTrue(build_dir.exists(), "Build directory should be created")
+
+ build_dir_files = os.listdir(str(build_dir))
+ self.assertIn("template.yaml", build_dir_files)
+ # full_path is always posix path
+ path_components = posixpath.split(function_full_path)
+ artifact_path = Path(build_dir, *path_components)
+ self.assertTrue(artifact_path.exists())
+
+ def _verify_move_template(self, build_dir, stack_path):
+ path_components = posixpath.split(stack_path)
+ stack_build_dir_path = Path(build_dir, Path(*path_components), "template.yaml")
+ self.assertTrue(stack_build_dir_path.exists())
+
+ def _verify_process_code_and_output(self, command_result, function_full_paths, layer_full_path):
+ self.assertEqual(command_result.process.returncode, 0)
+ # check HelloWorld and HelloMars functions are built in the same build
+ for function_full_path in function_full_paths:
+ self.assertRegex(
+ command_result.stderr.decode("utf-8"),
+ f"Building codeuri: .* runtime: .* metadata: .* functions: \\[.*'{function_full_path}'.*\\]",
+ )
+ self.assertIn(
+ f"Building layer '{layer_full_path}'",
+ command_result.stderr.decode("utf-8"),
+ )
+
+ def _verify_invoke_built_functions(self, template_path, functions, error_message):
+ """
+ Invoke the function, if error_message is not None, the invoke should fail.
+ """
+ for function_logical_id in functions:
+ LOG.info("Invoking built function '{}'".format(function_logical_id))
+
+ cmdlist = [
+ self.cmd,
+ "local",
+ "invoke",
+ function_logical_id,
+ "-t",
+ str(template_path),
+ "--no-event",
+ ]
+
+ process_execute = run_command(cmdlist)
+ process_execute.process.wait()
+
+ process_stderr = process_execute.stderr.decode("utf-8")
+ if error_message:
+ self.assertNotEqual(0, process_execute.process.returncode)
+ self.assertIn(error_message, process_stderr)
+ else:
+ self.assertEqual(0, process_execute.process.returncode)
diff --git a/tests/integration/buildcmd/test_build_cmd.py b/tests/integration/buildcmd/test_build_cmd.py
--- a/tests/integration/buildcmd/test_build_cmd.py
+++ b/tests/integration/buildcmd/test_build_cmd.py
@@ -17,6 +17,7 @@
CachedBuildIntegBase,
BuildIntegRubyBase,
NestedBuildIntegBase,
+ IntrinsicIntegBase,
)
from tests.testing_utils import (
IS_WINDOWS,
@@ -1812,3 +1813,59 @@ def test_nested_build(self, use_container, cached, parallel):
("LocalNestedStack/Function2", {"pi": "3.14"}),
],
)
+
+
+@parameterized_class(
+ ("template", "stack_paths", "layer_full_path", "function_full_paths", "invoke_error_message"),
+ [
+ (
+ os.path.join("nested-with-intrinsic-functions", "template-pass-down.yaml"),
+ ["", "AppUsingRef", "AppUsingJoin"],
+ "MyLayerVersion",
+ ["AppUsingRef/FunctionInChild", "AppUsingJoin/FunctionInChild"],
+ # Note(xinhol), intrinsic function passed by parameter are resolved as string,
+ # therefore it is being treated as an Arn, it is a bug in intrinsic resolver
+ "Invalid Layer Arn",
+ ),
+ (
+ os.path.join("nested-with-intrinsic-functions", "template-pass-up.yaml"),
+ ["", "ChildApp"],
+ "ChildApp/MyLayerVersion",
+ ["FunctionInRoot"],
+ # for this pass-up use case, since we are not sure whether there are valid local invoke cases out there,
+ # so we don't want to block customers from local invoking it.
+ None,
+ ),
+ ],
+)
+class TestBuildPassingLayerAcrossStacks(IntrinsicIntegBase):
+ @pytest.mark.flaky(reruns=3)
+ def test_nested_build(self):
+ if SKIP_DOCKER_TESTS:
+ self.skipTest(SKIP_DOCKER_MESSAGE)
+
+ """
+ Build template above and verify that each function call returns as expected
+ """
+ cmdlist = self.get_command_list(
+ use_container=True,
+ cached=True,
+ parallel=True,
+ )
+
+ LOG.info("Running Command: %s", cmdlist)
+ LOG.info(self.working_dir)
+
+ command_result = run_command(cmdlist, cwd=self.working_dir)
+
+ if not SKIP_DOCKER_TESTS:
+ self._verify_build(
+ self.function_full_paths,
+ self.layer_full_path,
+ self.stack_paths,
+ command_result,
+ )
+
+ self._verify_invoke_built_functions(
+ self.built_template, self.function_full_paths, self.invoke_error_message
+ )
diff --git a/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-down.yaml b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-down.yaml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-down.yaml
@@ -0,0 +1,16 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+
+Parameters:
+ Layer:
+ Type: String
+
+Resources:
+ FunctionInChild:
+ Type: AWS::Serverless::Function
+ Properties:
+ CodeUri: ../Python
+ Handler: main.handler
+ Runtime: python3.8
+ Layers:
+ - !Ref Layer
diff --git a/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-up.yaml b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-up.yaml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/child-pass-up.yaml
@@ -0,0 +1,18 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+
+Resources:
+ MyLayerVersion:
+ Type: AWS::Serverless::LayerVersion
+ Properties:
+ LayerName: MyLayerInChild
+ ContentUri: ../PyLayer
+ CompatibleRuntimes:
+ - python3.8
+ Metadata:
+ BuildMethod: python3.8
+
+Outputs:
+ LayerVersion:
+ Value: !Ref MyLayerVersion
+ Description: MyLayerVersion in child.yaml
diff --git a/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-down.yaml b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-down.yaml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-down.yaml
@@ -0,0 +1,27 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+
+Resources:
+ MyLayerVersion:
+ Type: AWS::Serverless::LayerVersion
+ Properties:
+ LayerName: MyLayerInRoot
+ ContentUri: ../PyLayer
+ CompatibleRuntimes:
+ - python3.8
+ Metadata:
+ BuildMethod: python3.8
+
+ AppUsingRef:
+ Type: AWS::Serverless::Application
+ Properties:
+ Location: ./child-pass-down.yaml
+ Parameters:
+ Layer: !Ref MyLayerVersion
+
+ AppUsingJoin:
+ Type: AWS::Serverless::Application
+ Properties:
+ Location: ./child-pass-down.yaml
+ Parameters:
+ Layer: !Ref MyLayerVersion
\ No newline at end of file
diff --git a/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-up.yaml b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-up.yaml
new file mode 100644
--- /dev/null
+++ b/tests/integration/testdata/buildcmd/nested-with-intrinsic-functions/template-pass-up.yaml
@@ -0,0 +1,17 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+
+Resources:
+ ChildApp:
+ Type: AWS::Serverless::Application
+ Properties:
+ Location: ./child-pass-up.yaml
+
+ FunctionInRoot:
+ Type: AWS::Serverless::Function
+ Properties:
+ CodeUri: ../Python
+ Handler: main.handler
+ Runtime: python3.8
+ Layers:
+ - !GetAtt ChildApp.Outputs.LayerVersion
\ No newline at end of file
diff --git a/tests/unit/commands/local/lib/test_provider.py b/tests/unit/commands/local/lib/test_provider.py
--- a/tests/unit/commands/local/lib/test_provider.py
+++ b/tests/unit/commands/local/lib/test_provider.py
@@ -49,8 +49,9 @@ class TestLayerVersion(TestCase):
]
)
def test_invalid_arn(self, arn):
+ layer = LayerVersion(arn, None) # creation of layer does not raise exception
with self.assertRaises(InvalidLayerVersionArn):
- LayerVersion(arn, None)
+ layer.version, layer.name
def test_layer_version_returned(self):
layer_version = LayerVersion("arn:aws:lambda:region:account-id:layer:layer-name:1", None)
| Layers shared in multiple stacks cause build/deploy to fail
The issue I am facing seems to be caused by a recent update to SAM CLI, since my SAM templates did not change recently and my deploy was running fine up until a few days ago. It is possibly a bug introduced in 1.21.x or 1.20.0.
### Description:
`sam build` errors out with the following message:
`Error: L is an Invalid Layer Arn.`
_The following steps will help replicate the issue, but please refer to the "debug" output at the bottom, because I think the underlying problem is exposed there._
I create a nested application in this case. The parent application's template defines two layers, and I pass the layer ARNs from the parent to the child template using Parameters. The error originates where the layer ARNs are specified inside a Lambda function resource definition.
### Steps to reproduce:
Parent template:
```
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Parameters:
AWSAccountId:
Type: String
Default: "123"
IAMRoleName:
Type: String
Default: "role"
Resources:
OneLayer:
Type: AWS::Serverless::LayerVersion
Properties:
LayerName: "one-layer"
ContentUri: ../layers/one/
CompatibleRuntimes:
- python3.8
RetentionPolicy: Delete
AnotherLayer:
Type: AWS::Serverless::LayerVersion
Properties:
LayerName: "another-layer"
ContentUri: ../layers/another/
CompatibleRuntimes:
- python3.8
RetentionPolicy: Delete
SomeLambdaFunction:
Type: AWS::Serverless::Function
Properties:
CodeUri: ../lambdas/example/
FunctionName: "SomeLambdaName"
Handler: test.lambda_handler
Role: !Ref IAMRole
Layers:
- !Ref OneLayer
- !Ref AnotherLayer
TestApp:
Type: AWS::Serverless::Application
Properties:
Location: apps/test.yaml
Parameters:
IAMRole: !Join ['', ["arn:aws:iam::", !Ref AWSAccountId, ":role/", !Ref IAMRoleName]]
Layers: !Join [",", [!Ref OneLayer, !Ref AnotherLayer]]
```
Nested App template:
```
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Parameters:
IAMRole:
Type: String
Layers:
Type: CommaDelimitedList
Resources:
NestedLambda:
Type: AWS::Serverless::Function
Properties:
CodeUri: ../lambdas/example/
FunctionName: "NestedLambdaName"
Handler: nested.lambda_handler
Role: !Ref IAMRole
Layers: !Ref Layers
```
### Observed result:
```
Error: L is an Invalid Layer Arn.
```
### Expected result:
```
Build Succeeded
Built Artifacts ...
.
.
```
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: MacOS/Ubuntu
2. `sam --version`: 1.21.1/1.21.0
3. AWS region: us-east-1
With the `--debug` flag, the log statement in the output that stands out is:
`Unable to resolve property Layers: {'Fn::Join': [',', [{'Ref': 'OneLayer'}, {'Ref': 'AnotherLayer'}]]}. Leaving as is.`
In my actual template I can see more statements like this for other parameters whose values use the `Join` function.
| After initial investigation: `sam` requires the property `Layers` to be a list of strings. According to the [CFN doc](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-layers), `Layers` should be of type `List of String`, but apparently CFN supports a comma-delimited string as well. `sam build` and `sam deploy` treat this as an invalid ARN, which is a bug (see the sketch after the template below for how the fix avoids the eager validation). This bug existed before 1.20.0; I can reproduce it in 1.19.0 without nested stacks using the template below:
**template A**
```yaml
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Parameters:
Layers:
Type: CommaDelimitedList
Default: "arn:aws:lambda:::awslayer:AmazonLinux1703,arn:aws:lambda:::awslayer:AmazonLinux1803"
Resources:
Function:
Type: AWS::Serverless::Function
Properties:
CodeUri: function
Handler: app.lambda_handler
Runtime: python3.8
Layers: !Ref Layers
```
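For reference, the patch in this PR sidesteps the eager validation by moving the ARN parsing out of `LayerVersion.__init__` and into the `name`/`version` properties, so `sam build` no longer fails on layer values it cannot resolve. A simplified sketch of that lazy-validation pattern (the `compute_name` helper below is only a stand-in for samcli's real `_compute_layer_name`):
```python
def compute_name(is_defined_within_template, arn):
    # Stand-in for samcli's _compute_layer_name, which raises InvalidLayerVersionArn
    # when the value is not a resolvable layer ARN.
    if is_defined_within_template:
        return arn
    if not arn.startswith("arn:"):
        raise ValueError(f"{arn} is an Invalid Layer Arn.")
    parts = arn.split(":")
    return f"{parts[-2]}-{parts[-1]}"


class LayerVersion:
    _name = None  # computed lazily instead of in __init__

    def __init__(self, arn, codeuri=None):
        self._arn = arn
        self.is_defined_within_template = bool(codeuri)

    @property
    def name(self):
        # Validation now happens only when the name is actually needed (local invoke),
        # so building a template with unresolved layer references succeeds.
        if not self._name:
            self._name = compute_name(self.is_defined_within_template, self._arn)
        return self._name


layer = LayerVersion("OneLayer,AnotherLayer")  # construction no longer raises
```
Constructing the object no longer raises; accessing `layer.name` (or `layer.version`) still does for an unresolvable value, which is exactly what the updated unit test in the test patch asserts.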
### Why is this surfacing now?
Before 1.20.0, `sam build` did not go into nested stacks, and `sam deploy` simply uploaded all resources in nested stacks to CFN without many checks. Since 1.20.0, this behavior has changed.
[Update] I renamed the title of this issue to better describe the issue.
@aahung Does `!Ref Layers` _not_ resolve to a list of strings in `sam`, since the type for parameter `Layers` is `CommaDelimitedList`?
> @aahung Does `!Ref Layers` _not_ resolve to a list of strings in `sam`, since the type for parameter `Layers` is `CommaDelimitedList`.
Yes, this is one of them; there might be other issues behind this as well. We are currently investigating and working on a fix. Sorry about the inconvenience.
A related question: does the `NestedLambda` in your example require building? I wonder what your nested stack workflow was and whether rolling back to not building nested stacks is a solution for you.
Yes, in the actual workflow, the nested stack does require building, AFAIK.
For now, pinning the version to 1.19 works for me. At this point, are you able to vaguely estimate when a fix may be released?
TBH, one of the main reasons I _initially_ chose to have nested stacks is for better management of the template. The template grew to a point where it was getting difficult to manage. I wanted a way to break it up, and nested stacks were the only way I could find to logically group resources into smaller, more manageable templates. Maybe there should be some feature that would allow including/importing templates. :)
> Yes, in the actual workflow, the nested stack does require building, AFAIK.
In 1.19.0, functions in nested stacks don't get built by `sam build`. Do you build them separately? (I am trying to understand how your workflow works and what we can do to unblock you.)
> For now, pinning the version to 1.19 works for me. At this point, are you able to vaguely estimate when a fix may be released?
We are actively exploring possible solutions to this problem and treating it as a matter of top priority - we'll push out a patch release as soon as it's ready. We'll update this issue with a link to the pull request as soon as we create it.
Also I want to say your use case is very valuable for our development. Thank you for providing info about your template!
Thank you for taking this up on priority.
> Do you build them separately?
No. I do a `sam build` on the parent and then a `sam deploy` on the parent too.
> Thank you for taking this up on priority.
>
> > Do you build them separately?
>
> No. I do a `sam build` on the parent and then a `sam deploy` on the parent too.
OK, so if `sam build` is only executed on the parent, then before 1.20.0 the resources in nested stacks were not built.
When you run `sam build`, you can see in stdout which functions are built:
```
Building codeuri: ...... functions: [<the function being built>]
``` | 2021-03-23T19:00:04 |
aws/aws-sam-cli | 3,272 | aws__aws-sam-cli-3272 | [
"3239"
] | 842b34f71fd1bc56e04171c6c217b46198e7af68 | diff --git a/samcli/lib/providers/sam_function_provider.py b/samcli/lib/providers/sam_function_provider.py
--- a/samcli/lib/providers/sam_function_provider.py
+++ b/samcli/lib/providers/sam_function_provider.py
@@ -144,8 +144,12 @@ def _extract_functions(
SamFunctionProvider._warn_code_extraction(resource_type, name, code_property_key)
continue
- if resource_package_type == IMAGE and SamBaseProvider._is_ecr_uri(
- resource_properties.get(image_property_key)
+ if (
+ resource_package_type == IMAGE
+ and SamBaseProvider._is_ecr_uri(resource_properties.get(image_property_key))
+ and not SamFunctionProvider._metadata_has_necessary_entries_for_image_function_to_be_built(
+ resource_metadata
+ )
):
# ImageUri can be an ECR uri, which is not supported
if not ignore_code_extraction_warnings:
@@ -460,3 +464,19 @@ def get_resources_by_stack_path(self, stack_path: str) -> Dict:
if not candidates:
raise RuntimeError(f"Cannot find resources with stack_path = {stack_path}")
return candidates[0]
+
+ @staticmethod
+ def _metadata_has_necessary_entries_for_image_function_to_be_built(metadata: Optional[Dict[str, Any]]) -> bool:
+ """
+ > Note: If the PackageType property is set to Image, then either ImageUri is required,
+ or you must build your application with necessary Metadata entries in the AWS SAM template file.
+ https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html#sam-function-imageuri
+
+ When ImageUri and Metadata are both provided, we will try to determine whether to treat the function
+ as to be built or to be skipped. When we skip it whenever "ImageUri" is provided,
+ we introduced a breaking change https://github.com/aws/aws-sam-cli/issues/3239
+
+ This function is used to check whether there are the customers have "intention" to
+ let AWS SAM CLI to build this image function.
+ """
+ return isinstance(metadata, dict) and bool(metadata.get("DockerContext"))
| diff --git a/tests/unit/commands/local/lib/test_sam_function_provider.py b/tests/unit/commands/local/lib/test_sam_function_provider.py
--- a/tests/unit/commands/local/lib/test_sam_function_provider.py
+++ b/tests/unit/commands/local/lib/test_sam_function_provider.py
@@ -95,6 +95,15 @@ class TestSamFunctionProviderEndToEnd(TestCase):
"PackageType": IMAGE,
},
},
+ "SamFuncWithImage4": {
+ # ImageUri is unsupported ECR location, but metadata is still provided, build
+ "Type": "AWS::Serverless::Function",
+ "Properties": {
+ "ImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/myrepo:myimage",
+ "PackageType": IMAGE,
+ },
+ "Metadata": {"DockerTag": "tag", "DockerContext": "./image", "Dockerfile": "Dockerfile"},
+ },
"LambdaFunc1": {
"Type": "AWS::Lambda::Function",
"Properties": {
@@ -126,6 +135,15 @@ class TestSamFunctionProviderEndToEnd(TestCase):
"PackageType": IMAGE,
},
},
+ "LambdaFuncWithImage4": {
+ # ImageUri is unsupported ECR location, but metadata is still provided, build
+ "Type": "AWS::Lambda::Function",
+ "Properties": {
+ "Code": {"ImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/myrepo"},
+ "PackageType": IMAGE,
+ },
+ "Metadata": {"DockerTag": "tag", "DockerContext": "./image", "Dockerfile": "Dockerfile"},
+ },
"LambdaFuncWithInlineCode": {
"Type": "AWS::Lambda::Function",
"Properties": {
@@ -337,6 +355,33 @@ def setUp(self):
),
),
("SamFuncWithImage3", None), # imageuri is ecr location, ignored
+ (
+ "SamFuncWithImage4", # despite imageuri is ecr location, the necessary metadata is still provided, build
+ Function(
+ name="SamFuncWithImage4",
+ functionname="SamFuncWithImage4",
+ runtime=None,
+ handler=None,
+ codeuri=".",
+ memory=None,
+ timeout=None,
+ environment=None,
+ rolearn=None,
+ layers=[],
+ events=None,
+ inlinecode=None,
+ imageuri="123456789012.dkr.ecr.us-east-1.amazonaws.com/myrepo:myimage",
+ imageconfig=None,
+ packagetype=IMAGE,
+ metadata={
+ "DockerTag": "tag",
+ "DockerContext": os.path.join("image"),
+ "Dockerfile": "Dockerfile",
+ },
+ codesign_config_arn=None,
+ stack_path="",
+ ),
+ ),
(
"SamFuncWithFunctionNameOverride-x",
Function(
@@ -416,6 +461,33 @@ def setUp(self):
),
),
("LambdaFuncWithImage3", None), # imageuri is a ecr location, ignored
+ (
+ "LambdaFuncWithImage4", # despite imageuri is ecr location, the necessary metadata is still provided, build
+ Function(
+ name="LambdaFuncWithImage4",
+ functionname="LambdaFuncWithImage4",
+ runtime=None,
+ handler=None,
+ codeuri=".",
+ memory=None,
+ timeout=None,
+ environment=None,
+ rolearn=None,
+ layers=[],
+ events=None,
+ metadata={
+ "DockerTag": "tag",
+ "DockerContext": os.path.join("image"),
+ "Dockerfile": "Dockerfile",
+ },
+ inlinecode=None,
+ imageuri="123456789012.dkr.ecr.us-east-1.amazonaws.com/myrepo",
+ imageconfig=None,
+ packagetype=IMAGE,
+ codesign_config_arn=None,
+ stack_path="",
+ ),
+ ),
(
"LambdaFuncWithInlineCode",
Function(
@@ -595,10 +667,12 @@ def test_get_all_must_return_all_functions(self):
"SamFunctions",
"SamFuncWithImage1",
"SamFuncWithImage2",
+ "SamFuncWithImage4",
"SamFuncWithInlineCode",
"SamFuncWithFunctionNameOverride",
"LambdaFuncWithImage1",
"LambdaFuncWithImage2",
+ "LambdaFuncWithImage4",
"LambdaFuncWithInlineCode",
"LambdaFuncWithLocalPath",
"LambdaFuncWithFunctionNameOverride",
| `sam build` broken after upgrading SAM CLI when using ECR (backward compatibility issue)
### Description:
I have a SAM application with an AWS ECR repository configured as the storage for the app snapshots/releases.
So far I was using just `sam build` and `sam deploy` to deploy my SAM application. It used to work with SAM CLI 1.24.1.
Unfortunately after I upgraded to SAM CLI 1.27.2 it stopped working. Now `sam build` says:
> The resource AWS::Serverless::Function 'HelloFunction' has specified ECR registry image for ImageUri. It will not be built and SAM CLI does not support invoking it locally.
Upgrading SAM CLI to the current latest version 1.30.0 doesn't help either.
**This is a breaking change.** Not sure if it was announced anywhere. Also it seems the problem was introduced as part of #2934.
Honestly, I don't know what I should do now or how to fix it. For now I can stick to the SAM CLI version that works for me, but this is only a temporary solution.
### Steps to reproduce:
Create a SAM app with a template including:
```
Resources:
HelloFunction:
Type: AWS::Serverless::Function
Properties:
PackageType: Image
ImageUri: <account-number>.dkr.ecr.<region>.amazonaws.com/hello-app
Metadata:
Dockerfile: Dockerfile
DockerContext: ./app
DockerTag: python3.8-v1
```
Use `sam build` and then guided deploy with SAM CLI 1.24.1. Then upgrade to SAM CLI 1.27.2 - `sam build` will stop working.
### Observed result:
`sam build` doesn't build the image anymore and `sam deploy` doesn't push the image to ECR. As a result `sam deploy` fails.
### Expected result:
`sam build` should build the image, as it used to. `sam deploy` should push the image to ECR and update the app without issues.
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Ubuntu 20.04
2. `sam --version`: 1.24.1, 1.27.2, 1.30.0
3. AWS region: Ireland (eu-west-1)
| Tagging @alexisfacques
Heya
Given the fact, the `ImageUri` parameter is irrelevant in the build/deploy process (the final ECR Repo / Image URI to which you publish the image must be specified via deploy config parameters), it was agreed off-side in #2934 to skip the image build when the `ImageUri` parameter is a valid ECR image.
This allows you to deploy pre-packaged `PackageType: Image` functions with SAM, the same way you may deploy `PackageType: Zip` images pointing to a valid S3 location.
Changing the `ImageUri` parameter to something else than the ECR repo image name will fix the problem (e.g. `hello-app:latest`). This will be the tag used by SAM to build Docker image in your local environment, prior to deploying it
Hi,
Thanks for providing some background. Next time I'd suggest making such new behavior optional so that existing users could adapt to it over time. Or you could just bump the major version of the tool to indicate a breaking change and document it properly in the release notes.
Anyway I was just looking at the description of [ImageUri](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html#sam-function-imageuri) parameter:
> The URI of the Amazon Elastic Container Registry (Amazon ECR) repository for the Lambda function's container image. This property only applies if the PackageType property is set to Image, otherwise it is ignored.
If setting `ImageUri` to something other than an ECR repo URL is supported then it should be mentioned here.
Moreover, while I understand that the change you introduced allows someone to use a pre-packaged and pre-published Docker image, the docs should still make it clear on how to use `sam build`/`sam deploy` together with ECR to achieve the simpler use-case where all you want to do is to use SAM CLI to do the build/push steps for you.
I checked other parts of the documentation related to building images with SAM apps and I must admit I haven't found a clear description of how this should be done. For example see [Building applications, Example 2: Container image](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-using-build.html#examples-container-image-1) - it shows you an exemplary SAM template and a Dockerfile, but it doesn't say how it should (could) be built and published.
Can you explain more about why `sam deploy` is failing? What message do you get on deployment?
The error message is:
```
Resource handler returned message: "Source image <account-nr>.dkr.ecr.eu-west-1.amazonaws.com/hello-world is not valid. Provide a valid source image. (Service: Lambda, Status Code: 400, Request ID: <request-id>, Extended Request ID: null)" (RequestToken: <request-token>, HandlerErrorCode: InvalidRequest)
```
Below is a full log:
```
$ sam deploy
Deploying with following values
===============================
Stack name : hello-world
Region : eu-west-1
Confirm changeset : True
Deployment image repository :
{
"HelloFunction": "<account-number>.dkr.ecr.eu-west-1.amazonaws.com/hello-world"
}
Deployment s3 bucket : <bucket-name>
Capabilities : ["CAPABILITY_IAM"]
Parameter overrides : {}
Signing Profiles : {}
Initiating deployment
=====================
File with same data already exists at hello-world/01dab9ed7445d678ea5d297a4b4e801c.template, skipping upload
Waiting for changeset to be created..
CloudFormation stack changeset
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Operation LogicalResourceId ResourceType Replacement
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Modify HelloFunctionHelloPermission AWS::Lambda::Permission Conditional
* Modify HelloFunction AWS::Lambda::Function False
* Modify ServerlessHttpApiApiGatewayDefaultStage AWS::ApiGatewayV2::Stage Conditional
* Modify ServerlessHttpApi AWS::ApiGatewayV2::Api Conditional
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Changeset created successfully. arn:aws:cloudformation:eu-west-1:<account-nr>:changeSet/samcli-deploy1630913936/1e0f72bc-453d-44f4-9b6f-5f34946eb8fb
Previewing CloudFormation changeset before deployment
======================================================
Deploy this changeset? [y/N]: y
2021-09-06 09:39:09 - Waiting for stack create/update to complete
CloudFormation events from changeset
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
ResourceStatus ResourceType LogicalResourceId ResourceStatusReason
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
UPDATE_IN_PROGRESS AWS::Lambda::Function HelloFunction -
UPDATE_ROLLBACK_IN_PROGRESS AWS::CloudFormation::Stack hello-stack The following resource(s) failed to update:
[HelloFunction].
UPDATE_FAILED AWS::Lambda::Function HelloFunction Resource handler returned message: "Source image
<account-nr>.dkr.ecr.eu-west-1.amazonaws.com/hello-world is not valid. Provide a valid source image.
(Service: Lambda, Status Code: 400, Request ID:
b31c6317-5934-4cd2-9c0d-461a0883344a, Extended Request
ID: null)" (RequestToken:
dbae00b4-952f-0818-5a44-6cb126006e97, HandlerErrorCode:
InvalidRequest)
UPDATE_IN_PROGRESS AWS::Lambda::Function HelloFunction -
UPDATE_COMPLETE AWS::Lambda::Function HelloFunction -
UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS AWS::CloudFormation::Stack hello-stack -
UPDATE_ROLLBACK_COMPLETE AWS::CloudFormation::Stack hello-stack -
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Error: Failed to create/update the stack: hello-stack, Waiter StackUpdateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "UPDATE_ROLLBACK_COMPLETE" at least once
```
Tested with `sam deploy` using SAM CLI 1.27.2, of course using the original SAM template, with a full ECR repository URI in `ImageUri`.
Are you able to reproduce the issue?
@romanek-adam Sorry about the confusion.
When you provide the Metadata below and set the package type to `Image`, the `ImageUri` is ignored during the `sam build` process in previous versions of sam-cli. The built artifact (`.aws-sam/build/template.yaml`) will have `ImageUri` set to a value like "ImageUri: helloworldfunction:nodejs14.x-v1", and `sam package` or `sam deploy` will upload the built image and replace it with the uploaded image URI. **So the previously provided ImageUri has no effect.**
```
Metadata:
Dockerfile: Dockerfile
DockerContext: ./app
DockerTag: python3.8-v1
```
Related doc:
> Note: If the PackageType property is set to Image, then either ImageUri is required, or you must build your application with necessary Metadata entries in the AWS SAM template file. https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html#sam-function-imageuri
After the change in #2934, we skipped building functions with a "valid" `ImageUri`, and this change broke your workflow, sorry about the inconvenience.
You can remove `ImageUri` since the value is not used. Then run `sam build` followed by `sam deploy` (or `sam package`) to check whether that fixes your issue.
We will work on some documentation change to avoid such confusion in the future. | 2021-09-14T00:15:02 |
aws/aws-sam-cli | 3,533 | aws__aws-sam-cli-3533 | [
"2034"
] | 159fad886dc387452d0562da330a91a52d6b2488 | diff --git a/samcli/cli/types.py b/samcli/cli/types.py
--- a/samcli/cli/types.py
+++ b/samcli/cli/types.py
@@ -39,16 +39,7 @@ def _generate_match_regex(match_pattern, delim):
def _unquote_wrapped_quotes(value):
r"""
- Removes wrapping double quotes and any '\ ' characters. They are usually added to preserve spaces when passing
- value thru shell.
-
- Examples
- --------
- >>> _unquote_wrapped_quotes('val\ ue')
- value
-
- >>> _unquote_wrapped_quotes("hel\ lo")
- hello
+ Removes wrapping single or double quotes and unescapes '\ ', '\"' and '\''.
Parameters
----------
@@ -59,12 +50,9 @@ def _unquote_wrapped_quotes(value):
-------
Unquoted string
"""
- if value and (value[0] == value[-1] == '"'):
- # Remove double quotes only if the string is wrapped in double quotes
- value = value.strip('"')
- elif value and (value[0] == value[-1] == "'"):
- # Remove single quotes only if the string is wrapped in single quotes
- value = value.strip("'")
+
+ if value and ((value[0] == value[-1] == '"') or (value[0] == value[-1] == "'")):
+ value = value[1:-1]
return value.replace("\\ ", " ").replace('\\"', '"').replace("\\'", "'")
diff --git a/samcli/commands/deploy/guided_config.py b/samcli/commands/deploy/guided_config.py
--- a/samcli/commands/deploy/guided_config.py
+++ b/samcli/commands/deploy/guided_config.py
@@ -111,4 +111,4 @@ def _save_image_repositories(self, cmd_names, config_env, samconfig, image_repos
@staticmethod
def quote_parameter_values(parameter_value: Any) -> str:
- return '"{}"'.format(parameter_value)
+ return '"{}"'.format(parameter_value.replace('"', r"\""))
| sam deploy --guided doesn't escape quotation marks for parameters written to samconfig.toml
### Description
When using `sam deploy --guided`, parameters with quotation marks `"` are written to `parameter_overrides` in samconfig.toml without proper escaping. As a result, the parameter isn't correctly populated the next time `sam deploy` is called.
### Steps to reproduce
Run `sam deploy --guided` and use these parameters:
```
Stack Name [sftp-lambda]:
AWS Region [us-east-1]:
Parameter FilesListJSON []: [ { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet", "TargetPath": "/sftp-lambda/test"}, { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet", "TargetPath": "/sftp-lambda/test"}]
#Shows you resources changes to be deployed and require a 'Y' to initiate deploy
Confirm changes before deploy [Y/n]: Y
#SAM needs permission to be able to create roles to connect to the resources in your template
Allow SAM CLI IAM role creation [Y/n]: Y
Save arguments to samconfig.toml [Y/n]: Y
```
### Observed result
Written to `samconfig.toml`:
```
parameter_overrides = "FilesListJSON=\"[ { \"SrcFile\": \"/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet\", \"TargetPath\": \"/sftp-lambda/test\"}, { \"SrcFile\": \"/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet\", \"TargetPath\": \"/sftp-lambda/test\"}]\""
```
What happens when `sam deploy` is called again:
```
$ sam deploy
Deploying with following values
===============================
Stack name : sftp-lambda
Region : us-east-1
Confirm changeset : True
Deployment s3 bucket : aws-sam-cli-managed-default-samclisourcebucket-
Capabilities : ["CAPABILITY_IAM"]
Parameter overrides : {'FilesListJSON': '[ { '}
```
(Note that the Parameter overrides value is incorrect.)
Please provide command output with `--debug` flag set.
### Expected result
Written to `samconfig.toml`:
```
parameter_overrides = "FilesListJSON=\"[ { \\\"SrcFile\\\": \\\"/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet\\\", \\\"TargetPath\\\": \\\"/sftp-lambda/test\\\"}, { \\\"SrcFile\\\": \\\"/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet\\\", \\\"TargetPath\\\": \\\"/sftp-lambda/test\\\"}]\""
```
```
$ sam deploy
Deploying with following values
===============================
Stack name : sftp-lambda
Region : us-east-1
Confirm changeset : True
Deployment s3 bucket : aws-sam-cli-managed-default-samclisourcebucket-
Capabilities : ["CAPABILITY_IAM"]
Parameter overrides : {'FilesListJSON': '[ { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet", "TargetPath": "/sftp-lambda/test"}, { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet"'}
```
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Amazon Linux
2. `sam --version`: SAM CLI, version 0.52.0
`Add --debug flag to command you are running`
| Hi @senorkrabs,
For now, we only support wrapping strings with double quotes `\"`; the escape sequence needs to be `\\\"` inside double quotes. I have created a PR to support single-quoting parameter overrides. Once that PR is released, you can simply use `\"` inside a pair of single quotes.
```
samdev deploy --parameter-overrides "FilesListJSON='[ { \"SrcFile\": \"/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet\", \"TargetPath\": \"/sftp-lambda/test\"}, { \"SrcFile\": \"/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet\", \"TargetPath\": \"/sftp-lambda/test\"}]'"
Deploying with following values
===============================
Stack name : sam-app
Region : us-east-1
Confirm changeset : False
Deployment s3 bucket : aws-sam-cli-managed-default-samclisourcebucket-6wsl7tf21dut
Capabilities : ["CAPABILITY_IAM"]
Parameter overrides : {'FilesListJSON': '[ { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=6/*.snappy.parquet", "TargetPath": "/sftp-lambda/test"}, { "SrcFile": "/athena/CUR2/CUR2/year=2020/month=5/*.snappy.parquet", "TargetPath": "/sftp-lambda/test"}]'}
```
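For context, a minimal Python sketch of the unquoting behavior introduced by the patch above (simplified from `_unquote_wrapped_quotes`; not the full SAM CLI option parser): a matching pair of wrapping single or double quotes is stripped, then the escape sequences `\ `, `\"` and `\'` are unescaped.
```python
# Simplified sketch of the patched _unquote_wrapped_quotes behavior.
def unquote_wrapped_quotes(value: str) -> str:
    # Strip a matching pair of wrapping quotes (single or double).
    if value and value[0] == value[-1] and value[0] in ('"', "'"):
        value = value[1:-1]
    # Unescape the supported escape sequences.
    return value.replace("\\ ", " ").replace('\\"', '"').replace("\\'", "'")

# A value wrapped in single quotes can now carry escaped double quotes:
print(unquote_wrapped_quotes('\'{ \\"SrcFile\\": \\"a.parquet\\" }\''))
# { "SrcFile": "a.parquet" }
```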
PR is released.
This issue should be reopened.
Specifying parameter values containing double quotes (`"`) records them escaped as `\"`, which is then indistinguishable from the identical escape sequence that encloses the value (`\"value\"`), so on the next run the saved values are not parsed correctly.
So, if a value is `""`, it will be recorded as `key=\"\"\"\"` and interpreted as `key=\"\"`.
For example:
```console
$ sam --version
SAM CLI, version 1.29.0
$ sam deploy -g --confirm-changeset
Configuring SAM deploy
======================
Looking for config file [samconfig.toml] : Found
Reading default arguments : Success
Setting default arguments for 'sam deploy'
=========================================
Stack Name [myapp-backend]:
AWS Region [us-east-1]:
Parameter DBInstanceClass [db.t3.micro]:
Parameter DBName [mydb]:
Parameter DBAdminUser [admin]:
Parameter DBAdminPassword []: "
Parameter DBWriterPassword []: \
Parameter DBReaderPassword []: '
```
On the subsequent run, the values are:
```console
$ sam deploy -g --confirm-changeset
Configuring SAM deploy
======================
Looking for config file [samconfig.toml] : Found
Reading default arguments : Success
Setting default arguments for 'sam deploy'
=========================================
Stack Name [myapp-backend]:
AWS Region [us-east-1]:
Parameter DBInstanceClass [db.t3.micro]:
Parameter DBName [mydb]:
Parameter DBAdminUser [admin]:
Parameter DBAdminPassword []:
Parameter DBWriterPassword [" DBReaderPassword=]:
Parameter DBReaderPassword []: ^CAborted!
$ grep parameter_overrides samconfig.toml
parameter_overrides = "DBInstanceClass=\"db.t3.micro\" DBName=\"mydb\" DBAdminUser=\"admin\" DBAdminPassword=\"\"\" DBWriterPassword=\"\\\" DBReaderPassword=\"'\""
```
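To make the failure mode concrete, here is a minimal Python sketch (hypothetical helper names; it only mirrors the one-line change to `quote_parameter_values` in the patch above): wrapping a value in double quotes without escaping its inner `"` produces a string that cannot be split back unambiguously, while escaping the inner quotes first keeps the round trip lossless.
```python
# Minimal sketch of the quoting round trip (hypothetical helpers, not SAM CLI itself).
def quote_naive(value: str) -> str:
    # Old behavior: wrap in double quotes without escaping inner quotes.
    return '"{}"'.format(value)

def quote_escaped(value: str) -> str:
    # Fixed behavior: escape inner double quotes before wrapping.
    return '"{}"'.format(value.replace('"', r"\""))

password = 'a"b'                  # a value that contains a double quote
print(quote_naive(password))      # "a"b"   -> the inner quote looks like a value terminator
print(quote_escaped(password))    # "a\"b"  -> the inner quote stays part of the value
```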
I've got a use case where I'd like to use strong passwords generated by `aws secretsmanager get-random-password --output=text` as parameter values for password parameters.
I've got a script that pre-populates the password values in `samconfig.toml`. Escaping `\` as `\\` works as intended, but I haven't found a working solution for escaping `"` characters.
Escaping `"` as `\\\"` does not work: the `\\` is interpreted as a backslash `\`, and the `\"` is then interpreted as terminating the value field.
Also, in reference to #2139, single quotes (`'value'`) can be used when pre-populating the values, but after running `sam deploy -g` even if unchanged the values will be saved with double quotes (`\"value\"`).
That means a value pre-populated as `'\"'` will be correctly interpreted as `"`. However, it will then be saved as `\"\"\"` and will be read as an empty value the next time.
My current working solution is to escape `\` as `\\` and replace `"` with `'`:
```bash
#!/usr/bin/env bash
# bash (rather than plain sh) is required for the `function` keyword used below
echo -n "Generating strong passwords for database access and administration"
function pw () {
# Escape \ as \\ and replace " with '
echo $(aws secretsmanager get-random-password --output=text | sed 's/\\/\\\\/g' | sed "s/\"/'/g")
}
p1="DBAdminPassword=\\\"$(pw)\\\"" && echo -n .
p2="DBWriterPassword=\\\"$(pw)\\\"" && echo -n .
p3="DBReaderPassword=\\\"$(pw)\\\"" && echo -n .
echo parameter_overrides = \"$p1 $p2 $p3\" >> samconfig.toml
echo \ done
```
Which works well:
```console
$ cat samconfig.toml
version = 0.1
[default]
[default.deploy]
[default.deploy.parameters]
stack_name = "myapp-backend"
$ ./prepare
Generating strong passwords for database access and administration... done
$ sam deploy -g --confirm-changeset
Configuring SAM deploy
======================
Looking for config file [samconfig.toml] : Found
Reading default arguments : Success
Setting default arguments for 'sam deploy'
=========================================
Stack Name [myapp-backend]:
AWS Region [us-east-1]:
Parameter DBInstanceClass [db.t3.micro]:
Parameter DBName [mydb]:
Parameter DBAdminUser [admin]:
Parameter DBAdminPassword [@Tv+lFiB~N7fSUj(7l6?pLkG#Nh:?Z8`]:
Parameter DBWriterPassword [|fS';v,5HA`89{`Bfx),MS9%ZXR(90Y@]:
Parameter DBReaderPassword [SgZ^TVh[d&>xvKmM)L4Nn^yOR/9b>r4O]:
```
However, I would like to be able to have double quotes as well.
@wchengru, I saw what you did in #2139, however I'm having some trouble seeing how to properly approach fixing this issue. Would you be able to suggest a general course of action? | 2021-12-15T22:31:31 |
|
aws/aws-sam-cli | 3,919 | aws__aws-sam-cli-3919 | [
"3911"
] | 1f327ef61ca582f6d5a398d9b58aa12f7d5b19cb | diff --git a/samcli/commands/_utils/table_print.py b/samcli/commands/_utils/table_print.py
--- a/samcli/commands/_utils/table_print.py
+++ b/samcli/commands/_utils/table_print.py
@@ -11,7 +11,9 @@
MIN_OFFSET = 20
-def pprint_column_names(format_string, format_kwargs, margin=None, table_header=None, color="yellow"):
+def pprint_column_names(
+ format_string, format_kwargs, margin=None, table_header=None, color="yellow", display_sleep=False
+):
"""
:param format_string: format string to be used that has the strings, minimum width to be replaced
@@ -19,6 +21,7 @@ def pprint_column_names(format_string, format_kwargs, margin=None, table_header=
:param margin: margin that is to be reduced from column width for columnar text.
:param table_header: Supplied table header
:param color: color supplied for table headers and column names.
+ :param display_sleep: flag to format table_header to include deployer's client_sleep
:return: boilerplate table string
"""
@@ -59,7 +62,7 @@ def pprint_wrap(func):
def wrap(*args, **kwargs):
# The table is setup with the column names, format_string contains the column names.
if table_header:
- click.secho("\n" + table_header)
+ click.secho("\n" + table_header.format(args[0].client_sleep) if display_sleep else table_header)
click.secho("-" * usable_width, fg=color)
click.secho(format_string.format(*format_args, **format_kwargs), fg=color)
click.secho("-" * usable_width, fg=color)
diff --git a/samcli/lib/deploy/deployer.py b/samcli/lib/deploy/deployer.py
--- a/samcli/lib/deploy/deployer.py
+++ b/samcli/lib/deploy/deployer.py
@@ -17,7 +17,7 @@
import sys
import math
-from collections import OrderedDict
+from collections import OrderedDict, deque
import logging
import time
from datetime import datetime
@@ -53,7 +53,7 @@
}
)
-DESCRIBE_STACK_EVENTS_TABLE_HEADER_NAME = "CloudFormation events from stack operations"
+DESCRIBE_STACK_EVENTS_TABLE_HEADER_NAME = "CloudFormation events from stack operations (refresh every {} seconds)"
DESCRIBE_CHANGESET_FORMAT_STRING = "{Operation:<{0}} {LogicalResourceId:<{1}} {ResourceType:<{2}} {Replacement:<{3}}"
DESCRIBE_CHANGESET_DEFAULT_ARGS = OrderedDict(
@@ -360,6 +360,7 @@ def get_last_event_time(self, stack_name):
format_string=DESCRIBE_STACK_EVENTS_FORMAT_STRING,
format_kwargs=DESCRIBE_STACK_EVENTS_DEFAULT_ARGS,
table_header=DESCRIBE_STACK_EVENTS_TABLE_HEADER_NAME,
+ display_sleep=True,
)
def describe_stack_events(self, stack_name, time_stamp_marker, **kwargs):
"""
@@ -377,45 +378,50 @@ def describe_stack_events(self, stack_name, time_stamp_marker, **kwargs):
try:
# Only sleep if there have been no retry_attempts
time.sleep(0 if retry_attempts else self.client_sleep)
- describe_stacks_resp = self._client.describe_stacks(StackName=stack_name)
paginator = self._client.get_paginator("describe_stack_events")
response_iterator = paginator.paginate(StackName=stack_name)
- stack_status = describe_stacks_resp["Stacks"][0]["StackStatus"]
- latest_time_stamp_marker = time_stamp_marker
+ new_events = deque() # event buffer
for event_items in response_iterator:
for event in event_items["StackEvents"]:
- if event["EventId"] not in events and utc_to_timestamp(event["Timestamp"]) > time_stamp_marker:
- events.add(event["EventId"])
- latest_time_stamp_marker = max(
- latest_time_stamp_marker, utc_to_timestamp(event["Timestamp"])
- )
- row_color = self.deploy_color.get_stack_events_status_color(status=event["ResourceStatus"])
- pprint_columns(
- columns=[
- event["ResourceStatus"],
- event["ResourceType"],
- event["LogicalResourceId"],
- event.get("ResourceStatusReason", "-"),
- ],
- width=kwargs["width"],
- margin=kwargs["margin"],
- format_string=DESCRIBE_STACK_EVENTS_FORMAT_STRING,
- format_args=kwargs["format_args"],
- columns_dict=DESCRIBE_STACK_EVENTS_DEFAULT_ARGS.copy(),
- color=row_color,
- )
- # Skip already shown old event entries
- elif utc_to_timestamp(event["Timestamp"]) <= time_stamp_marker:
- time_stamp_marker = latest_time_stamp_marker
+ # Skip already shown old event entries or former deployments
+ if utc_to_timestamp(event["Timestamp"]) <= time_stamp_marker:
break
- else: # go to next loop if not break from inside loop
- time_stamp_marker = latest_time_stamp_marker # update marker if all events are new
+ if event["EventId"] not in events:
+ events.add(event["EventId"])
+ # Events are in reverse chronological order
+ # Pushing in front reverse the order to display older events first
+ new_events.appendleft(event)
+ else: # go to next loop (page of events) if not break from inside loop
continue
break # reached here only if break from inner loop!
- if self._check_stack_not_in_progress(stack_status):
- stack_change_in_progress = False
- break
+ # Override timestamp marker with latest event (last in deque)
+ if len(new_events) > 0:
+ time_stamp_marker = utc_to_timestamp(new_events[-1]["Timestamp"])
+
+ for new_event in new_events:
+ row_color = self.deploy_color.get_stack_events_status_color(status=new_event["ResourceStatus"])
+ pprint_columns(
+ columns=[
+ new_event["ResourceStatus"],
+ new_event["ResourceType"],
+ new_event["LogicalResourceId"],
+ new_event.get("ResourceStatusReason", "-"),
+ ],
+ width=kwargs["width"],
+ margin=kwargs["margin"],
+ format_string=DESCRIBE_STACK_EVENTS_FORMAT_STRING,
+ format_args=kwargs["format_args"],
+ columns_dict=DESCRIBE_STACK_EVENTS_DEFAULT_ARGS.copy(),
+ color=row_color,
+ )
+ # Skip events from another consecutive deployment triggered during sleep by another process
+ if self._is_root_stack_event(new_event) and self._check_stack_not_in_progress(
+ new_event["ResourceStatus"]
+ ):
+ stack_change_in_progress = False
+ break
+
# Reset retry attempts if iteration is a success to use client_sleep again
retry_attempts = 0
except botocore.exceptions.ClientError as ex:
@@ -426,6 +432,14 @@ def describe_stack_events(self, stack_name, time_stamp_marker, **kwargs):
# Sleep in exponential backoff mode
time.sleep(math.pow(self.backoff, retry_attempts))
+ @staticmethod
+ def _is_root_stack_event(event: Dict) -> bool:
+ return bool(
+ event["ResourceType"] == "AWS::CloudFormation::Stack"
+ and event["StackName"] == event["LogicalResourceId"]
+ and event["PhysicalResourceId"] == event["StackId"]
+ )
+
@staticmethod
def _check_stack_not_in_progress(status: str) -> bool:
return "IN_PROGRESS" not in status
| diff --git a/tests/unit/lib/deploy/test_deployer.py b/tests/unit/lib/deploy/test_deployer.py
--- a/tests/unit/lib/deploy/test_deployer.py
+++ b/tests/unit/lib/deploy/test_deployer.py
@@ -1,8 +1,10 @@
from logging import captureWarnings
+from operator import inv
+from typing import Container, Iterable, Union
import uuid
import time
import math
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
from unittest import TestCase
from unittest.mock import patch, MagicMock, ANY, call
@@ -47,7 +49,17 @@ def wait(self, StackName, WaiterConfig):
return
-class TestDeployer(TestCase):
+class CustomTestCase(TestCase):
+ def assertListSubset(self, l1: Iterable, l2: Union[Iterable, Container], msg=None) -> None:
+ """
+ Assert l2 contains all items in l1.
+ Just like calling self.assertIn(l1[x], l2) in a loop.
+ """
+ for x in l1:
+ self.assertIn(x, l2, msg)
+
+
+class TestDeployer(CustomTestCase):
def setUp(self):
self.session = MagicMock()
self.cloudformation_client = self.session.client("cloudformation")
@@ -376,26 +388,45 @@ def test_get_last_event_time_unknown_last_time(self):
self.assertEqual(last_stack_event_timestamp.second, current_timestamp.second)
@patch("time.sleep")
- def test_describe_stack_events(self, patched_time):
- current_timestamp = datetime.utcnow()
+ @patch("samcli.lib.deploy.deployer.pprint_columns")
+ def test_describe_stack_events_chronological_order(self, patched_pprint_columns, patched_time):
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
- self.deployer._client.describe_stacks = MagicMock(
- side_effect=[
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE_CLEANUP_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE"}]},
- ]
- )
self.deployer._client.get_paginator = MagicMock(
return_value=MockPaginator(
+ # describe_stack_events is in reverse chronological order
[
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_IN_PROGRESS",
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=3),
+ "ResourceStatus": "CREATE_COMPLETE",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=2),
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "kms",
+ "LogicalResourceId": "mykms",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=1),
+ "ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
@@ -405,7 +436,7 @@ def test_describe_stack_events(self, patched_time):
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
+ "Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
@@ -416,72 +447,199 @@ def test_describe_stack_events(self, patched_time):
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_COMPLETE",
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
+ ]
+ )
+ )
+
+ self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
+ self.assertEqual(patched_pprint_columns.call_count, 5)
+ self.assertListSubset(
+ ["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
+ patched_pprint_columns.call_args_list[4][1]["columns"],
+ )
+
+ @patch("time.sleep")
+ @patch("samcli.lib.deploy.deployer.pprint_columns")
+ def test_describe_stack_events_chronological_order_with_previous_event(self, patched_pprint_columns, patched_time):
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
+ last_event_timestamp = start_timestamp - timedelta(hours=6)
+
+ self.deployer._client.get_paginator = MagicMock(
+ return_value=MockPaginator(
+ # describe_stack_events is in reverse chronological order
+ [
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_COMPLETE",
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=3),
+ "ResourceStatus": "UPDATE_COMPLETE",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=2),
+ "ResourceStatus": "UPDATE_COMPLETE",
+ "ResourceType": "kms",
+ "LogicalResourceId": "mykms",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=1),
+ "ResourceStatus": "UPDATE_COMPLETE",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ # Last event (from a former deployment)
+ {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "EventId": str(uuid.uuid4()),
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": last_event_timestamp,
+ "ResourceStatus": "CREATE_COMPLETE",
+ }
+ ]
+ },
]
)
)
- self.deployer.describe_stack_events("test", time.time() - 1)
+ self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
+ self.assertEqual(patched_pprint_columns.call_count, 5)
+ self.assertListSubset(
+ ["UPDATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
+ )
+ self.assertListSubset(
+ ["UPDATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
+ )
+ self.assertListSubset(
+ ["UPDATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
+ )
+ self.assertListSubset(
+ ["UPDATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
+ )
+ self.assertListSubset(
+ ["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
+ patched_pprint_columns.call_args_list[4][1]["columns"],
+ )
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_skip_old_event(self, patched_pprint_columns, patched_time):
- current_timestamp = datetime.utcnow()
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
+ last_event_timestamp = start_timestamp - timedelta(hours=6)
- self.deployer._client.describe_stacks = MagicMock(
- side_effect=[
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE_CLEANUP_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE"}]},
- ]
- )
sample_events = [
+ # old deployment
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": last_event_timestamp - timedelta(seconds=10),
"ResourceStatus": "CREATE_IN_PROGRESS",
- "ResourceType": "s3",
- "LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp + timedelta(seconds=10),
- "ResourceStatus": "CREATE_IN_PROGRESS",
- "ResourceType": "kms",
- "LogicalResourceId": "mykms",
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": last_event_timestamp,
+ "ResourceStatus": "CREATE_COMPLETE",
}
]
},
+ # new deployment
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp + timedelta(seconds=20),
- "ResourceStatus": "CREATE_COMPLETE",
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=10),
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
@@ -491,10 +649,24 @@ def test_describe_stack_events_skip_old_event(self, patched_pprint_columns, patc
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp + timedelta(seconds=30),
- "ResourceStatus": "CREATE_COMPLETE",
- "ResourceType": "kms",
- "LogicalResourceId": "mykms",
+ "Timestamp": start_timestamp + timedelta(seconds=20),
+ "ResourceStatus": "UPDATE_COMPLETE",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "EventId": str(uuid.uuid4()),
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=30),
+ "ResourceStatus": "UPDATE_COMPLETE",
}
]
},
@@ -502,95 +674,97 @@ def test_describe_stack_events_skip_old_event(self, patched_pprint_columns, patc
invalid_event = {"StackEvents": [{}]} # if deployer() loop read this, KeyError would raise
self.deployer._client.get_paginator = MagicMock(
side_effect=[
- MockPaginator([sample_events[0]]),
+ MockPaginator([sample_events[0], invalid_event]),
MockPaginator([sample_events[1], sample_events[0], invalid_event]),
MockPaginator([sample_events[2], sample_events[1], invalid_event]),
MockPaginator([sample_events[3], sample_events[2], invalid_event]),
+ MockPaginator([sample_events[4], sample_events[3], invalid_event]),
+ MockPaginator([sample_events[5], sample_events[4], invalid_event]),
]
)
- self.deployer.describe_stack_events("test", time.time() - 1)
+ self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
self.assertEqual(patched_pprint_columns.call_count, 4)
-
- @patch("samcli.lib.deploy.deployer.math")
- @patch("time.sleep")
- def test_describe_stack_events_exceptions(self, patched_time, patched_math):
-
- self.deployer._client.describe_stacks = MagicMock(
- side_effect=[
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ]
+ self.assertListSubset(
+ ["UPDATE_IN_PROGRESS", "AWS::CloudFormation::Stack", "test"],
+ patched_pprint_columns.call_args_list[0][1]["columns"],
+ )
+ self.assertListSubset(
+ ["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
+ patched_pprint_columns.call_args_list[3][1]["columns"],
)
- # No exception raised, we return with a log message, this is because,
- # the changeset is still getting executed, but displaying them is getting throttled.
- self.deployer.describe_stack_events("test", time.time())
- self.assertEqual(patched_math.pow.call_count, 3)
- self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
- @patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
- def test_describe_stack_events_resume_after_exceptions(self, patched_time, patched_math):
- current_timestamp = datetime.utcnow()
-
- self.deployer._client.describe_stacks = MagicMock(
- side_effect=[
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- ClientError(
- error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
- ),
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE_CLEANUP_IN_PROGRESS"}]},
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE"}]},
- ]
- )
+ @patch("samcli.lib.deploy.deployer.pprint_columns")
+ def test_describe_stack_events_stop_at_first_not_in_progress(self, patched_pprint_columns, patched_time):
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
self.deployer._client.get_paginator = MagicMock(
return_value=MockPaginator(
+ # describe_stack_events is in reverse chronological order
[
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_IN_PROGRESS",
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=33),
+ "ResourceStatus": "UPDATE_COMLPETE",
+ },
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=32),
+ "ResourceStatus": "UPDATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
- }
+ },
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=31),
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ },
]
},
{
"StackEvents": [
{
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_IN_PROGRESS",
- "ResourceType": "kms",
- "LogicalResourceId": "mykms",
- }
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=30),
+ "ResourceStatus": "UPDATE_IN_PROGRESS",
+ },
+ {
+ # This event should stop the loop and ignore above events
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "EventId": str(uuid.uuid4()),
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=3),
+ "ResourceStatus": "CREATE_COMPLETE",
+ },
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
+ "Timestamp": start_timestamp + timedelta(seconds=1),
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
@@ -601,10 +775,10 @@ def test_describe_stack_events_resume_after_exceptions(self, patched_time, patch
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_COMPLETE",
- "ResourceType": "kms",
- "LogicalResourceId": "mykms",
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
}
]
},
@@ -612,73 +786,194 @@ def test_describe_stack_events_resume_after_exceptions(self, patched_time, patch
)
)
+ self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
+ self.assertEqual(patched_pprint_columns.call_count, 3)
+ self.assertListSubset(
+ ["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[1][1]["columns"]
+ )
+ self.assertListSubset(
+ ["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
+ patched_pprint_columns.call_args_list[2][1]["columns"],
+ )
+
+ @patch("samcli.lib.deploy.deployer.math")
+ @patch("time.sleep")
+ def test_describe_stack_events_exceptions(self, patched_time, patched_math):
+
+ self.deployer._client.get_paginator = MagicMock(
+ side_effect=[
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ ]
+ )
+ # No exception raised, we return with a log message, this is because,
+ # the changeset is still getting executed, but displaying them is getting throttled.
self.deployer.describe_stack_events("test", time.time())
self.assertEqual(patched_math.pow.call_count, 3)
self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
- @patch("samcli.lib.deploy.deployer.math.pow", wraps=math.pow)
+ @patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
- def test_describe_stack_events_reset_retry_on_success_after_exceptions(self, patched_time, patched_pow):
- current_timestamp = datetime.utcnow()
+ def test_describe_stack_events_resume_after_exceptions(self, patched_time, patched_math):
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
- self.deployer._client.describe_stacks = MagicMock(
+ self.deployer._client.get_paginator = MagicMock(
side_effect=[
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
- {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]},
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
- {"Stacks": [{"StackStatus": "CREATE_COMPLETE"}]},
+ MockPaginator(
+ [
+ {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "EventId": str(uuid.uuid4()),
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_COMPLETE",
+ },
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "kms",
+ "LogicalResourceId": "mykms",
+ },
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "kms",
+ "LogicalResourceId": "mykms",
+ }
+ ]
+ },
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ ]
+ ),
]
)
+ self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
+ self.assertEqual(patched_math.pow.call_count, 3)
+ self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
+
+ @patch("samcli.lib.deploy.deployer.math.pow", wraps=math.pow)
+ @patch("time.sleep")
+ def test_describe_stack_events_reset_retry_on_success_after_exceptions(self, patched_time, patched_pow):
+ start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
+
self.deployer._client.get_paginator = MagicMock(
- return_value=MockPaginator(
- [
- {
- "StackEvents": [
- {
- "EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_IN_PROGRESS",
- "ResourceType": "s3",
- "LogicalResourceId": "mybucket",
- }
- ]
- },
- {
- "StackEvents": [
- {
- "EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_IN_PROGRESS",
- "ResourceType": "kms",
- "LogicalResourceId": "mykms",
- }
- ]
- },
- {
- "StackEvents": [
- {
- "EventId": str(uuid.uuid4()),
- "Timestamp": current_timestamp,
- "ResourceStatus": "CREATE_COMPLETE",
- "ResourceType": "s3",
- "LogicalResourceId": "mybucket",
- }
- ]
- },
- ]
- )
+ side_effect=[
+ MockPaginator(
+ [
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp,
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ },
+ ]
+ },
+ ]
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ MockPaginator(
+ [
+ {
+ "StackEvents": [
+ {
+ "EventId": str(uuid.uuid4()),
+ "Timestamp": start_timestamp + timedelta(seconds=10),
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "s3",
+ "LogicalResourceId": "mybucket",
+ }
+ ]
+ },
+ ]
+ ),
+ ClientError(
+ error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
+ ),
+ MockPaginator(
+ [
+ {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "EventId": str(uuid.uuid4()),
+ "StackName": "test",
+ "LogicalResourceId": "test",
+ "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": start_timestamp + timedelta(seconds=20),
+ "ResourceStatus": "CREATE_COMPLETE",
+ },
+ ]
+ },
+ ]
+ ),
+ ]
)
- self.deployer.describe_stack_events("test", time.time())
+ self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
# There are 2 sleep call for exceptions (backoff + regular one at 0)
self.assertEqual(patched_time.call_count, 9)
| Feature request: remove unnecessary describe-stack calls during deploy
### Describe your idea/feature/enhancement
Prevent throttling during `sam deploy` by reducing requests.
### Proposal
After my last PR #3500, I saw that during `sam deploy`, the function [`describe_stack_events`](https://github.com/aws/aws-sam-cli/blob/aa75d721b1b463e0a4f2717524cfee80e9fac9d2/samcli/lib/deploy/deployer.py#L364) sends 2 requests for each iteration:
- `DescribeStacks`: to retrieve the status and end the loop if it's not `IN_PROGRESS`
- `DescribeStackEvents`: to display the deployment events
The `DescribeStacks` call could be skipped, as the stack status is also accessible via the `DescribeStackEvents` response:
```json
{
"StackId": "arn:aws:cloudformation:region:xxx:stack/my-stack/an-id",
"EventId": "abc",
"StackName": "my-stack",
"LogicalResourceId": "my-stack",
"PhysicalResourceId": "arn:aws:cloudformation:region:xxx:stack/my-stack/an-id",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": "2022-04-27T16:00:33.955000+00:00",
"ResourceStatus": "UPDATE_COMPLETE"
}
```
So we could check the `ResourceStatus` of the events where `StackId` = `PhysicalResourceId` and `StackName` = `LogicalResourceId`, as in the sketch below.
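A minimal sketch of that check (an illustration; the change actually merged for this PR may differ in details): find the root-stack event in the `DescribeStackEvents` output and keep polling only while its status is still `IN_PROGRESS`.
```python
# Sketch: read the stack status from DescribeStackEvents instead of DescribeStacks.
import boto3

def is_root_stack_event(event: dict) -> bool:
    # The root-stack event is the one that describes the stack itself.
    return (
        event["ResourceType"] == "AWS::CloudFormation::Stack"
        and event["StackName"] == event["LogicalResourceId"]
        and event["PhysicalResourceId"] == event["StackId"]
    )

def stack_still_in_progress(stack_name: str) -> bool:
    client = boto3.client("cloudformation")
    paginator = client.get_paginator("describe_stack_events")
    for page in paginator.paginate(StackName=stack_name):
        for event in page["StackEvents"]:  # newest events first
            if is_root_stack_event(event):
                return "IN_PROGRESS" in event["ResourceStatus"]
    return False
```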
---
This could be done to fix part of the bug: https://github.com/aws/aws-sam-cli/issues/3910
| 2022-05-30T02:40:56 |