repo
stringclasses 856
values | pull_number
int64 3
127k
| instance_id
stringlengths 12
58
| issue_numbers
sequencelengths 1
5
| base_commit
stringlengths 40
40
| patch
stringlengths 67
1.54M
| test_patch
stringlengths 0
107M
| problem_statement
stringlengths 3
307k
| hints_text
stringlengths 0
908k
| created_at
timestamp[s] |
---|---|---|---|---|---|---|---|---|---|
holoviz/hvplot | 997 | holoviz__hvplot-997 | [
"996"
] | f30a1f1952ca1e14fb7a8eaf0b6566816c56e5aa | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -1860,6 +1860,8 @@ def hist(self, x=None, y=None, data=None):
ymin, ymax = (ys.min(), ys.max())
if is_dask(ys):
ymin, ymax = ymin.compute(), ymax.compute()
+ elif is_ibis(ys):
+ ymin, ymax = ymin.execute(), ymax.execute()
hist_opts['bin_range'] = ymin, ymax
ds = Dataset(data, self.by)
@@ -1883,6 +1885,8 @@ def hist(self, x=None, y=None, data=None):
ymin, ymax = (ys.min(), ys.max())
if is_dask(ys):
ymin, ymax = ymin.compute(), ymax.compute()
+ elif is_ibis(ys):
+ ymin, ymax = ymin.execute(), ymax.execute()
ranges.append((ymin, ymax))
if ranges:
hist_opts['bin_range'] = max_range(ranges)
diff --git a/hvplot/ibis.py b/hvplot/ibis.py
--- a/hvplot/ibis.py
+++ b/hvplot/ibis.py
@@ -1,3 +1,7 @@
+"""
+Experimental support for ibis.
+"""
+
def patch(name='hvplot', extension='bokeh', logo=False):
from . import hvPlotTabular, post_patch
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,6 +63,7 @@ def get_setup_version(reponame):
'dask',
'polars',
'fugue',
+ 'ibis-framework', # ibis-sqlite on conda
]
# Dependencies required to run the notebooks
| diff --git a/hvplot/tests/testibis.py b/hvplot/tests/testibis.py
--- a/hvplot/tests/testibis.py
+++ b/hvplot/tests/testibis.py
@@ -1,22 +1,19 @@
-"""Ibis works with hvplot"""
+"""Basic ibis tests"""
+
+import numpy as np
+import pandas as pd
import pytest
try:
+ import hvplot.ibis # noqa
import ibis
- import hvplot.ibis # noqa
- import pandas as pd
-except:
+except ImportError:
pytest.skip(allow_module_level=True)
+else:
+ ibis.set_backend('sqlite')
[email protected]
-def table():
- df = pd.DataFrame({
- "x": [pd.Timestamp("2022-01-01"), pd.Timestamp("2022-01-02")], "y": [1,2]
- })
- con = ibis.pandas.connect({"df": df})
- return con.table("df")
-
-def test_can_hvplot(table):
- """hvplot works with Ibis"""
- table.hvplot(x="x", y="y")
+def test_ibis_hist():
+ df = pd.DataFrame(dict(x=np.arange(10)))
+ table = ibis.memtable(df)
+ table.hvplot.hist('x')
| hvplot.hist with ibis+duckdb: ValueError: The truth value of an Ibis expression is not defined
hvplot==0.8.1
I'm trying to contribute a guide for ibis + DuckDB. When I try to plot a `hist` plot I get `ValueError: The truth value of an Ibis expression is not defined`.
```python
import duckdb
import ibis
import pandas as pd
from pathlib import Path
import hvplot.ibis
import holoviews as hv
hvplot.extension("plotly")
DUCKDB_PATH = "DuckDB.db"
if not Path(DUCKDB_PATH).exists():
pandas_df = pd.DataFrame(
{
"actual": [100, 150, 125, 140, 145, 135, 123],
"forecast": [90, 160, 125, 150, 141, 141, 120],
"numerical": [1.1, 1.9, 3.2, 3.8, 4.3, 5.0, 5.5],
"date": pd.date_range("2022-01-03", "2022-01-09"),
"string": ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
},
)
duckdb_con = duckdb.connect(DUCKDB_PATH)
duckdb_con.execute("CREATE TABLE df AS SELECT * FROM pandas_df")
ibis.options.sql.default_limit = None
db = ibis.duckdb.connect(DUCKDB_PATH)
df = db.table("df")
df.hvplot.hist("forecast", bins=3)
```
```bash
C:\repos\private\hvplot\.venv\lib\site-packages\ibis\backends\postgres\registry.py:164: UserWarning: locale specific date formats (%c, %x, %X) are not yet implemented for Windows
warnings.warn(
C:\repos\private\hvplot\.venv\lib\site-packages\duckdb_engine\__init__.py:229: DuckDBEngineWarning: duckdb-engine doesn't yet support
reflection on indices
warnings.warn(
Traceback (most recent call last):
File "C:\repos\private\hvplot\script2.py", line 31, in <module>
df.hvplot.hist("forecast", bins=3)
File "C:\repos\private\hvplot\hvplot\plotting\core.py", line 1309, in hist
return self(kind="hist", x=None, y=y, by=by, **kwds)
File "C:\repos\private\hvplot\hvplot\plotting\core.py", line 92, in __call__
return self._get_converter(x, y, kind, **kwds)(kind, x, y)
File "C:\repos\private\hvplot\hvplot\converter.py", line 1240, in __call__
obj = method(x, y)
File "C:\repos\private\hvplot\hvplot\converter.py", line 1820, in hist
hists = histogram(ds, dimension=y, **hist_opts)
File "C:\repos\private\hvplot\.venv\lib\site-packages\param\parameterized.py", line 3654, in __new__
return inst.__call__(*args,**params)
File "c:\repos\private\tmp\holoviews\holoviews\core\operation.py", line 220, in __call__
return element.apply(self, **kwargs)
File "c:\repos\private\tmp\holoviews\holoviews\core\accessors.py", line 45, in pipelined_call
result = __call__(*args, **kwargs)
File "c:\repos\private\tmp\holoviews\holoviews\core\accessors.py", line 202, in __call__
new_obj = apply_function(self._obj, **inner_kwargs)
File "c:\repos\private\tmp\holoviews\holoviews\core\operation.py", line 214, in __call__
return self._apply(element)
File "c:\repos\private\tmp\holoviews\holoviews\core\operation.py", line 141, in _apply
ret = self._process(element, key)
File "c:\repos\private\tmp\holoviews\holoviews\operation\element.py", line 785, in _process
null_hist_range = hist_range == (0, 0)
File "C:\repos\private\hvplot\.venv\lib\site-packages\ibis\expr\types\core.py", line 68, in __bool__
raise ValueError(
ValueError: The truth value of an Ibis expression is not defined
```
| 2022-12-01T06:19:04 |
|
holoviz/hvplot | 1,002 | holoviz__hvplot-1002 | [
"998"
] | 85848de0b74f56f1d78e945dc7a9445ddf896af0 | diff --git a/hvplot/plotting/core.py b/hvplot/plotting/core.py
--- a/hvplot/plotting/core.py
+++ b/hvplot/plotting/core.py
@@ -1244,7 +1244,7 @@ def violin(self, y=None, by=None, **kwds):
def hist(self, y=None, by=None, **kwds):
"""
- A `histogram` displays an approximate representation of the distribution of numerical data.
+ A `histogram` displays an approximate representation of the distribution of continous data.
Reference: https://hvplot.holoviz.org/reference/pandas/hist.html
@@ -1252,6 +1252,7 @@ def hist(self, y=None, by=None, **kwds):
----------
y : string or sequence
Field(s) in the *wide* data to compute the distribution(s) from.
+ Please note the fields should contain continuous data. Not categorical.
by : string or sequence
Field(s) in the *long* data to group by.
bins : int, optional
@@ -1295,6 +1296,20 @@ def hist(self, y=None, by=None, **kwds):
df['two'] = df['one'] + np.random.randint(1, 7, 6000)
df.hvplot.hist(bins=12, alpha=0.5, color=["lightgreen", "pink"])
+ If you want to show the distribution of the values of a categorical column,
+ you can use Pandas' method `value_counts` and `bar` as shown below
+
+ .. code-block::
+
+ import hvplot.pandas
+ import pandas as pd
+
+ data = pd.DataFrame({
+ "library": ["bokeh", "plotly", "matplotlib", "bokeh", "matplotlib", "matplotlib"]
+ })
+
+ data["library"].value_counts().hvplot.bar()
+
References
----------
| Make it easier to plot distributions of categorical data
Trying to contribute to pythonplot.com during the PyData Global 2022 I realized that it might be difficult for users to figure out how to plot a distribution of a categorical variable.
hvPlot (and Pandas) follows the definition of *histogram*: A histogram is an approximate representation of the [distribution](https://en.wikipedia.org/wiki/Frequency_distribution) of **numerical** data.

Plotly on the other hand does not care about that. They just make it easy to plot for their users

We should also make this easier for our users. We could
- Point out in the histogram reference guide that a histogram is only for numerical data.
- Provide an example of a distribution plot for categorical variable
- Document this in docstrings
- Improve the error message if the user provides a categorical variable. It is really, really hard to understand the error message right now.
Or maybe just support categorical parameters in `.hvplot.hist`.
| 2022-12-03T06:42:48 |
||
holoviz/hvplot | 1,015 | holoviz__hvplot-1015 | [
"1013"
] | 5129d1f03d3f91c7e428f65b811143f6c4b2bd12 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,7 @@ def get_setup_version(reponame):
]
_examples = [
- 'geoviews >=1.6.0',
- 'numba >=0.51.0',
+ 'geoviews >=1.9.0',
'geopandas',
'xarray >=0.18.2',
'networkx >=2.6.3',
@@ -48,7 +47,6 @@ def get_setup_version(reponame):
'intake-parquet >=0.2.3',
'intake-xarray >=0.5.0',
'dask >=2021.3.0',
- 'datashader >=0.6.5',
'notebook >=5.4',
'rasterio',
'cartopy',
@@ -57,7 +55,6 @@ def get_setup_version(reponame):
'scipy >=1.5.3',
'pillow >=8.2.0',
'selenium >=3.141.0',
- 'spatialpandas >=0.4.3',
'scikit-image >=0.17.2',
'python-snappy >=0.6.0',
'pooch >=1.6.0',
@@ -71,6 +68,14 @@ def get_setup_version(reponame):
'ipykernel <6.18.0' # temporary
]
+# Packages not working on python 3.11 because of numba
+if sys.version_info < (3, 11):
+ _examples += [
+ 'numba >=0.51.0',
+ 'datashader >=0.6.5',
+ 'spatialpandas >=0.4.3',
+ ]
+
extras_require = {
'tests': [
'codecov',
@@ -132,6 +137,7 @@ def get_setup_version(reponame):
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
| diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -10,9 +10,6 @@ on:
schedule:
- cron: '0 15 * * SUN'
-env:
- CACHE_VERSION: 3
-
jobs:
pre_commit:
name: Run pre-commit hooks
@@ -37,76 +34,32 @@ jobs:
fail-fast: false
matrix:
os: ['ubuntu-latest', 'macos-latest', 'windows-latest']
- python-version: ['3.7', '3.8', '3.9', '3.10']
- include:
- - os: ubuntu-latest
- path: /usr/share/miniconda3/envs/
- - os: macos-latest
- path: /Users/runner/miniconda3/envs/
- - os: windows-latest
- path: C:\Miniconda3\envs\
- exclude:
- # Excluded because of issues with the geo-stack
- - os: 'macos-latest'
- python-version: '3.7'
+ # Run on the full set on schedule, workflow_dispatch and push&tags events, otherwise on a subset.
+ python-version: ${{ ( github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || ( github.event_name == 'push' && github.ref_type == 'tag' ) ) && fromJSON('["3.7", "3.8", "3.9", "3.10", "3.11"]') || fromJSON('["3.7", "3.9", "3.11"]') }}
timeout-minutes: 90
defaults:
run:
- shell: bash -l {0}
+ shell: bash -el {0}
env:
DESC: "Python ${{ matrix.python-version }} tests"
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- MAMBA_NO_BANNER: 1
steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: "100"
- - uses: conda-incubator/setup-miniconda@v2
+ - uses: pyviz-dev/holoviz_tasks/[email protected]
with:
- miniconda-version: "latest"
- - name: Fetch unshallow
- run: git fetch --prune --tags --unshallow
- - name: Get Today's date for cache
- run: echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV
- - name: conda cache
- uses: actions/cache@v2
- with:
- path: ${{ matrix.path }}
- key: ${{ runner.os }}-conda-${{ matrix.python-version }}-${{ hashFiles('setup.py') }}-tests_examples-${{ env.TODAY }}-${{ env.CACHE_VERSION }}
- id: cache
- - name: conda setup
- if: steps.cache.outputs.cache-hit != 'true'
- run: |
- conda install -n base -c defaults "conda>=4.12"
- # TODO: remove pin
- conda install -n base -c conda-forge "mamba<0.26" --no-update-deps
- conda create -n test-environment
- conda activate test-environment
- conda config --env --append channels pyviz/label/dev --append channels conda-forge
- conda config --env --remove channels defaults
- conda install python=${{ matrix.python-version }} pyctdev
- - name: doit develop_install
- if: matrix.os != 'macos-latest' && steps.cache.outputs.cache-hit != 'true'
- run: |
- conda activate test-environment
- conda list
- doit develop_install -o examples -o tests --conda-mode=mamba
- - name: doit develop_install macos hack
- if: matrix.os == 'macos-latest' && steps.cache.outputs.cache-hit != 'true'
- run: |
- conda activate test-environment
- conda list
- doit develop_install -o examples -o tests --conda-mode=mamba || echo "Keep going"
- pip install --no-deps --no-build-isolation -e .
- - name: hvplot install if env cached
- if: steps.cache.outputs.cache-hit == 'true'
- run: |
- conda activate test-environment
- python -m pip install --no-deps --no-build-isolation -e .
- - name: doit env_capture
+ name: unit_test_suite
+ python-version: ${{ matrix.python-version }}
+ channel-priority: strict
+ channels: pyviz/label/dev,conda-forge,nodefaults
+ envs: "-o examples -o tests"
+ cache: true
+ conda-update: true
+ conda-mamba: mamba
+ id: install
+ - name: patch fiona/geostack on Python 3.7 / Macos
+ if: steps.install.outputs.cache-hit != 'true' && contains(matrix.os, 'macos') && matrix.python-version == '3.7'
run: |
conda activate test-environment
- doit env_capture
+ mamba install "fiona=1.8" "gdal=3.3"
- name: doit test_flakes
run: |
conda activate test-environment
@@ -116,6 +69,8 @@ jobs:
conda activate test-environment
doit test_unit
- name: test examples
+ # Should be removed when numba support python 3.11
+ if: matrix.python-version != '3.11'
run: |
conda activate test-environment
bokeh sampledata
| Support Python 3.11
Python 3.11 provides major performance improvement that I would like to take advantage of when using the HoloViz ecosystem.
Panel and HoloViews already support python 3.11 according to Pypi.
Please support python 3.11. Thanks.
| 2023-01-18T08:17:20 |
|
holoviz/hvplot | 1,025 | holoviz__hvplot-1025 | [
"666"
] | 0dd4aae5746141099c98872e36d3e182e35d8f8d | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -747,7 +747,7 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
if isinstance(data, xr.Dataset):
z = list(data.data_vars)[0]
else:
- z = data.name or 'value'
+ z = data.name or label or value_label
if gridded and isinstance(data, xr.Dataset) and not isinstance(z, list):
data = data[z]
self.z = z
| diff --git a/hvplot/tests/testgridplots.py b/hvplot/tests/testgridplots.py
--- a/hvplot/tests/testgridplots.py
+++ b/hvplot/tests/testgridplots.py
@@ -227,3 +227,19 @@ def test_symmetric_dataset_in_memory(self):
plot_opts = Store.lookup_options('bokeh', plot.last, 'plot')
# This DataArray happens to be symmetric.
assert plot_opts.kwargs['symmetric']
+
+ def test_dataarray_unnamed_label(self):
+ plot = self.da_rgb.sel(band=1).hvplot.image(label='test')
+ assert plot.vdims[0].name == 'test'
+
+ def test_dataarray_unnamed_value_label(self):
+ plot = self.da_rgb.sel(band=1).hvplot.image(value_label='test')
+ assert plot.vdims[0].name == 'test'
+
+ def test_dataarray_label_precedence(self):
+ # name > label > value_label
+ plot = self.da_rgb.sel(band=1).rename('a').hvplot.image(label='b')
+ assert plot.vdims[0].name == 'a'
+
+ plot = self.da_rgb.sel(band=1).hvplot.image(label='b', value_label='c')
+ assert plot.vdims[0].name == 'b'
| Adding label to xarray plot produces KeyError
#### ALL software version info
- hvplot 0.7.3
- holoviews 1.14.6
- xarray 0.19.0
#### Description of expected behavior and the observed behavior
I want to plot an Xarray DataArray with a specified label. This does not work unless the DataArray has a name. The error raised is not helpful and the behavior is confusing / inconsistent. Passing `label=` should always work, no matter what.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import xarray as xr
import numpy as np
import hvplot.xarray
ny, nx = (30, 20)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 2, ny)
coords={'y': ('y', y), 'x': ('x', x)}
foo = xr.DataArray(np.random.rand(ny, nx), dims=['y', 'x'], coords=coords)
# works
foo.hvplot('x', 'y')
# Key Error
foo.hvplot('x', 'y', label='Foo')
```
This can be resolved by giving the array a name
```python
foo.rename('bar').hvplot('x', 'y', label='Foo')
```
or by specifying a clim manually
```python
foo.hvplot('x', 'y', label='Foo', clim=(0, 1))
```
Neither of these workarounds makes sense conceptually.
#### Stack traceback and/or browser JavaScript console output
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/srv/conda/envs/notebook/lib/python3.8/site-packages/xarray/core/dataset.py in _construct_dataarray(self, name)
1397 try:
-> 1398 variable = self._variables[name]
1399 except KeyError:
KeyError: 'value'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/tmp/ipykernel_926/56029127.py in <module>
13
14 # Key Error
---> 15 foo.hvplot('x', 'y', label='Foo')
/srv/conda/envs/notebook/lib/python3.8/site-packages/hvplot/plotting/core.py in __call__(self, x, y, kind, **kwds)
77 return pn.panel(plot, **panel_dict)
78
---> 79 return self._get_converter(x, y, kind, **kwds)(kind, x, y)
80
81 def _get_converter(self, x=None, y=None, kind=None, **kwds):
/srv/conda/envs/notebook/lib/python3.8/site-packages/hvplot/plotting/core.py in _get_converter(self, x, y, kind, **kwds)
84 y = y or params.pop('y', None)
85 kind = kind or params.pop('kind', None)
---> 86 return HoloViewsConverter(
87 self._data, x, y, kind=kind, **params
88 )
/srv/conda/envs/notebook/lib/python3.8/site-packages/hvplot/converter.py in __init__(self, data, x, y, kind, by, use_index, group_label, value_label, backlog, persist, use_dask, crs, fields, groupby, dynamic, grid, legend, rot, title, xlim, ylim, clim, symmetric, logx, logy, loglog, hover, subplots, label, invert, stacked, colorbar, datashade, rasterize, row, col, figsize, debug, framewise, aggregator, projection, global_extent, geo, precompute, flip_xaxis, flip_yaxis, dynspread, hover_cols, x_sampling, y_sampling, project, tools, attr_labels, coastline, tiles, sort_date, check_symmetric_max, transforms, stream, cnorm, features, **kwds)
504 try:
505 if not use_dask:
--> 506 symmetric = self._process_symmetric(symmetric, clim, check_symmetric_max)
507
508 if self._style_opts.get('cmap') is None:
/srv/conda/envs/notebook/lib/python3.8/site-packages/hvplot/converter.py in _process_symmetric(self, symmetric, clim, check_symmetric_max)
547 if self.data.chunks:
548 return False
--> 549 data = self.data[self.z]
550 if is_xarray_dataarray(data):
551 if data.size > check_symmetric_max:
/srv/conda/envs/notebook/lib/python3.8/site-packages/xarray/core/dataset.py in __getitem__(self, key)
1500
1501 if hashable(key):
-> 1502 return self._construct_dataarray(key)
1503 else:
1504 return self._copy_listed(key)
/srv/conda/envs/notebook/lib/python3.8/site-packages/xarray/core/dataset.py in _construct_dataarray(self, name)
1398 variable = self._variables[name]
1399 except KeyError:
-> 1400 _, name, variable = _get_virtual_variable(
1401 self._variables, name, self._level_coords, self.dims
1402 )
/srv/conda/envs/notebook/lib/python3.8/site-packages/xarray/core/dataset.py in _get_virtual_variable(variables, key, level_vars, dim_sizes)
171 ref_var = dim_var.to_index_variable().get_level_variable(ref_name)
172 else:
--> 173 ref_var = variables[ref_name]
174
175 if var_name is None:
KeyError: 'value'
```
| I agree, that behavior doesn't look reasonable at all! @jlstevens should be able to address that.
Sorry for letting this languish! I'll definitely have a proper look for 0.8.1. | 2023-02-24T21:48:56 |
holoviz/hvplot | 1,053 | holoviz__hvplot-1053 | [
"951"
] | fe39eff256f031889f089b239489d33f88658fa0 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -230,6 +230,9 @@ class HoloViewsConverter:
Whether to project the data before plotting (adds initial
overhead but avoids projecting data when plot is dynamically
updated).
+ projection (default=None): str or Cartopy CRS
+ Coordinate reference system of the plot specified as Cartopy
+ CRS object or class name.
tiles (default=False):
Whether to overlay the plot on a tile source. Tiles sources
can be selected by name or a tiles object or class can be passed,
@@ -255,7 +258,8 @@ class HoloViewsConverter:
]
_geo_options = [
- 'geo', 'crs', 'features', 'project', 'coastline', 'tiles'
+ 'geo', 'crs', 'features', 'project', 'coastline', 'tiles',
+ 'projection', 'global_extents'
]
_axis_options = [
@@ -393,7 +397,7 @@ def __init__(
)
self.dynamic = dynamic
- self.geo = any([geo, crs, global_extent, projection, project, coastline, features])
+ self.geo = any([geo, crs, global_extent, projection, project, coastline, features, tiles])
self.crs = self._process_crs(data, crs) if self.geo else None
self.project = project
self.coastline = coastline
@@ -434,13 +438,20 @@ def __init__(
"Projection must be defined as cartopy CRS or "
"one of the following CRS string:\n {}".format(all_crs))
- proj_crs = projection or ccrs.GOOGLE_MERCATOR
- if self.crs != proj_crs:
+ projection = projection or (ccrs.GOOGLE_MERCATOR if tiles else self.crs)
+ if tiles and projection != ccrs.GOOGLE_MERCATOR:
+ raise ValueError(
+ "Tiles can only be used with output projection of "
+ "`cartopy.crs.GOOGLE_MERCATOR`. To get rid of this error "
+ "remove `projection=` or `tiles=`"
+ )
+
+ if self.crs != projection:
px0, py0, px1, py1 = ccrs.GOOGLE_MERCATOR.boundary.bounds
x0, x1 = xlim or (px0, px1)
y0, y1 = ylim or (py0, py1)
extents = (x0, y0, x1, y1)
- x0, y0, x1, y1 = project_extents(extents, self.crs, proj_crs)
+ x0, y0, x1, y1 = project_extents(extents, self.crs, projection)
if xlim:
xlim = (x0, x1)
if ylim:
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -107,6 +107,11 @@ def test_plot_with_projection_as_invalid_string(self):
with self.assertRaisesRegex(ValueError, "Projection must be defined"):
self.da.hvplot.image('x', 'y', projection='foo')
+ def test_plot_with_projection_raises_an_error_when_tiles_set(self):
+ da = self.da.copy()
+ with self.assertRaisesRegex(ValueError, "Tiles can only be used with output projection"):
+ da.hvplot.image('x', 'y', crs=self.crs, projection='Robinson', tiles=True)
+
class TestGeoAnnotation(TestCase):
@@ -274,6 +279,12 @@ def test_points_hover_cols_with_by_set_to_name(self):
assert points.kdims == ['x', 'y']
assert points.vdims == ['name']
+ def test_points_project_xlim_and_ylim(self):
+ points = self.cities.hvplot(geo=True, xlim=(-10, 10), ylim=(-20, -10))
+ opts = hv.Store.lookup_options('bokeh', points, 'plot').options
+ assert opts['xlim'] == (-10, 10)
+ assert opts['ylim'] == (-20, -10)
+
@pytest.mark.xfail(
reason='Waiting for upstream fix https://github.com/holoviz/holoviews/pull/5325',
raises=KeyError,
| xlim/ylim errors when geo=True
#### ALL software version info
<details> <summary> conda environment </summary>
```
# Name Version Build Channel
abseil-cpp 20211102.0 h6b3803e_1 conda-forge
affine 2.3.1 pyhd8ed1ab_0 conda-forge
aiobotocore 2.4.0 pyhd8ed1ab_0 conda-forge
aiohttp 3.8.3 py310h8e9501a_0 conda-forge
aioitertools 0.11.0 pyhd8ed1ab_0 conda-forge
aiosignal 1.2.0 pyhd8ed1ab_0 conda-forge
anyio 3.6.2 pyhd8ed1ab_0 conda-forge
appdirs 1.4.4 pyh9f0ad1d_0 conda-forge
appnope 0.1.3 pyhd8ed1ab_0 conda-forge
argon2-cffi 21.3.0 pyhd8ed1ab_0 conda-forge
argon2-cffi-bindings 21.2.0 py310hf8d0d8f_2 conda-forge
arrow-cpp 8.0.0 py310h6b3e42c_0
asciitree 0.3.3 py_2 conda-forge
asttokens 2.0.8 pyhd8ed1ab_0 conda-forge
async-timeout 4.0.2 pyhd8ed1ab_0 conda-forge
attrs 22.1.0 pyh71513ae_1 conda-forge
aws-c-common 0.6.8 h1a28f6b_0
aws-c-event-stream 0.1.6 hc377ac9_5
aws-checksums 0.1.11 h1a28f6b_1
aws-sdk-cpp 1.8.185 h4a942e0_0
babel 2.10.3 pyhd8ed1ab_0 conda-forge
backcall 0.2.0 pyh9f0ad1d_0 conda-forge
backports 1.0 py_2 conda-forge
backports.functools_lru_cache 1.6.4 pyhd8ed1ab_0 conda-forge
beautifulsoup4 4.11.1 pyha770c72_0 conda-forge
bleach 5.0.1 pyhd8ed1ab_0 conda-forge
blosc 1.21.1 hd414afc_3 conda-forge
bokeh 2.4.3 pyhd8ed1ab_3 conda-forge
boost-cpp 1.78.0 h1cb353e_1 conda-forge
botocore 1.27.59 pyhd8ed1ab_0 conda-forge
branca 0.5.0 pyhd8ed1ab_0 conda-forge
brotli 1.0.9 h1c322ee_7 conda-forge
brotli-bin 1.0.9 h1c322ee_7 conda-forge
brotlipy 0.7.0 py310hf8d0d8f_1004 conda-forge
bzip2 1.0.8 h3422bc3_4 conda-forge
c-ares 1.18.1 h3422bc3_0 conda-forge
ca-certificates 2022.9.24 h4653dfc_0 conda-forge
cairo 1.16.0 h73a0509_1014 conda-forge
cartopy 0.21.0 py310h9c658c6_0 conda-forge
certifi 2022.9.24 pyhd8ed1ab_0 conda-forge
cffi 1.15.1 py310h2399d43_1 conda-forge
cfitsio 4.1.0 hd4f5c17_0 conda-forge
cftime 1.6.2 py310hf1a086a_0 conda-forge
charset-normalizer 2.1.1 pyhd8ed1ab_0 conda-forge
click 8.1.3 py310hbe9552e_0 conda-forge
click-plugins 1.1.1 py_0 conda-forge
cligj 0.7.2 pyhd8ed1ab_1 conda-forge
cloudpickle 2.2.0 pyhd8ed1ab_0 conda-forge
colorama 0.4.5 pyhd8ed1ab_0 conda-forge
colorcet 3.0.1 py_0 pyviz
contourpy 1.0.5 py310h2887b22_0 conda-forge
cramjam 2.5.0 py310he41690c_0 conda-forge
cryptography 38.0.2 py310h4fe9c50_0 conda-forge
curl 7.85.0 hd538317_0 conda-forge
cycler 0.11.0 pyhd8ed1ab_0 conda-forge
cytoolz 0.12.0 py310h02f21da_0 conda-forge
dask 2022.10.0 pyhd8ed1ab_2 conda-forge
dask-core 2022.10.0 pyhd8ed1ab_1 conda-forge
datashader 0.14.2 py_0 pyviz
datashape 0.5.4 py_1 conda-forge
debugpy 1.6.3 py310hc6dc59f_0 conda-forge
decorator 5.1.1 pyhd8ed1ab_0 conda-forge
defusedxml 0.7.1 pyhd8ed1ab_0 conda-forge
distributed 2022.10.0 pyhd8ed1ab_2 conda-forge
entrypoints 0.4 pyhd8ed1ab_0 conda-forge
executing 1.1.1 pyhd8ed1ab_0 conda-forge
expat 2.4.9 hb7217d7_0 conda-forge
fasteners 0.17.3 pyhd8ed1ab_0 conda-forge
fastparquet 0.8.3 py310hf1a086a_0 conda-forge
fiona 1.8.22 py310hcdcf461_0 conda-forge
flit-core 3.7.1 pyhd8ed1ab_0 conda-forge
folium 0.13.0 pyhd8ed1ab_0 conda-forge
font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge
font-ttf-inconsolata 3.000 h77eed37_0 conda-forge
font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge
font-ttf-ubuntu 0.83 hab24e00_0 conda-forge
fontconfig 2.14.0 h82840c6_1 conda-forge
fonts-conda-ecosystem 1 0 conda-forge
fonts-conda-forge 1 0 conda-forge
fonttools 4.37.4 py310h8e9501a_0 conda-forge
freetype 2.12.1 hd633e50_0 conda-forge
freexl 1.0.6 h1a8c8d9_1 conda-forge
frozenlist 1.3.1 py310h8c01e39_0 conda-forge
fsspec 2022.10.0 pyhd8ed1ab_0 conda-forge
gdal 3.5.2 py310hc67b115_4 conda-forge
geopandas 0.11.1 pyhd8ed1ab_0 conda-forge
geopandas-base 0.11.1 pyha770c72_0 conda-forge
geos 3.11.0 h9a09cb3_0 conda-forge
geotiff 1.7.1 hc898e3f_3 conda-forge
geoviews 1.9.5 py_0 pyviz
geoviews-core 1.9.5 py_0 pyviz
gettext 0.21.1 h0186832_0 conda-forge
gflags 2.2.2 hc88da5d_1004 conda-forge
giflib 5.2.1 h27ca646_2 conda-forge
glog 0.6.0 h6da1cb0_0 conda-forge
grpc-cpp 1.46.1 h64f8b41_0
hdf4 4.2.15 hc683e77_4 conda-forge
hdf5 1.12.2 nompi_h8968d4b_100 conda-forge
heapdict 1.0.1 py_0 conda-forge
holoviews 1.15.1 py_0 pyviz
hvplot 0.8.1 py_0 pyviz
icu 70.1 h6b3803e_0 conda-forge
idna 3.4 pyhd8ed1ab_0 conda-forge
importlib-metadata 4.11.4 py310hbe9552e_0 conda-forge
importlib_resources 5.10.0 pyhd8ed1ab_0 conda-forge
intake 0.6.6 pyhd8ed1ab_0 conda-forge
intake-parquet 0.2.3 py_0 conda-forge
intake-xarray 0.6.1 pyhd8ed1ab_0 conda-forge
ipykernel 6.16.1 pyh736e0ef_0 conda-forge
ipython 8.5.0 pyhd1c38e8_1 conda-forge
ipython_genutils 0.2.0 py_1 conda-forge
ipywidgets 8.0.2 pyhd8ed1ab_1 conda-forge
jedi 0.18.1 pyhd8ed1ab_2 conda-forge
jinja2 3.1.2 pyhd8ed1ab_1 conda-forge
jmespath 1.0.1 pyhd8ed1ab_0 conda-forge
joblib 1.2.0 pyhd8ed1ab_0 conda-forge
jpeg 9e he4db4b2_2 conda-forge
json-c 0.16 hc449e50_0 conda-forge
json5 0.9.5 pyh9f0ad1d_0 conda-forge
jsonschema 4.16.0 pyhd8ed1ab_0 conda-forge
jupyter 1.0.0 py310hbe9552e_7 conda-forge
jupyter_client 7.3.4 pyhd8ed1ab_0 conda-forge
jupyter_console 6.4.4 pyhd8ed1ab_0 conda-forge
jupyter_core 4.11.1 py310hbe9552e_0 conda-forge
jupyter_server 1.21.0 pyhd8ed1ab_0 conda-forge
jupyterlab 3.4.8 pyhd8ed1ab_0 conda-forge
jupyterlab_pygments 0.2.2 pyhd8ed1ab_0 conda-forge
jupyterlab_server 2.16.1 pyhd8ed1ab_0 conda-forge
jupyterlab_widgets 3.0.3 pyhd8ed1ab_0 conda-forge
kealib 1.4.15 h02ce806_1 conda-forge
kiwisolver 1.4.4 py310hd23d0e8_0 conda-forge
krb5 1.19.3 hf9b2bbe_0 conda-forge
lcms2 2.12 had6a04f_0 conda-forge
lerc 4.0.0 h9a09cb3_0 conda-forge
libblas 3.9.0 16_osxarm64_openblas conda-forge
libbrotlicommon 1.0.9 h1c322ee_7 conda-forge
libbrotlidec 1.0.9 h1c322ee_7 conda-forge
libbrotlienc 1.0.9 h1c322ee_7 conda-forge
libcblas 3.9.0 16_osxarm64_openblas conda-forge
libcurl 7.85.0 hd538317_0 conda-forge
libcxx 14.0.6 h2692d47_0 conda-forge
libdap4 3.20.6 h8510809_2 conda-forge
libdeflate 1.14 h1a8c8d9_0 conda-forge
libedit 3.1.20191231 hc8eb9b7_2 conda-forge
libev 4.33 h642e427_1 conda-forge
libevent 2.1.10 hbae9a57_4 conda-forge
libffi 3.4.2 h3422bc3_5 conda-forge
libgdal 3.5.2 hdf189a9_4 conda-forge
libgfortran 5.0.0 11_3_0_hd922786_25 conda-forge
libgfortran5 11.3.0 hdaf2cc0_25 conda-forge
libglib 2.74.0 h14ed1c1_0 conda-forge
libiconv 1.17 he4db4b2_0 conda-forge
libkml 1.3.0 h41464e4_1015 conda-forge
liblapack 3.9.0 16_osxarm64_openblas conda-forge
libllvm11 11.1.0 hfa12f05_4 conda-forge
libnetcdf 4.8.1 nompi_h996a5af_104 conda-forge
libnghttp2 1.47.0 h232270b_1 conda-forge
libopenblas 0.3.21 openmp_hc731615_3 conda-forge
libpng 1.6.38 h76d750c_0 conda-forge
libpq 14.5 hb2ab832_0 conda-forge
libprotobuf 3.20.1 hb5ab8b9_4 conda-forge
librttopo 1.1.0 h275bb25_11 conda-forge
libsodium 1.0.18 h27ca646_1 conda-forge
libspatialindex 1.9.3 hbdafb3b_4 conda-forge
libspatialite 5.0.1 h47b1232_18 conda-forge
libsqlite 3.39.4 h76d750c_0 conda-forge
libssh2 1.10.0 hb80f160_3 conda-forge
libthrift 0.13.0 hd358383_6
libtiff 4.4.0 hfa0b094_4 conda-forge
libwebp-base 1.2.4 h57fd34a_0 conda-forge
libxcb 1.13 h9b22ae9_1004 conda-forge
libxml2 2.9.14 h9d8dfc2_4 conda-forge
libzip 1.9.2 h96606af_1 conda-forge
libzlib 1.2.13 h03a7124_4 conda-forge
llvm-openmp 14.0.4 hd125106_0 conda-forge
llvmlite 0.39.1 py310h1e34944_0 conda-forge
locket 1.0.0 pyhd8ed1ab_0 conda-forge
lz4 4.0.0 py310h810453e_2 conda-forge
lz4-c 1.9.3 hbdafb3b_1 conda-forge
mapclassify 2.4.3 pyhd8ed1ab_0 conda-forge
markdown 3.4.1 pyhd8ed1ab_0 conda-forge
markupsafe 2.1.1 py310hf8d0d8f_1 conda-forge
matplotlib 3.6.1 py310hb6292c7_0 conda-forge
matplotlib-base 3.6.1 py310h78c5c2f_0 conda-forge
matplotlib-inline 0.1.6 pyhd8ed1ab_0 conda-forge
mistune 2.0.4 pyhd8ed1ab_0 conda-forge
msgpack-python 1.0.4 py310hd23d0e8_0 conda-forge
multidict 6.0.2 py310hf8d0d8f_1 conda-forge
multipledispatch 0.6.0 py_0 conda-forge
munch 2.5.0 py_0 conda-forge
munkres 1.1.4 pyh9f0ad1d_0 conda-forge
nbclassic 0.4.5 pyhd8ed1ab_0 conda-forge
nbclient 0.7.0 pyhd8ed1ab_0 conda-forge
nbconvert 7.2.2 pyhd8ed1ab_0 conda-forge
nbconvert-core 7.2.2 pyhd8ed1ab_0 conda-forge
nbconvert-pandoc 7.2.2 pyhd8ed1ab_0 conda-forge
nbformat 5.7.0 pyhd8ed1ab_0 conda-forge
ncurses 6.3 h07bb92c_1 conda-forge
nest-asyncio 1.5.6 pyhd8ed1ab_0 conda-forge
netcdf4 1.6.1 nompi_py310haaa361f_100 conda-forge
networkx 2.8.7 pyhd8ed1ab_0 conda-forge
notebook 6.5.1 pyha770c72_0 conda-forge
notebook-shim 0.2.0 pyhd8ed1ab_0 conda-forge
nspr 4.32 hbdafb3b_1 conda-forge
nss 3.78 h1483a63_0 conda-forge
numba 0.56.3 py310h3124f1e_0 conda-forge
numcodecs 0.10.2 py310hc6dc59f_0 conda-forge
numpy 1.23.4 py310h5d7c261_0 conda-forge
openjpeg 2.5.0 h5d4e404_1 conda-forge
openssl 1.1.1q h03a7124_1 conda-forge
orc 1.7.4 hb613a79_0
packaging 21.3 pyhd8ed1ab_0 conda-forge
pandas 1.5.1 py310h2b830bf_0 conda-forge
pandoc 2.12 hca03da5_0
pandocfilters 1.5.0 pyhd8ed1ab_0 conda-forge
panel 0.14.0 py_0 pyviz
param 1.12.2 py_0 pyviz
parso 0.8.3 pyhd8ed1ab_0 conda-forge
partd 1.3.0 pyhd8ed1ab_0 conda-forge
pcre 8.45 hbdafb3b_0 conda-forge
pcre2 10.37 hcf5f1cc_1 conda-forge
pexpect 4.8.0 pyh9f0ad1d_2 conda-forge
pickleshare 0.7.5 py_1003 conda-forge
pillow 9.2.0 py310h173adc3_2 conda-forge
pip 22.3 pyhd8ed1ab_0 conda-forge
pixman 0.40.0 h27ca646_0 conda-forge
pkgutil-resolve-name 1.3.10 pyhd8ed1ab_0 conda-forge
plotly 5.10.0 pyhd8ed1ab_0 conda-forge
pooch 1.6.0 pyhd8ed1ab_0 conda-forge
poppler 22.10.0 hae7f5f0_0 conda-forge
poppler-data 0.4.11 hd8ed1ab_0 conda-forge
postgresql 14.5 hb0ca4ee_0 conda-forge
proj 9.0.1 h4c79c2b_1 conda-forge
prometheus_client 0.15.0 pyhd8ed1ab_0 conda-forge
prompt-toolkit 3.0.31 pyha770c72_0 conda-forge
prompt_toolkit 3.0.31 hd8ed1ab_0 conda-forge
psutil 5.9.3 py310h8e9501a_0 conda-forge
pthread-stubs 0.4 h27ca646_1001 conda-forge
ptyprocess 0.7.0 pyhd3deb0d_0 conda-forge
pure_eval 0.2.2 pyhd8ed1ab_0 conda-forge
pyarrow 8.0.0 py310hf303d72_0
pycparser 2.21 pyhd8ed1ab_0 conda-forge
pyct 0.4.8 py_0 pyviz
pyct-core 0.4.8 py_0 pyviz
pygments 2.13.0 pyhd8ed1ab_0 conda-forge
pyopenssl 22.1.0 pyhd8ed1ab_0 conda-forge
pyparsing 3.0.9 pyhd8ed1ab_0 conda-forge
pyproj 3.4.0 py310h7275060_0 conda-forge
pyrsistent 0.18.1 py310hf8d0d8f_1 conda-forge
pyshp 2.3.1 pyhd8ed1ab_0 conda-forge
pysocks 1.7.1 pyha2e5f31_6 conda-forge
python 3.10.6 hbce4517_0_cpython conda-forge
python-dateutil 2.8.2 pyhd8ed1ab_0 conda-forge
python-fastjsonschema 2.16.2 pyhd8ed1ab_0 conda-forge
python-snappy 0.6.0 py310h8bfdf0b_2 conda-forge
python_abi 3.10 2_cp310 conda-forge
pytz 2022.5 pyhd8ed1ab_0 conda-forge
pyviz_comms 2.2.1 py_0 pyviz
pyyaml 6.0 py310hf8d0d8f_4 conda-forge
pyzmq 24.0.1 py310hc407298_0 conda-forge
rasterio 1.3.3 py310ha36aacf_0 conda-forge
re2 2022.04.01 h6b3803e_0 conda-forge
readline 8.1.2 h46ed386_0 conda-forge
requests 2.28.1 pyhd8ed1ab_1 conda-forge
retrying 1.3.3 py_2 conda-forge
rtree 1.0.1 py310ha3239f5_0 conda-forge
s3fs 2022.10.0 pyhd8ed1ab_0 conda-forge
scikit-learn 1.1.2 py310h3d7afdd_0 conda-forge
scipy 1.9.3 py310ha0d8a01_0 conda-forge
send2trash 1.8.0 pyhd8ed1ab_0 conda-forge
setuptools 65.5.0 pyhd8ed1ab_0 conda-forge
shapely 1.8.5 py310hc253cc3_0 conda-forge
six 1.16.0 pyh6c4a22f_0 conda-forge
snappy 1.1.9 h39c3846_1 conda-forge
sniffio 1.3.0 pyhd8ed1ab_0 conda-forge
snuggs 1.4.7 py_0 conda-forge
sortedcontainers 2.4.0 pyhd8ed1ab_0 conda-forge
soupsieve 2.3.2.post1 pyhd8ed1ab_0 conda-forge
spatialpandas 0.4.4 py_0 pyviz
sqlite 3.39.4 h2229b38_0 conda-forge
stack_data 0.5.1 pyhd8ed1ab_0 conda-forge
streamz 0.6.4 pyh6c4a22f_0 conda-forge
tblib 1.7.0 pyhd8ed1ab_0 conda-forge
tenacity 8.1.0 pyhd8ed1ab_0 conda-forge
terminado 0.16.0 pyhd1c38e8_0 conda-forge
threadpoolctl 3.1.0 pyh8a188c0_0 conda-forge
tiledb 2.12.0 hc7ac4c9_0 conda-forge
tinycss2 1.2.1 pyhd8ed1ab_0 conda-forge
tk 8.6.12 he1e0b03_0 conda-forge
tomli 2.0.1 pyhd8ed1ab_0 conda-forge
toolz 0.12.0 pyhd8ed1ab_0 conda-forge
tornado 6.1 py310hf8d0d8f_3 conda-forge
tqdm 4.64.1 pyhd8ed1ab_0 conda-forge
traitlets 5.5.0 pyhd8ed1ab_0 conda-forge
typing-extensions 4.4.0 hd8ed1ab_0 conda-forge
typing_extensions 4.4.0 pyha770c72_0 conda-forge
tzcode 2022e h1a8c8d9_0 conda-forge
tzdata 2022e h191b570_0 conda-forge
unicodedata2 14.0.0 py310hf8d0d8f_1 conda-forge
urllib3 1.26.11 pyhd8ed1ab_0 conda-forge
utf8proc 2.6.1 h1a28f6b_0
wcwidth 0.2.5 pyh9f0ad1d_2 conda-forge
webencodings 0.5.1 py_1 conda-forge
websocket-client 1.4.1 pyhd8ed1ab_0 conda-forge
wheel 0.37.1 pyhd8ed1ab_0 conda-forge
widgetsnbextension 4.0.3 pyhd8ed1ab_0 conda-forge
wrapt 1.14.1 py310h02f21da_0 conda-forge
xarray 2022.10.0 pyhd8ed1ab_0 conda-forge
xerces-c 3.2.4 h627aa08_1 conda-forge
xorg-libxau 1.0.9 h27ca646_0 conda-forge
xorg-libxdmcp 1.1.3 h27ca646_0 conda-forge
xyzservices 2022.9.0 pyhd8ed1ab_0 conda-forge
xz 5.2.6 h57fd34a_0 conda-forge
yaml 0.2.5 h3422bc3_2 conda-forge
yarl 1.7.2 py310hf8d0d8f_2 conda-forge
zarr 2.13.3 pyhd8ed1ab_0 conda-forge
zeromq 4.3.4 hbdafb3b_1 conda-forge
zict 2.2.0 pyhd8ed1ab_0 conda-forge
zipp 3.9.0 pyhd8ed1ab_0 conda-forge
zlib 1.2.13 h03a7124_4 conda-forge
zstd 1.5.2 h8128057_4 conda-forge
```
</details>
OS: macOS 12.5.1 (running on an M1 Mac, native conda), Safari 15.6.1
#### Description of expected behavior and the observed behavior
Setting xlim/ylim when using geo=True leads to unexpected behavior and fails in some cases. xlim/ylim are not obeyed when background map tiles are removed from the plot. Setting xlim and ylim raises errors for some cases (seems to error when ymin > 0 or ymax < 0).
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import hvplot.pandas # noqa
import cartopy.crs as ccrs
from bokeh.sampledata.airport_routes import airports
# Case 1: This example works
airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2,
xlim=(-180, -30), ylim=(0, 72), tiles='ESRI')
# Case 2: xlim/ylim are not obeyed when the map tile is removed
airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2,
xlim=(-180, -30), ylim=(0, 72))
# Case 3: Setting ymin > 0 raises errors
# Change ymin from 0 to 10
airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2,
xlim=(-180, -30), ylim=(10, 72))
# Case 4: Setting ymax < 0 also raises errors
airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2,
xlim=(-180, -30), ylim=(-20, -5))
# Works if geo=False
airports.hvplot.points('Longitude', 'Latitude', geo=False, color='red', alpha=0.2,
xlim=(-180, -30), ylim=(10, 72))
```
#### Stack traceback and/or browser JavaScript console output
<details><summary> Traceback for case 3 </summary>
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/IPython/core/formatters.py:973, in MimeBundleFormatter.__call__(self, obj, include, exclude)
970 method = get_real_method(obj, self.print_method)
972 if method is not None:
--> 973 return method(include=include, exclude=exclude)
974 return None
975 else:
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/core/dimension.py:1294, in Dimensioned._repr_mimebundle_(self, include, exclude)
1287 def _repr_mimebundle_(self, include=None, exclude=None):
1288 """
1289 Resolves the class hierarchy for the class rendering the
1290 object using any display hooks registered on Store.display
1291 hooks. The output of all registered display_hooks is then
1292 combined and returned.
1293 """
-> 1294 return Store.render(self)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/core/options.py:1426, in Store.render(cls, obj)
1424 data, metadata = {}, {}
1425 for hook in hooks:
-> 1426 ret = hook(obj)
1427 if ret is None:
1428 continue
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:277, in pprint_display(obj)
275 if not ip.display_formatter.formatters['text/plain'].pprint:
276 return None
--> 277 return display(obj, raw_output=True)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:247, in display(obj, raw_output, **kwargs)
245 elif isinstance(obj, (CompositeOverlay, ViewableElement)):
246 with option_state(obj):
--> 247 output = element_display(obj)
248 elif isinstance(obj, (Layout, NdLayout, AdjointLayout)):
249 with option_state(obj):
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:141, in display_hook.<locals>.wrapped(element)
139 try:
140 max_frames = OutputSettings.options['max_frames']
--> 141 mimebundle = fn(element, max_frames=max_frames)
142 if mimebundle is None:
143 return {}, {}
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:187, in element_display(element, max_frames)
184 if type(element) not in Store.registry[backend]:
185 return None
--> 187 return render(element)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:68, in render(obj, **kwargs)
65 if renderer.fig == 'pdf':
66 renderer = renderer.instance(fig='png')
---> 68 return renderer.components(obj, **kwargs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:398, in Renderer.components(self, obj, fmt, comm, **kwargs)
395 embed = (not (dynamic or streams or self.widget_mode == 'live') or config.embed)
397 if embed or config.comms == 'default':
--> 398 return self._render_panel(plot, embed, comm)
399 return self._render_ipywidget(plot)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:405, in Renderer._render_panel(self, plot, embed, comm)
403 doc = Document()
404 with config.set(embed=embed):
--> 405 model = plot.layout._render_model(doc, comm)
406 if embed:
407 return render_model(model, comm)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/viewable.py:505, in Renderable._render_model(self, doc, comm)
503 if comm is None:
504 comm = state._comm_manager.get_server_comm()
--> 505 model = self.get_root(doc, comm)
507 if config.embed:
508 embed_state(self, model, doc,
509 json=config.embed_json,
510 json_prefix=config.embed_json_prefix,
511 save_path=config.embed_save_path,
512 load_path=config.embed_load_path,
513 progress=False)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/viewable.py:556, in Renderable.get_root(self, doc, comm, preprocess)
539 """
540 Returns the root model and applies pre-processing hooks
541
(...)
553 Returns the bokeh model corresponding to this panel object
554 """
555 doc = init_doc(doc)
--> 556 root = self._get_model(doc, comm=comm)
557 if preprocess:
558 self._preprocess(root)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/pane/holoviews.py:265, in HoloViews._get_model(self, doc, root, parent, comm)
263 plot = self.object
264 else:
--> 265 plot = self._render(doc, comm, root)
267 plot.pane = self
268 backend = plot.renderer.backend
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/pane/holoviews.py:342, in HoloViews._render(self, doc, comm, root)
339 if comm:
340 kwargs['comm'] = comm
--> 342 return renderer.get_plot(self.object, **kwargs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/renderer.py:70, in BokehRenderer.get_plot(self_or_cls, obj, doc, renderer, **kwargs)
63 @bothmethod
64 def get_plot(self_or_cls, obj, doc=None, renderer=None, **kwargs):
65 """
66 Given a HoloViews Viewable return a corresponding plot instance.
67 Allows supplying a document attach the plot to, useful when
68 combining the bokeh model with another plot.
69 """
---> 70 plot = super().get_plot(obj, doc, renderer, **kwargs)
71 if plot.document is None:
72 plot.document = Document() if self_or_cls.notebook_context else curdoc()
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:240, in Renderer.get_plot(self_or_cls, obj, doc, renderer, comm, **kwargs)
237 defaults = [kd.default for kd in plot.dimensions]
238 init_key = tuple(v if d is None else d for v, d in
239 zip(plot.keys[0], defaults))
--> 240 plot.update(init_key)
241 else:
242 plot = obj
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/plot.py:948, in DimensionedPlot.update(self, key)
946 def update(self, key):
947 if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
--> 948 return self.initialize_plot()
949 item = self.__getitem__(key)
950 self.traverse(lambda x: setattr(x, '_updated', True))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/plotting/bokeh/plot.py:111, in GeoPlot.initialize_plot(self, ranges, plot, plots, source)
109 def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
110 opts = {} if isinstance(self, HvOverlayPlot) else {'source': source}
--> 111 fig = super(GeoPlot, self).initialize_plot(ranges, plot, plots, **opts)
112 if self.geographic and self.show_bounds and not self.overlaid:
113 from . import GeoShapePlot
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:1388, in ElementPlot.initialize_plot(self, ranges, plot, plots, source)
1386 # Initialize plot, source and glyph
1387 if plot is None:
-> 1388 plot = self._init_plot(key, style_element, ranges=ranges, plots=plots)
1389 self._init_axes(plot)
1390 else:
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:487, in ElementPlot._init_plot(self, key, element, plots, ranges)
480 """
481 Initializes Bokeh figure to draw Element into and sets basic
482 figure and axis attributes including axes types, labels,
483 titles and plot height and width.
484 """
485 subplots = list(self.subplots.values()) if self.subplots else []
--> 487 axis_types, labels, plot_ranges = self._axes_props(plots, subplots, element, ranges)
488 xlabel, ylabel, _ = labels
489 x_axis_type, y_axis_type = axis_types
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:398, in ElementPlot._axes_props(self, plots, subplots, element, ranges)
396 # Get the Element that determines the range and get_extents
397 range_el = el if self.batched and not isinstance(self, OverlayPlot) else element
--> 398 l, b, r, t = self.get_extents(range_el, ranges)
399 if self.invert_axes:
400 l, b, r, t = b, l, t, r
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/plotting/plot.py:73, in ProjectionPlot.get_extents(self, element, ranges, range_type)
71 extents = None
72 else:
---> 73 extents = project_extents(extents, element.crs, proj)
74 return (np.NaN,)*4 if not extents else extents
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/util.py:102, in project_extents(extents, src_proj, dest_proj, tol)
100 geom_in_src_proj = geom_clipped_to_dest_proj
101 try:
--> 102 geom_in_crs = dest_proj.project_geometry(geom_in_src_proj, src_proj)
103 except ValueError:
104 src_name =type(src_proj).__name__
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/cartopy/crs.py:808, in Projection.project_geometry(self, geometry, src_crs)
806 if not method_name:
807 raise ValueError(f'Unsupported geometry type {geom_type!r}')
--> 808 return getattr(self, method_name)(geometry, src_crs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/cartopy/crs.py:944, in Projection._project_polygon(self, polygon, src_crs)
942 is_ccw = True
943 else:
--> 944 is_ccw = polygon.exterior.is_ccw
945 # Project the polygon exterior/interior rings.
946 # Each source ring will result in either a ring, or one or more
947 # lines.
948 rings = []
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/geometry/polygon.py:99, in LinearRing.is_ccw(self)
96 @property
97 def is_ccw(self):
98 """True is the ring is oriented counter clock-wise"""
---> 99 return bool(self.impl['is_ccw'](self))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/algorithms/cga.py:14, in is_ccw_impl.<locals>.is_ccw_op(ring)
13 def is_ccw_op(ring):
---> 14 return signed_area(ring) >= 0.0
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/algorithms/cga.py:7, in signed_area(ring)
3 """Return the signed area enclosed by a ring in linear time using the
4 algorithm at: https://web.archive.org/web/20080209143651/http://cgafaq.info:80/wiki/Polygon_Area
5 """
6 xs, ys = ring.coords.xy
----> 7 xs.append(xs[1])
8 ys.append(ys[1])
9 return sum(xs[i]*(ys[i+1]-ys[i-1]) for i in range(1, len(ring.coords)))/2.0
IndexError: array index out of range
```
</details>
<details> <summary> Traceback for case 4 </summary>
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/IPython/core/formatters.py:973, in MimeBundleFormatter.__call__(self, obj, include, exclude)
970 method = get_real_method(obj, self.print_method)
972 if method is not None:
--> 973 return method(include=include, exclude=exclude)
974 return None
975 else:
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/core/dimension.py:1294, in Dimensioned._repr_mimebundle_(self, include, exclude)
1287 def _repr_mimebundle_(self, include=None, exclude=None):
1288 """
1289 Resolves the class hierarchy for the class rendering the
1290 object using any display hooks registered on Store.display
1291 hooks. The output of all registered display_hooks is then
1292 combined and returned.
1293 """
-> 1294 return Store.render(self)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/core/options.py:1426, in Store.render(cls, obj)
1424 data, metadata = {}, {}
1425 for hook in hooks:
-> 1426 ret = hook(obj)
1427 if ret is None:
1428 continue
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:277, in pprint_display(obj)
275 if not ip.display_formatter.formatters['text/plain'].pprint:
276 return None
--> 277 return display(obj, raw_output=True)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:247, in display(obj, raw_output, **kwargs)
245 elif isinstance(obj, (CompositeOverlay, ViewableElement)):
246 with option_state(obj):
--> 247 output = element_display(obj)
248 elif isinstance(obj, (Layout, NdLayout, AdjointLayout)):
249 with option_state(obj):
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:141, in display_hook.<locals>.wrapped(element)
139 try:
140 max_frames = OutputSettings.options['max_frames']
--> 141 mimebundle = fn(element, max_frames=max_frames)
142 if mimebundle is None:
143 return {}, {}
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:187, in element_display(element, max_frames)
184 if type(element) not in Store.registry[backend]:
185 return None
--> 187 return render(element)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:68, in render(obj, **kwargs)
65 if renderer.fig == 'pdf':
66 renderer = renderer.instance(fig='png')
---> 68 return renderer.components(obj, **kwargs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:398, in Renderer.components(self, obj, fmt, comm, **kwargs)
395 embed = (not (dynamic or streams or self.widget_mode == 'live') or config.embed)
397 if embed or config.comms == 'default':
--> 398 return self._render_panel(plot, embed, comm)
399 return self._render_ipywidget(plot)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:405, in Renderer._render_panel(self, plot, embed, comm)
403 doc = Document()
404 with config.set(embed=embed):
--> 405 model = plot.layout._render_model(doc, comm)
406 if embed:
407 return render_model(model, comm)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/viewable.py:505, in Renderable._render_model(self, doc, comm)
503 if comm is None:
504 comm = state._comm_manager.get_server_comm()
--> 505 model = self.get_root(doc, comm)
507 if config.embed:
508 embed_state(self, model, doc,
509 json=config.embed_json,
510 json_prefix=config.embed_json_prefix,
511 save_path=config.embed_save_path,
512 load_path=config.embed_load_path,
513 progress=False)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/viewable.py:556, in Renderable.get_root(self, doc, comm, preprocess)
539 """
540 Returns the root model and applies pre-processing hooks
541
(...)
553 Returns the bokeh model corresponding to this panel object
554 """
555 doc = init_doc(doc)
--> 556 root = self._get_model(doc, comm=comm)
557 if preprocess:
558 self._preprocess(root)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/pane/holoviews.py:265, in HoloViews._get_model(self, doc, root, parent, comm)
263 plot = self.object
264 else:
--> 265 plot = self._render(doc, comm, root)
267 plot.pane = self
268 backend = plot.renderer.backend
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/panel/pane/holoviews.py:342, in HoloViews._render(self, doc, comm, root)
339 if comm:
340 kwargs['comm'] = comm
--> 342 return renderer.get_plot(self.object, **kwargs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/renderer.py:70, in BokehRenderer.get_plot(self_or_cls, obj, doc, renderer, **kwargs)
63 @bothmethod
64 def get_plot(self_or_cls, obj, doc=None, renderer=None, **kwargs):
65 """
66 Given a HoloViews Viewable return a corresponding plot instance.
67 Allows supplying a document attach the plot to, useful when
68 combining the bokeh model with another plot.
69 """
---> 70 plot = super().get_plot(obj, doc, renderer, **kwargs)
71 if plot.document is None:
72 plot.document = Document() if self_or_cls.notebook_context else curdoc()
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/renderer.py:240, in Renderer.get_plot(self_or_cls, obj, doc, renderer, comm, **kwargs)
237 defaults = [kd.default for kd in plot.dimensions]
238 init_key = tuple(v if d is None else d for v, d in
239 zip(plot.keys[0], defaults))
--> 240 plot.update(init_key)
241 else:
242 plot = obj
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/plot.py:948, in DimensionedPlot.update(self, key)
946 def update(self, key):
947 if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
--> 948 return self.initialize_plot()
949 item = self.__getitem__(key)
950 self.traverse(lambda x: setattr(x, '_updated', True))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/plotting/bokeh/plot.py:111, in GeoPlot.initialize_plot(self, ranges, plot, plots, source)
109 def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
110 opts = {} if isinstance(self, HvOverlayPlot) else {'source': source}
--> 111 fig = super(GeoPlot, self).initialize_plot(ranges, plot, plots, **opts)
112 if self.geographic and self.show_bounds and not self.overlaid:
113 from . import GeoShapePlot
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:1388, in ElementPlot.initialize_plot(self, ranges, plot, plots, source)
1386 # Initialize plot, source and glyph
1387 if plot is None:
-> 1388 plot = self._init_plot(key, style_element, ranges=ranges, plots=plots)
1389 self._init_axes(plot)
1390 else:
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:487, in ElementPlot._init_plot(self, key, element, plots, ranges)
480 """
481 Initializes Bokeh figure to draw Element into and sets basic
482 figure and axis attributes including axes types, labels,
483 titles and plot height and width.
484 """
485 subplots = list(self.subplots.values()) if self.subplots else []
--> 487 axis_types, labels, plot_ranges = self._axes_props(plots, subplots, element, ranges)
488 xlabel, ylabel, _ = labels
489 x_axis_type, y_axis_type = axis_types
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:398, in ElementPlot._axes_props(self, plots, subplots, element, ranges)
396 # Get the Element that determines the range and get_extents
397 range_el = el if self.batched and not isinstance(self, OverlayPlot) else element
--> 398 l, b, r, t = self.get_extents(range_el, ranges)
399 if self.invert_axes:
400 l, b, r, t = b, l, t, r
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/plotting/plot.py:73, in ProjectionPlot.get_extents(self, element, ranges, range_type)
71 extents = None
72 else:
---> 73 extents = project_extents(extents, element.crs, proj)
74 return (np.NaN,)*4 if not extents else extents
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/geoviews/util.py:102, in project_extents(extents, src_proj, dest_proj, tol)
100 geom_in_src_proj = geom_clipped_to_dest_proj
101 try:
--> 102 geom_in_crs = dest_proj.project_geometry(geom_in_src_proj, src_proj)
103 except ValueError:
104 src_name =type(src_proj).__name__
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/cartopy/crs.py:808, in Projection.project_geometry(self, geometry, src_crs)
806 if not method_name:
807 raise ValueError(f'Unsupported geometry type {geom_type!r}')
--> 808 return getattr(self, method_name)(geometry, src_crs)
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/cartopy/crs.py:944, in Projection._project_polygon(self, polygon, src_crs)
942 is_ccw = True
943 else:
--> 944 is_ccw = polygon.exterior.is_ccw
945 # Project the polygon exterior/interior rings.
946 # Each source ring will result in either a ring, or one or more
947 # lines.
948 rings = []
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/geometry/polygon.py:99, in LinearRing.is_ccw(self)
96 @property
97 def is_ccw(self):
98 """True is the ring is oriented counter clock-wise"""
---> 99 return bool(self.impl['is_ccw'](self))
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/algorithms/cga.py:14, in is_ccw_impl.<locals>.is_ccw_op(ring)
13 def is_ccw_op(ring):
---> 14 return signed_area(ring) >= 0.0
File ~/miniconda3/envs/hvplot-env/lib/python3.10/site-packages/shapely/algorithms/cga.py:7, in signed_area(ring)
3 """Return the signed area enclosed by a ring in linear time using the
4 algorithm at: https://web.archive.org/web/20080209143651/http://cgafaq.info:80/wiki/Polygon_Area
5 """
6 xs, ys = ring.coords.xy
----> 7 xs.append(xs[1])
8 ys.append(ys[1])
9 return sum(xs[i]*(ys[i+1]-ys[i-1]) for i in range(1, len(ring.coords)))/2.0
IndexError: array index out of range
```
</details>
#### Screenshots or screencasts of the bug in action
Case 1 (which works):

Case 2 (set same xlim/ylim as case 1, but remove map tile):

Setting ylim works if geo=False (here ylim=(10, 72):

| I can get it to work by moving the limits outside the initial `hvplot()` and into `.opts()`. Like this for case 3:
``` python
airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2).opts(
xlim=(-180, -30), ylim=(10, 72))
```
When having tiles enabled, there is a change from a `Points` plot to an `Overlay` plot which probably makes it go down a different code path.
I think the bug is in hvplot and not in Geoviews/Holoviews.
I just ran into this same bug and I assumed that it had to do with some projection that was being applied unevenly. When I converted the dataframe and bounding box to Web Mercator `df.to_crs(epsg=3857)` it worked as expected. | 2023-04-04T15:57:33 |
holoviz/hvplot | 1,055 | holoviz__hvplot-1055 | [
"842"
] | 093d0d940bbf131fccea49ee9dab5550c1202225 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -2243,9 +2243,12 @@ def _geom_plot(self, x=None, y=None, data=None, kind='polygons'):
for opts_ in [cur_opts, compat_opts]:
if 'color' in opts_ and opts_['color'] in vdims:
opts_['color'] = hv.dim(opts_['color'])
+ # if there is nothing to put in hover, turn it off
+ if 'tools' in opts_ and kind in ["polygons", "paths"] and not vdims:
+ opts_["tools"] = [t for t in opts_["tools"] if t != "hover"]
if self.geo: params['crs'] = self.crs
if self.by:
- obj = Dataset(data).to(element, kdims, vdims, self.by, **params)
+ obj = Dataset(data, self.by+kdims, vdims).to(element, kdims, vdims, self.by, **params)
if self.subplots:
obj = obj.layout(sort=False)
else:
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -7,7 +7,6 @@
import numpy as np
import pandas as pd
import holoviews as hv
-import pytest
from hvplot.util import proj_to_cartopy
@@ -273,11 +272,14 @@ def test_points_hover_cols_with_c_set_to_name(self):
opts = hv.Store.lookup_options('bokeh', points, 'style').kwargs
assert opts['color'] == 'name'
- @pytest.mark.xfail
def test_points_hover_cols_with_by_set_to_name(self):
points = self.cities.hvplot(by='name')
- assert points.kdims == ['x', 'y']
- assert points.vdims == ['name']
+ assert isinstance(points, hv.core.overlay.NdOverlay)
+ assert points.kdims == ['name']
+ assert points.vdims == []
+ for element in points.values():
+ assert element.kdims == ['x', 'y']
+ assert element.vdims == []
def test_points_project_xlim_and_ylim(self):
points = self.cities.hvplot(geo=True, xlim=(-10, 10), ylim=(-20, -10))
@@ -285,10 +287,11 @@ def test_points_project_xlim_and_ylim(self):
assert opts['xlim'] == (-10, 10)
assert opts['ylim'] == (-20, -10)
- @pytest.mark.xfail(
- reason='Waiting for upstream fix https://github.com/holoviz/holoviews/pull/5325',
- raises=KeyError,
- )
def test_polygons_by_subplots(self):
polygons = self.polygons.hvplot(geo=True, by="name", subplots=True)
assert isinstance(polygons, hv.core.layout.NdLayout)
+
+ def test_polygons_turns_off_hover_when_there_are_no_fields_to_include(self):
+ polygons = self.polygons.hvplot(geo=True)
+ opts = hv.Store.lookup_options('bokeh', polygons, 'plot').kwargs
+ assert 'hover' not in opts.get('tools')
| Plot a time-dependent series of geographic polygons from GeoDataframe using hvplot and groupby
#### ALL software version info
hvplot 0.8.0 pyh6c4a22f_0 conda-forge
bokeh 2.4.3 pyhd8ed1ab_3 conda-forge
python 3.10.5 h582c2e5_0_cpython conda-forge
jupyter 1.0.0 py310hff52083_7 conda-forge
jupyterlab 3.4.5 pyhd8ed1ab_0 conda-forge
OS: Rocky Linux 8.5 (Green Obsidian) - remote
Browser: Mozilla Firefox v82.0.2 (on Windows 10 18.09)
#### Description of expected behavior and the observed behavior
I am trying to plot a time series of maps with geometric polygons from a GeoDataframe. The GeoDataframe holds a `reference_time` next to the `geometry` with the polygons. So I am using the `groupby` argument of hvplot on the `reference_time` variable.
The plotting work with `geo=False`. However it crashes with an error with `geo=True`.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import geopandas
import hvplot.pandas
json_str = '{"type": "FeatureCollection", "features": [{"id": "292", "type": "Feature", "properties": {"reference_time": "2021-08-03T06:00:00"}, "geometry": {"type": "Polygon", "coordinates": [[[6.734387964482268, 50.096981436429836], [6.715702760798321, 50.08350087669019], [6.7164606556881665, 50.07500090230046], [6.726932700709735, 50.06899131283367], [6.727498990205583, 50.062616788193964], [6.730988279570088, 50.06061342371476], [6.7311767879985, 50.05848865569604], [6.748616494592647, 50.048470246593965], [6.749553435189103, 50.0378472878539], [6.7565245571179045, 50.033839329129954], [6.822494229902199, 50.036222772078176], [6.829457117826622, 50.03220914103082], [6.832755487784024, 50.032326768682196], [6.836236195568452, 50.03031965546154], [6.839534418239325, 50.03043703037027], [6.843014579155103, 50.02842967525436], [6.846312654459997, 50.02854679744998], [6.849792268503478, 50.026539200507266], [6.853090196362978, 50.026656070019335], [6.856569263530571, 50.024648231318345], [6.882952088134839, 50.02557774446169], [6.889189529279116, 50.03005826409537], [6.888113617274682, 50.04280701271642], [6.884634555008016, 50.04481630967492], [6.8747356652759315, 50.04446880452035], [6.8712557178674984, 50.04647759882098], [6.8646562244125935, 50.04624505628612], [6.861175560726028, 50.048253478263526], [6.854575813313595, 50.04802018550571], [6.847612674584511, 50.05203617315432], [6.847430905745282, 50.05416109906186], [6.843948557379587, 50.056168944871104], [6.843766557088876, 50.05829391731779], [6.833316605440537, 50.06431684307392], [6.83001537252634, 50.06419921302388], [6.826531128284592, 50.066206501150646], [6.806723163146417, 50.065497367264776], [6.799751515201596, 50.069510039897715], [6.799566902060483, 50.07163509569316], [6.789105952186076, 50.077653323156746], [6.788550085538567, 50.0840288255632], [6.785061709927709, 50.086034734508154], [6.784876167120653, 50.088159975675374], [6.781387200638471, 50.09016579161379], [6.781201425169829, 
50.092291079083346], [6.777711867667758, 50.094296801962315], [6.777525859439406, 50.09642213571793], [6.77403571076884, 50.098427765484495], [6.734387964482268, 50.096981436429836]]]}}, {"id": "329", "type": "Feature", "properties": {"reference_time": "2021-08-03T06:15:00"}, "geometry": {"type": "Polygon", "coordinates": [[[6.53500332673663, 50.17262838814129], [6.525676395793689, 50.165864441490726], [6.509130066483161, 50.165217619771184], [6.493598140164972, 50.15393999616791], [6.490290012794701, 50.15380969662393], [6.487184983165786, 50.151553827645586], [6.483877092942987, 50.151423292112206], [6.477668602593298, 50.1469110968444], [6.477872177168287, 50.14478575952954], [6.474768669293165, 50.142529583182], [6.474972387017253, 50.140404309639585], [6.468766845311061, 50.135891799690036], [6.452234991265688, 50.135234543767545], [6.449133859035186, 50.1329776075923], [6.449954293080332, 50.12447741050391], [6.443754499017571, 50.119963601072335], [6.440449599993195, 50.119831480170184], [6.437350486271928, 50.117574347547794], [6.434045824577542, 50.11744199108877], [6.430947304193949, 50.115184668906], [6.4276428798783325, 50.115052076929956], [6.424544952781937, 50.11279456525802], [6.421240765893827, 50.11266173780486], [6.421447405298261, 50.11053697719101], [6.415253448962403, 50.10602157197326], [6.415460396331883, 50.10389691192589], [6.412364153209042, 50.101639131494146], [6.409061010457461, 50.101505861479545], [6.405965360262447, 50.099247891817576], [6.406172804999734, 50.09712334179774], [6.403077699809985, 50.09486533258377], [6.406587622735568, 50.092874323519545], [6.406794995742371, 50.0907498552632], [6.41030432603753, 50.088758750082896], [6.413606331103893, 50.08889196357895], [6.417115116616857, 50.08690061275905], [6.433624670504715, 50.08756418677074], [6.452208535577216, 50.101104117299926], [6.458815082702433, 50.10136733427171], [6.461914147828928, 50.10362349560114], [6.465217670042701, 50.10375480739412], [6.4683173279463855, 
50.10601077904516], [6.468113416590205, 50.108135588575635], [6.480516509589828, 50.1171588550765], [6.483821311675101, 50.11728948785017], [6.48692324762389, 50.11954499954339], [6.506753830987399, 50.120325544782304], [6.516065757468713, 50.12708957980573], [6.519371575991884, 50.127218901978026], [6.522476524780929, 50.129473313114914], [6.529088657857204, 50.129731363130404], [6.535300548774286, 50.13423950078656], [6.5340989899325645, 50.146991888717444], [6.537206508245576, 50.149246060243634], [6.537006334657516, 50.15137159007232], [6.540114400713651, 50.1536257208377], [6.539714318626019, 50.15787693505321], [6.542823099854781, 50.16013106138122], [6.541823326839114, 50.170759755944204], [6.538313303264706, 50.17275704375211], [6.53500332673663, 50.17262838814129]]]}}]}'
small_dataset = geopandas.read_file(json_str)
small_dataset.hvplot(geo=False, groupby="reference_time") # works
small_dataset.hvplot(geo=True, groupby="reference_time") # does not work
```
#### Stack traceback and/or browser JavaScript console output
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/IPython/core/formatters.py:973, in MimeBundleFormatter.__call__(self, obj, include, exclude)
970 method = get_real_method(obj, self.print_method)
972 if method is not None:
--> 973 return method(include=include, exclude=exclude)
974 return None
975 else:
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/core/dimension.py:1293, in Dimensioned._repr_mimebundle_(self, include, exclude)
1286 def _repr_mimebundle_(self, include=None, exclude=None):
1287 """
1288 Resolves the class hierarchy for the class rendering the
1289 object using any display hooks registered on Store.display
1290 hooks. The output of all registered display_hooks is then
1291 combined and returned.
1292 """
-> 1293 return Store.render(self)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/core/options.py:1418, in Store.render(cls, obj)
1416 data, metadata = {}, {}
1417 for hook in hooks:
-> 1418 ret = hook(obj)
1419 if ret is None:
1420 continue
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:277, in pprint_display(obj)
275 if not ip.display_formatter.formatters['text/plain'].pprint:
276 return None
--> 277 return display(obj, raw_output=True)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:253, in display(obj, raw_output, **kwargs)
251 elif isinstance(obj, (HoloMap, DynamicMap)):
252 with option_state(obj):
--> 253 output = map_display(obj)
254 elif isinstance(obj, Plot):
255 output = render(obj)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:141, in display_hook.<locals>.wrapped(element)
139 try:
140 max_frames = OutputSettings.options['max_frames']
--> 141 mimebundle = fn(element, max_frames=max_frames)
142 if mimebundle is None:
143 return {}, {}
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:201, in map_display(vmap, max_frames)
198 max_frame_warning(max_frames)
199 return None
--> 201 return render(vmap)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:68, in render(obj, **kwargs)
65 if renderer.fig == 'pdf':
66 renderer = renderer.instance(fig='png')
---> 68 return renderer.components(obj, **kwargs)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/plotting/renderer.py:398, in Renderer.components(self, obj, fmt, comm, **kwargs)
395 embed = (not (dynamic or streams or self.widget_mode == 'live') or config.embed)
397 if embed or config.comms == 'default':
--> 398 return self._render_panel(plot, embed, comm)
399 return self._render_ipywidget(plot)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/plotting/renderer.py:405, in Renderer._render_panel(self, plot, embed, comm)
403 doc = Document()
404 with config.set(embed=embed):
--> 405 model = plot.layout._render_model(doc, comm)
406 if embed:
407 return render_model(model, comm)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/viewable.py:507, in Renderable._render_model(self, doc, comm)
505 if comm is None:
506 comm = state._comm_manager.get_server_comm()
--> 507 model = self.get_root(doc, comm)
509 if config.embed:
510 embed_state(self, model, doc,
511 json=config.embed_json,
512 json_prefix=config.embed_json_prefix,
513 save_path=config.embed_save_path,
514 load_path=config.embed_load_path,
515 progress=False)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/viewable.py:558, in Renderable.get_root(self, doc, comm, preprocess)
541 """
542 Returns the root model and applies pre-processing hooks
543
(...)
555 Returns the bokeh model corresponding to this panel object
556 """
557 doc = init_doc(doc)
--> 558 root = self._get_model(doc, comm=comm)
559 if preprocess:
560 self._preprocess(root)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/pane/holoviews.py:272, in HoloViews._get_model(self, doc, root, parent, comm)
269 kwargs = {p: v for p, v in self.param.values().items()
270 if p in Layoutable.param and p != 'name'}
271 child_pane = self._get_pane(backend, state, **kwargs)
--> 272 self._update_plot(plot, child_pane)
273 model = child_pane._get_model(doc, root, parent, comm)
274 if ref in self._plots:
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/panel/pane/holoviews.py:208, in HoloViews._update_plot(self, plot, pane)
206 if plot.comm or state._unblocked(plot.document):
207 with unlocked():
--> 208 plot.update(key)
209 if plot.comm and 'embedded' not in plot.root.tags:
210 plot.push()
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/plotting/plot.py:949, in DimensionedPlot.update(self, key)
947 if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
948 return self.initialize_plot()
--> 949 item = self.__getitem__(key)
950 self.traverse(lambda x: setattr(x, '_updated', True))
951 return item
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/plotting/plot.py:435, in DimensionedPlot.__getitem__(self, frame)
433 if not isinstance(frame, tuple):
434 frame = self.keys[frame]
--> 435 self.update_frame(frame)
436 return self.state
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/holoviews/plotting/bokeh/element.py:1526, in ElementPlot.update_frame(self, key, ranges, plot, element)
1523 self._updated = True
1525 if 'hover' in self.handles:
-> 1526 self._update_hover(element)
1527 if 'cds' in self.handles:
1528 cds = self.handles['cds']
File /media/users/user/opt/miniconda3/envs/holoviz/lib/python3.10/site-packages/geoviews/plotting/bokeh/plot.py:161, in GeoPlot._update_hover(self, element)
158 if 'hv_created' in hover.tags:
159 tooltips = [(ttp.pprint_label, '@{%s}' % dimension_sanitizer(ttp.name))
160 if isinstance(ttp, Dimension) else ttp for ttp in tooltips]
--> 161 if self.geographic and tooltips[2:] == hover.tooltips[2:]:
162 return
163 tooltips = [(l, t+'{custom}' if t in hover.formatters else t) for l, t in tooltips]
TypeError: 'NoneType' object is not subscriptable
:DynamicMap [reference_time]
:Polygons [Longitude,Latitude]
```
#### Screenshots or screencasts of the bug in action
| Thanks for reporting this issue, it looks like a bug indeed. I've seen you posted it first on [StackOverflow](https://stackoverflow.com/questions/73449326/plot-a-time-dependent-series-of-geographic-polygons-from-geodataframe-using-hvpl). The HoloViz community is more active on [Discourse](https://discourse.holoviz.org/), this is the recommended place for asking questions :)
As a workaround, if you pass `hover_cols="all"` you can get passed this error. The issue is that hvplot is trying to be too clever about which columns to include in the input for the plot.
```python
small_dataset.hvplot(geo=True, groupby="reference_time", hover_cols=True) # works
```
I have a fix for this. PR coming shortly. Just need to write a test.
Ok I had what I thought was a solution but it resulted in a test failure (yay tests!) so I ended up with a much more niche solution. There will be a geoviews PR as well. | 2023-04-06T19:34:00 |
holoviz/hvplot | 1,056 | holoviz__hvplot-1056 | [
"778"
] | 874e4d9835980b820aaa36802e1449f4ad0ee163 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -706,6 +706,10 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
kind = 'polygons'
elif geom_type in ('LineString', 'LineRing', 'Line'):
kind = 'paths'
+ # if only one arg is provided, treat it like color
+ if x is not None and y is None:
+ kwds['color'] = kwds.pop('color', kwds.pop('c', x))
+ x = None
elif isinstance(data, pd.DataFrame):
datatype = 'pandas'
self.data = data
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -261,6 +261,13 @@ def test_points_hover_cols_index_in_list(self):
assert points.kdims == ['x', 'y']
assert points.vdims == ['index']
+ def test_points_hover_cols_positional_arg_sets_color(self):
+ points = self.cities.hvplot('name')
+ assert points.kdims == ['x', 'y']
+ assert points.vdims == ['name']
+ opts = hv.Store.lookup_options('bokeh', points, 'style').kwargs
+ assert opts['color'] == 'name'
+
def test_points_hover_cols_with_c_set_to_name(self):
points = self.cities.hvplot(c='name')
assert points.kdims == ['x', 'y']
| Positional arguments to `.polygons`
I am in the pysal tutorial and noticed that the in matplotlib the positional arg on `gdf.plot` maps to the `c` arg. In hvplot _I think_ it maps to `x` but it seems like it gets dropped.
It seems to me that the only positional argument should be `c` OR positional arguments should not be allowed at all.

| 2023-04-06T21:15:06 |
|
holoviz/hvplot | 1,135 | holoviz__hvplot-1135 | [
"1090"
] | 8062d520c87b54d9d7899759741a5a42960e5272 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -446,20 +446,19 @@ def __init__(
"Projection must be defined as cartopy CRS or "
f"one of the following CRS string:\n {all_crs}")
- projection = projection or (ccrs.GOOGLE_MERCATOR if tiles else self.crs)
- if tiles and projection != ccrs.GOOGLE_MERCATOR:
+ self.output_projection = projection or (ccrs.GOOGLE_MERCATOR if tiles else self.crs)
+ if tiles and self.output_projection != ccrs.GOOGLE_MERCATOR:
raise ValueError(
"Tiles can only be used with output projection of "
"`cartopy.crs.GOOGLE_MERCATOR`. To get rid of this error "
"remove `projection=` or `tiles=`"
)
-
- if self.crs != projection:
+ if self.crs != projection and (xlim or ylim):
px0, py0, px1, py1 = ccrs.GOOGLE_MERCATOR.boundary.bounds
x0, x1 = xlim or (px0, px1)
y0, y1 = ylim or (py0, py1)
extents = (x0, y0, x1, y1)
- x0, y0, x1, y1 = project_extents(extents, self.crs, projection)
+ x0, y0, x1, y1 = project_extents(extents, self.crs, self.output_projection)
if xlim:
xlim = (x0, x1)
if ylim:
@@ -1363,6 +1362,10 @@ def method_wrapper(ds, x, y):
if self._dim_ranges.get('c', (None, None)) != (None, None):
style['clim'] = self._dim_ranges['c']
+ if self.geo and self.crs != self.output_projection:
+ import geoviews as gv
+ obj = gv.project(obj, projection=self.output_projection)
+
processed = operation(obj, **opts)
if self.dynspread:
@@ -1389,7 +1392,7 @@ def _apply_layers(self, obj):
param.main.param.warning(
"coastline scale of %s not recognized, must be one "
"'10m', '50m' or '110m'." % self.coastline)
- obj = obj * coastline
+ obj = obj * coastline.opts(projection=self.output_projection)
if self.features:
import geoviews as gv
@@ -1411,32 +1414,46 @@ def _apply_layers(self, obj):
else:
feature_obj = feature_obj.opts(scale=scale)
if feature_obj.group in ["Land", "Ocean"]:
- obj = feature_obj * obj # Underlay land/ocean
+ # Underlay land/ocean
+ obj = feature_obj.opts(projection=self.output_projection) * obj
else:
- obj = obj * feature_obj # overlay everything else
-
- if self.tiles:
- tile_source = 'EsriImagery' if self.tiles == 'ESRI' else self.tiles
- warning = ("{} tiles not recognized, must be one of: {} or a tile object".format(tile_source, sorted(hv.element.tile_sources)))
- if tile_source is True:
- tiles = hv.element.tiles.OSM()
- elif tile_source in hv.element.tile_sources.keys():
- tiles = hv.element.tile_sources[tile_source]()
- elif tile_source in hv.element.tile_sources.values():
- tiles = tile_source()
- elif isinstance(tile_source, hv.element.tiles.Tiles):
- tiles = tile_source
- elif self.geo:
- from geoviews.element import WMTS
- if isinstance(tile_source, WMTS):
- tiles = tile_source
- else:
- param.main.param.warning(warning)
- else:
- param.main.param.warning(warning)
+ # overlay everything else
+ obj = obj * feature_obj.opts(projection=self.output_projection)
+
+ if self.tiles and not self.geo:
+ tiles = self._get_tiles(
+ self.tiles,
+ hv.element.tile_sources,
+ hv.element.tiles.Tiles
+ )
+ obj = tiles * obj
+ elif self.tiles and self.geo:
+ import geoviews as gv
+ tiles = self._get_tiles(
+ self.tiles,
+ gv.tile_sources.tile_sources,
+ (gv.element.WMTS, hv.element.tiles.Tiles),
+ )
obj = tiles * obj
return obj
+ def _get_tiles(self, source, sources, types):
+ tile_source = 'EsriImagery' if self.tiles == 'ESRI' else self.tiles
+ if tile_source is True:
+ tiles = sources["OSM"]()
+ elif tile_source in sources:
+ tiles = sources[tile_source]()
+ elif tile_source in sources.values():
+ tiles = tile_source()
+ elif isinstance(tile_source, types):
+ tiles = tile_source
+ else:
+ msg = (
+ f"{tile_source} tiles not recognized, must be one of: {sorted(sources)} or a tile object"
+ )
+ raise ValueError(msg)
+ return tiles
+
def _merge_redim(self, ranges, attr='range'):
redim = dict(self._redim)
for k, r in ranges.items():
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -111,6 +111,43 @@ def test_plot_with_projection_raises_an_error_when_tiles_set(self):
with self.assertRaisesRegex(ValueError, "Tiles can only be used with output projection"):
da.hvplot.image('x', 'y', crs=self.crs, projection='Robinson', tiles=True)
+ def test_overlay_with_projection(self):
+ # Regression test for https://github.com/holoviz/hvplot/issues/1090
+ df = pd.DataFrame({"lon": [0, 10], "lat": [40, 50], "v": [0, 1]})
+
+ plot1 = df.hvplot.points(x="lon", y="lat", s=200, c="y", geo=True, tiles="CartoLight")
+ plot2 = df.hvplot.points(x="lon", y="lat", c="v", geo=True)
+
+ # This should work without erroring
+ plot = plot1 * plot2
+ hv.renderer("bokeh").get_plot(plot)
+
+ def test_geo_with_rasterize(self):
+ import xarray as xr
+ import cartopy.crs as ccrs
+ import geoviews as gv
+ try:
+ from holoviews.operation.datashader import rasterize
+ except:
+ raise SkipTest('datashader not available')
+
+ ds = xr.tutorial.open_dataset("air_temperature")
+ hvplot_output = ds.isel(time=0).hvplot.points(
+ "lon",
+ "lat",
+ crs=ccrs.PlateCarree(),
+ projection=ccrs.LambertConformal(),
+ rasterize=True,
+ dynamic=False,
+ aggregator="max",
+ )
+
+ p1 = gv.Points(ds.isel(time=0), kdims=["lon", "lat"], crs=ccrs.PlateCarree())
+ p2 = gv.project(p1, projection=ccrs.LambertConformal())
+ expected = rasterize(p2, dynamic=False, aggregator="max")
+
+ xr.testing.assert_allclose(hvplot_output.data, expected.data)
+
class TestGeoAnnotation(TestCase):
@@ -141,27 +178,55 @@ def test_plot_with_coastline_sets_geo_by_default(self):
def test_plot_with_coastline_scale(self):
plot = self.df.hvplot.points('x', 'y', geo=True, coastline='10m')
opts = plot.get(1).opts.get('plot')
- self.assertEqual(opts.kwargs, {'scale': '10m'})
+ assert opts.kwargs["scale"] == '10m'
def test_plot_with_tiles(self):
- plot = self.df.hvplot.points('x', 'y', geo=True, tiles=True)
+ plot = self.df.hvplot.points('x', 'y', geo=False, tiles=True)
self.assertEqual(len(plot), 2)
self.assertIsInstance(plot.get(0), hv.Tiles)
self.assertIn('openstreetmap', plot.get(0).data)
+ def test_plot_with_tiles_with_geo(self):
+ import geoviews as gv
+
+ plot = self.df.hvplot.points('x', 'y', geo=True, tiles=True)
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), gv.element.WMTS)
+ self.assertIn('openstreetmap', plot.get(0).data)
+
def test_plot_with_specific_tiles(self):
- plot = self.df.hvplot.points('x', 'y', geo=True, tiles='ESRI')
+ plot = self.df.hvplot.points('x', 'y', geo=False, tiles='ESRI')
self.assertEqual(len(plot), 2)
self.assertIsInstance(plot.get(0), hv.Tiles)
self.assertIn('ArcGIS', plot.get(0).data)
+ def test_plot_with_specific_tiles_geo(self):
+ import geoviews as gv
+ plot = self.df.hvplot.points('x', 'y', geo=True, tiles='ESRI')
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), gv.element.WMTS)
+ self.assertIn('ArcGIS', plot.get(0).data)
+
def test_plot_with_specific_tile_class(self):
- plot = self.df.hvplot.points('x', 'y', geo=True, tiles=hv.element.tiles.EsriImagery)
+ plot = self.df.hvplot.points('x', 'y', geo=False, tiles=hv.element.tiles.EsriImagery)
self.assertEqual(len(plot), 2)
self.assertIsInstance(plot.get(0), hv.Tiles)
self.assertIn('ArcGIS', plot.get(0).data)
+ def test_plot_with_specific_tile_class_with_geo(self):
+ import geoviews as gv
+ plot = self.df.hvplot.points('x', 'y', geo=True, tiles=gv.tile_sources.EsriImagery)
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), gv.element.WMTS)
+ self.assertIn('ArcGIS', plot.get(0).data)
+
def test_plot_with_specific_tile_obj(self):
+ plot = self.df.hvplot.points('x', 'y', geo=False, tiles=hv.element.tiles.EsriImagery())
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), hv.Tiles)
+ self.assertIn('ArcGIS', plot.get(0).data)
+
+ def test_plot_with_specific_tile_obj_with_geo(self):
plot = self.df.hvplot.points('x', 'y', geo=True, tiles=hv.element.tiles.EsriImagery())
self.assertEqual(len(plot), 2)
self.assertIsInstance(plot.get(0), hv.Tiles)
@@ -294,10 +359,16 @@ def test_points_hover_cols_with_by_set_to_name(self):
assert element.vdims == []
def test_points_project_xlim_and_ylim(self):
+ points = self.cities.hvplot(geo=False, xlim=(-10, 10), ylim=(-20, -10))
+ opts = hv.Store.lookup_options('bokeh', points, 'plot').options
+ np.testing.assert_equal(opts['xlim'], (-10, 10))
+ np.testing.assert_equal(opts['ylim'], (-20, -10))
+
+ def test_points_project_xlim_and_ylim_with_geo(self):
points = self.cities.hvplot(geo=True, xlim=(-10, 10), ylim=(-20, -10))
opts = hv.Store.lookup_options('bokeh', points, 'plot').options
- assert opts['xlim'] == (-10, 10)
- assert opts['ylim'] == (-20, -10)
+ np.testing.assert_allclose(opts['xlim'], (-10, 10))
+ np.testing.assert_allclose(opts['ylim'], (-20, -10))
def test_polygons_by_subplots(self):
polygons = self.polygons.hvplot(geo=True, by="name", subplots=True)
| Tile + multiple Point breaks
#### ALL software version info
- geoviews 1.10.0
- hvplot 0.8.4
- holoviews 1.16.2
#### Description of expected behavior and the observed behavior
Overlapping a tile plus multiple Points plots breaks.
This used to be working with earlier versions.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import pandas as pd
import hvplot.pandas
df = pd.DataFrame(dict(lon=[0, 10], lat=[40, 50], v=[0,1]))
(
df.hvplot.points(x="lon", y="lat", s=200, c="y", geo=True, tiles='CartoLight')
*df.hvplot.points(x="lon", y="lat", c="v", geo=True)
)
```
#### Stack traceback and/or browser JavaScript console output
```
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/IPython/core/formatters.py:974, in MimeBundleFormatter.__call__(self, obj, include, exclude)
971 method = get_real_method(obj, self.print_method)
973 if method is not None:
--> 974 return method(include=include, exclude=exclude)
975 return None
976 else:
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/core/dimension.py:1290, in Dimensioned._repr_mimebundle_(self, include, exclude)
1283 def _repr_mimebundle_(self, include=None, exclude=None):
1284 """
1285 Resolves the class hierarchy for the class rendering the
1286 object using any display hooks registered on Store.display
1287 hooks. The output of all registered display_hooks is then
1288 combined and returned.
1289 """
-> 1290 return Store.render(self)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/core/options.py:1425, in Store.render(cls, obj)
1423 data, metadata = {}, {}
1424 for hook in hooks:
-> 1425 ret = hook(obj)
1426 if ret is None:
1427 continue
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:280, in pprint_display(obj)
278 if not ip.display_formatter.formatters['text/plain'].pprint:
279 return None
--> 280 return display(obj, raw_output=True)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:248, in display(obj, raw_output, **kwargs)
246 elif isinstance(obj, (CompositeOverlay, ViewableElement)):
247 with option_state(obj):
--> 248 output = element_display(obj)
249 elif isinstance(obj, (Layout, NdLayout, AdjointLayout)):
250 with option_state(obj):
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:142, in display_hook.<locals>.wrapped(element)
140 try:
141 max_frames = OutputSettings.options['max_frames']
--> 142 mimebundle = fn(element, max_frames=max_frames)
143 if mimebundle is None:
144 return {}, {}
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:188, in element_display(element, max_frames)
185 if type(element) not in Store.registry[backend]:
186 return None
--> 188 return render(element)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/ipython/display_hooks.py:69, in render(obj, **kwargs)
66 if renderer.fig == 'pdf':
67 renderer = renderer.instance(fig='png')
---> 69 return renderer.components(obj, **kwargs)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/plotting/renderer.py:399, in Renderer.components(self, obj, fmt, comm, **kwargs)
396 embed = (not (dynamic or streams or self.widget_mode == 'live') or config.embed)
398 if embed or config.comms == 'default':
--> 399 return self._render_panel(plot, embed, comm)
400 return self._render_ipywidget(plot)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/plotting/renderer.py:406, in Renderer._render_panel(self, plot, embed, comm)
404 doc = Document()
405 with config.set(embed=embed):
--> 406 model = plot.layout._render_model(doc, comm)
407 if embed:
408 return render_model(model, comm)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/viewable.py:736, in Viewable._render_model(self, doc, comm)
734 if comm is None:
735 comm = state._comm_manager.get_server_comm()
--> 736 model = self.get_root(doc, comm)
738 if self._design and self._design.theme.bokeh_theme:
739 doc.theme = self._design.theme.bokeh_theme
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/layout/base.py:286, in Panel.get_root(self, doc, comm, preprocess)
282 def get_root(
283 self, doc: Optional[Document] = None, comm: Optional[Comm] = None,
284 preprocess: bool = True
285 ) -> Model:
--> 286 root = super().get_root(doc, comm, preprocess)
287 # ALERT: Find a better way to handle this
288 if hasattr(root, 'styles') and 'overflow-x' in root.styles:
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/viewable.py:658, in Renderable.get_root(self, doc, comm, preprocess)
656 wrapper = self._design._wrapper(self)
657 if wrapper is self:
--> 658 root = self._get_model(doc, comm=comm)
659 if preprocess:
660 self._preprocess(root)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/layout/base.py:170, in Panel._get_model(self, doc, root, parent, comm)
168 root = root or model
169 self._models[root.ref['id']] = (model, parent)
--> 170 objects, _ = self._get_objects(model, [], doc, root, comm)
171 props = self._get_properties(doc)
172 props[self._property_mapping['objects']] = objects
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/layout/base.py:155, in Panel._get_objects(self, model, old_objects, doc, root, comm)
153 else:
154 try:
--> 155 child = pane._get_model(doc, root, model, comm)
156 except RerenderError:
157 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/pane/holoviews.py:380, in HoloViews._get_model(self, doc, root, parent, comm)
378 plot = self.object
379 else:
--> 380 plot = self._render(doc, comm, root)
382 plot.pane = self
383 backend = plot.renderer.backend
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/panel/pane/holoviews.py:461, in HoloViews._render(self, doc, comm, root)
458 if comm:
459 kwargs['comm'] = comm
--> 461 return renderer.get_plot(self.object, **kwargs)
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/plotting/bokeh/renderer.py:70, in BokehRenderer.get_plot(self_or_cls, obj, doc, renderer, **kwargs)
63 @bothmethod
64 def get_plot(self_or_cls, obj, doc=None, renderer=None, **kwargs):
65 """
66 Given a HoloViews Viewable return a corresponding plot instance.
67 Allows supplying a document attach the plot to, useful when
68 combining the bokeh model with another plot.
69 """
---> 70 plot = super().get_plot(obj, doc, renderer, **kwargs)
71 if plot.document is None:
72 plot.document = Document() if self_or_cls.notebook_context else curdoc()
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/plotting/renderer.py:236, in Renderer.get_plot(self_or_cls, obj, doc, renderer, comm, **kwargs)
234 if isinstance(obj, AdjointLayout):
235 obj = Layout(obj)
--> 236 plot = self_or_cls.plotting_class(obj)(obj, renderer=renderer,
237 **plot_opts)
238 defaults = [kd.default for kd in plot.dimensions]
239 init_key = tuple(v if d is None else d for v, d in
240 zip(plot.keys[0], defaults))
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/geoviews/plotting/bokeh/plot.py:179, in GeoOverlayPlot.__init__(self, element, **params)
178 def __init__(self, element, **params):
--> 179 super().__init__(element, **params)
180 self.geographic = any(element.traverse(is_geographic, [_Element]))
181 if self.geographic:
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/geoviews/plotting/bokeh/plot.py:57, in GeoPlot.__init__(self, element, **params)
56 def __init__(self, element, **params):
---> 57 super().__init__(element, **params)
58 self.geographic = is_geographic(self.hmap.last)
59 if self.geographic and not isinstance(self.projection, (PlateCarree, Mercator)):
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/holoviews/plotting/plot.py:1535, in GenericOverlayPlot.__init__(self, overlay, ranges, batched, keys, group_counter, **params)
1533 def __init__(self, overlay, ranges=None, batched=True, keys=None, group_counter=None, **params):
1534 if 'projection' not in params:
-> 1535 params['projection'] = self._get_projection(overlay)
1537 super().__init__(overlay, ranges=ranges, keys=keys,
1538 batched=batched, **params)
1540 # Apply data collapse
File ~/.miniconda3/envs/pynsitu/lib/python3.10/site-packages/geoviews/plotting/plot.py:42, in ProjectionPlot._get_projection(self, obj)
39 custom_projs = [p for p in projections if p is not None]
41 if len(set([type(p) for p in custom_projs])) > 1:
---> 42 raise Exception("An axis may only be assigned one projection type")
43 elif custom_projs:
44 return custom_projs[0]
Exception: An axis may only be assigned one projection type
```
| A workaround is to add `tiles='CartoLight'` to both of the plots.
Thanks for the feeback !
I tried it earlier and it hides the first set of points unfortunately and is hence not satisfactory.
A bisect shows this regression was introduced in #1053.
For now, you should get the functionality back by downgrading it to 0.8.3.
Or adding: `projection=cartopy.crs.GOOGLE_MERCATOR` to both of the plots
The suggested fix works like a charm, thx a bunch @Hoxbro ! | 2023-09-14T15:32:00 |
holoviz/hvplot | 1,142 | holoviz__hvplot-1142 | [
"1104"
] | 69d19a28136f35948eb05c8eb52aed644f1b880d | diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -410,7 +410,7 @@ def __init__(self, df, **params):
cls.name.lower(): cls(df, explorer=self, **cparams)
for cls, cparams in controller_params.items()
}
- self.param.set_param(**self._controllers)
+ self.param.update(**self._controllers)
self.param.watch(self._plot, list(self.param))
for controller in self._controllers.values():
controller.param.watch(self._plot, list(controller.param))
@@ -478,7 +478,7 @@ def _plot(self, *events):
self._layout[1][1] = self._hvpane
self._alert.visible = False
except Exception as e:
- self._alert.param.set_param(
+ self._alert.param.update(
object=f'**Rendering failed with following error**: {e}',
visible=True
)
diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -135,11 +135,21 @@ def proj_to_cartopy(proj):
srs = proj.srs
if has_gdal:
- # this is more robust, as srs could be anything (espg, etc.)
- s1 = osr.SpatialReference()
- s1.ImportFromProj4(proj.srs)
- if s1.ExportToProj4():
- srs = s1.ExportToProj4()
+ import warnings
+ with warnings.catch_warnings():
+ # Avoiding this warning could be done by setting osr.UseExceptions(),
+ # except there might be a risk to break the code of users leveraging
+ # GDAL on their side or through other libraries. So we just silence it.
+ warnings.filterwarnings('ignore', category=FutureWarning, message=
+ r'Neither osr\.UseExceptions\(\) nor osr\.DontUseExceptions\(\) has '
+ r'been explicitly called\. In GDAL 4\.0, exceptions will be enabled '
+ 'by default'
+ )
+ # this is more robust, as srs could be anything (espg, etc.)
+ s1 = osr.SpatialReference()
+ s1.ImportFromProj4(proj.srs)
+ if s1.ExportToProj4():
+ srs = s1.ExportToProj4()
km_proj = {'lon_0': 'central_longitude',
'lat_0': 'central_latitude',
| diff --git a/hvplot/tests/testui.py b/hvplot/tests/testui.py
--- a/hvplot/tests/testui.py
+++ b/hvplot/tests/testui.py
@@ -23,7 +23,7 @@ def test_explorer_basic():
def test_explorer_settings():
explorer = hvplot.explorer(df)
- explorer.param.set_param(
+ explorer.param.update(
kind='scatter',
x='bill_length_mm',
y_multi=['bill_depth_mm'],
@@ -43,7 +43,7 @@ def test_explorer_settings():
def test_explorer_plot_code():
explorer = hvplot.explorer(df)
- explorer.param.set_param(
+ explorer.param.update(
kind='scatter',
x='bill_length_mm',
y_multi=['bill_depth_mm'],
@@ -62,7 +62,7 @@ def test_explorer_plot_code():
def test_explorer_hvplot():
explorer = hvplot.explorer(df)
- explorer.param.set_param(
+ explorer.param.update(
kind='scatter',
x='bill_length_mm',
y_multi=['bill_depth_mm'],
@@ -78,7 +78,7 @@ def test_explorer_hvplot():
def test_explorer_save(tmp_path):
explorer = hvplot.explorer(df)
- explorer.param.set_param(
+ explorer.param.update(
kind='scatter',
x='bill_length_mm',
y_multi=['bill_depth_mm'],
| GDAL 4.0 warning from osgeo
Running this:
```
import hvplot.xarray
import rioxarray as rxr
# Downloaded from: https://oin-hotosm.s3.amazonaws.com/5d7dad0becaf880008a9bc88/0/5d7dad0becaf880008a9bc89.tif
cog_url = "/home/shh/Downloads/5d7dad0becaf880008a9bc89.tif"
da = rxr.open_rasterio(cog_url)
plot = da.hvplot.rgb(x="x", y="y", rasterize=True, projection="Robinson")
plot
```
I'm getting this warning:
`/home/shh/miniconda3/envs/holoviz/lib/python3.11/site-packages/osgeo/osr.py:385: FutureWarning: Neither osr.UseExceptions() nor osr.DontUseExceptions() has been explicitly called. In GDAL 4.0, exceptions will be enabled by default.`
This should be looked into, and check which setting we should use.
| 2023-09-18T07:46:39 |
|
holoviz/hvplot | 1,163 | holoviz__hvplot-1163 | [
"1162"
] | e31afb07469b08da0f627368d476f35c48353c98 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -407,6 +407,7 @@ def __init__(
self.dynamic = dynamic
self.geo = any([geo, crs, global_extent, projection, project, coastline, features])
self.crs = self._process_crs(data, crs) if self.geo else None
+ self.output_projection = self.crs
self.project = project
self.coastline = coastline
self.features = features
@@ -585,7 +586,7 @@ def __init__(
if self.crs and global_extent:
plot_opts['global_extent'] = global_extent
if projection:
- plot_opts['projection'] = process_crs(projection)
+ plot_opts['projection'] = self.output_projection
title = title if title is not None else getattr(self, '_title', None)
if title is not None:
plot_opts['title'] = title
@@ -1264,10 +1265,8 @@ def method_wrapper(ds, x, y):
if self.crs and self.project:
# Apply projection before rasterizing
- import cartopy.crs as ccrs
- from geoviews import project
- projection = self._plot_opts.get('projection', ccrs.GOOGLE_MERCATOR)
- obj = project(obj, projection=projection)
+ import geoviews as gv
+ obj = gv.project(obj, projection=self.output_projection)
if not (self.datashade or self.rasterize or self.downsample):
layers = self._apply_layers(obj)
@@ -1362,10 +1361,6 @@ def method_wrapper(ds, x, y):
if self._dim_ranges.get('c', (None, None)) != (None, None):
style['clim'] = self._dim_ranges['c']
- if self.geo and self.crs != self.output_projection:
- import geoviews as gv
- obj = gv.project(obj, projection=self.output_projection)
-
processed = operation(obj, **opts)
if self.dynspread:
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -140,6 +140,7 @@ def test_geo_with_rasterize(self):
rasterize=True,
dynamic=False,
aggregator="max",
+ project=True,
)
p1 = gv.Points(ds.isel(time=0), kdims=["lon", "lat"], crs=ccrs.PlateCarree())
| Rasterized and projected RGB plot very slow or hanging
Running the code from https://github.com/holoviz/hvplot/issues/1104 on the main branch is either very very slow or hanging on my laptop:
```python
import hvplot.xarray
import rioxarray as rxr
# Downloaded from: https://oin-hotosm.s3.amazonaws.com/5d7dad0becaf880008a9bc88/0/5d7dad0becaf880008a9bc89.tif
cog_url = "/home/shh/Downloads/5d7dad0becaf880008a9bc89.tif"
da = rxr.open_rasterio(cog_url)
plot = da.hvplot.rgb(x="x", y="y", rasterize=True, projection="Robinson")
plot
```
| 2023-10-04T16:17:58 |
|
holoviz/hvplot | 1,247 | holoviz__hvplot-1247 | [
"1246"
] | 734c0cc922152ea6a0f3332bc2bf19f5430d7629 | diff --git a/hvplot/plotting/core.py b/hvplot/plotting/core.py
--- a/hvplot/plotting/core.py
+++ b/hvplot/plotting/core.py
@@ -1882,13 +1882,21 @@ def _get_converter(self, x=None, y=None, kind=None, **kwds):
for v in params.values()
if isinstance(v, (str, list))
]
+
columns = (
set(self._data.columns) & set(itertools.chain(*possible_columns))
) or {self._data.columns[0]}
+ if y is None:
+ # When y is not specified HoloViewsConverter finds all the numeric
+ # columns and use them as y values (see _process_chart_y). We meed
+ # to include these columns too.
+ columns |= set(self._data.select(pl.col(pl.NUMERIC_DTYPES)).columns)
xs = x if is_list_like(x) else (x,)
ys = y if is_list_like(y) else (y,)
columns |= {*xs, *ys}
columns.discard(None)
+ # Reorder the columns as in the data.
+ columns = sorted(columns, key=lambda c: self._data.columns.index(c))
if isinstance(self._data, pl.DataFrame):
data = self._data.select(columns).to_pandas()
| diff --git a/hvplot/tests/testplotting.py b/hvplot/tests/testplotting.py
--- a/hvplot/tests/testplotting.py
+++ b/hvplot/tests/testplotting.py
@@ -67,4 +67,5 @@ def test_plot_supports_polars():
pl = pytest.importorskip("polars")
dfp = pl.DataFrame(pd._testing.makeDataFrame())
out = plot(dfp, 'line')
- assert isinstance(out, hv.Curve)
+ assert isinstance(out, hv.NdOverlay)
+ assert out.keys() == dfp.columns
| y=None doesn't work for Polars dataframes
#### ALL software version info
```
>>> import hvplot
>>> hvplot.__version__
'0.9.1'
>>> import holoviews
>>> holoviews.__version__
'1.18.1'
>>> import polars
>>> polars.__version__
'0.20.3'
```
#### Description of expected behavior and the observed behavior
If you call, say `df.plot.line(x="foo")`, the docs say that all other numeric cols should be used as y values, but instead we get no plot and the following message: `matplotlib backend could not plot any Elements in the Overlay.`
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import polars as pl
pl.DataFrame({"a": [1, 2], "b": [3, 4]}).plot.line(x="a")
```
In the [_get_converter](https://github.com/holoviz/hvplot/blob/734c0cc922152ea6a0f3332bc2bf19f5430d7629/hvplot/plotting/core.py#L1867) method of hvPlotTabularPolars, we see that (if hover_cols is not set to "all") only columns from `x`, `y` and `kwds` will be selected:
```python
possible_columns = [
[v] if isinstance(v, str) else v
for v in params.values()
if isinstance(v, (str, list))
]
columns = (
set(self._data.columns) & set(itertools.chain(*possible_columns))
) or {self._data.columns[0]}
xs = x if is_list_like(x) else (x,)
ys = y if is_list_like(y) else (y,)
columns |= {*xs, *ys}
columns.discard(None)
```
### Proposed solution
When the y value is not specified, select all numeric columns (plus whatever is specified) in _get_converter. We can do this in Polars like this:
```python
import polars as pl
pl.DataFrame({"a": [1, 2], "b": [3, 4], "c": ["foo", "bar"], "d": ["ping", "pong"]}).select([pl.col("d"), pl.col(pl.NUMERIC_DTYPES)])
```
This code will select columns d, a and b.
There is still some filtering based on pandas dtype later in the code but I think this is applied to non-Polars sources too, so we may as well just leave it as it is for now.
- [x] I may be interested in making a pull request to address this
| 2024-01-02T22:17:35 |
|
holoviz/hvplot | 1,314 | holoviz__hvplot-1314 | [
"1312"
] | d8c95001ae0d0ccfc114fbe1c8ab4d030d59b0aa | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -206,9 +206,28 @@ class HoloViewsConverter:
the Datashader library, returning an RGB object instead of
individual points
downsample (default=False):
- Whether to apply LTTB (Largest Triangle Three Buckets)
- downsampling to the element (note this is only well behaved for
- timeseries data). Requires HoloViews >= 1.16.
+ Controls the application of downsampling to the plotted data,
+ which is particularly useful for large timeseries datasets to
+ reduce the amount of data sent to browser and improve
+ visualization performance. Requires HoloViews >= 1.16. Additional
+ dependencies: Installing the `tsdownsample` library is required
+ for using any downsampling methods other than the default 'lttb'.
+ Acceptable values:
+ - False: No downsampling is applied.
+ - True: Applies downsampling using HoloViews' default algorithm
+ (LTTB - Largest Triangle Three Buckets).
+ - 'lttb': Explicitly applies the Largest Triangle Three Buckets
+ algorithm.
+ - 'minmax': Applies the MinMax algorithm, selecting the minimum
+ and maximum values in each bin. Requires `tsdownsample`.
+ - 'm4': Applies the M4 algorithm, selecting the minimum, maximum,
+ first, and last values in each bin. Requires `tsdownsample`.
+ - 'minmax-lttb': Combines MinMax and LTTB algorithms for
+ downsampling, first applying MinMax to reduce to a preliminary
+ set of points, then LTTB for further reduction. Requires
+ `tsdownsample`.
+ Other string values corresponding to supported algorithms in
+ HoloViews may also be used.
dynspread (default=False):
For plots generated with datashade=True or rasterize=True,
automatically increase the point size when the data is sparse
@@ -1321,6 +1340,11 @@ def method_wrapper(ds, x, y):
except ImportError:
raise ImportError('Downsampling requires HoloViews >=1.16')
+ # Let HoloViews choose the default algo if 'downsample' is True.
+ # Otherwise, user-specified algorithm
+ if isinstance(self.downsample, str):
+ opts['algorithm'] = self.downsample
+
if self.x_sampling:
opts['x_sampling'] = self.x_sampling
if self._plot_opts.get('xlim') is not None:
| diff --git a/hvplot/tests/testoperations.py b/hvplot/tests/testoperations.py
--- a/hvplot/tests/testoperations.py
+++ b/hvplot/tests/testoperations.py
@@ -319,3 +319,11 @@ def test_downsample_opts(self):
assert plot.callback.operation.p.height == 50
assert plot.callback.operation.p.x_sampling == 5
assert plot.callback.operation.p.x_range == (0, 5)
+
+ def test_downsample_algorithm_minmax(self):
+ from holoviews.operation.downsample import downsample1d
+
+ plot = self.df.hvplot.line(downsample='minmax')
+
+ assert isinstance(plot.callback.operation, downsample1d)
+ assert plot.callback.operation_kwargs['algorithm'] == 'minmax'
| Expose more downsample algorithms and options
At the moment, I think, `downsample=True` is the only way to toggle the `downsample1d` and there's no way to customize how it's called, i.e. there's no way to set `algorithm` or some of its other options. Multiple options to implement that:
- Overload `downsample` to accept a string (`minmax`) or a dict (`{'algorithm': 'minmax', **other_downsample1d_opts}`)
- Just allow extra kwargs to be passed to hvPlot and catch them internally, like `max_px` and `threshold` for `dynspread`.
- Probably many other ways
Small preference for the first option.
cc @droumis
| IMO: overload downsample to accept a string. `True` could still default to use LTTB but we should allow for others, like `downsample='minmax'`, etc | 2024-04-15T21:01:24 |
holoviz/hvplot | 1,330 | holoviz__hvplot-1330 | [
"1329"
] | f7f2d1ce25ada8ad6e3927f94451c37448b7b6f8 | diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -303,14 +303,24 @@ def process_crs(crs):
crs = crs.to_wkt()
errors = []
- if isinstance(crs, (str, int)): # epsg codes
+ if isinstance(crs, (str, int, pyproj.Proj)):
+ wkt = crs
+ if isinstance(crs, (str, int)): # epsg codes
+ try:
+ wkt = pyproj.CRS.from_epsg(crs).to_wkt()
+ except Exception as e:
+ errors.append(e)
try:
- crs = pyproj.CRS.from_epsg(crs).to_wkt()
+ return proj_to_cartopy(wkt) # should be all proj4 or wkt strings
except Exception as e:
errors.append(e)
- if isinstance(crs, (str, pyproj.Proj)): # proj4/wkt strings
+
+ if isinstance(crs, (str, int)):
+ if isinstance(crs, str):
+ # pyproj does not expect epsg to be prefixed with `EPSG:`
+ crs = crs.upper().replace('EPSG:', '').strip()
try:
- return proj_to_cartopy(crs)
+ return ccrs.epsg(crs)
except Exception as e:
errors.append(e)
| diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -261,6 +261,9 @@ def test_check_crs():
[
'+init=epsg:26911',
'PlateCarree',
+ 'epsg:6933',
+ 6933,
+ 'EPSG: 6933',
],
)
def test_process_crs(input):
| EPSG codes are failing in `process_crs`
#### ALL software version info
```python
hvplot.__version__ = '0.9.2'
geoviews.__version__ = '1.11.1'
rasterio.__version__ = '1.3.9'
xarray.__version__ = '2024.3.0'
cartopy.__version__ = '0.22.0'
pyproj.__version__ = '3.6.1'
```
#### Description of expected behavior
`process_crs` should work for all EPSG codes. This works when using cartopy.crs directly:
```python
import cartopy.crs as ccrs
ccrs.epsg(6933)
```
I think this was introduced in #1139
#### Complete, minimal, self-contained example code that reproduces the issue
```python
from hvplot.util import preprocess_crs
preprocess_crs(6933)
```
#### Stack traceback and/or browser JavaScript console output
```python-traceback
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
Exception: Unknown projection cea
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
Cell In[64], line 3
1 from hvplot.util import process_crs
----> 3 process_crs(6933)
File [/srv/conda/envs/notebook/lib/python3.11/site-packages/hvplot/util.py:288](https://hub.openveda.cloud/srv/conda/envs/notebook/lib/python3.11/site-packages/hvplot/util.py#line=287), in process_crs(crs)
285 except Exception as e:
286 errors.append(e)
--> 288 raise ValueError(
289 "Projection must be defined as a EPSG code, proj4 string, "
290 "WKT string, cartopy CRS, pyproj.Proj, or pyproj.CRS."
291 ) from Exception(*errors)
ValueError: Projection must be defined as a EPSG code, proj4 string, WKT string, cartopy CRS, pyproj.Proj, or pyproj.CRS.
```
#### Screenshots or screencasts of the bug in action
- [x] I may be interested in making a pull request to address this
| I will have a PR up shortly
| 2024-04-23T20:45:55 |
ckan/ckan | 248 | ckan__ckan-248 | [
"247"
] | 7aafdb9af7d477ab0cf7e7a43bb52316c861b351 | diff --git a/ckan/logic/auth/update.py b/ckan/logic/auth/update.py
--- a/ckan/logic/auth/update.py
+++ b/ckan/logic/auth/update.py
@@ -15,9 +15,16 @@ def package_update(context, data_dict):
package = get_package_object(context, data_dict)
if package.owner_org:
+ # if there is an owner org then we must have update_dataset
+ # premission for that organization
check1 = new_authz.has_user_permission_for_group_or_org(package.owner_org, user, 'update_dataset')
else:
- check1 = new_authz.check_config_permission('create_dataset_if_not_in_organization')
+ # If dataset is not owned then we can edit if config permissions allow
+ if new_authz.auth_is_registered_user():
+ check1 = new_authz.check_config_permission(
+ 'create_dataset_if_not_in_organization')
+ else:
+ check1 = new_authz.check_config_permission('anon_create_dataset')
if not check1:
return {'success': False, 'msg': _('User %s not authorized to edit package %s') % (str(user), package.id)}
else:
| Anonymous users can edit resources
There is an issue with the auth system that anonymous users can edit resources even if they are not allowed to create them.
| 2012-12-21T13:23:07 |
||
ckan/ckan | 260 | ckan__ckan-260 | [
"259",
"259"
] | b36ba5eaaa7380d377ab4b64fc5e226ecb19b464 | diff --git a/ckanext/reclinepreview/plugin.py b/ckanext/reclinepreview/plugin.py
--- a/ckanext/reclinepreview/plugin.py
+++ b/ckanext/reclinepreview/plugin.py
@@ -26,6 +26,9 @@ def update_config(self, config):
toolkit.add_resource('theme/public', 'ckanext-reclinepreview')
def can_preview(self, data_dict):
+ # if the resource is in the datastore then we can preview it with recline
+ if data_dict['resource'].get('datastore_active'):
+ return True
format_lower = data_dict['resource']['format'].lower()
return format_lower in ['csv', 'xls', 'tsv']
| diff --git a/ckanext/reclinepreview/tests/test_preview.py b/ckanext/reclinepreview/tests/test_preview.py
--- a/ckanext/reclinepreview/tests/test_preview.py
+++ b/ckanext/reclinepreview/tests/test_preview.py
@@ -88,6 +88,22 @@ def test_can_preview(self):
}
assert not self.p.can_preview(data_dict)
+ data_dict = {
+ 'resource': {
+ 'format': 'foo',
+ 'datastore_active': True
+ }
+ }
+ assert self.p.can_preview(data_dict)
+
+ data_dict = {
+ 'resource': {
+ 'format': 'foo',
+ 'datastore_active': False
+ }
+ }
+ assert not self.p.can_preview(data_dict)
+
def test_js_included(self):
res_id = self.resource['id']
| Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
| 2013-01-04T14:28:00 |
|
ckan/ckan | 359 | ckan__ckan-359 | [
"345"
] | 4fe7b50f7365ae686d4fb7d7d2deb778e782964d | diff --git a/ckan/model/meta.py b/ckan/model/meta.py
--- a/ckan/model/meta.py
+++ b/ckan/model/meta.py
@@ -161,4 +161,6 @@ def engine_is_sqlite():
def engine_is_pg():
# Returns true iff the engine is connected to a postgresql database.
- return engine.url.drivername in ['psycopg2', 'postgres']
+ # According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
+ # all Postgres driver names start with `postgresql`
+ return engine.url.drivername.startswith('postgresql')
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -68,8 +68,7 @@ def configure(self, config):
self._create_alias_table()
else:
log.warn("We detected that CKAN is running on a read only database. "
- "Permission checks and _table_metadata creation are skipped."
- "Make sure that replication is properly set-up.")
+ "Permission checks and the creation of _table_metadata are skipped.")
else:
log.warn("We detected that you do not use a PostgreSQL database. "
"The DataStore will NOT work and datastore tests will be skipped.")
| `engine_is_pg` does not recognize engine `postgres`
This pull request also includes minor changes to the commenting style.
| @domoritz @seanh I'm afraid I'm getting these messages on latest master:
```
WARNI [ckanext.datastore.plugin] We detected that you do not use a PostgreSQL database. The DataStore will NOT work and datastore tests will be skipped.
```
Looks like this check is not thorough enough:
def engine_is_pg():
# Returns true iff the engine is connected to a postgresql database.
return engine.url.drivername in ['psycopg2', 'postgres']
In my case engine.url.drivername is 'postgresql'. My connection string is `postgresql://ckantest:pass@localhost/ckantest`
Note that there may be other possible url [connection strings](http://docs.sqlalchemy.org/en/rel_0_8/dialects/postgresql.html#dialect-postgresql-psycopg2-connect).
@amercader I'll fix that.
Thanks. I would also drop "Make sure that replication is properly set-up" as it is confusing.
| 2013-02-05T18:45:40 |
|
ckan/ckan | 401 | ckan__ckan-401 | [
"344"
] | 217f71f9083e5f94e78191c0c71a23b06bd66821 | diff --git a/ckan/controllers/feed.py b/ckan/controllers/feed.py
--- a/ckan/controllers/feed.py
+++ b/ckan/controllers/feed.py
@@ -24,6 +24,7 @@
import webhelpers.feedgenerator
from pylons import config
+from pylons.i18n import _
from urllib import urlencode
from ckan import model
@@ -172,7 +173,7 @@ def group(self, id):
'user': c.user or c.author}
group_dict = get_action('group_show')(context, {'id': id})
except NotFound:
- abort(404, 'Group not found')
+ abort(404, _('Group not found'))
data_dict, params = self._parse_url_params()
data_dict['fq'] = 'groups:"%s"' % id
@@ -282,7 +283,9 @@ def custom(self):
try:
page = int(request.params.get('page', 1))
except ValueError:
- abort(400, ('"page" parameter must be an integer'))
+ abort(400, _('"page" parameter must be a positive integer'))
+ if page < 0:
+ abort(400, _('"page" parameter must be a positive integer'))
limit = ITEMS_LIMIT
data_dict = {
@@ -434,7 +437,9 @@ def _parse_url_params(self):
try:
page = int(request.params.get('page', 1)) or 1
except ValueError:
- abort(400, ('"page" parameter must be an integer'))
+ abort(400, _('"page" parameter must be a positive integer'))
+ if page < 0:
+ abort(400, _('"page" parameter must be a positive integer'))
limit = ITEMS_LIMIT
data_dict = {
| diff --git a/ckan/tests/functional/test_group.py b/ckan/tests/functional/test_group.py
--- a/ckan/tests/functional/test_group.py
+++ b/ckan/tests/functional/test_group.py
@@ -74,6 +74,18 @@ def test_atom_feed_page_zero(self):
assert 'xmlns="http://www.w3.org/2005/Atom"' in res, res
assert '</feed>' in res, res
+ def test_atom_feed_page_negative(self):
+ group_name = 'deletetest'
+ CreateTestData.create_groups([{'name': group_name,
+ 'packages': []}],
+ admin_user_name='testsysadmin')
+
+ offset = url_for(controller='feed', action='group',
+ id=group_name)
+ offset = offset + '?page=-2'
+ res = self.app.get(offset, expect_errors=True)
+ assert '"page" parameter must be a positive integer' in res, res
+
def test_children(self):
if model.engine_is_sqlite() :
from nose import SkipTest
| Better check for page parameter on feed controller
Right now we are checking for the "page" parameter to be an integer, but we have to also ensure that is a positive integer, otherwise we get an error from Solr
https://github.com/okfn/ckan/blob/master/ckan/controllers/feed.py
| Grabbing this.
| 2013-02-15T04:21:06 |
ckan/ckan | 453 | ckan__ckan-453 | [
"294"
] | 9c48a59b6e31089d67a8e2a37a9841f550643b8b | diff --git a/ckan/migration/versions/064_add_email_last_sent_column.py b/ckan/migration/versions/064_add_email_last_sent_column.py
--- a/ckan/migration/versions/064_add_email_last_sent_column.py
+++ b/ckan/migration/versions/064_add_email_last_sent_column.py
@@ -6,5 +6,5 @@ def upgrade(migrate_engine):
metadata.bind = migrate_engine
migrate_engine.execute('''
ALTER TABLE dashboard
- ADD COLUMN email_last_sent timestamp without time zone NOT NULL;
+ ADD COLUMN email_last_sent timestamp without time zone NOT NULL DEFAULT LOCALTIMESTAMP;
''')
| Migration 64 fails if dashboard table exists and contains data
Migration 64 adds a new NOT NULL column to the dashboard table but doesn't specify a default value, so the ALTER will fail if the table already contains data.
https://github.com/okfn/ckan/blob/master/ckan/migration/versions/064_add_email_last_sent_column.py
| @seanh
Do we have a sensible default value we can use here. If so could you fix this or let me know the default and I can add it.
thanks
@tobes Yeah I plan to fix this just a question of finding the time (so anyone feel free to jump on it if you can). The current time as a `timestamp without time zone` will do as a default I think, but gotta find the postgres constant or method that will give you it.
`localtimestamp` looks like what you want
| 2013-02-22T15:19:53 |
|
ckan/ckan | 457 | ckan__ckan-457 | [
"445"
] | aee8d37d811bed46c50c140d8c6fb2f9f9ad9311 | diff --git a/ckanext/multilingual/plugin.py b/ckanext/multilingual/plugin.py
--- a/ckanext/multilingual/plugin.py
+++ b/ckanext/multilingual/plugin.py
@@ -124,12 +124,11 @@ def before_index(self, search_data):
for key, value in search_data.iteritems():
if key in KEYS_TO_IGNORE or key.startswith('title'):
continue
- if isinstance(value, list):
- all_terms.extend(value)
- elif value in (None, True, False):
- continue
- else:
- all_terms.append(value)
+ if not isinstance(value, list):
+ value = [value]
+ for item in value:
+ if isinstance(item, basestring):
+ all_terms.append(item)
field_translations = action_get.term_translation_show(
{'model': ckan.model},
| Multilingual extension's tests are failing on master
| 2013-02-22T17:08:09 |
||
ckan/ckan | 561 | ckan__ckan-561 | [
"542"
] | 6e990b326c1a3e55981c91b0a1f2568d44ce3441 | diff --git a/ckan/migration/versions/067_turn_extras_to_strings.py b/ckan/migration/versions/067_turn_extras_to_strings.py
--- a/ckan/migration/versions/067_turn_extras_to_strings.py
+++ b/ckan/migration/versions/067_turn_extras_to_strings.py
@@ -7,7 +7,7 @@ def upgrade(migrate_engine):
revision_tables = 'package_extra_revision group_extra_revision'
for table in tables.split():
- sql = """select id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
id, value = result
@@ -16,7 +16,7 @@ def upgrade(migrate_engine):
json.loads(value), id)
for table in revision_tables.split():
- sql = """select id, revision_id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, revision_id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
| Postgresql 8.4 error when running paster db init
When running the paster db init command with the CKAN 2.0 beta, there is an error encountered that appears to be related to use of the left() string function in ckan/migration/versions/067_turn_extras_to_strings.py. According to the documentation and my own simple test, this function is not support in Postgresql 8.4. For a stack trace, see: https://gist.github.com/thriuin/5067819.
Is there a new minimum version of Postgresql required -- documentation still says 8.4 which unfortunately is what comes with RedHat Enterprise.
| Hi @thriuin,
This is related to https://github.com/okfn/ckan/pull/514. I've added a patch in the comments, at https://github.com/okfn/ckan/pull/514#issuecomment-14255638, that solves this issue. @domoritz created a patch of an issue that you will have down the line.
Could you apply both patches and try to run the tests under Postgres 8.4?
Cheers!
Thanks! I will test the patches on Monday morning when I have access to the RHEL environment again,
Hi @vitorbaptista,
I have tested and can confirm these patches allow the paster db init to work on Postgres 8.4
Thanks!
| 2013-03-05T21:04:15 |
|
ckan/ckan | 624 | ckan__ckan-624 | [
"421"
] | 45f540a2a058d9de644adc4c6158dfd4f9311821 | diff --git a/ckan/controllers/tag.py b/ckan/controllers/tag.py
--- a/ckan/controllers/tag.py
+++ b/ckan/controllers/tag.py
@@ -1,5 +1,5 @@
from pylons.i18n import _
-from pylons import request, c
+from pylons import request, c, config
import ckan.logic as logic
import ckan.model as model
@@ -65,4 +65,7 @@ def read(self, id):
except logic.NotFound:
base.abort(404, _('Tag not found'))
- return base.render('tag/read.html')
+ if h.asbool(config.get('ckan.legacy_templates', False)):
+ return base.render('tag/read.html')
+ else:
+ h.redirect_to(controller='package', action='search', tags=c.tag.get('name'))
| Tag pages still use old templates
/tag
| 2013-03-13T11:04:59 |
||
ckan/ckan | 692 | ckan__ckan-692 | [
"533"
] | 049d0500b5ee194c7f9a7602665cd484addb7938 | diff --git a/ckan/lib/cli.py b/ckan/lib/cli.py
--- a/ckan/lib/cli.py
+++ b/ckan/lib/cli.py
@@ -99,21 +99,21 @@ def _setup_app(self):
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
- db create # alias of db upgrade
- db init # create and put in default data
+ db create - alias of db upgrade
+ db init - create and put in default data
db clean
- db upgrade [{version no.}] # Data migrate
- db version # returns current version of data schema
- db dump {file-path} # dump to a pg_dump file
- db dump-rdf {dataset-name} {file-path}
- db simple-dump-csv {file-path} # dump just datasets in CSV format
- db simple-dump-json {file-path} # dump just datasets in JSON format
- db user-dump-csv {file-path} # dump user information to a CSV file
- db send-rdf {talis-store} {username} {password}
- db load {file-path} # load a pg_dump from a file
- db load-only {file-path} # load a pg_dump from a file but don\'t do
- # the schema upgrade or search indexing
- db create-from-model # create database from the model (indexes not made)
+ db upgrade [version no.] - Data migrate
+ db version - returns current version of data schema
+ db dump FILE_PATH - dump to a pg_dump file
+ db dump-rdf DATASET_NAME FILE_PATH
+ db simple-dump-csv FILE_PATH - dump just datasets in CSV format
+ db simple-dump-json FILE_PATH - dump just datasets in JSON format
+ db user-dump-csv FILE_PATH - dump user information to a CSV file
+ db send-rdf TALIS_STORE USERNAME PASSWORD
+ db load FILE_PATH - load a pg_dump from a file
+ db load-only FILE_PATH - load a pg_dump from a file but don\'t do
+ the schema upgrade or search indexing
+ db create-from-model - create database from the model (indexes not made)
'''
summary = __doc__.split('\n')[0]
usage = __doc__
@@ -312,10 +312,12 @@ class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
- search-index [-i] [-o] [-r] [-e] rebuild [dataset-name] - reindex dataset-name if given, if not then rebuild full search index (all datasets)
- search-index check - checks for datasets not indexed
- search-index show {dataset-name} - shows index of a dataset
- search-index clear [dataset-name] - clears the search index for the provided dataset or for the whole ckan instance
+ search-index [-i] [-o] [-r] [-e] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
+ full search index (all datasets)
+ search-index check - checks for datasets not indexed
+ search-index show DATASET_NAME - shows index of a dataset
+ search-index clear [dataset_name] - clears the search index for the provided dataset or
+ for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
@@ -434,7 +436,7 @@ def command(self):
class RDFExport(CkanCommand):
- '''
+ '''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
@@ -498,8 +500,8 @@ class Sysadmin(CkanCommand):
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
- sysadmin add <user-name> - add a user as a sysadmin
- sysadmin remove <user-name> - removes user from sysadmins
+ sysadmin add USERNAME - add a user as a sysadmin
+ sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
@@ -579,16 +581,16 @@ class UserCmd(CkanCommand):
Usage:
user - lists users
user list - lists users
- user <user-name> - shows user properties
- user add <user-name> [<field>=<value>]
+ user USERNAME - shows user properties
+ user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for password
if not supplied).
Field can be: apikey
password
email
- user setpass <user-name> - set user password (prompts)
- user remove <user-name> - removes user from users
- user search <query> - searches for a user name
+ user setpass USERNAME - set user password (prompts)
+ user remove USERNAME - removes user from users
+ user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
@@ -735,11 +737,11 @@ class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
- dataset <dataset-name/id> - shows dataset properties
- dataset show <dataset-name/id> - shows dataset properties
+ dataset DATASET_NAME|ID - shows dataset properties
+ dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
- dataset delete <dataset-name/id> - changes dataset state to 'deleted'
- dataset purge <dataset-name/id> - removes dataset from db entirely
+ dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
+ dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
@@ -816,12 +818,11 @@ class Celery(CkanCommand):
'''Celery daemon
Usage:
- celeryd - run the celery daemon
- celeryd run - run the celery daemon
- celeryd run concurrency - run the celery daemon with
- argument 'concurrency'
- celeryd view - view all tasks in the queue
- celeryd clean - delete all tasks in the queue
+ celeryd <run> - run the celery daemon
+ celeryd run concurrency - run the celery daemon with
+ argument 'concurrency'
+ celeryd view - view all tasks in the queue
+ celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
@@ -940,8 +941,8 @@ class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
- tracking update [start-date] - update tracking stats
- tracking export <file> [start-date] - export tracking stats to a csv file
+ tracking update [start_date] - update tracking stats
+ tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
@@ -1116,7 +1117,7 @@ def update_tracking(self, engine, summary_date):
engine.execute(sql)
class PluginInfo(CkanCommand):
- ''' Provide info on installed plugins.
+ '''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
@@ -1220,8 +1221,8 @@ class CreateTestDataCommand(CkanCommand):
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
- create-test-data vocabs - annakerenina, warandpeace, and some test
- vocabularies
+ create-test-data vocabs - annakerenina, warandpeace, and some test
+ vocabularies
'''
summary = __doc__.split('\n')[0]
@@ -1271,7 +1272,7 @@ class Profile(CkanCommand):
by runsnakerun.
Usage:
- profile {url}
+ profile URL
e.g. profile /data/search
@@ -1328,15 +1329,15 @@ def profile_url(url):
class CreateColorSchemeCommand(CkanCommand):
- ''' Create or remove a color scheme.
+ '''Create or remove a color scheme.
- less will need to generate the css files after this has been run
+ After running this, you'll need to regenerate the css files. See paster's less command for details.
- color - creates a random color scheme
- color clear - clears any color scheme
- color '<hex>' - uses as base color eg '#ff00ff' must be quoted.
- color <value> - a float between 0.0 and 1.0 used as base hue
- color <color name> - html color name used for base color eg lightblue
+ color - creates a random color scheme
+ color clear - clears any color scheme
+ color <'HEX'> - uses as base color eg '#ff00ff' must be quoted.
+ color <VALUE> - a float between 0.0 and 1.0 used as base hue
+ color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
@@ -1587,8 +1588,8 @@ def command(self):
class TranslationsCommand(CkanCommand):
'''Translation helper functions
- trans js - generate the javascript translations
- trans mangle - mangle the zh_TW translations for testing
+ trans js - generate the javascript translations
+ trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
@@ -1744,7 +1745,7 @@ class MinifyCommand(CkanCommand):
Usage:
- paster minify [--clean] <path>
+ paster minify [--clean] PATH
for example:
@@ -1919,7 +1920,7 @@ def compile_less(self, root, less_bin, color):
class FrontEndBuildCommand(CkanCommand):
- ''' Creates and minifies css and JavaScript files
+ '''Creates and minifies css and JavaScript files
Usage:
diff --git a/ckanext/datastore/commands.py b/ckanext/datastore/commands.py
--- a/ckanext/datastore/commands.py
+++ b/ckanext/datastore/commands.py
@@ -8,7 +8,7 @@
class SetupDatastoreCommand(cli.CkanCommand):
'''Perform commands to set up the datastore.
- Make sure that the datastore urls are set properly before you run these commands.
+ Make sure that the datastore URLs are set properly before you run these commands.
Usage::
@@ -16,9 +16,9 @@ class SetupDatastoreCommand(cli.CkanCommand):
Where:
SQL_SUPER_USER is the name of a postgres user with sufficient
- permissions to create new tables, users, and grant
- and revoke new permissions. Typically, this would
- be the "postgres" user.
+ permissions to create new tables, users, and grant
+ and revoke new permissions. Typically, this would
+ be the "postgres" user.
'''
summary = __doc__.split('\n')[0]
| Update 'Common CKAN Administrator Tasks' docs for 2.0
A few things on this page are out of date, e.g. they refer to the old auth system: http://docs.ckan.org/en/latest/paster.html
Some new paster commands aren't documented.
Should the paster commands docs be pulled from docstrings by autodoc like the api and plugin interfaces docs are? Seems a better way to keep them up to date and not have to write the docs twice
Everything on this page seems to be paster commands, so should the page be renamed _CKAN Paster Commands_? The current title is a bit generic
This [Database Dumps](http://docs.ckan.org/en/latest/database-dumps.html) page is also just about a couple of paster commands, so maybe belongs on the same page with the rest of the paster commands now on its own page
| @vitorbaptista Assigning this one to you. Can you try and make it pull the docs out of the source using autodoc? This is already done for the plugin interfaces and action API Ask me if you need any guidance. We're hoping to get these done by end of next week so we can release 2.0 in a couple of weeks
| 2013-03-23T01:23:26 |
|
ckan/ckan | 694 | ckan__ckan-694 | [
"256"
] | 0955b0c09d5345a3f9745dbe07aa8099916eca9d | diff --git a/ckan/controllers/user.py b/ckan/controllers/user.py
--- a/ckan/controllers/user.py
+++ b/ckan/controllers/user.py
@@ -575,8 +575,8 @@ def dashboard(self, id=None, offset=0):
context, {'id': c.userobj.id, 'q': q})
c.dashboard_activity_stream_context = self._get_dashboard_context(
filter_type, filter_id, q)
- c.dashboard_activity_stream = h.dashboard_activity_stream(
- id, filter_type, filter_id, offset)
+ c.dashboard_activity_stream = h.dashboard_activity_stream(filter_type,
+ filter_id, offset)
# Mark the user's new activities as old whenever they view their
# dashboard page.
diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1289,7 +1289,7 @@ def user_in_org_or_group(group_id):
def dashboard_activity_stream(user_id, filter_type=None, filter_id=None,
offset=0):
- '''Return the dashboard activity stream of the given user.
+ '''Return the dashboard activity stream of the current user.
:param user_id: the id of the user
:type user_id: string
@@ -1317,7 +1317,7 @@ def dashboard_activity_stream(user_id, filter_type=None, filter_id=None,
return action_function(context, {'id': filter_id, 'offset': offset})
else:
return logic.get_action('dashboard_activity_list_html')(
- context, {'id': user_id, 'offset': offset})
+ context, {'offset': offset})
def recently_changed_packages_activity_stream():
diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -830,7 +830,7 @@ def _group_or_org_show(context, data_dict, is_org=False):
_check_access('organization_show',context, data_dict)
else:
_check_access('group_show',context, data_dict)
-
+
group_dict = model_dictize.group_dictize(group, context)
@@ -2645,11 +2645,11 @@ def dashboard_activity_list_html(context, data_dict):
'''
activity_stream = dashboard_activity_list(context, data_dict)
+ model = context['model']
offset = int(data_dict.get('offset', 0))
extra_vars = {
- 'controller': 'dashboard',
- 'action': 'activity',
- 'id': data_dict['id'],
+ 'controller': 'user',
+ 'action': 'dashboard',
'offset': offset,
}
return activity_streams.activity_list_to_html(context, activity_stream,
| diff --git a/ckan/tests/functional/api/test_dashboard.py b/ckan/tests/functional/api/test_dashboard.py
--- a/ckan/tests/functional/api/test_dashboard.py
+++ b/ckan/tests/functional/api/test_dashboard.py
@@ -331,3 +331,20 @@ def test_09_activities_that_should_not_show(self):
after = self.dashboard_activity_list(self.new_user)
assert before == after
+
+ def test_10_dashboard_activity_list_html_does_not_crash(self):
+
+ params = json.dumps({'name': 'irrelevant_dataset1'})
+ response = self.app.post('/api/action/package_create', params=params,
+ extra_environ={'Authorization': str(self.annafan['apikey'])})
+ assert response.json['success'] is True
+
+ params = json.dumps({'name': 'another_irrelevant_dataset'})
+ response = self.app.post('/api/action/package_create', params=params,
+ extra_environ={'Authorization': str(self.annafan['apikey'])})
+ assert response.json['success'] is True
+
+ res = self.app.get('/api/3/action/dashboard_activity_list_html',
+ extra_environ={'Authorization':
+ str(self.annafan['apikey'])})
+ assert res.json['success'] is True
| dashboard_activity_list_html 500s
It looks like commit e27b46b4 added an `id` arg to `dashboard_activity_list_html()` but didn't document it, now calling the function as described in the API docs 500s. Also looks like this id param is unnecessary as it should just use the id of the authorized user. Then the code that calls this function and passes an id in can be changed to not pass an id.
Also, are there any tests for this function? Apparently not or they would be failing. Need to add tests. It's just a simple wrapper function so should only need a couple of simple tests.
| @seanh do you have time to do this? I think john is snowed under at the mo, if not I could fix it
@tobes Yeah I'll take this
I looked into this pretty deeply today and I think the best course of action is to document that `id` is required. This is used to render the public activity streams for users and using the current user is not the right course of action there.
| 2013-03-23T13:33:03 |
ckan/ckan | 696 | ckan__ckan-696 | [
"646"
] | 3a3fff86417f5150add88bd7e93871a448e2a875 | diff --git a/ckan/logic/action/create.py b/ckan/logic/action/create.py
--- a/ckan/logic/action/create.py
+++ b/ckan/logic/action/create.py
@@ -3,7 +3,7 @@
import logging
from pylons import config
-from paste.deploy.converters import asbool
+import paste.deploy.converters
import ckan.lib.plugins as lib_plugins
import ckan.logic as logic
@@ -914,7 +914,8 @@ def activity_create(context, activity_dict, ignore_auth=False):
:rtype: dictionary
'''
- if not asbool(config.get('ckan.activity_streams_enabled', 'true')):
+ if not paste.deploy.converters.asbool(
+ config.get('ckan.activity_streams_enabled', 'true')):
return
model = context['model']
@@ -1141,10 +1142,41 @@ def _group_or_org_member_create(context, data_dict, is_org=False):
logic.get_action('member_create')(member_create_context, member_dict)
def group_member_create(context, data_dict):
+ '''Make a user a member of a group.
+
+ You must be authorized to edit the group.
+
+ :param id: the id or name of the group
+ :type id: string
+ :param username: name or id of the user to be made member of the group
+ :type username: string
+ :param role: role of the user in the group. One of ``member``, ``editor``,
+ or ``admin``
+ :type role: string
+
+ :returns: the newly created (or updated) membership
+ :rtype: dictionary
+ '''
_check_access('group_member_create', context, data_dict)
return _group_or_org_member_create(context, data_dict)
def organization_member_create(context, data_dict):
+ '''Make a user a member of an organization.
+
+ You must be authorized to edit the organization.
+
+ :param id: the id or name of the organization
+ :type id: string
+ :param username: name or id of the user to be made member of the
+ organization
+ :type username: string
+ :param role: role of the user in the organization. One of ``member``,
+ ``editor``, or ``admin``
+ :type role: string
+
+ :returns: the newly created (or updated) membership
+ :rtype: dictionary
+ '''
_check_access('organization_member_create', context, data_dict)
return _group_or_org_member_create(context, data_dict, is_org=True)
diff --git a/ckan/logic/action/delete.py b/ckan/logic/action/delete.py
--- a/ckan/logic/action/delete.py
+++ b/ckan/logic/action/delete.py
@@ -419,7 +419,8 @@ def _group_or_org_member_delete(context, data_dict=None):
group_id = data_dict.get('id')
group = model.Group.get(group_id)
- user_id = data_dict.get('user_id')
+ user_id = data_dict.get('username')
+ user_id = data_dict.get('user_id') if user_id is None else user_id
member_dict = {
'id': group.id,
'object': user_id,
@@ -434,9 +435,29 @@ def _group_or_org_member_delete(context, data_dict=None):
def group_member_delete(context, data_dict=None):
+ '''Remove a user from a group.
+
+ You must be authorized to edit the group.
+
+ :param id: the id or name of the group
+ :type id: string
+ :param username: name or id of the user to be removed
+ :type username: string
+
+ '''
return _group_or_org_member_delete(context, data_dict)
def organization_member_delete(context, data_dict=None):
+ '''Remove a user from an organization.
+
+ You must be authorized to edit the organization.
+
+ :param id: the id or name of the organization
+ :type id: string
+ :param username: name or id of the user to be removed
+ :type username: string
+
+ '''
return _group_or_org_member_delete(context, data_dict)
diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -670,12 +670,13 @@ def user_list(context, data_dict):
def package_relationships_list(context, data_dict):
'''Return a dataset (package)'s relationships.
- :param id: the id or name of the package
+ :param id: the id or name of the first package
+ :type id: string
+ :param id2: the id or name of the second package
:type id: string
- :param id2:
- :type id2:
- :param rel:
- :type rel:
+ :param rel: relationship as string see
+ :func:`ckan.logic.action.create.package_relationship_create()` for the
+ relationship types (optional)
:rtype: list of dictionaries
@@ -2799,4 +2800,12 @@ def _unpick_search(sort, allowed_fields=None, total=None):
def member_roles_list(context, data_dict):
+ '''Return the possible roles for members of groups and organizations.
+
+ :returns: a list of dictionaries each with two keys: "text" (the display
+ name of the role, e.g. "Admin") and "value" (the internal name of the
+ role, e.g. "admin")
+ :rtype: list of dictionaries
+
+ '''
return new_authz.roles_list()
| diff --git a/ckan/tests/test_coding_standards.py b/ckan/tests/test_coding_standards.py
--- a/ckan/tests/test_coding_standards.py
+++ b/ckan/tests/test_coding_standards.py
@@ -991,16 +991,11 @@ class TestActionAuth(object):
ACTION_NO_DOC_STR_BLACKLIST = [
'create: group_create_rest',
- 'create: group_member_create',
- 'create: organization_member_create',
'create: package_create_rest',
'create: package_relationship_create_rest',
- 'delete: group_member_delete',
- 'delete: organization_member_delete',
'delete: package_relationship_delete_rest',
'get: get_site_user',
'get: group_show_rest',
- 'get: member_roles_list',
'get: package_show_rest',
'get: tag_show_rest',
'update: group_update_rest',
| Document undocumented action functions
Currently the docs read:
> TODO: What does this function do?
and this is the first function on our [api docs page](http://docs.ckan.org/en/latest/ckan.logic.action.update.html) which looks quite bad. All action functions should have docstrings.
**Update**: a number of action functions have no docstrings. They should all have docstrings as described in the coding standards.
| I think we should also add a test to check each action has a docstring too
| 2013-03-25T05:06:15 |
ckan/ckan | 710 | ckan__ckan-710 | [
"710"
] | cce3602c92db39233ae1afdca786687fad9d60d6 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -925,7 +925,6 @@ def dataset_link(package_or_package_dict):
def resource_display_name(resource_dict):
name = resource_dict.get('name', None)
description = resource_dict.get('description', None)
- url = resource_dict.get('url')
if name:
return name
elif description:
@@ -934,11 +933,8 @@ def resource_display_name(resource_dict):
if len(description) > max_len:
description = description[:max_len] + '...'
return description
- elif url:
- return url
else:
- noname_string = _('no name')
- return '[%s] %s' % (noname_string, resource_dict['id'])
+ return _("Unnamed resource")
def resource_link(resource_dict, package_id):
| Use "Unnamed resource" not URL for display name when resource has no name
| 2013-03-27T18:07:51 |
||
ckan/ckan | 720 | ckan__ckan-720 | [
"718"
] | 80750708a09b643eb407d6ccdf5c89443cc89ba8 | diff --git a/ckanext/datastore/db.py b/ckanext/datastore/db.py
--- a/ckanext/datastore/db.py
+++ b/ckanext/datastore/db.py
@@ -11,7 +11,7 @@
import logging
import pprint
import sqlalchemy
-from sqlalchemy.exc import ProgrammingError, IntegrityError
+from sqlalchemy.exc import ProgrammingError, IntegrityError, DBAPIError
import psycopg2.extras
log = logging.getLogger(__name__)
@@ -31,19 +31,27 @@ def __init__(self, error_dict):
_type_names = set()
_engines = {}
+# See http://www.postgresql.org/docs/9.2/static/errcodes-appendix.html
+_PG_ERR_CODE = {
+ 'unique_violation': 23505,
+ 'query_canceled': 57014,
+ 'undefined_object': 42704,
+ 'syntax_error': 42601
+}
+
_date_formats = ['%Y-%m-%d',
- '%Y-%m-%d %H:%M:%S',
- '%Y-%m-%dT%H:%M:%S',
- '%Y-%m-%dT%H:%M:%SZ',
- '%d/%m/%Y',
- '%m/%d/%Y',
- '%d-%m-%Y',
- '%m-%d-%Y',
- ]
-INSERT = 'insert'
-UPSERT = 'upsert'
-UPDATE = 'update'
-_methods = [INSERT, UPSERT, UPDATE]
+ '%Y-%m-%d %H:%M:%S',
+ '%Y-%m-%dT%H:%M:%S',
+ '%Y-%m-%dT%H:%M:%SZ',
+ '%d/%m/%Y',
+ '%m/%d/%Y',
+ '%d-%m-%Y',
+ '%m-%d-%Y',
+ ]
+
+_INSERT = 'insert'
+_UPSERT = 'upsert'
+_UPDATE = 'update'
def _strip(input):
@@ -57,7 +65,7 @@ def _pluck(field, arr):
def _get_list(input, strip=True):
- """Transforms a string or list to a list"""
+ '''Transforms a string or list to a list'''
if input is None:
return
if input == '':
@@ -106,7 +114,7 @@ def _validate_int(i, field_name, non_negative=False):
def _get_engine(context, data_dict):
- 'Get either read or write engine.'
+ '''Get either read or write engine.'''
connection_url = data_dict['connection_url']
engine = _engines.get(connection_url)
@@ -159,10 +167,10 @@ def _is_valid_pg_type(context, type_name):
try:
connection.execute('SELECT %s::regtype', type_name)
except ProgrammingError, e:
- if 'invalid type name' in str(e) or 'does not exist' in str(e):
+ if int(e.orig.pgcode) in [_PG_ERR_CODE['undefined_object'],
+ _PG_ERR_CODE['syntax_error']]:
return False
- else:
- raise
+ raise
else:
return True
@@ -173,10 +181,8 @@ def _get_type(context, oid):
def _rename_json_field(data_dict):
- '''
- rename json type to a corresponding type for the datastore since
- pre 9.2 postgres versions do not support native json
- '''
+ '''Rename json type to a corresponding type for the datastore since
+ pre 9.2 postgres versions do not support native json'''
return _rename_field(data_dict, 'json', 'nested')
@@ -193,7 +199,8 @@ def _rename_field(data_dict, term, replace):
def _guess_type(field):
- 'Simple guess type of field, only allowed are integer, numeric and text'
+ '''Simple guess type of field, only allowed are
+ integer, numeric and text'''
data_types = set([int, float])
if isinstance(field, (dict, list)):
return 'nested'
@@ -252,7 +259,7 @@ def json_get_values(obj, current_list=None):
def check_fields(context, fields):
- 'Check if field types are valid.'
+ '''Check if field types are valid.'''
for field in fields:
if field.get('type') and not _is_valid_pg_type(context, field['type']):
raise ValidationError({
@@ -281,7 +288,7 @@ def convert(data, type_name):
def create_table(context, data_dict):
- 'Create table from combination of fields and first row of data.'
+ '''Create table from combination of fields and first row of data.'''
datastore_fields = [
{'id': '_id', 'type': 'serial primary key'},
@@ -331,7 +338,7 @@ def create_table(context, data_dict):
def _get_aliases(context, data_dict):
- ''' Get a list of aliases for a resource. '''
+ '''Get a list of aliases for a resource.'''
res_id = data_dict['resource_id']
alias_sql = sqlalchemy.text(
u'SELECT name FROM "_table_metadata" WHERE alias_of = :id')
@@ -340,8 +347,8 @@ def _get_aliases(context, data_dict):
def _get_resources(context, alias):
- ''' Get a list of resources for an alias. There could be more than one alias
- in a resource_dict. '''
+ '''Get a list of resources for an alias. There could be more than one alias
+ in a resource_dict.'''
alias_sql = sqlalchemy.text(
u'''SELECT alias_of FROM "_table_metadata"
WHERE name = :alias AND alias_of IS NOT NULL''')
@@ -521,7 +528,7 @@ def alter_table(context, data_dict):
def insert_data(context, data_dict):
- data_dict['method'] = INSERT
+ data_dict['method'] = _INSERT
return upsert_data(context, data_dict)
@@ -530,9 +537,9 @@ def upsert_data(context, data_dict):
if not data_dict.get('records'):
return
- method = data_dict.get('method', UPSERT)
+ method = data_dict.get('method', _UPSERT)
- if method not in _methods:
+ if method not in [_INSERT, _UPSERT, _UPDATE]:
raise ValidationError({
'method': [u'"{0}" is not defined'.format(method)]
})
@@ -543,7 +550,7 @@ def upsert_data(context, data_dict):
sql_columns = ", ".join(['"%s"' % name.replace('%', '%%') for name in field_names]
+ ['"_full_text"'])
- if method == INSERT:
+ if method == _INSERT:
rows = []
for num, record in enumerate(records):
_validate_record(record, num, field_names)
@@ -566,7 +573,7 @@ def upsert_data(context, data_dict):
context['connection'].execute(sql_string, rows)
- elif method in [UPDATE, UPSERT]:
+ elif method in [_UPDATE, _UPSERT]:
unique_keys = _get_unique_key(context, data_dict)
if len(unique_keys) < 1:
raise ValidationError({
@@ -608,7 +615,7 @@ def upsert_data(context, data_dict):
full_text = _to_full_text(fields, record)
- if method == UPDATE:
+ if method == _UPDATE:
sql_string = u'''
UPDATE "{res_id}"
SET ({columns}, "_full_text") = ({values}, to_tsvector(%s))
@@ -629,7 +636,7 @@ def upsert_data(context, data_dict):
'key': [u'key "{0}" not found'.format(unique_values)]
})
- elif method == UPSERT:
+ elif method == _UPSERT:
sql_string = u'''
UPDATE "{res_id}"
SET ({columns}, "_full_text") = ({values}, to_tsvector(%s))
@@ -700,7 +707,7 @@ def _to_full_text(fields, record):
def _where(field_ids, data_dict):
- 'Return a SQL WHERE clause from data_dict filters and q'
+ '''Return a SQL WHERE clause from data_dict filters and q'''
filters = data_dict.get('filters', {})
if not isinstance(filters, dict):
@@ -786,9 +793,8 @@ def _sort(context, data_dict, field_ids):
def _insert_links(data_dict, limit, offset):
- ''' Adds link to the next/prev part (same limit, offset=offset+limit)
- and the resource page.
- '''
+ '''Adds link to the next/prev part (same limit, offset=offset+limit)
+ and the resource page.'''
data_dict['_links'] = {}
# get the url from the request
@@ -963,23 +969,24 @@ def create(context, data_dict):
trans.commit()
return _unrename_json_field(data_dict)
except IntegrityError, e:
- if ('duplicate key value violates unique constraint' in str(e)
- or 'could not create unique index' in str(e)):
+ if int(e.orig.pgcode) == _PG_ERR_CODE['unique_violation']:
raise ValidationError({
- 'constraints': ['Cannot insert records or create index because of uniqueness constraint'],
+ 'constraints': ['Cannot insert records or create index because '
+ 'of uniqueness constraint'],
'info': {
'details': str(e)
}
})
- else:
- raise
- except Exception, e:
- trans.rollback()
- if 'due to statement timeout' in str(e):
+ raise
+ except DBAPIError, e:
+ if int(e.orig.pgcode) == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Query took too long']
})
raise
+ except Exception, e:
+ trans.rollback()
+ raise
finally:
context['connection'].close()
@@ -1005,22 +1012,24 @@ def upsert(context, data_dict):
trans.commit()
return _unrename_json_field(data_dict)
except IntegrityError, e:
- if 'duplicate key value violates unique constraint' in str(e):
+ if int(e.orig.pgcode) == _PG_ERR_CODE['unique_violation']:
raise ValidationError({
- 'constraints': ['Cannot insert records because of uniqueness constraint'],
+ 'constraints': ['Cannot insert records or create index because '
+ 'of uniqueness constraint'],
'info': {
'details': str(e)
}
})
- else:
- raise
- except Exception, e:
- trans.rollback()
- if 'due to statement timeout' in str(e):
+ raise
+ except DBAPIError, e:
+ if int(e.orig.pgcode) == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Query took too long']
})
raise
+ except Exception, e:
+ trans.rollback()
+ raise
finally:
context['connection'].close()
@@ -1079,8 +1088,8 @@ def search(context, data_dict):
data_dict['resource_id'])]
})
return search_data(context, data_dict)
- except Exception, e:
- if 'due to statement timeout' in str(e):
+ except DBAPIError, e:
+ if int(e.orig.pgcode) == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Search took too long']
})
@@ -1112,10 +1121,10 @@ def search_sql(context, data_dict):
'orig': [str(e.orig)]
}
})
- except Exception, e:
- if 'due to statement timeout' in str(e):
+ except DBAPIError, e:
+ if int(e.orig.pgcode) == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
- 'query': ['Search took too long']
+ 'query': ['Query took too long']
})
raise
finally:
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -1,6 +1,5 @@
import logging
import pylons
-from sqlalchemy.exc import ProgrammingError
import ckan.plugins as p
import ckanext.datastore.logic.action as action
@@ -18,9 +17,6 @@ class DatastoreException(Exception):
class DatastorePlugin(p.SingletonPlugin):
- '''
- Datastore plugin.
- '''
p.implements(p.IConfigurable, inherit=True)
p.implements(p.IActions)
p.implements(p.IAuthFunctions)
@@ -51,27 +47,25 @@ def configure(self, config):
self.write_url = self.config['ckan.datastore.write_url']
if self.legacy_mode:
self.read_url = self.write_url
+ log.warn('Legacy mode active. '
+ 'The sql search will not be available.')
else:
self.read_url = self.config['ckan.datastore.read_url']
- if model.engine_is_pg():
- if not self._is_read_only_database():
- # Make sure that the right permissions are set
- # so that no harmful queries can be made
- if not ('debug' in config and config['debug']):
- self._check_separate_db()
- if self.legacy_mode:
- log.warn('Legacy mode active. The sql search will not be available.')
- else:
- self._check_read_permissions()
+ if not model.engine_is_pg():
+ log.warn('We detected that you do not use a PostgreSQL '
+ 'database. The DataStore will NOT work and DataStore '
+ 'tests will be skipped.')
+ return
- self._create_alias_table()
- else:
- log.warn("We detected that CKAN is running on a read only database. "
- "Permission checks and the creation of _table_metadata are skipped.")
+ if self._is_read_only_database():
+ log.warn('We detected that CKAN is running on a read '
+ 'only database. Permission checks and the creation '
+ 'of _table_metadata are skipped.')
else:
- log.warn("We detected that you do not use a PostgreSQL database. "
- "The DataStore will NOT work and datastore tests will be skipped.")
+ self._check_urls_and_permissions()
+
+ self._create_alias_table()
## Do light wrapping around action function to add datastore_active
## to resource dict. Not using IAction extension as this prevents
@@ -106,79 +100,77 @@ def new_resource_show(context, data_dict):
new_resource_show._datastore_wrapped = True
logic._actions['resource_show'] = new_resource_show
+ def _log_or_raise(self, message):
+ if self.config.get('debug'):
+ log.critical(message)
+ else:
+ raise DatastoreException(message)
+
+ def _check_urls_and_permissions(self):
+ # Make sure that the right permissions are set
+ # so that no harmful queries can be made
+
+ if self._same_ckan_and_datastore_db():
+ self._log_or_raise('CKAN and DataStore database '
+ 'cannot be the same.')
+
+ # in legacy mode, the read and write url are ths same (both write url)
+ # consequently the same url check and and write privilege check
+ # don't make sense
+ if not self.legacy_mode:
+ if self._same_read_and_write_url():
+ self._log_or_raise('The write and read-only database '
+ 'connection urls are the same.')
+
+ if not self._read_connection_has_correct_privileges():
+ self._log_or_raise('The read-only user has write privileges.')
+
def _is_read_only_database(self):
+ ''' Returns True if no connection has CREATE privileges on the public
+ schema. This is the case if replication is enabled.'''
for url in [self.ckan_url, self.write_url, self.read_url]:
connection = db._get_engine(None,
- {'connection_url': url}).connect()
- trans = connection.begin()
- try:
- sql = u"CREATE TABLE test_readonly(id INTEGER);"
- connection.execute(sql)
- except ProgrammingError, e:
- if 'permission denied' in str(e) or 'read-only transaction' in str(e):
- pass
- else:
- raise
- else:
+ {'connection_url': url}).connect()
+ sql = u"SELECT has_schema_privilege('public', 'CREATE')"
+ is_writable = connection.execute(sql).first()[0]
+ if is_writable:
return False
- finally:
- trans.rollback()
return True
- def _check_separate_db(self):
- '''
- Make sure the datastore is on a separate db. Otherwise one could access
- all internal tables via the api.
- '''
-
- if not self.legacy_mode:
- if self.write_url == self.read_url:
- raise Exception("The write and read-only database connection url are the same.")
-
- if self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url):
- raise Exception("The CKAN and datastore database are the same.")
+ def _same_ckan_and_datastore_db(self):
+ '''Returns True if the CKAN and DataStore db are the same'''
+ return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)
def _get_db_from_url(self, url):
return url[url.rindex("@"):]
- def _check_read_permissions(self):
- '''
- Check whether the right permissions are set for the read only user.
+ def _same_read_and_write_url(self):
+ return self.write_url == self.read_url
+
+ def _read_connection_has_correct_privileges(self):
+ ''' Returns True if the right permissions are set for the read only user.
A table is created by the write user to test the read only user.
'''
write_connection = db._get_engine(None,
{'connection_url': self.write_url}).connect()
- write_connection.execute(u"DROP TABLE IF EXISTS public._foo;"
- u"CREATE TABLE public._foo (id INTEGER, name VARCHAR)")
-
read_connection = db._get_engine(None,
{'connection_url': self.read_url}).connect()
- statements = [
- u"CREATE TABLE public._bar (id INTEGER, name VARCHAR)",
- u"INSERT INTO public._foo VALUES (1, 'okfn')"
- ]
+ drop_foo_sql = u'DROP TABLE IF EXISTS _foo'
+
+ write_connection.execute(drop_foo_sql)
try:
- for sql in statements:
- read_trans = read_connection.begin()
- try:
- read_connection.execute(sql)
- except ProgrammingError, e:
- if 'permission denied' not in str(e):
- raise
- else:
- log.info("Connection url {0}".format(self.read_url))
- if 'debug' in self.config and self.config['debug']:
- log.critical("We have write permissions on the read-only database.")
- else:
- raise Exception("We have write permissions on the read-only database.")
- finally:
- read_trans.rollback()
- except Exception:
- raise
+ write_connection.execute(u'CREATE TABLE _foo ()')
+ for privilege in ['INSERT', 'UPDATE', 'DELETE']:
+ test_privilege_sql = u"SELECT has_table_privilege('_foo', '{privilege}')"
+ sql = test_privilege_sql.format(privilege=privilege)
+ have_privilege = read_connection.execute(sql).first()[0]
+ if have_privilege:
+ return False
finally:
- write_connection.execute("DROP TABLE _foo")
+ write_connection.execute(drop_foo_sql)
+ return True
def _create_alias_table(self):
mapping_sql = '''
| diff --git a/ckanext/datastore/tests/test_configure.py b/ckanext/datastore/tests/test_configure.py
--- a/ckanext/datastore/tests/test_configure.py
+++ b/ckanext/datastore/tests/test_configure.py
@@ -1,17 +1,25 @@
import unittest
+import nose.tools
+import pyutilib.component.core
+import ckan.plugins
import ckanext.datastore.plugin as plugin
-class TestTypeGetters(unittest.TestCase):
+class TestConfiguration(unittest.TestCase):
def setUp(self):
- self.p = plugin.DatastorePlugin()
+ self._original_plugin = ckan.plugins.unload('datastore')
+ pyutilib.component.core.PluginGlobals.singleton_services()[plugin.DatastorePlugin] = True
+ self.p = pyutilib.component.core.PluginGlobals.singleton_services()[plugin.DatastorePlugin] = ckan.plugins.load('datastore')
+
+ def tearDown(self):
+ ckan.plugins.unload('datastore')
+ pyutilib.component.core.PluginGlobals.singleton_services()[plugin.DatastorePlugin] = self._original_plugin
def test_legacy_mode_default(self):
assert not self.p.legacy_mode
def test_set_legacy_mode(self):
- assert not self.p.legacy_mode
c = {
'sqlalchemy.url': 'bar',
'ckan.datastore.write_url': 'foo'
@@ -24,16 +32,87 @@ def test_set_legacy_mode(self):
assert self.p.write_url == 'foo'
assert self.p.read_url == 'foo'
- def test_check_separate_db(self):
- self.p.write_url = 'postgresql://u:pass@localhost/dt'
+ def test_check_separate_write_and_read_url(self):
+ self.p.write_url = 'postgresql://u:pass@localhost/ds'
+ self.p.read_url = 'postgresql://u:pass@localhost/ds'
+ assert self.p._same_read_and_write_url()
+
+ self.p.write_url = 'postgresql://u:pass@localhost/ds'
+ self.p.read_url = 'postgresql://u2:pass@localhost/ds'
+ assert not self.p._same_read_and_write_url()
+
+ def test_same_ckan_and_datastore_db(self):
+ self.p.read_url = 'postgresql://u2:pass@localhost/ckan'
+ self.p.ckan_url = 'postgresql://u:pass@localhost/ckan'
+ assert self.p._same_ckan_and_datastore_db()
+
self.p.read_url = 'postgresql://u:pass@localhost/dt'
self.p.ckan_url = 'postgresql://u:pass@localhost/ckan'
+ assert not self.p._same_ckan_and_datastore_db()
+
+ def test_setup_plugin_for_check_urls_and_permissions_tests_should_leave_the_plugin_in_a_valid_state(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
+ self.p._check_urls_and_permissions() # Should be OK
+
+ def test_check_urls_and_permissions_requires_different_ckan_and_datastore_dbs(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
+ self.p._same_ckan_and_datastore_db = lambda: False
+ self.p._check_urls_and_permissions() # Should be OK
+
+ self.p._same_ckan_and_datastore_db = lambda: True
+ nose.tools.assert_raises(InvalidUrlsOrPermissionsException, self.p._check_urls_and_permissions)
+
+ def test_check_urls_and_permissions_requires_different_read_and_write_urls_when_not_in_legacy_mode(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
+ self.p.legacy_mode = False
+
+ self.p._same_read_and_write_url = lambda: False
+ self.p._check_urls_and_permissions() # Should be OK
+
+ self.p._same_read_and_write_url = lambda: True
+ nose.tools.assert_raises(InvalidUrlsOrPermissionsException, self.p._check_urls_and_permissions)
+
+ def test_check_urls_and_permissions_doesnt_require_different_read_and_write_urls_when_in_legacy_mode(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
self.p.legacy_mode = True
- try:
- self.p._check_separate_db()
- except Exception:
- self.fail("_check_separate_db raise Exception unexpectedly!")
+ self.p._same_read_and_write_url = lambda: False
+ self.p._check_urls_and_permissions() # Should be OK
+
+ self.p._same_read_and_write_url = lambda: True
+ self.p._check_urls_and_permissions() # Should be OK
+
+ def test_check_urls_and_permissions_requires_read_connection_with_correct_privileges_when_not_in_legacy_mode(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
self.p.legacy_mode = False
- self.assertRaises(Exception, self.p._check_separate_db)
+
+ self.p._read_connection_has_correct_privileges = lambda: True
+ self.p._check_urls_and_permissions() # Should be OK
+
+ self.p._read_connection_has_correct_privileges = lambda: False
+ nose.tools.assert_raises(InvalidUrlsOrPermissionsException, self.p._check_urls_and_permissions)
+
+ def test_check_urls_and_permissions_doesnt_care_about_read_connection_privileges_when_in_legacy_mode(self):
+ self.setUp_plugin_for_check_urls_and_permissions_tests()
+ self.p.legacy_mode = True
+
+ self.p._read_connection_has_correct_privileges = lambda: True
+ self.p._check_urls_and_permissions() # Should be OK
+
+ self.p._read_connection_has_correct_privileges = lambda: False
+ self.p._check_urls_and_permissions() # Should be OK
+
+ def setUp_plugin_for_check_urls_and_permissions_tests(self):
+ def _raise_invalid_urls_or_permissions_exception(message):
+ raise InvalidUrlsOrPermissionsException(message)
+
+ self.p._same_ckan_and_datastore_db = lambda: False
+ self.p.legacy_mode = True
+ self.p._same_read_and_write_url = lambda: False
+ self.p._read_connection_has_correct_privileges = lambda: True
+ self.p._log_or_raise = _raise_invalid_urls_or_permissions_exception
+
+
+class InvalidUrlsOrPermissionsException(Exception):
+ pass
| Datastore depends on english localisation
There are many checks where we rely on english error messages in the datastore.
See https://github.com/okfn/ckan/blob/master/ckanext/datastore/db.py
It is probably a good idea to use the [error codes](http://www.postgresql.org/docs/9.1/static/errcodes-appendix.html) instead of error message even though one error code seems to subsume a number of possible errors.
| 2013-03-28T18:11:24 |
|
ckan/ckan | 722 | ckan__ckan-722 | [
"621"
] | 96dee5fa6a3e7244f2fd40158d9a8485d5ef450f | diff --git a/ckan/logic/schema.py b/ckan/logic/schema.py
--- a/ckan/logic/schema.py
+++ b/ckan/logic/schema.py
@@ -116,6 +116,7 @@ def default_create_tag_schema():
def default_create_package_schema():
schema = {
+ '__before': [duplicate_extras_key, ignore],
'id': [empty],
'revision_id': [ignore],
'name': [not_empty, unicode, name_validator, package_name_validator],
@@ -139,7 +140,6 @@ def default_create_package_schema():
'tags': default_tags_schema(),
'tag_string': [ignore_missing, tag_string_convert],
'extras': default_extras_schema(),
- 'extras_validation': [duplicate_extras_key, ignore],
'save': [ignore],
'return_to': [ignore],
'relationships_as_object': default_relationship_schema(),
diff --git a/ckan/logic/validators.py b/ckan/logic/validators.py
--- a/ckan/logic/validators.py
+++ b/ckan/logic/validators.py
@@ -313,7 +313,9 @@ def duplicate_extras_key(key, data, errors, context):
for extra_key in set(extras_keys):
extras_keys.remove(extra_key)
if extras_keys:
- errors[key].append(_('Duplicate key "%s"') % extras_keys[0])
+ key_ = ('extras_validation',)
+ assert key_ not in errors
+ errors[key_] = [_('Duplicate key "%s"') % extras_keys[0]]
def group_name_validator(key, data, errors, context):
model = context['model']
| diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -1132,6 +1132,40 @@ def test_42_resource_search_accessible_via_get_request(self):
assert "index" in resource['description'].lower()
assert "json" in resource['format'].lower()
+ def test_package_create_duplicate_extras_error(self):
+ import ckan.tests
+ import paste.fixture
+ import pylons.test
+
+ # Posting a dataset dict to package_create containing two extras dicts
+ # with the same key, should return a Validation Error.
+ app = paste.fixture.TestApp(pylons.test.pylonsapp)
+ error = ckan.tests.call_action_api(app, 'package_create',
+ apikey=self.sysadmin_user.apikey, status=409,
+ name='foobar', extras=[{'key': 'foo', 'value': 'bar'},
+ {'key': 'foo', 'value': 'gar'}])
+ assert error['__type'] == 'Validation Error'
+ assert error['extras_validation'] == ['Duplicate key "foo"']
+
+ def test_package_update_duplicate_extras_error(self):
+ import ckan.tests
+ import paste.fixture
+ import pylons.test
+
+ # We need to create a package first, so that we can update it.
+ app = paste.fixture.TestApp(pylons.test.pylonsapp)
+ package = ckan.tests.call_action_api(app, 'package_create',
+ apikey=self.sysadmin_user.apikey, name='foobar')
+
+ # Posting a dataset dict to package_update containing two extras dicts
+ # with the same key, should return a Validation Error.
+ package['extras'] = [{'key': 'foo', 'value': 'bar'},
+ {'key': 'foo', 'value': 'gar'}]
+ error = ckan.tests.call_action_api(app, 'package_update',
+ apikey=self.sysadmin_user.apikey, status=409, **package)
+ assert error['__type'] == 'Validation Error'
+ assert error['extras_validation'] == ['Duplicate key "foo"']
+
class TestActionTermTranslation(WsgiAppCase):
@classmethod
@@ -1553,5 +1587,3 @@ def test_02_bulk_delete(self):
res = self.app.get('/api/action/package_search?q=*:*')
assert json.loads(res.body)['result']['count'] == 0
-
-
| Resource creation fails with IDatasetForm extra fields with names sorting before and after 'extra_validation'
Steps to reproduce:
1. Enable an IDatasetForm plugin that has extra fields with names that sort both before and after the string 'extra_validation', such as the one in https://github.com/wardi/ckan/commit/eeb42671620e166581b225dc3cf6ee32581f2fe7
2. Create a dataset (adding resources will still work if your plugin accepts its extra fields in the package_metadata_fields.html as the one above does)
3. Click Edit package, then Add resource
4. Try to save the resource
What happens:
A traceback is issued with an IndexError in ckan.lib.navl.dictization_functions.flatten()
Workaround:
I wrote a patch https://github.com/wardi/ckan/commit/fa9929877573aebc146ef4fde15048d517d4a3a0 that avoids the problem. This fix is rather inelegant, better would be to execute the 'extra_validation' rule before or after all the other rules. I leave that decision to the experts.
| @wardi could you replace the links so that they point to the specific file/patch you are referring to. Also if you could add the traceback in a comment on this issue that would be really helpful
thanks
@tobes links updated and:
```
Module ckan.controllers.package:645 in new_resource view
>> get_action('resource_create')(context, data)
Module ckan.logic:324 in wrapped view
>> return _action(context, data_dict, **kw)
Module ckan.logic.action.create:251 in resource_create view
>> pkg_dict = _get_action('package_update')(context, pkg_dict)
Module ckan.logic:324 in wrapped view
>> return _action(context, data_dict, **kw)
Module ckan.logic.action.update:251 in package_update view
>> data, errors = _validate(data_dict, schema, context)
Module ckan.lib.navl.dictization_functions:228 in validate view
>> converted_data, errors = _validate(flattened, schema, context)
Module ckan.lib.navl.dictization_functions:289 in _validate view
>> convert(converter, key, converted_data, errors, context)
Module ckan.lib.navl.dictization_functions:186 in convert view
>> converter(key, converted_data, errors, context)
Module ckan.logic.validators:306 in duplicate_extras_key view
>> unflattened = unflatten(data)
Module ckan.lib.navl.dictization_functions:393 in unflatten view
>> current_pos = current_pos[key]
IndexError: list index out of range
```
From the mailing list http://lists.okfn.org/pipermail/ckan-dev/2013-March/004242.html
Fabian says this applies to another issue as well
```
I was also able to fix my old issue: http://lists.okfn.org/pipermail/ckan-dev/2013-January/003852.html
by adding your fix to the lib.navl.dictization_functions.py validate method:
data_no_extras = dict(converted_data)
if ('extras',) in converted_data:
del data_no_extras[('extras',)]
converted_data = unflatten(data_no_extras)
#converted_data = unflatten(converted_data)
```
@seanh have you any thoughts on this?
Yes, I think I need a drink
:)
The schema is a dict so I don't know how we can make it run the "extra_validation" rule before or after all the others, unless we special-case that rule, If @wardi's fix fixes this problem and doesn't break any of the tests (either the core tests or my IDatasetForm tests) then I would be tempted to take it as-is, on the grounds that `validate()` is best treated as a black box.
I want to merge my pending IDatasetForm pull request before I look at this one
@seanh Assigning this to you just so you are aware of it and in case is related to the IDatasetForm stuff you are working on now. Feel free to ignore it if not.
I think we need to get my other idatasetform pull requests merged first, then we can see if this bug still occurs and if this fix still fixes it
Confirmed this is still happening on master
Confirmed @wardi's commit fixes the problem, and all the tests (including example_idatasetform tests) still pass with this commit. I'm tempted to just take this commit but I'll have a look after lunch and see if there's a more elegant way...
| 2013-04-01T20:05:05 |
ckan/ckan | 837 | ckan__ckan-837 | [
"436"
] | f4d3d726228504081e114e40c18132bab550e3c0 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1484,7 +1484,6 @@ def resource_preview(resource, pkg_id):
data_dict = {'resource': resource, 'package': c.package}
if not resource['url']:
- log.info('No url for resource {0} defined.'.format(resource['id']))
return snippet("dataviewer/snippets/no_preview.html",
resource_type=format_lower,
reason='No valid resource url has been defined.')
@@ -1504,9 +1503,6 @@ def resource_preview(resource, pkg_id):
elif format_lower in loadable_in_iframe:
url = resource['url']
else:
- log.info(
- 'No preview handler for resource type {0}'.format(format_lower)
- )
return snippet("dataviewer/snippets/no_preview.html",
resource_type=format_lower)
| Unicode filetypes break preview
This pr also improves the descriptions for when no preview is shown.
- [x] Improve descriptions
- [x] Make it work with unicode
- [x] Remove old libraries
- [x] All strings are translatable
| So, is this patch ready for merging?
In my opinion, yes.
Ops... just saw that @johnglover is assigned. It's probably better for him to check it :-)
I've merged this no need to delay things
@domoritz @tobes
This has not been merged properly into the `release-v2.0` branch as can be seen here:
https://github.com/okfn/ckan/blob/release-v2.0/ckan/lib/helpers.py#L1508
This unicode bug currently breaks https://offenedaten.de with umlauts in resource names because it is deployed on the `release-v2.0` branch. Please merge properly.
| 2013-04-28T15:52:36 |
|
ckan/ckan | 894 | ckan__ckan-894 | [
"893"
] | 1b71730157ede1dcc441e8afcca3af470b0dd836 | diff --git a/ckan/model/meta.py b/ckan/model/meta.py
--- a/ckan/model/meta.py
+++ b/ckan/model/meta.py
@@ -154,13 +154,13 @@ def after_rollback(self, session):
metadata = MetaData()
-def engine_is_sqlite():
+def engine_is_sqlite(sa_engine=None):
# Returns true iff the engine is connected to a sqlite database.
- return engine.url.drivername == 'sqlite'
+ return (sa_engine or engine).url.drivername == 'sqlite'
-def engine_is_pg():
+def engine_is_pg(sa_engine=None):
# Returns true iff the engine is connected to a postgresql database.
# According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
# all Postgres driver names start with `postgresql`
- return engine.url.drivername.startswith('postgresql')
+ return (sa_engine or engine).url.drivername.startswith('postgresql')
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -52,7 +52,9 @@ def configure(self, config):
else:
self.read_url = self.config['ckan.datastore.read_url']
- if not model.engine_is_pg():
+ read_engine = db._get_engine(
+ None, {'connection_url': self.read_url})
+ if not model.engine_is_pg(read_engine):
log.warn('We detected that you do not use a PostgreSQL '
'database. The DataStore will NOT work and DataStore '
'tests will be skipped.')
@@ -75,13 +77,9 @@ def configure(self, config):
resource_show = p.toolkit.get_action('resource_show')
def new_resource_show(context, data_dict):
- engine = db._get_engine(
- context,
- {'connection_url': self.read_url}
- )
new_data_dict = resource_show(context, data_dict)
try:
- connection = engine.connect()
+ connection = read_engine.connect()
result = connection.execute(
'SELECT 1 FROM "_table_metadata" WHERE name = %s AND alias_of IS NULL',
new_data_dict['id']
| diff --git a/ckan/tests/__init__.py b/ckan/tests/__init__.py
--- a/ckan/tests/__init__.py
+++ b/ckan/tests/__init__.py
@@ -334,6 +334,7 @@ def is_migration_supported():
return is_supported_db
def is_datastore_supported():
+ # we assume that the datastore uses the same db engine that ckan uses
is_supported_db = model.engine_is_pg()
return is_supported_db
| datastore plugin pgsql check on datastore DB, not main DB
I ran into a confusing situation here, getting "We detected that you do not use a PostgreSQL database. The DataStore will NOT work and DataStore tests will be skipped." I had the datastore as postgres://, so I tried changing to postgresql:// and got the same error. I googled, found PR 359 which says it's looking for 'postgresql', got more confused, looked through the code ...
Finally realized the code is checking the sqlalchemy.url, _not_ the datastore URL. This doesn't seem to be the right behavior or match the error message, so I made a simple patch to apply the same drivername startswith check to the datastore write_url directly, which seems the least-invasive way to make the behavior/error more understandable.
| @themgt Good catch. I didn't think someone would use a different db for CKAN and the datastore. We will need to write it a little bit different, though.
| 2013-05-11T14:11:00 |
ckan/ckan | 898 | ckan__ckan-898 | [
"897"
] | 1b71730157ede1dcc441e8afcca3af470b0dd836 | diff --git a/ckan/logic/action/update.py b/ckan/logic/action/update.py
--- a/ckan/logic/action/update.py
+++ b/ckan/logic/action/update.py
@@ -170,7 +170,6 @@ def resource_update(context, data_dict):
model = context['model']
user = context['user']
id = _get_or_bust(data_dict, "id")
- schema = context.get('schema') or schema_.default_update_resource_schema()
resource = model.Resource.get(id)
context["resource"] = resource
@@ -181,6 +180,13 @@ def resource_update(context, data_dict):
_check_access('resource_update', context, data_dict)
+ if 'schema' in context:
+ schema = context['schema']
+ else:
+ package_plugin = lib_plugins.lookup_package_plugin(
+ resource.resource_group.package.type)
+ schema = package_plugin.update_package_schema()['resources']
+
data, errors = _validate(data_dict, schema, context)
if errors:
model.Session.rollback()
| resource_update doesn't use IDatasetForm for validation
With an IDatasetForm it is possible to add validation that applies to resources added while creating a dataset, but after creation editing resources has no validation from the IDatasetForm applied, and any values may be entered that satisfy the default resource schema.
| 2013-05-13T13:03:54 |
||
ckan/ckan | 1,009 | ckan__ckan-1009 | [
"1008"
] | ac1159b632fc8d89ce76b671a25755a19755cd30 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1457,7 +1457,12 @@ def format_resource_items(items):
continue
# size is treated specially as we want to show in MiB etc
if key == 'size':
- value = formatters.localised_filesize(int(value))
+ try:
+ value = formatters.localised_filesize(int(value))
+ except ValueError:
+ # Sometimes values that can't be converted to ints can sneak
+ # into the db. In this case, just leave them as they are.
+ pass
elif isinstance(value, basestring):
# check if strings are actually datetime/number etc
if re.search(reg_ex_datetime, value):
| ValueError when showing resources whose "size" is not an int
Example: http://www.publicdata.eu/dataset/pks-register-of-tender-documentation/resource/07554517-5171-4ff8-9ed3-fa5b6431a296
It crashes because the resource's "size" is not an int: http://publicdata.eu/api/3/action/resource_show?id=07554517-5171-4ff8-9ed3-fa5b6431a296
```
File '/usr/lib/ckan/src/ckan/ckan/lib/helpers.py', line 1454 in format_resource_items
value = formatters.localised_filesize(int(value))
ValueError: invalid literal for int() with base 10: 'None'
```
| 2013-06-14T16:03:21 |
||
ckan/ckan | 1,113 | ckan__ckan-1113 | [
"1111"
] | ccc41adb21c6cd1019a83e6a0cdc4e344c3883a7 | diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -245,7 +245,6 @@ def get_auth_functions(self):
'datastore_change_permissions': auth.datastore_change_permissions}
def before_map(self, m):
- print "Load mapping"
m.connect('/datastore/dump/{resource_id}',
controller='ckanext.datastore.controller:DatastoreController',
action='dump')
| 500 Error Upon Production Deployment
Hi everyone,
I was able to get CKAN working just fine locally, but trying to deploy per the instructions (http://docs.ckan.org/en/latest/deployment.html) gives me a 500 error. I checked permissions and everything is correct. Here's the log:
```
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] mod_wsgi (pid=17063): Target WSGI script '/etc/ckan/default/apache.wsgi' cannot be loaded as Python module.
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] mod_wsgi (pid=17063): Exception occurred processing WSGI script '/etc/ckan/default/apache.wsgi'.
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] Traceback (most recent call last):
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/etc/ckan/default/apache.wsgi", line 9, in <module>
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] application = loadapp('config:%s' % config_filepath)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 247, in loadapp
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] return loadobj(APP, uri, name=name, **kw)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 272, in loadobj
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] return context.create()
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 710, in create
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] return self.object_type.invoke(self)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 146, in invoke
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] return fix_call(context.object, context.global_conf, **context.local_conf)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/util.py", line 56, in fix_call
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] val = callable(*args, **kw)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/middleware.py", line 54, in make_app
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] load_environment(conf, app_conf)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/environment.py", line 177, in load_environment
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] config['routes.map'] = routing.make_map()
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/routing.py", line 100, in make_map
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] map = plugin.before_map(map)
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckanext/datastore/plugin.py", line 248, in before_map
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] print "Load mapping"
[Mon Jul 15 20:31:51 2013] [error] [client 198.211.103.157] IOError: sys.stdout access restricted by mod_wsgi
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] mod_wsgi (pid=17064): Target WSGI script '/etc/ckan/default/apache.wsgi' cannot be loaded as Python module.
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] mod_wsgi (pid=17064): Exception occurred processing WSGI script '/etc/ckan/default/apache.wsgi'.
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] Traceback (most recent call last):
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/etc/ckan/default/apache.wsgi", line 9, in <module>
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] application = loadapp('config:%s' % config_filepath)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 247, in loadapp
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] return loadobj(APP, uri, name=name, **kw)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 272, in loadobj
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] return context.create()
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 710, in create
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] return self.object_type.invoke(self)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/loadwsgi.py", line 146, in invoke
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] return fix_call(context.object, context.global_conf, **context.local_conf)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/lib/python2.6/site-packages/paste/deploy/util.py", line 56, in fix_call
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] val = callable(*args, **kw)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/middleware.py", line 54, in make_app
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] load_environment(conf, app_conf)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/environment.py", line 177, in load_environment
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] config['routes.map'] = routing.make_map()
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckan/config/routing.py", line 100, in make_map
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] map = plugin.before_map(map)
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] File "/usr/lib/ckan/default/src/ckan/ckanext/datastore/plugin.py", line 248, in before_map
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] print "Load mapping"
[Mon Jul 15 20:31:55 2013] [error] [client 198.211.103.157] IOError: sys.stdout access restricted by mod_wsgi
[Mon Jul 15 20:36:53 2013] [error] Exception KeyError: KeyError(140103623919424,) in <module 'threading' from '/usr/lib/python2.6/threading.pyc'> ignored
[Mon Jul 15 20:36:53 2013] [error] Exception KeyError: KeyError(140103623919424,) in <module 'threading' from '/usr/lib/python2.6/threading.pyc'> ignored
[Mon Jul 15 20:36:54 2013] [error] Exception AttributeError: "'NoneType' object has no attribute 'clearing'" in <bound method PluginEnvironment.__del__ of Services for Environment '<default>'
[Mon Jul 15 20:36:54 2013] [error] > ignored
[Mon Jul 15 20:36:54 2013] [error] Exception AttributeError: "'NoneType' object has no attribute 'clearing'" in <bound method PluginEnvironment.__del__ of Services for Environment '<default>'
[Mon Jul 15 20:36:54 2013] [error] > ignored
[Mon Jul 15 20:36:59 2013] [error] Load mapping
[Mon Jul 15 20:37:02 2013] [error] 2013-07-15 20:37:02,827 INFO [ckan.lib.base] / render time 2.856 seconds
```
| Thanks for reporting this bug. The print statement in `ckan/ckanext/datastore/plugin.py`, line 248 should not be there and apache does not like it. You can delete it.
Thanks for reporting this bug. The print statement in `ckan/ckanext/datastore/plugin.py`, line 248 should not be there and apache does not like it. You can delete it.
So I was trying to be clever and branch this off the 2.1 release branch commit a5e601b is all we care about
@domoritz can you cherry pick into master and get into 2.1 :)
| 2013-07-16T15:26:21 |
|
ckan/ckan | 1,161 | ckan__ckan-1161 | [
"1159"
] | 9e01b092b9b72674a5bb811ccb0dbeedbd85eb3f | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -2258,7 +2258,6 @@ def recently_changed_packages_activity_list_html(context, data_dict):
extra_vars = {
'controller': 'package',
'action': 'activity',
- 'id': data_dict['id'],
'offset': offset,
}
return activity_streams.activity_list_to_html(context, activity_stream,
| recently_changed_packages_activity_stream in helpers.py is broken
We are attempting to put the 'recently changed packages' activity stream on the front page of our website, using code like this:
<pre>
{{ h.recently_changed_packages_activity_stream() }}
</pre>
However, this gives the following error:
<pre>
[Tue Aug 06 15:07:27 2013] [error] File '/usr/lib/ckan/default/lib/python2.7/site-packages/ckanext_qgov_data-SNAPSHOT-py2.7.egg/ckanext/qgov/theme/templates/home/index.html', line 172 in block "home_content"
[Tue Aug 06 15:07:27 2013] [error] {{ h.recently_changed_packages_activity_stream() }}
[Tue Aug 06 15:07:27 2013] [error] File '/usr/lib/ckan/default/src/ckan/ckan/lib/helpers.py', line 1325 in recently_changed_packages_activity_stream
[Tue Aug 06 15:07:27 2013] [error] context, {})
[Tue Aug 06 15:07:27 2013] [error] File '/usr/lib/ckan/default/src/ckan/ckan/logic/__init__.py', line 324 in wrapped
[Tue Aug 06 15:07:27 2013] [error] return _action(context, data_dict, **kw)
[Tue Aug 06 15:07:27 2013] [error] File '/usr/lib/ckan/default/src/ckan/ckan/logic/action/get.py', line 2174 in recently_changed_packages_activity_list_html
[Tue Aug 06 15:07:27 2013] [error] 'id': data_dict['id'],
[Tue Aug 06 15:07:27 2013] [error] KeyError: 'id'
</pre>
This worked correctly in version 1.8, but we are in the process of migrating to 2.0, and are being held up by this issue.
| 2013-08-06T12:30:02 |
||
ckan/ckan | 1,179 | ckan__ckan-1179 | [
"1137"
] | 01a3c050a3ffaaf1e3791e50301b6ffb9e5c8568 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -64,6 +64,13 @@ def site_read(context,data_dict=None):
def package_list(context, data_dict):
'''Return a list of the names of the site's datasets (packages).
+ :param limit: if given, the list of datasets will be broken into pages of
+ at most ``limit`` datasets per page and only one page will be returned
+ at a time (optional)
+ :type limit: int
+ :param offset: when ``limit`` is given, the offset to start returning packages from
+ :type offset: int
+
:rtype: list of strings
'''
@@ -72,6 +79,12 @@ def package_list(context, data_dict):
_check_access('package_list', context, data_dict)
+ schema = context.get('schema', logic.schema.default_pagination_schema())
+ data_dict, errors = _validate(data_dict, schema, context)
+ if errors:
+ raise ValidationError(errors)
+
+
package_revision_table = model.package_revision_table
col = (package_revision_table.c.id
if api == 2 else package_revision_table.c.name)
@@ -79,6 +92,14 @@ def package_list(context, data_dict):
query = query.where(_and_(package_revision_table.c.state=='active',
package_revision_table.c.current==True))
query = query.order_by(col)
+
+ limit = data_dict.get('limit')
+ if limit:
+ query = query.limit(limit)
+
+ offset = data_dict.get('offset')
+ if offset:
+ query = query.offset(offset)
return list(zip(*query.execute())[0])
def current_package_list_with_resources(context, data_dict):
| diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -60,8 +60,8 @@ def _add_basic_package(self, package_name=u'test_package', **kwargs):
return json.loads(res.body)['result']
def test_01_package_list(self):
- postparams = '%s=1' % json.dumps({})
- res = json.loads(self.app.post('/api/action/package_list', params=postparams).body)
+ res = json.loads(self.app.post('/api/action/package_list',
+ headers={'content-type': 'application/json'}).body)
assert res['success'] is True
assert len(res['result']) == 2
assert 'warandpeace' in res['result']
@@ -69,6 +69,13 @@ def test_01_package_list(self):
assert res['help'].startswith(
"Return a list of the names of the site's datasets (packages).")
+ postparams = '%s=1' % json.dumps({'limit': 1})
+ res = json.loads(self.app.post('/api/action/package_list',
+ params=postparams).body)
+ assert res['success'] is True
+ assert len(res['result']) == 1
+ assert 'warandpeace' in res['result'] or 'annakarenina' in res['result']
+
# Test GET request
res = json.loads(self.app.get('/api/action/package_list').body)
assert len(res['result']) == 2
| Implement paging in the API
## Problem
http://catalog.data.gov/api/3/action/package_list gives a time out (actually a 503 temp unavailable, but I think a time out is the real cause) because there are too many datasets in data.gov.
## Suggested solution
Implement paging in the API v3: the query to the database should be LIMIT-ed. When more rows are available, a HTTP header should be set linking towards the next page. The URL of the next page can be implemented using GET parameters: ?page=2 or ?limit=...&offset=... or ...
| 2013-08-11T15:07:19 |
|
ckan/ckan | 1,189 | ckan__ckan-1189 | [
"1180"
] | c78557a5c12031b3ba6a02cfa43eebb89b426204 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -850,6 +850,10 @@ def _group_or_org_show(context, data_dict, is_org=False):
if group is None:
raise NotFound
+ if is_org and not group.is_organization:
+ raise NotFound
+ if not is_org and group.is_organization:
+ raise NotFound
if is_org:
_check_access('organization_show',context, data_dict)
| diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -1644,3 +1644,57 @@ def test_02_bulk_delete(self):
res = self.app.get('/api/action/package_search?q=*:*')
assert json.loads(res.body)['result']['count'] == 0
+
+
+class TestGroupOrgView(WsgiAppCase):
+
+ @classmethod
+ def setup_class(cls):
+ model.Session.add_all([
+ model.User(name=u'sysadmin', apikey=u'sysadmin',
+ password=u'sysadmin', sysadmin=True),
+ ])
+ model.Session.commit()
+
+ org_dict = '%s=1' % json.dumps({
+ 'name': 'org',
+ })
+ res = cls.app.post('/api/action/organization_create',
+ extra_environ={'Authorization': 'sysadmin'},
+ params=org_dict)
+ cls.org_id = json.loads(res.body)['result']['id']
+
+ group_dict = '%s=1' % json.dumps({
+ 'name': 'group',
+ })
+ res = cls.app.post('/api/action/group_create',
+ extra_environ={'Authorization': 'sysadmin'},
+ params=group_dict)
+ cls.group_id = json.loads(res.body)['result']['id']
+
+ @classmethod
+ def teardown_class(self):
+ model.repo.rebuild_db()
+
+ def test_1_view_org(self):
+ res = self.app.get('/api/action/organization_show',
+ params={'id': self.org_id})
+ res_json = json.loads(res.body)
+ assert res_json['success'] is True
+
+ res = self.app.get('/api/action/group_show',
+ params={'id': self.org_id}, expect_errors=True)
+ res_json = json.loads(res.body)
+ assert res_json['success'] is False
+
+ def test_2_view_group(self):
+ res = self.app.get('/api/action/group_show',
+ params={'id': self.group_id})
+ res_json = json.loads(res.body)
+ assert res_json['success'] is True
+
+ res = self.app.get('/api/action/organization_show',
+ params={'id': self.group_id}, expect_errors=True)
+ res_json = json.loads(res.body)
+ assert res_json['success'] is False
+
| Groups and organizations can be visited with interchangeable URLs
A group can be visited with a /organization url as well.
See the following URLs
(bacon-is-awesome is actually a group)
http://demo.ckan.org/group/bacon-is-awesome
http://demo.ckan.org/organization/bacon-is-awesome
Seems to happen vice versa too
(ccccc is actually an organization)
http://demo.ckan.org/organization/ccccc
http://demo.ckan.org/group/ccccc
We should raise a 404 when you visit a group with a /organization url and when you visit something that's an organization with a /group URL.
| +1 to the 404 you propose
+1 to 404
| 2013-08-14T13:43:38 |
ckan/ckan | 1,216 | ckan__ckan-1216 | [
"1216"
] | b26147e8889a6c35fd1e063873d6597d855e6286 | diff --git a/ckan/logic/action/delete.py b/ckan/logic/action/delete.py
--- a/ckan/logic/action/delete.py
+++ b/ckan/logic/action/delete.py
@@ -73,8 +73,9 @@ def resource_delete(context, data_dict):
pkg_dict = _get_action('package_show')(context, {'id': package_id})
- if 'resources' in pkg_dict and id in pkg_dict['resources']:
- pkg_dict['resources'].remove(id)
+ if pkg_dict.get('resources'):
+ pkg_dict['resources'] = [r for r in pkg_dict['resources'] if not
+ r['id'] == id]
try:
pkg_dict = _get_action('package_update')(context, pkg_dict)
except ValidationError, e:
| diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -1705,3 +1705,56 @@ def test_2_view_group(self):
res_json = json.loads(res.body)
assert res_json['success'] is False
+
+class TestResourceAction(WsgiAppCase):
+
+ sysadmin_user = None
+
+ normal_user = None
+
+ @classmethod
+ def setup_class(cls):
+ search.clear()
+ CreateTestData.create()
+ cls.sysadmin_user = model.User.get('testsysadmin')
+
+ @classmethod
+ def teardown_class(cls):
+ model.repo.rebuild_db()
+
+ def _add_basic_package(self, package_name=u'test_package', **kwargs):
+ package = {
+ 'name': package_name,
+ 'title': u'A Novel By Tolstoy',
+ 'resources': [{
+ 'description': u'Full text.',
+ 'format': u'plain text',
+ 'url': u'http://www.annakarenina.com/download/'
+ }]
+ }
+ package.update(kwargs)
+
+ postparams = '%s=1' % json.dumps(package)
+ res = self.app.post('/api/action/package_create', params=postparams,
+ extra_environ={'Authorization': 'tester'})
+ return json.loads(res.body)['result']
+
+ def test_01_delete_resource(self):
+ res_dict = self._add_basic_package()
+ pkg_id = res_dict['id']
+
+ resource_count = len(res_dict['resources'])
+ id = res_dict['resources'][0]['id']
+ url = '/api/action/resource_delete'
+
+ # Use the sysadmin user because this package doesn't belong to an org
+ res = self.app.post(url, params=json.dumps({'id': id}),
+ extra_environ={'Authorization': str(self.sysadmin_user.apikey)})
+ res_dict = json.loads(res.body)
+ assert res_dict['success'] is True
+
+ url = '/api/action/package_show'
+ res = self.app.get(url, {'id': pkg_id})
+ res_dict = json.loads(res.body)
+ assert res_dict['success'] is True
+ assert len(res_dict['result']['resources']) == resource_count - 1
| resource_delete does not work
`resource_delete` was broken with this commit https://github.com/okfn/ckan/commit/c972c1c02778d484cb22158a0980bf52564a8327. pkg_dict['resources'] is a `list`, not a `dict`, so line 76 will never return `True`.
Accidentally ran into this when testing filestore modifications. **Update**: Seems to affect only master.

| 2013-09-03T09:23:06 |
|
ckan/ckan | 1,279 | ckan__ckan-1279 | [
"1257"
] | 1e5b72fd943e60069d33ceba917e90900b951aa5 | diff --git a/ckan/logic/auth/create.py b/ckan/logic/auth/create.py
--- a/ckan/logic/auth/create.py
+++ b/ckan/logic/auth/create.py
@@ -23,7 +23,7 @@ def package_create(context, data_dict=None):
# If an organization is given are we able to add a dataset to it?
data_dict = data_dict or {}
- org_id = data_dict.get('organization_id')
+ org_id = data_dict.get('owner_org')
if org_id and not new_authz.has_user_permission_for_group_or_org(
org_id, user, 'create_dataset'):
return {'success': False, 'msg': _('User %s not authorized to add dataset to this organization') % user}
diff --git a/ckan/new_authz.py b/ckan/new_authz.py
--- a/ckan/new_authz.py
+++ b/ckan/new_authz.py
@@ -243,7 +243,10 @@ def has_user_permission_for_group_or_org(group_id, user_name, permission):
''' Check if the user has the given permission for the group '''
if not group_id:
return False
- group_id = model.Group.get(group_id).id
+ group = model.Group.get(group_id)
+ if not group:
+ return False
+ group_id = group.id
# Sys admins can do anything
if is_sysadmin(user_name):
| diff --git a/ckan/tests/logic/test_auth.py b/ckan/tests/logic/test_auth.py
--- a/ckan/tests/logic/test_auth.py
+++ b/ckan/tests/logic/test_auth.py
@@ -123,17 +123,20 @@ def test_03_create_dataset_no_org(self):
self._call_api('package_create', dataset, 'no_org', 403)
def test_04_create_dataset_with_org(self):
-
+ org_with_user = self._call_api('organization_show', {'id':
+ 'org_with_user'}, 'sysadmin')
dataset = {'name': 'admin_create_with_user',
- 'owner_org': 'org_with_user'}
+ 'owner_org': org_with_user.json['result']['id']}
self._call_api('package_create', dataset, 'sysadmin', 200)
+ org_no_user = self._call_api('organization_show', {'id':
+ 'org_no_user'}, 'sysadmin')
dataset = {'name': 'sysadmin_create_no_user',
- 'owner_org': 'org_no_user'}
+ 'owner_org': org_no_user.json['result']['id']}
self._call_api('package_create', dataset, 'sysadmin', 200)
dataset = {'name': 'user_create_with_org',
- 'owner_org': 'org_with_user'}
+ 'owner_org': org_with_user.json['result']['id']}
self._call_api('package_create', dataset, 'no_org', 403)
def test_05_add_users_to_org(self):
@@ -164,7 +167,7 @@ def _add_datasets(self, user):
#not able to add dataset to org admin does not belong to.
dataset = {'name': user + '_dataset_bad', 'owner_org': 'org_no_user'}
- self._call_api('package_create', dataset, user, 409)
+ self._call_api('package_create', dataset, user, 403)
#admin not able to make dataset not owned by a org
dataset = {'name': user + '_dataset_bad'}
@@ -172,7 +175,7 @@ def _add_datasets(self, user):
#not able to add org to not existant org
dataset = {'name': user + '_dataset_bad', 'owner_org': 'org_not_exist'}
- self._call_api('package_create', dataset, user, 409)
+ self._call_api('package_create', dataset, user, 403)
def test_07_add_datasets(self):
self._add_datasets('org_admin')
| package_create uses the wrong parameter for organization
`package_create` uses `organization_id` from the data_dict to check if the user is able to add a dataset to the organization. However, according to the documentation, `owner_org` is the parameter that should have the organization id.
[Link to code](https://github.com/okfn/ckan/blob/master/ckan/logic/auth/create.py#L26)
| 2013-10-16T08:53:28 |
|
ckan/ckan | 1,285 | ckan__ckan-1285 | [
"504"
] | d52345bb23aca34d74e9d975261284b2f9ee78cd | diff --git a/ckan/logic/action/update.py b/ckan/logic/action/update.py
--- a/ckan/logic/action/update.py
+++ b/ckan/logic/action/update.py
@@ -132,7 +132,7 @@ def related_update(context, data_dict):
id = _get_or_bust(data_dict, "id")
session = context['session']
- schema = context.get('schema') or schema_.default_related_schema()
+ schema = context.get('schema') or schema_.default_update_related_schema()
related = model.Related.get(id)
context["related"] = related
diff --git a/ckan/logic/schema.py b/ckan/logic/schema.py
--- a/ckan/logic/schema.py
+++ b/ckan/logic/schema.py
@@ -332,6 +332,15 @@ def default_related_schema():
return schema
+def default_update_related_schema():
+ schema = default_related_schema()
+ schema['id'] = [not_empty, unicode]
+ schema['title'] = [ignore_missing, unicode]
+ schema['type'] = [ignore_missing, unicode]
+ schema['owner_id'] = [ignore_missing, unicode]
+ return schema
+
+
def default_extras_schema():
schema = {
| diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -1975,3 +1975,69 @@ def _assert_we_can_add_user_to_group(self, user_id, group_id):
group_ids = [g.id for g in groups]
assert res['success'] is True, res
assert group.id in group_ids, (group, user_groups)
+
+
+class TestRelatedAction(WsgiAppCase):
+
+ sysadmin_user = None
+
+ normal_user = None
+
+ @classmethod
+ def setup_class(cls):
+ search.clear()
+ CreateTestData.create()
+ cls.sysadmin_user = model.User.get('testsysadmin')
+
+ @classmethod
+ def teardown_class(cls):
+ model.repo.rebuild_db()
+
+ def _add_basic_package(self, package_name=u'test_package', **kwargs):
+ package = {
+ 'name': package_name,
+ 'title': u'A Novel By Tolstoy',
+ 'resources': [{
+ 'description': u'Full text.',
+ 'format': u'plain text',
+ 'url': u'http://www.annakarenina.com/download/'
+ }]
+ }
+ package.update(kwargs)
+
+ postparams = '%s=1' % json.dumps(package)
+ res = self.app.post('/api/action/package_create', params=postparams,
+ extra_environ={'Authorization': 'tester'})
+ return json.loads(res.body)['result']
+
+ def test_update_add_related_item(self):
+ package = self._add_basic_package()
+ related_item = {
+ "description": "Testing a Description",
+ "url": "http://example.com/image.png",
+ "title": "Testing",
+ "featured": 0,
+ "image_url": "http://example.com/image.png",
+ "type": "idea",
+ "dataset_id": package['id'],
+ }
+ related_item_json = json.dumps(related_item)
+ res_create = self.app.post('/api/action/related_create',
+ params=related_item_json,
+ extra_environ={'Authorization': 'tester'})
+ assert res_create.json['success']
+
+ related_update = res_create.json['result']
+ related_update = {'id': related_update['id'], 'title': 'Updated'}
+ related_update_json = json.dumps(related_update)
+ res_update = self.app.post('/api/action/related_update',
+ params=related_update_json,
+ extra_environ={'Authorization': 'tester'})
+ assert res_update.json['success']
+ res_update_json = res_update.json['result']
+ assert res_update_json['title'] == related_update['title']
+
+ related_item.pop('title')
+ related_item.pop('dataset_id')
+ for field in related_item:
+ assert related_item[field] == res_update_json[field]
| related_update needs owner_id and type
I discovered while writing tests for activity stream that `related_update` doesn't seem to work without providing `owner_id` and `type`. It should only need `id`.
| 2013-10-21T09:50:30 |
|
ckan/ckan | 1,315 | ckan__ckan-1315 | [
"877"
] | f8edc00aa7a0ee5f2c498b4f2bdf48f0cbb86760 | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -57,6 +57,8 @@
.. |sqlalchemy| replace:: SQLAlchemy
.. |javascript| replace:: JavaScript
.. |apache| replace:: Apache
+.. |nginx_config_file| replace:: /etc/nginx/sites-available/ckan_default
+.. |reload_nginx| replace:: sudo service nginx reload
'''
@@ -87,7 +89,7 @@
href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons
Attribution ShareAlike (Unported) v3.0 License</a>.<br />
<img src="http://i.creativecommons.org/l/by-sa/3.0/80x15.png" alt="CC License Logo" />
- <a href="http://opendefinition.org/"><img src="http://assets.okfn.org/images/ok_buttons/oc_80x15_blue.png" border="0"
+ <a href="http://opendefinition.org/"><img src="http://assets.okfn.org/images/ok_buttons/oc_80x15_blue.png" border="0"
alt="{{ _('Open Content') }}" /></a>
'''
html_show_sphinx = False
@@ -146,7 +148,7 @@
#'sidebarbgcolor': '#F2F2F2',
#'sidebartextcolor': 'black',
#'sidebarlinkcolor': '#355F7C',
-#'headfont': 'Trebuchet MS'
+#'headfont': 'Trebuchet MS'
#}
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
| Add documentation for Nginx configuration with CKAN
We use Nginx and Apache and this needs to be documented.
| I'm going to push this to 2.2, to land after the doc reorg has landed.
@nigelbabu Any update on this one?
I'll add this to my list for next week.
@seanh How do you want me to write this documentation? Should I add it as an alternative or should I ask everyone to use Nginx?
I think we should only explicitly mention how to use Nginx in the source deploy instructions (which already mention Nginx but don't explain how to use it)
http://docs.ckan.org/en/latest/deployment.html
The complication is that in the package we have Nginx reverse proxying Apache.
I think is fine to recommend the apache-nginx setup for source installs as well if we are using it for the package install. We can change the port in step 5, explain why are we using nginx and add an extra step 6 for the nginx conf file. Or if you prefer add it as an optional step at the end.
| 2013-11-13T09:01:25 |
|
ckan/ckan | 1,327 | ckan__ckan-1327 | [
"1255"
] | f8edc00aa7a0ee5f2c498b4f2bdf48f0cbb86760 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -761,6 +761,9 @@ def package_show(context, data_dict):
:param id: the id or name of the dataset
:type id: string
+ :param use_default_schema: use default package schema instead of
+ a custom schema defined with an IDatasetForm plugin (default: False)
+ :type use_default_schema: bool
:rtype: dictionary
@@ -778,6 +781,9 @@ def package_show(context, data_dict):
_check_access('package_show', context, data_dict)
+ if data_dict.get('use_default_schema', False):
+ context['schema'] = ckan.logic.schema.default_show_package_schema()
+
package_dict = None
use_cache = (context.get('use_cache', True)
and not 'revision_id' in context
@@ -1364,6 +1370,9 @@ def package_search(context, data_dict):
"count", "display_name" and "name" entries. The display_name is a
form of the name that can be used in titles.
:type search_facets: nested dict of dicts.
+ :param use_default_schema: use default package schema instead of
+ a custom schema defined with an IDatasetForm plugin (default: False)
+ :type use_default_schema: bool
An example result: ::
@@ -1429,8 +1438,10 @@ def package_search(context, data_dict):
results = []
if not abort:
+ data_source = 'data_dict' if data_dict.get('use_default_schema',
+ False) else 'validated_data_dict'
# return a list of package ids
- data_dict['fl'] = 'id data_dict'
+ data_dict['fl'] = 'id {0}'.format(data_source)
# If this query hasn't come from a controller that has set this flag
# then we should remove any mention of capacity from the fq and
@@ -1452,7 +1463,7 @@ def package_search(context, data_dict):
for package in query.results:
# get the package object
- package, package_dict = package['id'], package.get('data_dict')
+ package, package_dict = package['id'], package.get(data_source)
pkg_query = session.query(model.PackageRevision)\
.filter(model.PackageRevision.id == package)\
.filter(_and_(
| package_search results don't look like package_show results when using custom schemas
If we used the new validated_data_dict value in solr for the objects returned from package_search then they would match the ones returned from package_show.
| @wardi .Happy with this. Do you want todo it? Only thing that would be good to add is an extra option for package_search in order to show the original (pre schema).
| 2013-11-18T21:00:46 |
|
ckan/ckan | 1,493 | ckan__ckan-1493 | [
"1485"
] | 72f55306269a00c5bec72701f0b5978054ed7423 | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -236,22 +236,11 @@ def write_latest_release_file():
# Options for HTML output
# -----------------------
-
-#html_theme = 'default'
-#html_theme_options = {
-#"relbarbgcolor": "#777",
-#'sidebarbgcolor': '#F2F2F2',
-#'sidebartextcolor': 'black',
-#'sidebarlinkcolor': '#355F7C',
-#'headfont': 'Trebuchet MS'
-#}
-sys.path.append(os.path.abspath('_themes'))
-html_theme_path = ['_themes']
-html_theme = 'sphinx-theme-okfn'
-html_theme_options = {
- 'logo_icon': 'ckanlogo.png',
- 'show_version': True
- }
+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+if not on_rtd:
+ import sphinx_rtd_theme
+ html_theme = 'sphinx_rtd_theme'
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_sidebars = {
'**': ['globaltoc.html'],
| Switch to default RTD theme.
| 2014-02-04T05:53:18 |
||
ckan/ckan | 1,502 | ckan__ckan-1502 | [
"1502"
] | 0b366c207932f21a08d6cf3d4a5ec0faa3d588b6 | diff --git a/ckan/controllers/user.py b/ckan/controllers/user.py
--- a/ckan/controllers/user.py
+++ b/ckan/controllers/user.py
@@ -365,8 +365,6 @@ def logged_in(self):
user_dict = get_action('user_show')(context, data_dict)
- h.flash_success(_("%s is now logged in") %
- user_dict['display_name'])
return self.me()
else:
err = _('Login failed. Bad username or password.')
| diff --git a/ckan/tests/functional/test_user.py b/ckan/tests/functional/test_user.py
--- a/ckan/tests/functional/test_user.py
+++ b/ckan/tests/functional/test_user.py
@@ -168,7 +168,6 @@ def test_login(self):
res.header('Location').startswith('/dashboard')
res = res.follow()
assert_equal(res.status, 200)
- assert 'testlogin is now logged in' in res.body
assert 'checkpoint:my-dashboard' in res.body
# check user object created
@@ -372,7 +371,6 @@ def test_register_whilst_logged_in(self):
while res.status == 302:
res = res.follow()
assert_equal(res.status, 200)
- assert 'User B is now logged in' in res.body, res.body
@search_related
def test_home_login(self):
| "{User} has been logged in" flash message is pointless
I can see I'm logged in.
| 2014-02-11T11:46:30 |
|
ckan/ckan | 1,554 | ckan__ckan-1554 | [
"1507"
] | 1d7ea5130a13c96d02bac17e492355071b3a69b3 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -47,14 +47,6 @@
_text = sqlalchemy.text
-def _package_list_with_resources(context, package_revision_list):
- package_list = []
- for package in package_revision_list:
- result_dict = model_dictize.package_dictize(package, context)
- package_list.append(result_dict)
- return package_list
-
-
def site_read(context, data_dict=None):
'''Return ``True``.
@@ -130,6 +122,7 @@ def current_package_list_with_resources(context, data_dict):
model = context["model"]
limit = data_dict.get('limit')
offset = data_dict.get('offset', 0)
+ user = context['user']
if not 'offset' in data_dict and 'page' in data_dict:
log.warning('"page" parameter is deprecated. '
@@ -142,16 +135,11 @@ def current_package_list_with_resources(context, data_dict):
_check_access('current_package_list_with_resources', context, data_dict)
- query = model.Session.query(model.PackageRevision)
- query = query.filter(model.PackageRevision.state == 'active')
- query = query.filter(model.PackageRevision.current == True)
- query = query.order_by(
- model.package_revision_table.c.revision_timestamp.desc())
- if limit is not None:
- query = query.limit(limit)
- query = query.offset(offset)
- pack_rev = query.all()
- return _package_list_with_resources(context, pack_rev)
+ is_sysadmin = new_authz.is_sysadmin(user)
+ q = '+capacity:public' if not is_sysadmin else '*:*'
+ context['ignore_capacity_check'] = True
+ search = package_search(context, {'q': q, 'rows': limit, 'start': offset})
+ return search.get('results', [])
def revision_list(context, data_dict):
| diff --git a/ckan/new_tests/factories.py b/ckan/new_tests/factories.py
--- a/ckan/new_tests/factories.py
+++ b/ckan/new_tests/factories.py
@@ -6,10 +6,10 @@
http://factoryboy.readthedocs.org/en/latest/
-These are not meant to be used for the actual testing, e.g. if you're writing a
-test for the :py:func:`~ckan.logic.action.create.user_create` function then
-call :py:func:`~ckan.new_tests.helpers.call_action`, don't test it
-via the :py:class:`~ckan.new_tests.factories.User` factory below.
+These are not meant to be used for the actual testing, e.g. if you're writing
+a test for the :py:func:`~ckan.logic.action.create.user_create` function then
+call :py:func:`~ckan.new_tests.helpers.call_action`, don't test it via the
+:py:class:`~ckan.new_tests.factories.User` factory below.
Usage::
@@ -205,11 +205,13 @@ def _create(cls, target_class, *args, **kwargs):
if args:
assert False, "Positional args aren't supported, use keyword args."
- #TODO: we will need to be able to define this when creating the
- # instance perhaps passing a 'user' param?
- context = {
- 'user': helpers.call_action('get_site_user')['name']
- }
+ if 'user' in kwargs:
+ user_dict = kwargs.pop('user')
+ context = {'user': user_dict['name']}
+ else:
+ context = {
+ 'user': helpers.call_action('get_site_user')['name']
+ }
group_dict = helpers.call_action('organization_create',
context=context,
@@ -247,6 +249,39 @@ def _create(cls, target_class, *args, **kwargs):
return related_dict
+class Dataset(factory.Factory):
+ '''A factory class for creating CKAN datasets.'''
+
+ FACTORY_FOR = ckan.model.package
+
+ # These are the default params that will be used to create new groups.
+ title = 'Test Dataset'
+ description = 'Just another test dataset.'
+
+ # Generate a different group name param for each user that gets created.
+ name = factory.Sequence(lambda n: 'test_dataset_{n}'.format(n=n))
+
+ @classmethod
+ def _build(cls, target_class, *args, **kwargs):
+ raise NotImplementedError(".build() isn't supported in CKAN")
+
+ @classmethod
+ def _create(cls, target_class, *args, **kwargs):
+ if args:
+ assert False, "Positional args aren't supported, use keyword args."
+
+ assert 'user' in kwargs, ('The Dataset factory requires an extra '
+ 'user=user_dict keyword argument (the user '
+ 'who will create the group)')
+ user_dict = kwargs.pop('user')
+ context = {'user': user_dict['name']}
+
+ dataset_dict = helpers.call_action('package_create',
+ context=context,
+ **kwargs)
+ return dataset_dict
+
+
class MockUser(factory.Factory):
'''A factory class for creating mock CKAN users using the mock library.'''
diff --git a/ckan/new_tests/logic/action/test_get.py b/ckan/new_tests/logic/action/test_get.py
--- a/ckan/new_tests/logic/action/test_get.py
+++ b/ckan/new_tests/logic/action/test_get.py
@@ -6,6 +6,9 @@
import ckan.new_tests.factories as factories
+eq = nose.tools.eq_
+
+
class TestGet(object):
@classmethod
@@ -246,6 +249,71 @@ def test_related_list_featured(self):
# TODO: Create related items associated with a dataset and test
# related_list with them
+ def test_current_package_list(self):
+ '''
+ Test current_package_list_with_resources with no parameters
+ '''
+ user = factories.User()
+ dataset1 = factories.Dataset(user=user)
+ dataset2 = factories.Dataset(user=user)
+ current_package_list = helpers. \
+ call_action('current_package_list_with_resources')
+ eq(len(current_package_list), 2)
+
+ def test_current_package_list_limit_param(self):
+ '''
+ Test current_package_list_with_resources with limit parameter
+ '''
+ user = factories.User()
+ dataset1 = factories.Dataset(user=user)
+ dataset2 = factories.Dataset(user=user)
+ current_package_list = helpers. \
+ call_action('current_package_list_with_resources', limit=1)
+ eq(len(current_package_list), 1)
+ eq(current_package_list[0]['name'], dataset2['name'])
+
+ def test_current_package_list_offset_param(self):
+ '''
+ Test current_package_list_with_resources with offset parameter
+ '''
+ user = factories.User()
+ dataset1 = factories.Dataset(user=user)
+ dataset2 = factories.Dataset(user=user)
+ current_package_list = helpers. \
+ call_action('current_package_list_with_resources', offset=1)
+ eq(len(current_package_list), 1)
+ eq(current_package_list[0]['name'], dataset1['name'])
+
+ def test_current_package_list_private_datasets_anonoymous_user(self):
+ '''
+ Test current_package_list_with_resources with an anoymous user and
+ a private dataset
+ '''
+ user = factories.User()
+ org = factories.Organization(user=user)
+ dataset1 = factories.Dataset(user=user, owner_org=org['name'],
+ private=True)
+ dataset2 = factories.Dataset(user=user)
+ current_package_list = helpers. \
+ call_action('current_package_list_with_resources', context={})
+ eq(len(current_package_list), 1)
+
+ def test_current_package_list_private_datasets_sysadmin_user(self):
+ '''
+ Test current_package_list_with_resources with a sysadmin user and a
+ private dataset
+ '''
+ user = factories.User()
+ org = factories.Organization(user=user)
+ dataset1 = factories.Dataset(user=user, owner_org=org['name'],
+ private=True)
+ dataset2 = factories.Dataset(user=user)
+ sysadmin = factories.Sysadmin()
+ current_package_list = helpers. \
+ call_action('current_package_list_with_resources', context={'user':
+ sysadmin['name']})
+ eq(len(current_package_list), 2)
+
class TestBadLimitQueryParameters(object):
'''test class for #1258 non-int query parameters cause 500 errors
@@ -266,6 +334,7 @@ def test_activity_list_actions(self):
'group_activity_list_html',
'organization_activity_list_html',
'recently_changed_packages_activity_list_html',
+ 'current_package_list_with_resources',
]
for action in actions:
nose.tools.assert_raises(
diff --git a/ckan/tests/logic/test_action.py b/ckan/tests/logic/test_action.py
--- a/ckan/tests/logic/test_action.py
+++ b/ckan/tests/logic/test_action.py
@@ -114,48 +114,6 @@ def test_01_package_list_private(self):
assert 'public_dataset' in res
assert not 'private_dataset' in res
- def test_01_current_package_list_with_resources(self):
- url = '/api/action/current_package_list_with_resources'
-
- postparams = '%s=1' % json.dumps({
- 'limit': 1,
- 'offset': 1})
- res = json.loads(self.app.post(url, params=postparams).body)
- assert res['success']
- assert len(res['result']) == 1
-
- postparams = '%s=1' % json.dumps({
- 'limit': '5'})
- res = json.loads(self.app.post(url, params=postparams).body)
- assert res['success']
-
- postparams = '%s=1' % json.dumps({
- 'limit': -2})
- res = json.loads(self.app.post(url, params=postparams,
- status=StatusCodes.STATUS_409_CONFLICT).body)
- assert not res['success']
-
- postparams = '%s=1' % json.dumps({
- 'offset': 'a'})
- res = json.loads(self.app.post(url, params=postparams,
- status=StatusCodes.STATUS_409_CONFLICT).body)
- assert not res['success']
-
- postparams = '%s=1' % json.dumps({
- 'limit': 2,
- 'page': 1})
- res = json.loads(self.app.post(url, params=postparams).body)
- assert res['success']
- assert len(res['result']) == 2
-
- postparams = '%s=1' % json.dumps({
- 'limit': 1,
- 'page': 0})
- res = json.loads(self.app.post(url,
- params=postparams,
- status=StatusCodes.STATUS_409_CONFLICT).body)
- assert not res['success']
-
def test_01_package_show(self):
anna_id = model.Package.by_name(u'annakarenina').id
postparams = '%s=1' % json.dumps({'id': anna_id})
| `current_package_list_with_resources` needs to use `package_search`
The `current_package_list_with_resources` action calls the models directly. This needs to use `package_search` for speed and handling private datasets correctly.
| for CKAN 2.0 and 2.1 the package_search results don't look like the package_show results when IDatasetForm plugins are in use. Won't this be a problem for backporting this fix?
Yes, we can not change the output of the API calls for old releases. For CKAN 2.0 and 2.1 we should really only add the one line DB fix. For 2.2 we can backport the `package_search` refactor (as long as the output don't change)
| 2014-02-27T07:28:47 |
ckan/ckan | 1,565 | ckan__ckan-1565 | [
"1541"
] | e15651d6fd8ccd382a0c8b96eac78411ea23eb03 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -774,7 +774,7 @@ def dict_list_reduce(list_, key, unique=True):
def linked_gravatar(email_hash, size=100, default=None):
return literal(
'<a href="https://gravatar.com/" target="_blank" ' +
- 'title="%s">' % _('Update your avatar at gravatar.com') +
+ 'title="%s" alt="">' % _('Update your avatar at gravatar.com') +
'%s</a>' % gravatar(email_hash, size, default)
)
| Alt text for widgets
The widgets for dashboard, settings, etc have no alt text.
They seemingly are inserted with some clever hack using CSS (so in fact the images are visible in Safari or Firefox even if image loading is turned off) but loading the page in a text browser such as Lynx, they don't appear at all, so the user can't see those links.
| Unfortunately the images here aren't actually images, they are icons rendered using fonts. As such it isn't possible to give them alt tags.
@rossjones That's lovely, but what are you supposed to do if browsing in text mode? Isn't there an accessibility issue?
Also, the user's gravatar should have `alt=""` (no alt text is needed as the name or username is also linked to the profile page).
| 2014-02-28T13:32:49 |
|
ckan/ckan | 1,605 | ckan__ckan-1605 | [
"1592"
] | 9d35add335d7edb81ffe467b4e1220d2ec5051c7 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1870,6 +1870,15 @@ def unified_resource_format(format):
def check_config_permission(permission):
return new_authz.check_config_permission(permission)
+
+def get_organization(org=None):
+ if org is None:
+ return {}
+ try:
+ return logic.get_action('organization_show')({}, {'id': org})
+ except (NotFound, ValidationError, NotAuthorized):
+ return {}
+
# these are the functions that will end up in `h` template helpers
__allowed_functions__ = [
# functions defined in ckan.lib.helpers
@@ -1957,6 +1966,7 @@ def check_config_permission(permission):
'list_dict_filter',
'new_activities',
'time_ago_from_timestamp',
+ 'get_organization',
'has_more_facets',
# imported into ckan.lib.helpers
'literal',
| Organization image not shown when viewing dataset
When you are viewing a dataset that has an organization the organization's profile image is not shown.
Organization.html is not passed image_display_url when used in package templates.
| This is because the `organization` dicts in `package_show` does not have the `image_display_url` added. It's only added by the dictization functions.
| 2014-03-18T05:27:05 |
|
ckan/ckan | 1,612 | ckan__ckan-1612 | [
"599"
] | aebc52ca8bf9e4384b6171478923e19b178012a6 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -532,7 +532,7 @@ def default_group_type():
return str(config.get('ckan.default.group_type', 'group'))
-def get_facet_items_dict(facet, limit=10, exclude_active=False):
+def get_facet_items_dict(facet, limit=None, exclude_active=False):
'''Return the list of unselected facet items for the given facet, sorted
by count.
@@ -564,12 +564,41 @@ def get_facet_items_dict(facet, limit=10, exclude_active=False):
elif not exclude_active:
facets.append(dict(active=True, **facet_item))
facets = sorted(facets, key=lambda item: item['count'], reverse=True)
- if c.search_facets_limits:
+ if c.search_facets_limits and limit is None:
limit = c.search_facets_limits.get(facet)
- if limit:
+ if limit is not None:
return facets[:limit]
- else:
- return facets
+ return facets
+
+
+def has_more_facets(facet, limit=None, exclude_active=False):
+ '''
+ Returns True if there are more facet items for the given facet than the
+ limit.
+
+ Reads the complete list of facet items for the given facet from
+ c.search_facets, and filters out the facet items that the user has already
+ selected.
+
+ Arguments:
+ facet -- the name of the facet to filter.
+ limit -- the max. number of facet items.
+ exclude_active -- only return unselected facets.
+
+ '''
+ facets = []
+ for facet_item in c.search_facets.get(facet)['items']:
+ if not len(facet_item['name'].strip()):
+ continue
+ if not (facet, facet_item['name']) in request.params.items():
+ facets.append(dict(active=False, **facet_item))
+ elif not exclude_active:
+ facets.append(dict(active=True, **facet_item))
+ if c.search_facets_limits and limit is None:
+ limit = c.search_facets_limits.get(facet)
+ if limit is not None and len(facets) > limit:
+ return True
+ return False
def unselected_facet_items(facet, limit=10):
@@ -1843,6 +1872,7 @@ def check_config_permission(permission):
'list_dict_filter',
'new_activities',
'time_ago_from_timestamp',
+ 'has_more_facets',
# imported into ckan.lib.helpers
'literal',
'link_to',
| Hide 'show more' link on search facets when irrelevent
i.e. don't show "show more formats" link when there are no more formats or tags or groups to show: http://beta.ckan.org/dataset
| Duplicate, see #780
This issue still exists - As an example
http://master.ckan.org/dataset - Still shows "show more groups" and "show more formats" link when there are no more groups to display.
Also the "Show more..." links are much quite visually prominent compared to the facet titles and items. Suggest this should be less prominent.
@johnmartin - Please could you have a look?
FYI https://github.com/okfn/ckanext-iati/commit/ffe615f0d93
If we tweak `h.get_facet_items_dict` to actually support the limit param then we don't have to hack the ini file with `search.facets.default=11`
Maybe I dreamt it but I thought this had been fixed in another PR? @johnmartin please close this if so
@amercader I think you dreamt it. However I'm going to take a look at this today.
OK, I've taken a look at this... and I can update the templates to be a little nicer (using okfn/ckanext-iati@ffe615f as example). However when I give `h.get_facet_items_dict` a `limit` it just completely ignores it... so I'm not sure where to go from there. I'm unassigning myself and I'll bring this up in the next dev catchup.
| 2014-03-23T12:29:48 |
|
ckan/ckan | 1,613 | ckan__ckan-1613 | [
"1419"
] | aebc52ca8bf9e4384b6171478923e19b178012a6 | diff --git a/ckan/controllers/user.py b/ckan/controllers/user.py
--- a/ckan/controllers/user.py
+++ b/ckan/controllers/user.py
@@ -1,6 +1,5 @@
import logging
from urllib import quote
-from urlparse import urlparse
from pylons import config
@@ -356,7 +355,7 @@ def login(self, error=None):
def logged_in(self):
# redirect if needed
came_from = request.params.get('came_from', '')
- if self._sane_came_from(came_from):
+ if h.url_is_local(came_from):
return h.redirect_to(str(came_from))
if c.user:
@@ -390,7 +389,7 @@ def logout(self):
def logged_out(self):
# redirect if needed
came_from = request.params.get('came_from', '')
- if self._sane_came_from(came_from):
+ if h.url_is_local(came_from):
return h.redirect_to(str(came_from))
h.redirect_to(controller='user', action='logged_out_page')
@@ -686,14 +685,3 @@ def unfollow(self, id):
or e.error_dict)
h.flash_error(error_message)
h.redirect_to(controller='user', action='read', id=id)
-
- def _sane_came_from(self, url):
- '''Returns True if came_from is local'''
- if not url or (len(url) >= 2 and url.startswith('//')):
- return False
- parsed = urlparse(url)
- if parsed.scheme:
- domain = urlparse(h.url_for('/', qualified=True)).netloc
- if domain != parsed.netloc:
- return False
- return True
diff --git a/ckan/controllers/util.py b/ckan/controllers/util.py
--- a/ckan/controllers/util.py
+++ b/ckan/controllers/util.py
@@ -2,6 +2,8 @@
import ckan.lib.base as base
import ckan.lib.i18n as i18n
+import ckan.lib.helpers as h
+from ckan.common import _
class UtilController(base.BaseController):
@@ -10,7 +12,11 @@ class UtilController(base.BaseController):
def redirect(self):
''' redirect to the url parameter. '''
url = base.request.params.get('url')
- return base.redirect(url)
+
+ if h.url_is_local(url):
+ return base.redirect(url)
+ else:
+ base.abort(403, _('Redirecting to external site at %s not allowed.') % url)
def primer(self):
''' Render all html components out onto a single page.
diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -12,6 +12,7 @@
import urllib
import pprint
import copy
+import urlparse
from urllib import urlencode
from paste.deploy.converters import asbool
@@ -227,6 +228,18 @@ def _add_i18n_to_url(url_to_amend, **kw):
return url
+def url_is_local(url):
+ '''Returns True if url is local'''
+ if not url or (len(url) >= 2 and url.startswith('//')):
+ return False
+ parsed = urlparse.urlparse(url)
+ if parsed.scheme:
+ domain = urlparse.urlparse(url_for('/', qualified=True)).netloc
+ if domain != parsed.netloc:
+ return False
+ return True
+
+
def full_current_url():
''' Returns the fully qualified current url (eg http://...) useful
for sharing etc '''
| Warn the user when redirecting to an external site
CKAN doesn't currently do any checks when redirecting to a URL through /util/redirect. For example http://demo.ckan.org/util/redirect?url=http://www.google.com will send you directly to the target URL with no warning that what you're going to access is not hosted at http://demo.ckan.org.
It might be desirable to warn the user (and give a clickable link with the actual target URL) to avoid situations where e.g. http://reputable-site.com/util/redirect?url=http://undesirable-site.com/ could send oblivious users for example to malware sites.
If you think this is a good idea, I might be able to contribute.
| Ouch, yes open redirect is very bad https://www.owasp.org/index.php/Open_redirect
Not sure why we need the `/util/redirect` endpoint at all. If it is not used I suggest getting rid of it and handle redirects properly from the controllers code
@mwahlroos do you want to do a quick search on the code to see where it is used? (if it is at all)
@amercader it seems to be used by the UI language selector, but a quick search didn't reveal it being used anywhere else.
So, do you think the endpoint should be removed and the language selector modified to use something else instead, or the endpoint fixed?
We'll need to fix this for an extension we're developing anyway, so we might as well do it in a way acceptable for CKAN core rather than diverge with a solution that differs from what you guys are going to do.
I might tend towards removing the endpoint unless there's some other need for it beside the current use in the language selector. At the moment it's pretty much just a thin wrapper around other functionality anyway.
@mwahlroos Sorry for the late reply. I'm happy to remove the endpoint, but we still need to do a redirect on the language selector. Perhaps we can add a quick check on the controller action to only support internal redirects. There is a small function on the user controller that does it:
https://github.com/ckan/ckan/blob/625b51cdb0f1697add59c7e3faf723a48c8e04fd/ckan/controllers/user.py#L692
| 2014-03-25T14:01:45 |
|
ckan/ckan | 1,627 | ckan__ckan-1627 | [
"1434"
] | 3a134236dd318bb6a47d57df4ea0b7a91ee1540f | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -1055,10 +1055,11 @@ def _group_or_org_show(context, data_dict, is_org=False):
{'model': model, 'session': model.Session},
{'id': group_dict['id']})
- if schema:
- group_dict, errors = lib_plugins.plugin_validate(
- group_plugin, context, group_dict, schema,
- 'organization_show' if is_org else 'group_show')
+ if schema is None:
+ schema = logic.schema.default_show_group_schema()
+ group_dict, errors = lib_plugins.plugin_validate(
+ group_plugin, context, group_dict, schema,
+ 'organization_show' if is_org else 'group_show')
return group_dict
diff --git a/ckan/logic/schema.py b/ckan/logic/schema.py
--- a/ckan/logic/schema.py
+++ b/ckan/logic/schema.py
@@ -326,6 +326,22 @@ def default_update_group_schema():
schema["name"] = [ignore_missing, group_name_validator, unicode]
return schema
+def default_show_group_schema():
+ schema = default_group_schema()
+
+ # make default show schema behave like when run with no validation
+ schema['num_followers'] = []
+ schema['created'] = []
+ schema['display_name'] = []
+ schema['extras'] = {'__extras': [ckan.lib.navl.validators.keep_extras]}
+ schema['package_count'] = []
+ schema['packages'] = {'__extras': [ckan.lib.navl.validators.keep_extras]}
+ schema['revision_id'] = []
+ schema['state'] = []
+ schema['users'] = {'__extras': [ckan.lib.navl.validators.keep_extras]}
+
+ return schema
+
def default_related_schema():
schema = {
| num_followers and package_count not in default_group_schema
When creating a custom group schema, I start from default_group_schema, but it doesn't work on its own - I have to add in the package_count and num_followers fields to it. They are used by the templates.
snippets/organization.html requires the organization.package_count and organization.num_followers. validation is not run on group_show - why not? Shouldn't it run default_group_schema?
Here's how I got my IGroupForm schema to work:
```
def db_to_form_schema(self):
from ckan.logic.schema import default_group_schema
from ckan.lib.navl.validators import ignore_missing
schema = default_group_schema()
schema['num_followers'] = [ignore_missing]
schema['package_count'] = [ignore_missing]
return schema
```
| 2014-04-02T18:01:24 |
||
ckan/ckan | 1,654 | ckan__ckan-1654 | [
"1625"
] | 9d35add335d7edb81ffe467b4e1220d2ec5051c7 | diff --git a/ckan/logic/__init__.py b/ckan/logic/__init__.py
--- a/ckan/logic/__init__.py
+++ b/ckan/logic/__init__.py
@@ -259,7 +259,6 @@ def check_access(action, context, data_dict=None):
authorized to call the named action
'''
- action = new_authz.clean_action_name(action)
# Auth Auditing. We remove this call from the __auth_audit stack to show
# we have called the auth function
@@ -341,8 +340,6 @@ def get_action(action):
:rtype: callable
'''
- # clean the action names
- action = new_authz.clean_action_name(action)
if _actions:
if not action in _actions:
@@ -365,7 +362,6 @@ def get_action(action):
if (hasattr(v, '__call__')
and (v.__module__ == module_path
or hasattr(v, '__replaced'))):
- k = new_authz.clean_action_name(k)
_actions[k] = v
# Whitelist all actions defined in logic/action/get.py as
@@ -380,7 +376,6 @@ def get_action(action):
fetched_actions = {}
for plugin in p.PluginImplementations(p.IActions):
for name, auth_function in plugin.get_actions().items():
- name = new_authz.clean_action_name(name)
if name in resolved_action_plugins:
raise Exception(
'The action %r is already implemented in %r' % (
diff --git a/ckan/new_authz.py b/ckan/new_authz.py
--- a/ckan/new_authz.py
+++ b/ckan/new_authz.py
@@ -57,7 +57,6 @@ def _build(self):
for key, v in module.__dict__.items():
if not key.startswith('_'):
- key = clean_action_name(key)
# Whitelist all auth functions defined in
# logic/auth/get.py as not requiring an authorized user,
# as well as ensuring that the rest do. In both cases, do
@@ -75,7 +74,6 @@ def _build(self):
fetched_auth_functions = {}
for plugin in p.PluginImplementations(p.IAuthFunctions):
for name, auth_function in plugin.get_auth_functions().items():
- name = clean_action_name(name)
if name in resolved_auth_function_plugins:
raise Exception(
'The auth function %r is already implemented in %r' % (
@@ -106,13 +104,6 @@ def auth_functions_list():
return _AuthFunctions.keys()
-def clean_action_name(action_name):
- ''' Used to convert old style action names into new style ones '''
- new_action_name = re.sub('package', 'dataset', action_name)
- # CS: bad_spelling ignore
- return re.sub('licence', 'license', new_action_name)
-
-
def is_sysadmin(username):
''' Returns True is username is a sysadmin '''
user = _get_user(username)
@@ -157,7 +148,6 @@ def is_authorized(action, context, data_dict=None):
if context.get('ignore_auth'):
return {'success': True}
- action = clean_action_name(action)
auth_function = _AuthFunctions.get(action)
if auth_function:
username = context.get('user')
| Remove clean_action_name
clean_action_name ( https://github.com/ckan/ckan/blob/master/ckan/new_authz.py#L109-L113 ) is only called on IAuthFunctions implementations and renames them replacing 'package' with 'dataset'. This can end up with a user implementing an auth function test_package() which then if called with check_access('test_package') will fail because test_dataset could not be found.
This is horrible, it should be removed and documentation used to suggest people use the new naming scheme if it is important.
| +1
Good idea
Do it
| 2014-04-18T10:28:48 |
|
ckan/ckan | 1,655 | ckan__ckan-1655 | [
"1596"
] | 9d35add335d7edb81ffe467b4e1220d2ec5051c7 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -565,7 +565,8 @@ def organization_list_for_user(context, data_dict):
q = model.Session.query(model.Member) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(roles)) \
- .filter(model.Member.table_id == user_id)
+ .filter(model.Member.table_id == user_id) \
+ .filter(model.Member.state == 'active')
group_ids = []
for row in q.all():
| organization_list_for_user returns non-"active" organisations
In https://github.com/ckan/ckan/blob/master/ckan/logic/action/get.py at method organization_list_for_user the part
```
q = model.Session.query(model.Member) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(roles)) \
.filter(model.Member.table_id == user_id)
```
might be better written as
```
q = model.Session.query(model.Member) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(roles)) \
.filter(model.Member.table_id == user_id) \
.filter(model.Member.state == 'active')
```
because it can return deleted (or other state) members.
| Whilst looking at this I originally got confused by the similar `group_list_authz` at https://github.com/ckan/ckan/blob/master/ckan/logic/action/get.py#L427-L486 where it _does_ make sense (you want to know that you can edit deleted orgs).
I'm not sure how much value there is in organization_list_for_user returning deleted organisations, but then I'm not sure what harm there is either.
Can you elucidate why you think it is a problem?
If you add user to some organization and then remove user from this organization. Then in database the users' state is 'deleted'.
If you now try create dataset with this user (who has deleted state) the organization listing in the dataset creation page shows the deleted organization as selection and if you try to create dataset with that organization, then you get authorization error.
I did not checked other references, but at least in there it produces unwanted result.
Yep, definitely looks like an issue. Do you fancy submitting a PR for the fix?
Thanks, but you can take the honor.
| 2014-04-18T10:49:08 |
|
ckan/ckan | 1,666 | ckan__ckan-1666 | [
"1666"
] | f1d883837dc09b7767f4f083104fa4d8477fc9b5 | diff --git a/ckanext/datapusher/plugin.py b/ckanext/datapusher/plugin.py
--- a/ckanext/datapusher/plugin.py
+++ b/ckanext/datapusher/plugin.py
@@ -10,6 +10,8 @@
import ckan.model as model
import ckan.plugins.toolkit as toolkit
+from ckan.common import _
+
log = logging.getLogger(__name__)
_get_or_bust = logic.get_or_bust
@@ -61,6 +63,8 @@ def resource_data(self, id, resource_id):
)
except logic.NotFound:
datapusher_status = {}
+ except logic.NotAuthorized:
+ base.abort(401, _('Not authorized to see this page'))
return base.render('package/resource_data.html',
extra_vars={'status': datapusher_status})
| Datapusher extension throws an authorization error which it should have caught
If you visit the "DataStore" (from the edit page of the resource), it throws a traceback. Ideally, the datapusher should catch this one through a 403.
| 2014-04-24T10:58:06 |
||
ckan/ckan | 1,682 | ckan__ckan-1682 | [
"1682"
] | c942fe78b33c5ed4b1fa27b5110d130e486fdd03 | diff --git a/ckan/controllers/group.py b/ckan/controllers/group.py
--- a/ckan/controllers/group.py
+++ b/ckan/controllers/group.py
@@ -364,6 +364,9 @@ def bulk_process(self, id):
group_type = self._get_group_type(id.split('@')[0])
+ if group_type is None:
+ abort(404, _('Organization not found'))
+
if group_type != 'organization':
# FIXME: better error
raise Exception('Must be an organization')
| diff --git a/ckan/new_tests/controllers/test_group.py b/ckan/new_tests/controllers/test_group.py
new file mode 100644
--- /dev/null
+++ b/ckan/new_tests/controllers/test_group.py
@@ -0,0 +1,15 @@
+from nose.tools import assert_equal, assert_true
+
+from routes import url_for
+
+import ckan.new_tests.helpers as helpers
+import ckan.model as model
+
+
+class TestPackageControllerNew(helpers.FunctionalTestBase):
+
+ def test_bulk_process_throws_404_for_nonexistent_org(self):
+ app = self._get_test_app()
+ bulk_process_url = url_for(controller='organization',
+ action='bulk_process', id='does-not-exist')
+ response = app.get(url=bulk_process_url, status=404)
| bulk_process page for non-existent organization throws Exception
```
Stacktrace (most recent call last):
File "raven/middleware.py", line 35, in __call__
iterable = self.application(environ, start_response)
File "ckan/config/middleware.py", line 373, in __call__
return self.app(environ, start_response)
File "/usr/lib/ckan/demo/lib/python2.7/site-packages/paste/cascade.py", line 130, in __call__
return self.apps[-1](environ, start_response)
File "ckan/config/middleware.py", line 238, in __call__
return self.app(environ, start_response)
File "/usr/lib/ckan/demo/lib/python2.7/site-packages/paste/registry.py", line 379, in __call__
app_iter = self.application(environ, start_response)
File "/usr/lib/ckan/demo/lib/python2.7/site-packages/repoze/who/middleware.py", line 107, in __call__
app_iter = app(environ, wrapper.wrap_start_response)
File "webob/dec.py", line 147, in __call__
resp = self.call_func(req, *args, **self.kwargs)
File "webob/dec.py", line 208, in call_func
return self.func(req, *args, **kwargs)
File "fanstatic/publisher.py", line 234, in __call__
return request.get_response(self.app)
File "webob/request.py", line 1053, in get_response
application, catch_exc_info=False)
File "webob/request.py", line 1022, in call_application
app_iter = application(self.environ, start_response)
File "webob/dec.py", line 147, in __call__
resp = self.call_func(req, *args, **self.kwargs)
File "webob/dec.py", line 208, in call_func
return self.func(req, *args, **kwargs)
File "fanstatic/injector.py", line 54, in __call__
response = request.get_response(self.app)
File "webob/request.py", line 1053, in get_response
application, catch_exc_info=False)
File "webob/request.py", line 1022, in call_application
app_iter = application(self.environ, start_response)
File "beaker/middleware.py", line 73, in __call__
return self.app(environ, start_response)
File "beaker/middleware.py", line 155, in __call__
return self.wrap_app(environ, session_start_response)
File "routes/middleware.py", line 131, in __call__
response = self.app(environ, start_response)
File "pylons/wsgiapp.py", line 125, in __call__
response = self.dispatch(controller, environ, start_response)
File "pylons/wsgiapp.py", line 324, in dispatch
return controller(environ, start_response)
File "ckan/lib/base.py", line 346, in __call__
res = WSGIController.__call__(self, environ, start_response)
File "pylons/controllers/core.py", line 221, in __call__
response = self._dispatch_call()
File "pylons/controllers/core.py", line 172, in _dispatch_call
response = self._inspect_call(func)
File "pylons/controllers/core.py", line 107, in _inspect_call
result = self._perform_call(func, args)
File "ckan/controllers/group.py", line 368, in bulk_process
raise Exception('Must be an organization')
```
Especially annoying for large sites.
| 2014-04-28T05:18:20 |
|
ckan/ckan | 1,727 | ckan__ckan-1727 | [
"1193"
] | e24f6a07f1451d6ed23a9f2825cc0677e51d3acb | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -737,8 +737,12 @@ def check_access(action, data_dict=None):
return authorized
[email protected]("helpers.get_action() is deprecated and will be removed "
+ "in a future version of CKAN. Instead, please use the "
+ "extra_vars param to render() in your controller to pass "
+ "results from action functions to your templates.")
def get_action(action_name, data_dict=None):
- '''Calls an action function from a template.'''
+ '''Calls an action function from a template. Deprecated in CKAN 2.3.'''
if data_dict is None:
data_dict = {}
return logic.get_action(action_name)({}, data_dict)
| Remove ckan.lib.helpers.get_action()
This function is a bad idea because action functions that raise exceptions crash templates (which cannot catch exceptions). See http://lists.okfn.org/pipermail/ckan-dev/2013-August/005609.html
Instead, if you want to make an action available to a template, wrap each individual action in a template helper function that deals with any exceptions raised appropriately.
(Template helper functions should never raise exceptions.)
If `ckan.lib.helpers.get_action()` has been in a CKAN release already then it should be deprecated for one release, if not we can just delete it right away.
| Note you could also call get_action() in your controller and pass the results to the template using extra_vars. Keeps the template simpler, maybe.
Please don't remove this helper. There are some places where this is extremely useful. We should instead recommend not using it.
It's currently only used once in the core templates. Why is it so useful?
I think I've used it in one more place where it's not merged. Precisely because that was the easiest way to fix that bug.
| 2014-05-21T11:58:57 |
|
ckan/ckan | 1,825 | ckan__ckan-1825 | [
"1818"
] | bfa0a5ab49d59aeebf00bb655a96ee18319d88bf | diff --git a/ckan/logic/auth/get.py b/ckan/logic/auth/get.py
--- a/ckan/logic/auth/get.py
+++ b/ckan/logic/auth/get.py
@@ -135,7 +135,7 @@ def resource_show(context, data_dict):
raise logic.NotFound(_('No package found for this resource, cannot check auth.'))
pkg_dict = {'id': pkg.id}
- authorized = package_show(context, pkg_dict).get('success')
+ authorized = new_authz.is_authorized('package_show', context, pkg_dict).get('success')
if not authorized:
return {'success': False, 'msg': _('User %s not authorized to read resource %s') % (user, resource.id)}
| resource_show authentication function delegates in package_show function incorrectly
The resource_show authentication delegates in the package_show function to check if a user can view a resource. This is correct, but the problem is that the resource_show function uses directly the package_show function defined in the auth file (logic/auth/get.py) instead the one defined in the system (since a developer can change that function within a plugin).
| So you're suggesting that the resource_show auth function should be calling the package_show auth function via check_access, instead of calling it directly? That sounds correct to me. Are you able to send a pull request with this change?
| 2014-07-02T10:51:01 |
|
ckan/ckan | 1,827 | ckan__ckan-1827 | [
"1710"
] | b7b592ed28218231a1f179787856d38fb1fdcee0 | diff --git a/ckanext/datastore/db.py b/ckanext/datastore/db.py
--- a/ckanext/datastore/db.py
+++ b/ckanext/datastore/db.py
@@ -599,7 +599,7 @@ def upsert_data(context, data_dict):
if non_existing_filed_names:
raise ValidationError({
'fields': [u'fields "{0}" do not exist'.format(
- ', '.join(missing_fields))]
+ ', '.join(non_existing_filed_names))]
})
unique_values = [record[key] for key in unique_keys]
| DataStore Upsert operation returns an empty field list when non existing fields are included
When I've tried to upsert an object into a resource and I put a non existing field, API should return the name of the extra field. However, I get the following error:
```
"success": false,
"error": {
"fields": [
"fields \"\" do not exist"
],
"__type": "Validation Error"
}
```
As can be seen, the field list does not include the non existing fields (basically this list is empty). I expect a message similar to the following one:
```
"success": false,
"error": {
"fields": [
"fields \"test1, test2\" do not exist"
],
"__type": "Validation Error"
}
```
I've found a simple solution to this bug. Just go to `ckanext/datastore/db.py:633` and replace:
```
', '.join(missing_fields))]
```
by
```
', '.join(non_existing_filed_names))]
```
I've looked in the issue list and I haven't found something similar to this, but if it's already reported, you can close this issue ;)
| @aitormagan I have seen this too and just haven't logged it yet. Thanks for logging!
| 2014-07-02T14:24:55 |
|
ckan/ckan | 1,883 | ckan__ckan-1883 | [
"1709"
] | 1d96488532bc81d6bd11eed7da767da790300956 | diff --git a/ckan/lib/search/__init__.py b/ckan/lib/search/__init__.py
--- a/ckan/lib/search/__init__.py
+++ b/ckan/lib/search/__init__.py
@@ -32,7 +32,7 @@ def text_traceback():
SIMPLE_SEARCH = asbool(config.get('ckan.simple_search', False))
-SUPPORTED_SCHEMA_VERSIONS = ['2.0']
+SUPPORTED_SCHEMA_VERSIONS = ['2.3']
DEFAULT_OPTIONS = {
'limit': 20,
| Add resource extras to Solr search index
I plan to add resource extra attributes to Solr search index, to make it possible to search datasets by these fields.
As already discussed with @wardi, I will implement this in ckan/lib/search/index.py:PackageSearchIndex. Basic logic would be:
If the field name is not in:
ckan.model.resource.CORE_RESOURCE_COLUMNS + ['expired_id', 'revision_timestamp', 'expired_timestamp', 'current', 'state']
It is an extra field and should be indexed.
For this to work, I will create a new multivalued dynamic field in the Solr schema, with name 'res_extras_*'.
| Hi @rparrapy
How would you manage same extras in different resources for the same dataset?
Eg:
``` json
{
"name": "my-dataset",
"resources": [
{"id":"x", "my-extra": "value1"},
{"id":"y", "my-extra": "value2"},
]
}
```
Hi @amercader
I would probably concatenate both values with spaces between them (done it before with a non-multivalued field and it worked), like this:
``` python
pkg_dict["res_extras_my-extra"] = "value1 value2"
```
Solr splits the value in words, and it ends up working.
By the way, I just found out about this config setting:
http://ckan.readthedocs.org/en/latest/maintaining/configuration.html#ckan-extra-resource-fields
I tried using it but it didn't index the extra fields I specified (not sure why it is there, then).
Anyway, do you think I should use this config and index only specified resource fields? or stick to the plan and index _all_ extra fields?
fixed by #1732
@wardi The fix in #1732 will cause an exception if your CKAN is pointing to the previous version of the schema.xml and trying to create / update datasets (eg tests that use CreateTestData will fail. You need to increase the version number on the schema.xml file and change the supported schema versions value in `lib/search/common.py` so CKAN fails on startup and people actually know what is going on.
@wardi ping
| 2014-08-13T20:55:44 |
|
ckan/ckan | 1,911 | ckan__ckan-1911 | [
"1902"
] | 18ec6002c194e8777fa1ff0f5e18c0b5e581f423 | diff --git a/ckan/lib/dictization/model_dictize.py b/ckan/lib/dictization/model_dictize.py
--- a/ckan/lib/dictization/model_dictize.py
+++ b/ckan/lib/dictization/model_dictize.py
@@ -366,10 +366,13 @@ def get_packages_for_this_group(group_):
else:
q['fq'] = 'groups:"{0}"'.format(group_.name)
- is_group_member = (context.get('user') and
- new_authz.has_user_permission_for_group_or_org(group_.id, context.get('user'), 'read'))
- if is_group_member:
- context['ignore_capacity_check'] = True
+ # Allow members of organizations to see private datasets.
+ if group_.is_organization:
+ is_group_member = (context.get('user') and
+ new_authz.has_user_permission_for_group_or_org(
+ group_.id, context.get('user'), 'read'))
+ if is_group_member:
+ context['ignore_capacity_check'] = True
if not context.get('for_view'):
q['rows'] = 1000 # Only the first 1000 datasets are returned
| diff --git a/ckan/new_tests/logic/action/test_get.py b/ckan/new_tests/logic/action/test_get.py
--- a/ckan/new_tests/logic/action/test_get.py
+++ b/ckan/new_tests/logic/action/test_get.py
@@ -424,6 +424,58 @@ def test_package_autocomplete_does_not_return_private_datasets(self):
q='some')
eq(len(package_list), 1)
+ def test_group_show_does_not_show_private_datasets(self):
+ '''group_show() should never show private datasets.
+
+ If a dataset is a private member of an organization and also happens to
+ be a member of a group, group_show() should not return the dataset as
+ part of the group dict, even if the user calling group_show() is a
+ member or admin of the group or the organization or is a sysadmin.
+
+ '''
+ org_member = factories.User()
+ org = factories.Organization(user=org_member)
+ private_dataset = factories.Dataset(user=org_member,
+ owner_org=org['name'], private=True)
+
+ group = factories.Group()
+
+ # Add the private dataset to the group.
+ helpers.call_action('member_create', id=group['id'],
+ object=private_dataset['id'], object_type='package',
+ capacity='public')
+
+ # Create a member user and an admin user of the group.
+ group_member = factories.User()
+ helpers.call_action('member_create', id=group['id'],
+ object=group_member['id'], object_type='user',
+ capacity='member')
+ group_admin = factories.User()
+ helpers.call_action('member_create', id=group['id'],
+ object=group_admin['id'], object_type='user',
+ capacity='admin')
+
+ # Create a user who isn't a member of any group or organization.
+ non_member = factories.User()
+
+ sysadmin = factories.Sysadmin()
+
+ # None of the users should see the dataset when they call group_show().
+ for user in (org_member, group_member, group_admin, non_member,
+ sysadmin, None):
+
+ if user is None:
+ context = None # No user logged-in.
+ else:
+ context = {'user': user['name']}
+
+ group = helpers.call_action('group_show', id=group['id'],
+ context=context)
+
+ assert private_dataset['id'] not in [dataset['id'] for dataset
+ in group['packages']], (
+ "group_show() should never show private datasets")
+
class TestBadLimitQueryParameters(object):
'''test class for #1258 non-int query parameters cause 500 errors
| Private datasets belonging to groups
1. User seanh creates organization MyOrg
2. Seanh adds MyPrivateDataset to MyOrg, private
3. Seanh creates group MyGroup
4. Seanh adds MyPrivateDataset to MyGroup
5. Seanh adds user Fred to MyGroup
6. Now if Fred visits the CKAN front page, he can see MyPrivateDataset even though he is not a member of MyOrg (he can't actually load the dataset's page though)
| I can reproduce this on 2.2.1 but not on master. Can't find the commit that fixed it. Is it worth digging deeper to find what fixed this, so we can backport it?
I would say it's worth it if you have some time
| 2014-09-05T12:02:38 |
ckan/ckan | 1,918 | ckan__ckan-1918 | [
"1917"
] | 6d13801bc923d4a7e93ad38c8c30b184cbd6dc03 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -577,15 +577,38 @@ def group_list_authz(context, data_dict):
def organization_list_for_user(context, data_dict):
- '''Return the list of organizations that the user is a member of.
+ '''Return the organizations that the user has a given permission for.
+
+ By default this returns the list of organizations that the currently
+ authorized user can edit, i.e. the list of organizations that the user is an
+ admin of.
+
+ Specifically it returns the list of organizations that the currently
+ authorized user has a given permission (for example: "edit_group") against.
+
+ When a user becomes a member of an organization in CKAN they're given a
+ "capacity" (sometimes called a "role"), for example "member", "editor" or
+ "admin".
+
+ Each of these roles has certain permissions associated with it. For example
+ the admin role has the "admin" permission (which means they have permission
+ to do anything). The editor role has permissions like "create_dataset",
+ "update_dataset" and "delete_dataset". The member role has the "read"
+ permission.
+
+ This function returns the list of organizations that the authorized user has
+ a given permission for. For example the list of organizations that the user
+ is an admin of, or the list of organizations that the user can create
+ datasets in.
:param permission: the permission the user has against the
- returned organizations (optional, default: ``edit_group``)
+ returned organizations, for example ``"read"`` or ``"create_dataset"``
+ (optional, default: ``"edit_group"``)
:type permission: string
- :returns: list of dictized organizations that the user is
- authorized to edit
+ :returns: list of organizations that the user has the given permission for
:rtype: list of dicts
+
'''
model = context['model']
user = context['user']
@@ -613,7 +636,8 @@ def organization_list_for_user(context, data_dict):
q = model.Session.query(model.Member) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(roles)) \
- .filter(model.Member.table_id == user_id)
+ .filter(model.Member.table_id == user_id) \
+ .filter(model.Member.state == 'active')
group_ids = []
for row in q.all():
| diff --git a/ckan/new_tests/logic/action/test_get.py b/ckan/new_tests/logic/action/test_get.py
--- a/ckan/new_tests/logic/action/test_get.py
+++ b/ckan/new_tests/logic/action/test_get.py
@@ -459,3 +459,302 @@ def test_package_search_facet_field_is_json(self):
nose.tools.assert_raises(
logic.ValidationError, helpers.call_action, 'package_search',
**kwargs)
+
+
+class TestOrganizationListForUser(object):
+ '''Functional tests for the organization_list_for_user() action function.'''
+
+ def setup(self):
+ helpers.reset_db()
+ search.clear()
+
+ def test_when_user_is_not_a_member_of_any_organizations(self):
+ """
+
+ When the user isn't a member of any organizations (in any capacity)
+ organization_list_for_user() should return an empty list.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+
+ # Create an organization so we can test that it does not get returned.
+ factories.Organization()
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_when_user_is_an_admin_of_one_organization(self):
+ """
+
+ When the user is an admin of one organization
+ organization_list_for_user() should return a list of just that one
+ organization.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ # Create a second organization just so we can test that it does not get
+ # returned.
+ factories.Organization()
+
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='admin')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert len(organizations) == 1
+ assert organizations[0]['id'] == organization['id']
+
+ def test_when_user_is_an_admin_of_three_organizations(self):
+ """
+
+ When the user is an admin of three organizations
+ organization_list_for_user() should return a list of all three
+ organizations.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization_1 = factories.Organization()
+ organization_2 = factories.Organization()
+ organization_3 = factories.Organization()
+
+ # Create a second organization just so we can test that it does not get
+ # returned.
+ factories.Organization()
+
+ # Make the user an admin of all three organizations:
+ for organization in (organization_1, organization_2, organization_3):
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='admin')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert len(organizations) == 3
+ ids = [organization['id'] for organization in organizations]
+ for organization in (organization_1, organization_2, organization_3):
+ assert organization['id'] in ids
+
+ def test_does_not_return_members(self):
+ """
+
+ By default organization_list_for_user() should not return organizations
+ that the user is just a member (not an admin) of.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='member')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_does_not_return_editors(self):
+ """
+
+ By default organization_list_for_user() should not return organizations
+ that the user is just an editor (not an admin) of.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='editor')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_editor_permission(self):
+ """
+
+ organization_list_for_user() should return organizations that the user
+ is an editor of if passed a permission that belongs to the editor role.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='editor')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ permission='create_dataset',
+ context=context)
+
+ assert [org['id'] for org in organizations] == [organization['id']]
+
+ def test_member_permission(self):
+ """
+
+ organization_list_for_user() should return organizations that the user
+ is a member of if passed a permission that belongs to the member role.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='member')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ permission='read',
+ context=context)
+
+ assert [org['id'] for org in organizations] == [organization['id']]
+
+ def test_invalid_permission(self):
+ '''
+
+ organization_list_for_user() should return an empty list if passed a
+ non-existent or invalid permission.
+
+ Note that we test this with a user who is an editor of one organization.
+ If the user was an admin of the organization then it would return that
+ organization - admins have all permissions, including permissions that
+ don't exist.
+
+ '''
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+ factories.Organization()
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='editor')
+
+ for permission in ('', ' ', 'foo', 27.3, 5, True, False, None):
+ organizations = helpers.call_action('organization_list_for_user',
+ permission=permission,
+ context=context)
+
+ assert organizations == []
+
+ def test_that_it_does_not_return_groups(self):
+ """
+
+ organization_list_for_user() should not return groups that the user is
+ a member, editor or admin of.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ group_1 = factories.Group()
+ group_2 = factories.Group()
+ group_3 = factories.Group()
+ helpers.call_action('member_create', id=group_1['id'],
+ object=user['id'], object_type='user',
+ capacity='member')
+ helpers.call_action('member_create', id=group_2['id'],
+ object=user['id'], object_type='user',
+ capacity='editor')
+ helpers.call_action('member_create', id=group_3['id'],
+ object=user['id'], object_type='user',
+ capacity='admin')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_that_it_does_not_return_previous_memberships(self):
+ """
+
+ organization_list_for_user() should return organizations that the user
+ was previously an admin of.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ # Make the user an admin of the organization.
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='admin')
+
+ # Remove the user from the organization.
+ helpers.call_action('member_delete', id=organization['id'],
+ object=user['id'], object_type='user')
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_when_user_is_sysadmin(self):
+ """
+
+ When the user is a sysadmin organization_list_for_user() should just
+ return all organizations, even if the user is not a member of them.
+
+ """
+ user = factories.Sysadmin()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert [org['id'] for org in organizations] == [organization['id']]
+
+ def test_that_it_does_not_return_deleted_organizations(self):
+ """
+
+ organization_list_for_user() should not return deleted organizations
+ that the user was an admin of.
+
+ """
+ user = factories.User()
+ context = {'user': user['name']}
+ organization = factories.Organization()
+
+ # Make the user an admin of the organization.
+ helpers.call_action('member_create', id=organization['id'],
+ object=user['id'], object_type='user',
+ capacity='admin')
+
+ # Delete the organization.
+ helpers.call_action('organization_delete', id=organization['id'])
+
+ organizations = helpers.call_action('organization_list_for_user',
+ context=context)
+
+ assert organizations == []
+
+ def test_with_no_authorized_user(self):
+ """
+
+ organization_list_for_user() should return an empty list if there's no
+ authorized user. Users who aren't logged-in don't have any permissions.
+
+ """
+ # Create an organization so we can test that it doesn't get returned.
+ organization = factories.Organization()
+
+ organizations = helpers.call_action('organization_list_for_user')
+
+ assert organizations == []
| Update search index after membership changes
| 2014-09-10T11:57:10 |
|
ckan/ckan | 2,059 | ckan__ckan-2059 | [
"2007"
] | 0ddd6fda6103d3a22e00ed2480c929081c880eef | diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -273,6 +273,9 @@ def before_show(self, resource_dict):
action='dump', resource_id=resource_dict['id'])
connection = None
+
+ resource_dict['datastore_active'] = False
+
try:
connection = self.read_engine.connect()
result = connection.execute(
@@ -281,8 +284,6 @@ def before_show(self, resource_dict):
).fetchone()
if result:
resource_dict['datastore_active'] = True
- else:
- resource_dict['datastore_active'] = False
finally:
if connection:
connection.close()
| datastore extension fails tests when using pg 8.4
Appears to be checking for datastore values in a dict when datastore is not active.
| Tests pass on master and have been for a while, do you have more details?
https://travis-ci.org/ckan/ckan/jobs/39177131
```
======================================================================
ERROR: ckanext.datastore.tests.test_create.TestDatastoreCreate.test_create_basic
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/travis/build/ckan/ckan/ckanext/datastore/tests/test_create.py", line 470, in test_create_basic
assert res_dict['result']['datastore_active'] is False
KeyError: 'datastore_active'
```
This on a branch taken from master and the only changes in cli.py
It's the Python 2.6/PG 8.4 tests
This now works on a second run. Worrying, but not apparently an issue.
This just happened on master, so seems like something we need to investigate.
https://travis-ci.org/ckan/ckan/jobs/39255815
Random test failing debug, yay!
could it be a race? Might this query be failing quietly somehow? https://github.com/ckan/ckan/blob/master/ckanext/datastore/plugin.py#L276-L281 ..or the plugin not loaded?
It should definitely catch the exception rather than failing quietly. Will _table_metadata even exist if datastore is not active?
| 2014-11-19T13:32:04 |
|
ckan/ckan | 2,064 | ckan__ckan-2064 | [
"2029"
] | 0ddd6fda6103d3a22e00ed2480c929081c880eef | diff --git a/ckan/lib/cli.py b/ckan/lib/cli.py
--- a/ckan/lib/cli.py
+++ b/ckan/lib/cli.py
@@ -135,7 +135,7 @@ def _get_config(self):
fileConfig(self.filename)
return appconfig('config:' + self.filename)
- def _load_config(self):
+ def _load_config(self, load_site_user=True):
conf = self._get_config()
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
@@ -149,7 +149,7 @@ def _load_config(self):
self.translator_obj = MockTranslator()
self.registry.register(pylons.translator, self.translator_obj)
- if model.user_table.exists():
+ if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
@@ -199,11 +199,12 @@ class ManageDb(CkanCommand):
min_args = 1
def command(self):
- self._load_config()
+ cmd = self.args[0]
+
+ self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
- cmd = self.args[0]
if cmd == 'init':
model.repo.init_db()
| paster db upgrade failure 2.1->2.3
bumped into this when trying to test another issue. If I run `paster db upgrade` on master with a db from 2.1 I get this traceback:
``` pytb
Traceback (most recent call last):
File "/home/ian/ckan2env26/bin/paster", line 9, in <module>
load_entry_point('PasteScript==1.7.5', 'console_scripts', 'paster')()
File "/home/ian/ckan2env26/lib/python2.6/site-packages/paste/script/command.py", line 104, in run
invoke(command, command_name, options, args[1:])
File "/home/ian/ckan2env26/lib/python2.6/site-packages/paste/script/command.py", line 143, in invoke
exit_code = runner.run(args)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/paste/script/command.py", line 238, in run
result = self.command()
File "/home/ian/git/ckan/ckan/lib/cli.py", line 192, in command
self._load_config()
File "/home/ian/git/ckan/ckan/lib/cli.py", line 150, in _load_config
self.site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
File "/home/ian/git/ckan/ckan/logic/__init__.py", line 424, in wrapped
result = _action(context, data_dict, **kw)
File "/home/ian/git/ckan/ckan/logic/action/get.py", line 2185, in get_site_user
user = model.User.get(site_id)
File "/home/ian/git/ckan/ckan/model/user.py", line 64, in get
return query.first()
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/orm/query.py", line 2156, in first
ret = list(self[0:1])
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/orm/query.py", line 2023, in __getitem__
return list(res)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/orm/query.py", line 2227, in __iter__
return self._execute_and_instances(context)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/orm/query.py", line 2242, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/engine/base.py", line 1449, in execute
params)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/engine/base.py", line 1584, in _execute_clauseelement
compiled_sql, distilled_params
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/engine/base.py", line 1698, in _execute_context
context)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/engine/base.py", line 1691, in _execute_context
context)
File "/home/ian/ckan2env26/lib/python2.6/site-packages/sqlalchemy/engine/default.py", line 331, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (ProgrammingError) column user.state does not exist
LINE 1: ..._notifications, "user".sysadmin AS user_sysadmin, "user".sta...
```
| This is pretty serious as if I'm not mistaken, it also prevents 2.1 -> 2.2.
The problem seems to have been introduced in #1178. A new column `state` was added to the users table, and vdm was thrown into the mix. It now seems that when calling `paster db upgrade` and getting the site user the model is initialized as if the user table had a `state` column (I guess [this](https://github.com/ckan/ckan/blob/d680a44bff3ef6fb865f3c41e9554a85222b00ac/ckan/model/user.py#L36) does it) but that's still not the case physically in the DB.
Not sure how to tackle this one, apart from (more) horrible hacks on [cli.py](https://github.com/ckan/ckan/blob/d680a44bff3ef6fb865f3c41e9554a85222b00ac/ckan/lib/cli.py#L106) to prevent getting the site user on `paster db upgrade`.
And to make things worse the [migration script](https://github.com/ckan/ckan/blob/master/ckan/migration/versions/071_add_state_column_to_user_table.py) is not wrapped in a transaction so even if you add the field manually the migration hangs :(
I was able to upgrade from 2.1 to 2.2 (note that it took about 24 hours to complete). I just started the migration from 2.2->master
After letting it run for more than a week I think it's safe to assume that my migration failed, so I still think there might be issues when doing 2.1 -> 2.2
i have the same trouble when i doing 2.2a -> 2.2.1.
| 2014-11-19T17:56:26 |
|
ckan/ckan | 2,066 | ckan__ckan-2066 | [
"2063"
] | c76a487410e72c16a4e5f3b81ac0ae7e8747cee8 | diff --git a/ckan/controllers/package.py b/ckan/controllers/package.py
--- a/ckan/controllers/package.py
+++ b/ckan/controllers/package.py
@@ -313,10 +313,10 @@ def pager_url(q=None, page=None):
extra_vars={'dataset_type': package_type})
def _content_type_from_extension(self, ext):
- ct, mu, ext = accept.parse_extension(ext)
+ ct, ext = accept.parse_extension(ext)
if not ct:
- return None, None, None,
- return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
+ return None, None
+ return ct, ext
def _content_type_from_accept(self):
"""
@@ -325,8 +325,8 @@ def _content_type_from_accept(self):
it accurately. TextTemplate must be used for non-xml templates
whilst all that are some sort of XML should use MarkupTemplate.
"""
- ct, mu, ext = accept.parse_header(request.headers.get('Accept', ''))
- return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
+ ct, ext = accept.parse_header(request.headers.get('Accept', ''))
+ return ct, ext
def resources(self, id):
package_type = self._get_package_type(id.split('@')[0])
@@ -358,16 +358,15 @@ def resources(self, id):
def read(self, id, format='html'):
if not format == 'html':
- ctype, extension, loader = \
+ ctype, extension = \
self._content_type_from_extension(format)
if not ctype:
# An unknown format, we'll carry on in case it is a
# revision specifier and re-constitute the original id
id = "%s.%s" % (id, format)
- ctype, format, loader = "text/html; charset=utf-8", "html", \
- MarkupTemplate
+ ctype, format = "text/html; charset=utf-8", "html"
else:
- ctype, format, loader = self._content_type_from_accept()
+ ctype, format = self._content_type_from_accept()
response.headers['Content-Type'] = ctype
@@ -427,7 +426,7 @@ def read(self, id, format='html'):
template = template[:template.index('.') + 1] + format
try:
- return render(template, loader_class=loader,
+ return render(template,
extra_vars={'dataset_type': package_type})
except ckan.lib.render.TemplateNotFound:
msg = _("Viewing {package_type} datasets in {format} format is "
diff --git a/ckan/lib/accept.py b/ckan/lib/accept.py
--- a/ckan/lib/accept.py
+++ b/ckan/lib/accept.py
@@ -9,10 +9,10 @@
accept_re = re.compile("^(?P<ct>[^;]+)[ \t]*(;[ \t]*q=(?P<q>[0-9.]+)){0,1}$")
accept_types = {
- # Name : ContentType, Is Markup?, Extension
- "text/html": ("text/html; charset=utf-8", True, 'html'),
- "text/n3": ("text/n3; charset=utf-8", False, 'n3'),
- "application/rdf+xml": ("application/rdf+xml; charset=utf-8", True, 'rdf'),
+ # Name : ContentType, Extension
+ "text/html": ("text/html; charset=utf-8", 'html'),
+ "text/n3": ("text/n3; charset=utf-8", 'n3'),
+ "application/rdf+xml": ("application/rdf+xml; charset=utf-8", 'rdf'),
}
accept_by_extension = {
"rdf": "application/rdf+xml",
@@ -28,7 +28,7 @@ def parse_extension(file_ext):
ext = accept_by_extension.get(file_ext, None)
if ext:
return accept_types[ext]
- return (None, None, None,)
+ return (None, None)
def parse_header(accept_header=''):
diff --git a/ckan/plugins/interfaces.py b/ckan/plugins/interfaces.py
--- a/ckan/plugins/interfaces.py
+++ b/ckan/plugins/interfaces.py
@@ -963,8 +963,8 @@ def read_template(self):
``'package/read.html'``.
If the user requests the dataset in a format other than HTML
- (CKAN supports returning datasets in RDF or N3 format by appending .rdf
- or .n3 to the dataset read URL, see
+ (CKAN supports returning datasets in RDF/XML or N3 format by appending
+ .rdf or .n3 to the dataset read URL, see
:doc:`/maintaining/linked-data-and-rdf`) then CKAN will try to render a
template file with the same path as returned by this function, but a
different filename extension, e.g. ``'package/read.rdf'``. If your
| diff --git a/ckan/new_tests/controllers/test_package.py b/ckan/new_tests/controllers/test_package.py
--- a/ckan/new_tests/controllers/test_package.py
+++ b/ckan/new_tests/controllers/test_package.py
@@ -178,3 +178,35 @@ def test_inexistent_resource_view_page_returns_not_found_code(self):
app = self._get_test_app()
app.get(url, status=404)
+
+
+class TestPackageRead(helpers.FunctionalTestBase):
+ @classmethod
+ def setup_class(cls):
+ super(cls, cls).setup_class()
+ helpers.reset_db()
+
+ def setup(self):
+ model.repo.rebuild_db()
+
+ def test_read_rdf(self):
+ dataset1 = factories.Dataset()
+
+ offset = url_for(controller='package', action='read',
+ id=dataset1['name']) + ".rdf"
+ app = self._get_test_app()
+ res = app.get(offset, status=200)
+
+ assert 'dcat' in res, res
+ assert '{{' not in res, res
+
+ def test_read_n3(self):
+ dataset1 = factories.Dataset()
+
+ offset = url_for(controller='package', action='read',
+ id=dataset1['name']) + ".n3"
+ app = self._get_test_app()
+ res = app.get(offset, status=200)
+
+ assert 'dcat' in res, res
+ assert '{{' not in res, res
diff --git a/ckan/tests/lib/test_accept.py b/ckan/tests/lib/test_accept.py
--- a/ckan/tests/lib/test_accept.py
+++ b/ckan/tests/lib/test_accept.py
@@ -4,55 +4,47 @@
class TestAccept:
def test_accept_invalid(self):
- ct, markup, ext = accept.parse_header(None)
+ ct, ext = accept.parse_header(None)
assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "html")
def test_accept_invalid2(self):
- ct, markup, ext = accept.parse_header("")
+ ct, ext = accept.parse_header("")
assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "html")
def test_accept_invalid3(self):
- ct, markup, ext = accept.parse_header("wombles")
+ ct, ext = accept.parse_header("wombles")
assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "html")
def test_accept_valid(self):
a = "text/turtle,application/turtle,application/rdf+xml,text/plain;q=0.8,*/*;q=.5"
- ct, markup, ext = accept.parse_header(a)
+ ct, ext = accept.parse_header(a)
assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "rdf")
def test_accept_valid2(self):
a = "text/turtle,application/turtle,application/rdf+xml;q=0.9,text/plain;q=0.8,*/*;q=.5"
- ct, markup, ext = accept.parse_header(a)
+ ct, ext = accept.parse_header(a)
assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "rdf")
def test_accept_valid4(self):
a = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
- ct, markup, ext = accept.parse_header(a)
+ ct, ext = accept.parse_header(a)
assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "html")
def test_accept_valid5(self):
a = "application/rdf+xml;q=0.5,application/xhtml+xml,text/html;q=0.9"
- ct, markup, ext = accept.parse_header(a)
+ ct, ext = accept.parse_header(a)
assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "html")
def test_accept_valid6(self):
a = "application/rdf+xml;q=0.9,application/xhtml+xml,text/html;q=0.5"
- ct, markup, ext = accept.parse_header(a)
+ ct, ext = accept.parse_header(a)
assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( markup, True)
assert_equal( ext, "rdf")
| create jinja versions of package/read.rdf and .n3
we were relying on the templates in legacy_templates, but since those have been removed requesting rdf and n3 versions of datasets is now broken.
| n3 was always broken/incomplete, so we should probably wait until we've got a working template.
| 2014-11-19T21:01:21 |
ckan/ckan | 2,084 | ckan__ckan-2084 | [
"2080"
] | c1f154e9b80472b66f81606340ade24e0ff777aa | diff --git a/ckan/controllers/package.py b/ckan/controllers/package.py
--- a/ckan/controllers/package.py
+++ b/ckan/controllers/package.py
@@ -325,7 +325,6 @@ def _content_type_from_accept(self):
return ct, ext
def resources(self, id):
- package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
@@ -346,6 +345,7 @@ def resources(self, id):
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
+ package_type = c.pkg_dict['type'] or 'dataset'
self._setup_template_variables(context, {'id': id},
package_type=package_type)
@@ -366,7 +366,6 @@ def read(self, id, format='html'):
response.headers['Content-Type'] = ctype
- package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
@@ -413,6 +412,7 @@ def read(self, id, format='html'):
context, {'id': resource['id']})
resource['has_views'] = len(resource_views) > 0
+ package_type = c.pkg_dict['type'] or 'dataset'
self._setup_template_variables(context, {'id': id},
package_type=package_type)
@@ -432,7 +432,6 @@ def read(self, id, format='html'):
def history(self, id):
- package_type = self._get_package_type(id.split('@')[0])
if 'diff' in request.params or 'selected1' in request.params:
try:
@@ -508,6 +507,8 @@ def history(self, id):
response.headers['Content-Type'] = 'application/atom+xml'
return feed.writeString('utf-8')
+ package_type = c.pkg_dict['type'] or 'dataset'
+
c.related_count = c.pkg.related_count
return render(
self._history_template(c.pkg_dict.get('type', package_type)),
@@ -581,7 +582,7 @@ def new(self, data=None, errors=None, error_summary=None):
def resource_edit(self, id, resource_id, data=None, errors=None,
error_summary=None):
- package_type = self._get_package_type(id)
+
if request.method == 'POST' and not data:
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
@@ -636,6 +637,8 @@ def resource_edit(self, id, resource_id, data=None, errors=None,
if not data:
data = resource_dict
+ package_type = pkg_dict['type'] or 'dataset'
+
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
@@ -647,7 +650,6 @@ def resource_edit(self, id, resource_id, data=None, errors=None,
def new_resource(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
- package_type = self._get_package_type(id)
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
@@ -733,13 +735,7 @@ def new_resource(self, id, data=None, errors=None, error_summary=None):
# add more resources
redirect(h.url_for(controller='package',
action='new_resource', id=id))
- errors = errors or {}
- error_summary = error_summary or {}
- vars = {'data': data, 'errors': errors,
- 'error_summary': error_summary, 'action': 'new',
- 'resource_form_snippet': self._resource_form(package_type),
- 'dataset_type': package_type}
- vars['pkg_name'] = id
+
# get resources for sidebar
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
@@ -752,6 +748,15 @@ def new_resource(self, id, data=None, errors=None, error_summary=None):
except NotAuthorized:
abort(401, _('Unauthorized to create a resource for this package'))
+ package_type = pkg_dict['type'] or 'dataset'
+
+ errors = errors or {}
+ error_summary = error_summary or {}
+ vars = {'data': data, 'errors': errors,
+ 'error_summary': error_summary, 'action': 'new',
+ 'resource_form_snippet': self._resource_form(package_type),
+ 'dataset_type': package_type}
+ vars['pkg_name'] = id
# required for nav menu
vars['pkg_dict'] = pkg_dict
template = 'package/new_resource_not_draft.html'
@@ -879,10 +884,8 @@ def history_ajax(self, id):
def _get_package_type(self, id):
"""
- Given the id of a package it determines the plugin to load
- based on the package's type name (type). The plugin found
- will be returned, or None if there is no plugin associated with
- the type.
+ Given the id of a package this method will return the type of the
+ package, or 'dataset' if no type is currently set
"""
pkg = model.Package.get(id)
if pkg:
| Unnecessary db request
In the package controller, self._get_package_type is used to determine which plugin to use when calling setup_template_variables. _get_package_type does a model.Package.get() which is unnecessary, especially in cases where the controller action will fetch the package anyway.
Should work out if we can remove the unnecessary call in each action, instead using package type from package_show results if it was called.
| 2014-11-24T18:53:56 |
||
ckan/ckan | 2,097 | ckan__ckan-2097 | [
"2089"
] | 8a4d4cf90a1348b3515264de01f2874f289a1945 | diff --git a/ckan/logic/action/create.py b/ckan/logic/action/create.py
--- a/ckan/logic/action/create.py
+++ b/ckan/logic/action/create.py
@@ -108,9 +108,9 @@ def package_create(context, data_dict):
:param groups: the groups to which the dataset belongs (optional), each
group dictionary should have one or more of the following keys which
identify an existing group:
- ``'id'`` (the id of the group, string), ``'name'`` (the name of the
- group, string), ``'title'`` (the title of the group, string), to see
- which groups exist call :py:func:`~ckan.logic.action.get.group_list`
+ ``'id'`` (the id of the group, string), or ``'name'`` (the name of the
+ group, string), to see which groups exist
+ call :py:func:`~ckan.logic.action.get.group_list`
:type groups: list of dictionaries
:param owner_org: the id of the dataset's owning organization, see
:py:func:`~ckan.logic.action.get.organization_list` or
| API: Specifying groups by title for adding datasets doesn't work
the docs say:
groups (list of dictionaries) – the groups to which the dataset belongs (optional), each group dictionary should have one or more of the following keys which identify an existing group: 'id' (the id of the group, string), 'name' (the name of the group, string), **'title' (the title of the group, string)**, to see which groups exist call group_list()
However, specifying the group by title is not foreseen in the code, and this does not work
| 2014-11-25T20:38:41 |
||
ckan/ckan | 2,129 | ckan__ckan-2129 | [
"2104"
] | 5119209090a2163c780e88f90f3a55b60e0bcd88 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -2266,6 +2266,8 @@ def vocabulary_list(context, data_dict):
:rtype: list of dictionaries
'''
+ _check_access('vocabulary_list', context, data_dict)
+
model = context['model']
vocabulary_objects = model.Session.query(model.Vocabulary).all()
return model_dictize.vocabulary_list_dictize(vocabulary_objects, context)
@@ -2280,6 +2282,8 @@ def vocabulary_show(context, data_dict):
:rtype: dictionary
'''
+ _check_access('vocabulary_show', context, data_dict)
+
model = context['model']
vocab_id = data_dict.get('id')
if not vocab_id:
diff --git a/ckan/logic/auth/get.py b/ckan/logic/auth/get.py
--- a/ckan/logic/auth/get.py
+++ b/ckan/logic/auth/get.py
@@ -66,6 +66,10 @@ def license_list(context, data_dict):
# Licenses list is visible by default
return {'success': True}
+def vocabulary_list(context, data_dict):
+ # List of all vocabularies are visible by default
+ return {'success': True}
+
def tag_list(context, data_dict):
# Tags list is visible by default
return {'success': True}
@@ -157,6 +161,10 @@ def organization_show(context, data_dict):
# anyone can see a organization
return {'success': True}
+def vocabulary_show(context, data_dict):
+ # Allow viewing of vocabs by default
+ return {'success': True}
+
def tag_show(context, data_dict):
# No authz check in the logic function
return {'success': True}
| diff --git a/ckan/tests/test_coding_standards.py b/ckan/tests/test_coding_standards.py
--- a/ckan/tests/test_coding_standards.py
+++ b/ckan/tests/test_coding_standards.py
@@ -776,8 +776,6 @@ class TestActionAuth(object):
'get: user_activity_list_html',
'get: user_followee_count',
'get: user_follower_count',
- 'get: vocabulary_list',
- 'get: vocabulary_show',
'update: package_relationship_update_rest',
'update: task_status_update_many',
'update: term_translation_update_many',
| No auth function for vocabulary_list
I am trying to limit access to vocabulary list by implementing get_auth_functions. But I am getting the following exception when calling the vocabulary_list get api : "Exception: Action function vocabulary_list did not call its auth function". I checked logic.auth.get and I don't see any auth functions defined for vocabulary_list.
| This raises a 500 on master.ckan.org - `http://master.ckan.org/api/3/action/vocabulary_list`. We should probably implement a skeleton auth function that just returns True, you could then override it in your extension.
If you don't want an auth function, you should be able to decorate your auth function with the following code to avoid this error.
```
import ckan.logic as logic
@logic.auth_audit_exempt
def vocabulary_list(......
```
| 2014-12-09T14:49:26 |
ckan/ckan | 2,195 | ckan__ckan-2195 | [
"2194"
] | 7fb017a551a9616be8ace3db7f48ca7019254825 | diff --git a/ckan/controllers/related.py b/ckan/controllers/related.py
--- a/ckan/controllers/related.py
+++ b/ckan/controllers/related.py
@@ -35,7 +35,7 @@ def dashboard(self):
params_nopage = [(k, v) for k, v in base.request.params.items()
if k != 'page']
- page = self._get_page_number(request.params)
+ page = self._get_page_number(base.request.params)
# Update ordering in the context
related_list = logic.get_action('related_list')(context, data_dict)
| /related gives Internal Server Error
E.g. http://master.ckan.org/related
Looking at the error log, there's a NameError: name 'request' is not defined in line 38 of related.py.
This seems to have snuck in when #2062 was merged.
| 2015-01-08T22:56:05 |
||
ckan/ckan | 2,214 | ckan__ckan-2214 | [
"2210"
] | 65c689b61e39dec1b0d06bedbefb846aa9ddeb35 | diff --git a/ckan/lib/dictization/model_dictize.py b/ckan/lib/dictization/model_dictize.py
--- a/ckan/lib/dictization/model_dictize.py
+++ b/ckan/lib/dictization/model_dictize.py
@@ -361,14 +361,10 @@ def group_dictize(group, context,
like tags are included unless you specify it in the params.
:param packages_field: determines the format of the `packages` field - can
- be `datasets`, `dataset_count`, `none_but_include_package_count` or None.
- If set to `dataset_count` or `none_but_include_package_count` then you
- can precalculate dataset counts in advance by supplying:
- context['dataset_counts'] = get_group_dataset_counts()
+ be `datasets` or None.
'''
- assert packages_field in ('datasets', 'dataset_count',
- 'none_but_include_package_count', None)
- if packages_field in ('dataset_count', 'none_but_include_package_count'):
+ assert packages_field in ('datasets', 'dataset_count', None)
+ if packages_field == 'dataset_count':
dataset_counts = context.get('dataset_counts', None)
result_dict = d.table_dictize(group, context)
@@ -417,12 +413,11 @@ def get_packages_for_this_group(group_, just_the_count=False):
search_results = logic.get_action('package_search')(search_context,
q)
return search_results['count'], search_results['results']
+
if packages_field == 'datasets':
package_count, packages = get_packages_for_this_group(group)
result_dict['packages'] = packages
else:
- # i.e. packages_field is 'dataset_count' or
- # 'none_but_include_package_count'
if dataset_counts is None:
package_count, packages = get_packages_for_this_group(
group, just_the_count=True)
@@ -433,8 +428,6 @@ def get_packages_for_this_group(group_, just_the_count=False):
package_count = facets['owner_org'].get(group.id, 0)
else:
package_count = facets['groups'].get(group.name, 0)
- if packages_field != 'none_but_include_package_count':
- result_dict['packages'] = package_count
result_dict['package_count'] = package_count
diff --git a/ckan/logic/action/create.py b/ckan/logic/action/create.py
--- a/ckan/logic/action/create.py
+++ b/ckan/logic/action/create.py
@@ -786,7 +786,14 @@ def _group_or_org_create(context, data_dict, is_org=False):
logic.get_action('member_create')(member_create_context, member_dict)
log.debug('Created object %s' % group.name)
- return model_dictize.group_dictize(group, context)
+
+ return_id_only = context.get('return_id_only', False)
+ action = 'organization_show' if is_org else 'group_show'
+
+ output = context['id'] if return_id_only \
+ else _get_action(action)(context, {'id': group.id})
+
+ return output
def group_create(context, data_dict):
@@ -846,7 +853,9 @@ def group_create(context, data_dict):
a member of the group)
:type users: list of dictionaries
- :returns: the newly created group
+ :returns: the newly created group (unless 'return_id_only' is set to True
+ in the context, in which case just the group id will
+ be returned)
:rtype: dictionary
'''
@@ -905,7 +914,9 @@ def organization_create(context, data_dict):
in which the user is a member of the organization)
:type users: list of dictionaries
- :returns: the newly created organization
+ :returns: the newly created organization (unless 'return_id_only' is set
+ to True in the context, in which case just the organization id
+ will be returned)
:rtype: dictionary
'''
diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -378,6 +378,7 @@ def _group_or_org_list(context, data_dict, is_org=False):
log.warn('`order_by` deprecated please use `sort`')
if not data_dict.get('sort'):
sort = order_by
+
# if the sort is packages and no sort direction is supplied we want to do a
# reverse sort to maintain compatibility.
if sort.strip() in ('packages', 'package_count'):
@@ -413,22 +414,16 @@ def _group_or_org_list(context, data_dict, is_org=False):
query = query.filter(model.Group.type == group_type)
groups = query.all()
- if all_fields:
- include_tags = asbool(data_dict.get('include_tags', False))
- else:
- include_tags = False
- # even if we are not going to return all_fields, we need to dictize all the
- # groups so that we can sort by any field.
- group_list = model_dictize.group_list_dictize(
- groups, context,
- sort_key=lambda x: x[sort_info[0][0]],
- reverse=sort_info[0][1] == 'desc',
- with_package_counts=all_fields or
- sort_info[0][0] in ('packages', 'package_count'),
- include_groups=asbool(data_dict.get('include_groups', False)),
- include_tags=include_tags,
- include_extras=include_extras,
- )
+
+ action = 'organization_show' if is_org else 'group_show'
+
+ group_list = []
+ for group in groups:
+ data_dict['id'] = group.id
+ group_list.append(logic.get_action(action)(context, data_dict))
+
+ group_list = sorted(group_list, key=lambda x: x[sort_info[0][0]],
+ reverse=sort_info[0][1] == 'desc')
if not all_fields:
group_list = [group[ref_group_by] for group in group_list]
@@ -463,7 +458,7 @@ def group_list(context, data_dict):
(optional, default: ``False``)
:type include_tags: boolean
:param include_groups: if all_fields, include the groups the groups are in
- (optional, default: ``False``)
+ (optional, default: ``False``).
:type include_groups: boolean
:rtype: list of strings
@@ -1158,8 +1153,7 @@ def _group_or_org_show(context, data_dict, is_org=False):
context['group'] = group
include_datasets = asbool(data_dict.get('include_datasets', False))
- packages_field = 'datasets' if include_datasets \
- else 'none_but_include_package_count'
+ packages_field = 'datasets' if include_datasets else 'dataset_count'
if group is None:
raise NotFound
| diff --git a/ckan/tests/legacy/functional/test_group.py b/ckan/tests/legacy/functional/test_group.py
--- a/ckan/tests/legacy/functional/test_group.py
+++ b/ckan/tests/legacy/functional/test_group.py
@@ -78,19 +78,19 @@ def test_sorting(self):
assert results[-1]['name'] == u'alpha', results[-1]['name']
# Test packages reversed
- data_dict = {'all_fields': True, 'sort': 'packages desc'}
+ data_dict = {'all_fields': True, 'sort': 'package_count desc'}
results = get_action('group_list')(context, data_dict)
assert results[0]['name'] == u'beta', results[0]['name']
assert results[1]['name'] == u'delta', results[1]['name']
# Test packages forward
- data_dict = {'all_fields': True, 'sort': 'packages asc'}
+ data_dict = {'all_fields': True, 'sort': 'package_count asc'}
results = get_action('group_list')(context, data_dict)
assert results[-2]['name'] == u'delta', results[-2]['name']
assert results[-1]['name'] == u'beta', results[-1]['name']
# Default ordering for packages
- data_dict = {'all_fields': True, 'sort': 'packages'}
+ data_dict = {'all_fields': True, 'sort': 'package_count'}
results = get_action('group_list')(context, data_dict)
assert results[0]['name'] == u'beta', results[0]['name']
assert results[1]['name'] == u'delta', results[1]['name']
diff --git a/ckan/tests/lib/dictization/test_model_dictize.py b/ckan/tests/lib/dictization/test_model_dictize.py
--- a/ckan/tests/lib/dictization/test_model_dictize.py
+++ b/ckan/tests/lib/dictization/test_model_dictize.py
@@ -25,7 +25,7 @@ def test_group_list_dictize(self):
assert_equal(len(group_dicts), 1)
assert_equal(group_dicts[0]['name'], group['name'])
- assert_equal(group_dicts[0]['packages'], 0)
+ assert_equal(group_dicts[0]['package_count'], 0)
assert 'extras' not in group_dicts[0]
assert 'tags' not in group_dicts[0]
assert 'groups' not in group_dicts[0]
@@ -181,7 +181,6 @@ def test_group_dictize_group_with_parent_group(self):
assert_equal(len(group['groups']), 1)
assert_equal(group['groups'][0]['name'], 'parent')
- assert_equal(group['groups'][0]['packages'], 0) # deprecated
assert_equal(group['groups'][0]['package_count'], 0)
def test_group_dictize_without_packages(self):
@@ -250,8 +249,6 @@ def test_group_dictize_with_package_count(self):
group = model_dictize.group_dictize(group_obj, context,
packages_field='dataset_count')
-
- assert_equal(group['packages'], 1)
assert_equal(group['package_count'], 1)
def test_group_dictize_with_no_packages_field_but_still_package_count(self):
@@ -263,8 +260,7 @@ def test_group_dictize_with_no_packages_field_but_still_package_count(self):
# not supplying dataset_counts in this case either
group = model_dictize.group_dictize(group_obj, context,
- packages_field=
- 'none_but_include_package_count')
+ packages_field='dataset_count')
assert 'packages' not in group
assert_equal(group['package_count'], 1)
@@ -293,7 +289,7 @@ def test_group_dictize_for_org_with_package_count(self):
org = model_dictize.group_dictize(org_obj, context,
packages_field='dataset_count')
- assert_equal(org['packages'], 1)
+ assert_equal(org['package_count'], 1)
class TestPackageDictize:
diff --git a/ckan/tests/logic/action/test_create.py b/ckan/tests/logic/action/test_create.py
--- a/ckan/tests/logic/action/test_create.py
+++ b/ckan/tests/logic/action/test_create.py
@@ -474,3 +474,159 @@ def test_id_cant_already_exist(self):
id=dataset['id'],
name='test-dataset',
)
+
+
+class TestGroupCreate(helpers.FunctionalTestBase):
+
+ def test_create_group(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ group = helpers.call_action(
+ 'group_create',
+ context=context,
+ name='test-group',
+ )
+
+ assert len(group['users']) == 1
+ assert group['display_name'] == u'test-group'
+ assert group['package_count'] == 0
+ assert not group['is_organization']
+ assert group['type'] == 'group'
+
+ @nose.tools.raises(logic.ValidationError)
+ def test_create_group_validation_fail(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ group = helpers.call_action(
+ 'group_create',
+ context=context,
+ name='',
+ )
+
+ def test_create_group_return_id(self):
+ import re
+
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ 'return_id_only': True
+ }
+
+ group = helpers.call_action(
+ 'group_create',
+ context=context,
+ name='test-group',
+ )
+
+ assert isinstance(group, str)
+ assert re.match('([a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}?)', group)
+
+ def test_create_matches_show(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ created = helpers.call_action(
+ 'organization_create',
+ context=context,
+ name='test-organization',
+ )
+
+ shown = helpers.call_action(
+ 'organization_show',
+ context=context,
+ id='test-organization',
+ )
+
+ assert sorted(created.keys()) == sorted(shown.keys())
+ for k in created.keys():
+ assert created[k] == shown[k], k
+
+
+class TestOrganizationCreate(helpers.FunctionalTestBase):
+
+ def test_create_organization(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ org = helpers.call_action(
+ 'organization_create',
+ context=context,
+ name='test-organization',
+ )
+
+ assert len(org['users']) == 1
+ assert org['display_name'] == u'test-organization'
+ assert org['package_count'] == 0
+ assert org['is_organization']
+ assert org['type'] == 'organization'
+
+ @nose.tools.raises(logic.ValidationError)
+ def test_create_organization_validation_fail(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ org = helpers.call_action(
+ 'organization_create',
+ context=context,
+ name='',
+ )
+
+ def test_create_organization_return_id(self):
+ import re
+
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ 'return_id_only': True
+ }
+
+ org = helpers.call_action(
+ 'organization_create',
+ context=context,
+ name='test-organization',
+ )
+
+ assert isinstance(org, str)
+ assert re.match('([a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}?)', org)
+
+ def test_create_matches_show(self):
+ user = factories.User()
+ context = {
+ 'user': user['name'],
+ 'ignore_auth': True,
+ }
+
+ created = helpers.call_action(
+ 'organization_create',
+ context=context,
+ name='test-organization',
+ )
+
+ shown = helpers.call_action(
+ 'organization_show',
+ context=context,
+ id='test-organization',
+ )
+
+ assert sorted(created.keys()) == sorted(shown.keys())
+ for k in created.keys():
+ assert created[k] == shown[k], k
diff --git a/ckan/tests/logic/action/test_get.py b/ckan/tests/logic/action/test_get.py
--- a/ckan/tests/logic/action/test_get.py
+++ b/ckan/tests/logic/action/test_get.py
@@ -97,9 +97,7 @@ def test_group_list_sort_by_package_count(self):
factories.Dataset(groups=[{'name': 'bb'}])
group_list = helpers.call_action('group_list', sort='package_count')
- # default is descending order
-
- eq(group_list, ['bb', 'aa'])
+ eq(sorted(group_list), sorted(['bb', 'aa']))
def test_group_list_sort_by_package_count_ascending(self):
@@ -113,6 +111,15 @@ def test_group_list_sort_by_package_count_ascending(self):
eq(group_list, ['bb', 'aa'])
+ def assert_equals_expected(self, expected_dict, result_dict):
+ superfluous_keys = set(result_dict) - set(expected_dict)
+ assert not superfluous_keys, 'Did not expect key: %s' % \
+ ' '.join(('%s=%s' % (k, result_dict[k]) for k in superfluous_keys))
+ for key in expected_dict:
+ assert expected_dict[key] == result_dict[key], \
+ '%s=%s should be %s' % \
+ (key, result_dict[key], expected_dict[key])
+
def test_group_list_all_fields(self):
group = factories.Group()
@@ -121,8 +128,10 @@ def test_group_list_all_fields(self):
expected_group = dict(group.items()[:])
for field in ('users', 'tags', 'extras', 'groups'):
+ if field in group_list[0]:
+ del group_list[0][field]
del expected_group[field]
- expected_group['packages'] = 0
+
assert group_list[0] == expected_group
assert 'extras' not in group_list[0]
assert 'tags' not in group_list[0]
@@ -158,25 +167,19 @@ def test_group_list_groups_returned(self):
else:
child_group_returned, parent_group_returned = group_list[::-1]
expected_parent_group = dict(parent_group.items()[:])
- for field in ('users', 'tags', 'extras'):
- del expected_parent_group[field]
- expected_parent_group['capacity'] = u'public'
- expected_parent_group['packages'] = 0
- expected_parent_group['package_count'] = 0
- eq(child_group_returned['groups'], [expected_parent_group])
+
+ eq([g['name'] for g in child_group_returned['groups']], [expected_parent_group['name']])
class TestGroupShow(helpers.FunctionalTestBase):
def test_group_show(self):
-
group = factories.Group(user=factories.User())
group_dict = helpers.call_action('group_show', id=group['id'],
include_datasets=True)
- # FIXME: Should this be returned by group_create?
- group_dict.pop('num_followers', None)
+ group_dict.pop('packages', None)
eq(group_dict, group)
def test_group_show_error_not_found(self):
@@ -359,14 +362,12 @@ def test_organization_list_in_presence_of_custom_group_types(self):
class TestOrganizationShow(helpers.FunctionalTestBase):
def test_organization_show(self):
-
org = factories.Organization()
org_dict = helpers.call_action('organization_show', id=org['id'],
include_datasets=True)
- # FIXME: Should this be returned by organization_create?
- org_dict.pop('num_followers', None)
+ org_dict.pop('packages', None)
eq(org_dict, org)
def test_organization_show_error_not_found(self):
| group_create does not return results of group_show
When calling group_create, the action layer returns the result of calling group_dictize (which ignores the schema). The same is also true for group_list.
When calling package_create it returns the result of package_show.
It feels like it should be more consistent so that group_create definitively returns the same as group_show.
| As per package, provide an option just to return the ID/IDs
| 2015-01-14T14:26:21 |
ckan/ckan | 2,246 | ckan__ckan-2246 | [
"937"
] | 21c0db517bf9224e6d3ba35a563245a46086963c | diff --git a/ckanext/datapusher/cli.py b/ckanext/datapusher/cli.py
--- a/ckanext/datapusher/cli.py
+++ b/ckanext/datapusher/cli.py
@@ -1,3 +1,5 @@
+import sys
+
import ckan.lib.cli as cli
import ckan.plugins as p
import ckanext.datastore.db as datastore_db
@@ -8,38 +10,78 @@ class DatapusherCommand(cli.CkanCommand):
Usage:
- submit - Resubmit all datastore resources to the datapusher,
+ resubmit - Resubmit all datastore resources to the datapusher,
ignoring if their files haven't changed.
+ submit <pkgname> - Submits all resources from the package
+ identified by pkgname (either the short name or ID).
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
- if self.args and self.args[0] == 'submit':
+ if self.args and self.args[0] == 'resubmit':
+ self._confirm_or_abort()
+
self._load_config()
self._submit_all()
+ elif self.args and self.args[0] == 'submit':
+ self._confirm_or_abort()
+
+ if len(self.args) != 2:
+ print "This command requires an argument\n"
+ print self.usage
+ sys.exit(1)
+
+ self._load_config()
+ self._submit_package(self.args[1])
else:
print self.usage
- def _submit_all(self):
+ def _confirm_or_abort(self):
question = (
"Data in any datastore resource that isn't in their source files "
"(e.g. data added using the datastore API) will be permanently "
"lost. Are you sure you want to proceed?"
)
answer = cli.query_yes_no(question, default=None)
- if answer == 'yes':
- resources_ids = datastore_db.get_all_resources_ids_in_datastore()
- print 'Submitting %d datastore resources' % len(resources_ids)
- datapusher_submit = p.toolkit.get_action('datapusher_submit')
- for resource_id in resources_ids:
- print ('Submitting %s...' % resource_id),
- data_dict = {
- 'resource_id': resource_id,
- 'ignore_hash': True,
- }
- if datapusher_submit(None, data_dict):
- print 'OK'
- else:
- print 'Fail'
+ if not answer == 'yes':
+ print "Aborting..."
+ sys.exit(0)
+
+ def _submit_all(self):
+ resources_ids = datastore_db.get_all_resources_ids_in_datastore()
+ self._submit(resource_ids)
+
+ def _submit_package(self, pkg_id):
+ import ckan.model as model
+
+ package_show = p.toolkit.get_action('package_show')
+ try:
+ pkg = package_show({'model': model, 'ignore_auth': True},
+ {'id': pkg_id.strip()})
+ except Exception, e:
+ print e
+ print "Package '{}' was not found".format(pkg_id)
+ sys.exit(1)
+
+ resource_ids = [r['id'] for r in pkg['resources']]
+ self._submit(resource_ids)
+
+ def _submit(self, resources):
+ import ckan.model as model
+
+ print 'Submitting %d datastore resources' % len(resources)
+ user = p.toolkit.get_action('get_site_user')(
+ {'model': model, 'ignore_auth': True}, {})
+ datapusher_submit = p.toolkit.get_action('datapusher_submit')
+ for resource_id in resources:
+ print ('Submitting %s...' % resource_id),
+ data_dict = {
+ 'resource_id': resource_id,
+ 'ignore_hash': True,
+ }
+ if datapusher_submit({'user': user['name']}, data_dict):
+ print 'OK'
+ else:
+ print 'Fail'
| Add paster command for the datapusher to datastore
Something like `paster datastore push [package_id]`. Also note https://github.com/okfn/ckanext-datastorer/tree/36-datapusher.
- [x] Add paster command
- [x] Store job status with `task_status_update` (is done automatically when using the `datapusher_submit` action)
Related to #938
https://github.com/okfn/ckan/wiki/Spec:-DataStore-and-FileStore-Consolidation
| 2015-01-28T12:57:14 |
||
ckan/ckan | 2,248 | ckan__ckan-2248 | [
"2218"
] | 21c0db517bf9224e6d3ba35a563245a46086963c | diff --git a/ckan/logic/validators.py b/ckan/logic/validators.py
--- a/ckan/logic/validators.py
+++ b/ckan/logic/validators.py
@@ -34,13 +34,9 @@ def owner_org_validator(key, data, errors, context):
model = context['model']
user = context['user']
user = model.User.get(user)
- if value == '' :
+ if value == '':
if not new_authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('A organization must be supplied'))
- package = context.get('package')
- # only sysadmins can remove datasets from org
- if package and package.owner_org and not user.sysadmin:
- raise Invalid(_('You cannot remove a dataset from an existing organization'))
return
group = model.Group.get(value)
| diff --git a/ckan/new_tests/controllers/test_package.py b/ckan/new_tests/controllers/test_package.py
--- a/ckan/new_tests/controllers/test_package.py
+++ b/ckan/new_tests/controllers/test_package.py
@@ -1,4 +1,4 @@
-from nose.tools import assert_equal, assert_true
+from nose.tools import assert_equal, assert_true, assert_not_equal
from routes import url_for
@@ -148,6 +148,183 @@ def test_previous_next_maintains_draft_state(self):
pkg = model.Package.by_name(u'previous-next-maintains-draft')
assert_equal(pkg.state, 'draft')
+ def test_dataset_edit_org_dropdown_visible_to_normal_user_with_orgs_available(self):
+ '''
+ The 'Organization' dropdown is available on the dataset create/edit
+ page to normal (non-sysadmin) users who have organizations available
+ to them.
+ '''
+ user = factories.User()
+ # user is admin of org.
+ org = factories.Organization(name="my-org",
+ users=[{'name': user['id'], 'capacity': 'admin'}])
+
+ app = self._get_test_app()
+ env = {'REMOTE_USER': user['name'].encode('ascii')}
+ response = app.get(
+ url=url_for(controller='package', action='new'),
+ extra_environ=env,
+ )
+
+ # organization dropdown available in create page.
+ assert 'id="field-organizations"' in response
+
+ # create dataset
+ form = response.forms['dataset-edit']
+ form['name'] = u'my-dataset'
+ form['owner_org'] = org['id']
+ response = submit_and_follow(app, form, env, 'save')
+
+ # add a resource to make the pkg active
+ resource_form = response.forms['resource-edit']
+ resource_form['url'] = u'http://example.com/resource'
+ submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
+ pkg = model.Package.by_name(u'my-dataset')
+ assert_equal(pkg.state, 'active')
+
+ # edit package page response
+ url = url_for(controller='package',
+ action='edit',
+ id=pkg.id)
+ pkg_edit_response = app.get(url=url, extra_environ=env)
+ # A field with the correct id is in the response
+ assert 'id="field-organizations"' in pkg_edit_response
+ # The organization id is in the response in a value attribute
+ assert 'value="{0}"'.format(org['id']) in pkg_edit_response
+
+ def test_dataset_edit_org_dropdown_normal_user_can_remove_org(self):
+ '''
+ A normal user (non-sysadmin) can remove an organization from a dataset
+ have permissions on.
+ '''
+ user = factories.User()
+ # user is admin of org.
+ org = factories.Organization(name="my-org",
+ users=[{'name': user['id'], 'capacity': 'admin'}])
+
+ app = self._get_test_app()
+ env = {'REMOTE_USER': user['name'].encode('ascii')}
+ response = app.get(
+ url=url_for(controller='package', action='new'),
+ extra_environ=env,
+ )
+
+ # create dataset with owner_org
+ form = response.forms['dataset-edit']
+ form['name'] = u'my-dataset'
+ form['owner_org'] = org['id']
+ response = submit_and_follow(app, form, env, 'save')
+
+ # add a resource to make the pkg active
+ resource_form = response.forms['resource-edit']
+ resource_form['url'] = u'http://example.com/resource'
+ submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
+ pkg = model.Package.by_name(u'my-dataset')
+ assert_equal(pkg.state, 'active')
+ assert_equal(pkg.owner_org, org['id'])
+ assert_not_equal(pkg.owner_org, None)
+
+ # edit package page response
+ url = url_for(controller='package',
+ action='edit',
+ id=pkg.id)
+ pkg_edit_response = app.get(url=url, extra_environ=env)
+
+ # edit dataset
+ edit_form = pkg_edit_response.forms['dataset-edit']
+ edit_form['owner_org'] = ''
+ submit_and_follow(app, edit_form, env, 'save')
+ post_edit_pkg = model.Package.by_name(u'my-dataset')
+ assert_equal(post_edit_pkg.owner_org, None)
+ assert_not_equal(post_edit_pkg.owner_org, org['id'])
+
+ def test_dataset_edit_org_dropdown_not_visible_to_normal_user_with_no_orgs_available(self):
+ '''
+ The 'Organization' dropdown is not available on the dataset
+ create/edit page to normal (non-sysadmin) users who have no
+ organizations available to them.
+ '''
+ user = factories.User()
+ # user isn't admin of org.
+ org = factories.Organization(name="my-org")
+
+ app = self._get_test_app()
+ env = {'REMOTE_USER': user['name'].encode('ascii')}
+ response = app.get(
+ url=url_for(controller='package', action='new'),
+ extra_environ=env,
+ )
+
+ # organization dropdown available in create page.
+ assert 'id="field-organizations"' not in response
+
+ # create dataset
+ form = response.forms['dataset-edit']
+ form['name'] = u'my-dataset'
+ response = submit_and_follow(app, form, env, 'save')
+
+ # add a resource to make the pkg active
+ resource_form = response.forms['resource-edit']
+ resource_form['url'] = u'http://example.com/resource'
+ submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
+ pkg = model.Package.by_name(u'my-dataset')
+ assert_equal(pkg.state, 'active')
+
+ # edit package response
+ url = url_for(controller='package',
+ action='edit',
+ id=model.Package.by_name(u'my-dataset').id)
+ pkg_edit_response = app.get(url=url, extra_environ=env)
+ # A field with the correct id is in the response
+ assert 'id="field-organizations"' not in pkg_edit_response
+ # The organization id is in the response in a value attribute
+ assert 'value="{0}"'.format(org['id']) not in pkg_edit_response
+
+ def test_dataset_edit_org_dropdown_visible_to_sysadmin_with_no_orgs_available(self):
+ '''
+ The 'Organization' dropdown is available to sysadmin users regardless
+ of whether they personally have an organization they administrate.
+ '''
+ user = factories.User()
+ sysadmin = factories.Sysadmin()
+ # user is admin of org.
+ org = factories.Organization(name="my-org",
+ users=[{'name': user['id'], 'capacity': 'admin'}])
+
+ app = self._get_test_app()
+ # user in env is sysadmin
+ env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
+ response = app.get(
+ url=url_for(controller='package', action='new'),
+ extra_environ=env,
+ )
+
+ # organization dropdown available in create page.
+ assert 'id="field-organizations"' in response
+
+ # create dataset
+ form = response.forms['dataset-edit']
+ form['name'] = u'my-dataset'
+ form['owner_org'] = org['id']
+ response = submit_and_follow(app, form, env, 'save')
+
+ # add a resource to make the pkg active
+ resource_form = response.forms['resource-edit']
+ resource_form['url'] = u'http://example.com/resource'
+ submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
+ pkg = model.Package.by_name(u'my-dataset')
+ assert_equal(pkg.state, 'active')
+
+ # edit package page response
+ url = url_for(controller='package',
+ action='edit',
+ id=pkg.id)
+ pkg_edit_response = app.get(url=url, extra_environ=env)
+ # A field with the correct id is in the response
+ assert 'id="field-organizations"' in pkg_edit_response
+ # The organization id is in the response in a value attribute
+ assert 'value="{0}"'.format(org['id']) in pkg_edit_response
+
class TestPackageResourceRead(helpers.FunctionalTestBase):
@classmethod
| Enable a logged in user to move dataset to another organization
In #953 this change was introduced:
> User is logged in, and is a member of an organization, and dataset is a
> member of any org: Visibility shows but Organization does not show when
> updating a dataset.
So, this prevents a logged in user to change dataset's organization.
Why can't a legitimate user to move datasets around organizations?
| Under "legitimate" I'd suggest "Admin of both organizations".
That's correct: https://github.com/ckan/ckan/blob/e9e56ddf44d22f63c1eb4667f1a4f4efc0541215/ckan/logic/action/get.py#L579
| 2015-01-28T16:18:53 |
ckan/ckan | 2,262 | ckan__ckan-2262 | [
"1847"
] | 54f70a249cf20a4290a48d0a72e8252c6d55a960 | diff --git a/ckan/config/middleware.py b/ckan/config/middleware.py
--- a/ckan/config/middleware.py
+++ b/ckan/config/middleware.py
@@ -75,6 +75,8 @@ def make_app(conf, full_stack=True, static_files=True, **app_conf):
# CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
#app = QueueLogMiddleware(app)
+ if asbool(config.get('ckan.use_pylons_response_cleanup_middleware', True)):
+ app = execute_on_completion(app, config, cleanup_pylons_response_string)
# Fanstatic
if asbool(config.get('debug', False)):
@@ -360,3 +362,41 @@ def __call__(self, environ, start_response):
self.engine.execute(sql, key, data.get('url'), data.get('type'))
return []
return self.app(environ, start_response)
+
+
+def generate_close_and_callback(iterable, callback, environ):
+ """
+ return a generator that passes through items from iterable
+ then calls callback(environ).
+ """
+ try:
+ for item in iterable:
+ yield item
+ except GeneratorExit:
+ if hasattr(iterable, 'close'):
+ iterable.close()
+ raise
+ finally:
+ callback(environ)
+
+
+def execute_on_completion(application, config, callback):
+ """
+ Call callback(environ) once complete response is sent
+ """
+ def inner(environ, start_response):
+ try:
+ result = application(environ, start_response)
+ except:
+ callback(environ)
+ raise
+ return generate_close_and_callback(result, callback, environ)
+ return inner
+
+
+def cleanup_pylons_response_string(environ):
+ try:
+ msg = 'response cleared by pylons response cleanup middleware'
+ environ['pylons.controller']._py_object.response._body = msg
+ except (KeyError, AttributeError):
+ pass
| diff --git a/ckan/tests/config/test_middleware.py b/ckan/tests/config/test_middleware.py
new file mode 100644
--- /dev/null
+++ b/ckan/tests/config/test_middleware.py
@@ -0,0 +1,25 @@
+import ckan.tests.helpers as helpers
+
+from nose.tools import assert_equals, assert_not_equals
+from routes import url_for
+
+
+class TestPylonsResponseCleanupMiddleware(helpers.FunctionalTestBase):
+ @classmethod
+ def _apply_config_changes(cls, config):
+ config['ckan.use_pylons_response_cleanup_middleware'] = True
+
+ def test_homepage_with_middleware_activated(self):
+ '''Test the home page renders with the middleware activated
+
+ We are just testing the home page renders without any troubles and that
+ the middleware has not done anything strange to the response string'''
+ app = self._get_test_app()
+ response = app.get(url=url_for(controller='home', action='index'))
+
+ assert_equals(200, response.status_int)
+ # make sure we haven't overwritten the response too early.
+ assert_not_equals(
+ 'response cleared by pylons response cleanup middleware',
+ response.body
+ )
| Large memory leak in datastore API
When querying the datastore API on a large dataset (eg. querying 10,000 rows at a time) the memory used by the process keeps growing. On our dataset this is about 20MB leaking per request - more than can be reasonably mitigated by restarting the worker after a number of requests.
This happens both when running the application using Paster and when running as a WSGI script under Apache2.
It seems the string containing the response is kept in memory. This is referenced by the response object, which is referenced by the pylon context and (a few steps up) by the WSGI environment. The environment itself is referenced numerous times, so it's hard to track this down.
As a proof of concept, the symptom can be removed by this "amusing" code:
``` python
diff --git a/ckan/controllers/api.py b/ckan/controllers/api.py
index 08ba3a8..4b09102 100644
--- a/ckan/controllers/api.py
+++ b/ckan/controllers/api.py
@@ -19,6 +19,12 @@ import ckan.lib.munge as munge
from ckan.common import _, c, request, response
+from threading import Timer
+
+
+def _clean_links(env):
+ env['pylons.controller']._py_object.response._body = ''
+
log = logging.getLogger(__name__)
@@ -67,7 +73,9 @@ class ApiController(base.BaseController):
# avoid status_code_redirect intercepting error responses
environ['pylons.status_code_redirect'] = True
- return base.BaseController.__call__(self, environ, start_response)
+ result = base.BaseController.__call__(self, environ, start_response)
+ Timer(10, _clean_links, [environ]).start()
+ return result
def _finish(self, status_int, response_data=None,
content_type='text'):
```
Obviously this is not a workable solution!
| @aliceh75 , is it possible to get some more details on the size of the dataset and the types of queries you are performing on the datastore? I need to be able to reliably recreate this to get a deeper look at it.
Our dataset is about 3,000,000 records. The queries are just simple fetches using the datastore API. I still have the script I used to test this at the time. If I run:
``` python
import json
import urllib
import urllib2
while True:
request_params = {
'resource_id': '...',
'offset': 0,
'limit': 10000
}
request = urllib2.Request('http://.../api/action/datastore_search')
response = urllib2.urlopen(request, urllib.quote(json.dumps(request_params)))
str = response.read(100)
print len(str)
print str
response.close()
```
Then I can see the memory used by CKAN go up and up and up. This is true both when ckan is running as a WSGI script and when it is running using paster.
Ensuring the response string is cleared (as I do in the code provided in an earlier comment) removes the symptom - no more memory leak. But the code I wrote is obviously not a proper solution - it's just there to demonstrate where the problem comes from.
If you change the end of _finish() in the api controller to write the response data instead of returning it do you see any difference? I realise this also seems a bit hacky ...
```
response.write(response_msg)
return ''
```
instead of
```
return response_msg
```
just a guess, but could something be logging the SQL issued to the DB? I know another web framwork records all the SQL issued when run in debug mode.
I've been poking around using heapy and it looks like the previous responses are hanging around in memory, but it's nontrivial to trackdown why they aren't getting garbage collected.
```
ipdb> heap[0].byid
Set of 151666 <str> objects. Total size = 226687040 bytes.
Index Size % Cumulative % Representation (limited)
0 21196016 9.4 21196016 9.4 '{"help": "ht...l": 2173762}}'
1 21196016 9.4 42392032 18.7 '{"help": "ht...l": 2173762}}'
2 21196016 9.4 63588048 28.1 '{"help": "ht...l": 2173762}}'
3 21196016 9.4 84784064 37.4 '{"help": "ht...l": 2173762}}'
4 21196016 9.4 105980080 46.8 '{"help": "ht...l": 2173762}}'
5 21196016 9.4 127176096 56.1 '{"help": "ht...l": 2173762}}'
6 21196016 9.4 148372112 65.5 '{"help": "ht...l": 2173762}}'
7 21196016 9.4 169568128 74.8 '{"help": "ht...l": 2173762}}'
8 21196016 9.4 190764144 84.2 '{"help": "ht...l": 2173762}}'
9 21196016 9.4 211960160 93.5 '{"help": "ht...l": 2173762}}'
...
```
e: Below is the shortest paths of references that heapy can find for the above strings
```
ipdb> pprint([strings.byid[x].shpaths for x in range(0,10)])
[ 0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4e56250>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4e56e50>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4e565f0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4ce2430>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x5433ab0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.t139785342744320_c_traceobj.__self__.__dict__['curframe'].f_locals['result'][0]
1: hpy().Root.t139785342744320_c_traceobj.__self__.__dict__['curframe_locals']['result'][0]
2: hpy().Root.t139785342744320_c_traceobj.im_self.__dict__['curframe'].f_locals['result'][0]
3: hpy().Root.t139785342744320_c_traceobj.im_self.__dict__['curframe_locals']['result'][0],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x55288f0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4ce28d0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x4ce21f0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body'],
0: hpy().Root.i0_modules['routes'].__dict__['_RequestConfig'].__dict__['_RequestConfig__shared_state'].??[<weakref...x54339b0>]['environ']['pylons.controller'].__dict__['_py_object'].__dict__['response'].__dict__['_body']
```
e: .rp shows a tree of references, so in this case,the str is onctained in a dict of pylons.controllers.util.Response and that's contained in 3, dict of pylons.util.PylonsContext and so on and so forth
```
...
ipdb> strings.byid[0].rp
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 <id 0x7f21a9792010>: 0x7f21a9792010
1: a [-] 1 dict of pylons.controllers.util.Response: 0x7f224a002550
2: aa ---- [-] 1 pylons.controllers.util.Response: 0x7f224a002550
3: a3 [-] 1 dict of pylons.util.PylonsContext: 0x7f224a002790
4: a4 ------ [-] 1 pylons.util.PylonsContext: 0x7f224a002790
5: a5 [-] 1 dict of pylons.util.AttribSafeContextObj: 0x7f224a002ad0
6: a6 -------- [-] 1 pylons.util.AttribSafeContextObj: 0x7f224a002ad0
7: a7 [^ 3] 1 dict of pylons.util.PylonsContext: 0x7f224a002790
8: a4b ------ [-] 1 dict of ckan.controllers.api.ApiController: 0x7f224a002c10
9: a4ba [-] 1 ckan.controllers.api.ApiController: 0x7f224a002c10
<Type e.g. '_.more' for more.>
ipdb> strings.byid[0].rp.more
10: a4baa ------ [+] 1 dict (no owner): 0x7f222405ef30*50
11: a4bab [+] 1 types.MethodType: <ckan.controllers.api.ApiController ...
```
@joetsoi When I looked at this the reference chain I found was something like:
response string <- response object <- pylon context <- ... <- WSGI environment <- ...
The WSGI environment itself was referenced numerous times, I couldn't really make anything out of it.
So this isn't a memory leak. I'm just going to document what I did here, just in case it's of interest to anyone else, or if I find it useful in the future when I've forgotten what happened.
So the `"_RequestConfig__shared_state'].??[ < weakref...x4e56250>"` in my last reply suggested that these objects should of been cleaned up once there was no longer a reference to them, a manual `gc.collect()` didn't seem to do anything and inspecting the`RequestConfig shared_state` showed it to be a thread.local object.
So at this point i'm thinking we must be holding a reference to it in the controller somewhere that we odn't clean up.
using heapy and inserting some ipdb breakpoints before the controller returns the result from the api, something like
```
from guppy import hpy
hp = hpy()
heap = hp.heap()
strings = heap[0]
import ipdb; ipdb.set_trace()
```
it's clear that the response objects, specifically the response strings are the problem
```
ipdb> heap.byrcs
Partition of a set of 343689 objects. Total size = 266893568 bytes.
Index Count % Size % Cumulative % Referrers by Kind (class / dict of class)
0 19 0 190780624 71 190780624 71 dict of pylons.controllers.util.Response
1 11 0 21199320 8 211979944 79 dict of pylons.controllers.util.Response, list
2 107194 31 10007816 4 221987760 83 types.CodeType
3 41823 12 4018400 2 226006160 85 tuple
4 24904 7 3071408 1 229077568 86 function
5 9287 3 2350432 1 231428000 87 dict of module
6 6440 2 2324560 1 233752560 88 function, tuple
7 7144 2 2229040 1 235981600 88 type
8 15536 5 1949840 1 237931440 89 dict of type
9 19440 6 1858816 1 239790256 90 dict (no owner)
Partition of a set of 343689 objects. Total size = 266892800 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 137776 40 225205640 84 225205640 84 str
1 83479 24 7054200 3 232259840 87 tuple
2 8486 2 4350608 2 236610448 89 dict (no owner)
3 18988 6 2430464 1 239040912 90 types.CodeType
4 19980 6 2397600 1 241438512 90 function
5 842 0 2387696 1 243826208 91 dict of module
6 2190 1 1984280 1 245810488 92 type
7 2190 1 1815888 1 247626376 93 dict of type
8 9956 3 1585184 1 249211560 93 list
9 2563 1 1509984 1 250721544 94 unicode
```
and by inspecting those response strings using byid as above in my previous comment we see they're the api responses.
So I wanted to see if what we have in memory is exactly the responses that we requested (or was it copying one request over and over? etc)
I ended up tweaking your script a bit and removing the `total` from the result dict returned by `datastore_search`
```
import json
import urllib
import urllib2
i = 0
while True:
request_params = {
'resource_id': 'blah',
'offset': 0,
'limit': 100000 + i
}
request = urllib2.Request('http://localhost:5000/api/action/datastore_search')
response = urllib2.urlopen(request, urllib.quote(json.dumps(request_params)))
str = response.read()
print len(str)
print str[-100:]
response.close()
i += 1
```
This way, when I ran it the next time, the output from the script is something like
```
21195957
store_search", "next": "/api/action/datastore_search?offset=100000"}, "offset": 0, "limit": 100000}}
21196181
store_search", "next": "/api/action/datastore_search?offset=100001"}, "offset": 0, "limit": 100001}}
21196406
store_search", "next": "/api/action/datastore_search?offset=100002"}, "offset": 0, "limit": 100002}}
```
and when we insepct byid we get something like
```
ipdb> strings.byid
Set of 137776 <str> objects. Total size = 225205640 bytes.
Index Size % Cumulative % Representation (limited)
0 21198840 9.4 21198840 9.4 '{"help": "ht...it": 100013}}'
1 21198632 9.4 42397472 18.8 '{"help": "ht...it": 100012}}'
2 21198424 9.4 63595896 28.2 '{"help": "ht...it": 100011}}'
3 21198200 9.4 84794096 37.7 '{"help": "ht...it": 100010}}'
4 21197976 9.4 105992072 47.1 '{"help": "ht...it": 100009}}'
5 21197768 9.4 127189840 56.5 '{"help": "ht...it": 100008}}'
6 21197552 9.4 148387392 65.9 '{"help": "ht...it": 100007}}'
7 21197336 9.4 169584728 75.3 '{"help": "ht...it": 100006}}'
8 21197120 9.4 190781848 84.7 '{"help": "ht...it": 100005}}'
9 21196896 9.4 211978744 94.1 '{"help": "ht...it": 100004}}'
<137766 more rows. Type e.g. '_.more' to view.>
```
This way, we can directly see which of the requests we made are in memory. So we look at the next couple of strings
```
ipdb> strings.byid.more
Index Size % Cumulative % Representation (limited)
10 77528 0.0 212056272 94.2 '============...elease.\n\n\n'
11 30712 0.0 212086984 94.2 'Provide a re...n\n\n '
12 22352 0.0 212109336 94.2 '(?<=\\()(\\*...ng |zero\\? )'
13 18016 0.0 212127352 94.2 'Return a new....\n\n '
14 13656 0.0 212141008 94.2 '\n.. dialect...stgreSQL.\n\n'
15 12480 0.0 212153488 94.2 'subprocess -...cess.Popen.\n'
16 12088 0.0 212165576 94.2 "Create a new...k'``.\n\n "
17 11088 0.0 212176664 94.2 'Create a SQL....\n\n '
18 10904 0.0 212187568 94.2 '\n Co....\n\n '
19 10504 0.0 212198072 94.2 '\n Format...output.\n '
<137756 more rows. Type e.g. '_.more' to view.>
```
At this point, the first requests we made no longer appear in memory, the requests for 100000-100003 don't appear to be in memory.
So by doing some stepping through each request and stopping before the controller returned each time, we can see that it maintains a maximum of 10 response strings in memory.
```
ipdb> c
2015-01-15 17:54:57,892 INFO [ckan.lib.base] /api/action/datastore_search render time 33.155 seconds
> /home/joe/projects/json_datastore/src/ckan/ckan/controllers/api.py(82)__call__()
81 import ipdb; ipdb.set_trace()
---> 82 log.debug('mem {0}'.format(result[0][-20]))
83
ipdb> strings.byid
Set of 151739 <str> objects. Total size = 226701936 bytes.
Index Size % Cumulative % Representation (limited)
0 21199056 9.4 21199056 9.4 '{"help": "ht...it": 100014}}'
1 21198840 9.4 42397896 18.7 '{"help": "ht...it": 100013}}'
2 21198632 9.4 63596528 28.1 '{"help": "ht...it": 100012}}'
3 21198424 9.4 84794952 37.4 '{"help": "ht...it": 100011}}'
4 21198200 9.4 105993152 46.8 '{"help": "ht...it": 100010}}'
5 21197976 9.4 127191128 56.1 '{"help": "ht...it": 100009}}'
6 21197768 9.4 148388896 65.5 '{"help": "ht...it": 100008}}'
7 21197552 9.4 169586448 74.8 '{"help": "ht...it": 100007}}'
8 21197336 9.4 190783784 84.2 '{"help": "ht...it": 100006}}'
9 21197120 9.4 211980904 93.5 '{"help": "ht...it": 100005}}'
<151729 more rows. Type e.g. '_.more' to view.>
```
100004 has been wiped off. At this point after some discussion on irc, we thought it might be the fact that we're in debug mode, and it's just retaining the last 10 responses and their debugging information. but the problem remains if we turn on "debug = false"
At some point I pieced together the weakref, thread.local stuff with http://pythonpaste.org/paste-httpserver-threadpool.html and notice that [_the default number of threads for the paste http server is 10_](http://pythonpaste.org/modules/httpserver.html). If you test using "threadpool_workers=n" in your development.ini, it'll match the number of strings that will show up in heap[0].byid
The other thing to notice is that if you make 10 new requests to api endpoints that return a much smaller response
```
Set of 151950 <str> objects. Total size = 78397616 bytes.
Index Size % Cumulative % Representation (limited)
0 21199280 27.0 21199280 27.0 '{"help": "ht...it": 100015}}'
1 21199056 27.0 42398336 54.1 '{"help": "ht...it": 100014}}'
2 21198840 27.0 63597176 81.1 '{"help": "ht...it": 100013}}'
3 77528 0.1 63674704 81.2 '============...elease.\n\n\n'
4 30712 0.0 63705416 81.3 'Provide a re...n\n\n '
5 22352 0.0 63727768 81.3 '(?<=\\()(\\*...ng |zero\\? )'
6 18016 0.0 63745784 81.3 'Return a new....\n\n '
7 13656 0.0 63759440 81.3 '\n.. dialect...stgreSQL.\n\n'
8 12480 0.0 63771920 81.3 'subprocess -...cess.Popen.\n'
9 12088 0.0 63784008 81.4 "Create a new...k'``.\n\n "
<151940 more rows. Type e.g. '_.more' to view.>
ipdb> strings.byid.more
Index Size % Cumulative % Representation (limited)
10 11088 0.0 63795096 81.4 'Create a SQL....\n\n '
11 10904 0.0 63806000 81.4 '\n Co....\n\n '
12 10584 0.0 63816584 81.4 '{"help": "ht...facets": {}}}' (the smaller api responses)
13 10584 0.0 63827168 81.4 '{"help": "ht...facets": {}}}'
14 10584 0.0 63837752 81.4 '{"help": "ht...facets": {}}}'
15 10584 0.0 63848336 81.4 '{"help": "ht...facets": {}}}'
16 10584 0.0 63858920 81.5 '{"help": "ht...facets": {}}}'
17 10584 0.0 63869504 81.5 '{"help": "ht...facets": {}}}'
18 10504 0.0 63880008 81.5 '\n Format...output.\n '
19 9168 0.0 63889176 81.5 'Represent a ...ents.\n\n '
<151930 more rows. Type e.g. '_.more' to view.>
```
you'll see the total size has dropped. from 226701936 bytes to 78397616 bytes. But the entire time I've been debugging, I've been running
```
$ watch "cat /proc/`pgrep paster`/status"
...
VmPeak: 3497708 kB
VmSize: 3497708 kB
...
```
Which still reports the memory usage the same as when the larger requests were still in memory. This is apparently just hte way python works (see https://groups.google.com/forum/#!topic/celery-users/jVc3I3kPtlw). If you've just made a response that took up 500mb of memory, it stands to reason that you might have to do the same again quite soon, so python will hang onto the (virtual) memory. But it's not 'leaked'
**TL;DR**, this isn't a memory leak, each thread will hold onto the memory that it has used and another thread can't garbage collect another thread's data, so, the issue at this point is whether we can clear out the response string once the response has been returned and whether it is safe to do so.
e:http://pylons-webframework.readthedocs.org/en/latest/controllers.html#special-methods, next step will probably to investigate whether it's ok to clear out the string in something like the `__after__()` method of the controller.
heapy docs
http://guppy-pe.sourceforge.net/heapy-thesis.pdf
http://smira.ru/wp-content/uploads/2011/08/heapy.html
http://haypo-notes.readthedocs.org/en/latest/heap_fragmentation.html
https://chase-seibert.github.io/blog/2013/08/03/diagnosing-memory-leaks-python.html
http://forthescience.org/blog/2014/08/16/python-and-memory-fragmentation/
weak references
https://docs.python.org/2/library/weakref.html
http://pymotw.com/2/weakref/
pylons docs
http://pylons-webframework.readthedocs.org/en/latest/execution.html
http://pythonpaste.org/paste-httpserver-threadpool.html
awesome stuff @joetsoi, super useful!
I think it might be possible to have some cleanup middleware as suggested in these docs, https://code.google.com/p/modwsgi/wiki/RegisteringCleanupCode. Pylons does a similar-ish thing at the end of requests https://github.com/Pylons/pylons/blob/master/pylons/wsgiapp.py#L118-L122.
This wouldn't stop say 15 simultaneous requests returning large results eating up a lot of memory at once.
So if I understand correctly, you are suggesting that the apparent memory usage should max out after 10 requests (with the occasional increase if a given request is larger than any that have been before).
I remember this growing steadily over a long period of time, not maxing out quickly - but it was a while ago so I will run tests again to see if what I see is consistent with that idea.
| 2015-02-04T12:08:46 |
ckan/ckan | 2,337 | ckan__ckan-2337 | [
"2328"
] | 8e15127f3e5ddda6334d080c198234a33044df76 | diff --git a/ckan/config/middleware.py b/ckan/config/middleware.py
--- a/ckan/config/middleware.py
+++ b/ckan/config/middleware.py
@@ -218,7 +218,7 @@ def __call__(self, environ, start_response):
if qs:
# sort out weird encodings
- #qs = urllib.quote(qs, '')
+ qs = urllib.quote(qs, '')
environ['CKAN_CURRENT_URL'] = '%s?%s' % (path_info, qs)
else:
environ['CKAN_CURRENT_URL'] = path_info
| Requesting page with non-ASCII URL fails in Jinja template rendering
[Jinja only accepts unicode or ASCII](http://jinja.pocoo.org/docs/dev/api/#unicode)
`ckan/lib/base.py` relays requested URLs in raw format to Jinja, causing `UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 20: ordinal not in range(128)` if URL or its parts are rendered to page.
This issue is visible, for example, when searching for datasets using a browser that does not auto-escape non-ASCII characters. For example, IE dies not escape while Firefox does. This bug is easily reproduced by searching datasets with "ä" as the search term using IE, or by using curl: `curl -v "http://demo.ckan.org/dataset?q=ä"`
URLs and any other data used in Jinja templates should be decoded and encoded into unicode before rendering. This is not a trivial fix, since correctly identifying an unknown encoding in all cases is borderline, if not, impossible.
One possibility would be to assume requests come in UTF-8 or ASCII and just decode everything as if it was UTF-8 (since only-ASCII UTF-8 is the same as the same text in ASCII). This would not work for other encodings like latin-1, but would be a start.
I can create a patch and pull request for the UTF-8/ASCII-only solution if it's considered acceptable. Otherwise I'd like to request comments on other options. [BeautifulSoup](http://www.crummy.com/software/BeautifulSoup/bs3/documentation.html#Beautiful%20Soup%20Gives%20You%20Unicode,%20Dammit) has one solution for a few encodings.
| @bzar yes, please! A patch that expects only UTF-8/ASCII sounds great. We should only be passing unicode through to jinja.
Digging into this I have uncovered the following:
1. Pylons already assumes all data is UTF-8 and [decodes it into unicode by default since pylons 0.9.6](http://pylons-webframework.readthedocs.org/en/latest/tutorials/understanding_unicode.html#request-parameters)
2. The non-unicode string rendered by the template is CKAN_CURRENT_URL, defined [here](https://github.com/ckan/ckan/blob/master/ckan/config/middleware.py#L222)
3. Changing CKAN_CURRENT_URL to unicode breaks "url_for" provided by "routes" library
4. Current code base already has a fix for this that escapes the UTF-8 url into valid ASCII [here](https://github.com/ckan/ckan/blob/master/ckan/config/middleware.py#L220-L221)
5. This fix has been commented out in [this commit](https://github.com/ckan/ckan/commit/0e7fd02d80bd822c85a5d754dec1f10010273cbe) because it affected legacy templates
Should this "fix" be reverted or at least parametrized?
@bzar we should revert it. the legacy templates have been removed.
There is still support for extensions using the legacy template engine. Write the PR and we can ask @davidread to test if it breaks their legacy templates. If not, we should backport this fix too.
I've read and read this, but have lots of questions about what is meant by failure and why, and it would require me to dig a lot to understand how to test this. We've switched to Jinja templates for the search page now, so it's also not much use for me to do it anyway, I'm afraid. Sorry.
Testing it is as simple as requesting `/dataset?q=ä"`. Make sure the HTTP client you use doesn't auto-escape the URL (Firefox for one does this, IE and curl don't as [mentioned in pylons' manual](http://pylons-webframework.readthedocs.org/en/latest/tutorials/understanding_unicode.html#output-encoding)).
The failure is a "500 internal server error" reply caused by rendering a non-ASCII python string (not unicode object) inside a JINJA template, namely the request URL stored in CKAN_CURRENT_URL.
I'll prepare the pull request.
| 2015-03-06T08:15:12 |
|
ckan/ckan | 2,416 | ckan__ckan-2416 | [
"2412"
] | 4fe6d105c9d1f3db21071d8ee8037e260985379f | diff --git a/ckanext/resourceproxy/controller.py b/ckanext/resourceproxy/controller.py
--- a/ckanext/resourceproxy/controller.py
+++ b/ckanext/resourceproxy/controller.py
@@ -40,7 +40,9 @@ def proxy_resource(context, data_dict):
# first we try a HEAD request which may not be supported
did_get = False
r = requests.head(url)
- if r.status_code == 405:
+ # 405 would be the appropriate response here, but 400 with
+ # the invalid method mentioned in the text is also possible (#2412)
+ if r.status_code in (400, 405):
r = requests.get(url, stream=True)
did_get = True
r.raise_for_status()
| resource proxy fails when server can't cope with the headers only request
I want to proxy a WMS service:
http://geodienste-hamburg.de/HH_WMS_BWVI_opendata?REQUEST=GetCapabilities&SERVICE=WMS
This link works (GET). But making a HEAD request results in the failure:
400 Invalid method in request
This is, in and of itself, not a CKAN problem. The problem is that the resource proxy gives up if this type of request is not supported:
https://github.com/ckan/ckan/blob/master/ckanext/resourceproxy/controller.py#L46
Is this intended behaviour? The comment on https://github.com/ckan/ckan/blob/master/ckanext/resourceproxy/controller.py#L40 implies (to me) not.
| That sounds sensible @mattfullerton do you want to send a patch that does a GET request if the HEAD fails?
will do
| 2015-05-07T07:09:36 |
|
ckan/ckan | 2,420 | ckan__ckan-2420 | [
"2401"
] | 4fe6d105c9d1f3db21071d8ee8037e260985379f | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1832,7 +1832,7 @@ def resource_view_full_page(resource_view):
def remove_linebreaks(string):
'''Remove linebreaks from string to make it usable in JavaScript'''
- return str(string).replace('\n', '')
+ return unicode(string).replace('\n', '')
def list_dict_filter(list_, search_field, output_field, value):
| diff --git a/ckan/tests/lib/test_helpers.py b/ckan/tests/lib/test_helpers.py
--- a/ckan/tests/lib/test_helpers.py
+++ b/ckan/tests/lib/test_helpers.py
@@ -91,13 +91,13 @@ def test_remove_linebreaks_removes_linebreaks(self):
assert result.find('\n') == -1,\
'"remove_linebreaks" should remove line breaks'
- def test_remove_linebreaks_casts_into_str(self):
- class StringLike(str):
+ def test_remove_linebreaks_casts_into_unicode(self):
+ class UnicodeLike(unicode):
pass
- test_string = StringLike('foo')
+ test_string = UnicodeLike('foo')
result = h.remove_linebreaks(test_string)
- strType = ''.__class__
+ strType = u''.__class__
assert result.__class__ == strType,\
- '"remove_linebreaks" casts into str()'
+ '"remove_linebreaks" casts into unicode()'
| 500 Error on view_form_filters.html with some utf-8 files with BOM
We get an internal server error when loading the "edit view" page (in our case to delete a view that was created).
Based on the server log, the error is coming from view_form_filters.html, the template/feature used to add predefined filters in the view.
The specific issue seem to be with a special character
```
[remote 172.17.42.1:25449] File '/project/ckan/ckan/templates/package/snippets/
[remote 172.17.42.1:25449] data-module-template-filter-inputs = "{{ h.
[remote 172.17.42.1:25449] File '/project/ckan/ckan/lib/helpers.py', line 1806 in
[remote 172.17.42.1:25449] return str(string).replace('\\n', '')
[remote 172.17.42.1:25449] UnicodeEncodeError: 'ascii' codec can't encode character u'\\ufeff' in position 851: ordinal not in range(128)
```
(Full log available here: http://pastebin.com/uSaTUhjZ)
After playing with the files, it seems that the error appears on some utf-8 encoded files with BOM. I shared 2 files (https://drive.google.com/folderview?id=0B7gAVbY0-eHrflZ6a0dUbVNEVzZKNkJDdFZCN2t0ak9YdWs0UzJHSUR6RUJDOGN3X1lVbm8&usp=sharing), one with BOM that triggers the error, another without BOM that works fine.
| 2015-05-08T08:01:10 |
|
ckan/ckan | 2,519 | ckan__ckan-2519 | [
"1874"
] | bbdae4eb77a45367b101989f28e2538fc209ea13 | diff --git a/ckan/lib/dictization/model_save.py b/ckan/lib/dictization/model_save.py
--- a/ckan/lib/dictization/model_save.py
+++ b/ckan/lib/dictization/model_save.py
@@ -27,7 +27,6 @@ def resource_dict_save(res_dict, context):
table = class_mapper(model.Resource).mapped_table
fields = [field.name for field in table.c]
-
# Resource extras not submitted will be removed from the existing extras
# dict
new_extras = {}
@@ -40,7 +39,9 @@ def resource_dict_save(res_dict, context):
if isinstance(getattr(obj, key), datetime.datetime):
if getattr(obj, key).isoformat() == value:
continue
- if key == 'url' and not new and obj.url <> value:
+ if key == 'last_modified' and not new:
+ obj.url_changed = True
+ if key == 'url' and not new and obj.url != value:
obj.url_changed = True
setattr(obj, key, value)
else:
diff --git a/ckan/lib/uploader.py b/ckan/lib/uploader.py
--- a/ckan/lib/uploader.py
+++ b/ckan/lib/uploader.py
@@ -173,6 +173,7 @@ def __init__(self, resource):
self.filename = munge.munge_filename(self.filename)
resource['url'] = self.filename
resource['url_type'] = 'upload'
+ resource['last_modified'] = datetime.datetime.utcnow()
self.upload_file = upload_field_storage.file
elif self.clear:
resource['url_type'] = ''
 | Modifying resource doesn't update its last updated timestamp
Resources show their creation date and last updated date which are the same. If the resource is modified, the last updated date should change but it is not updated.
| related to #1567 and would be fixed if I change #1921 to add the last_modified field to resource_show. Is there a reason this field wasn't included in resource_show?
'last_modified' in not set anywhere in CKAN core, this was (or is) supposed to be set by extensions like [ckanext-archiver](https://github.com/ckan/ckanext-archiver/blob/eb3f5d183a33c2286cf90b36bc7d2ca09bd13178/ckanext/archiver/tasks.py#L445). If I'm not mistaken it refers to changes on the original file, not the CKAN resource metadata, so it looks like there is more work involved than originally thought.
Moving to 2.4.
I think we should update this value when users upload files too. It's displayed to end users as the resource's "last updated" date, and users expect that date to change when they upload a new file.
I think the meaning and presentation of the fields should be more clear and consistent.
last_modified: the date and time when the actual data has been last changed
revision_timestamp: the date and time when the resource metadata has been last changed
If this is the correct meaning for the fields, the visual label on the templates for the resource should be consistent with that:
last_modified -> "Data last updated"
revision_timestamp -> "Metadata last updated"
Currently these fields are used interchangeably to display the same label "Last updated". See
`<td>{{ h.render_datetime(res.last_modified) or h.render_datetime(res.revision_timestamp) or h.render_datetime(res.created) or _('unknown') }}</td>`
https://github.com/ckan/ckan/blob/master/ckan/templates/package/resource_read.html#L158
| 2015-07-03T20:32:32 |
|
ckan/ckan | 2,563 | ckan__ckan-2563 | [
"2556"
] | 7498d0fd995fc1d5cd496bf5249e49df2138e053 | diff --git a/ckan/lib/app_globals.py b/ckan/lib/app_globals.py
--- a/ckan/lib/app_globals.py
+++ b/ckan/lib/app_globals.py
@@ -73,13 +73,9 @@
_CONFIG_CACHE = {}
def set_main_css(css_file):
- ''' Sets the main_css using debug css if needed. The css_file
- must be of the form file.css '''
+ ''' Sets the main_css. The css_file must be of the form file.css '''
assert css_file.endswith('.css')
- if config.get('debug') and css_file == '/base/css/main.css':
- new_css = '/base/css/main.debug.css'
- else:
- new_css = css_file
+ new_css = css_file
# FIXME we should check the css file exists
app_globals.main_css = str(new_css)
| Include the main.debug.css
Hi, I'm new to CKAN in my organization and turned debug to true for development and encountered the `AttributeError: 'module' object has no attribute 'css/main.debug.css'` error. It took me a while to figure out that I had to compile the less to get it.
Wouldn't it be easier to include this file so that debug mode automatically works without needing to change anything else?
| I don't see any difference between the main.css and the main.debug.css that I generate from the less files. Should there be a difference? If not, why do we have two versions at all?
if it is the same then let's kill it
@Laurent we agreed at the dev meeting that we're better off without the .debug versions. Do you have time to contribute a PR that removes this?
Sure!
| 2015-07-28T14:18:45 |
|
ckan/ckan | 2,567 | ckan__ckan-2567 | [
"2557"
] | 63e3bc9186f02a4bf1cce24b4c5d937e696c04c3 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -1921,7 +1921,8 @@ def get_group(id):
context = {'ignore_auth': True,
'limits': {'packages': 2},
'for_view': True}
- data_dict = {'id': id}
+ data_dict = {'id': id,
+ 'include_datasets': True}
try:
out = logic.get_action(get_action)(context, data_dict)
| Datasets not shown in the featured organization snippet and on the Organizations page
After updating to 2.4.0, I've noticed that the datasets are not shown in the featured organization snippet. Also, when visiting "Organizations" page, organizations are listed but for each one it says 0 datasets.
I can see that the same happens in the live demo (http://demo.ckan.org/).
| 2015-08-04T16:19:04 |
||
ckan/ckan | 2,576 | ckan__ckan-2576 | [
"2574"
] | c3a4322ca7b5dc0d73abc8af3bca853f6048660e | diff --git a/ckanext/datastore/db.py b/ckanext/datastore/db.py
--- a/ckanext/datastore/db.py
+++ b/ckanext/datastore/db.py
@@ -316,6 +316,13 @@ def create_table(context, data_dict):
})
field['type'] = _guess_type(records[0][field['id']])
+ # Check for duplicate fields
+ unique_fields = set([f['id'] for f in supplied_fields])
+ if not len(unique_fields) == len(supplied_fields):
+ raise ValidationError({
+ 'field': ['Duplicate column names are not supported']
+ })
+
if records:
# check record for sanity
if not isinstance(records[0], dict):
| diff --git a/ckanext/datastore/tests/test_create.py b/ckanext/datastore/tests/test_create.py
--- a/ckanext/datastore/tests/test_create.py
+++ b/ckanext/datastore/tests/test_create.py
@@ -1,7 +1,7 @@
import json
import nose
import sys
-from nose.tools import assert_equal
+from nose.tools import assert_equal, raises
import pylons
from pylons import config
@@ -138,6 +138,21 @@ def test_create_doesnt_add_more_indexes_when_updating_data(self):
current_index_names = self._get_index_names(resource['id'])
assert_equal(previous_index_names, current_index_names)
+ @raises(p.toolkit.ValidationError)
+ def test_create_duplicate_fields(self):
+ package = factories.Dataset()
+ data = {
+ 'resource': {
+ 'book': 'crime',
+ 'author': ['tolstoy', 'dostoevsky'],
+ 'package_id': package['id']
+ },
+ 'fields': [{'id': 'book', 'type': 'text'},
+ {'id': 'book', 'type': 'text'}],
+ }
+ result = helpers.call_action('datastore_create', **data)
+
+
def _has_index_on_field(self, resource_id, field):
sql = u"""
SELECT
| Datastore explodes with duplicate columns
When provided with duplicate columns from datapushers (to datastore_create) datastore explodes!
```
<class 'sqlalchemy.exc.ProgrammingError'>: (ProgrammingError) column "CITY OF BRISTOL"
specified more than once 'CREATE TABLE "652faf02-02db-4b49-911a-8305e7a6d7c1" ("_id" serial
primary key, "_full_text" tsvector, "{1FB4E294-102B-475C-876A-000005E78B1E}" text, "177000"
numeric, "2012-05-29 00:00" timestamp, "BS4 5AW" text, "S" text, "N" text, "F" text, "11" text, "WATER
LANE" text, "BRISLINGTON" text, "BRISTOL" text, "CITY OF BRISTOL" text, "CITY OF BRISTOL"
text, "A" text); ' {}
```
The solution for duplicate columns is probably non-obvious, however it would be nicer for datastore to log the error cleanly rather than raising a 500.
Relevant part of stack trace:
```
Module ckanext.datastore.logic.action:140 in datastore_create
>> result = db.create(context, data_dict)
Module ckanext.datastore.db:1064 in create
>> create_table(context, data_dict)
Module ckanext.datastore.db:342 in create_table
>> context['connection'].execute(sql_string.replace('%', '%%'))
Module sqlalchemy.engine.base:719 in execute
>> return self._execute_text(object, multiparams, params)
Module sqlalchemy.engine.base:868 in _execute_text
>> statement, parameters
Module sqlalchemy.engine.base:954 in _execute_context
>> context)
Module sqlalchemy.engine.base:1116 in _handle_dbapi_exception
>> exc_info
Module sqlalchemy.util.compat:189 in raise_from_cause
>> reraise(type(exception), exception, tb=exc_tb)
Module sqlalchemy.engine.base:947 in _execute_context
>> context)
Module sqlalchemy.engine.default:435 in do_execute
>> cursor.execute(statement, parameters)
```
| 2015-08-12T12:10:06 |
|
ckan/ckan | 2,599 | ckan__ckan-2599 | [
"2592"
] | 942a824c85ec2faf0a7c46a44b5d72997f911568 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -110,6 +110,28 @@ def url(*args, **kw):
return _add_i18n_to_url(my_url, locale=locale, **kw)
+def get_site_protocol_and_host():
+ '''Return the protocol and host of the configured `ckan.site_url`.
+ This is needed to generate valid, full-qualified URLs.
+
+ If `ckan.site_url` is set like this::
+
+ ckan.site_url = http://example.com
+
+ Then this function would return a tuple `('http', 'example.com')`
+ If the setting is missing, `(None, None)` is returned instead.
+
+ '''
+ site_url = config.get('ckan.site_url', None)
+ if site_url is not None:
+ parsed_url = urlparse.urlparse(site_url)
+ return (
+ parsed_url.scheme.encode('utf-8'),
+ parsed_url.netloc.encode('utf-8')
+ )
+ return (None, None)
+
+
def url_for(*args, **kw):
'''Return the URL for the given controller, action, id, etc.
@@ -139,6 +161,8 @@ def url_for(*args, **kw):
raise Exception('api calls must specify the version! e.g. ver=3')
# fix ver to include the slash
kw['ver'] = '/%s' % ver
+ if kw.get('qualified', False):
+ kw['protocol'], kw['host'] = get_site_protocol_and_host()
my_url = _routes_default_url_for(*args, **kw)
kw['__ckan_no_root'] = no_root
return _add_i18n_to_url(my_url, locale=locale, **kw)
@@ -222,7 +246,11 @@ def _add_i18n_to_url(url_to_amend, **kw):
root = ''
if kw.get('qualified', False):
# if qualified is given we want the full url ie http://...
- root = _routes_default_url_for('/', qualified=True)[:-1]
+ protocol, host = get_site_protocol_and_host()
+ root = _routes_default_url_for('/',
+ qualified=True,
+ host=host,
+ protocol=protocol)[:-1]
# ckan.root_path is defined when we have none standard language
# position in the url
root_path = config.get('ckan.root_path', None)
@@ -231,15 +259,15 @@ def _add_i18n_to_url(url_to_amend, **kw):
# into the ecportal core is done - Toby
# we have a special root specified so use that
if default_locale:
- root = re.sub('/{{LANG}}', '', root_path)
+ root_path = re.sub('/{{LANG}}', '', root_path)
else:
- root = re.sub('{{LANG}}', locale, root_path)
+ root_path = re.sub('{{LANG}}', locale, root_path)
# make sure we don't have a trailing / on the root
- if root[-1] == '/':
- root = root[:-1]
- url = url_to_amend[len(re.sub('/{{LANG}}', '', root_path)):]
- url = '%s%s' % (root, url)
- root = re.sub('/{{LANG}}', '', root_path)
+ if root_path[-1] == '/':
+ root_path = root_path[:-1]
+
+ url_path = url_to_amend[len(root):]
+ url = '%s%s%s' % (root, root_path, url_path)
else:
if default_locale:
url = url_to_amend
| diff --git a/ckan/tests/lib/test_helpers.py b/ckan/tests/lib/test_helpers.py
--- a/ckan/tests/lib/test_helpers.py
+++ b/ckan/tests/lib/test_helpers.py
@@ -1,7 +1,9 @@
import nose
+import i18n
import ckan.lib.helpers as h
import ckan.exceptions
+from ckan.tests import helpers
eq_ = nose.tools.eq_
CkanUrlException = ckan.exceptions.CkanUrlException
@@ -55,6 +57,73 @@ def test_url_for_static_or_external_works_with_protocol_relative_url(self):
eq_(h.url_for_static_or_external(url), url)
+class TestHelpersUrlFor(object):
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ def test_url_for_default(self):
+ url = '/dataset/my_dataset'
+ generated_url = h.url_for(controller='package', action='read', id='my_dataset')
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ def test_url_for_with_locale(self):
+ url = '/de/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ locale='de')
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ def test_url_for_not_qualified(self):
+ url = '/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ qualified=False)
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ def test_url_for_qualified(self):
+ url = 'http://example.com/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ qualified=True)
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ @helpers.change_config('ckan.root_path', '/my/prefix')
+ def test_url_for_qualified_with_root_path(self):
+ url = 'http://example.com/my/prefix/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ qualified=True)
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ def test_url_for_qualified_with_locale(self):
+ url = 'http://example.com/de/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ qualified=True,
+ locale='de')
+ eq_(generated_url, url)
+
+ @helpers.change_config('ckan.site_url', 'http://example.com')
+ @helpers.change_config('ckan.root_path', '/my/custom/path/{{LANG}}/foo')
+ def test_url_for_qualified_with_root_path_and_locale(self):
+ url = 'http://example.com/my/custom/path/de/foo/dataset/my_dataset'
+ generated_url = h.url_for(controller='package',
+ action='read',
+ id='my_dataset',
+ qualified=True,
+ locale='de')
+ eq_(generated_url, url)
+
+
class TestHelpersRenderMarkdown(object):
def test_render_markdown_allow_html(self):
| Download URL has wrong domain name
Based on the mails form the ckan-dev mailinglist (https://lists.okfn.org/pipermail/ckan-dev/2015-August/009236.html), there seems to be a problem when a file is uploaded manually and if the external address of CKAN is different from the internal one.
In our setup the admin users use an internal-only domain to manage the open data portal. The external address is different and set correctly in the configuration (`ckan.site_url`). But when generating the absolute download URL of a resource, this setting is not considered, thus the download gets an internal URL, which an external user can not download.
@amercader already pointed me in the right direction, as where the URL is generated, the code is here: https://github.com/ckan/ckan/blob/master/ckan/lib/dictization/model_dictize.py#L118.
PR follows.
| 2015-08-25T10:06:43 |
|
ckan/ckan | 2,600 | ckan__ckan-2600 | [
"2597"
] | 942a824c85ec2faf0a7c46a44b5d72997f911568 | diff --git a/ckan/lib/munge.py b/ckan/lib/munge.py
--- a/ckan/lib/munge.py
+++ b/ckan/lib/munge.py
@@ -139,7 +139,7 @@ def munge_filename(filename):
# clean up
filename = substitute_ascii_equivalents(filename)
filename = filename.lower().strip()
- filename = re.sub(r'[^a-zA-Z0-9. -]', '', filename).replace(' ', '-')
+ filename = re.sub(r'[^a-zA-Z0-9_. -]', '', filename).replace(' ', '-')
# resize if needed but keep extension
name, ext = os.path.splitext(filename)
# limit overly long extensions
| diff --git a/ckan/tests/lib/test_munge.py b/ckan/tests/lib/test_munge.py
--- a/ckan/tests/lib/test_munge.py
+++ b/ckan/tests/lib/test_munge.py
@@ -48,8 +48,9 @@ class TestMungeFilename(object):
('random:other%character&', 'randomothercharacter'),
(u'u with umlaut \xfc', 'u-with-umlaut-u'),
('2014-11-10 12:24:05.340603my_image.jpeg',
- '2014-11-10-122405.340603myimage.jpeg'),
+ '2014-11-10-122405.340603my_image.jpeg'),
('file.csv', 'file.csv'),
+ ('underscores_are_awesome', 'underscores_are_awesome'),
('f' * 100 + '.csv', 'f' * 96 + '.csv'),
('path/to/file.csv', 'file.csv'),
('.longextension', '.longextension'),
| Fileupload removes underscores from filenames
There doesn't seem to be any reason to forbid underscores in filenames, especially since munge allows them elsewhere. Looks like someone just forgot to add an underscore to the replace function. Have a PR all ready to go with fix and updated test, just need to know what branch to submit it to ;)
| 2015-08-25T12:14:46 |
|
ckan/ckan | 2,639 | ckan__ckan-2639 | [
"2636"
] | 4ca2e6158c4004482b31f0b80382e338feba56b8 | diff --git a/ckan/pastertemplates/__init__.py b/ckan/pastertemplates/__init__.py
--- a/ckan/pastertemplates/__init__.py
+++ b/ckan/pastertemplates/__init__.py
@@ -51,6 +51,11 @@ class CkanextTemplate(Template):
def check_vars(self, vars, cmd):
vars = Template.check_vars(self, vars, cmd)
+ # workaround for a paster issue https://github.com/ckan/ckan/issues/2636
+ # this is only used from a short-lived paster command
+ reload(sys)
+ sys.setdefaultencoding('utf-8')
+
if not vars['project'].startswith('ckanext-'):
print "\nError: Project name must start with 'ckanext-'"
sys.exit(1)
@@ -63,7 +68,7 @@ def check_vars(self, vars, cmd):
keywords = [keyword for keyword in keywords
if keyword not in ('ckan', 'CKAN')]
keywords.insert(0, 'CKAN')
- vars['keywords'] = ' '.join(keywords)
+ vars['keywords'] = u' '.join(keywords)
# For an extension named ckanext-example we want a plugin class
# named ExamplePlugin.
| Unicode decode error when creating an extension
I have a weird name. It has a weird letter: _ö_ in it. Trying to create an extension:
```
paster --plugin=ckan create -t ckanext ckanext-example
```
and providing my real name when asked for it ends up with a `UnicodeDecodeError`:
```
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 10: ordinal not in range(128)
```
This could be solved with me changing my name. But that's not going to happen for everyone with weird characters in their names.
| 2015-09-11T17:54:08 |
||
ckan/ckan | 2,653 | ckan__ckan-2653 | [
"2652"
] | 9f4de3882c8425f44324d75b759f4c115bca1835 | diff --git a/ckan/lib/cli.py b/ckan/lib/cli.py
--- a/ckan/lib/cli.py
+++ b/ckan/lib/cli.py
@@ -222,7 +222,7 @@ def command(self):
os.remove(f)
model.repo.clean_db()
- search.clear()
+ search.clear_all()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
@@ -498,9 +498,12 @@ def show(self):
pprint(index)
def clear(self):
- from ckan.lib.search import clear
+ from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
- clear(package_id)
+ if not package_id:
+ clear_all()
+ else:
+ clear(package_id)
def rebuild_fast(self):
### Get out config but without starting pylons environment ####
diff --git a/ckan/lib/search/__init__.py b/ckan/lib/search/__init__.py
--- a/ckan/lib/search/__init__.py
+++ b/ckan/lib/search/__init__.py
@@ -239,13 +239,16 @@ def show(package_reference):
return package_query.get_index(package_reference)
-def clear(package_reference=None):
+def clear(package_reference):
package_index = index_for(model.Package)
- if package_reference:
- log.debug("Clearing search index for dataset %s..." %
- package_reference)
- package_index.delete_package({'id': package_reference})
- elif not SIMPLE_SEARCH:
+ log.debug("Clearing search index for dataset %s..." %
+ package_reference)
+ package_index.delete_package({'id': package_reference})
+
+
+def clear_all():
+ if not SIMPLE_SEARCH:
+ package_index = index_for(model.Package)
log.debug("Clearing search index...")
package_index.clear()
| diff --git a/ckan/tests/helpers.py b/ckan/tests/helpers.py
--- a/ckan/tests/helpers.py
+++ b/ckan/tests/helpers.py
@@ -187,7 +187,7 @@ def _apply_config_changes(cls, cfg):
def setup(self):
'''Reset the database and clear the search indexes.'''
reset_db()
- search.clear()
+ search.clear_all()
@classmethod
def teardown_class(cls):
diff --git a/ckan/tests/legacy/__init__.py b/ckan/tests/legacy/__init__.py
--- a/ckan/tests/legacy/__init__.py
+++ b/ckan/tests/legacy/__init__.py
@@ -320,7 +320,7 @@ def setup_test_search_index():
#from ckan import plugins
if not is_search_supported():
raise SkipTest("Search not supported")
- search.clear()
+ search.clear_all()
#plugins.load('synchronous_search')
def is_search_supported():
diff --git a/ckan/tests/legacy/functional/api/model/test_tag.py b/ckan/tests/legacy/functional/api/model/test_tag.py
--- a/ckan/tests/legacy/functional/api/model/test_tag.py
+++ b/ckan/tests/legacy/functional/api/model/test_tag.py
@@ -1,20 +1,20 @@
import copy
-from nose.tools import assert_equal
+from nose.tools import assert_equal
from ckan import model
from ckan.lib.create_test_data import CreateTestData
import ckan.lib.search as search
from ckan.tests.legacy.functional.api.base import BaseModelApiTestCase
-from ckan.tests.legacy.functional.api.base import Api1TestCase as Version1TestCase
-from ckan.tests.legacy.functional.api.base import Api2TestCase as Version2TestCase
+from ckan.tests.legacy.functional.api.base import Api1TestCase as Version1TestCase
+from ckan.tests.legacy.functional.api.base import Api2TestCase as Version2TestCase
class TagsTestCase(BaseModelApiTestCase):
@classmethod
def setup_class(cls):
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.testsysadmin = model.User.by_name(u'testsysadmin')
cls.comment = u'Comment umlaut: \xfc.'
@@ -23,7 +23,7 @@ def setup_class(cls):
@classmethod
def teardown_class(cls):
- search.clear()
+ search.clear_all()
model.repo.rebuild_db()
def test_register_get_ok(self):
@@ -33,7 +33,7 @@ def test_register_get_ok(self):
assert self.russian.name in results, results
assert self.tolstoy.name in results, results
assert self.flexible_tag.name in results, results
-
+
def test_entity_get_ok(self):
offset = self.tag_offset(self.russian.name)
res = self.app.get(offset, status=self.STATUS_200_OK)
diff --git a/ckan/tests/legacy/functional/api/test_package_search.py b/ckan/tests/legacy/functional/api/test_package_search.py
--- a/ckan/tests/legacy/functional/api/test_package_search.py
+++ b/ckan/tests/legacy/functional/api/test_package_search.py
@@ -34,7 +34,7 @@ def setup_class(self):
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def assert_results(self, res_dict, expected_package_names):
expected_pkgs = [self.package_ref_from_name(expected_package_name) \
@@ -62,7 +62,7 @@ def check(request_params, expected_params):
def test_00_read_search_params_with_errors(self):
def check_error(request_params):
- assert_raises(ValueError, ApiController._get_search_params, request_params)
+ assert_raises(ValueError, ApiController._get_search_params, request_params)
# uri json
check_error(UnicodeMultiDict({'qjson': '{"q": illegal json}'}))
# posted json
@@ -109,7 +109,7 @@ def test_05_uri_json_tags(self):
res_dict = self.data_from_res(res)
self.assert_results(res_dict, [u'annakarenina'])
assert res_dict['count'] == 1, res_dict
-
+
def test_05_uri_json_tags_multiple(self):
query = {'q': 'tags:russian tags:tolstoy'}
json_query = self.dumps(query)
@@ -131,7 +131,7 @@ def test_08_uri_qjson_malformed(self):
offset = self.base_url + '?qjson="q":""' # user forgot the curly braces
res = self.app.get(offset, status=400)
self.assert_json_response(res, 'Bad request - Could not read parameters')
-
+
def test_09_just_tags(self):
offset = self.base_url + '?q=tags:russian'
res = self.app.get(offset, status=200)
@@ -199,7 +199,7 @@ def setup_class(self):
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def test_07_uri_qjson_tags(self):
query = {'q': '', 'tags':['tolstoy']}
@@ -239,11 +239,11 @@ def test_07_uri_qjson_tags_reverse(self):
assert res_dict['count'] == 2, res_dict
def test_07_uri_qjson_extras(self):
- # TODO: solr is not currently set up to allow partial matches
+ # TODO: solr is not currently set up to allow partial matches
# and extras are not saved as multivalued so this
# test will fail. Make extras multivalued or remove?
raise SkipTest()
-
+
query = {"geographic_coverage":"England"}
json_query = self.dumps(query)
offset = self.base_url + '?qjson=%s' % json_query
@@ -267,7 +267,7 @@ def test_08_all_fields(self):
rating=3.0)
model.Session.add(rating)
model.repo.commit_and_remove()
-
+
query = {'q': 'russian', 'all_fields': 1}
json_query = self.dumps(query)
offset = self.base_url + '?qjson=%s' % json_query
diff --git a/ckan/tests/legacy/functional/test_group.py b/ckan/tests/legacy/functional/test_group.py
--- a/ckan/tests/legacy/functional/test_group.py
+++ b/ckan/tests/legacy/functional/test_group.py
@@ -13,7 +13,7 @@ class TestGroup(FunctionalTestCase):
@classmethod
def setup_class(self):
- search.clear()
+ search.clear_all()
model.Session.remove()
CreateTestData.create()
diff --git a/ckan/tests/legacy/lib/test_cli.py b/ckan/tests/legacy/lib/test_cli.py
--- a/ckan/tests/legacy/lib/test_cli.py
+++ b/ckan/tests/legacy/lib/test_cli.py
@@ -8,7 +8,7 @@
from ckan.lib.create_test_data import CreateTestData
from ckan.common import json
-from ckan.lib.search import index_for,query_for
+from ckan.lib.search import index_for,query_for, clear_all
class TestDb:
@classmethod
diff --git a/ckan/tests/legacy/lib/test_dictization.py b/ckan/tests/legacy/lib/test_dictization.py
--- a/ckan/tests/legacy/lib/test_dictization.py
+++ b/ckan/tests/legacy/lib/test_dictization.py
@@ -31,7 +31,7 @@ class TestBasicDictize:
def setup_class(cls):
# clean the db so we can run these tests on their own
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.package_expected = {
diff --git a/ckan/tests/legacy/lib/test_solr_package_search.py b/ckan/tests/legacy/lib/test_solr_package_search.py
--- a/ckan/tests/legacy/lib/test_solr_package_search.py
+++ b/ckan/tests/legacy/lib/test_solr_package_search.py
@@ -56,7 +56,7 @@ def setup_class(cls):
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def _pkg_names(self, result):
return ' '.join(result['results'])
@@ -316,7 +316,7 @@ def setup_class(cls):
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def test_overall(self):
check_search_results('annakarenina', 1, ['annakarenina'])
@@ -358,7 +358,7 @@ def setup_class(cls):
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def _do_search(self, q, expected_pkgs, count=None):
query = {
@@ -422,7 +422,7 @@ def setup_class(cls):
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def _do_search(self, department, expected_pkgs, count=None):
result = search.query_for(model.Package).run({'q': 'department: %s' % department})
@@ -467,7 +467,7 @@ def setup_class(cls):
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def _do_search(self, q, wanted_results):
query = {
diff --git a/ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py b/ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py
--- a/ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py
+++ b/ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py
@@ -52,19 +52,19 @@ def setup_class(cls):
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
def setup(self):
self._create_package()
-
+
def teardown(self):
self._remove_package()
self._remove_package(u'new_name')
-
+
def _create_package(self, package=None):
CreateTestData.create_arbitrary(self.new_pkg_dict)
return model.Package.by_name(self.new_pkg_dict['name'])
-
+
def _remove_package(self, name=None):
package = model.Package.by_name(name or 'council-owned-litter-bins')
if package:
@@ -84,7 +84,7 @@ def test_03_update_package_from_dict(self):
extra = model.PackageExtra(key='published_by', value='barrow')
package._extras[extra.key] = extra
model.repo.commit_and_remove()
-
+
check_search_results('', 3)
check_search_results('barrow', 1, ['new_name'])
@@ -106,5 +106,5 @@ def test_04_delete_package_from_dict(self):
rev = model.repo.new_revision()
package.delete()
model.repo.commit_and_remove()
-
+
check_search_results('', 2)
diff --git a/ckan/tests/legacy/logic/test_action.py b/ckan/tests/legacy/logic/test_action.py
--- a/ckan/tests/legacy/logic/test_action.py
+++ b/ckan/tests/legacy/logic/test_action.py
@@ -35,7 +35,7 @@ class TestAction(WsgiAppCase):
@classmethod
def setup_class(cls):
model.repo.rebuild_db()
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.sysadmin_user = model.User.get('testsysadmin')
cls.normal_user = model.User.get('annafan')
@@ -1349,7 +1349,7 @@ class TestBulkActions(WsgiAppCase):
@classmethod
def setup_class(cls):
- search.clear()
+ search.clear_all()
model.Session.add_all([
model.User(name=u'sysadmin', apikey=u'sysadmin',
password=u'sysadmin', sysadmin=True),
@@ -1436,7 +1436,7 @@ class TestResourceAction(WsgiAppCase):
@classmethod
def setup_class(cls):
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.sysadmin_user = model.User.get('testsysadmin')
@@ -1539,7 +1539,7 @@ class TestRelatedAction(WsgiAppCase):
@classmethod
def setup_class(cls):
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.sysadmin_user = model.User.get('testsysadmin')
diff --git a/ckan/tests/legacy/logic/test_tag.py b/ckan/tests/legacy/logic/test_tag.py
--- a/ckan/tests/legacy/logic/test_tag.py
+++ b/ckan/tests/legacy/logic/test_tag.py
@@ -10,7 +10,7 @@
class TestAction(WsgiAppCase):
@classmethod
def setup_class(cls):
- search.clear()
+ search.clear_all()
CreateTestData.create()
cls.sysadmin_user = model.User.get('testsysadmin')
cls.normal_user = model.User.get('annafan')
diff --git a/ckan/tests/lib/dictization/test_model_dictize.py b/ckan/tests/lib/dictization/test_model_dictize.py
--- a/ckan/tests/lib/dictization/test_model_dictize.py
+++ b/ckan/tests/lib/dictization/test_model_dictize.py
@@ -14,7 +14,7 @@ class TestGroupListDictize:
def setup(self):
helpers.reset_db()
- search.clear()
+ search.clear_all()
def test_group_list_dictize(self):
group = factories.Group()
@@ -136,7 +136,7 @@ class TestGroupDictize:
def setup(self):
helpers.reset_db()
- search.clear()
+ search.clear_all()
def test_group_dictize(self):
group = factories.Group(name='test_dictize')
diff --git a/ckan/tests/logic/action/test_delete.py b/ckan/tests/logic/action/test_delete.py
--- a/ckan/tests/logic/action/test_delete.py
+++ b/ckan/tests/logic/action/test_delete.py
@@ -186,7 +186,7 @@ def test_dataset_in_a_purged_group_no_longer_shows_that_group(self):
assert_equals(dataset_shown['groups'], [])
def test_purged_group_is_not_in_search_results_for_its_ex_dataset(self):
- search.clear()
+ search.clear_all()
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
@@ -288,7 +288,7 @@ def test_dataset_in_a_purged_org_no_longer_shows_that_org(self):
assert_equals(dataset_shown['owner_org'], None)
def test_purged_org_is_not_in_search_results_for_its_ex_dataset(self):
- search.clear()
+ search.clear_all()
org = factories.Organization()
dataset = factories.Dataset(owner_org=org['id'])
@@ -394,7 +394,7 @@ def test_group_no_longer_shows_its_purged_dataset(self):
assert_equals(dataset_shown['packages'], [])
def test_purged_dataset_is_not_in_search_results(self):
- search.clear()
+ search.clear_all()
dataset = factories.Dataset()
def get_search_results():
diff --git a/ckanext/example_idatasetform/tests/test_example_idatasetform.py b/ckanext/example_idatasetform/tests/test_example_idatasetform.py
--- a/ckanext/example_idatasetform/tests/test_example_idatasetform.py
+++ b/ckanext/example_idatasetform/tests/test_example_idatasetform.py
@@ -18,13 +18,13 @@ def setup_class(cls):
def teardown(self):
model.repo.rebuild_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
@classmethod
def teardown_class(cls):
helpers.reset_db()
model.repo.rebuild_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
config.clear()
config.update(cls.original_config)
@@ -97,7 +97,7 @@ def teardown(self):
def teardown_class(cls):
plugins.unload('example_idatasetform_v4')
helpers.reset_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
config.clear()
config.update(cls.original_config)
@@ -139,13 +139,13 @@ def setup_class(cls):
def teardown(self):
model.repo.rebuild_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
@classmethod
def teardown_class(cls):
plugins.unload('example_idatasetform')
helpers.reset_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
config.clear()
config.update(cls.original_config)
@@ -212,13 +212,13 @@ def setup_class(cls):
def teardown(self):
model.repo.rebuild_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
@classmethod
def teardown_class(cls):
plugins.unload('example_idatasetform')
helpers.reset_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
config.clear()
config.update(cls.original_config)
diff --git a/ckanext/multilingual/tests/test_multilingual_plugin.py b/ckanext/multilingual/tests/test_multilingual_plugin.py
--- a/ckanext/multilingual/tests/test_multilingual_plugin.py
+++ b/ckanext/multilingual/tests/test_multilingual_plugin.py
@@ -58,7 +58,7 @@ def teardown(cls):
ckan.plugins.unload('multilingual_group')
ckan.plugins.unload('multilingual_tag')
ckan.model.repo.rebuild_db()
- ckan.lib.search.clear()
+ ckan.lib.search.clear_all()
def test_user_read_translation(self):
'''Test the translation of datasets on user view pages by the
| ckan.lib.search.clear is dangerous and should be more explicit.
The call to clear() in ckan.lib.search takes an optional package id. If no package_id is provided (the default parameter is None) then it will clear the entire search index.
This is likely to make programmer error much more pronounced when they accidentally pass in None, or an empty string and their search index disappears - ask me how I know?
| 2015-09-22T16:41:30 |
|
ckan/ckan | 2,675 | ckan__ckan-2675 | [
"2669"
] | 85108d89ebff59fcbf4ee847b8e234c93329bc85 | diff --git a/ckan/lib/munge.py b/ckan/lib/munge.py
--- a/ckan/lib/munge.py
+++ b/ckan/lib/munge.py
@@ -34,7 +34,7 @@ def munge_title_to_name(name):
# take out not-allowed characters
name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
# remove doubles
- name = re.sub('--', '-', name)
+ name = re.sub('-+', '-', name)
# remove leading or trailing hyphens
name = name.strip('-')
# if longer than max_length, keep last word if a year
| diff --git a/ckan/tests/lib/test_munge.py b/ckan/tests/lib/test_munge.py
--- a/ckan/tests/lib/test_munge.py
+++ b/ckan/tests/lib/test_munge.py
@@ -104,14 +104,14 @@ class TestMungeTitleToName(object):
# (original, expected)
munge_list = [
('unchanged', 'unchanged'),
- ('some spaces here', 'some-spaces-here'),
+ ('some spaces here &here', 'some-spaces-here-here'),
('s', 's_'), # too short
('random:other%character&', 'random-othercharacter'),
(u'u with umlaut \xfc', 'u-with-umlaut-u'),
('reallylong' * 12, 'reallylong' * 9 + 'reall'),
('reallylong' * 12 + ' - 2012', 'reallylong' * 9 + '-2012'),
('10cm - 50cm Near InfraRed (NI) Digital Aerial Photography (AfA142)',
- '10cm--50cm-near-infrared-ni-digital-aerial-photography-afa142')
+ '10cm-50cm-near-infrared-ni-digital-aerial-photography-afa142')
]
def test_munge_title_to_name(self):
@@ -128,7 +128,8 @@ class TestMungeTag:
('unchanged', 'unchanged'),
('s', 's_'), # too short
('some spaces here', 'some-spaces--here'),
- ('random:other%character&', 'randomothercharacter')
+ ('random:other%characters&_.here', 'randomothercharactershere'),
+ ('river-water-dashes', 'river-water-dashes'),
]
def test_munge_tag(self):
@@ -137,7 +138,7 @@ def test_munge_tag(self):
munge = munge_tag(org)
nose_tools.assert_equal(munge, exp)
- def test_munge_tag_muliple_pass(self):
+ def test_munge_tag_multiple_pass(self):
'''Munge a list of tags muliple times gives expected results.'''
for org, exp in self.munge_list:
first_munge = munge_tag(org)
| title munger allows repeated dashes
If you munge title `Flood data` then those three spaces get converted to 2 dashes: `flood--data`, when it should really just be one `flood-data`.
There are hacks in several harvesters to fix this, but it's better fixed at source in core ckan.
| 2015-10-06T14:08:37 |
|
ckan/ckan | 2,725 | ckan__ckan-2725 | [
"2724"
] | 887b8514ce23e54100f894391495047410e8d821 | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -1945,7 +1945,8 @@ def package_search(context, data_dict):
package_dict = item.before_view(package_dict)
results.append(package_dict)
else:
- results.append(model_dictize.package_dictize(pkg, context))
+ log.error('No package_dict is coming from solr for package '
+ 'id %s', package['id'])
count = query.count
facets = query.facets
@@ -1961,6 +1962,17 @@ def package_search(context, data_dict):
'sort': data_dict['sort']
}
+ # create a lookup table of group name to title for all the groups and
+ # organizations in the current search's facets.
+ group_names = []
+ for field_name in ('groups', 'organization'):
+ group_names.extend(facets.get(field_name, {}).keys())
+
+ groups = session.query(model.Group.name, model.Group.title) \
+ .filter(model.Group.name.in_(group_names)) \
+ .all()
+ group_titles_by_name = dict(groups)
+
# Transform facets into a more useful data structure.
restructured_facets = {}
for key, value in facets.items():
@@ -1972,11 +1984,9 @@ def package_search(context, data_dict):
new_facet_dict = {}
new_facet_dict['name'] = key_
if key in ('groups', 'organization'):
- group = model.Group.get(key_)
- if group:
- new_facet_dict['display_name'] = group.display_name
- else:
- new_facet_dict['display_name'] = key_
+ display_name = group_titles_by_name.get(key_, key_)
+ display_name = display_name if display_name and display_name.strip() else key_
+ new_facet_dict['display_name'] = display_name
elif key == 'license_id':
license = model.Package.get_license_register().get(key_)
if license:
| diff --git a/ckan/tests/legacy/logic/test_action.py b/ckan/tests/legacy/logic/test_action.py
--- a/ckan/tests/legacy/logic/test_action.py
+++ b/ckan/tests/legacy/logic/test_action.py
@@ -1053,173 +1053,6 @@ def test_2_update_many(self):
json.loads(res.body)
-
-
-class TestActionPackageSearch(WsgiAppCase):
-
- @classmethod
- def setup_class(cls):
- setup_test_search_index()
- CreateTestData.create()
- cls.sysadmin_user = model.User.get('testsysadmin')
-
- @classmethod
- def teardown_class(self):
- model.repo.rebuild_db()
-
- def test_1_basic(self):
- params = {
- 'q':'tolstoy',
- 'facet.field': ['groups', 'tags', 'res_format', 'license'],
- 'rows': 20,
- 'start': 0,
- }
- postparams = '%s=1' % json.dumps(params)
- res = self.app.post('/api/action/package_search', params=postparams)
- res = json.loads(res.body)
- result = res['result']
- assert_equal(res['success'], True)
- assert_equal(result['count'], 1)
- assert_equal(result['results'][0]['name'], 'annakarenina')
-
- # Test GET request
- params_json_list = params
- params_json_list['facet.field'] = json.dumps(params['facet.field'])
- url_params = urllib.urlencode(params_json_list)
- res = self.app.get('/api/action/package_search?{0}'.format(url_params))
- res = json.loads(res.body)
- result = res['result']
- assert_equal(res['success'], True)
- assert_equal(result['count'], 1)
- assert_equal(result['results'][0]['name'], 'annakarenina')
-
- def test_1_facet_limit(self):
- params = {
- 'q':'*:*',
- 'facet.field': ['groups', 'tags', 'res_format', 'license'],
- 'rows': 20,
- 'start': 0,
- }
- postparams = '%s=1' % json.dumps(params)
- res = self.app.post('/api/action/package_search', params=postparams)
- res = json.loads(res.body)
- assert_equal(res['success'], True)
-
- assert_equal(len(res['result']['search_facets']['groups']['items']), 2)
-
- params = {
- 'q':'*:*',
- 'facet.field': ['groups', 'tags', 'res_format', 'license'],
- 'facet.limit': 1,
- 'rows': 20,
- 'start': 0,
- }
- postparams = '%s=1' % json.dumps(params)
- res = self.app.post('/api/action/package_search', params=postparams)
- res = json.loads(res.body)
- assert_equal(res['success'], True)
-
- assert_equal(len(res['result']['search_facets']['groups']['items']), 1)
-
- params = {
- 'q':'*:*',
- 'facet.field': ['groups', 'tags', 'res_format', 'license'],
- 'facet.limit': -1, # No limit
- 'rows': 20,
- 'start': 0,
- }
- postparams = '%s=1' % json.dumps(params)
- res = self.app.post('/api/action/package_search', params=postparams)
- res = json.loads(res.body)
- assert_equal(res['success'], True)
-
- assert_equal(len(res['result']['search_facets']['groups']['items']), 2)
-
- def test_1_basic_no_params(self):
- postparams = '%s=1' % json.dumps({})
- res = self.app.post('/api/action/package_search', params=postparams)
- res = json.loads(res.body)
- result = res['result']
- assert_equal(res['success'], True)
- assert_equal(result['count'], 2)
- assert result['results'][0]['name'] in ('annakarenina', 'warandpeace')
-
- # Test GET request
- res = self.app.get('/api/action/package_search')
- res = json.loads(res.body)
- result = res['result']
- assert_equal(res['success'], True)
- assert_equal(result['count'], 2)
- assert result['results'][0]['name'] in ('annakarenina', 'warandpeace')
-
- def test_2_bad_param(self):
- postparams = '%s=1' % json.dumps({
- 'sort':'metadata_modified',
- })
- res = self.app.post('/api/action/package_search', params=postparams,
- status=409)
- assert '"message": "Search error:' in res.body, res.body
- assert 'SOLR returned an error' in res.body, res.body
- # solr error is 'Missing sort order' or 'Missing_sort_order',
- # depending on the solr version.
- assert 'sort' in res.body, res.body
-
- def test_3_bad_param(self):
- postparams = '%s=1' % json.dumps({
- 'weird_param':True,
- })
- res = self.app.post('/api/action/package_search', params=postparams,
- status=400)
- assert '"message": "Search Query is invalid:' in res.body, res.body
- assert '"Invalid search parameters: [\'weird_param\']' in res.body, res.body
-
- def test_4_sort_by_metadata_modified(self):
- search_params = '%s=1' % json.dumps({
- 'q': '*:*',
- 'fl': 'name, metadata_modified',
- 'sort': u'metadata_modified desc'
- })
-
- # modify warandpeace, check that it is the first search result
- rev = model.repo.new_revision()
- pkg = model.Package.get('warandpeace')
- pkg.title = "War and Peace [UPDATED]"
-
- pkg.metadata_modified = datetime.datetime.utcnow()
- model.repo.commit_and_remove()
-
- res = self.app.post('/api/action/package_search', params=search_params)
- result = json.loads(res.body)['result']
- result_names = [r['name'] for r in result['results']]
- assert result_names == ['warandpeace', 'annakarenina'], result_names
-
- # modify annakarenina, check that it is the first search result
- rev = model.repo.new_revision()
- pkg = model.Package.get('annakarenina')
- pkg.title = "A Novel By Tolstoy [UPDATED]"
- pkg.metadata_modified = datetime.datetime.utcnow()
- model.repo.commit_and_remove()
-
- res = self.app.post('/api/action/package_search', params=search_params)
- result = json.loads(res.body)['result']
- result_names = [r['name'] for r in result['results']]
- assert result_names == ['annakarenina', 'warandpeace'], result_names
-
- # add a tag to warandpeace, check that it is the first result
- pkg = model.Package.get('warandpeace')
- pkg_params = '%s=1' % json.dumps({'id': pkg.id})
- res = self.app.post('/api/action/package_show', params=pkg_params)
- pkg_dict = json.loads(res.body)['result']
- pkg_dict['tags'].append({'name': 'new-tag'})
- pkg_params = '%s=1' % json.dumps(pkg_dict)
- res = self.app.post('/api/action/package_update', params=pkg_params,
- extra_environ={'Authorization': str(self.sysadmin_user.apikey)})
-
- res = self.app.post('/api/action/package_search', params=search_params)
- result = json.loads(res.body)['result']
- result_names = [r['name'] for r in result['results']]
- assert result_names == ['warandpeace', 'annakarenina'], result_names
-
class MockPackageSearchPlugin(SingletonPlugin):
implements(IPackageController, inherit=True)
diff --git a/ckan/tests/logic/action/test_get.py b/ckan/tests/logic/action/test_get.py
--- a/ckan/tests/logic/action/test_get.py
+++ b/ckan/tests/logic/action/test_get.py
@@ -5,6 +5,7 @@
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic.schema as schema
+from ckan.lib.search.common import SearchError
eq = nose.tools.eq_
@@ -843,12 +844,103 @@ def test_package_autocomplete_does_not_return_private_datasets(self):
class TestPackageSearch(helpers.FunctionalTestBase):
+ def test_search(self):
+ factories.Dataset(title='Rivers')
+ factories.Dataset(title='Lakes') # decoy
+
+ search_result = helpers.call_action('package_search', q='rivers')
+
+ eq(search_result['results'][0]['title'], 'Rivers')
+ eq(search_result['count'], 1)
+
+ def test_search_all(self):
+ factories.Dataset(title='Rivers')
+ factories.Dataset(title='Lakes')
+
+ search_result = helpers.call_action('package_search') # no q
+
+ eq(search_result['count'], 2)
+
+ def test_bad_action_parameter(self):
+ nose.tools.assert_raises(
+ SearchError,
+ helpers.call_action,
+ 'package_search', weird_param=1)
+
+ def test_bad_solr_parameter(self):
+ nose.tools.assert_raises(
+ SearchError,
+ helpers.call_action,
+ 'package_search', sort='metadata_modified')
+ # SOLR doesn't like that we didn't specify 'asc' or 'desc'
+ # SOLR error is 'Missing sort order' or 'Missing_sort_order',
+ # depending on the solr version.
+
+ def test_facets(self):
+ org = factories.Organization(name='test-org-facet', title='Test Org')
+ factories.Dataset(owner_org=org['id'])
+ factories.Dataset(owner_org=org['id'])
+
+ data_dict = {'facet.field': ['organization']}
+ search_result = helpers.call_action('package_search', **data_dict)
+
+ eq(search_result['count'], 2)
+ eq(search_result['search_facets'],
+ {'organization': {'items': [{'count': 2,
+ 'display_name': u'Test Org',
+ 'name': 'test-org-facet'}],
+ 'title': 'organization'}})
+
+ def test_facet_limit(self):
+ group1 = factories.Group(name='test-group-fl1', title='Test Group 1')
+ group2 = factories.Group(name='test-group-fl2', title='Test Group 2')
+ factories.Dataset(groups=[{'name': group1['name']},
+ {'name': group2['name']}])
+ factories.Dataset(groups=[{'name': group1['name']}])
+ factories.Dataset()
+
+ data_dict = {'facet.field': ['groups'],
+ 'facet.limit': 1}
+ search_result = helpers.call_action('package_search', **data_dict)
+
+ eq(len(search_result['search_facets']['groups']['items']), 1)
+ eq(search_result['search_facets'],
+ {'groups': {'items': [{'count': 2,
+ 'display_name': u'Test Group 1',
+ 'name': 'test-group-fl1'}],
+ 'title': 'groups'}})
+
+ def test_facet_no_limit(self):
+ group1 = factories.Group()
+ group2 = factories.Group()
+ factories.Dataset(groups=[{'name': group1['name']},
+ {'name': group2['name']}])
+ factories.Dataset(groups=[{'name': group1['name']}])
+ factories.Dataset()
+
+ data_dict = {'facet.field': ['groups'],
+ 'facet.limit': -1} # no limit
+ search_result = helpers.call_action('package_search', **data_dict)
+
+ eq(len(search_result['search_facets']['groups']['items']), 2)
+
+ def test_sort(self):
+ factories.Dataset(name='test0')
+ factories.Dataset(name='test1')
+ factories.Dataset(name='test2')
+
+ search_result = helpers.call_action('package_search',
+ sort='metadata_created desc')
+
+ result_names = [result['name'] for result in search_result['results']]
+ eq(result_names, [u'test2', u'test1', u'test0'])
+
def test_package_search_on_resource_name(self):
'''
package_search() should allow searching on resource name field.
'''
resource_name = 'resource_abc'
- package = factories.Resource(name=resource_name)
+ factories.Resource(name=resource_name)
search_result = helpers.call_action('package_search', q='resource_abc')
eq(search_result['results'][0]['resources'][0]['name'], resource_name)
| Search facet results - improved speed
Based on PR #2692
| 2015-11-04T17:12:16 |
|
ckan/ckan | 2,726 | ckan__ckan-2726 | [
"1829"
] | e8c31ccfb750226ff1b1e26b5218be9b44d05e2d | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -45,7 +45,7 @@
.. |datastore_user| replace:: datastore_default
.. |test_database| replace:: ckan_test
.. |test_datastore| replace:: datastore_test
-.. |apache_config_file| replace:: /etc/apache2/sites-available/ckan_default
+.. |apache_config_file| replace:: /etc/apache2/sites-available/ckan_default.conf
.. |apache.wsgi| replace:: |config_dir|/apache.wsgi
.. |data_dir| replace:: |config_dir|/data
.. |sstore| replace:: |config_dir|/sstore
@@ -63,9 +63,10 @@
.. |sqlalchemy| replace:: SQLAlchemy
.. |javascript| replace:: JavaScript
.. |apache| replace:: Apache
-.. |nginx_config_file| replace:: /etc/nginx/sites-available/ckan_default
+.. |nginx_config_file| replace:: /etc/nginx/sites-available/ckan
.. |reload_nginx| replace:: sudo service nginx reload
.. |jquery| replace:: jQuery
+.. |nodejs| replace:: Node.js
.. _Jinja2: http://jinja.pocoo.org/
.. _CKAN front page: http://127.0.0.1:5000
@@ -154,10 +155,10 @@ def latest_release_version():
return version
-def latest_package_name():
+def latest_package_name(distro='trusty'):
'''Return the filename of the Ubuntu package for the latest stable release.
- e.g. "python-ckan_2.1_amd64.deb"
+ e.g. "python-ckan_2.1-trusty_amd64.deb"
'''
# We don't create a new package file name for a patch release like 2.1.1,
@@ -165,8 +166,8 @@ def latest_package_name():
# have the X.Y part of the version number in them, not X.Y.Z.
latest_minor_version = latest_release_version()[:3]
- return 'python-ckan_{version}_amd64.deb'.format(
- version=latest_minor_version)
+ return 'python-ckan_{version}-{distro}_amd64.deb'.format(
+ version=latest_minor_version, distro=distro)
def write_latest_release_file():
@@ -192,14 +193,16 @@ def write_latest_release_file():
.. |latest_release_tag| replace:: {latest_tag}
.. |latest_release_version| replace:: {latest_version}
-.. |latest_package_name| replace:: {package_name}
+.. |latest_package_name_precise| replace:: {package_name_precise}
+.. |latest_package_name_trusty| replace:: {package_name_trusty}
'''
open(filename, 'w').write(template.format(
filename=filename,
latest_tag=latest_release_tag(),
latest_version=latest_release_version(),
- package_name=latest_package_name(),
+ package_name_precise=latest_package_name('precise'),
+ package_name_trusty=latest_package_name('trusty'),
))
| diff --git a/ckanext/datapusher/tests/test.py b/ckanext/datapusher/tests/test.py
--- a/ckanext/datapusher/tests/test.py
+++ b/ckanext/datapusher/tests/test.py
@@ -47,11 +47,20 @@ def setup_class(cls):
set_url_type(
model.Package.get('annakarenina').resources, cls.sysadmin_user)
+ # Httpretty crashes with Solr on Python 2.6,
+ # skip the tests
+ if (sys.version_info[0] == 2 and sys.version_info[1] == 6):
+ raise nose.SkipTest()
+
@classmethod
def teardown_class(cls):
rebuild_all_dbs(cls.Session)
p.unload('datastore')
p.unload('datapusher')
+ # Reenable Solr indexing
+ if (sys.version_info[0] == 2 and sys.version_info[1] == 6
+ and not p.plugin_loaded('synchronous_search')):
+ p.load('synchronous_search')
def test_create_ckan_resource_in_package(self):
package = model.Package.get('annakarenina')
diff --git a/ckanext/resourceproxy/tests/test_proxy.py b/ckanext/resourceproxy/tests/test_proxy.py
--- a/ckanext/resourceproxy/tests/test_proxy.py
+++ b/ckanext/resourceproxy/tests/test_proxy.py
@@ -1,7 +1,9 @@
+import sys
import requests
import unittest
import json
import httpretty
+import nose
import paste.fixture
from pylons import config
@@ -27,7 +29,8 @@ def set_resource_url(url):
context = {
'model': model,
'session': model.Session,
- 'user': model.User.get('testsysadmin').name
+ 'user': model.User.get('testsysadmin').name,
+ 'use_cache': False,
}
resource = p.toolkit.get_action('resource_show')(
@@ -55,12 +58,20 @@ def setup_class(cls):
wsgiapp = middleware.make_app(config['global_conf'], **config)
cls.app = paste.fixture.TestApp(wsgiapp)
create_test_data.CreateTestData.create()
+ # Httpretty crashes with Solr on Python 2.6,
+ # skip the tests
+ if (sys.version_info[0] == 2 and sys.version_info[1] == 6):
+ raise nose.SkipTest()
@classmethod
def teardown_class(cls):
config.clear()
config.update(cls._original_config)
model.repo.rebuild_db()
+ # Reenable Solr indexing
+ if (sys.version_info[0] == 2 and sys.version_info[1] == 6
+ and not p.plugin_loaded('synchronous_search')):
+ p.load('synchronous_search')
def setUp(self):
self.url = 'http://www.ckan.org/static/example.json'
| Less compiler does not work on Ubuntu 14.04
If I follow the instructions on the [Frontend Development docs](http://docs.ckan.org/en/latest/contributing/frontend/index.html) (freshly updated on #1734) and install node using the following (as I'm on Ubuntu 14.04):
```
sudo apt-get install nodejs
```
and I try to rebuild the CSS using
```
./bin/less
```
I get an error because the node binary is named differently as the one defined on our `bin/less` script:
```
/usr/bin/env: node: No such file or directory
```
Look like the version that gets installed this way is called `nodejs`.
According to [this comment](https://github.com/joyent/node/issues/3911#issuecomment-12798913) from a Node developer, installing the default node package is frowned upon anyway, so perhaps we should recommend using Chris Lea's PPA anyway (or you know, getting rid of the node dependency).
| https://github.com/robotis/Lesscpy/ looks like a good thing. I haven't worked with our .less files much, do we need javascript evaluation?
I'd recommend talking to @mintcanary or another frontend developer for their opinion before we remove the dep.
I don't have any insight here I'm afraid, I've only used an external less app with CKAN.
I haven't used lessc.py, but the message on their message (`This is an early version, so you are likly to find bugs.`) doesn't make me comfortable. I tried compiling our `main.less` with it, but got:
```
$ lesscpy ckan/public/base/less/main.less
Traceback (most recent call last):
File "/tmp/lesscpy/blah/bin/lesscpy", line 5, in <module>
pkg_resources.run_script('lesscpy==0.10.1', 'lesscpy')
File "/tmp/lesscpy/blah/local/lib/python2.7/site-packages/pkg_resources.py", line 488, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/tmp/lesscpy/blah/local/lib/python2.7/site-packages/pkg_resources.py", line 1354, in run_script
execfile(script_filename, namespace, namespace)
File "/tmp/lesscpy/blah/lib/python2.7/site-packages/lesscpy-0.10.1-py2.7.egg/EGG-INFO/scripts/lesscpy", line 16, in <module>
compiler.run()
File "/tmp/lesscpy/lesscpy/scripts/compiler.py", line 177, in run
p.parse(filename=args.target, debuglevel=args.debug)
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 105, in parse
file, lexer=self.lex, debug=debuglevel)
File "build/bdist.linux-x86_64/egg/ply/yacc.py", line 265, in parse
File "build/bdist.linux-x86_64/egg/ply/yacc.py", line 971, in parseopt_notrack
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 204, in p_statement_import
recurse.parse(filename=filename, debuglevel=0)
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 105, in parse
file, lexer=self.lex, debug=debuglevel)
File "build/bdist.linux-x86_64/egg/ply/yacc.py", line 265, in parse
File "build/bdist.linux-x86_64/egg/ply/yacc.py", line 971, in parseopt_notrack
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 204, in p_statement_import
recurse.parse(filename=filename, debuglevel=0)
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 107, in parse
self.post_parse()
File "/tmp/lesscpy/lesscpy/lessc/parser.py", line 118, in post_parse
out.append(pu.parse(self.scope))
File "/tmp/lesscpy/lesscpy/plib/block.py", line 43, in parse
inner = list(utility.flatten([p.parse(scope) for p in inner if p]))
File "/tmp/lesscpy/lesscpy/plib/property.py", line 39, in parse
self.parsed = self.process(style, scope)
File "/tmp/lesscpy/lesscpy/plib/node.py", line 49, in process
for t in tokens]
File "/tmp/lesscpy/lesscpy/plib/expression.py", line 40, in parse
if str(e).strip()]
ValueError: too many values to unpack
```
They disabled issues on their github repository, so I'm not sure where to submit this bug report either. IMHO, we should keep using node's lessc for now.
I agree that installing from the Ubuntu packages isn't ideal, because you'll get a quite old node, but it doesn't matter if all you want is to compile less, and it's simpler than using a PPA.
@vitorbaptista I worked around that first lesscpy issue (I think it was having trouble subtracting from a negative number) but now it seems to be in an endless loop.
How do we get a lessc that's old enough for the system node without a PPA? download it directly?
Eventually, the node and npm version gets too old. As it's happened with Ubuntu 12.04.
Hi amercader,
a few weeks ago I had the same issue you had in Ubuntu 14.04 (I am starting to develop on CKAN).
I wrote a mini-guide for me, and I think I solved the problem by:
sudo ln -s /usr/bin/nodejs /usr/bin/node
This is what I did in Ubuntu 14.04:
```
sudo apt-get install nodejs npm
```
#have your virtualenv active when you do this:
. /usr/lib/ckan/default/bin/activate
cd ~/ckan/lib/default/src/ckan
# and run:
```
npm install less nodewatch
```
# Next
```
sudo ln -s /usr/bin/nodejs /usr/bin/node
. /usr/lib/ckan/default/bin/activate
cd ~/ckan/lib/default/src/ckan
```
./bin/less
@amercader It looks like we're bumping all the 14.04 issues to 2.4. Can this be bumped as well or do we want this one fixed for 2.3?
| 2015-11-06T13:20:38 |
ckan/ckan | 2,739 | ckan__ckan-2739 | [
"2735"
] | e8c31ccfb750226ff1b1e26b5218be9b44d05e2d | diff --git a/ckan/migration/versions/082_add_metadata_created.py b/ckan/migration/versions/082_add_metadata_created.py
new file mode 100644
--- /dev/null
+++ b/ckan/migration/versions/082_add_metadata_created.py
@@ -0,0 +1,14 @@
+def upgrade(migrate_engine):
+ migrate_engine.execute('''
+ ALTER TABLE package_revision
+ ADD COLUMN metadata_created timestamp without time zone;
+ ALTER TABLE package
+ ADD COLUMN metadata_created timestamp without time zone;
+
+ UPDATE package SET metadata_created=
+ (SELECT revision_timestamp
+ FROM package_revision
+ WHERE id=package.id
+ ORDER BY revision_timestamp ASC
+ LIMIT 1);
+ ''')
diff --git a/ckan/model/package.py b/ckan/model/package.py
--- a/ckan/model/package.py
+++ b/ckan/model/package.py
@@ -47,6 +47,7 @@
Column('type', types.UnicodeText, default=u'dataset'),
Column('owner_org', types.UnicodeText),
Column('creator_user_id', types.UnicodeText),
+ Column('metadata_created', types.DateTime, default=datetime.datetime.utcnow),
Column('metadata_modified', types.DateTime, default=datetime.datetime.utcnow),
Column('private', types.Boolean, default=False),
)
@@ -481,16 +482,6 @@ def get_groups(self, group_type=None, capacity=None):
groups = [g[0] for g in groupcaps if g[1] == capacity]
return groups
- @property
- def metadata_created(self):
- import ckan.model as model
- q = meta.Session.query(model.PackageRevision.revision_timestamp)\
- .filter(model.PackageRevision.id == self.id)\
- .order_by(model.PackageRevision.revision_timestamp.asc())
- ts = q.first()
- if ts:
- return ts[0]
-
@staticmethod
def get_fields(core_only=False, fields_to_ignore=None):
'''Returns a list of the properties of a package.
| package.metadata_created() should be a field
Currently metadata_created is a property that calls the metadata_created function - which in turn queries PackageRevision to find the earliest one. This means that modifying it becomes a chore (for instance, when harvesting).
This should be turned into a column on the model (similar to metadata_modified). Will need a migration.
| @wardi says this would help when harvesting.
Maybe it was originally like this and changed - can you look at the history?
| 2015-11-17T17:22:23 |
|
ckan/ckan | 2,742 | ckan__ckan-2742 | [
"2741"
] | e8c31ccfb750226ff1b1e26b5218be9b44d05e2d | diff --git a/ckan/config/routing.py b/ckan/config/routing.py
--- a/ckan/config/routing.py
+++ b/ckan/config/routing.py
@@ -234,7 +234,6 @@ def make_map():
m.connect('/dataset/activity/{id}/{offset}', action='activity')
m.connect('dataset_groups', '/dataset/groups/{id}',
action='groups', ckan_icon='group')
- m.connect('/dataset/{id}.{format}', action='read')
m.connect('dataset_resources', '/dataset/resources/{id}',
action='resources', ckan_icon='reorder')
m.connect('dataset_read', '/dataset/{id}', action='read',
diff --git a/ckan/controllers/package.py b/ckan/controllers/package.py
--- a/ckan/controllers/package.py
+++ b/ckan/controllers/package.py
@@ -13,7 +13,6 @@
import ckan.lib.maintain as maintain
import ckan.lib.i18n as i18n
import ckan.lib.navl.dictization_functions as dict_fns
-import ckan.lib.accept as accept
import ckan.lib.helpers as h
import ckan.model as model
import ckan.lib.datapreview as datapreview
@@ -306,22 +305,6 @@ def pager_url(q=None, page=None):
return render(self._search_template(package_type),
extra_vars={'dataset_type': package_type})
- def _content_type_from_extension(self, ext):
- ct, ext = accept.parse_extension(ext)
- if not ct:
- return None, None
- return ct, ext
-
- def _content_type_from_accept(self):
- """
- Given a requested format this method determines the content-type
- to set and the genshi template loader to use in order to render
- it accurately. TextTemplate must be used for non-xml templates
- whilst all that are some sort of XML should use MarkupTemplate.
- """
- ct, ext = accept.parse_header(request.headers.get('Accept', ''))
- return ct, ext
-
def resources(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
@@ -350,20 +333,7 @@ def resources(self, id):
return render('package/resources.html',
extra_vars={'dataset_type': package_type})
- def read(self, id, format='html'):
- if not format == 'html':
- ctype, extension = \
- self._content_type_from_extension(format)
- if not ctype:
- # An unknown format, we'll carry on in case it is a
- # revision specifier and re-constitute the original id
- id = "%s.%s" % (id, format)
- ctype, format = "text/html; charset=utf-8", "html"
- else:
- ctype, format = self._content_type_from_accept()
-
- response.headers['Content-Type'] = ctype
-
+ def read(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
@@ -415,8 +385,6 @@ def read(self, id, format='html'):
package_type=package_type)
template = self._read_template(package_type)
- template = template[:template.index('.') + 1] + format
-
try:
return render(template,
extra_vars={'dataset_type': package_type})
diff --git a/ckan/lib/accept.py b/ckan/lib/accept.py
deleted file mode 100644
--- a/ckan/lib/accept.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Simple accept header parsing to determins which content type we should deliver
-back to the caller. This is mostly used by the rdf export functionality
-"""
-import re
-import operator
-
-# For parsing {name};q=x and {name} style fields from the accept header
-accept_re = re.compile("^(?P<ct>[^;]+)[ \t]*(;[ \t]*q=(?P<q>[0-9.]+)){0,1}$")
-
-accept_types = {
- # Name : ContentType, Extension
- "text/html": ("text/html; charset=utf-8", 'html'),
- "text/n3": ("text/n3; charset=utf-8", 'n3'),
- "application/rdf+xml": ("application/rdf+xml; charset=utf-8", 'rdf'),
-}
-accept_by_extension = {
- "rdf": "application/rdf+xml",
- "n3": "text/n3"
-}
-
-
-def parse_extension(file_ext):
- """
- If provided an extension, this function will return the details
- for that extension, if we know about it.
- """
- ext = accept_by_extension.get(file_ext, None)
- if ext:
- return accept_types[ext]
- return (None, None)
-
-
-def parse_header(accept_header=''):
- """
- Parses the supplied accept header and tries to determine
- which content types we can provide the response in that will keep the
- client happy.
-
- We will always provide html as the default if we can't see anything else
- but we will also need to take into account the q score.
-
- The return values are be content-type,is-markup,extension
- """
- if accept_header is None:
- accept_header = ""
-
- acceptable = {}
- for typ in accept_header.split(','):
- m = accept_re.match(typ)
- if m:
- key = m.groups(0)[0]
- qscore = m.groups(0)[2] or 1.0
- acceptable[key] = float(qscore)
-
- for ctype in sorted(acceptable.iteritems(),
- key=operator.itemgetter(1),
- reverse=True):
- if ctype[0] in accept_types:
- return accept_types[ctype[0]]
-
- return accept_types["text/html"]
| diff --git a/ckan/tests/legacy/lib/test_accept.py b/ckan/tests/legacy/lib/test_accept.py
deleted file mode 100644
--- a/ckan/tests/legacy/lib/test_accept.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from nose.tools import assert_equal
-
-import ckan.lib.accept as accept
-
-class TestAccept:
- def test_accept_invalid(self):
- ct, ext = accept.parse_header(None)
- assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( ext, "html")
-
- def test_accept_invalid2(self):
- ct, ext = accept.parse_header("")
- assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( ext, "html")
-
- def test_accept_invalid3(self):
- ct, ext = accept.parse_header("wombles")
- assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( ext, "html")
-
-
- def test_accept_valid(self):
- a = "text/turtle,application/turtle,application/rdf+xml,text/plain;q=0.8,*/*;q=.5"
- ct, ext = accept.parse_header(a)
- assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( ext, "rdf")
-
- def test_accept_valid2(self):
- a = "text/turtle,application/turtle,application/rdf+xml;q=0.9,text/plain;q=0.8,*/*;q=.5"
- ct, ext = accept.parse_header(a)
- assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( ext, "rdf")
-
- def test_accept_valid4(self):
- a = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
- ct, ext = accept.parse_header(a)
- assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( ext, "html")
-
- def test_accept_valid5(self):
- a = "application/rdf+xml;q=0.5,application/xhtml+xml,text/html;q=0.9"
- ct, ext = accept.parse_header(a)
- assert_equal( ct, "text/html; charset=utf-8")
- assert_equal( ext, "html")
-
- def test_accept_valid6(self):
- a = "application/rdf+xml;q=0.9,application/xhtml+xml,text/html;q=0.5"
- ct, ext = accept.parse_header(a)
- assert_equal( ct, "application/rdf+xml; charset=utf-8")
- assert_equal( ext, "rdf")
| Remove old RDF cruft.
The old template-style RDF templates are no longer present, which means there is a chunk of code (around simple conneg) that can be removed from ckan/lib/accept.py and from the read() method of the package controller. Currently it is running code that is never used on each request.
Make sure it is obvious that users should install ckanext-dcat instead.
| 2015-11-17T19:18:47 |
|
ckan/ckan | 2,824 | ckan__ckan-2824 | [
"2823"
] | 61f59d83604a792006e945878424fb7957ce6e49 | diff --git a/ckan/model/package.py b/ckan/model/package.py
--- a/ckan/model/package.py
+++ b/ckan/model/package.py
@@ -76,8 +76,10 @@ def search_by_name(cls, text_query):
@classmethod
def get(cls, reference):
'''Returns a package object referenced by its id or name.'''
- query = meta.Session.query(cls).filter(cls.id==reference)
- pkg = query.first()
+ if not reference:
+ return None
+
+ pkg = meta.Session.query(cls).get(reference)
if pkg == None:
pkg = cls.by_name(reference)
return pkg
| Package.get accidentally avoids the sqlalchemy session cache.
SQLAlchemy has a session cache containing objects that it has retrieved recently, which are stored in an identity map (pk->object). If you call session.query(cls).get(pk) the object will be retrieved from the cache (identity map), if you call session.query(cls).filter() then Sqlalchemy _has_ to query the database again.
In Package.get() the query to find by id (before looking by name) uses filter and so will _always_ bypass the session cache. Once upon a time it used session.query(cls).get(reference), but this was lost in a change (where eager loading was added, then removed in a later commit).
For a contrived, and probably sub-optimal example ...
``` python
for pkg in some_query_that_fetches_packages():
pkg_dict = logic.get_action('package_show')(context, {'id': pkg.id})
```
In these cases package_show is issuing another query to the database, even though the package object is in the session cache ready for retrieval.
There is more information on how the Session cache works at http://docs.sqlalchemy.org/en/latest/orm/session_basics.html#is-the-session-a-cache
| 2016-01-06T21:06:26 |
||
ckan/ckan | 2,831 | ckan__ckan-2831 | [
"2805"
] | 7cc772bddac46c023c68f928f67233ea1fd991f9 | diff --git a/ckan/controllers/package.py b/ckan/controllers/package.py
--- a/ckan/controllers/package.py
+++ b/ckan/controllers/package.py
@@ -361,10 +361,8 @@ def read(self, id):
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
- except NotFound:
+ except (NotFound, NotAuthorized):
abort(404, _('Dataset not found'))
- except NotAuthorized:
- abort(401, _('Unauthorized to read package %s') % id)
# used by disqus plugin
c.current_package_id = c.pkg.id
| diff --git a/ckan/tests/legacy/functional/test_package.py b/ckan/tests/legacy/functional/test_package.py
--- a/ckan/tests/legacy/functional/test_package.py
+++ b/ckan/tests/legacy/functional/test_package.py
@@ -683,7 +683,7 @@ def teardown_class(self):
def test_read(self):
offset = url_for(controller='package', action='read', id=self.non_active_name)
- res = self.app.get(offset, status=[302, 401])
+ res = self.app.get(offset, status=[404])
def test_read_as_admin(self):
| Opening a deleted dataset or organization logs you out
Steps to reproduce:
1. Create a dataset as user A
2. Remove the dataset
3. Log out
4. Log in as user B (regular user)
5. Open the removed dataset via address bar
6. You are now logged out at the login screen
Redirecting to login screen is OK since the user does not have sufficient privileges to view the removed dataset, but logging them out is, in my opinion, unnecessary.
| I think that unauthenticated, or users without permission to edit, should get a 404 if they visit a package where state='deleted'. The question is really, should we, instead of doing this check in package_show instead do it as part of check_access so that it applies to everything that has state=deleted?
Confirmed on master. User is logged out :(
Will investigate a PR to return 404 instead.
| 2016-01-12T16:12:47 |
ckan/ckan | 2,879 | ckan__ckan-2879 | [
"2878"
] | ec2ecfb6df2091440c52cf3fed026fa8a8d3356d | diff --git a/ckan/lib/cli.py b/ckan/lib/cli.py
--- a/ckan/lib/cli.py
+++ b/ckan/lib/cli.py
@@ -186,9 +186,6 @@ class ManageDb(CkanCommand):
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file
- db simple-dump-csv FILE_PATH - dump just datasets in CSV format
- db simple-dump-json FILE_PATH - dump just datasets in JSON format
- db user-dump-csv FILE_PATH - dump user information to a CSV file
db load FILE_PATH - load a pg_dump from a file
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing
@@ -239,12 +236,6 @@ def command(self):
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
- elif cmd == 'simple-dump-csv':
- self.simple_dump_csv()
- elif cmd == 'simple-dump-json':
- self.simple_dump_json()
- elif cmd == 'user-dump-csv':
- self.user_dump_csv()
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
@@ -326,35 +317,6 @@ def load(self, only_load=False):
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
- def simple_dump_csv(self):
- import ckan.model as model
- if len(self.args) < 2:
- print 'Need csv file path'
- return
- dump_filepath = self.args[1]
- import ckan.lib.dumper as dumper
- dump_file = open(dump_filepath, 'w')
- dumper.SimpleDumper().dump(dump_file, format='csv')
-
- def simple_dump_json(self):
- import ckan.model as model
- if len(self.args) < 2:
- print 'Need json file path'
- return
- dump_filepath = self.args[1]
- import ckan.lib.dumper as dumper
- dump_file = open(dump_filepath, 'w')
- dumper.SimpleDumper().dump(dump_file, format='json')
-
- def user_dump_csv(self):
- if len(self.args) < 2:
- print 'Need csv file path'
- return
- dump_filepath = self.args[1]
- import ckan.lib.dumper as dumper
- dump_file = open(dump_filepath, 'w')
- dumper.UserDumper().dump(dump_file)
-
def migrate_filestore(self):
from ckan.model import Session
import requests
diff --git a/ckan/lib/dumper.py b/ckan/lib/dumper.py
deleted file mode 100644
--- a/ckan/lib/dumper.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import csv
-
-import ckan.model as model
-from ckan.common import json, OrderedDict
-
-
-class SimpleDumper(object):
- '''Dumps just package data but including tags, groups, license text etc'''
- def dump(self, dump_file_obj, format='json', query=None):
- if query is None:
- query = model.Session.query(model.Package)
- active = model.State.ACTIVE
- query = query.filter_by(state=active)
- if format == 'csv':
- self.dump_csv(dump_file_obj, query)
- elif format == 'json':
- self.dump_json(dump_file_obj, query)
- else:
- raise Exception('Unknown format: %s' % format)
-
- def dump_csv(self, dump_file_obj, query):
- row_dicts = []
- for pkg in query:
- pkg_dict = pkg.as_dict()
- # flatten dict
- for name, value in pkg_dict.items()[:]:
- if isinstance(value, (list, tuple)):
- if value and isinstance(value[0], dict) and \
- name == 'resources':
- for i, res in enumerate(value):
- prefix = 'resource-%i' % i
- pkg_dict[prefix + '-url'] = res['url']
- pkg_dict[prefix + '-format'] = res['format']
- pkg_dict[prefix + '-description'] = \
- res['description']
- else:
- pkg_dict[name] = ' '.join(value)
- if isinstance(value, dict):
- for name_, value_ in value.items():
- pkg_dict[name_] = value_
- del pkg_dict[name]
- row_dicts.append(pkg_dict)
- writer = CsvWriter(row_dicts)
- writer.save(dump_file_obj)
-
- def dump_json(self, dump_file_obj, query):
- pkgs = []
- for pkg in query:
- pkg_dict = pkg.as_dict()
- pkgs.append(pkg_dict)
- json.dump(pkgs, dump_file_obj, indent=4)
-
-
-class CsvWriter:
- def __init__(self, package_dict_list=None):
- self._rows = []
- self._col_titles = []
- for row_dict in package_dict_list:
- for key in row_dict.keys():
- if key not in self._col_titles:
- self._col_titles.append(key)
- for row_dict in package_dict_list:
- self._add_row_dict(row_dict)
-
- def _add_row_dict(self, row_dict):
- row = []
- for title in self._col_titles:
- if title in row_dict:
- if isinstance(row_dict[title], int):
- row.append(row_dict[title])
- elif isinstance(row_dict[title], unicode):
- row.append(row_dict[title].encode('utf8'))
- else:
- row.append(row_dict[title])
- else:
- row.append(None)
- self._rows.append(row)
-
- def save(self, file_obj):
- writer = csv.writer(file_obj, quotechar='"',
- quoting=csv.QUOTE_NONNUMERIC)
- writer.writerow(self._col_titles)
- for row in self._rows:
- writer.writerow(row)
-
-
-class UserDumper(object):
- def dump(self, dump_file_obj):
- query = model.Session.query(model.User)
- query = query.order_by(model.User.created.asc())
-
- columns = (('id', 'name', 'openid', 'fullname', 'email', 'created',
- 'about'))
- row_dicts = []
- for user in query:
- row = OrderedDict()
- for col in columns:
- value = getattr(user, col)
- if not value:
- value = ''
- if col == 'created':
- value = str(value) # or maybe dd/mm/yyyy?
- row[col] = value
- row_dicts.append(row)
-
- writer = CsvWriter(row_dicts)
- writer.save(dump_file_obj)
- dump_file_obj.close()
| diff --git a/ckan/tests/legacy/lib/test_cli.py b/ckan/tests/legacy/lib/test_cli.py
--- a/ckan/tests/legacy/lib/test_cli.py
+++ b/ckan/tests/legacy/lib/test_cli.py
@@ -10,45 +10,6 @@
from ckan.lib.search import index_for,query_for, clear_all
-class TestDb:
- @classmethod
- def setup_class(cls):
- cls.db = ManageDb('db')
- CreateTestData.create()
-
- # delete warandpeace
- rev = model.repo.new_revision()
- model.Package.by_name(u'warandpeace').delete()
- model.repo.commit_and_remove()
-
- @classmethod
- def teardown_class(cls):
- model.repo.rebuild_db()
-
- def test_simple_dump_csv(self):
- csv_filepath = '/tmp/dump.tmp'
- self.db.args = ('simple-dump-csv %s' % csv_filepath).split()
- self.db.simple_dump_csv()
- assert os.path.exists(csv_filepath), csv_filepath
- f_obj = open(csv_filepath, "r")
- reader = csv.reader(f_obj)
- rows = [row for row in reader]
- assert_equal(rows[0][:3], ['id', 'name', 'title'])
- pkg_names = set(row[1] for row in rows[1:])
- assert 'annakarenina' in pkg_names, pkg_names
- assert 'warandpeace' not in pkg_names, pkg_names
-
- def test_simple_dump_json(self):
- json_filepath = '/tmp/dump.tmp'
- self.db.args = ('simple-dump-json %s' % json_filepath).split()
- self.db.simple_dump_json()
- assert os.path.exists(json_filepath), json_filepath
- f_obj = open(json_filepath, "r")
- rows = json.loads(f_obj.read())
- assert set(rows[0].keys()) > set(('id', 'name', 'title')), rows[0].keys()
- pkg_names = set(row['name'] for row in rows)
- assert 'annakarenina' in pkg_names, pkg_names
- assert 'warandpeace' not in pkg_names, pkg_names
class FakeOptions():
def __init__(self,**kwargs):
diff --git a/ckan/tests/legacy/test_dumper.py b/ckan/tests/legacy/test_dumper.py
deleted file mode 100644
--- a/ckan/tests/legacy/test_dumper.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import tempfile
-
-from ckan.tests.legacy import TestController, CreateTestData
-import ckan.model as model
-import ckan.lib.dumper as dumper
-simple_dumper = dumper.SimpleDumper()
-
-
-class TestSimpleDump(TestController):
-
- @classmethod
- def setup_class(self):
- model.repo.rebuild_db()
- CreateTestData.create()
-
- @classmethod
- def teardown_class(self):
- model.Session.remove()
- model.repo.rebuild_db()
-
- def test_simple_dump_csv(self):
- dump_file = tempfile.TemporaryFile()
- simple_dumper.dump(dump_file, 'csv')
- dump_file.seek(0)
- res = dump_file.read()
- assert 'annakarenina' in res, res
- assert 'tolstoy' in res, res
- assert 'russian' in res, res
- assert 'genre' in res, res
- assert 'romantic novel' in res, res
- assert 'datahub.io/download' in res, res
- assert 'Index of the novel' in res, res
- assert 'joeadmin' not in res, res
- self.assert_correct_field_order(res)
-
- def test_simple_dump_json(self):
- dump_file = tempfile.TemporaryFile()
- simple_dumper.dump(dump_file, 'json')
- dump_file.seek(0)
- res = dump_file.read()
- assert 'annakarenina' in res, res
- assert '"russian"' in res, res
- assert 'genre' in res, res
- assert 'romantic novel' in res, res
- assert 'joeadmin' not in res, res
- self.assert_correct_field_order(res)
-
- def assert_correct_field_order(self, res):
- correct_field_order = ('id', 'name', 'title', 'version', 'url')
- field_position = [res.find('"%s"' % field) for field in correct_field_order]
- field_position_sorted = field_position[:]
- field_position_sorted.sort()
- assert field_position == field_position_sorted, field_position
| Paster command db simple-dump-json fails with out of memory error
When dumping large datasets (~17.000) the paster command `db simple-dump-json` fails with an out of memory error.
System:
- Ubuntu 14.04.2 LTS
- 64-bit platform
- RAM 8 GB
I tried to patch the local install with an implementation writing only some JSON chunks to the output file, which still fails in the way that the process gets killed, also by an out of memory error.
``` python
def dump_json(self, dump_file_obj, query):
for pkg in query:
pkg_dict = pkg.as_dict()
for chunk in json.JSONEncoder(indent=4).iterencode(pkg_dict):
dump_file_obj.write(chunk)
dump_file_obj.close()
```
My way of avoiding failing dumps is currently using a combination of `ckanapi dump datasets` and some post `jq` manipulation via `cat dump.jsonl | jq --slurp . > dump.json`.
| 2016-02-16T13:32:52 |
|
ckan/ckan | 2,896 | ckan__ckan-2896 | [
"2895"
] | 6f4bfceeb320c172a19f46cc66d3ddf228b1c543 | diff --git a/ckan/lib/plugins.py b/ckan/lib/plugins.py
--- a/ckan/lib/plugins.py
+++ b/ckan/lib/plugins.py
@@ -537,7 +537,7 @@ def i18n_directory(self):
i18n/
'''
# assume plugin is called ckanext.<myplugin>.<...>.PluginClass
- extension_module_name = '.'.join(self.__module__.split('.')[0:2])
+ extension_module_name = '.'.join(self.__module__.split('.')[:3])
module = sys.modules[extension_module_name]
return os.path.join(os.path.dirname(module.__file__), 'i18n')
| Problems with translations in plugins
(This came up together with #2893, see there for more details)
I found two issues regarding translations in plugins:
1. `ckan.lib.plugins.DefaultTranslation.i18n_directory` relies on the extension package's `__file__` attribute (`ckanext.my_extension.__file__`). However, many extensions are distributed using namespace packages, and namespace packages don't have a `__file__` attribute.
2. It seems that `.mo` compiled translation files are not automatically picked up when an extension is packaged using `setup.py`.
I will prepare a PR for this.
| 2016-03-03T17:25:01 |
||
ckan/ckan | 2,901 | ckan__ckan-2901 | [
"2900"
] | ca8b04638229fda7814957991ff5fd63ccb24640 | diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -246,7 +246,7 @@ def update_config():
config['ckan.site_id'] = ckan_host
# ensure that a favicon has been set
- favicon = config.get('ckan.favicon', '/images/icons/ckan.ico')
+ favicon = config.get('ckan.favicon', '/base/images/ckan.ico')
config['ckan.favicon'] = favicon
# Init SOLR settings and check if the schema is compatible
| Ckan Favicon with wrong path
### CKAN Version if known (or site URL)
2.5.1, from source
### Please describe the expected behaviour
Load ckan icon
### Please describe the actual behaviour
Can't lada ckan icon
### What steps can be taken to reproduce the issue?
Open dev console on a browser. Quick search on github shows the path is wrong on the source code. it points to 'images/icons/ckan.ico' instead of 'base/images/icons/ckan.ico'
| 2016-03-08T16:28:00 |
||
ckan/ckan | 2,921 | ckan__ckan-2921 | [
"2856"
] | 8dbb6dcacd2b95f1dc2ce5fcaaabf57462569269 | diff --git a/ckanext/datapusher/logic/action.py b/ckanext/datapusher/logic/action.py
--- a/ckanext/datapusher/logic/action.py
+++ b/ckanext/datapusher/logic/action.py
@@ -3,6 +3,8 @@
import urlparse
import datetime
+from dateutil.parser import parse as parse_date
+
import pylons
import requests
@@ -37,7 +39,6 @@ def datapusher_submit(context, data_dict):
:rtype: bool
'''
-
schema = context.get('schema', dpschema.datapusher_submit_schema())
data_dict, errors = _validate(data_dict, schema, context)
if errors:
@@ -47,6 +48,13 @@ def datapusher_submit(context, data_dict):
p.toolkit.check_access('datapusher_submit', context, data_dict)
+ try:
+ resource_dict = p.toolkit.get_action('resource_show')(context, {
+ 'id': res_id,
+ })
+ except logic.NotFound:
+ return False
+
datapusher_url = pylons.config.get('ckan.datapusher.url')
site_url = pylons.config['ckan.site_url']
@@ -100,7 +108,9 @@ def datapusher_submit(context, data_dict):
'ignore_hash': data_dict.get('ignore_hash', False),
'ckan_url': site_url,
'resource_id': res_id,
- 'set_url_type': data_dict.get('set_url_type', False)
+ 'set_url_type': data_dict.get('set_url_type', False),
+ 'task_created': task['last_updated'],
+ 'original_url': resource_dict.get('url'),
}
}))
r.raise_for_status()
@@ -166,6 +176,9 @@ def datapusher_hook(context, data_dict):
task['state'] = status
task['last_updated'] = str(datetime.datetime.now())
+
+ resubmit = False
+
if status == 'complete':
# Create default views for resource if necessary (only the ones that
# require data to be in the DataStore)
@@ -186,9 +199,36 @@ def datapusher_hook(context, data_dict):
'create_datastore_views': True,
})
+ # Check if the uploaded file has been modified in the meantime
+ if (resource_dict.get('last_modified') and
+ metadata.get('task_created')):
+ try:
+ last_modified_datetime = parse_date(
+ resource_dict['last_modified'])
+ task_created_datetime = parse_date(metadata['task_created'])
+ if last_modified_datetime > task_created_datetime:
+ log.debug('Uploaded file more recent: {0} > {1}'.format(
+ last_modified_datetime, task_created_datetime))
+ resubmit = True
+ except ValueError:
+ pass
+ # Check if the URL of the file has been modified in the meantime
+ elif (resource_dict.get('url') and
+ metadata.get('original_url') and
+ resource_dict['url'] != metadata['original_url']):
+ log.debug('URLs are different: {0} != {1}'.format(
+ resource_dict['url'], metadata['original_url']))
+ resubmit = True
+
context['ignore_auth'] = True
p.toolkit.get_action('task_status_update')(context, task)
+ if resubmit:
+ log.debug('Resource {0} has been modified, '
+ 'resubmitting to DataPusher'.format(res_id))
+ p.toolkit.get_action('datapusher_submit')(
+ context, {'resource_id': res_id})
+
def datapusher_status(context, data_dict):
''' Get the status of a datapusher job for a certain resource.
diff --git a/ckanext/datapusher/plugin.py b/ckanext/datapusher/plugin.py
--- a/ckanext/datapusher/plugin.py
+++ b/ckanext/datapusher/plugin.py
@@ -109,6 +109,25 @@ def notify(self, entity, operation=None):
entity.url_type != 'datapusher'):
try:
+ task = p.toolkit.get_action('task_status_show')(
+ context, {
+ 'entity_id': entity.id,
+ 'task_type': 'datapusher',
+ 'key': 'datapusher'}
+ )
+ if task.get('state') == 'pending':
+ # There already is a pending DataPusher submission,
+ # skip this one ...
+ log.debug(
+ 'Skipping DataPusher submission for '
+ 'resource {0}'.format(entity.id))
+ return
+ except p.toolkit.ObjectNotFound:
+ pass
+
+ try:
+ log.debug('Submitting resource {0}'.format(entity.id) +
+ ' to DataPusher')
p.toolkit.get_action('datapusher_submit')(context, {
'resource_id': entity.id
})
diff --git a/ckanext/datastore/logic/action.py b/ckanext/datastore/logic/action.py
--- a/ckanext/datastore/logic/action.py
+++ b/ckanext/datastore/logic/action.py
@@ -143,8 +143,13 @@ def datastore_create(context, data_dict):
raise p.toolkit.ValidationError(str(err))
# Set the datastore_active flag on the resource if necessary
- p.toolkit.get_action('resource_patch')(
- context, {'id': data_dict['resource_id'], 'datastore_active': True})
+ if resource.extras.get('datastore_active') is not True:
+ log.debug(
+ 'Setting datastore_active=True on resource {0}'.format(resource.id)
+ )
+ p.toolkit.get_action('resource_patch')(
+ context,
+ {'id': data_dict['resource_id'], 'datastore_active': True})
result.pop('id', None)
result.pop('private', None)
@@ -345,9 +350,17 @@ def datastore_delete(context, data_dict):
result = db.delete(context, data_dict)
# Set the datastore_active flag on the resource if necessary
- if not data_dict.get('filters'):
+ model = _get_or_bust(context, 'model')
+ resource = model.Resource.get(data_dict['resource_id'])
+
+ if (not data_dict.get('filters') and
+ resource.extras.get('datastore_active') is True):
+ log.debug(
+ 'Setting datastore_active=True on resource {0}'.format(resource.id)
+ )
p.toolkit.get_action('resource_patch')(
- context, {'id': data_dict['resource_id'], 'datastore_active': False})
+ context, {'id': data_dict['resource_id'],
+ 'datastore_active': False})
result.pop('id', None)
result.pop('connection_url')
| diff --git a/ckan/tests/helpers.py b/ckan/tests/helpers.py
--- a/ckan/tests/helpers.py
+++ b/ckan/tests/helpers.py
@@ -20,6 +20,7 @@
import webtest
from pylons import config
import nose.tools
+import mock
import ckan.lib.search as search
import ckan.config.middleware
@@ -320,3 +321,60 @@ def wrapper(*args, **kwargs):
return return_value
return nose.tools.make_decorator(func)(wrapper)
return decorator
+
+
+def mock_action(action_name):
+ '''
+ Decorator to easily mock a CKAN action in the context of a test function
+
+ It adds a mock object for the provided action as a parameter to the test
+ function. The mock is discarded at the end of the function, even if there
+ is an exception raised.
+
+ Note that this mocks the action both when it's called directly via
+ ``ckan.logic.get_action`` and via ``ckan.plugins.toolkit.get_action``.
+
+ Usage::
+
+ @mock_action('user_list')
+ def test_mock_user_list(self, mock_user_list):
+
+ mock_user_list.return_value = 'hi'
+
+ # user_list is mocked
+ eq_(helpers.call_action('user_list', {}), 'hi')
+
+ assert mock_user_list.called
+
+ :param action_name: the name of the action to be mocked,
+ e.g. ``package_create``
+ :type action_name: string
+
+ '''
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ mock_action = mock.MagicMock()
+
+ from ckan.logic import get_action as original_get_action
+
+ def side_effect(called_action_name):
+ if called_action_name == action_name:
+ return mock_action
+ else:
+ return original_get_action(called_action_name)
+ try:
+ with mock.patch('ckan.logic.get_action') as mock_get_action, \
+ mock.patch('ckan.plugins.toolkit.get_action') \
+ as mock_get_action_toolkit:
+ mock_get_action.side_effect = side_effect
+ mock_get_action_toolkit.side_effect = side_effect
+
+ new_args = args + tuple([mock_action])
+ return_value = func(*new_args, **kwargs)
+ finally:
+ # Make sure to stop the mock, even with an exception
+ mock_action.stop()
+ return return_value
+
+ return nose.tools.make_decorator(func)(wrapper)
+ return decorator
diff --git a/ckanext/datapusher/tests/test_action.py b/ckanext/datapusher/tests/test_action.py
new file mode 100644
--- /dev/null
+++ b/ckanext/datapusher/tests/test_action.py
@@ -0,0 +1,277 @@
+import datetime
+
+from nose.tools import eq_
+
+import ckan.plugins as p
+from ckan.tests import helpers, factories
+
+
+class TestDataPusherAction(object):
+
+ @classmethod
+ def setup_class(cls):
+ if not p.plugin_loaded('datastore'):
+ p.load('datastore')
+ if not p.plugin_loaded('datapusher'):
+ p.load('datapusher')
+
+ helpers.reset_db()
+
+ @classmethod
+ def teardown_class(cls):
+
+ p.unload('datapusher')
+ p.unload('datastore')
+
+ helpers.reset_db()
+
+ def _pending_task(self, resource_id):
+ return {
+ 'entity_id': resource_id,
+ 'entity_type': 'resource',
+ 'task_type': 'datapusher',
+ 'last_updated': str(datetime.datetime.now()),
+ 'state': 'pending',
+ 'key': 'datapusher',
+ 'value': '{}',
+ 'error': '{}',
+ }
+
+ @helpers.mock_action('datapusher_submit')
+ def test_submit(self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ assert not mock_datapusher_submit.called
+
+ helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV')
+
+ assert mock_datapusher_submit.called
+
+ @helpers.mock_action('datapusher_submit')
+ def test_submit_when_url_changes(self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.pdf',
+ )
+
+ assert not mock_datapusher_submit.called
+
+ helpers.call_action('resource_update', {},
+ id=resource['id'],
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+
+ @helpers.mock_action('datapusher_submit')
+ def test_does_not_submit_while_ongoing_job(self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.CSV',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Create a task with a state pending to mimic an ongoing job
+ # on the DataPusher
+ helpers.call_action('task_status_update', {},
+ **self._pending_task(resource['id']))
+
+ # Update the resource
+ helpers.call_action('resource_update', {},
+ id=resource['id'],
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV',
+ description='Test',
+ )
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ @helpers.mock_action('datapusher_submit')
+ def test_resubmits_if_url_changes_in_the_meantime(
+ self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Create a task with a state pending to mimic an ongoing job
+ # on the DataPusher
+ task = helpers.call_action('task_status_update', {},
+ **self._pending_task(resource['id']))
+
+ # Update the resource, set a new URL
+ helpers.call_action('resource_update', {},
+ id=resource['id'],
+ package_id=dataset['id'],
+ url='http://example.com/another.file.csv',
+ format='CSV',
+ )
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Call datapusher_hook with state complete, to mock the DataPusher
+ # finishing the job and telling CKAN
+ data_dict = {
+ 'metadata': {
+ 'resource_id': resource['id'],
+ 'original_url': 'http://example.com/file.csv',
+ 'task_created': task['last_updated'],
+ },
+ 'status': 'complete',
+ }
+ helpers.call_action('datapusher_hook', {}, **data_dict)
+
+ # datapusher_submit was called again
+ eq_(len(mock_datapusher_submit.mock_calls), 2)
+
+ @helpers.mock_action('datapusher_submit')
+ def test_resubmits_if_upload_changes_in_the_meantime(
+ self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Create a task with a state pending to mimic an ongoing job
+ # on the DataPusher
+ task = helpers.call_action('task_status_update', {},
+ **self._pending_task(resource['id']))
+
+ # Update the resource, set a new last_modified (changes on file upload)
+ helpers.call_action('resource_update', {},
+ id=resource['id'],
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV',
+ last_modified=datetime.datetime.now().isoformat()
+ )
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Call datapusher_hook with state complete, to mock the DataPusher
+ # finishing the job and telling CKAN
+ data_dict = {
+ 'metadata': {
+ 'resource_id': resource['id'],
+ 'original_url': 'http://example.com/file.csv',
+ 'task_created': task['last_updated'],
+ },
+ 'status': 'complete',
+ }
+ helpers.call_action('datapusher_hook', {}, **data_dict)
+
+ # datapusher_submit was called again
+ eq_(len(mock_datapusher_submit.mock_calls), 2)
+
+ @helpers.mock_action('datapusher_submit')
+ def test_does_not_resubmit_if_a_resource_field_changes_in_the_meantime(
+ self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Create a task with a state pending to mimic an ongoing job
+ # on the DataPusher
+ task = helpers.call_action('task_status_update', {},
+ **self._pending_task(resource['id']))
+
+ # Update the resource, set a new description
+ helpers.call_action('resource_update', {},
+ id=resource['id'],
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV',
+ description='Test',
+ )
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Call datapusher_hook with state complete, to mock the DataPusher
+ # finishing the job and telling CKAN
+ data_dict = {
+ 'metadata': {
+ 'resource_id': resource['id'],
+ 'original_url': 'http://example.com/file.csv',
+ 'task_created': task['last_updated'],
+ },
+ 'status': 'complete',
+ }
+ helpers.call_action('datapusher_hook', {}, **data_dict)
+
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ @helpers.mock_action('datapusher_submit')
+ def test_does_not_resubmit_if_a_dataset_field_changes_in_the_meantime(
+ self, mock_datapusher_submit):
+ dataset = factories.Dataset()
+
+ resource = helpers.call_action('resource_create', {},
+ package_id=dataset['id'],
+ url='http://example.com/file.csv',
+ format='CSV'
+ )
+
+ assert mock_datapusher_submit.called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Create a task with a state pending to mimic an ongoing job
+ # on the DataPusher
+ task = helpers.call_action('task_status_update', {},
+ **self._pending_task(resource['id']))
+
+ # Update the parent dataset
+ helpers.call_action('package_update', {},
+ id=dataset['id'],
+ notes='Test notes',
+ resources=[resource]
+ )
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
+
+ # Call datapusher_hook with state complete, to mock the DataPusher
+ # finishing the job and telling CKAN
+ data_dict = {
+ 'metadata': {
+ 'resource_id': resource['id'],
+ 'original_url': 'http://example.com/file.csv',
+ 'task_created': task['last_updated'],
+ },
+ 'status': 'complete',
+ }
+ helpers.call_action('datapusher_hook', {}, **data_dict)
+
+ # Not called
+ eq_(len(mock_datapusher_submit.mock_calls), 1)
diff --git a/ckanext/datastore/tests/test_create.py b/ckanext/datastore/tests/test_create.py
--- a/ckanext/datastore/tests/test_create.py
+++ b/ckanext/datastore/tests/test_create.py
@@ -189,6 +189,47 @@ def _execute_sql(self, sql, *args):
session = orm.scoped_session(orm.sessionmaker(bind=engine))
return session.connection().execute(sql, *args)
+ def test_sets_datastore_active_on_resource_on_create(self):
+ resource = factories.Resource()
+
+ assert_equal(resource['datastore_active'], False)
+
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'records': [
+ {'book': 'annakarenina', 'author': 'tolstoy'}
+ ]
+ }
+
+ helpers.call_action('datastore_create', **data)
+
+ resource = helpers.call_action('resource_show', id=resource['id'])
+
+ assert_equal(resource['datastore_active'], True)
+
+ def test_sets_datastore_active_on_resource_on_delete(self):
+ resource = factories.Resource(datastore_active=True)
+
+ assert_equal(resource['datastore_active'], True)
+
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'records': [
+ {'book': 'annakarenina', 'author': 'tolstoy'}
+ ]
+ }
+
+ helpers.call_action('datastore_create', **data)
+
+ helpers.call_action('datastore_delete', resource_id=resource['id'],
+ force=True)
+
+ resource = helpers.call_action('resource_show', id=resource['id'])
+
+ assert_equal(resource['datastore_active'], False)
+
class TestDatastoreCreate(tests.WsgiAppCase):
sysadmin_user = None
| DataPusher called multiple times when creating a dataset
When you upload a CSV the DataPusher [gets called](https://github.com/ckan/ckan/blob/master/ckanext/datapusher/plugin.py#L98) to do its thing.
On https://github.com/ckan/ckan/pull/2234 we added a couple of calls to `resource_patch` on `datastore_create` and `datastore_delete` to set the `datastore_active` extra on the resource.
This is all very well but this update on the resource triggers again the DataPusher, which pings `datastore_delete` and `datastore_create`, which trigger...
Not sure how we can handle this on the `notify` extension point, as we only get the model object there.
| And for the record, before 2.5, DataPusher always got called 2 times when creating a dataset because the wonderful [`new_resource`](https://github.com/ckan/ckan/blob/master/ckan/controllers/package.py#L608) action from the package controller first calls `resource_create` and then `package_update`.
Hi, may i know is the problem solve the ? My ckan is install on ubuntu 12.10 with ckan version 2.5.1
I have successfully install datapusher and datastore. But something when i upload a dataset, datapusher always got called 2 times. This error happened randomly, sometime it push normally but most of the time it pushes twice.
Just like amercader said, I check the log, most of the api got call twice when i upload a dataset. Such as store_create,datastore_delete, datastore_search etc. But teach me if there is any solution for this. Thank you
@TkTech I'm working on a patch for this, so grabbing it from you
| 2016-03-17T13:52:44 |
ckan/ckan | 2,927 | ckan__ckan-2927 | [
"2926",
"2926"
] | 1da4f6f348037fcba70228a44dd11478d2c50f88 | diff --git a/ckan/lib/base.py b/ckan/lib/base.py
--- a/ckan/lib/base.py
+++ b/ckan/lib/base.py
@@ -75,8 +75,9 @@ def render_snippet(template_name, **kw):
cache_force = kw.pop('cache_force', None)
output = render(template_name, extra_vars=kw, cache_force=cache_force,
renderer='snippet')
- output = '\n<!-- Snippet %s start -->\n%s\n<!-- Snippet %s end -->\n' % (
- template_name, output, template_name)
+ if config.get('debug'):
+ output = ('\n<!-- Snippet %s start -->\n%s\n<!-- Snippet %s end -->\n'
+ % (template_name, output, template_name))
return literal(output)
| diff --git a/ckan/tests/controllers/test_admin.py b/ckan/tests/controllers/test_admin.py
--- a/ckan/tests/controllers/test_admin.py
+++ b/ckan/tests/controllers/test_admin.py
@@ -223,6 +223,7 @@ def test_custom_css(self):
style_tag = reset_intro_response_html.select('head style')
assert_equal(len(style_tag), 0)
+ @helpers.change_config('debug', True)
def test_homepage_style(self):
'''Select a homepage style'''
app = self._get_test_app()
@@ -255,6 +256,7 @@ def test_homepage_style(self):
class TestTrashView(helpers.FunctionalTestBase):
'''View tests for permanently deleting datasets with Admin Trash.'''
+ @helpers.change_config('debug', True)
def test_trash_view_anon_user(self):
'''An anon user shouldn't be able to access trash view.'''
app = self._get_test_app()
diff --git a/ckan/tests/lib/test_base.py b/ckan/tests/lib/test_base.py
--- a/ckan/tests/lib/test_base.py
+++ b/ckan/tests/lib/test_base.py
@@ -3,6 +3,21 @@
import ckan.tests.helpers as helpers
+class TestRenderSnippet(helpers.FunctionalTestBase):
+ """
+ Test ``ckan.lib.base.render_snippet``.
+ """
+ @helpers.change_config('debug', True)
+ def test_comment_present_if_debug_true(self):
+ response = self._get_test_app().get('/')
+ assert '<!-- Snippet ' in response
+
+ @helpers.change_config('debug', False)
+ def test_comment_absent_if_debug_false(self):
+ response = self._get_test_app().get('/')
+ assert '<!-- Snippet ' not in response
+
+
class TestCORS(helpers.FunctionalTestBase):
def test_options(self):
| Snippet HTML comment should only be inserted when debugging is enabled
As of CKAN 2.5.1, the `{% snippet %}` Jinja-directive always places HTML-comments around the snippet content:
``` html
<!-- Snippet snippets/tag_list.html start -->
<ul class="tag-list">
<li>
<a class="tag" href="/data/dataset?tags=geodaten">geodaten</a>
<li>
<a class="tag" href="/data/dataset?tags=markt">markt</a>
</ul>
<!-- Snippet snippets/tag_list.html end -->
```
In my opinion, this should only be done if debug mode is enabled.
Snippet HTML comment should only be inserted when debugging is enabled
As of CKAN 2.5.1, the `{% snippet %}` Jinja-directive always places HTML-comments around the snippet content:
``` html
<!-- Snippet snippets/tag_list.html start -->
<ul class="tag-list">
<li>
<a class="tag" href="/data/dataset?tags=geodaten">geodaten</a>
<li>
<a class="tag" href="/data/dataset?tags=markt">markt</a>
</ul>
<!-- Snippet snippets/tag_list.html end -->
```
In my opinion, this should only be done if debug mode is enabled.
| 2016-03-22T14:51:03 |
|
ckan/ckan | 2,954 | ckan__ckan-2954 | [
"2953",
"2953"
] | fdff5a9e8f6f9a6f9ca7f73fecbdeb578c2f3870 | diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py
--- a/ckan/lib/helpers.py
+++ b/ckan/lib/helpers.py
@@ -670,7 +670,8 @@ def get_facet_items_dict(facet, limit=None, exclude_active=False):
facets.append(dict(active=False, **facet_item))
elif not exclude_active:
facets.append(dict(active=True, **facet_item))
- facets = sorted(facets, key=lambda item: item['count'], reverse=True)
+ # Sort descendingly by count and ascendingly by case-sensitive display name
+ facets.sort(key=lambda it: (-it['count'], it['display_name'].lower()))
if c.search_facets_limits and limit is None:
limit = c.search_facets_limits.get(facet)
# zero treated as infinite for hysterical raisins
| Strange facet item order
As of CKAN 2.5.2, the search facet items are listed in a strange order: They are first sorted by the number of matching datasets (that's good) and then by their name in _reverse_ alphabetic order, with lower case before upper case (that's strange). Here's a screenshot from [demo.ckan.org](http://demo.ckan.org):

I would have expected the following order: First search by count and then by name in alphabetic order, without distinguishing between upper and lower case.
Strange facet item order
As of CKAN 2.5.2, the search facet items are listed in a strange order: They are first sorted by the number of matching datasets (that's good) and then by their name in _reverse_ alphabetic order, with lower case before upper case (that's strange). Here's a screenshot from [demo.ckan.org](http://demo.ckan.org):

I would have expected the following order: First search by count and then by name in alphabetic order, without distinguishing between upper and lower case.
| 2016-04-12T10:21:43 |
||
ckan/ckan | 2,960 | ckan__ckan-2960 | [
"2955"
] | c0a997dcea032a681e9816ebf6c822cf93b07dbc | diff --git a/ckan/controllers/feed.py b/ckan/controllers/feed.py
--- a/ckan/controllers/feed.py
+++ b/ckan/controllers/feed.py
@@ -171,12 +171,17 @@ def _alternate_url(self, params, **kwargs):
def _group_or_organization(self, obj_dict, is_org):
data_dict, params = self._parse_url_params()
- key = 'owner_org' if is_org else 'groups'
- data_dict['fq'] = '%s:"%s"' % (key, obj_dict['id'],)
- group_type = 'organization'
- if not is_org:
+ if is_org:
+ key = 'owner_org'
+ value = obj_dict['id']
+ group_type = 'organization'
+ else:
+ key = 'groups'
+ value = obj_dict['name']
group_type = 'group'
+ data_dict['fq'] = '{0}:"{1}"'.format(key, value)
+
item_count, results = _package_search(data_dict)
navigation_urls = self._navigation_urls(params,
| diff --git a/ckan/tests/controllers/test_feed.py b/ckan/tests/controllers/test_feed.py
--- a/ckan/tests/controllers/test_feed.py
+++ b/ckan/tests/controllers/test_feed.py
@@ -8,6 +8,10 @@
class TestFeedNew(helpers.FunctionalTestBase):
+ @classmethod
+ def teardown_class(cls):
+ helpers.reset_db()
+
def test_atom_feed_page_zero_gives_error(self):
group = factories.Group()
offset = url_for(controller='feed', action='group',
@@ -31,3 +35,49 @@ def test_atom_feed_page_not_int_gives_error(self):
app = self._get_test_app()
res = app.get(offset, status=400)
assert '"page" parameter must be a positive integer' in res, res
+
+ def test_general_atom_feed_works(self):
+ dataset = factories.Dataset()
+ offset = url_for(controller='feed', action='general')
+ app = self._get_test_app()
+ res = app.get(offset)
+
+ assert '<title>{0}</title>'.format(dataset['title']) in res.body
+
+ def test_group_atom_feed_works(self):
+ group = factories.Group()
+ dataset = factories.Dataset(groups=[{'id': group['id']}])
+ offset = url_for(controller='feed', action='group',
+ id=group['name'])
+ app = self._get_test_app()
+ res = app.get(offset)
+
+ assert '<title>{0}</title>'.format(dataset['title']) in res.body
+
+ def test_organization_atom_feed_works(self):
+ group = factories.Organization()
+ dataset = factories.Dataset(owner_org=group['id'])
+ offset = url_for(controller='feed', action='organization',
+ id=group['name'])
+ app = self._get_test_app()
+ res = app.get(offset)
+
+ assert '<title>{0}</title>'.format(dataset['title']) in res.body
+
+ def test_custom_atom_feed_works(self):
+ dataset1 = factories.Dataset(
+ title='Test weekly',
+ extras=[{'key': 'frequency', 'value': 'weekly'}])
+ dataset2 = factories.Dataset(
+ title='Test daily',
+ extras=[{'key': 'frequency', 'value': 'daily'}])
+ offset = url_for(controller='feed', action='custom')
+ params = {
+ 'q': 'frequency:weekly'
+ }
+ app = self._get_test_app()
+ res = app.get(offset, params=params)
+
+ assert '<title>{0}</title>'.format(dataset1['title']) in res.body
+
+ assert '<title>{0}</title>'.format(dataset2['title']) not in res.body
| Datasets not listed in group feed
### CKAN Version if known (or site URL)
2.5.1
### Please describe the expected behaviour
/feeds/group/group-name.atom should list datasets
### Please describe the actual behaviour
/feeds/group/group-name.atom does not list any datasets. My understanding is that this should be the same as /feeds/custom.atom?groups=group-name
Some public examples:
https://data.hdx.rwlabs.org/feeds/group/nepal-earthquake.atom
https://data.hdx.rwlabs.org/feeds/custom.atom?groups=nepal-earthquake
http://demo.ckan.org/feeds/group/data-explorer.atom
http://demo.ckan.org/feeds/custom.atom?groups=data-explorer
### What steps can be taken to reproduce the issue?
Create a dataset in a group
Go to
/feeds/group/name-of-group.atom
| 2016-04-14T10:44:39 |
|
ckan/ckan | 2,985 | ckan__ckan-2985 | [
"2969"
] | 91d3a818919aad02df294d416a0b33d992a3e1cf | diff --git a/ckanext/datastore/db.py b/ckanext/datastore/db.py
--- a/ckanext/datastore/db.py
+++ b/ckanext/datastore/db.py
@@ -255,7 +255,7 @@ def json_get_values(obj, current_list=None):
elif isinstance(obj, dict):
json_get_values(obj.items(), current_list)
elif obj:
- current_list.append(str(obj))
+ current_list.append(unicode(obj))
return current_list
| diff --git a/ckanext/datastore/tests/test_upsert.py b/ckanext/datastore/tests/test_upsert.py
--- a/ckanext/datastore/tests/test_upsert.py
+++ b/ckanext/datastore/tests/test_upsert.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import json
import nose
import datetime
@@ -9,6 +10,9 @@
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.tests.legacy as tests
+import ckan.tests.helpers as helpers
+import ckan.tests.factories as factories
+
import ckanext.datastore.db as db
from ckanext.datastore.tests.helpers import rebuild_all_dbs, set_url_type
@@ -16,6 +20,62 @@
assert_equal = nose.tools.assert_equal
+class TestDatastoreUpsertNewTests(object):
+ @classmethod
+ def setup_class(cls):
+ if not p.plugin_loaded('datastore'):
+ p.load('datastore')
+
+ @classmethod
+ def teardown_class(cls):
+ p.unload('datastore')
+ helpers.reset_db()
+
+ def test_upsert_doesnt_crash_with_json_field(self):
+ resource = factories.Resource()
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'primary_key': 'id',
+ 'fields': [{'id': 'id', 'type': 'text'},
+ {'id': 'book', 'type': 'json'},
+ {'id': 'author', 'type': 'text'}],
+ }
+ helpers.call_action('datastore_create', **data)
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'method': 'insert',
+ 'records': [
+ {'id': '1',
+ 'book': {'code': 'A', 'title': u'ñ'},
+ 'author': 'tolstoy'}],
+ }
+ helpers.call_action('datastore_upsert', **data)
+
+ def test_upsert_doesnt_crash_with_json_field_with_string_value(self):
+ resource = factories.Resource()
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'primary_key': 'id',
+ 'fields': [{'id': 'id', 'type': 'text'},
+ {'id': 'book', 'type': 'json'},
+ {'id': 'author', 'type': 'text'}],
+ }
+ helpers.call_action('datastore_create', **data)
+ data = {
+ 'resource_id': resource['id'],
+ 'force': True,
+ 'method': 'insert',
+ 'records': [
+ {'id': '1',
+ 'book': u'ñ',
+ 'author': 'tolstoy'}],
+ }
+ helpers.call_action('datastore_upsert', **data)
+
+
class TestDatastoreUpsert(tests.WsgiAppCase):
sysadmin_user = None
normal_user = None
| Latin characters not accepted in JSON DataStore fields
### CKAN version if known (or site URL)
2.3 (private deployment)
### Please describe the expected behaviour
The following dataset upsert should return `200 OK`:
```
> curl -X POST -k -H "Content-Type: application/json" -H "Accept: application/json" -H "Authorization: MY_API_KEY" -H "Cache-Control: no-cache" -d '{
"resource_id": "bba47654-df03-4c43-b2ff-b0f144223101",
"records": [
{
"recvTime": "2016-04-06T16:42:02.533333",
"temperature": "ñ"
}
],
"method": "insert",
"force": "true"
}' http://10.0.0.2:5055/api/3/action/datastore_upsert
```
### Please describe the actual behaviour
```
500 Inernal Server Error
<html>
<head>
<title>Server Error</title>
</head>
<body>
<h1>Server Error</h1>
An internal server error occurred
</body>
</html>
```
The following traces were taken from the log:
```
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] Error - <type 'exceptions.UnicodeEncodeError'>: 'ascii' codec can't encode character u'\\xf1' in position 0: ordinal not in range(128)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] URL: https://192.168.21.64:8443/api/3/action/datastore_upsert
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/weberror/errormiddleware.py', line 162 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] app_iter = self.application(environ, sr_checker)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/dec.py', line 147 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] resp = self.call_func(req, *args, **self.kwargs)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/dec.py', line 208 in call_func
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return self.func(req, *args, **kwargs)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/fanstatic/publisher.py', line 234 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return request.get_response(self.app)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/request.py', line 1053 in get_response
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] application, catch_exc_info=False)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/request.py', line 1022 in call_application
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] app_iter = application(self.environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/dec.py', line 147 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] resp = self.call_func(req, *args, **self.kwargs)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/dec.py', line 208 in call_func
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return self.func(req, *args, **kwargs)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/fanstatic/injector.py', line 54 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] response = request.get_response(self.app)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/request.py', line 1053 in get_response
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] application, catch_exc_info=False)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/webob/request.py', line 1022 in call_application
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] app_iter = application(self.environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/beaker/middleware.py', line 73 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return self.app(environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/beaker/middleware.py', line 155 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return self.wrap_app(environ, session_start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/routes/middleware.py', line 131 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] response = self.app(environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/wsgiapp.py', line 125 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] response = self.dispatch(controller, environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/wsgiapp.py', line 324 in dispatch
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return controller(environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckan/controllers/api.py', line 78 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] result = base.BaseController.__call__(self, environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckan/lib/base.py', line 338 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] res = WSGIController.__call__(self, environ, start_response)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/controllers/core.py', line 221 in __call__
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] response = self._dispatch_call()
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/controllers/core.py', line 172 in _dispatch_call
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] response = self._inspect_call(func)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/controllers/core.py', line 107 in _inspect_call
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] result = self._perform_call(func, args)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/lib/python2.6/site-packages/pylons/controllers/core.py', line 60 in _perform_call
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] return func(**args)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckan/controllers/api.py', line 208 in action
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] result = function(context, request_data)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckan/logic/__init__.py', line 424 in wrapped
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] result = _action(context, data_dict, **kw)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckanext/datastore/logic/action.py', line 212 in datastore_upsert
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] result = db.upsert(context, data_dict)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckanext/datastore/db.py', line 1121 in upsert
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] upsert_data(context, data_dict)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckanext/datastore/db.py', line 642 in upsert_data
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] row.append(_to_full_text(fields, record))
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckanext/datastore/db.py', line 805 in _to_full_text
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] full_text.extend(json_get_values(value))
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] File '/usr/local/ckan/venv_valencia/src/ckan/ckanext/datastore/db.py', line 258 in json_get_values
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] current_list.append(str(obj))
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] UnicodeEncodeError: 'ascii' codec can't encode character u'\\xf1' in position 0: ordinal not in range(128)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11]
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11]
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CGI Variables
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] -------------
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CKAN_CURRENT_URL: '/api/3/action/datastore_upsert'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CKAN_LANG: 'en'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CKAN_LANG_IS_DEFAULT: True
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CONTENT_LENGTH: '206'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] CONTENT_TYPE: 'application/json; charset=utf-8'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] DOCUMENT_ROOT: '/var/www/html'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] GATEWAY_INTERFACE: 'CGI/1.1'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTPS: '1'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_ACCEPT: 'application/json'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_AUTHORIZATION: '3d20cdae-44ad-4eeb-a8b7-8de485140133'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_CACHE_CONTROL: 'no-cache'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_CONNECTION: 'close'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_HOST: '192.168.21.64:8443'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] HTTP_USER_AGENT: 'curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.18 Basic ECC zlib/1.2.3 libidn/1.18 libssh2/1.4.2'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] PATH_INFO: '/api/3/action/datastore_upsert'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] PATH_TRANSLATED: '/etc/ckan/valencia/apache.wsgi/api/3/action/datastore_upsert'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] REMOTE_ADDR: '10.0.0.11'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] REMOTE_PORT: '52839'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] REQUEST_METHOD: 'POST'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] REQUEST_URI: '/api/3/action/datastore_upsert'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SCRIPT_FILENAME: '/etc/ckan/valencia/apache.wsgi'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_ADDR: '10.0.0.27'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_ADMIN: 'root@localhost'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_NAME: '192.168.21.64'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_PORT: '8443'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_PROTOCOL: 'HTTP/1.0'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] SERVER_SOFTWARE: 'Apache'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11]
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11]
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] WSGI Variables
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] --------------
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] application: <fanstatic.publisher.Delegator object at 0x7f69d1c19250>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] beaker.cache: <beaker.cache.CacheManager object at 0x7f69d1c19190>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] beaker.get_session: <bound method SessionMiddleware._get_session of <beaker.middleware.SessionMiddleware object at 0x7f69d19a0750>>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] beaker.session: {'_accessed_time': 1460985026.8013899, '_creation_time': 1460985026.8013899}
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] fanstatic.needed: <fanstatic.core.NeededResources object at 0x7f69d1c18f10>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_ssl.is_https: <built-in method ssl_is_https of mod_wsgi.Adapter object at 0x7f69cdc2e0a8>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_ssl.var_lookup: <built-in method ssl_var_lookup of mod_wsgi.Adapter object at 0x7f69cdc2e0a8>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.application_group: 'ckan-valencia.ost:8443|'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.callable_object: 'application'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.handler_script: ''
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.input_chunked: '0'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.listener_host: ''
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.listener_port: '443'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.process_group: ''
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.request_handler: 'wsgi-script'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.script_reloading: '1'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] mod_wsgi.version: (3, 2)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] paste.cookies: (<SimpleCookie: >, '')
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] paste.registry: <paste.registry.Registry object at 0x7f69d1c18c90>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] paste.throw_errors: True
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.action_method: <bound method ApiController.action of <ckan.controllers.api.ApiController object at 0x7f69d1f51c10>>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.controller: <ckan.controllers.api.ApiController object at 0x7f69d1f51c10>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.environ_config: {'session': 'beaker.session', 'cache': 'beaker.cache'}
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.pylons: <pylons.util.PylonsContext object at 0x7f69d1c19210>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.routes_dict: {'action': u'action', 'controller': u'api', 'ver': 3, 'logic_function': u'datastore_upsert'}
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] pylons.status_code_redirect: True
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] repoze.who.api: <repoze.who.api.API object at 0x7f69d1c18cd0>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] repoze.who.logger: <logging.Logger instance at 0x7f69d1b2e7a0>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] repoze.who.plugins: {'ckan.lib.authenticator:UsernamePasswordAuthenticator': <ckan.lib.authenticator.UsernamePasswordAuthenticator object at 0x7f69d1f51cd0>, 'friendlyform': <FriendlyFormPlugin 140092465969168>, 'auth_tkt': <CkanAuthTktCookiePlugin 140092459979728>}
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] routes.route: <routes.route.Route object at 0x7f69d1f22350>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] routes.url: <routes.util.URLGenerator object at 0x7f69d1c19490>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] webob._parsed_query_vars: (GET([]), '')
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] webob.adhoc_attrs: {'response': <Response at 0x7f69d1ac3050 200 OK>, 'language': 'en-us'}
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] webob.is_body_seekable: True
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] wsgi process: 'Multiprocess'
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] wsgi.file_wrapper: <built-in method file_wrapper of mod_wsgi.Adapter object at 0x7f69cdc2e0a8>
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] wsgi.version: (1, 1)
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] wsgiorg.routing_args: (<routes.util.URLGenerator object at 0x7f69d1c19490>, {'action': u'action', 'controller': u'api', 'ver': 3, 'logic_function': u'datastore_upsert'})
[Mon Apr 18 15:10:26 2016] [error] [client 10.0.0.11] ------------------------------------------------------------
```
### What steps can be taken to reproduce the issue?
Data upsert using a latin character: `ñ`, `ç`, `á`, etc.
| @frbattid Can you check if the following patch fixes the issue for you?
``` diff
diff --git a/ckanext/datastore/db.py b/ckanext/datastore/db.py
index 1121c9e..895971c 100644
--- a/ckanext/datastore/db.py
+++ b/ckanext/datastore/db.py
@@ -255,7 +255,7 @@ def json_get_values(obj, current_list=None):
elif isinstance(obj, dict):
json_get_values(obj.items(), current_list)
elif obj:
- current_list.append(str(obj))
+ current_list.append(unicode(obj))
return current_list
```
Also can you share the table structure of that DataStore resource? Something like:
```
> curl -H "Authorization: MY_API_KEY" http://10.0.0.2:5055/api/3/action/datastore_search
```
It's really bizarre because I can see the problem on the stack trace you pasted, but I didn't manage to reproduce the issue, ie I couldn't make that code execute with the same request.
@amercader this is what we got:
```
curl -h "Authorization: MY_API_KEY" http://10.0.0.2:5055/api/action/datastore_search?resource_id=bba47654-df03-4c43-b2ff-b0f144223101&limit=1
{
"help": "https://192.168.21.64:8443/api/3/action/help_show?name=datastore_search",
"success": true,
"result": {
"resource_id": "bba47654-df03-4c43-b2ff-b0f144223101",
"fields": [
{
"type": "int4",
"id": "_id"
},
{
"type": "timestamp",
"id": "recvTime"
},
{
"type": "json",
"id": "fiwareServicePath"
},
{
"type": "json",
"id": "entityType"
},
{
"type": "json",
"id": "entityId"
},
{
"type": "timestamp",
"id": "TimeInstant"
},
{
"type": "json",
"id": "temperature"
},
{
"type": "json",
"id": "temperature_md"
},
{
"type": "json",
"id": "pressure"
},
{
"type": "json",
"id": "pressure_md"
},
{
"type": "json",
"id": "presence"
},
{
"type": "json",
"id": "presence_md"
},
{
"type": "json",
"id": "humidity"
},
{
"type": "json",
"id": "humidity_md"
},
{
"type": "json",
"id": "fillLevel"
},
{
"type": "json",
"id": "fillLevel_md"
}
],
"records": [
{
"TimeInstant": null,
"recvTime": "2016-03-15T11:52:37.670000",
"temperature": "5701",
"presence": null,
"pressure_md": null,
"fillLevel": null,
"entityType": "room",
"humidity": null,
"pressure": null,
"humidity_md": null,
"fiwareServicePath": "electricidad",
"entityId": "room008",
"_id": 1,
"temperature_md": null,
"presence_md": null,
"fillLevel_md": null
}
],
"_links": {
"start": "/api/action/datastore_search?limit=1&resource_id=bba47654-df03-4c43-b2ff-b0f144223101",
"next": "/api/action/datastore_search?offset=1&limit=1&resource_id=bba47654-df03-4c43-b2ff-b0f144223101"
},
"limit": 1,
"total": 81534
}
}
```
@frbattid That what I imagined, as the fields have a "json" type. Is there a reason why fields like "Temperature" are stored as JSON? Are they actually JSON values?
Of course the original issue remains, so can you confirm if the patch I pasted on my last message fixes it?
@frbattid @manucarrace see above
@amercader our fields can be a simple value or a compund value (json value), which is the case we need the type json.
On the other hand, I confirm the patch works for us. Do you plan to include it in further releases?
Thank you in advance
| 2016-04-28T10:01:03 |
ckan/ckan | 2,989 | ckan__ckan-2989 | [
"2988"
] | 91d3a818919aad02df294d416a0b33d992a3e1cf | diff --git a/ckan/controllers/admin.py b/ckan/controllers/admin.py
--- a/ckan/controllers/admin.py
+++ b/ckan/controllers/admin.py
@@ -16,7 +16,8 @@
def get_sysadmins():
- q = model.Session.query(model.User).filter(model.User.sysadmin==True)
+ q = model.Session.query(model.User).filter(model.User.sysadmin == True,
+ model.User.state == 'active')
return q.all()
diff --git a/ckan/lib/cli.py b/ckan/lib/cli.py
--- a/ckan/lib/cli.py
+++ b/ckan/lib/cli.py
@@ -635,7 +635,8 @@ def command(self):
def list(self):
import ckan.model as model
print 'Sysadmins:'
- sysadmins = model.Session.query(model.User).filter_by(sysadmin=True)
+ sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
+ state='active')
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s id=%s' % (sysadmin.__class__.__name__,
| Deleted users appear in sysadmin user lists
Sysadmin users that have been deleted still appear in the sysadmin list at `/ckan-admin` and in the output of a `paster sysadmin list` command in CKAN in 2.4.1 and 2.5.2 without any indication the user has been deleted.
I expect that deleted sysadmin users will not appear in either list or possibly appear with an indication they are deleted.
To reproduce:
1. Create a sysadmin user 'test_sysadmin'
2. Delete 'test_sysadmin' user
3. Open `/ckan-admin` - 'test_sysadmin' is listed
4. Run `paster sysadmin list` - 'test_sysadmin' is listed
| 2016-04-29T01:31:28 |
||
ckan/ckan | 2,992 | ckan__ckan-2992 | [
"2740"
] | 91d3a818919aad02df294d416a0b33d992a3e1cf | diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py
--- a/ckan/logic/action/get.py
+++ b/ckan/logic/action/get.py
@@ -1895,9 +1895,10 @@ def package_search(context, data_dict):
for field_name in ('groups', 'organization'):
group_names.extend(facets.get(field_name, {}).keys())
- groups = session.query(model.Group.name, model.Group.title) \
- .filter(model.Group.name.in_(group_names)) \
+ groups = (session.query(model.Group.name, model.Group.title)
+ .filter(model.Group.name.in_(group_names))
.all()
+ if group_names else [])
group_titles_by_name = dict(groups)
# Transform facets into a more useful data structure.
| Sqlalchemy warning during tests (expensive non-query).
I'm seeing the following warning when running the tests..
```
SAWarning: The IN-predicate on "group.name" was invoked with an empty sequence.
This results in a contradiction, which nonetheless can be expensive to evaluate.
Consider alternative strategies for improved performance.
```
which doesn't seem like a big issue, and may just be test specific. But it might be worth tracking down to ensure we don't have an unnecessary expensive query that isn't guarded against in the action layer.
Maybe good-for-contribution?
| 2016-04-29T13:36:00 |
||
ckan/ckan | 3,020 | ckan__ckan-3020 | [
"3012"
] | aeb38632927b09bf6e702b61110f9edff839a951 | diff --git a/ckan/logic/action/update.py b/ckan/logic/action/update.py
--- a/ckan/logic/action/update.py
+++ b/ckan/logic/action/update.py
@@ -80,6 +80,11 @@ def resource_update(context, data_dict):
log.error('Could not find resource %s after all', id)
raise NotFound(_('Resource was not found.'))
+ # Persist the datastore_active extra if already present and not provided
+ if ('datastore_active' in resource.extras and
+ 'datastore_active' not in data_dict):
+ data_dict['datastore_active'] = resource.extras['datastore_active']
+
for plugin in plugins.PluginImplementations(plugins.IResourceController):
plugin.before_update(context, pkg_dict['resources'][n], data_dict)
| diff --git a/ckan/tests/logic/action/test_update.py b/ckan/tests/logic/action/test_update.py
--- a/ckan/tests/logic/action/test_update.py
+++ b/ckan/tests/logic/action/test_update.py
@@ -723,6 +723,77 @@ def test_extra_gets_deleted_on_extra_only_update(self):
assert_equals(res_returned['anotherfield'], 'second')
assert 'newfield' not in res_returned
+ def test_datastore_active_is_persisted_if_true_and_not_provided(self):
+ dataset = factories.Dataset()
+ resource = factories.Resource(package=dataset,
+ url='http://example.com',
+ datastore_active=True)
+
+ res_returned = helpers.call_action('resource_update',
+ id=resource['id'],
+ url='http://example.com',
+ name='Test')
+
+ assert_equals(res_returned['datastore_active'], True)
+
+ def test_datastore_active_is_persisted_if_false_and_not_provided(self):
+ dataset = factories.Dataset()
+ resource = factories.Resource(package=dataset,
+ url='http://example.com',
+ datastore_active=False)
+
+ res_returned = helpers.call_action('resource_update',
+ id=resource['id'],
+ url='http://example.com',
+ name='Test')
+
+ assert_equals(res_returned['datastore_active'], False)
+
+ def test_datastore_active_is_updated_if_false_and_provided(self):
+ dataset = factories.Dataset()
+ resource = factories.Resource(package=dataset,
+ url='http://example.com',
+ datastore_active=False)
+
+ res_returned = helpers.call_action('resource_update',
+ id=resource['id'],
+ url='http://example.com',
+ name='Test',
+ datastore_active=True)
+
+ assert_equals(res_returned['datastore_active'], True)
+
+ def test_datastore_active_is_updated_if_true_and_provided(self):
+ dataset = factories.Dataset()
+ resource = factories.Resource(package=dataset,
+ url='http://example.com',
+ datastore_active=True)
+
+ res_returned = helpers.call_action('resource_update',
+ id=resource['id'],
+ url='http://example.com',
+ name='Test',
+ datastore_active=False)
+
+ assert_equals(res_returned['datastore_active'], False)
+
+ def test_datastore_active_not_present_if_not_provided_and_not_datastore_plugin_enabled(self):
+
+ assert not p.plugin_loaded('datastore')
+
+ dataset = factories.Dataset()
+ resource = factories.Resource(package=dataset,
+ url='http://example.com',
+ )
+
+ res_returned = helpers.call_action('resource_update',
+ id=resource['id'],
+ url='http://example.com',
+ name='Test',
+ )
+
+ assert 'datastore_active' not in res_returned
+
class TestConfigOptionUpdate(object):
| "Data API" button disappears on resource page after empty update
ckan_version: 2.5.2
### Please describe the expected behaviour
When updating a resource without changing any fields, url or description, the green "Data API" button should still be there, because the data is still in the datastore (And API calls of this type confirms that it still is). Not sure if it is expected of the datapusher to trigger its update or not.
### Please describe the actual behaviour
The green "Data API" button disappears, as if data is now removed from the datastore for the particular resource. It would seem like CKAN thinks this triggers the datapusher hook, even though it does not (logs confirm it nothing happens).
### What steps can be taken to reproduce the issue?
Push .CSV file to the datastore using the web interface, then update the resource using either the `resource_update` API call or the web interface, without changing anything.
I'm curious to see if i'm the only one with this issue, i can reproduce over several CKAN installations running 2.5.2. If i need to post this in the datapusher github issue tracker instead, close this and i will post it there instead.
| @NicolaiMogensen this is indeed a bug.
The "Data API" button relies on the `datastore_active` resource extra being present and True. When editing via the frontend (or the API) this field is not passed and so it's dropped from the db. We need to persist the value when updating if the resource already has it and it's not provided.
PR incoming
| 2016-05-13T12:04:20 |
ckan/ckan | 3,027 | ckan__ckan-3027 | [
"2331",
"2331"
] | 32375f2d07635db89606cfd150886b80e67a58b4 | diff --git a/ckan/logic/action/create.py b/ckan/logic/action/create.py
--- a/ckan/logic/action/create.py
+++ b/ckan/logic/action/create.py
@@ -300,8 +300,10 @@ def resource_create(context, data_dict):
_get_action('package_update')(context, pkg_dict)
context.pop('defer_commit')
except ValidationError, e:
- errors = e.error_dict['resources'][-1]
- raise ValidationError(errors)
+ try:
+ raise ValidationError(e.error_dict['resources'][-1])
+ except (KeyError, IndexError):
+ raise ValidationError(e.error_dict)
## Get out resource_id resource from model as it will not appear in
## package_show until after commit
diff --git a/ckan/logic/action/update.py b/ckan/logic/action/update.py
--- a/ckan/logic/action/update.py
+++ b/ckan/logic/action/update.py
@@ -98,8 +98,10 @@ def resource_update(context, data_dict):
updated_pkg_dict = _get_action('package_update')(context, pkg_dict)
context.pop('defer_commit')
except ValidationError, e:
- errors = e.error_dict['resources'][n]
- raise ValidationError(errors)
+ try:
+ raise ValidationError(e.error_dict['resources'][-1])
+ except (KeyError, IndexError):
+ raise ValidationError(e.error_dict)
upload.upload(id, uploader.get_max_resource_size())
model.repo.commit()
| KeyError 'resources' in error handler
https://github.com/ckan/ckan/blob/master/ckan/logic/action/create.py#L305
Extension-code dependent, it can happen that a perfectly normal validation error leads to this block and CKAN throws an exception concerning a missing resources key when there is no resources key in the dictionary. This can lead to serious confusion for a developer.
KeyError 'resources' in error handler
https://github.com/ckan/ckan/blob/master/ckan/logic/action/create.py#L305
Extension-code dependent, it can happen that a perfectly normal validation error leads to this block and CKAN throws an exception concerning a missing resources key when there is no resources key in the dictionary. This can lead to serious confusion for a developer.
| I went with a hacky patch for this; if there's not a resource error, there must be a package validation error so assume that and dump the error dict for good measure https://github.com/datagovau/ckanext-agls/blob/master/patches/resource-edit-error.patch
At least it doesn't just Error 500 on the user then.
Yes I've run into this too.
What we need is to check for errors outside that last resource and include a message something like what @maxious suggests. "Can't save resource because of validation errors in the dataset/other resource fields: {field_names}"
Anyone care to submit a PR?
Ugh, I just hit this, it's pretty horrible. PR incoming
PR here https://github.com/ckan/ckan/issues/3027
I went with a hacky patch for this; if there's not a resource error, there must be a package validation error so assume that and dump the error dict for good measure https://github.com/datagovau/ckanext-agls/blob/master/patches/resource-edit-error.patch
At least it doesn't just Error 500 on the user then.
Yes I've run into this too.
What we need is to check for errors outside that last resource and include a message something like what @maxious suggests. "Can't save resource because of validation errors in the dataset/other resource fields: {field_names}"
Anyone care to submit a PR?
Ugh, I just hit this, it's pretty horrible. PR incoming
PR here https://github.com/ckan/ckan/issues/3027
| 2016-05-17T17:05:59 |
|
ckan/ckan | 3,030 | ckan__ckan-3030 | [
"2394"
] | 32375f2d07635db89606cfd150886b80e67a58b4 | diff --git a/ckan/controllers/user.py b/ckan/controllers/user.py
--- a/ckan/controllers/user.py
+++ b/ckan/controllers/user.py
@@ -35,6 +35,15 @@
unflatten = dictization_functions.unflatten
+def set_repoze_user(user_id):
+ '''Set the repoze.who cookie to match a given user_id'''
+ if 'repoze.who.plugins' in request.environ:
+ rememberer = request.environ['repoze.who.plugins']['friendlyform']
+ identity = {'repoze.who.userid': user_id}
+ response.headerlist += rememberer.remember(request.environ,
+ identity)
+
+
class UserController(base.BaseController):
def __before__(self, action, **env):
base.BaseController.__before__(self, action, **env)
@@ -245,10 +254,7 @@ def _save_new(self, context):
return self.new(data_dict, errors, error_summary)
if not c.user:
# log the user in programatically
- rememberer = request.environ['repoze.who.plugins']['friendlyform']
- identity = {'repoze.who.userid': data_dict['name']}
- response.headerlist += rememberer.remember(request.environ,
- identity)
+ set_repoze_user(data_dict['name'])
h.redirect_to(controller='user', action='me', __ckan_no_root=True)
else:
# #1799 User has managed to register whilst logged in - warn user
@@ -321,6 +327,12 @@ def edit(self, id=None, data=None, errors=None, error_summary=None):
def _save_edit(self, id, context):
try:
+ if id in (c.userobj.id, c.userobj.name):
+ current_user = True
+ else:
+ current_user = False
+ old_username = c.userobj.name
+
data_dict = logic.clean_dict(unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
@@ -343,6 +355,11 @@ def _save_edit(self, id, context):
user = get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
+
+ if current_user and data_dict['name'] != old_username:
+ # Changing currently logged in user's name.
+ # Update repoze.who cookie to match
+ set_repoze_user(data_dict['name'])
h.redirect_to(controller='user', action='read', id=user['name'])
except NotAuthorized:
abort(403, _('Unauthorized to edit user %s') % id)
| diff --git a/ckan/tests/controllers/test_user.py b/ckan/tests/controllers/test_user.py
--- a/ckan/tests/controllers/test_user.py
+++ b/ckan/tests/controllers/test_user.py
@@ -289,6 +289,100 @@ def test_email_change_with_password(self):
response = submit_and_follow(app, form, env, 'save')
assert_true('Profile updated' in response)
+ def test_edit_user_logged_in_username_change(self):
+
+ user_pass = 'pass'
+ user = factories.User(password=user_pass)
+ app = self._get_test_app()
+
+ # Have to do an actual login as this test relys on repoze cookie handling.
+ # get the form
+ response = app.get('/user/login')
+ # ...it's the second one
+ login_form = response.forms[1]
+ # fill it in
+ login_form['login'] = user['name']
+ login_form['password'] = user_pass
+ # submit it
+ login_form.submit()
+
+ # Now the cookie is set, run the test
+ response = app.get(
+ url=url_for(controller='user', action='edit'),
+ )
+ # existing values in the form
+ form = response.forms['user-edit-form']
+
+ # new values
+ form['name'] = 'new-name'
+ response = submit_and_follow(app, form, name='save')
+ response = helpers.webtest_maybe_follow(response)
+
+ expected_url = url_for(controller='user', action='read', id='new-name')
+ assert response.request.path == expected_url
+
+ def test_edit_user_logged_in_username_change_by_name(self):
+ user_pass = 'pass'
+ user = factories.User(password=user_pass)
+ app = self._get_test_app()
+
+ # Have to do an actual login as this test relys on repoze cookie handling.
+ # get the form
+ response = app.get('/user/login')
+ # ...it's the second one
+ login_form = response.forms[1]
+ # fill it in
+ login_form['login'] = user['name']
+ login_form['password'] = user_pass
+ # submit it
+ login_form.submit()
+
+ # Now the cookie is set, run the test
+ response = app.get(
+ url=url_for(controller='user', action='edit', id=user['name']),
+ )
+ # existing values in the form
+ form = response.forms['user-edit-form']
+
+ # new values
+ form['name'] = 'new-name'
+ response = submit_and_follow(app, form, name='save')
+ response = helpers.webtest_maybe_follow(response)
+
+ expected_url = url_for(controller='user', action='read', id='new-name')
+ assert response.request.path == expected_url
+
+ def test_edit_user_logged_in_username_change_by_id(self):
+ user_pass = 'pass'
+ user = factories.User(password=user_pass)
+ app = self._get_test_app()
+
+ # Have to do an actual login as this test relys on repoze cookie handling.
+ # get the form
+ response = app.get('/user/login')
+ # ...it's the second one
+ login_form = response.forms[1]
+ # fill it in
+ login_form['login'] = user['name']
+ login_form['password'] = user_pass
+ # submit it
+ login_form.submit()
+
+ # Now the cookie is set, run the test
+ response = app.get(
+ url=url_for(controller='user', action='edit', id=user['id']),
+ )
+ # existing values in the form
+ form = response.forms['user-edit-form']
+
+ # new values
+ form['name'] = 'new-name'
+ response = submit_and_follow(app, form, name='save')
+ response = helpers.webtest_maybe_follow(response)
+
+ expected_url = url_for(controller='user', action='read', id='new-name')
+ assert response.request.path == expected_url
+
def test_perform_reset_for_key_change(self):
password = 'password'
params = {'password1': password, 'password2': password}
diff --git a/ckan/tests/helpers.py b/ckan/tests/helpers.py
--- a/ckan/tests/helpers.py
+++ b/ckan/tests/helpers.py
@@ -188,6 +188,8 @@ def _apply_config_changes(cls, cfg):
def setup(self):
'''Reset the database and clear the search indexes.'''
reset_db()
+ if hasattr(self, '_test_app'):
+ self._test_app.reset()
search.clear_all()
@classmethod
| Changing your user name produces an error and logs you out
If you edit your user and change your user name, it says "Profile updated" (which is good - it worked) and "Unauthorized to edit a user" and you are now logged out.
All the other user fields work ok.
| BTW there is a test for this in https://github.com/ckan/ckan/pull/2393 with the name field commented out
@davidread do you have time to have a look at investigate a bit more what might be happening? cheers
I've got a bit of a backlog, and we don't use this feature in DGU, so I'd rather not, unless it can wait a couple of weeks
So I believe I've tracked this down to the [BaseController](https://github.com/ckan/ckan/blob/master/ckan/lib/base.py#L325), which is superseding the redirect_to call in _save_edit. The comment in there says the user needs to be forced to logout when they update their name (I'm not sure if this is correct) which is what is causing the issue.
I think this is redirecting back to the /user/edit/<username> page after performing the logout function which is what is generating the 401 error and then forcing the user to the login page since they're currently unauthenticated.
As for a fix, I'm not sure what the best way to approach this is, but I can probably dig around this week and see what I come up with.
Good spot finding the relevant code.
The authtkt cookie contains the user's name, so when you change your user.name in the database, then the cookie is out of date. Then when it gets to that code in BaseController you referenced, it finds that user name isn't in the database and therefore the best thing is to log you out. (The cookie's user name is read by the AuthKit middleware and that sets request.environ['REMOTE_USER'].)
The best solution would be to tell authtkt to change the cookie to have the new username.
Failing that, CKAN could just warn the user that they will have to log-in again if they change their username.
| 2016-05-19T03:30:03 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.