Dataset columns (one record per pull request):
- repo: string (856 distinct values)
- pull_number: int64 (3 to 127k)
- instance_id: string (length 12 to 58)
- issue_numbers: sequence of strings (length 1 to 5)
- base_commit: string (length 40)
- patch: string (length 67 to 1.54M)
- test_patch: string (length 0 to 107M)
- problem_statement: string (length 3 to 307k)
- hints_text: string (length 0 to 908k)
- created_at: timestamp[s]
getredash/redash
732
getredash__redash-732
[ "730" ]
cb4fbf81a206a927a6a0391a46daf682ad9904ee
diff --git a/migrations/0017_add_organization.py b/migrations/0017_add_organization.py --- a/migrations/0017_add_organization.py +++ b/migrations/0017_add_organization.py @@ -9,7 +9,7 @@ Organization.create_table() default_org = Organization.create(name="Default", slug='default', settings={ - Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN + Organization.SETTING_GOOGLE_APPS_DOMAINS: list(settings.GOOGLE_APPS_DOMAIN) }) column = Group.org
Migration Error(0017_add_organization.py) Migration Error with current master branch. ``` $ vagrant@vagrant-ubuntu-trusty-64:/opt/redash/current$ export PYTHONPATH=. && bin/run python migrations/0017_add_organization.py ``` ### output ``` [2016-01-05 09:38:42,746][PID:1822][WARNING][redash.query_runner] Oracle query runner enabled but not supported, not registering. Either disable or install missing dependencies. Traceback (most recent call last): File "migrations/0017_add_organization.py", line 12, in <module> Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 4001, in create inst.save(force_insert=True) File "/opt/redash/current/redash/models.py", line 93, in save super(BaseModel, self).save(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 4148, in save pk_from_cursor = self.insert(**field_dict).execute() File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2858, in execute cursor = self._execute() File "/opt/redash/current/redash/metrics/database.py", line 50, in metered_execute result = real_execute(self, *args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2370, in _execute sql, params = self.sql() File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2832, in sql return self.compiler().generate_insert(self) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1733, in generate_insert return self.build_query(clauses, alias_map) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1542, in build_query return self.parse_node(Clause(*clauses), alias_map) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node sql, params, unknown = self._parse(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse sql, params = self._parse_map[node_type](node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause node.nodes, alias_map, conv, node.glue) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list node_sql, node_params = self.parse_node(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node sql, params, unknown = self._parse(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse sql, params = self._parse_map[node_type](node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause node.nodes, alias_map, conv, node.glue) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list node_sql, node_params = self.parse_node(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node sql, params, unknown = self._parse(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse sql, params = self._parse_map[node_type](node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause node.nodes, alias_map, conv, node.glue) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list node_sql, node_params = self.parse_node(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node sql, params, unknown = self._parse(node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse sql, params 
= self._parse_map[node_type](node, alias_map, conv) File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1394, in _parse_param params = [node.conv(node.value)] File "/opt/redash/current/redash/models.py", line 68, in db_value return json.dumps(value) File "/usr/lib/python2.7/json/__init__.py", line 243, in dumps return _default_encoder.encode(obj) File "/usr/lib/python2.7/json/encoder.py", line 207, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode return _iterencode(o, 0) File "/usr/lib/python2.7/json/encoder.py", line 184, in default raise TypeError(repr(o) + " is not JSON serializable") TypeError: set([]) is not JSON serializable ```
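The failure reproduces in isolation: Python's `json` module cannot serialize a `set`, which is what the migration passed in, and the `list()` cast in the patch is the fix. A minimal sketch (the domain values here are made up):

```python
import json

google_apps_domains = {"example.com", "example.org"}  # a set, like settings.GOOGLE_APPS_DOMAIN

try:
    json.dumps({"google_apps_domains": google_apps_domains})
except TypeError as e:
    print(e)  # ... is not JSON serializable

# The patch's fix: cast the set to a list before it reaches the JSON field.
print(json.dumps({"google_apps_domains": list(google_apps_domains)}))
```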
2016-01-05T10:46:27
getredash/redash
740
getredash__redash-740
[ "738" ]
be4c59e73ddfab2536fa2ed88757788ec7ecd69d
diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -306,8 +306,9 @@ def version_check(): def base_url(org): - if org.domain: - return 'https://{}'.format(org.domain) + if settings.MULTI_ORG: + return "https://{}/{}".format(settings.HOST, org.slug) + return settings.HOST
Alert send error (AttributeError: 'Organization' object has no attribute 'domain'). Hello. Alert send error. ``` 01:51:24 worker.1 | [2016-01-07 01:51:24,764: ERROR/MainProcess] Task redash.tasks.check_alerts_for_query[6fd2a1aa-bb2b-4054-a6a1-2487c0bae30c] raised unexpected: AttributeError("'Organization' object has no attribute 'domain'",) 01:51:24 worker.1 | Traceback (most recent call last): 01:51:24 worker.1 | File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task 01:51:24 worker.1 | R = retval = fun(*args, **kwargs) 01:51:24 worker.1 | File "/opt/redash/current/redash/tasks.py", line 31, in __call__ 01:51:24 worker.1 | return super(BaseTask, self).__call__(*args, **kwargs) 01:51:24 worker.1 | File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__ 01:51:24 worker.1 | return self.run(*args, **kwargs) 01:51:24 worker.1 | File "/opt/redash/current/redash/tasks.py", line 338, in check_alerts_for_query 01:51:24 worker.1 | """.format(host=base_url(alert.query.org), alert_id=alert.id, query_id=query.id) 01:51:24 worker.1 | File "/opt/redash/current/redash/tasks.py", line 309, in base_url 01:51:24 worker.1 | if org.domain: 01:51:24 worker.1 | AttributeError: 'Organization' object has no attribute 'domain' ```
2016-01-07T09:46:52
getredash/redash
784
getredash__redash-784
[ "783" ]
24137e87fdcea7bf54f8f5c5baca3643e98fed96
diff --git a/redash/utils.py b/redash/utils.py --- a/redash/utils.py +++ b/redash/utils.py @@ -53,9 +53,12 @@ def default(self, o): if isinstance(o, decimal.Decimal): return float(o) - if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): + if isinstance(o, (datetime.date, datetime.time)): return o.isoformat() - + + if isinstance(o, datetime.timedelta): + return str(o) + super(JSONEncoder, self).default(o)
AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' On the latest 0.9.2-rc: ``` [2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError("'datetime.timedelta' object has no attribute 'isoformat'",) Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task R = retval = fun(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 31, in __call__ return super(BaseTask, self).__call__(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__ return self.run(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 286, in execute_query data, error = query_runner.run_query(annotated_query) File "/opt/redash/redash/query_runner/pg.py", line 132, in run_query json_data = json.dumps(data, cls=JSONEncoder) File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps sort_keys=sort_keys, **kw).encode(obj) File "/usr/lib/python2.7/json/encoder.py", line 207, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode return _iterencode(o, 0) File "/opt/redash/redash/utils.py", line 57, in default return o.isoformat() AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' ```
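Of the datetime types, only `date` and `time` expose `isoformat()`; `timedelta` does not, so the patch splits it out and renders it with `str()`. A condensed, self-contained sketch of the patched encoder:

```python
import datetime
import json

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        # date/time serialize via isoformat(); timedelta has no isoformat(),
        # so fall back to str() as the patch does.
        if isinstance(o, (datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return str(o)
        return super(JSONEncoder, self).default(o)

print(json.dumps({"elapsed": datetime.timedelta(days=1, minutes=30)}, cls=JSONEncoder))
# {"elapsed": "1 day, 0:30:00"}
```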
2016-01-21T14:37:59
getredash/redash
831
getredash__redash-831
[ "801" ]
868263315bf55aded37e809038744403360bfd07
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py --- a/redash/handlers/query_results.py +++ b/redash/handlers/query_results.py @@ -5,7 +5,7 @@ from flask import make_response, request from flask.ext.restful import abort - +import xlsxwriter from redash import models, settings, utils from redash.wsgi import api from redash.tasks import QueryTask, record_event @@ -105,6 +105,8 @@ def get(self, query_id=None, query_result_id=None, filetype='json'): if filetype == 'json': response = self.make_json_response(query_result) + elif filetype == 'xlsx': + response = self.make_excel_response(query_result) else: response = self.make_csv_response(query_result) @@ -137,6 +139,28 @@ def make_csv_response(query_result): headers = {'Content-Type': "text/csv; charset=UTF-8"} return make_response(s.getvalue(), 200, headers) + @staticmethod + def make_excel_response(query_result): + s = cStringIO.StringIO() + + query_data = json.loads(query_result.data) + book = xlsxwriter.Workbook(s) + sheet = book.add_worksheet("result") + + column_names = [] + for (c, col) in enumerate(query_data['columns']): + sheet.write(0, c, col['name']) + column_names.append(col['name']) + + for (r, row) in enumerate(query_data['rows']): + for (c, name) in enumerate(column_names): + sheet.write(r+1, c, row[name]) + + book.close() + + headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"} + return make_response(s.getvalue(), 200, headers) + api.add_org_resource(QueryResultListAPI, '/api/query_results', endpoint='query_results') api.add_org_resource(QueryResultAPI,
diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -163,8 +163,9 @@ def test_interval_schedule_that_doesnt_need_reschedule(self): def test_exact_time_that_needs_reschedule(self): now = datetime.datetime.now() yesterday = now - datetime.timedelta(days=1) - schedule = "{:02d}:00".format(now.hour - 3) - self.assertTrue(models.should_schedule_next(yesterday, now, schedule)) + scheduled_datetime = now - datetime.timedelta(hours=3) + scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) + self.assertTrue(models.should_schedule_next(yesterday, now, scheduled_time)) def test_exact_time_that_doesnt_need_reschedule(self): now = date_parse("2015-10-16 20:10")
Download data sheets as an Excel file. A CSV file with UTF-8 encoding is hard to use in Excel, so I want to download data sheets as an .xlsx file.
:+1:
2016-02-15T14:50:08
getredash/redash
849
getredash__redash-849
[ "847" ]
d74442184e30a8aeb19edacb39e1ca22cbb523c1
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py --- a/redash/query_runner/presto.py +++ b/redash/query_runner/presto.py @@ -6,6 +6,8 @@ import logging logger = logging.getLogger(__name__) +from collections import defaultdict + try: from pyhive import presto enabled = True @@ -76,15 +78,12 @@ def run_query(self, query): cursor = connection.cursor() + try: cursor.execute(query) - columns_data = [(row[0], row[1]) for row in cursor.description] - - columns = [{'name': col[0], - 'friendly_name': col[0], - 'type': PRESTO_TYPES_MAPPING.get(col[1], None)} for col in columns_data] - - rows = [dict(zip(([c[0] for c in columns_data]), r)) for i, r in enumerate(cursor.fetchall())] + column_tuples = [(i[0], PRESTO_TYPES_MAPPING.get(i[1], None)) for i in cursor.description] + columns = self.fetch_columns(column_tuples) + rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())] data = {'columns': columns, 'rows': rows} json_data = json.dumps(data, cls=JSONEncoder) error = None
Presto query runner merges columns with the same name. The query `SELECT 'foo' AS same_name, 'not foo' AS same_name` against a Presto data source generates surprising results: ('foo', 'foo') instead of ('foo', 'not foo'). The issue is in https://github.com/getredash/redash/blob/d74442184e30a8aeb19edacb39e1ca22cbb523c1/redash/query_runner/presto.py#L87. From experimenting with other data sources, it looks like redash itself supports showing result sets that have duplicated column names.
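The patch routes column handling through `fetch_columns()` on the base runner, which disambiguates repeated names so that row dicts keyed by column name no longer collide. A sketch of that idea (not redash's exact implementation):

```python
def fetch_columns(column_tuples):
    # Suffix repeated names with a counter so 'same_name' twice becomes
    # 'same_name' and 'same_name1', and dict(zip(names, row)) keeps both values.
    seen, columns, duplicates = set(), [], 0
    for name, col_type in column_tuples:
        if name in seen:
            duplicates += 1
            name = "{}{}".format(name, duplicates)
        seen.add(name)
        columns.append({'name': name, 'friendly_name': name, 'type': col_type})
    return columns

cols = fetch_columns([('same_name', 'string'), ('same_name', 'string')])
print([c['name'] for c in cols])  # ['same_name', 'same_name1']
```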
2016-02-23T23:41:51
getredash/redash
909
getredash__redash-909
[ "908" ]
536d7595c590f14129ee41ae5b18919ee9daf51d
diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py --- a/redash/utils/configuration.py +++ b/redash/utils/configuration.py @@ -38,7 +38,7 @@ def iteritems(self): return self._config.iteritems() def to_dict(self, mask_secrets=False): - if (mask_secrets is False or 'secret' not in self.schema): + if mask_secrets is False or 'secret' not in self.schema: return self._config config = self._config.copy() @@ -53,7 +53,7 @@ def update(self, new_config): config = {} for k, v in new_config.iteritems(): - if k in self.schema['secret'] and v == SECRET_PLACEHOLDER: + if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER: config[k] = self[k] else: config[k] = v
diff --git a/tests/test_configuration.py b/tests/test_configuration.py --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -74,3 +74,10 @@ def test_doesnt_leave_leftovers(self): self.assertEqual(container['a'], 1) self.assertEqual('test', container['b']) self.assertNotIn('e', container) + + def test_works_for_schema_without_secret(self): + secretless = configuration_schema.copy() + secretless.pop('secret') + container = ConfigurationContainer({'a': 1, 'b': 'test', 'e': 3}, secretless) + container.update({'a': 2}) + self.assertEqual(container['a'], 2)
Error on adding modules to python datasource I'm trying to add a module to a python datasource, but it's failing with this traceback ``` Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request rv = self.dispatch_request() File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper resp = resource(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/flask_login.py", line 792, in decorated_view return func(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view return self.dispatch_request(*args, **kwargs) File "/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py", line 19, in dispatch_request return super(BaseResource, self).dispatch_request(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request resp = meth(*args, **kwargs) File "/opt/redash/redash.0.9.2.b1536/redash/permissions.py", line 40, in decorated return fn(*args, **kwargs) File "/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py", line 38, in post data_source.options.update(req['options']) File "/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py", line 56, in update if k in self.schema['secret'] and v == SECRET_PLACEHOLDER: KeyError: 'secret' ```
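The traceback bottoms out at a plain `self.schema['secret']` lookup; the Python data source's schema defines no `secret` key, so the patch switches to `.get()` with an empty default. The failure and fix in isolation:

```python
schema = {"type": "object", "properties": {"modules": {"type": "string"}}}  # no 'secret' key

try:
    "modules" in schema["secret"]
except KeyError as e:
    print(e)  # 'secret'

# The patch's fix: treat a missing 'secret' list as empty.
print("modules" in schema.get("secret", []))  # False, no exception
```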
2016-03-10T09:57:00
getredash/redash
919
getredash__redash-919
[ "916", "916" ]
2f35c1ea2bd631c71b697f83b339cf7e5a0539f2
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py --- a/redash/query_runner/sqlite.py +++ b/redash/query_runner/sqlite.py @@ -3,14 +3,14 @@ import sqlite3 import sys -from redash.query_runner import BaseQueryRunner +from redash.query_runner import BaseSQLQueryRunner from redash.query_runner import register from redash.utils import JSONEncoder logger = logging.getLogger(__name__) -class Sqlite(BaseQueryRunner): +class Sqlite(BaseSQLQueryRunner): @classmethod def configuration_schema(cls): return { @@ -33,7 +33,7 @@ def __init__(self, configuration): self._dbpath = self.configuration['dbpath'] - def get_schema(self): + def _get_tables(self, schema): query_table = "select tbl_name from sqlite_master where type='table'" query_columns = "PRAGMA table_info(%s)" @@ -44,7 +44,6 @@ def get_schema(self): results = json.loads(results) - schema = {} for row in results['rows']: table_name = row['tbl_name'] schema[table_name] = {'name': table_name, 'columns': []} diff --git a/redash/query_runner/vertica.py b/redash/query_runner/vertica.py --- a/redash/query_runner/vertica.py +++ b/redash/query_runner/vertica.py @@ -28,7 +28,7 @@ } -class Vertica(BaseQueryRunner): +class Vertica(BaseSQLQueryRunner): @classmethod def configuration_schema(cls): return { @@ -68,7 +68,7 @@ def enabled(cls): def __init__(self, configuration): super(Vertica, self).__init__(configuration) - def get_schema(self): + def _get_tables(self, schema): query = """ Select table_schema, table_name, column_name from columns where is_system_table=false; """ @@ -80,7 +80,6 @@ def get_schema(self): results = json.loads(results) - schema = {} for row in results['rows']: table_name = '{}.{}'.format(row['table_schema'], row['table_name'])
Vertica doesn't show schema. The Vertica query runner returns an error due to an incompatible get_schema method.
2016-03-13T15:03:39
getredash/redash
998
getredash__redash-998
[ "994" ]
2655eec907ed3ecc526447ecdf2ba6e74d19f5f3
diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py --- a/redash/handlers/embed.py +++ b/redash/handlers/embed.py @@ -41,7 +41,6 @@ def embed(query_id, visualization_id, org_slug=None): vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at')) return render_template("embed.html", - client_config=json_dumps(client_config), visualization=json_dumps(vis), query_result=json_dumps(qr))
Embed query description appearing larger than visualization name. The query description appears larger than the visualization name: ![image](https://cloud.githubusercontent.com/assets/7199904/14591358/6bf74f9e-0553-11e6-91c4-70912b25025b.png)
2016-04-18T15:38:31
getredash/redash
1,002
getredash__redash-1002
[ "971", "971" ]
0908e222a6ae538a777e16759514b2eea161bd9e
diff --git a/redash/handlers/base.py b/redash/handlers/base.py --- a/redash/handlers/base.py +++ b/redash/handlers/base.py @@ -5,7 +5,7 @@ from peewee import DoesNotExist from redash import settings -from redash.tasks import record_event +from redash.tasks import record_event as record_event_task from redash.models import ApiUser from redash.authentication import current_org @@ -33,26 +33,30 @@ def current_org(self): return current_org._get_current_object() def record_event(self, options): - if isinstance(self.current_user, ApiUser): - options.update({ - 'api_key': self.current_user.name, - 'org_id': self.current_org.id - }) - else: - options.update({ - 'user_id': self.current_user.id, - 'org_id': self.current_org.id - }) + record_event(self.current_org, self.current_user, options) + +def record_event(org, user, options): + if isinstance(user, ApiUser): + options.update({ + 'api_key': user.name, + 'org_id': org.id + }) + else: options.update({ - 'user_agent': request.user_agent.string, - 'ip': request.remote_addr + 'user_id': user.id, + 'org_id': org.id }) - if 'timestamp' not in options: - options['timestamp'] = int(time.time()) + options.update({ + 'user_agent': request.user_agent.string, + 'ip': request.remote_addr + }) + + if 'timestamp' not in options: + options['timestamp'] = int(time.time()) - record_event.delay(options) + record_event_task.delay(options) def require_fields(req, fields): diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py --- a/redash/handlers/embed.py +++ b/redash/handlers/embed.py @@ -9,7 +9,7 @@ from redash import serializers from redash.utils import json_dumps from redash.handlers import routes -from redash.handlers.base import org_scoped_rule +from redash.handlers.base import org_scoped_rule, record_event from redash.permissions import require_access, view_only from authentication import current_org @@ -17,7 +17,6 @@ @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET']) @login_required def embed(query_id, visualization_id, org_slug=None): - # TODO: add event for embed access query = models.Query.get_by_id_and_org(query_id, current_org) require_access(query.groups, current_user, view_only) vis = query.visualizations.where(models.Visualization.id == visualization_id).first() @@ -33,6 +32,15 @@ def embed(query_id, visualization_id, org_slug=None): else: abort(404, message="Visualization not found.") + record_event(current_org, current_user, { + 'action': 'view', + 'object_id': visualization_id, + 'object_type': 'visualization', + 'query_id': query_id, + 'embed': True, + 'referer': request.headers.get('Referer') + }) + client_config = {} client_config.update(settings.COMMON_CLIENT_CONFIG) @@ -65,6 +73,15 @@ def public_dashboard(token, org_slug=None): 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate' } + record_event(current_org, current_user, { + 'action': 'view', + 'object_id': dashboard.id, + 'object_type': 'dashboard', + 'public': True, + 'headless': 'embed' in request.args, + 'referer': request.headers.get('Referer') + }) + response = render_template("public.html", headless='embed' in request.args, user=json.dumps(user),
Add events to track embeds usage
(Remember to record the http-referrer as well)
2016-04-20T13:23:30
getredash/redash
1,003
getredash__redash-1003
[ "986" ]
ff9336907e16314fab2212b54f5494d2e46dd17b
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -649,11 +649,13 @@ def search(cls, term, groups): where &= cls.is_archived == False - return cls.select()\ - .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)) \ - .where(where) \ - .where(DataSourceGroup.group << groups)\ - .order_by(cls.created_at.desc()) + query_ids = cls.select(peewee.fn.Distinct(cls.id))\ + .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)) \ + .where(where) \ + .where(DataSourceGroup.group << groups) + + return cls.select().where(cls.id << query_ids) + @classmethod def recent(cls, groups, user_id=None, limit=20):
diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -90,6 +90,18 @@ def test_search_respects_groups(self): self.assertNotIn(q2, queries) self.assertNotIn(q3, queries) + def test_returns_each_query_only_once(self): + other_group = self.factory.create_group() + second_group = self.factory.create_group() + ds = self.factory.create_data_source(group=other_group) + ds.add_group(second_group, False) + + q1 = self.factory.create_query(description="Testing search", data_source=ds) + + queries = list(models.Query.search("Testing", [self.factory.default_group, other_group, second_group])) + + self.assertEqual(1, len(queries)) + def test_save_creates_default_visualization(self): q = self.factory.create_query() self.assertEquals(q.visualizations.count(), 1)
Getting duplicates in the search box. Even though there is only one query, the query box shows duplicates when adding a query to a dashboard or creating a new alert.
Seeing this behaviour also http://puu.sh/omMLA/079c209fa1.png I think it's because of the join with `DataSourceGroup` [here](https://github.com/getredash/redash/blob/master/redash/models.py#L653).
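The hint's diagnosis matches the patch: the join emits one row per (query, group) pair, so a user in several groups with access to the same data source sees the query several times. The patch first selects distinct query ids and only then fetches by id; the effect, reduced to plain Python:

```python
# One row per (query, group) pair comes back from the join:
joined_rows = [
    {"query_id": 1, "group_id": 10},
    {"query_id": 1, "group_id": 11},
    {"query_id": 1, "group_id": 12},
]

# The patch's approach, conceptually: collapse to distinct ids, then fetch by id.
distinct_ids = {row["query_id"] for row in joined_rows}
print(sorted(distinct_ids))  # [1] - the query is returned only once
```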
2016-04-20T14:13:44
getredash/redash
1,069
getredash__redash-1069
[ "810" ]
52558043ee072c5e981753dac6f057988f401cd5
diff --git a/migrations/0024_add_options_to_query.py b/migrations/0024_add_options_to_query.py new file mode 100644 --- /dev/null +++ b/migrations/0024_add_options_to_query.py @@ -0,0 +1,10 @@ +from redash.models import db, Query +from playhouse.migrate import PostgresqlMigrator, migrate + +if __name__ == '__main__': + migrator = PostgresqlMigrator(db.database) + + with db.database.transaction(): + migrate( + migrator.add_column('queries', 'options', Query.options), + ) diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -86,8 +86,6 @@ def post(self, query_id): for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']: query_def.pop(field, None) - # TODO(@arikfr): after running a query it updates all relevant queries with the new result. So is this really - # needed? if 'latest_query_data_id' in query_def: query_def['latest_query_data'] = query_def.pop('latest_query_data_id') diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -72,6 +72,8 @@ def db_value(self, value): return json.dumps(value) def python_value(self, value): + if not value: + return value return json.loads(value) @@ -585,11 +587,11 @@ class Query(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin): query = peewee.TextField() query_hash = peewee.CharField(max_length=32) api_key = peewee.CharField(max_length=40) - user_email = peewee.CharField(max_length=360, null=True) user = peewee.ForeignKeyField(User) last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries") is_archived = peewee.BooleanField(default=False, index=True) schedule = peewee.CharField(max_length=10, null=True) + options = JSONField(default={}) class Meta: db_table = 'queries' @@ -607,7 +609,8 @@ def to_dict(self, with_stats=False, with_visualizations=False, with_user=True, w 'is_archived': self.is_archived, 'updated_at': self.updated_at, 'created_at': self.created_at, - 'data_source_id': self.data_source_id + 'data_source_id': self.data_source_id, + 'options': self.options } if with_user: @@ -833,7 +836,6 @@ class Dashboard(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin): org = peewee.ForeignKeyField(Organization, related_name="dashboards") slug = peewee.CharField(max_length=140, index=True) name = peewee.CharField(max_length=100) - user_email = peewee.CharField(max_length=360, null=True) user = peewee.ForeignKeyField(User) layout = peewee.TextField() dashboard_filters_enabled = peewee.BooleanField(default=False)
Feature request: refresh button on the chart ![chart_refresh_button](https://cloud.githubusercontent.com/assets/16562082/12780895/db830522-ca81-11e5-9ea2-befdadf5c2f8.png)
2016-05-22T12:27:04
getredash/redash
1,084
getredash__redash-1084
[ "1049" ]
6edb0ca8ecd179a39e2c5864361f2e7bfa3053b5
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -755,7 +755,8 @@ def all(cls, groups): .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\ .where(DataSourceGroup.group << groups)\ .switch(Alert)\ - .join(User) + .join(User)\ + .group_by(Alert, User, Query) @classmethod def get_by_id_and_org(cls, id, org):
diff --git a/tests/models/test_alerts.py b/tests/models/test_alerts.py --- a/tests/models/test_alerts.py +++ b/tests/models/test_alerts.py @@ -25,3 +25,13 @@ def test_returns_all_alerts_for_given_groups(self): alerts = Alert.all(groups=[group]) self.assertNotIn(alert1, alerts) self.assertIn(alert2, alerts) + + def test_return_each_alert_only_once(self): + group = self.factory.create_group() + self.factory.data_source.add_group(group) + + alert = self.factory.create_alert() + + alerts = Alert.all(groups=[self.factory.default_group, group]) + self.assertEqual(1, len(list(alerts))) + self.assertIn(alert, alerts)
Duplicate alerts ### Issue Summary Alerts are duplicated when listing them in the UI. Maybe related to https://github.com/getredash/redash/issues/986? ### Steps to Reproduce 1. Create a user who belongs to multiple groups 2. Create an alert Screenshot: http://puu.sh/oMLK5/9cfc8c6134.png (only 2 records are present in the DB) ### Technical details: - Redash Version: 0.10.0+b1774 - Browser/OS: Latest Chrome / OS X - How did you install Redash: AMI
2016-05-30T11:40:31
getredash/redash
1,089
getredash__redash-1089
[ "961" ]
ad8676df2ea61ac4430f5e36941d49f318c1eebf
diff --git a/redash/query_runner/mssql.py b/redash/query_runner/mssql.py --- a/redash/query_runner/mssql.py +++ b/redash/query_runner/mssql.py @@ -1,6 +1,7 @@ import json import logging import sys +import uuid from redash.query_runner import * from redash.utils import JSONEncoder @@ -22,6 +23,12 @@ 5: TYPE_FLOAT, } +class MSSQLJSONEncoder(JSONEncoder): + def default(self, o): + if isinstance(o, uuid.UUID): + return str(o) + return super(MSSQLJSONEncoder, self).default(o) + class SqlServer(BaseSQLQueryRunner): @classmethod def configuration_schema(cls): @@ -123,7 +130,7 @@ def run_query(self, query): rows = [dict(zip((c['name'] for c in columns), row)) for row in data] data = {'columns': columns, 'rows': rows} - json_data = json.dumps(data, cls=JSONEncoder) + json_data = json.dumps(data, cls=MSSQLJSONEncoder) error = None else: error = "No data was returned."
Support for UUID type in MSSQL query runner. Currently it fails on serializing to JSON (a fallback is needed in the serializer to cast UUIDs into strings).
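The patch adds exactly that fallback: a JSONEncoder subclass that casts `uuid.UUID` values to strings. A self-contained sketch (the real class subclasses redash's own JSONEncoder rather than `json.JSONEncoder` directly):

```python
import json
import uuid

class MSSQLJSONEncoder(json.JSONEncoder):
    def default(self, o):
        # json cannot serialize UUIDs natively; fall back to a string.
        if isinstance(o, uuid.UUID):
            return str(o)
        return super(MSSQLJSONEncoder, self).default(o)

row = {"id": uuid.UUID("12345678-1234-5678-1234-567812345678")}
print(json.dumps(row, cls=MSSQLJSONEncoder))
# {"id": "12345678-1234-5678-1234-567812345678"}
```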
2016-06-01T12:13:04
getredash/redash
1,104
getredash__redash-1104
[ "1101" ]
8a5a71421d5ef4ed0abc954c42a9743a69124c5d
diff --git a/redash/query_runner/mssql.py b/redash/query_runner/mssql.py --- a/redash/query_runner/mssql.py +++ b/redash/query_runner/mssql.py @@ -54,6 +54,11 @@ def configuration_schema(cls): "default": "7.0", "title": "TDS Version" }, + "charset": { + "type": "string", + "default": "UTF-8", + "title": "Character Set" + }, "db": { "type": "string", "title": "Database Name" @@ -120,11 +125,16 @@ def run_query(self, query): db = self.configuration['db'] port = self.configuration.get('port', 1433) tds_version = self.configuration.get('tds_version', '7.0') + charset = self.configuration.get('charset', 'UTF-8') if port != 1433: server = server + ':' + str(port) - connection = pymssql.connect(server=server, user=user, password=password, database=db, tds_version=tds_version) + connection = pymssql.connect(server=server, user=user, password=password, database=db, tds_version=tds_version, charset=charset) + + if isinstance(query, unicode): + query = query.encode(charset) + cursor = connection.cursor() logger.debug("SqlServer running query: %s", query)
Query execution fails if user name has unicode characters ``` [2016-06-08 21:16:08,491: ERROR/MainProcess] Task redash.tasks.execute_query[82e3c23b-ceb7-4ed1-81c2-98f8601a6b14] raised unexpected: UnicodeEncodeError('ascii', u'/* Username: \u4f0a\u85e4\u76f4\u4e5f, Task ID: 82e3c23b-ceb7-4ed1-81c2-98f8601a6b14, Query ID: 6, Queue: queries, Query Hash: f1fc50d12a0b5cd0279b42db51130dbe */ SELECT TOP 100 *\nFROM CurrentDB.dbo.Booking', 13, 17, 'ordinal not in range(128)') Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task R = retval = fun(*args, **kwargs) File "/opt/redash/redash.0.11.0.b1903/redash/tasks/base.py", line 13, in __call__ return super(BaseTask, self).__call__(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__ return self.run(*args, **kwargs) File "/opt/redash/redash.0.11.0.b1903/redash/tasks/queries.py", line 445, in execute_query return QueryExecutor(self, query, data_source_id, metadata).run() File "/opt/redash/redash.0.11.0.b1903/redash/tasks/queries.py", line 395, in run data, error = query_runner.run_query(annotated_query) File "/opt/redash/redash.0.11.0.b1903/redash/query_runner/mssql.py", line 125, in run_query cursor.execute(query) File "pymssql.pyx", line 445, in pymssql.Cursor.execute (pymssql.c:6242) File "_mssql.pyx", line 998, in _mssql.MSSQLConnection.execute_query (_mssql.c:10085) File "_mssql.pyx", line 1029, in _mssql.MSSQLConnection.execute_query (_mssql.c:9964) File "_mssql.pyx", line 1149, in _mssql.MSSQLConnection.format_and_run_query (_mssql.c:11030) File "_mssql.pyx", line 200, in _mssql.ensure_bytes (_mssql.c:2544) File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode return codecs.utf_8_decode(input, errors, True) UnicodeEncodeError: 'ascii' codec can't encode characters in position 13-16: ordinal not in range(128) ``` ### Steps to Reproduce - My user name is "伊藤直也" - Using mssql query runner - Querying "select top 100 \* from sometable" When using BigQuery query runner, This error doesn't happen. ### Technical details: - Redash Version: v0.10.1.b1834, 0.11.0 - RC - Browser/OS: Google Chrome/OSX / Server: ubuntu Linux 16.04 LTS - How did you install Redash: used `setup/ubuntu/bootstrap.sh`
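The annotated query becomes a unicode object once the username contains non-ASCII characters, and pymssql chokes while coercing it to bytes. The patch encodes the query in the connection charset before execution; a Python 2 sketch of that step (the query text is illustrative):

```python
# -*- coding: utf-8 -*-
charset = "UTF-8"
query = u"/* Username: \u4f0a\u85e4\u76f4\u4e5f */ SELECT TOP 100 * FROM Booking"

# Python 2: encode unicode to bytes in the configured charset before
# handing the query to pymssql's cursor.execute().
if isinstance(query, unicode):
    query = query.encode(charset)

print(type(query).__name__)  # str (bytes) on Python 2
```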
2016-06-09T03:10:03
getredash/redash
1,110
getredash__redash-1110
[ "1109" ]
8c21e9149dab9bdbb502391ca36bb313bafe5841
diff --git a/redash/permissions.py b/redash/permissions.py --- a/redash/permissions.py +++ b/redash/permissions.py @@ -17,7 +17,8 @@ def has_access(object_groups, user, need_view_only): return False required_level = 1 if need_view_only else 2 - group_level = 1 if any(flatten([object_groups[group] for group in matching_groups])) else 2 + + group_level = 1 if all(flatten([object_groups[group] for group in matching_groups])) else 2 return required_level <= group_level
diff --git a/tests/test_permissions.py b/tests/test_permissions.py --- a/tests/test_permissions.py +++ b/tests/test_permissions.py @@ -24,6 +24,14 @@ def test_allows_if_user_member_in_group_with_full_access(self): self.assertTrue(has_access({1: not view_only}, user, not view_only)) + def test_allows_if_user_member_in_multiple_groups(self): + user = MockUser([], [1, 2, 3]) + + self.assertTrue(has_access({1: not view_only, 2: view_only}, user, not view_only)) + self.assertFalse(has_access({1: view_only, 2: view_only}, user, not view_only)) + self.assertTrue(has_access({1: view_only, 2: view_only}, user, view_only)) + self.assertTrue(has_access({1: not view_only, 2: not view_only}, user, view_only)) + def test_not_allows_if_not_enough_permission(self): user = MockUser([], [1])
Mixed view_only in multiple data_source_groups blocks query executions. A user belonging to multiple groups that have access to one data source, but with different access levels, cannot execute queries on that data source. For example, if a user belongs to the built-in `default` group and you have set `view_only` for all data sources in this group to true, adding this user to a new group to allow full access to one of the data sources will not work. This is caused by the `group_level` definition in `def has_access()` in [permissions.py](https://github.com/getredash/redash/blob/master/redash/permissions.py): ``` required_level = 1 if need_view_only else 2 group_level = 1 if any(flatten([object_groups[group] for group in matching_groups])) else 2 return required_level <= group_level ```
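The one-character diff is easier to see with concrete values. In `object_groups`, True means view-only; with `any()`, a single view-only membership demotes the whole user, while the patch's `all()` demotes only when every membership is view-only:

```python
memberships = [True, False]  # view_only flags: view-only in one group, full access in another

group_level_before = 1 if any(memberships) else 2  # bug: any() -> level 1 (view only)
group_level_after = 1 if all(memberships) else 2   # fix: all() -> level 2 (full access)

required_level = 2  # executing a query requires full access
print(required_level <= group_level_before)  # False: execution blocked
print(required_level <= group_level_after)   # True: execution allowed
```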
2016-06-09T16:59:47
getredash/redash
1,118
getredash__redash-1118
[ "930", "930" ]
4fabaaea8a1eb146cb7e3bfbd36fa51849a1bd23
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -641,6 +641,9 @@ def archive(self): for w in vis.widgets: w.delete_instance() + for alert in self.alerts: + alert.delete_instance(recursive=True) + self.save() @classmethod
diff --git a/tests/factories.py b/tests/factories.py --- a/tests/factories.py +++ b/tests/factories.py @@ -185,6 +185,14 @@ def create_alert(self, **kwargs): args.update(**kwargs) return alert_factory.create(**args) + def create_alert_subscription(self, **kwargs): + args = { + 'user': self.user + } + + args.update(**kwargs) + return alert_subscription_factory.create(**args) + def create_data_source(self, **kwargs): args = { 'org': self.org @@ -274,6 +282,3 @@ def create_api_key(self, **kwargs): def create_destination(self, **kwargs): return destination_factory.create(**kwargs) - - def create_alert_subscription(self, **kwargs): - return alert_subscription_factory.create(**kwargs) diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -276,6 +276,16 @@ def test_removes_scheduling(self): self.assertEqual(None, query.schedule) + def test_deletes_alerts(self): + subscription = self.factory.create_alert_subscription() + query = subscription.alert.query + + query.archive() + + self.assertRaises(models.Alert.DoesNotExist, models.Alert.get_by_id, subscription.alert.id) + self.assertRaises(models.AlertSubscription.DoesNotExist, models.AlertSubscription.get_by_id, subscription.id) + + class DataSourceTest(BaseTestCase): def test_get_schema(self): return_value = [{'name': 'table', 'columns': []}]
When archiving a query, delete related alerts. Related: #731.
2016-06-14T08:10:10
getredash/redash
1,119
getredash__redash-1119
[ "731" ]
3ce27b9652506dcf96488c1a5ccf9899cd989b2d
diff --git a/redash/handlers/alerts.py b/redash/handlers/alerts.py --- a/redash/handlers/alerts.py +++ b/redash/handlers/alerts.py @@ -34,6 +34,11 @@ def post(self, alert_id): return alert.to_dict() + def delete(self, alert_id): + alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org) + require_admin_or_owner(alert.user.id) + alert.delete_instance(recursive=True) + class AlertListResource(BaseResource): def post(self):
diff --git a/tests/factories.py b/tests/factories.py --- a/tests/factories.py +++ b/tests/factories.py @@ -187,7 +187,8 @@ def create_alert(self, **kwargs): def create_alert_subscription(self, **kwargs): args = { - 'user': self.user + 'user': self.user, + 'alert': self.create_alert() } args.update(**kwargs) diff --git a/tests/handlers/test_alerts.py b/tests/handlers/test_alerts.py --- a/tests/handlers/test_alerts.py +++ b/tests/handlers/test_alerts.py @@ -1,8 +1,5 @@ from tests import BaseTestCase -from tests.factories import org_factory -from tests.handlers import authenticated_user, json_request -from redash.wsgi import app -from redash.models import AlertSubscription +from redash.models import AlertSubscription, Alert class TestAlertResourceGet(BaseTestCase): @@ -30,6 +27,36 @@ def test_returns_404_if_admin_from_another_org(self): self.assertEqual(rv.status_code, 404) +class TestAlertResourceDelete(BaseTestCase): + def test_removes_alert_and_subscriptions(self): + subscription = self.factory.create_alert_subscription() + alert = subscription.alert + + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id)) + self.assertEqual(rv.status_code, 200) + + self.assertRaises(Alert.DoesNotExist, Alert.get_by_id, subscription.alert.id) + self.assertRaises(AlertSubscription.DoesNotExist, AlertSubscription.get_by_id, subscription.id) + + def test_returns_403_if_not_allowed(self): + alert = self.factory.create_alert() + + user = self.factory.create_user() + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=user) + self.assertEqual(rv.status_code, 403) + + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=self.factory.create_admin()) + self.assertEqual(rv.status_code, 200) + + def test_returns_404_for_unauthorized_users(self): + alert = self.factory.create_alert() + + second_org = self.factory.create_org() + second_org_admin = self.factory.create_admin(org=second_org) + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=second_org_admin) + self.assertEqual(rv.status_code, 404) + + class TestAlertListPost(BaseTestCase): def test_returns_200_if_has_access_to_query(self): query = self.factory.create_query()
User should be able to delete an Alert. Alerts can't be removed from the UI; instead one has to run SQL directly, as below. ```sql delete from alerts where id = 〜 ```
+1 +1 +1
2016-06-14T08:55:38
getredash/redash
1,215
getredash__redash-1215
[ "1212" ]
42e7a41fcc555ffddb9a871dff81164ef2d78972
diff --git a/redash/destinations/email.py b/redash/destinations/email.py --- a/redash/destinations/email.py +++ b/redash/destinations/email.py @@ -24,7 +24,11 @@ def icon(cls): return 'fa-envelope' def notify(self, alert, query, user, new_state, app, host, options): - recipients = [email for email in options.get('addresses').split(',') if email] + recipients = [email for email in options.get('addresses', '').split(',') if email] + + if not recipients: + logging.warning("No emails given. Skipping send.") + html = """ Check <a href="{host}/alerts/{alert_id}">alert</a> / check <a href="{host}/queries/{query_id}">query</a>. """.format(host=host, alert_id=alert.id, query_id=query.id) @@ -39,6 +43,6 @@ def notify(self, alert, query, user, new_state, app, host, options): ) mail.send(message) except Exception: - logging.exception("mail send ERROR.") + logging.exception("Mail send error.") register(Email) diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -1189,12 +1189,11 @@ def notify(self, alert, query, user, new_state, app, host): app, host) else: # User email subscription, so create an email destination object - config = {'email': self.user.email} + config = {'addresses': self.user.email} schema = get_configuration_schema_for_destination_type('email') - options = ConfigurationContainer(json.dumps(config), schema) + options = ConfigurationContainer(config, schema) destination = get_destination('email', options) - return destination.notify(alert, query, user, new_state, - app, host, options) + return destination.notify(alert, query, user, new_state, app, host, options) all_models = (Organization, Group, DataSource, DataSourceGroup, User, QueryResult, Query, Alert, Dashboard, Visualization, Widget, Event, NotificationDestination, AlertSubscription, ApiKey) diff --git a/redash/tasks/alerts.py b/redash/tasks/alerts.py --- a/redash/tasks/alerts.py +++ b/redash/tasks/alerts.py @@ -42,5 +42,5 @@ def check_alerts_for_query(query_id): try: subscription.notify(alert, query, subscription.user, new_state, app, host) except Exception as e: - logger.warn("Exception: {}".format(e)) + logger.exception("Error with processing destination")
Email notification not working Hi Arik, thank you for re:dash, it is really amazing. ### Issue Summary Redash 0.11.0+b1995 (and similar versions including 0.12RC I guess) ### Steps to Reproduce 1. Set an alert on a query that will trigger 2. Register to the alert with your user (email) 3. Refresh the query ### Technical details: From the logs I get `celery_error.log:[2016-06-26 13:30:46,312: WARNING/Worker-11] redash.tasks.check_alerts_for_query[5bb71f82-ec70-4893-bb12-5d665f7f9a05]: Exception: 'str' object has no attribute 'get'` I'm not really sure (*) but I think it is bubbling up in [redash/tasks/alerts.py:43](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L43) from [redash/destinations/email.py:27](https://github.com/getredash/redash/blob/8b73a2b135aed8fcbca139380a959b9222ae0080/redash/destinations/email.py#L27) (*) ConfigurationContainer may be doing something more than merely passing the dict along but I'm not sure ... The root cause may be in [redash/models.py:1192](https://github.com/getredash/redash/blob/master/redash/models.py#L1192) where it could be solved by: `config = {'addresses': self.user.email}` Hopefully this evening I will have a pull request.
You analyzed it correctly and I will push the fix later today myself (already have the fix, just didn't have the chance to make a pull request).
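Both halves of the reporter's analysis reproduce in isolation: ConfigurationContainer received a JSON *string* instead of a dict, and the key was 'email' while the email destination reads 'addresses'. A sketch (the address is made up):

```python
import json

config = {"email": "[email protected]"}

# Before the fix: a JSON string was passed where a mapping was expected.
options = json.dumps(config)
try:
    options.get("addresses", "")
except AttributeError as e:
    print(e)  # 'str' object has no attribute 'get'

# After the fix: pass the dict itself, under the key the destination reads.
options = {"addresses": "[email protected]"}
print([a for a in options.get("addresses", "").split(",") if a])
```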
2016-07-27T13:49:17
getredash/redash
1,256
getredash__redash-1256
[ "439" ]
61fe16e18e00117043e886e1bc025fc1d758f022
diff --git a/redash/handlers/api.py b/redash/handlers/api.py --- a/redash/handlers/api.py +++ b/redash/handlers/api.py @@ -9,7 +9,7 @@ from redash.handlers.dashboards import DashboardListResource, RecentDashboardsResource, DashboardResource, DashboardShareResource from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource, DataSourceTestResource from redash.handlers.events import EventResource -from redash.handlers.queries import QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource +from redash.handlers.queries import QueryForkResource, QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource from redash.handlers.visualizations import VisualizationListResource @@ -71,6 +71,7 @@ def json_representation(data, code, headers=None): api.add_org_resource(MyQueriesResource, '/api/queries/my', endpoint='my_queries') api.add_org_resource(QueryRefreshResource, '/api/queries/<query_id>/refresh', endpoint='query_refresh') api.add_org_resource(QueryResource, '/api/queries/<query_id>', endpoint='query') +api.add_org_resource(QueryForkResource, '/api/queries/<query_id>/fork', endpoint='query_fork') api.add_org_resource(ObjectPermissionsListResource, '/api/<object_type>/<object_id>/acl', endpoint='object_permissions') api.add_org_resource(CheckPermissionResource, '/api/<object_type>/<object_id>/acl/<access_type>', endpoint='check_permissions') diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -138,6 +138,14 @@ def delete(self, query_id): query.archive(self.current_user) +class QueryForkResource(BaseResource): + @require_permission('edit_query') + def post(self, query_id): + query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) + forked_query = query.fork(self.current_user) + return forked_query.to_dict(with_visualizations=True) + + class QueryRefreshResource(BaseResource): def post(self, query_id): query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -837,6 +837,31 @@ def recent(cls, groups, user_id=None, limit=20): return query + def fork(self, user): + query = self + forked_query = Query() + forked_query.name = 'Copy of (#{}) {}'.format(query.id, query.name) + forked_query.user = user + forked_list = ['org', 'data_source', 'latest_query_data', 'description', 'query', 'query_hash'] + for a in forked_list: + setattr(forked_query, a, getattr(query, a)) + forked_query.save() + + forked_visualizations = [] + for v in query.visualizations: + if v.type == 'TABLE': + continue + forked_v = v.to_dict() + forked_v['options'] = v.options + forked_v['query'] = forked_query + forked_v.pop('id') + forked_visualizations.append(forked_v) + + if len(forked_visualizations) > 0: + with db.database.atomic(): + Visualization.insert_many(forked_visualizations).execute() + return forked_query + def pre_save(self, created): super(Query, self).pre_save(created) self.query_hash = utils.gen_query_hash(self.query)
diff --git a/tests/models/test_queries.py b/tests/models/test_queries.py --- a/tests/models/test_queries.py +++ b/tests/models/test_queries.py @@ -1,5 +1,73 @@ from tests import BaseTestCase +from redash.models import Query -# Add tests for change tracking +class TestApiKeyGetByObject(BaseTestCase): + + def assert_visualizations(self, origin_q, origin_v, forked_q, forked_v): + self.assertEqual(origin_v.options, forked_v.options) + self.assertEqual(origin_v.type, forked_v.type) + self.assertNotEqual(origin_v.id, forked_v.id) + self.assertNotEqual(origin_v.query, forked_v.query) + self.assertEqual(forked_q.id, forked_v.query.id) + + + def test_fork_with_visualizations(self): + # prepare original query and visualizations + data_source = self.factory.create_data_source(group=self.factory.create_group()) + query = self.factory.create_query(data_source=data_source, description="this is description") + visualization_chart = self.factory.create_visualization(query=query, description="chart vis", type="CHART", options="""{"yAxis": [{"type": "linear"}, {"type": "linear", "opposite": true}], "series": {"stacking": null}, "globalSeriesType": "line", "sortX": true, "seriesOptions": {"count": {"zIndex": 0, "index": 0, "type": "line", "yAxis": 0}}, "xAxis": {"labels": {"enabled": true}, "type": "datetime"}, "columnMapping": {"count": "y", "created_at": "x"}, "bottomMargin": 50, "legend": {"enabled": true}}""") + visualization_box = self.factory.create_visualization(query=query, description="box vis", type="BOXPLOT", options="{}") + fork_user = self.factory.create_user() + + forked_query = query.fork(fork_user) + + + forked_visualization_chart = None + forked_visualization_box = None + forked_table = None + count_table = 0 + for v in forked_query.visualizations: + if v.description == "chart vis": + forked_visualization_chart = v + if v.description == "box vis": + forked_visualization_box = v + if v.type == "TABLE": + count_table += 1 + forked_table = v + self.assert_visualizations(query, visualization_chart, forked_query, forked_visualization_chart) + self.assert_visualizations(query, visualization_box, forked_query, forked_visualization_box) + + self.assertEqual(forked_query.org, query.org) + self.assertEqual(forked_query.data_source, query.data_source) + self.assertEqual(forked_query.latest_query_data, query.latest_query_data) + self.assertEqual(forked_query.description, query.description) + self.assertEqual(forked_query.query, query.query) + self.assertEqual(forked_query.query_hash, query.query_hash) + self.assertEqual(forked_query.user, fork_user) + self.assertEqual(forked_query.description, query.description) + self.assertTrue(forked_query.name.startswith('Copy')) + # num of TABLE must be 1. default table only + self.assertEqual(count_table, 1) + self.assertEqual(forked_table.name, "Table") + self.assertEqual(forked_table.description, "") + self.assertEqual(forked_table.options, "{}") + + def test_fork_from_query_that_has_no_visualization(self): + # prepare original query and visualizations + data_source = self.factory.create_data_source(group=self.factory.create_group()) + query = self.factory.create_query(data_source=data_source, description="this is description") + fork_user = self.factory.create_user() + + forked_query = query.fork(fork_user) + + count_table = 0 + count_vis = 0 + for v in forked_query.visualizations: + count_vis += 1 + if v.type == "TABLE": + count_table += 1 + + self.assertEqual(count_table, 1) + self.assertEqual(count_vis, 1)
When forking a query, charts are not copied
+1 +1 I thought they were... Would like this as well. +1 I will try this. +1
2016-08-25T11:28:32
getredash/redash
1,334
getredash__redash-1334
[ "1224" ]
2135dfd2e56d57fc7706c703282816fac630da24
diff --git a/manage.py b/manage.py --- a/manage.py +++ b/manage.py @@ -47,12 +47,16 @@ def check_settings(): for name, item in settings.all_settings().iteritems(): print "{} = {}".format(name, item) [email protected] -def send_test_mail(): + [email protected]('email', default=None, help="Email address to send test message to (default: the address you defined in MAIL_DEFAULT_SENDER)") +def send_test_mail(email=None): from redash import mail from flask_mail import Message - mail.send(Message(subject="Test Message from re:dash", recipients=[settings.MAIL_DEFAULT_SENDER], body="Test message.")) + if email is None: + email = settings.MAIL_DEFAULT_SENDER + + mail.send(Message(subject="Test Message from re:dash", recipients=[email], body="Test message.")) if __name__ == '__main__':
Ability to specify recipient email address when using send_test_email It would be useful if manage.py send_test_email allowed you to specify a recipient instead of sending the test to whatever address is specified in REDASH_MAIL_DEFAULT_SENDER="". I run re:dash on a subdomain and am using Mailgun for alerts, but since I use Google Apps and their MX records for my domain, my Mailgun address can't deliver email to itself.
2016-10-09T06:16:41
getredash/redash
1,335
getredash__redash-1335
[ "1144" ]
015b1dc8fd4a4cd7f8003c3cb96133adf4ce1411
diff --git a/redash/destinations/email.py b/redash/destinations/email.py --- a/redash/destinations/email.py +++ b/redash/destinations/email.py @@ -1,7 +1,7 @@ import logging from flask_mail import Message -from redash import models, mail +from redash import mail, settings from redash.destinations import * @@ -15,6 +15,11 @@ def configuration_schema(cls): "addresses": { "type": "string" }, + "subject_template": { + "type": "string", + "default": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE, + "title": "Subject Template" + } }, "required": ["addresses"] } @@ -36,9 +41,12 @@ def notify(self, alert, query, user, new_state, app, host, options): try: with app.app_context(): + alert_name = alert.name.encode('utf-8', 'ignore') + state = new_state.upper() + subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) message = Message( recipients=recipients, - subject="[{1}] {0}".format(alert.name.encode('utf-8', 'ignore'), new_state.upper()), + subject=subject_template.format(alert_name=alert_name, state=state), html=html ) mail.send(message) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -141,6 +141,8 @@ def all_settings(): HOST = os.environ.get('REDASH_HOST', '') +ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE = os.environ.get('REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE', "({state}) {alert_name}") + # CORS settings for the Query Result API (and possbily future external APIs). # In most cases all you need to do is set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN # to the calling domain (or domains in a comma separated list).
Gmail is merging [OK] and [Triggered] alert emails ### Issue Summary In Gmail the alert emails from redash are merged on the same thread, which makes it hard to know if an alert was triggered or solved. Here is the explanation of the behavior of Gmail: https://www.quora.com/Why-when-I-edit-subject-in-email-and-add-the-symbols-at-the-beginning-of-the-current-subject-to-keep-email-in-the-same-thread-the-email-sometimes-get-divided-in-a-new-thread-and-sometimes-doesnt/answer/Vineet-Chawla ### Steps to Reproduce 1. Setup an alert, register to receive email updates on a gmail address 2. Trigger the alert 3. Change the query and re-run it so that redash sends the email starting with [OK] 4. In gmail you will see the two emails under the subject starting with [Triggered] like this screenshot: ![screenshot from 2016-06-19 14 24 07](https://cloud.githubusercontent.com/assets/494686/16177359/7c8ddc8a-362a-11e6-8d80-f13202143ea2.png) 5. The expectation is to have two threads, or to have the information about whether the alert was triggered or solved in the body of the email ### Technical details: - Redash Version: 0.9.2+b1536
I think there is a way to pass some message id header to prevent grouping. Will look into it. I've given a quick look at the source, and I think that another quick fix would be to change the email subject defined there: https://github.com/getredash/redash/blob/8b73a2b135aed8fcbca139380a959b9222ae0080/redash/destinations/email.py#L37 to be: "{0} - {1}". What do you think about that?
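The patch makes the subject configurable via REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE, defaulting to "({state}) {alert_name}". Gmail reportedly ignores square-bracketed prefixes when threading (per the Quora link above), so moving the state out of brackets keeps triggered and OK mails in separate threads. The formatting step in isolation (the alert name is made up):

```python
subject_template = "({state}) {alert_name}"  # the patch's default

for state in ("TRIGGERED", "OK"):
    print(subject_template.format(state=state, alert_name="Daily error count"))
# (TRIGGERED) Daily error count
# (OK) Daily error count
```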
2016-10-09T06:45:10
getredash/redash
1,394
getredash__redash-1394
[ "1383" ]
d1b82694a6cab0a1cc24bcfffac2ad900edcf7ba
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -1,10 +1,11 @@ import json from flask import request - from redash import models -from redash.permissions import require_permission, require_admin_or_owner, require_access, view_only from redash.handlers.base import BaseResource +from redash.permissions import (require_access, + require_object_modify_permission, + require_permission, view_only) class WidgetListResource(BaseResource): @@ -12,7 +13,7 @@ class WidgetListResource(BaseResource): def post(self): widget_properties = request.get_json(force=True) dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org) - require_admin_or_owner(dashboard.user_id) + require_object_modify_permission(dashboard, self.current_user) widget_properties['options'] = json.dumps(widget_properties['options']) widget_properties.pop('id', None) @@ -47,7 +48,7 @@ def post(self): widget.dashboard.layout = json.dumps(layout) widget.dashboard.save() - return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row} + return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version} class WidgetResource(BaseResource): @@ -55,7 +56,7 @@ class WidgetResource(BaseResource): def post(self, widget_id): # This method currently handles Text Box widgets only. widget = models.Widget.get_by_id_and_org(widget_id, self.current_org) - require_admin_or_owner(widget.dashboard.user_id) + require_object_modify_permission(widget.dashboard, self.current_user) widget_properties = request.get_json(force=True) widget.text = widget_properties['text'] widget.save() @@ -65,7 +66,7 @@ def post(self, widget_id): @require_permission('edit_dashboard') def delete(self, widget_id): widget = models.Widget.get_by_id_and_org(widget_id, self.current_org) - require_admin_or_owner(widget.dashboard.user_id) + require_object_modify_permission(widget.dashboard, self.current_user) widget.delete_instance() - return {'layout': widget.dashboard.layout} + return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}
Share access permissions for add/remove widgets Hi @arikfr, in our PR #1113 we focused primarily on sharing access permissions for queries; for dashboards we currently only allow modifying the dashboard's basic info. What is still missing is the ability to allow other users to add and remove widgets. Is that something you are planning to add, or should we give it a shot? Another thing that came up - we're currently enforcing `require_admin_or_owner(...)` for managing access permissions (e.g., https://github.com/getredash/redash/blob/master/redash/handlers/permissions.py#L42). This is actually a very restrictive limitation, and we believe that anybody with permissions (owner, admin, other permitted users) should be able to add/remove users. For instance, consider person A creating a dashboard, then giving access to persons B and C, who actively maintain that dashboard. If person A leaves the company, B and C would not be able to make the required changes to add another person D. What do you think? /cc @rohanpd
This is what the role of an admin is: the ability to administrate an account. As part of someone leaving a company, they would delegate their admin access to someone else / hand over their account. I think perhaps what you're asking for instead is a more 'per-action' style permission system, i.e., create / add user, create / edit widget, create / edit query, create / edit dashboard, etc. This would be a better way to do what you want, rather than over-expanding non-admin permissions, which is a security issue. For the first thing: I think that we should allow managing widgets. At least on my part, it wasn't intentional, just an oversight. For the second issue: in general I agree with @adamlwgriffiths that this is what the other admins are for. But I do think we should have the ability to hand over ownership of objects to someone else. Opened #1387 to track this.
2016-11-15T13:58:16
getredash/redash
1,405
getredash__redash-1405
[ "2" ]
2d7a4970737f652105b2fbcfd718ae411e638ec8
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py new file mode 100644 --- /dev/null +++ b/redash/query_runner/google_analytics.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +from base64 import b64decode +import json +import logging +from redash.query_runner import * +from redash.utils import JSONEncoder +from urlparse import urlparse, parse_qs +from datetime import datetime +logger = logging.getLogger(__name__) + +try: + from oauth2client.client import SignedJwtAssertionCredentials + from apiclient.discovery import build + import httplib2 + enabled = True +except ImportError as e: + logger.info(str(e)) + enabled = False + + +def _load_key(filename): + with open(filename, "rb") as f: + return json.loads(f.read()) + + +types_conv = dict( + STRING=TYPE_STRING, + INTEGER=TYPE_INTEGER, + FLOAT=TYPE_FLOAT, + DATE=TYPE_DATE, + DATETIME=TYPE_DATETIME +) + + +class GoogleAnalytics(BaseSQLQueryRunner): + @classmethod + def annotate_query(cls): + return False + + @classmethod + def type(cls): + return "google_analytics" + + @classmethod + def enabled(cls): + return enabled + + @classmethod + def configuration_schema(cls): + return { + 'type': 'object', + 'properties': { + 'jsonKeyFile': { + "type": "string", + 'title': 'JSON Key File' + } + }, + 'required': ['jsonKeyFile'], + 'secret': ['jsonKeyFile'] + } + + def __init__(self, configuration): + super(GoogleAnalytics, self).__init__(configuration) + + def _get_tables(self, schema): + accounts = self._get_analytics_service().management().accounts().list().execute().get('items') + if accounts is None: + raise Exception("Failed getting accounts.") + else: + for account in accounts: + schema[account['name']] = {'name': account['name'], 'columns': []} + properties = self._get_analytics_service().management().webproperties().list( + accountId=account['id']).execute().get('items', []) + for property_ in properties: + schema[account['name']]['columns'].append( + u'{0} (ga:{1})'.format(property_['name'], property_['defaultProfileId']) + ) + return schema.values() + + def _get_analytics_service(self): + scope = ['https://www.googleapis.com/auth/analytics.readonly'] + key = json.loads(b64decode(self.configuration['jsonKeyFile'])) + credentials = SignedJwtAssertionCredentials(key['client_email'], key["private_key"], scope=scope) + return build('analytics', 'v3', http=credentials.authorize(httplib2.Http())) + + def run_query(self, query, user): + logger.debug("Analytics is about to execute query: %s", query) + try: + params = json.loads(query) + except: + params = parse_qs(urlparse(query).query, keep_blank_values=True) + for key in params.keys(): + params[key] = ','.join(params[key]) + if '-' in key: + params[key.replace('-', '_')] = params.pop(key) + if len(params) > 0: + response = self._get_analytics_service().data().ga().get(**params).execute() + columns = [] + for h in response['columnHeaders']: + if h['name'] == 'ga:date': + h['dataType'] = 'DATE' + elif h['name'] == 'ga:dateHour': + h['dataType'] = 'DATETIME' + columns.append({ + 'name': h['name'], + 'friendly_name': h['name'].split(':', 1)[1], + 'type': types_conv.get(h['dataType'], 'string') + }) + rows = [] + for r in response['rows']: + d = {} + for c, value in enumerate(r): + column_name = response['columnHeaders'][c]['name'] + column_type = filter(lambda col: col['name'] == column_name, columns)[0]['type'] + if column_type == TYPE_DATE: + value = datetime.strptime(value, '%Y%m%d') + elif column_type == TYPE_DATETIME: + if len(value) == 10: + value = 
datetime.strptime(value, '%Y%m%d%H') + elif len(value) == 12: + value = datetime.strptime(value, '%Y%m%d%H%M') + else: + raise Exception("Unknown date/time format in results: '{}'".format(value)) + d[column_name] = value + rows.append(d) + data = {'columns': columns, 'rows': rows} + error = None + json_data = json.dumps(data, cls=JSONEncoder) + else: + error = 'Wrong query format' + json_data = None + return json_data, error + +register(GoogleAnalytics) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -173,7 +173,8 @@ def all_settings(): 'redash.query_runner.sqlite', 'redash.query_runner.dynamodb_sql', 'redash.query_runner.mssql', - 'redash.query_runner.jql' + 'redash.query_runner.jql', + 'redash.query_runner.google_analytics' ] enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
Visualizations workflow & object Visualizations (widget?) should have an object of their own containing the following data: - query - type (chart, cohort, grid, ...) - options Tasks: - [x] Visualization object - [x] UI to create new visualizations instead of the hardcoded option we have today - [x] Change the dashboard widgets to use visualizations rather than queries - [ ] Friendlier selector when adding new widgets to dashboard
There was a case (EvMe's query 607) where the data was unsorted and had too many series, which resulted in HighCharts dying. When we revisit visualizations we should take into account the limits of different visualizers, and prevent the user from killing their browser. cc: @shayel. http://app.raw.densitydesign.org/#/ @amirnissim this requires some changes on the backend too, but let's try to move forward without them; on Sunday we will discuss the needed changes. Below is a "brain dump" about this feature, ask any questions you feel necessary: Basically the idea is to have different types of visualizations, and the ability to create a new visualization from any dataset (query). Eventually, this will replace the "Chart" and "Cohort" static tabs. Each visualization will define: 1. Name 2. Description 3. Properties (mandatory & optional + default values) 4. Rendering logic When creating a new visualization it will have: 1. query_id - reference to the queries table 2. visualization_type - string 3. options - JSON 4. ? Until we add the visualization object, let's start by creating the "infrastructure" for this in the frontend code. In terms of UI, I think we will represent each visualization as a tab where we currently have the "Chart" and "Cohort" tabs, and also have a "+" tab where you define a new visualization.
2016-11-17T15:04:59
getredash/redash
1,423
getredash__redash-1423
[ "1422", "1422" ]
e6482cffabd3ef287b0012dc9fc23b5b77c2c98e
diff --git a/redash/query_runner/cass.py b/redash/query_runner/cass.py --- a/redash/query_runner/cass.py +++ b/redash/query_runner/cass.py @@ -1,18 +1,19 @@ import json -import sys import logging -from redash.query_runner import * +from redash.query_runner import BaseQueryRunner, register from redash.utils import JSONEncoder logger = logging.getLogger(__name__) try: - from cassandra.cluster import Cluster + from cassandra.cluster import Cluster, Error + from cassandra.auth import PlainTextAuthProvider enabled = True except ImportError: enabled = False + class Cassandra(BaseQueryRunner): noop_query = "SELECT * FROM system" @@ -61,11 +62,9 @@ def _get_tables(self, schema): return results, error def run_query(self, query, user): - from cassandra.cluster import Cluster connection = None try: if self.configuration.get('username', '') and self.configuration.get('password', ''): - from cassandra.auth import PlainTextAuthProvider auth_provider = PlainTextAuthProvider(username='{}'.format(self.configuration.get('username', '')), password='{}'.format(self.configuration.get('password', ''))) connection = Cluster([self.configuration.get('host', '')], auth_provider=auth_provider) @@ -86,16 +85,15 @@ def run_query(self, query, user): json_data = json.dumps(data, cls=JSONEncoder) error = None - - except cassandra.cluster.Error, e: + except Error as e: error = e.args[1] except KeyboardInterrupt: error = "Query cancelled by user." return json_data, error -class ScyllaDB(Cassandra): +class ScyllaDB(Cassandra): def __init__(self, configuration): super(ScyllaDB, self).__init__(configuration) @@ -103,5 +101,6 @@ def __init__(self, configuration): def type(cls): return "scylla" + register(Cassandra) register(ScyllaDB)
NameError: global name 'cassandra' is not defined Hey, ### Issue Summary I added `export REDASH_ADDITIONAL_QUERY_RUNNERS=redash.query_runner.cass` to `opt/redash/.env`. After that, I was able to create a new Cassandra datasource, but queries fail, displaying: **global name 'cassandra' is not defined** I have installed the DataStax Cassandra driver `sudo pip install cassandra-driver` and am using Python 2.7.6. These are the traces from `/opt/redash/logs/celery_error.log` ``` 2016-11-23 02:09:15,625: WARNING/Worker-5] Unexpected error while running query: Traceback (most recent call last): File "/opt/redash/redash.0.12.0.b2449/redash/tasks/queries.py", line 410, in run data, error = query_runner.run_query(annotated_query, self.user) File "/opt/redash/redash.0.12.0.b2449/redash/query_runner/cass.py", line 90, in run_query except cassandra.cluster.Error, e: NameError: global name 'cassandra' is not defined ``` ### Technical details * Redash version 0.12.0+b2449 * OS Ubuntu 3.13.0-88-generic / Browser: Chrome. * How did you install Redash: running the provisioning script Thanks
If you run `python -c "import cassandra"` is it working? > If you run python -c "import cassandra" is it working? Yes
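A self-contained illustration of the NameError (no Cassandra install needed; `json` stands in for the driver): `from X import Y` binds only `Y`, so qualifying an exception by its module path in an `except` clause fails at the moment an exception needs matching, which is exactly what happened with `except cassandra.cluster.Error` after `from cassandra.cluster import Cluster`.

```python
from json import JSONDecodeError  # binds JSONDecodeError, but not the name 'json'

try:
    try:
        raise RuntimeError("simulated query failure")
    except json.JSONDecodeError:  # evaluated only now -> NameError, like cassandra.cluster.Error
        pass
except NameError as exc:
    print(exc)  # name 'json' is not defined
```

The merged patch avoids this by importing the exception class itself at module scope and catching it by its bound name.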
2016-11-23T08:51:25
getredash/redash
1,484
getredash__redash-1484
[ "1457" ]
3bc98fba5bd2b6b723ba0d0ce3007a651e8c2acb
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -58,10 +58,6 @@ def post(self): for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']: query_def.pop(field, None) - # If we already executed this query, save the query result reference - if 'latest_query_data_id' in query_def: - query_def['latest_query_data'] = query_def.pop('latest_query_data_id') - query_def['query_text'] = query_def.pop('query') query_def['user'] = self.current_user query_def['data_source'] = data_source
Wrong sort for Date column It happens when sorting by a date column in the UI. The screenshot shows everything. <img width="544" alt="2016-12-06 14 37 07" src="https://cloud.githubusercontent.com/assets/7091907/20924299/dea511b4-bbc1-11e6-8ffd-2bdda4bcbbd8.png">
Sorry, didn't get the problem. It seems to be a descending sort. **2016-12-06** is greater than **2016-11-07**, right? @WesleyBatista but it's an ascending sort. It looks like it sorts 'DD/MM/YYYY' strings rather than 'YYYY-MM-DD' dates
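A quick reproduction of what the screenshot shows, sorting 'DD/MM/YYYY' values as plain strings versus as parsed dates:

```python
from datetime import datetime

values = ["06/12/2016", "07/11/2016"]  # Dec 6 and Nov 7

# Lexicographic (string) sort, which is what the UI was doing:
print(sorted(values))  # ['06/12/2016', '07/11/2016']

# Chronological sort after parsing as dates:
print(sorted(values, key=lambda v: datetime.strptime(v, "%d/%m/%Y")))
# ['07/11/2016', '06/12/2016']
```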
2016-12-23T20:37:33
getredash/redash
1,488
getredash__redash-1488
[ "1459" ]
9e5944d56302d6ab1d50b843621e16bd3de1d485
diff --git a/redash/query_runner/snowflake.py b/redash/query_runner/snowflake.py new file mode 100644 --- /dev/null +++ b/redash/query_runner/snowflake.py @@ -0,0 +1,113 @@ +from __future__ import absolute_import +import json + +try: + import snowflake.connector + enabled = True +except ImportError: + enabled = False + + +from redash.query_runner import BaseQueryRunner, register +from redash.query_runner import TYPE_STRING, TYPE_DATE, TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN +from redash.utils import json_dumps + +TYPES_MAP = { + 0: TYPE_INTEGER, + 1: TYPE_FLOAT, + 2: TYPE_STRING, + 3: TYPE_DATE, + 4: TYPE_DATETIME, + 5: TYPE_STRING, + 6: TYPE_DATETIME, + 13: TYPE_BOOLEAN +} + + +class Snowflake(BaseQueryRunner): + noop_query = "SELECT 1" + + @classmethod + def configuration_schema(cls): + return { + "type": "object", + "properties": { + "account": { + "type": "string" + }, + "user": { + "type": "string" + }, + "password": { + "type": "string" + }, + "warehouse": { + "type": "string" + }, + "database": { + "type": "string" + } + }, + "required": ["user", "password", "account", "database", "warehouse"], + "secret": ["password"] + } + + @classmethod + def enabled(cls): + return enabled + + def run_query(self, query, user): + connection = snowflake.connector.connect( + user=self.configuration['user'], + password=self.configuration['password'], + account=self.configuration['account'], + ) + + cursor = connection.cursor() + + try: + cursor.execute("USE WAREHOUSE {}".format(self.configuration['warehouse'])) + cursor.execute("USE {}".format(self.configuration['database'])) + + cursor.execute(query) + + columns = self.fetch_columns([(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description]) + rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor] + + data = {'columns': columns, 'rows': rows} + error = None + json_data = json_dumps(data) + finally: + cursor.close() + connection.close() + + return json_data, error + + def get_schema(self, get_stats=False): + query = """ + SELECT col.table_schema, + col.table_name, + col.column_name + FROM {database}.information_schema.columns col + WHERE col.table_schema <> 'INFORMATION_SCHEMA' + """.format(database=self.configuration['database']) + + results, error = self.run_query(query, None) + + if error is not None: + raise Exception("Failed getting schema.") + + schema = {} + results = json.loads(results) + + for row in results['rows']: + table_name = '{}.{}'.format(row['TABLE_SCHEMA'], row['TABLE_NAME']) + + if table_name not in schema: + schema[table_name] = {'name': table_name, 'columns': []} + + schema[table_name]['columns'].append(row['COLUMN_NAME']) + + return schema.values() + +register(Snowflake) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -184,7 +184,8 @@ def all_settings(): 'redash.query_runner.dynamodb_sql', 'redash.query_runner.mssql', 'redash.query_runner.jql', - 'redash.query_runner.google_analytics' + 'redash.query_runner.google_analytics', + 'redash.query_runner.snowflake' ] enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
Snowflake Support Snowflake is a cloud data warehousing solution. It would be nice to be able to connect from Redash to Snowflake. They have a python connector here: https://docs.snowflake.net/manuals/user-guide/python-connector.html
2016-12-25T10:29:54
getredash/redash
1,500
getredash__redash-1500
[ "1499" ]
627f3f4fd53764310f494409eb4546ad14875e84
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -789,8 +789,8 @@ def outdated_queries(cls): @classmethod def search(cls, term, group_ids): # TODO: This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution. - where = (Query.name.like(u"%{}%".format(term)) | - Query.description.like(u"%{}%".format(term))) + where = (Query.name.ilike(u"%{}%".format(term)) | + Query.description.ilike(u"%{}%".format(term))) if term.isdigit(): where |= Query.id == term
diff --git a/tests/models/test_queries.py b/tests/models/test_queries.py --- a/tests/models/test_queries.py +++ b/tests/models/test_queries.py @@ -1,5 +1,163 @@ +# encoding: utf8 + from tests import BaseTestCase -from redash.models import Query, db +import datetime +from redash.models import Query, Group, Event, db +from redash.utils import utcnow + + +class QueryTest(BaseTestCase): + def test_changing_query_text_changes_hash(self): + q = self.factory.create_query() + old_hash = q.query_hash + + q.query_text = "SELECT 2;" + db.session.flush() + self.assertNotEquals(old_hash, q.query_hash) + + def test_search_finds_in_name(self): + q1 = self.factory.create_query(name=u"Testing seåřċħ") + q2 = self.factory.create_query(name=u"Testing seåřċħing") + q3 = self.factory.create_query(name=u"Testing seå řċħ") + queries = list(Query.search(u"seåřċħ", [self.factory.default_group.id])) + + self.assertIn(q1, queries) + self.assertIn(q2, queries) + self.assertNotIn(q3, queries) + + def test_search_finds_in_description(self): + q1 = self.factory.create_query(description=u"Testing seåřċħ") + q2 = self.factory.create_query(description=u"Testing seåřċħing") + q3 = self.factory.create_query(description=u"Testing seå řċħ") + + queries = Query.search(u"seåřċħ", [self.factory.default_group.id]) + + self.assertIn(q1, queries) + self.assertIn(q2, queries) + self.assertNotIn(q3, queries) + + def test_search_by_id_returns_query(self): + q1 = self.factory.create_query(description="Testing search") + q2 = self.factory.create_query(description="Testing searching") + q3 = self.factory.create_query(description="Testing sea rch") + db.session.flush() + queries = Query.search(str(q3.id), [self.factory.default_group.id]) + + self.assertIn(q3, queries) + self.assertNotIn(q1, queries) + self.assertNotIn(q2, queries) + + def test_search_respects_groups(self): + other_group = Group(org=self.factory.org, name="Other Group") + db.session.add(other_group) + ds = self.factory.create_data_source(group=other_group) + + q1 = self.factory.create_query(description="Testing search", data_source=ds) + q2 = self.factory.create_query(description="Testing searching") + q3 = self.factory.create_query(description="Testing sea rch") + + queries = list(Query.search("Testing", [self.factory.default_group.id])) + + self.assertNotIn(q1, queries) + self.assertIn(q2, queries) + self.assertIn(q3, queries) + + queries = list(Query.search("Testing", [other_group.id, self.factory.default_group.id])) + self.assertIn(q1, queries) + self.assertIn(q2, queries) + self.assertIn(q3, queries) + + queries = list(Query.search("Testing", [other_group.id])) + self.assertIn(q1, queries) + self.assertNotIn(q2, queries) + self.assertNotIn(q3, queries) + + def test_returns_each_query_only_once(self): + other_group = self.factory.create_group() + second_group = self.factory.create_group() + ds = self.factory.create_data_source(group=other_group) + ds.add_group(second_group, False) + + q1 = self.factory.create_query(description="Testing search", data_source=ds) + db.session.flush() + queries = list(Query.search("Testing", [self.factory.default_group.id, other_group.id, second_group.id])) + + self.assertEqual(1, len(queries)) + + def test_save_updates_updated_at_field(self): + # This should be a test of ModelTimestampsMixin, but it's easier to test in context of existing model... 
:-\ + one_day_ago = utcnow().date() - datetime.timedelta(days=1) + q = self.factory.create_query(created_at=one_day_ago, updated_at=one_day_ago) + db.session.flush() + q.name = 'x' + db.session.flush() + self.assertNotEqual(q.updated_at, one_day_ago) + + def test_search_is_case_insensitive(self): + q = self.factory.create_query(name="Testing search") + + self.assertIn(q, Query.search('testing', [self.factory.default_group.id])) + + +class QueryRecentTest(BaseTestCase): + def test_global_recent(self): + q1 = self.factory.create_query() + q2 = self.factory.create_query() + db.session.flush() + e = Event(org=self.factory.org, user=self.factory.user, action="edit", + object_type="query", object_id=q1.id) + db.session.add(e) + recent = Query.recent([self.factory.default_group.id]) + self.assertIn(q1, recent) + self.assertNotIn(q2, recent) + + def test_recent_excludes_drafts(self): + q1 = self.factory.create_query() + q2 = self.factory.create_query(is_draft=True) + + db.session.add_all([ + Event(org=self.factory.org, user=self.factory.user, + action="edit", object_type="query", + object_id=q1.id), + Event(org=self.factory.org, user=self.factory.user, + action="edit", object_type="query", + object_id=q2.id) + ]) + recent = Query.recent([self.factory.default_group.id]) + + self.assertIn(q1, recent) + self.assertNotIn(q2, recent) + + def test_recent_for_user(self): + q1 = self.factory.create_query() + q2 = self.factory.create_query() + db.session.flush() + e = Event(org=self.factory.org, user=self.factory.user, action="edit", + object_type="query", object_id=q1.id) + db.session.add(e) + recent = Query.recent([self.factory.default_group.id], user_id=self.factory.user.id) + + self.assertIn(q1, recent) + self.assertNotIn(q2, recent) + + recent = Query.recent([self.factory.default_group.id], user_id=self.factory.user.id + 1) + self.assertNotIn(q1, recent) + self.assertNotIn(q2, recent) + + def test_respects_groups(self): + q1 = self.factory.create_query() + ds = self.factory.create_data_source(group=self.factory.create_group()) + q2 = self.factory.create_query(data_source=ds) + db.session.flush() + Event(org=self.factory.org, user=self.factory.user, action="edit", + object_type="query", object_id=q1.id) + Event(org=self.factory.org, user=self.factory.user, action="edit", + object_type="query", object_id=q2.id) + + recent = Query.recent([self.factory.default_group.id]) + + self.assertIn(q1, recent) + self.assertNotIn(q2, recent) class TestQueryByUser(BaseTestCase): @@ -42,7 +200,6 @@ def assert_visualizations(self, origin_q, origin_v, forked_q, forked_v): self.assertNotEqual(origin_v.query_rel, forked_v.query_rel) self.assertEqual(forked_q.id, forked_v.query_rel.id) - def test_fork_with_visualizations(self): # prepare original query and visualizations data_source = self.factory.create_data_source( diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -26,155 +26,6 @@ def test_appends_suffix_to_slug_when_duplicate(self): self.assertNotEquals(d2.slug, d3.slug) -class QueryTest(BaseTestCase): - def test_changing_query_text_changes_hash(self): - q = self.factory.create_query() - old_hash = q.query_hash - - q.query_text = "SELECT 2;" - db.session.flush() - self.assertNotEquals(old_hash, q.query_hash) - - def test_search_finds_in_name(self): - q1 = self.factory.create_query(name=u"Testing seåřċħ") - q2 = self.factory.create_query(name=u"Testing seåřċħing") - q3 = self.factory.create_query(name=u"Testing seå řċħ") - queries = 
list(models.Query.search(u"seåřċħ", [self.factory.default_group.id])) - - self.assertIn(q1, queries) - self.assertIn(q2, queries) - self.assertNotIn(q3, queries) - - def test_search_finds_in_description(self): - q1 = self.factory.create_query(description=u"Testing seåřċħ") - q2 = self.factory.create_query(description=u"Testing seåřċħing") - q3 = self.factory.create_query(description=u"Testing seå řċħ") - - queries = models.Query.search(u"seåřċħ", [self.factory.default_group.id]) - - self.assertIn(q1, queries) - self.assertIn(q2, queries) - self.assertNotIn(q3, queries) - - def test_search_by_id_returns_query(self): - q1 = self.factory.create_query(description="Testing search") - q2 = self.factory.create_query(description="Testing searching") - q3 = self.factory.create_query(description="Testing sea rch") - db.session.flush() - queries = models.Query.search(str(q3.id), [self.factory.default_group.id]) - - self.assertIn(q3, queries) - self.assertNotIn(q1, queries) - self.assertNotIn(q2, queries) - - def test_search_respects_groups(self): - other_group = models.Group(org=self.factory.org, name="Other Group") - db.session.add(other_group) - ds = self.factory.create_data_source(group=other_group) - - q1 = self.factory.create_query(description="Testing search", data_source=ds) - q2 = self.factory.create_query(description="Testing searching") - q3 = self.factory.create_query(description="Testing sea rch") - - queries = list(models.Query.search("Testing", [self.factory.default_group.id])) - - self.assertNotIn(q1, queries) - self.assertIn(q2, queries) - self.assertIn(q3, queries) - - queries = list(models.Query.search("Testing", [other_group.id, self.factory.default_group.id])) - self.assertIn(q1, queries) - self.assertIn(q2, queries) - self.assertIn(q3, queries) - - queries = list(models.Query.search("Testing", [other_group.id])) - self.assertIn(q1, queries) - self.assertNotIn(q2, queries) - self.assertNotIn(q3, queries) - - def test_returns_each_query_only_once(self): - other_group = self.factory.create_group() - second_group = self.factory.create_group() - ds = self.factory.create_data_source(group=other_group) - ds.add_group(second_group, False) - - q1 = self.factory.create_query(description="Testing search", data_source=ds) - db.session.flush() - queries = list(models.Query.search("Testing", [self.factory.default_group.id, other_group.id, second_group.id])) - - self.assertEqual(1, len(queries)) - - def test_save_updates_updated_at_field(self): - # This should be a test of ModelTimestampsMixin, but it's easier to test in context of existing model... 
:-\ - one_day_ago = utcnow().date() - datetime.timedelta(days=1) - q = self.factory.create_query(created_at=one_day_ago, updated_at=one_day_ago) - db.session.flush() - q.name = 'x' - db.session.flush() - self.assertNotEqual(q.updated_at, one_day_ago) - - -class QueryRecentTest(BaseTestCase): - def test_global_recent(self): - q1 = self.factory.create_query() - q2 = self.factory.create_query() - db.session.flush() - e = models.Event(org=self.factory.org, user=self.factory.user, action="edit", - object_type="query", object_id=q1.id) - db.session.add(e) - recent = models.Query.recent([self.factory.default_group.id]) - self.assertIn(q1, recent) - self.assertNotIn(q2, recent) - - def test_recent_excludes_drafts(self): - q1 = self.factory.create_query() - q2 = self.factory.create_query(is_draft=True) - - models.db.session.add_all([ - models.Event(org=self.factory.org, user=self.factory.user, - action="edit", object_type="query", - object_id=q1.id), - models.Event(org=self.factory.org, user=self.factory.user, - action="edit", object_type="query", - object_id=q2.id) - ]) - recent = models.Query.recent([self.factory.default_group.id]) - - self.assertIn(q1, recent) - self.assertNotIn(q2, recent) - - def test_recent_for_user(self): - q1 = self.factory.create_query() - q2 = self.factory.create_query() - db.session.flush() - e = models.Event(org=self.factory.org, user=self.factory.user, action="edit", - object_type="query", object_id=q1.id) - db.session.add(e) - recent = models.Query.recent([self.factory.default_group.id], user_id=self.factory.user.id) - - self.assertIn(q1, recent) - self.assertNotIn(q2, recent) - - recent = models.Query.recent([self.factory.default_group.id], user_id=self.factory.user.id + 1) - self.assertNotIn(q1, recent) - self.assertNotIn(q2, recent) - - def test_respects_groups(self): - q1 = self.factory.create_query() - ds = self.factory.create_data_source(group=self.factory.create_group()) - q2 = self.factory.create_query(data_source=ds) - db.session.flush() - models.Event(org=self.factory.org, user=self.factory.user, action="edit", - object_type="query", object_id=q1.id) - models.Event(org=self.factory.org, user=self.factory.user, action="edit", - object_type="query", object_id=q2.id) - - recent = models.Query.recent([self.factory.default_group.id]) - - self.assertIn(q1, recent) - self.assertNotIn(q2, recent) - - class ShouldScheduleNextTest(TestCase): def test_interval_schedule_that_needs_reschedule(self): now = utcnow()
Queries search should be case insensitive
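A self-contained sketch of the `ilike()` change in the patch above, using an in-memory SQLite database (on PostgreSQL `ilike()` compiles to `ILIKE`; on other backends SQLAlchemy emulates it with `lower()`):

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Query(Base):
    __tablename__ = 'queries'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Query(name='Testing search'))
session.commit()

# ilike() matches regardless of case, where a case-sensitive LIKE would miss it:
print(session.query(Query).filter(Query.name.ilike(u'%testing%')).count())  # 1
```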
2017-01-02T09:30:00
getredash/redash
1,521
getredash__redash-1521
[ "1519" ]
57e25786cd05b5047008992cf0a9f4618443cf0a
diff --git a/redash/query_runner/elasticsearch.py b/redash/query_runner/elasticsearch.py --- a/redash/query_runner/elasticsearch.py +++ b/redash/query_runner/elasticsearch.py @@ -123,8 +123,8 @@ def _get_query_mappings(self, url): if error: return mappings, error - for index_name in mappings_data: - index_mappings = mappings_data[index_name] + for index_name in mappings: + index_mappings = mappings[index_name] for m in index_mappings.get("mappings", {}): for property_name in index_mappings["mappings"][m]["properties"]: property_data = index_mappings["mappings"][m]["properties"][property_name]
Elasticsearch query runner: mappings_data is not defined ### Issue Summary When executing an elasticsearch query, a message appears that "mappings_data is not defined". ### Steps to Reproduce 1. Clone the latest version (master branch) from github and install the dev environment. 2. Add an elasticsearch instance and try to execute a query, e.g. `{"query": {"match_all": {}}}` 3. A message appears that the query was not successful (mappings_data is not defined) The bug is quite obvious, since https://github.com/getredash/redash/blob/3bc98fba5bd2b6b723ba0d0ce3007a651e8c2acb/redash/query_runner/elasticsearch.py#L126 refers to mappings_data, which is not defined anywhere in the same file. Unfortunately I didn't find out what this mappings_data should contain. ### Technical details: * Redash Version: master branch * Browser/OS: Linux/Chrome
2017-01-13T18:51:48
getredash/redash
1,563
getredash__redash-1563
[ "1552" ]
dd6028384dc2bf20376c48b42e63129de98558eb
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -1316,7 +1316,7 @@ class Event(db.Model): action = Column(db.String(255)) object_type = Column(db.String(255)) object_id = Column(db.String(255), nullable=True) - additional_properties = Column(db.Text, nullable=True) + additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={}) created_at = Column(db.DateTime(True), default=db.func.now()) __tablename__ = 'events' @@ -1324,6 +1324,17 @@ class Event(db.Model): def __unicode__(self): return u"%s,%s,%s,%s" % (self.user_id, self.action, self.object_type, self.object_id) + def to_dict(self): + return { + 'org_id': self.org_id, + 'user_id': self.user_id, + 'action': self.action, + 'object_type': self.object_type, + 'object_id': self.object_id, + 'additional_properties': self.additional_properties, + 'created_at': self.created_at.isoformat() + } + @classmethod def record(cls, event): org_id = event.pop('org_id') @@ -1333,11 +1344,10 @@ def record(cls, event): object_id = event.pop('object_id', None) created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp')) - additional_properties = json.dumps(event) event = cls(org_id=org_id, user_id=user_id, action=action, object_type=object_type, object_id=object_id, - additional_properties=additional_properties, + additional_properties=event, created_at=created_at) db.session.add(event) return event diff --git a/redash/tasks/general.py b/redash/tasks/general.py --- a/redash/tasks/general.py +++ b/redash/tasks/general.py @@ -10,14 +10,18 @@ @celery.task(name="redash.tasks.record_event", base=BaseTask) -def record_event(event): - original_event = event.copy() - models.Event.record(event) +def record_event(raw_event): + event = models.Event.record(raw_event) models.db.session.commit() + for hook in settings.EVENT_REPORTING_WEBHOOKS: logger.debug("Forwarding event to: %s", hook) try: - response = requests.post(hook, original_event) + data = { + "schema": "iglu:io.redash.webhooks/event/jsonschema/1-0-0", + "data": event.to_dict() + } + response = requests.post(hook, json=data) if response.status_code != 200: logger.error("Failed posting to %s: %s", hook, response.content) except Exception:
diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -409,7 +409,7 @@ def test_records_additional_properties(self): event = models.Event.record(raw_event) - self.assertDictEqual(json.loads(event.additional_properties), additional_properties) + self.assertDictEqual(event.additional_properties, additional_properties) class TestWidgetDeleteInstance(BaseTestCase):
[Feature Request] Update the Redash webhook payload to be a Snowplow-compatible JSON This is a proposal to take the existing Redash webhook and make some minor changes to make it compatible with [Snowplow](https://github.com/snowplow/snowplow). As well as allowing a Redash+Snowplow user to warehouse all of their Redash activity in Snowplow for audit and compliance, this has the additional benefit that it ties the Redash webhook to an agreed [JSON Schema format](http://json-schema.org/). ### JSON Schema We have published a JSON Schema for the Redash webhook as part of Iglu Central, here: https://github.com/snowplow/iglu-central/blob/master/schemas/io.redash.webhooks/event/jsonschema/1-0-0 The exact naming of the webhook schema (`io.redash.webhooks/event`) was checked with @arikfr and should allow Redash to add further more-specific webhooks in the future as needed. The schema has been carefully specified so that the generated Redshift table maps very closely onto Redash's underlying event table: https://github.com/snowplow/iglu-central/blob/master/sql/io.redash.webhooks/event_1.sql ### Steps to make the Redash webhook compatible Currently the Redash webhook is a POST request with the event data encoded in the body as form data. We want to change this to JSON, but specifically Iglu-compatible self-describing JSON (Snowplow's schema system is called [Iglu](https://github.com/snowplow/iglu)). In place of the current POST payload, the Redash webhook should look like this: ```json { "schema": "iglu:io.redash.webhooks/event/jsonschema/1-0-0", "data": { "id": 23, "user_id": null, "action": "DELETE", ... } } ``` Notes: * The event's integer types should be kept as numerics in JSON * The `created_at` field should be a valid ISO8601 date-time We can easily check any test webhook JSON against the JSON Schema to verify that they match (if there is an error in the JSON Schema, we can patch it in a new Iglu Central release). ### Outcome When this has been implemented, it will be possible to warehouse all Redash webhooks in Snowplow **with zero coding**, using Snowplow's built-in [Iglu webhook adapter](https://github.com/snowplow/snowplow/wiki/Iglu-webhook-adapter). There are some really interesting use cases here: * Full audit log for Redash in Snowplow (allowing the events table in Redash to be regularly truncated?) * Real-time detection (ML-driven?) of a bad actor based on their SQL queries (e.g. PII theft, corporate espionage)
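A sketch of the resulting webhook call (the hook URL and field values are placeholders): the event dict is wrapped in the Iglu self-describing envelope and POSTed as JSON rather than form data, which is what `requests.post(..., json=data)` in the patch above produces.

```python
import requests

payload = {
    "schema": "iglu:io.redash.webhooks/event/jsonschema/1-0-0",
    "data": {
        "org_id": 1,
        "user_id": 23,           # integer types stay numeric in the JSON
        "action": "view",
        "object_type": "query",
        "object_id": "42",
        "created_at": "2017-02-02T08:22:25",  # valid ISO8601 date-time
    },
}
# The json= kwarg serializes the body and sets Content-Type: application/json.
response = requests.post("https://collector.example.com/iglu-webhook", json=payload)
```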
2017-02-02T08:22:25
getredash/redash
1,597
getredash__redash-1597
[ "1571" ]
d1b0a9580dd67d07716fc0f9f067ed42861f3a29
diff --git a/redash/query_runner/mssql.py b/redash/query_runner/mssql.py --- a/redash/query_runner/mssql.py +++ b/redash/query_runner/mssql.py @@ -94,7 +94,7 @@ def __init__(self, configuration): def _get_tables(self, schema): query = """ SELECT table_schema, table_name, column_name - FROM information_schema.columns + FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema NOT IN ('guest','INFORMATION_SCHEMA','sys','db_owner','db_accessadmin' ,'db_securityadmin','db_ddladmin','db_backupoperator','db_datareader' ,'db_datawriter','db_denydatareader','db_denydatawriter'
mssql get_schema() fails on case sensitive object name setups ### Issue Summary get_schema() for MS-SQL fails on case sensitive object name setups. ### Steps to Reproduce 1. Define datasource for case-sensitive-object-name MSSQL database instance. ### Technical details: * Redash Version: 0.12.0 ### How to fix change ` SELECT table_schema, table_name, column_name FROM information_schema.columns WHERE ...` to ` SELECT table_schema, table_name, column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE...` fixed my case. ### Where to go from here I'm not really sure if this is the best way to fix this issue. If any MSSQL instance can be case-sensitive AND has this tablename in lower case, then this fix will not work. But I don't know if this can happen.
2017-02-14T00:41:23
getredash/redash
1,703
getredash__redash-1703
[ "1697" ]
53268989c5fe5c619b399b488094c501cb286af4
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -16,7 +16,7 @@ from sqlalchemy.inspection import inspect from sqlalchemy.types import TypeDecorator from sqlalchemy.ext.mutable import Mutable -from sqlalchemy.orm import object_session, backref +from sqlalchemy.orm import object_session, backref, joinedload, subqueryload # noinspection PyUnresolvedReferences from sqlalchemy.orm.exc import NoResultFound from sqlalchemy import or_ @@ -806,12 +806,12 @@ def create(cls, **kwargs): @classmethod def all_queries(cls, group_ids, user_id=None, drafts=False): - q = (cls.query.join(User, Query.user_id == User.id) - .outerjoin(QueryResult) + q = (cls.query + .options(joinedload(Query.user), + joinedload(Query.latest_query_data).load_only('runtime', 'retrieved_at')) .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter(Query.is_archived == False) .filter(DataSourceGroup.group_id.in_(group_ids))\ - .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime) .order_by(Query.created_at.desc())) if not drafts: @@ -826,8 +826,7 @@ def by_user(cls, user): @classmethod def outdated_queries(cls): queries = (db.session.query(Query) - .join(QueryResult) - .join(DataSource) + .options(joinedload(Query.latest_query_data).load_only('retrieved_at')) .filter(Query.schedule != None) .order_by(Query.id)) @@ -836,7 +835,7 @@ def outdated_queries(cls): for query in queries: if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule, query.schedule_failures): - key = "{}:{}".format(query.query_hash, query.data_source.id) + key = "{}:{}".format(query.query_hash, query.data_source_id) outdated_queries[key] = query return outdated_queries.values() @@ -862,12 +861,11 @@ def search(cls, term, group_ids, include_drafts=False): Query.data_source_id == DataSourceGroup.data_source_id) .filter(where)).distinct() - return Query.query.join(User, Query.user_id == User.id).filter( - Query.id.in_(query_ids)) + return Query.query.options(joinedload(Query.user)).filter(Query.id.in_(query_ids)) @classmethod def recent(cls, group_ids, user_id=None, limit=20): - query = (cls.query.join(User, Query.user_id == User.id) + query = (cls.query.options(subqueryload(Query.user)) .filter(Event.created_at > (db.func.current_date() - 7)) .join(Event, Query.id == Event.object_id.cast(db.Integer)) .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) @@ -879,7 +877,7 @@ def recent(cls, group_ids, user_id=None, limit=20): DataSourceGroup.group_id.in_(group_ids), or_(Query.is_draft == False, Query.user_id == user_id), Query.is_archived == False) - .group_by(Event.object_id, Query.id, User.id) + .group_by(Event.object_id, Query.id) .order_by(db.desc(db.func.count(0)))) if user_id: @@ -1069,12 +1067,11 @@ class Alert(TimestampMixin, db.Model): @classmethod def all(cls, group_ids): - # TODO: there was a join with user here to prevent N+1 queries. need to revisit this. return db.session.query(Alert)\ + .options(joinedload(Alert.user), joinedload(Alert.query_rel))\ .join(Query)\ .join(DataSourceGroup, DataSourceGroup.data_source_id==Query.data_source_id)\ - .filter(DataSourceGroup.group_id.in_(group_ids))\ - .group_by(Alert) + .filter(DataSourceGroup.group_id.in_(group_ids)) @classmethod def get_by_id_and_org(cls, id, org):
Queries list page is slow when some queries have large query results ### Issue Summary The queries listing page (`/queries`) is slow when some queries have large query results, because for each query on the list it executes a query that selects all columns from query_results. ### Steps to Reproduce 1. Create a query which will return a large result set 2. Execute it 3. Access `/queries?page=1` ### Technical details: * Redash Version: 1.0.0-rc2 * Browser/OS: Chrome 56 * How did you install Redash: docker-compose (used default docker-compose.yml for testing)
Thanks! This also uncovers a nasty N+1 queries issue there.
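A sketch of the eager-loading pattern the fix uses (it assumes the `Query` model and relationships from the patch above): `joinedload()` pulls the related rows in the same SELECT instead of issuing one extra query per row, and `load_only()` keeps the potentially huge query-result data column out of the listing query.

```python
from sqlalchemy.orm import joinedload

q = (Query.query
     .options(joinedload(Query.user),
              joinedload(Query.latest_query_data)
              .load_only('runtime', 'retrieved_at'))
     .order_by(Query.created_at.desc()))
# Iterating q now touches .user and .latest_query_data without extra queries.
```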
2017-04-02T11:03:48
getredash/redash
1,759
getredash__redash-1759
[ "1664", "1664" ]
24f3e071e393d1934d58929e04c9bf2560261e39
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -653,7 +653,7 @@ def make_csv_content(self): s = cStringIO.StringIO() query_data = json.loads(self.data) - writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']]) + writer = csv.DictWriter(s, extrasaction="ignore", fieldnames=[col['name'] for col in query_data['columns']]) writer.writer = utils.UnicodeWriter(s) writer.writeheader() for row in query_data['rows']:
Cannot download CSV from MongoDB Results when rows have missing columns This works fine in the Excel download however. ``` File "/redash/handlers/query_results.py", line 172, in get response = self.make_csv_response(query_result) File "/redash/handlers/query_results.py", line 199, in make_csv_response writer.writerow(row) File "/usr/lib64/python2.7/csv.py", line 148, in writerow return self.writer.writerow(self._dict_to_list(rowdict)) File "/usr/lib64/python2.7/csv.py", line 144, in _dict_to_list ", ".join(wrong_fields)) ValueError: dict contains fields not in fieldnames: _id ```
The Excel serializer knows to handle missing columns, while the CSV one uses a dict writer which expects all the fields to be present in every row. (btw, it's not `_id` specific)
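A self-contained reproduction and the one-line fix (shown with Python 3's `csv`; the behavior is the same in 2.7): `DictWriter` defaults to `extrasaction="raise"`, so any row key missing from `fieldnames`, such as a stray `_id` from a MongoDB document, raises `ValueError`.

```python
import csv
import io

columns = ["name"]                               # columns from the result metadata
rows = [{"name": "a"}, {"name": "b", "_id": 1}]  # second row carries an extra key

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=columns, extrasaction="ignore")
writer.writeheader()
for row in rows:
    writer.writerow(row)  # without extrasaction="ignore" the second row raises ValueError
print(buf.getvalue())
```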
2017-05-09T15:00:32
getredash/redash
1,792
getredash__redash-1792
[ "1725" ]
764e347b74f6bfd8cd03ed3ef4c035c6b04e2c76
diff --git a/redash/destinations/email.py b/redash/destinations/email.py --- a/redash/destinations/email.py +++ b/redash/destinations/email.py @@ -40,16 +40,15 @@ def notify(self, alert, query, user, new_state, app, host, options): logging.debug("Notifying: %s", recipients) try: - with app.app_context(): - alert_name = alert.name.encode('utf-8', 'ignore') - state = new_state.upper() - subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) - message = Message( - recipients=recipients, - subject=subject_template.format(alert_name=alert_name, state=state), - html=html - ) - mail.send(message) + alert_name = alert.name.encode('utf-8', 'ignore') + state = new_state.upper() + subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) + message = Message( + recipients=recipients, + subject=subject_template.format(alert_name=alert_name, state=state), + html=html + ) + mail.send(message) except Exception: logging.exception("Mail send error.") diff --git a/redash/tasks/general.py b/redash/tasks/general.py --- a/redash/tasks/general.py +++ b/redash/tasks/general.py @@ -1,9 +1,10 @@ import requests + from celery.utils.log import get_task_logger from flask_mail import Message -from redash.worker import celery +from redash import mail, models, settings from redash.version_check import run_version_check -from redash import models, mail, settings +from redash.worker import celery logger = get_task_logger(__name__) @@ -50,12 +51,11 @@ def send_mail(to, subject, html, text): from redash.wsgi import app try: - with app.app_context(): - message = Message(recipients=to, - subject=subject, - html=html, - body=text) + message = Message(recipients=to, + subject=subject, + html=html, + body=text) - mail.send(message) + mail.send(message) except Exception: logger.exception('Failed sending message: %s', message.subject)
Alert notifications fail (sometimes) with a SQLAlchemy error ### Issue Summary After migrating to 1.0.1, alert notifications reach Slack only sporadically (I cannot pinpoint exactly the conditions under which it happens). ### Steps to Reproduce 1. Create a failing alert for an existing query 2. Add your email and a Slack destination to your alert (the order seems to matter: add the email destination first, then the Slack destination) 3. Refresh the query such that the alert triggers 4. You will receive an alert email but no Slack alert 5. In the back-end `celery_error.log` you will see an error message like ``` [2017-04-18 13:13:58,184: ERROR/Worker-4] redash.tasks.check_alerts_for_query[fb4fee06-3318-44f7-8aaf-7984d792a793]: Error with processing destination Traceback (most recent call last): File "/opt/redash/redash.1.0.1.b2833/redash/tasks/alerts.py", line 23, in notify_subscriptions subscription.notify(alert, alert.query_rel, subscription.user, new_state, current_app, host) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 237, in __get__ return self.impl.get(instance_state(instance), dict_) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 584, in get value = self.callable_(state, passive) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/strategies.py", line 530, in _load_for_state (orm_util.state_str(state), self.key) DetachedInstanceError: Parent instance <AlertSubscription at 0x7fcd934f04d0> is not bound to a Session; lazy load operation of attribute 'user' cannot proceed ``` I think this error is caused by the `subscription.user` call. My guess is that it is eventually caused by [`models.db.session.commit()`](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L51). This "might" be connected to #1706. ### Technical details: * Redash Version: 1.0.1+b2833 * Browser/OS: (not relevant, error is in the backend) * How did you install Redash: ec2 from ami (in 2015), recently upgraded from 0.12 to 1.0.1
But we initialize SQLAlchemy with `expire_on_commit` set to `False`: ``` db = SQLAlchemy(session_options={ 'expire_on_commit': False }) ``` ([source](https://github.com/getredash/redash/blob/master/redash/models.py#L35)) It should take care of such issues :| You provided detailed steps to reproduce, so I will try to write a test for this and see what's going on. This is definitely still happening, even after moving from an old EC2 instance to a Docker-based installation. Today I will try to narrow the bug down and reproduce it. Also seeing this on 1.0.3+b2850 on Docker. @danielerapati @copester Do you see it happen on the same line / same objects as in the issue description, or a different one?
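A self-contained reproduction of the failure mode under discussion (the schema here is invented for illustration): `expire_on_commit=False` only protects attributes that were already loaded; a relationship that was never loaded still needs a live session, so touching it on a detached instance raises `DetachedInstanceError`.

```python
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.orm.exc import DetachedInstanceError

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)

class AlertSubscription(Base):
    __tablename__ = 'alert_subscriptions'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    user = relationship(User)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine, expire_on_commit=False)()
session.add(AlertSubscription(user=User()))
session.commit()
session.expunge_all()

sub = session.query(AlertSubscription).first()  # 'user' not loaded yet
session.close()                                 # detaches the instance
try:
    sub.user
except DetachedInstanceError as exc:
    print(exc)  # lazy load operation of attribute 'user' cannot proceed
```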
2017-05-29T13:43:21
getredash/redash
1,847
getredash__redash-1847
[ "1842" ]
e1eeb67025f91717d229ccb41bc09708b956020d
diff --git a/redash/handlers/destinations.py b/redash/handlers/destinations.py --- a/redash/handlers/destinations.py +++ b/redash/handlers/destinations.py @@ -2,10 +2,11 @@ from flask_restful import abort from redash import models +from redash.destinations import (destinations, + get_configuration_schema_for_destination_type) +from redash.handlers.base import BaseResource from redash.permissions import require_admin -from redash.destinations import destinations, get_configuration_schema_for_destination_type from redash.utils.configuration import ConfigurationContainer, ValidationError -from redash.handlers.base import BaseResource class DestinationTypeListResource(BaseResource): @@ -30,6 +31,8 @@ def post(self, destination_id): abort(400) try: + destination.type = req['type'] + destination.name = req['name'] destination.options.set_schema(schema) destination.options.update(req['options']) models.db.session.add(destination) @@ -37,9 +40,6 @@ def post(self, destination_id): except ValidationError: abort(400) - destination.type = req['type'] - destination.name = req['name'] - return destination.to_dict(all=True) @require_admin diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -8,12 +8,20 @@ import logging import time -from funcy import project - import xlsxwriter from flask_login import AnonymousUserMixin, UserMixin from flask_sqlalchemy import SQLAlchemy +from funcy import project from passlib.apps import custom_app_context as pwd_context +from sqlalchemy import or_ +from sqlalchemy.dialects import postgresql +from sqlalchemy.event import listens_for +from sqlalchemy.ext.mutable import Mutable +from sqlalchemy.inspection import inspect +from sqlalchemy.orm import backref, joinedload, object_session, subqueryload +from sqlalchemy.orm.exc import NoResultFound # noqa: F401 +from sqlalchemy.types import TypeDecorator + from redash import redis_connection, utils from redash.destinations import (get_configuration_schema_for_destination_type, get_destination) @@ -23,14 +31,6 @@ get_query_runner) from redash.utils import generate_token, json_dumps from redash.utils.configuration import ConfigurationContainer -from sqlalchemy import or_ -from sqlalchemy.dialects import postgresql -from sqlalchemy.event import listens_for -from sqlalchemy.ext.mutable import Mutable -from sqlalchemy.inspection import inspect -from sqlalchemy.orm import backref, joinedload, object_session, subqueryload -from sqlalchemy.orm.exc import NoResultFound # noqa: F401 -from sqlalchemy.types import TypeDecorator db = SQLAlchemy(session_options={ 'expire_on_commit': False @@ -1481,8 +1481,9 @@ class NotificationDestination(BelongsToOrgMixin, db.Model): user = db.relationship(User, backref="notification_destinations") name = Column(db.String(255)) type = Column(db.String(255)) - options = Column(Configuration) + options = Column(ConfigurationContainer.as_mutable(Configuration)) created_at = Column(db.DateTime(True), default=db.func.now()) + __tablename__ = 'notification_destinations' __table_args__ = (db.Index('notification_destinations_org_id_name', 'org_id', 'name', unique=True),)
diff --git a/tests/handlers/test_destinations.py b/tests/handlers/test_destinations.py --- a/tests/handlers/test_destinations.py +++ b/tests/handlers/test_destinations.py @@ -1,4 +1,5 @@ from tests import BaseTestCase + from redash.models import NotificationDestination @@ -55,10 +56,14 @@ def test_post(self): data = { 'name': 'updated', 'type': d.type, - 'options': d.options.to_dict() + 'options': {"url": "https://www.slack.com/updated"} } - rv = self.make_request('post', '/api/destinations/{}'.format(d.id), user=self.factory.create_admin(), data=data) - self.assertEqual(rv.status_code, 200) - self.assertEqual(NotificationDestination.query.get(d.id).name, data['name']) + with self.app.app_context(): + rv = self.make_request('post', '/api/destinations/{}'.format(d.id), user=self.factory.create_admin(), data=data) + + self.assertEqual(rv.status_code, 200) + d = NotificationDestination.query.get(d.id) + self.assertEqual(d.name, data['name']) + self.assertEqual(d.options['url'], data['options']['url'])
Updating Alert Destination email field does not change the value in the database ### Issue Summary Updating the email address list for an Alert Destination does not update the database. ### Steps to Reproduce 1. Create an email alert destination group, refresh page. 2. Change one or more emails, save. Refresh page. 3. The email group is the same as it was when created. 4. Confirm via a DB query (```select * from notification_destinations;```) ### Technical details: * Redash Version: 1.0.3+b2850 * Browser/OS: Various, consistently behaves the same * How did you install Redash: ```https://raw.githubusercontent.com/getredash/redash/master/bin/upgrade``` More details: Checking the API log for the update (when I click the save button) things look ok: ``` [2017-06-23 21:17:26,311][PID:25417][INFO][metrics] method=POST path=/api/destinations/1 endpoint=destination status=200 content_type=application/json content_length=144 duration=7.94 query_count=4 query_duration=2.80 ``` Enabling statement logging in PostgreSQL shows this happening (with sanitized emails): ``` 2017-06-23 21:31:54.550 UTC [459] redash@redash LOG: statement: BEGIN 2017-06-23 21:31:54.551 UTC [459] redash@redash LOG: statement: SELECT organizations.updated_at AS organizations_updated_at, organizations.created_at AS organizations_created_at, organizations.id AS organizations_id, organizations.name AS organizations_name, organizations.slug AS organizations_slug, organizations.settings AS organizations_settings FROM organizations WHERE organizations.slug = 'default' LIMIT 1 2017-06-23 21:31:54.552 UTC [459] redash@redash LOG: statement: SELECT users.groups AS users_groups, users.updated_at AS users_updated_at, users.created_at AS users_created_at, users.id AS users_id, users.org_id AS users_org_id, users.name AS users_name, users.email AS users_email, users.password_hash AS users_password_hash, users.api_key AS users_api_key FROM users WHERE users.id = '1' AND 1 = users.org_id 2017-06-23 21:31:54.554 UTC [459] redash@redash LOG: statement: SELECT groups.id AS groups_id, groups.org_id AS groups_org_id, groups.type AS groups_type, groups.name AS groups_name, groups.permissions AS groups_permissions, groups.created_at AS groups_created_at FROM groups WHERE groups.id IN (1, 2, 3) 2017-06-23 21:31:54.555 UTC [459] redash@redash LOG: statement: SELECT notification_destinations.id AS notification_destinations_id, notification_destinations.org_id AS notification_destinations_org_id, notification_destinations.user_id AS notification_destinations_user_id, notification_destinations.name AS notification_destinations_name, notification_destinations.type AS notification_destinations_type, notification_destinations.options AS notification_destinations_options, notification_destinations.created_at AS notification_destinations_created_at FROM notification_destinations WHERE notification_destinations.id = '1' AND 1 = notification_destinations.org_id 2017-06-23 21:31:54.557 UTC [459] redash@redash LOG: statement: COMMIT ``` In other words, it does not appear to even be attempting to update the addresses.
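A self-contained sketch of why the fix wraps the column with `as_mutable()` (the `PseudoJSON` stand-in mimics redash's dict-serialized-as-text type): without a `Mutable` wrapper, SQLAlchemy only notices reassignment of a column attribute, so in-place edits like `options.update(...)` never mark the row dirty and the UPDATE is silently skipped.

```python
import json

from sqlalchemy import Column, Integer, Text, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import sessionmaker
from sqlalchemy.types import TypeDecorator

Base = declarative_base()

class PseudoJSON(TypeDecorator):
    """Stand-in for redash's PseudoJSON: a dict stored as serialized text."""
    impl = Text

    def process_bind_param(self, value, dialect):
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        return json.loads(value)

class Destination(Base):
    __tablename__ = 'destinations'
    id = Column(Integer, primary_key=True)
    options = Column(MutableDict.as_mutable(PseudoJSON))  # change-tracked

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
d = Destination(options={'addresses': 'old@example.com'})
session.add(d)
session.commit()

d.options['addresses'] = 'new@example.com'  # in-place mutation, now detected
session.commit()
session.expire_all()
print(session.query(Destination).one().options)  # {'addresses': 'new@example.com'}
```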
2017-06-28T13:02:36
getredash/redash
1,856
getredash__redash-1856
[ "1855" ]
0fb0ba64731ab06ff932fe2f1bd5bf3904b37d7e
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py --- a/redash/handlers/dashboards.py +++ b/redash/handlers/dashboards.py @@ -129,7 +129,7 @@ def post(self, dashboard_slug): require_object_modify_permission(dashboard, self.current_user) updates = project(dashboard_properties, ('name', 'layout', 'version', - 'is_draft')) + 'is_draft', 'dashboard_filters_enabled')) # SQLAlchemy handles the case where a concurrent transaction beats us # to the update. But we still have to make sure that we're not starting
Make dashboard level filters a feature available to non-admins

### Issue Summary

Currently, to enable dashboard level filters you have to be an administrator and change a flag manually in the dashboards table. It would be good if this was just on by default, or an option that users could change through the front end.

### Technical details:

* Redash Version: 1.0.3
* Browser/OS: Chrome
* How did you install Redash: Amazon via the AMI
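The fix works because the handler whitelists which posted fields may reach the model. Below is a minimal standalone sketch (the field names come from the patch above; the payload values are made up) showing how `funcy.project` implements that whitelist:

```python
from funcy import project

# Simulated JSON body posted by the dashboard editor (values are made up).
dashboard_properties = {
    'name': 'Sales KPIs',
    'layout': '[]',
    'version': 2,
    'is_draft': False,
    'dashboard_filters_enabled': True,  # newly whitelisted by the patch
    'id': 999,                          # ignored: not in the whitelist
}

# project() keeps only the whitelisted keys, silently dropping everything else.
updates = project(dashboard_properties, ('name', 'layout', 'version',
                                         'is_draft', 'dashboard_filters_enabled'))
print(updates)  # 'id' is gone; 'dashboard_filters_enabled' now survives the update
```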
2017-06-29T07:58:03
getredash/redash
1870
getredash__redash-1870
[ "1869" ]
5bb5f46c022e8f2fb51f198c1d2567e580efbc16
diff --git a/redash/__init__.py b/redash/__init__.py --- a/redash/__init__.py +++ b/redash/__init__.py @@ -1,4 +1,5 @@ import os +import sys import logging import urlparse import redis @@ -21,7 +22,7 @@ def setup_logging(): - handler = logging.StreamHandler() + handler = logging.StreamHandler(sys.stdout if settings.LOG_STDOUT else sys.stderr) formatter = logging.Formatter('[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s') handler.setFormatter(formatter) logging.getLogger().addHandler(handler) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -134,6 +134,7 @@ def all_settings(): SESSION_COOKIE_SECURE = parse_boolean(os.environ.get("REDASH_SESSION_COOKIE_SECURE") or str(ENFORCE_HTTPS)) LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO") +LOG_STDOUT = parse_boolean(os.environ.get('REDASH_LOG_STDOUT', 'false')) # Mail settings: MAIL_SERVER = os.environ.get('REDASH_MAIL_SERVER', 'localhost')
Log lines go to `stderr` and not to `stdout`

### Issue Summary

Running Redash (dockerized) logs everything to `stderr`. I think this is the default for Python's `logging` module, although I'm not entirely sure. To me it makes sense that `INFO` level log lines should go to `stdout`, so having some kind of option to configure that would be nice.

### Steps to Reproduce

1. Run Redash in Docker and redirect both `stderr` and `stdout` to separate files: `docker run --rm redash/redash server 1> ~/out.log 2> ~/error.log`
2. The `out.log` file remains empty, whereas all the log lines go to `error.log`.

### Technical details:

* Redash Version: 1.0.3
* Browser/OS: -
* How did you install Redash: docker
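For context, a standalone sketch of the approach the patch takes: choose the root handler's stream once at startup from an environment variable. `REDASH_LOG_STDOUT` and the format string come from the patch; everything else is the standard library.

```python
import logging
import os
import sys

# Mirrors the patch: pick the stream once at startup based on an env var.
log_stdout = os.environ.get('REDASH_LOG_STDOUT', 'false').lower() == 'true'

handler = logging.StreamHandler(sys.stdout if log_stdout else sys.stderr)
handler.setFormatter(logging.Formatter(
    '[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s'))

root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

root.info("goes to stdout when REDASH_LOG_STDOUT=true, stderr otherwise")
```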
2017-07-06T14:47:08
getredash/redash
1899
getredash__redash-1899
[ "1843", "1843" ]
1b15ea8af9b1af43abbaf874ef5edcc44527adb8
diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py --- a/redash/query_runner/oracle.py +++ b/redash/query_runner/oracle.py @@ -14,18 +14,14 @@ cx_Oracle.LOB: TYPE_STRING, cx_Oracle.FIXED_CHAR: TYPE_STRING, cx_Oracle.FIXED_NCHAR: TYPE_STRING, - cx_Oracle.FIXED_UNICODE: TYPE_STRING, cx_Oracle.INTERVAL: TYPE_DATETIME, - cx_Oracle.LONG_NCHAR: TYPE_STRING, cx_Oracle.LONG_STRING: TYPE_STRING, - cx_Oracle.LONG_UNICODE: TYPE_STRING, cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT, cx_Oracle.NCHAR: TYPE_STRING, cx_Oracle.NUMBER: TYPE_FLOAT, cx_Oracle.ROWID: TYPE_INTEGER, cx_Oracle.STRING: TYPE_STRING, cx_Oracle.TIMESTAMP: TYPE_DATETIME, - cx_Oracle.UNICODE: TYPE_STRING, }
Oracle: add support for cx_Oracle v5.3.

### Issue Summary

`cx_Oracle.FIXED_UNICODE: TYPE_STRING` — the variable FIXED_UNICODE was removed in release 5.3 of cx_Oracle: https://github.com/oracle/python-cx_Oracle/blob/ae45152339f0e9b46a93d5aea74f3bc16e775060/doc/src/releasenotes.rst#version-53-march-2017

> Removed deprecated types UNICODE, FIXED_UNICODE and LONG_UNICODE (use NCHAR, FIXED_NCHAR and LONG_NCHAR instead).

but it is still referenced in https://github.com/getredash/redash/blob/master/redash/query_runner/oracle.py#L17

### Steps to Reproduce

Our Dockerfile:

    FROM redash/redash:2.0.0.b2924
    USER root
    RUN apt-get update
    RUN apt-get -y --no-install-recommends install alien
    COPY "oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm" "/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm"
    RUN alien -i "/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm"
    RUN echo /usr/lib/oracle/12.2/client64/lib > /etc/ld.so.conf.d/oracle-instantclient.conf && ldconfig
    RUN apt-get install -y libaio1
    RUN pip install cx_Oracle --pre
    EXPOSE 5000
    ENTRYPOINT ["/app/bin/docker-entrypoint"]
    CMD ["server"]

Output:

```
AttributeError: 'module' object has no attribute 'FIXED_UNICODE'
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/arbiter.py", line 515, in spawn_worker
    worker.init_process()
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py", line 122, in init_process
    self.load_wsgi()
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py", line 130, in load_wsgi
    self.wsgi = self.app.wsgi()
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/base.py", line 67, in wsgi
    self.callable = self.load()
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py", line 65, in load
    return self.load_wsgiapp()
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py", line 52, in load_wsgiapp
    return util.import_app(self.app_uri)
  File "/usr/local/lib/python2.7/dist-packages/gunicorn/util.py", line 357, in import_app
    __import__(module)
  File "/app/redash/__init__.py", line 68, in <module>
    import_query_runners(settings.QUERY_RUNNERS)
  File "/app/redash/query_runner/__init__.py", line 175, in import_query_runners
    __import__(runner_import)
  File "/app/redash/query_runner/oracle.py", line 17, in <module>
    cx_Oracle.FIXED_UNICODE: TYPE_STRING,
AttributeError: 'module' object has no attribute 'FIXED_UNICODE'
```

### Technical details:

* Redash Version: Docker Image redash/redash:2.0.0.b2924
* How did you install Redash: Dockerfile
The Oracle query runner was written with [`cx_Oracle==5.2`](https://github.com/getredash/redash/blob/master/requirements_oracle_ds.txt#L4).

A description of how to enable and use Oracle data sources would be great.
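The patch above simply drops the constants that 5.3 removed. If you needed to support both cx_Oracle 5.2 and 5.3 at once, a version-tolerant sketch (an assumption, not what the patch does) could probe for each constant instead:

```python
import cx_Oracle

# Stand-ins for redash.query_runner's type constants (simplified here).
TYPE_STRING, TYPE_DATETIME, TYPE_FLOAT, TYPE_INTEGER = (
    'string', 'datetime', 'float', 'integer')

_candidates = {
    'DATETIME': TYPE_DATETIME,
    'CLOB': TYPE_STRING,
    'LOB': TYPE_STRING,
    'FIXED_CHAR': TYPE_STRING,
    'FIXED_NCHAR': TYPE_STRING,
    'FIXED_UNICODE': TYPE_STRING,  # gone in 5.3
    'INTERVAL': TYPE_DATETIME,
    'LONG_NCHAR': TYPE_STRING,     # gone in 5.3
    'LONG_STRING': TYPE_STRING,
    'LONG_UNICODE': TYPE_STRING,   # gone in 5.3
    'NATIVE_FLOAT': TYPE_FLOAT,
    'NCHAR': TYPE_STRING,
    'NUMBER': TYPE_FLOAT,
    'ROWID': TYPE_INTEGER,
    'STRING': TYPE_STRING,
    'TIMESTAMP': TYPE_DATETIME,
    'UNICODE': TYPE_STRING,        # gone in 5.3
}

# Keep only the constants the installed cx_Oracle build actually exposes.
types_map = {getattr(cx_Oracle, name): mapped
             for name, mapped in _candidates.items()
             if hasattr(cx_Oracle, name)}
```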
2017-07-28T11:06:49
getredash/redash
1910
getredash__redash-1910
[ "1056" ]
f80951457dbe5122b60072b30042377d80f465f4
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -1,7 +1,6 @@ import json import logging import select -import sys import psycopg2 @@ -81,15 +80,6 @@ def configuration_schema(cls): def type(cls): return "pg" - def __init__(self, configuration): - super(PostgreSQL, self).__init__(configuration) - - values = [] - for k, v in self.configuration.iteritems(): - values.append("{}={}".format(k, v)) - - self.connection_string = " ".join(values) - def _get_definitions(self, schema, query): results, error = self.run_query(query, None) @@ -136,7 +126,13 @@ def _get_tables(self, schema): return schema.values() def run_query(self, query, user): - connection = psycopg2.connect(self.connection_string, async=True) + connection = psycopg2.connect(user=self.configuration.get('user'), + password=self.configuration.get('password'), + host=self.configuration.get('host'), + port=self.configuration.get('port'), + dbname=self.configuration.get('dbname'), + async=True) + _wait(connection, timeout=10) cursor = connection.cursor()
PostgreSQL passwords with spaces are not supported

The connection info string built in /opt/redash/current/redash/query_runner/pg.py can't contain spaces, so something like `host=hostname password=my secret password dbname=awesome_db_name user=postgres` will throw the error `OperationalError: missing "=" after "secret" in connection info string`.

The error happens at [pg.py#L117](https://github.com/getredash/redash/blob/fbb81b62d6c79f8e9381ac8973ac34739bfa6b41/redash/query_runner/pg.py#L117), but the connection string is built at [pg.py#L87](https://github.com/getredash/redash/blob/fbb81b62d6c79f8e9381ac8973ac34739bfa6b41/redash/query_runner/pg.py#L87).

According to the [psycopg2 docs](http://initd.org/psycopg/docs/module.html), you could use `conn = psycopg2.connect("dbname=test user=postgres password=secret")` or `conn = psycopg2.connect(database="test", user="postgres", password="secret")`. Currently the first is being used.
As you suggest, we should switch to using the dictionary format of the `connect` method.

On Monday, I'll try to write up a quick fix and see if there are any other places I see the same error and let you know. Thanks!

I tried to check things out by switching to a dictionary but I wasn't seeing the logs print out any more. So I'm not sure what happened, but I've spent all the time I could on it today and ended up just changing the password. Sorry. :) It's probably a pretty low priority bug, so maybe I'll have some time in a month to come back and check it out again if you'd like me to.

No worries. Whenever you get the chance, it will be appreciated. Ping me on chat when you get to work on it again if you stumble on issues with logging.

I've noticed a related problem with this: if the password field is blank (represented as an empty string in the Redash PostgreSQL DB) then Redash seems to confuse the fields; it seemed to mess up the selection of the database. In my case I'm using SSL client cert authentication so the password isn't used; with a blank field Redash tried to connect to a database with the same name as the username field, rather than the one specified in dbname.
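To make the two styles from the psycopg2 docs concrete, here is a short sketch (assuming a reachable server; credentials are placeholders) showing why the keyword form sidesteps the problem, along with the DSN quoting workaround:

```python
import psycopg2

# DSN (libpq connection string) form: a value containing spaces must be
# single-quoted, otherwise libpq fails with: missing "=" after "secret" ...
conn = psycopg2.connect("dbname=test user=postgres password='my secret password'")

# Keyword form: no string parsing happens at all, so spaces in the password
# can never be misread as extra key=value pairs.
conn = psycopg2.connect(dbname="test", user="postgres",
                        password="my secret password")
```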
2017-08-06T17:35:35
getredash/redash
1944
getredash__redash-1944
[ "1854" ]
5b54a777d91e18398f68fcae4bdc669f438faec0
diff --git a/redash/handlers/visualizations.py b/redash/handlers/visualizations.py --- a/redash/handlers/visualizations.py +++ b/redash/handlers/visualizations.py @@ -1,9 +1,12 @@ import json + from flask import request from redash import models -from redash.permissions import require_permission, require_admin_or_owner from redash.handlers.base import BaseResource, get_object_or_404 +from redash.permissions import (require_admin_or_owner, + require_object_modify_permission, + require_permission) class VisualizationListResource(BaseResource): @@ -12,7 +15,7 @@ def post(self): kwargs = request.get_json(force=True) query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org) - require_admin_or_owner(query.user_id) + require_object_modify_permission(query, self.current_user) kwargs['options'] = json.dumps(kwargs['options']) kwargs['query_rel'] = query @@ -28,7 +31,7 @@ class VisualizationResource(BaseResource): @require_permission('edit_query') def post(self, visualization_id): vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org) - require_admin_or_owner(vis.query_rel.user_id) + require_object_modify_permission(vis.query_rel, self.current_user) kwargs = request.get_json(force=True) if 'options' in kwargs: @@ -45,6 +48,6 @@ def post(self, visualization_id): @require_permission('edit_query') def delete(self, visualization_id): vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org) - require_admin_or_owner(vis.query_rel.user_id) + require_object_modify_permission(vis.query_rel, self.current_user) models.db.session.delete(vis) models.db.session.commit() diff --git a/redash/permissions.py b/redash/permissions.py --- a/redash/permissions.py +++ b/redash/permissions.py @@ -1,6 +1,7 @@ +import functools + from flask_login import current_user from flask_restful import abort -import functools from funcy import flatten view_only = True
diff --git a/tests/handlers/test_visualizations.py b/tests/handlers/test_visualizations.py new file mode 100644 --- /dev/null +++ b/tests/handlers/test_visualizations.py @@ -0,0 +1,130 @@ +from tests import BaseTestCase + +from redash import models + + +class VisualizationResourceTest(BaseTestCase): + def test_create_visualization(self): + query = self.factory.create_query() + models.db.session.commit() + data = { + 'query_id': query.id, + 'name': 'Chart', + 'description': '', + 'options': {}, + 'type': 'CHART' + } + + rv = self.make_request('post', '/api/visualizations', data=data) + + self.assertEquals(rv.status_code, 200) + data.pop('query_id') + self.assertDictContainsSubset(data, rv.json) + + def test_delete_visualization(self): + visualization = self.factory.create_visualization() + models.db.session.commit() + rv = self.make_request('delete', '/api/visualizations/{}'.format(visualization.id)) + + self.assertEquals(rv.status_code, 200) + self.assertEquals(models.db.session.query(models.Visualization).count(), 0) + + def test_update_visualization(self): + visualization = self.factory.create_visualization() + models.db.session.commit() + rv = self.make_request('post', '/api/visualizations/{0}'.format(visualization.id), data={'name': 'After Update'}) + + self.assertEquals(rv.status_code, 200) + self.assertEquals(rv.json['name'], 'After Update') + + def test_only_owner_collaborator_or_admin_can_create_visualization(self): + query = self.factory.create_query() + other_user = self.factory.create_user() + admin = self.factory.create_admin() + admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) + models.db.session.commit() + models.db.session.refresh(admin) + models.db.session.refresh(other_user) + models.db.session.refresh(admin_from_diff_org) + data = { + 'query_id': query.id, + 'name': 'Chart', + 'description': '', + 'options': {}, + 'type': 'CHART' + } + + rv = self.make_request('post', '/api/visualizations', data=data, user=admin) + self.assertEquals(rv.status_code, 200) + + rv = self.make_request('post', '/api/visualizations', data=data, user=other_user) + self.assertEquals(rv.status_code, 403) + + self.make_request('post', '/api/queries/{}/acl'.format(query.id), data={'access_type': 'modify', 'user_id': other_user.id}) + rv = self.make_request('post', '/api/visualizations', data=data, user=other_user) + self.assertEquals(rv.status_code, 200) + + rv = self.make_request('post', '/api/visualizations', data=data, user=admin_from_diff_org) + self.assertEquals(rv.status_code, 404) + + def test_only_owner_collaborator_or_admin_can_edit_visualization(self): + vis = self.factory.create_visualization() + models.db.session.flush() + path = '/api/visualizations/{}'.format(vis.id) + data = {'name': 'After Update'} + + other_user = self.factory.create_user() + admin = self.factory.create_admin() + admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) + models.db.session.commit() + models.db.session.refresh(admin) + models.db.session.refresh(other_user) + models.db.session.refresh(admin_from_diff_org) + + rv = self.make_request('post', path, user=admin, data=data) + self.assertEquals(rv.status_code, 200) + + rv = self.make_request('post', path, user=other_user, data=data) + self.assertEquals(rv.status_code, 403) + + self.make_request('post', '/api/queries/{}/acl'.format(vis.query_id), data={'access_type': 'modify', 'user_id': other_user.id}) + rv = self.make_request('post', path, user=other_user, data=data) + self.assertEquals(rv.status_code, 200) 
+ + rv = self.make_request('post', path, user=admin_from_diff_org, data=data) + self.assertEquals(rv.status_code, 404) + + def test_only_owner_collaborator_or_admin_can_delete_visualization(self): + vis = self.factory.create_visualization() + models.db.session.flush() + path = '/api/visualizations/{}'.format(vis.id) + + other_user = self.factory.create_user() + admin = self.factory.create_admin() + admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) + + models.db.session.commit() + models.db.session.refresh(admin) + models.db.session.refresh(other_user) + models.db.session.refresh(admin_from_diff_org) + rv = self.make_request('delete', path, user=admin) + self.assertEquals(rv.status_code, 200) + + vis = self.factory.create_visualization() + models.db.session.commit() + path = '/api/visualizations/{}'.format(vis.id) + + rv = self.make_request('delete', path, user=other_user) + self.assertEquals(rv.status_code, 403) + + self.make_request('post', '/api/queries/{}/acl'.format(vis.query_id), data={'access_type': 'modify', 'user_id': other_user.id}) + + rv = self.make_request('delete', path, user=other_user) + self.assertEquals(rv.status_code, 200) + + vis = self.factory.create_visualization() + models.db.session.commit() + path = '/api/visualizations/{}'.format(vis.id) + + rv = self.make_request('delete', path, user=admin_from_diff_org) + self.assertEquals(rv.status_code, 404) diff --git a/tests/test_handlers.py b/tests/test_handlers.py --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -1,11 +1,10 @@ -from funcy import project - from flask import url_for from flask_login import current_user +from funcy import project from mock import patch +from tests import BaseTestCase, authenticated_user + from redash import models, settings -from tests import BaseTestCase -from tests import authenticated_user class AuthenticationTestMixin(object): @@ -65,121 +64,6 @@ def test_redirects_non_authenticated_user(self): self.assertEqual(rv.status_code, 302) -class VisualizationResourceTest(BaseTestCase): - def test_create_visualization(self): - query = self.factory.create_query() - models.db.session.commit() - data = { - 'query_id': query.id, - 'name': 'Chart', - 'description': '', - 'options': {}, - 'type': 'CHART' - } - - rv = self.make_request('post', '/api/visualizations', data=data) - - self.assertEquals(rv.status_code, 200) - data.pop('query_id') - self.assertDictContainsSubset(data, rv.json) - - def test_delete_visualization(self): - visualization = self.factory.create_visualization() - models.db.session.commit() - rv = self.make_request('delete', '/api/visualizations/{}'.format(visualization.id)) - - self.assertEquals(rv.status_code, 200) - self.assertEquals(models.db.session.query(models.Visualization).count(), 0) - - def test_update_visualization(self): - visualization = self.factory.create_visualization() - models.db.session.commit() - rv = self.make_request('post', '/api/visualizations/{0}'.format(visualization.id), data={'name': 'After Update'}) - - self.assertEquals(rv.status_code, 200) - self.assertEquals(rv.json['name'], 'After Update') - - def test_only_owner_or_admin_can_create_visualization(self): - query = self.factory.create_query() - other_user = self.factory.create_user() - admin = self.factory.create_admin() - admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) - models.db.session.commit() - models.db.session.refresh(admin) - models.db.session.refresh(other_user) - models.db.session.refresh(admin_from_diff_org) - data = { - 
'query_id': query.id, - 'name': 'Chart', - 'description': '', - 'options': {}, - 'type': 'CHART' - } - - - rv = self.make_request('post', '/api/visualizations', data=data, user=admin) - self.assertEquals(rv.status_code, 200) - - rv = self.make_request('post', '/api/visualizations', data=data, user=other_user) - self.assertEquals(rv.status_code, 403) - - rv = self.make_request('post', '/api/visualizations', data=data, user=admin_from_diff_org) - self.assertEquals(rv.status_code, 404) - - def test_only_owner_or_admin_can_edit_visualization(self): - vis = self.factory.create_visualization() - models.db.session.flush() - path = '/api/visualizations/{}'.format(vis.id) - data = {'name': 'After Update'} - - other_user = self.factory.create_user() - admin = self.factory.create_admin() - admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) - models.db.session.commit() - models.db.session.refresh(admin) - models.db.session.refresh(other_user) - models.db.session.refresh(admin_from_diff_org) - - rv = self.make_request('post', path, user=admin, data=data) - self.assertEquals(rv.status_code, 200) - - rv = self.make_request('post', path, user=other_user, data=data) - self.assertEquals(rv.status_code, 403) - - rv = self.make_request('post', path, user=admin_from_diff_org, data=data) - self.assertEquals(rv.status_code, 404) - - def test_only_owner_or_admin_can_delete_visualization(self): - vis = self.factory.create_visualization() - models.db.session.flush() - path = '/api/visualizations/{}'.format(vis.id) - - other_user = self.factory.create_user() - admin = self.factory.create_admin() - admin_from_diff_org = self.factory.create_admin(org=self.factory.create_org()) - - models.db.session.commit() - models.db.session.refresh(admin) - models.db.session.refresh(other_user) - models.db.session.refresh(admin_from_diff_org) - rv = self.make_request('delete', path, user=admin) - self.assertEquals(rv.status_code, 200) - - vis = self.factory.create_visualization() - models.db.session.commit() - path = '/api/visualizations/{}'.format(vis.id) - - rv = self.make_request('delete', path, user=other_user) - self.assertEquals(rv.status_code, 403) - - vis = self.factory.create_visualization() - models.db.session.commit() - path = '/api/visualizations/{}'.format(vis.id) - - rv = self.make_request('delete', path, user=admin_from_diff_org) - self.assertEquals(rv.status_code, 404) - - class JobAPITest(BaseTestCase, AuthenticationTestMixin): def setUp(self): self.paths = []
Redash Permissions not working for some use cases

### Issue Summary

Currently, when a query owner grants permission to another user for a query, the user is still unable to perform the following tasks:

* change the data source
* schedule the query
* add and save a new visualisation

I believe the user should have the ability to do all the things that the owner could do once permission has been granted.

### Technical details:

* Redash Version: 1.0.3
* Browser/OS: Chrome
* How did you install Redash: AWS using the AMI
The last two totally make sense; "change data source" presents a possible issue: if one of the other editors has access to more data sources than the owner, then he might switch the query to a data source the owner doesn't have access to. Will it be OK if for now we keep this restricted?

@arikfr That's a fair point. I think we could live with "change data source" remaining restricted. Meanwhile, is there an existing ticket or anything on the Redash roadmap for a configurable group-level security policy? I.e. an admin can allow all users belonging to the same group to edit each other's queries without the query owner needing to grant permission for each query. cc: @rohanpd

How do you grant permissions to another user for a query? For example, http://demo.redash.io/queries/5272/source#table ?
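Grounded in the test patch above, granting a collaborator modify access goes through the query ACL endpoint. A sketch using `requests` (the host, ids and key are placeholders; the `Authorization: Key ...` header is the usual Redash API convention):

```python
import requests

BASE = "https://redash.example.com"                               # placeholder host
HEADERS = {"Authorization": "Key <admin-or-owner user API key>"}  # placeholder key

# Grant user 42 'modify' access to query 17; per the tests above, that user
# can then create, edit and delete visualizations on the query.
resp = requests.post("{}/api/queries/17/acl".format(BASE),
                     headers=HEADERS,
                     json={"access_type": "modify", "user_id": 42})
resp.raise_for_status()
```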
2017-09-03T11:35:00
getredash/redash
1978
getredash__redash-1978
[ "1950" ]
50eb9a86c9fd1a50bbacdac28aad22bc805ae4ef
diff --git a/redash/handlers/base.py b/redash/handlers/base.py --- a/redash/handlers/base.py +++ b/redash/handlers/base.py @@ -44,7 +44,7 @@ def update_model(self, model, updates): def record_event(org, user, options): - if isinstance(user, ApiUser): + if user.is_api_user(): options.update({ 'api_key': user.name, 'org_id': org.id @@ -52,6 +52,7 @@ def record_event(org, user, options): else: options.update({ 'user_id': user.id, + 'user_name': user.name, 'org_id': org.id }) @@ -88,7 +89,7 @@ def paginate(query_set, page, page_size, serializer): if page < 1: abort(400, message='Page must be positive integer.') - if (page-1)*page_size+1 > count > 0: + if (page - 1) * page_size + 1 > count > 0: abort(400, message='Page is out of range.') if page_size > 250 or page_size < 1:
record_event task fails for view events generated by an user who accesses a dashboard using a public sharing link ### Issue Summary If I share a dashboard and access it through the generated public URL, the worker prints an error to the log when it tries to write the view event into the events log table from the record_event task. ### Steps to Reproduce I was able to reproduce this issue by using the docker-compose.yml file provided in the repository (i.e. in a dev setup): 1. Setup a fresh instance of Redash 2. Create a new dashboard 3. Click the share button and enable public sharing 4. Copy the public sharing link (e.g. `http://localhost:5000/public/dashboards/CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J?org_slug=default`) and access it with a clean browser profile (or private window that has no redash login cookies around) ### What happens? The following error can be seen in the worker logs: ``` [2017-09-07 08:03:40,006: ERROR/MainProcess] Task redash.tasks.record_event[41674041-c67b-4659-a151-99f9e73e3301] raised unexpected: DataError('(psycopg2.DataError) invalid input syntax for integer: "CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J"\nLINE 1: ...id, additional_properties, created_at) VALUES (1, \'CH6m72tek...\n ^\n',) Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task R = retval = fun(*args, **kwargs) File "/app/redash/worker.py", line 69, in __call__ return TaskBase.__call__(self, *args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 438, in __protected_call__ return self.run(*args, **kwargs) File "/app/redash/tasks/general.py", line 15, in record_event models.db.session.commit() File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/scoping.py", line 157, in do return getattr(self.registry(), name)(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 874, in commit self.transaction.commit() File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 461, in commit self._prepare_impl() File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 441, in _prepare_impl self.session.flush() File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 2137, in flush self._flush(objects) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 2257, in _flush transaction.rollback(_capture_exception=True) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/util/langhelpers.py", line 60, in __exit__ compat.reraise(exc_type, exc_value, exc_tb) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 2221, in _flush flush_context.execute() File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/unitofwork.py", line 389, in execute rec.execute(self) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/unitofwork.py", line 548, in execute uow File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/persistence.py", line 181, in save_obj mapper, table, insert) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/persistence.py", line 835, in _emit_insert_statements execute(statement, params) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/base.py", line 945, in execute return meth(self, multiparams, params) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params) File 
"/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement compiled_sql, distilled_params File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context context) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/base.py", line 1393, in _handle_dbapi_exception exc_info File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/util/compat.py", line 202, in raise_from_cause reraise(type(exception), exception, tb=exc_tb, cause=cause) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context context) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/engine/default.py", line 469, in do_execute cursor.execute(statement, parameters) DataError: (psycopg2.DataError) invalid input syntax for integer: "CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J" LINE 1: ...id, additional_properties, created_at) VALUES (1, 'CH6m72tek... ^ [SQL: 'INSERT INTO events (org_id, user_id, action, object_type, object_id, additional_properties, created_at) VALUES (%(org_id)s, %(user_id)s, %(action)s, %(object_type)s, %(object_id)s, %(additional_properties)s, %(created_at)s) RETURNING events.id'] [parameters: {'user_id': u'CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J', 'created_at': datetime.datetime(2017, 9, 7, 8, 3, 39), 'object_type': 'dashboard', 'org_id': 1, 'object_id': 1, 'additional_properties': '{"ip": "172.20.0.1", "public": true, "referer": null, "user_agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0", "headless": false}', 'action': 'view'}] ``` Coincidentally, the value that fails to be set to the user_id field (`CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J`) is the token seen in the public sharing URL (`http://localhost:5000/public/dashboards/CH6m72tekgbplwzelZuxWzdtWu3LlbsewVYVxm8J?org_slug=default`) ### What should happen? The view event is properly saved to the events table and the task does not crash. ### Technical details: * Redash Version: Both stable release 2.0.0 and latest master with commit ID 2a22b98c77757f378d3a4ed1ec8eceb99f0a8b35 * How did you install Redash: Stable v2.0.0 by upgrading a previous version with the bin/upgrade script and the latest commit in master using the provided docker-compose configuration and following the developer environment setup. I believe this issue was discussed in #1238 but that lacked a proper STR to actually fix the root cause of this issue. It did had some good ideas about what could cause the bug and it indeed seems that an anonymous user causes an incorrect user_id to be included in the view event.
2017-09-27T15:04:41
getredash/redash
1985
getredash__redash-1985
[ "1980" ]
f03c173c5717aa467e09f659b8c76575e89601ea
diff --git a/redash/cli/users.py b/redash/cli/users.py --- a/redash/cli/users.py +++ b/redash/cli/users.py @@ -112,7 +112,8 @@ def delete(email, organization=None): models.User.org == org.id, ).delete() else: - deleted_count = models.User.query.filter(models.User.email == email).delete() + deleted_count = models.User.query.filter(models.User.email == email).delete( + synchronize_session=False) models.db.session.commit() print("Deleted %d users." % deleted_count) diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -22,6 +22,7 @@ from redash.query_runner import (get_configuration_schema_for_query_runner_type, get_query_runner) from redash.utils import generate_token, json_dumps +from redash.utils.comparators import CaseInsensitiveComparator from redash.utils.configuration import ConfigurationContainer from sqlalchemy import distinct, or_ from sqlalchemy.dialects import postgresql @@ -348,12 +349,32 @@ def __unicode__(self): return unicode(self.id) +class LowercasedString(TypeDecorator): + """ + A lowercased string + """ + impl = db.String + comparator_factory = CaseInsensitiveComparator + + def __init__(self, length=320, *args, **kwargs): + super(LowercasedString, self).__init__(length=length, *args, **kwargs) + + def process_bind_param(self, value, dialect): + if value is not None: + return value.lower() + return value + + @property + def python_type(self): + return self.impl.type.python_type + + class User(TimestampMixin, db.Model, BelongsToOrgMixin, UserMixin, PermissionsCheckMixin): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey('organizations.id')) org = db.relationship(Organization, backref=db.backref("users", lazy="dynamic")) name = Column(db.String(320)) - email = Column(db.String(320)) + email = Column(LowercasedString) password_hash = Column(db.String(128), nullable=True) # XXX replace with association table group_ids = Column('groups', MutableList.as_mutable(postgresql.ARRAY(db.Integer)), nullable=True) @@ -365,6 +386,8 @@ class User(TimestampMixin, db.Model, BelongsToOrgMixin, UserMixin, PermissionsCh __table_args__ = (db.Index('users_org_id_email', 'org_id', 'email', unique=True),) def __init__(self, *args, **kwargs): + if kwargs.get('email') is not None: + kwargs['email'] = kwargs['email'].lower() super(User, self).__init__(*args, **kwargs) def to_dict(self, with_api_key=False): diff --git a/redash/utils/comparators.py b/redash/utils/comparators.py new file mode 100644 --- /dev/null +++ b/redash/utils/comparators.py @@ -0,0 +1,7 @@ +from sqlalchemy import func +from sqlalchemy.ext.hybrid import Comparator + + +class CaseInsensitiveComparator(Comparator): + def __eq__(self, other): + return func.lower(self.__clause_element__()) == func.lower(other)
diff --git a/tests/handlers/test_users.py b/tests/handlers/test_users.py --- a/tests/handlers/test_users.py +++ b/tests/handlers/test_users.py @@ -26,6 +26,16 @@ def test_creates_user(self): self.assertEqual(rv.json['name'], test_user['name']) self.assertEqual(rv.json['email'], test_user['email']) + def test_creates_user_case_insensitive_email(self): + admin = self.factory.create_admin() + + test_user = {'name': 'User', 'email': '[email protected]', 'password': 'test'} + rv = self.make_request('post', '/api/users', data=test_user, user=admin) + + self.assertEqual(rv.status_code, 200) + self.assertEqual(rv.json['name'], test_user['name']) + self.assertEqual(rv.json['email'], '[email protected]') + def test_returns_400_when_email_taken(self): admin = self.factory.create_admin() @@ -34,6 +44,20 @@ def test_returns_400_when_email_taken(self): self.assertEqual(rv.status_code, 400) + def test_returns_400_when_email_taken_case_insensitive(self): + admin = self.factory.create_admin() + + test_user1 = {'name': 'User', 'email': '[email protected]', 'password': 'test'} + rv = self.make_request('post', '/api/users', data=test_user1, user=admin) + + self.assertEqual(rv.status_code, 200) + self.assertEqual(rv.json['email'], '[email protected]') + + test_user2 = {'name': 'User', 'email': '[email protected]', 'password': 'test'} + rv = self.make_request('post', '/api/users', data=test_user2, user=admin) + + self.assertEqual(rv.status_code, 400) + class TestUserListGet(BaseTestCase): def test_returns_users_for_given_org_only(self): diff --git a/tests/models/test_users.py b/tests/models/test_users.py --- a/tests/models/test_users.py +++ b/tests/models/test_users.py @@ -25,3 +25,23 @@ def test_finds_users(self): users = User.find_by_email(user.email) self.assertIn(user, users) self.assertIn(user2, users) + + def test_finds_users_case_insensitive(self): + user = self.factory.create_user(email='[email protected]') + + users = User.find_by_email('[email protected]') + self.assertIn(user, users) + + +class TestUserGetByEmailAndOrg(BaseTestCase): + def test_get_user_by_email_and_org(self): + user = self.factory.create_user(email='[email protected]') + + found_user = User.get_by_email_and_org(user.email, user.org) + self.assertEqual(user, found_user) + + def test_get_user_by_email_and_org_case_insensitive(self): + user = self.factory.create_user(email='[email protected]') + + found_user = User.get_by_email_and_org("[email protected]", user.org) + self.assertEqual(user, found_user) diff --git a/tests/test_handlers.py b/tests/test_handlers.py --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -111,6 +111,18 @@ def test_submit_correct_user_and_password(self): self.assertEquals(rv.status_code, 302) login_user_mock.assert_called_with(user, remember=False) + def test_submit_case_insensitive_user_and_password(self): + user = self.factory.user + user.hash_password('password') + + self.db.session.add(user) + self.db.session.commit() + + with patch('redash.handlers.authentication.login_user') as login_user_mock: + rv = self.client.post('/default/login', data={'email': user.email.upper(), 'password': 'password'}) + self.assertEquals(rv.status_code, 302) + login_user_mock.assert_called_with(user, remember=False) + def test_submit_correct_user_and_password_and_remember_me(self): user = self.factory.user user.hash_password('password')
Login page for user/pass is case sensitive for the username

### Issue Summary

When logging in with a username/password, the username is case-sensitive, which prevents a user from logging in when there is a mismatch.

### Steps to Reproduce

1. Create a user `[email protected]` with a password
2. Try to log in with `[email protected]` and get a failure

It is common to have usernames/email addresses converted to lowercase (or possibly uppercase if you lean that way) to ensure uniqueness and consistent user behavior. The current implementation appears to allow both `[email protected]` and `[email protected]` to be two different users, despite having the same effective email address.

I think from an implementation perspective, this could live either on the form side (some sort of JavaScript `.toLowerCase()` conversion) or (preferably) on the authentication API side (a `.lower()` on the input).

I'd be happy to hack on this; however, in a quick, cursory evaluation it isn't immediately apparent where to implement this kind of change, so any pointers would be helpful.
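The patch normalizes in both directions: values are lowercased before storage (`process_bind_param`) and comparisons wrap both sides in SQL `lower()`. A standalone sketch of the comparator half, matching the patch:

```python
from sqlalchemy import func
from sqlalchemy.ext.hybrid import Comparator


class CaseInsensitiveComparator(Comparator):
    # Rewrites `Model.email == value` into `lower(email) = lower(value)`,
    # so lookups match regardless of the case the caller typed.
    def __eq__(self, other):
        return func.lower(self.__clause_element__()) == func.lower(other)

# With the column typed as LowercasedString (see the patch), both of these
# find the same row, and new values are lowercased on write:
#   User.query.filter(User.email == '[email protected]')
#   User.query.filter(User.email == '[email protected]')
```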
2017-10-02T02:11:16
getredash/redash
2044
getredash__redash-2044
[ "1974" ]
a3a1dcf4baea4d3cc74adf22e14087021493a918
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -1,3 +1,4 @@ +import os import json import logging import select @@ -130,14 +131,19 @@ def _get_tables(self, schema): return schema.values() - def run_query(self, query, user): + def _get_connection(self): connection = psycopg2.connect(user=self.configuration.get('user'), password=self.configuration.get('password'), host=self.configuration.get('host'), port=self.configuration.get('port'), dbname=self.configuration.get('dbname'), + sslmode=self.configuration.get('sslmode'), async=True) + return connection + + def run_query(self, query, user): + connection = self._get_connection() _wait(connection, timeout=10) cursor = connection.cursor() @@ -177,8 +183,23 @@ class Redshift(PostgreSQL): def type(cls): return "redshift" + def _get_connection(self): + sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt') + + connection = psycopg2.connect(user=self.configuration.get('user'), + password=self.configuration.get('password'), + host=self.configuration.get('host'), + port=self.configuration.get('port'), + dbname=self.configuration.get('dbname'), + sslmode='require', + sslrootcert=sslrootcert_path, + async=True) + + return connection + @classmethod def configuration_schema(cls): + return { "type": "object", "properties": { @@ -199,6 +220,7 @@ def configuration_schema(cls): "title": "Database Name" } }, + "order": ['host', 'port', 'user', 'password'], "required": ["dbname", "user", "password", "host", "port"], "secret": ["password"] }
Redshift: Support ACM Certificates for SSL Connections https://docs.aws.amazon.com/redshift/latest/mgmt/connecting-transitioning-to-acm-certs.html
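At the driver level the fix amounts to pinning a CA bundle on the connection. A sketch (host, credentials and bundle path are placeholders; note that libpq only validates the chain with `verify-ca`/`verify-full`, while the patch itself passes `require`):

```python
import psycopg2

conn = psycopg2.connect(
    host="examplecluster.abc123.us-east-1.redshift.amazonaws.com",  # placeholder
    port=5439,
    dbname="dev",
    user="awsuser",
    password="<password>",
    sslmode="verify-ca",                             # the patch uses 'require'
    sslrootcert="/path/to/redshift-ca-bundle.crt",   # placeholder bundle path
)
```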
2017-10-18T11:56:19
getredash/redash
2062
getredash__redash-2062
[ "335" ]
900d5588578eae027dd7a6caf5f91383d8893da2
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -82,6 +82,7 @@ def post(self, widget_id): require_object_modify_permission(widget.dashboard, self.current_user) widget_properties = request.get_json(force=True) widget.text = widget_properties['text'] + widget.options = json.dumps(widget_properties['options']) models.db.session.commit() return widget.to_dict()
More flexible widgets grid (allow finer control over height/width)

## User should be able to control widget's placement, width and height

### Current implementation

Currently editing a dashboard works as follows:

1. When adding a widget you select its width: regular (half width) or double (full width). Height is set based on the content.
2. When opening the "Edit Dashboard" dialog, you can rearrange the order of the widgets.

![Edit Dashboard Dialog](https://user-images.githubusercontent.com/71468/30966534-e52b506e-a461-11e7-86f4-cd3220b51d12.png)

Issues with the current approach:

* You can't change widget size after placing it.
* You can't change a widget's height.
* You can't control a widget's width beyond the current two options.

### Solution

To solve this we want to have a flexible (_but still responsive_) grid that the user can freely place and move widgets on.

* Each visualization will define minimum width and height sizes (derived from the visualization type). These will be the default sizes for the widget when placed on the grid, but the user will be able to resize it to be larger (but not smaller) afterwards.
* Once entering dashboard "edit mode", the user will be able to move around and resize the widgets in place instead of in a dedicated UI.
* We should probably change other behaviors in the UI (move "add widget" to be part of the edit mode, change dashboard title editing to be in place, etc.), but we will leave that to a second iteration after the grid behavior is implemented.

### Technical Notes

* We currently use `angular-gridster` to allow moving around the widgets in the Edit Dashboard dialog. Maybe we can use it for the grid, or a similar library.
* Currently the widget knows nothing about its placement, but only its size (width). The dashboard has a layout property, which is an array of arrays of widget ids (each item in the array is a row/array of widgets). This is error prone and makes layout changes harder. If possible, it will be better if each widget retains its own placement information.
* The API to update a widget currently supports only updating a textbox widget. We will need to extend it to support all widget types.
Related: #158

[More flexible dashboard grid (control height, more width options)](https://trello.com/c/oflWUZyz/25-more-flexible-dashboard-grid-control-height-more-width-options)

This has been open for some time now; any plans on implementing this? We would love it!

As this feature is going to be implemented soon, I've updated the details. Feel free to comment.
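With the patch, a widget update persists `options` alongside `text`, which is where per-widget layout data can live. An illustrative payload for `POST /api/widgets/<id>` (the `position` keys are hypothetical, not a documented schema):

```python
import json

# Hypothetical body for the widget update endpoint after the patch: 'text'
# is still used by textbox widgets, and 'options' is now persisted as well.
widget_properties = {
    "text": "Quarterly revenue",
    "options": {
        # Illustrative layout data; the exact schema is up to the frontend.
        "position": {"col": 0, "row": 4, "sizeX": 3, "sizeY": 2},
    },
}

# The handler stores it with json.dumps(widget_properties['options']).
print(json.dumps(widget_properties))
```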
2017-10-29T11:22:01
getredash/redash
2068
getredash__redash-2068
[ "2036" ]
f38e76ad107f364d6fe0c01d88e78559c4324a08
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -967,7 +967,7 @@ def get_by_id(cls, _id): def fork(self, user): forked_list = ['org', 'data_source', 'latest_query_data', 'description', - 'query_text', 'query_hash'] + 'query_text', 'query_hash', 'options'] kwargs = {a: getattr(self, a) for a in forked_list} forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name), user=user, **kwargs)
Parameters are losing their configuration on fork

### Issue Summary

When forking a query, its parameters lose their configuration.

### Steps to Reproduce

1. Create a query with parameters
2. Fork it

### Technical details:

* Redash Version: commit 6982f48b34a66ef1aeb9fe42065e2e2a412e0d54
* How did you install Redash: docker
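The fix is one attribute added to the copy list. A sketch of the copying pattern (a toy stand-in for the model, not the real class) makes it clear why a name missing from the list silently drops data:

```python
class Query(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def fork(self):
        # Same pattern as the patch: attributes not named here simply do not
        # exist on the copy, and no error is raised -- which is how 'options'
        # (where parameter definitions live) used to get lost.
        forked_list = ['org', 'data_source', 'latest_query_data', 'description',
                       'query_text', 'query_hash', 'options']  # 'options' added by the fix
        kwargs = {a: getattr(self, a) for a in forked_list}
        return Query(name=u'Copy of {}'.format(self.name), **kwargs)


q = Query(name='q', org=1, data_source=1, latest_query_data=None,
          description='', query_text='SELECT 1', query_hash='h',
          options={'parameters': [{'name': 'start_date'}]})
print(q.fork().options)  # the parameter configuration now survives the fork
```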
2017-10-31T21:43:14
getredash/redash
2069
getredash__redash-2069
[ "1979" ]
ea7ca9e632455ff1013f8f4d6b529d286a45118d
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py --- a/redash/handlers/query_results.py +++ b/redash/handlers/query_results.py @@ -185,14 +185,19 @@ def get(self, query_id=None, query_result_id=None, filetype='json'): if query_result_id: query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org) - elif query_id is not None: + + if query_id is not None: query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) - if query is not None: + if query_result is None and query is not None: if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values: query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age) elif query.latest_query_data_id is not None: query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org) + + if query is not None and query_result is not None and self.current_user.is_api_user(): + if query.query_hash != query_result.query_hash: + abort(404, message='No cached result found for this query.') if query_result: require_access(query_result.data_source.groups, self.current_user, view_only) @@ -229,7 +234,7 @@ def get(self, query_id=None, query_result_id=None, filetype='json'): self.add_cors_headers(response.headers) if should_cache: - response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR) + response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR) return response
diff --git a/tests/handlers/test_query_results.py b/tests/handlers/test_query_results.py --- a/tests/handlers/test_query_results.py +++ b/tests/handlers/test_query_results.py @@ -118,6 +118,39 @@ def test_has_full_access_to_data_source(self): rv = self.make_request('get', '/api/query_results/{}'.format(query_result.id)) self.assertEquals(rv.status_code, 200) + def test_access_with_query_api_key(self): + ds = self.factory.create_data_source(group=self.factory.org.default_group, view_only=False) + query = self.factory.create_query() + query_result = self.factory.create_query_result(data_source=ds, query_text=query.query_text) + + rv = self.make_request('get', '/api/queries/{}/results/{}.json?api_key={}'.format(query.id, query_result.id, query.api_key), user=False) + self.assertEquals(rv.status_code, 200) + + def test_access_with_query_api_key_without_query_result_id(self): + ds = self.factory.create_data_source(group=self.factory.org.default_group, view_only=False) + query = self.factory.create_query() + query_result = self.factory.create_query_result(data_source=ds, query_text=query.query_text, query_hash=query.query_hash) + query.latest_query_data = query_result + + rv = self.make_request('get', '/api/queries/{}/results.json?api_key={}'.format(query.id, query.api_key), user=False) + self.assertEquals(rv.status_code, 200) + + def test_query_api_key_and_different_query_result(self): + ds = self.factory.create_data_source(group=self.factory.org.default_group, view_only=False) + query = self.factory.create_query(query_text="SELECT 8") + query_result2 = self.factory.create_query_result(data_source=ds, query_hash='something-different') + + rv = self.make_request('get', '/api/queries/{}/results/{}.json?api_key={}'.format(query.id, query_result2.id, query.api_key), user=False) + self.assertEquals(rv.status_code, 404) + + def test_signed_in_user_and_different_query_result(self): + ds2 = self.factory.create_data_source(group=self.factory.org.admin_group, view_only=False) + query = self.factory.create_query(query_text="SELECT 8") + query_result2 = self.factory.create_query_result(data_source=ds2, query_hash='something-different') + + rv = self.make_request('get', '/api/queries/{}/results/{}.json'.format(query.id, query_result2.id)) + self.assertEquals(rv.status_code, 403) + class TestQueryResultExcelResponse(BaseTestCase): def test_renders_excel_file(self):
The API key for one query may be used to retrieve another query's results

### Issue Summary

A query's API key may be used to obtain another query's results via the REST API when including the API key in the query string.

### Steps to Reproduce

1. Create one query and execute it to obtain results (call it query A)
2. Create another query and execute it to obtain different results (call it query B)
3. Get the query API key for query A (A_API_KEY) and the query number for query A (A_QUERY_NUMBER)
4. Get the result number from query B's most recent run (B_RESULT_NUMBER)
5. Execute the code below and you'll see that the API key for query A can get results from query B

```bash
REDASH_DOMAIN='yourdomain.com'
A_QUERY_NUMBER='query number for query A'
A_API_KEY='api key for query A'
B_RESULT_NUMBER='query result number for query b'

# this will download query B's results using query A's access key
wget \
  -O query_b_results.csv \
  "https://$REDASH_DOMAIN/api/queries/$A_QUERY_NUMBER/results/$B_RESULT_NUMBER.csv?api_key=$A_API_KEY"
```

This is a bug because one query's API key should NOT be able to access another query's results.

### Technical details:

* Redash Version: 1.0.3
* Browser/OS: (Command Line) / Linux Mint 18.2
* How did you install Redash: Command line
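After the patch, a query API key only unlocks results whose `query_hash` matches that query. A `requests` sketch of the repro above (host, ids and keys are placeholders), now expected to return 404 for the cross-query case:

```python
import requests

BASE = "https://redash.example.com"  # placeholder

# Query A's key fetching query A's own cached result: still allowed.
ok = requests.get("{}/api/queries/17/results/123.json".format(BASE),
                  params={"api_key": "<query A api key>"})

# Query A's key pointed at query B's result id: the patched handler compares
# the query_hash values and answers 404 instead of leaking the other result.
leak = requests.get("{}/api/queries/17/results/456.json".format(BASE),
                    params={"api_key": "<query A api key>"})

print(ok.status_code, leak.status_code)  # e.g. 200, 404
```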
2017-11-01T11:51:40
getredash/redash
2070
getredash__redash-2070
[ "1825" ]
71a235c79bcf463f4594908f2f113df8e2258c78
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -251,6 +251,7 @@ def post(self, query_id): Responds with created :ref:`query <query-response-label>` object. """ query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) + require_access(query.data_source.groups, self.current_user, not_view_only) forked_query = query.fork(self.current_user) models.db.session.commit() return forked_query.to_dict(with_visualizations=True)
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py --- a/tests/handlers/test_queries.py +++ b/tests/handlers/test_queries.py @@ -158,3 +158,21 @@ def test_refresh_query_you_dont_have_access_to(self): user = self.factory.create_user(group_ids=[group.id]) response = self.make_request('post', self.path, user=user) self.assertEqual(403, response.status_code) + + +class TestQueryForkResourcePost(BaseTestCase): + def test_forks_a_query(self): + ds = self.factory.create_data_source(group=self.factory.org.default_group, view_only=False) + query = self.factory.create_query(data_source=ds) + + rv = self.make_request('post', '/api/queries/{}/fork'.format(query.id)) + + self.assertEqual(rv.status_code, 200) + + def test_must_have_full_access_to_data_source(self): + ds = self.factory.create_data_source(group=self.factory.org.default_group, view_only=True) + query = self.factory.create_query(data_source=ds) + + rv = self.make_request('post', '/api/queries/{}/fork'.format(query.id)) + + self.assertEqual(rv.status_code, 403)
An unauthorized (read only) user can create queries

### Issue Summary

I created a user in the default group and set the data source capabilities to "Read only" for that group. Accessing as this brand new user, I really can't create a new query, but I can fork an existing one and modify it as I want. I think this could be a security flaw.

### Steps to Reproduce

1. Install with Docker and connect to a data source (I created a MySQL one);
2. Create a few queries;
3. Create a user and assign him to the "default" group;
4. Set the permissions of the data source for the default group to read only;
5. Log in with the brand new user and click to list the queries;
6. Choose one of the queries and fork it.

I expect that a "read only" role should really be read only.

### Technical details:

* Redash Version: 1.0.3+b2850
* Browser/OS: Google Chrome 59/Fedora 25
* How did you install Redash: Docker with the suggested `docker-compose.production.yml`
Confirming that this issue exists on Redash Version: 1.0.3+b2866. Although the `Execute Query` and `Refresh Query` buttons are disabled, a read-only user can fork an existing query to create a new API key, and alter the refresh interval for the forked query. However, it does not appear that the altered query is executed, even when the refresh interval has elapsed. Browser/OS: Google Chrome 59/Ubuntu 17.04 How did you install Redash: Docker with the suggested `docker-compose.production.yml`
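For reference, a simplified sketch of the permission semantics behind the one-line fix: group-to-data-source grants carry a `view_only` flag, and fork now demands a non-view-only grant. This is a toy reimplementation of the check, not the production code:

```python
view_only = True
not_view_only = False


def has_access(object_groups, user_group_ids, need_view_only):
    # object_groups maps group id -> view_only flag for the data source.
    matching = set(object_groups) & set(user_group_ids)
    if not matching:
        return False
    required_level = 1 if need_view_only else 2
    # Any non-view-only grant gives the user full (level 2) access.
    group_level = 1 if all(object_groups[g] for g in matching) else 2
    return required_level <= group_level


# Data source granted to group 1 as view-only: fork (which needs full
# access after the fix) is denied; a full grant allows it.
print(has_access({1: view_only}, [1], not_view_only))      # False -> 403
print(has_access({1: not_view_only}, [1], not_view_only))  # True  -> fork allowed
```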
2017-11-01T12:57:01
getredash/redash
2071
getredash__redash-2071
[ "1824" ]
59d6eb662c45c7255cae1e78241af5a40cd5fc6c
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -266,6 +266,12 @@ def post(self, query_id): Responds with query task details. """ + # TODO: this should actually check for permissions, but because currently you can only + # get here either with a user API key or a query one, we can just check whether it's + # an api key (meaning this is a query API key, which only grants read access). + if self.current_user.is_api_user(): + abort(403, message="Please use a user API key.") + query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) require_access(query.groups, self.current_user, not_view_only)
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py --- a/tests/handlers/test_queries.py +++ b/tests/handlers/test_queries.py @@ -158,6 +158,13 @@ def test_refresh_query_you_dont_have_access_to(self): user = self.factory.create_user(group_ids=[group.id]) response = self.make_request('post', self.path, user=user) self.assertEqual(403, response.status_code) + + def test_refresh_forbiden_with_query_api_key(self): + response = self.make_request('post', '{}?api_key={}'.format(self.path, self.query.api_key), user=False) + self.assertEqual(403, response.status_code) + + response = self.make_request('post', '{}?api_key={}'.format(self.path, self.factory.user.api_key), user=False) + self.assertEqual(200, response.status_code) class TestQueryForkResourcePost(BaseTestCase):
AttributeError: 'ApiUser' object has no attribute 'email'

### Issue Summary

API POST calls to /api/queries/###/refresh throw this error on 1.0.3+b2850.

### Steps to Reproduce

1. Make a POST call to /api/queries/###/refresh
2. See the error in api_error.log and a 500 result

Full error with trace:

```
Exception on /api/queries/###/refresh [POST]
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1639, in full_dispatch_request
    rv = self.dispatch_request()
  File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1625, in dispatch_request
    return self.view_functions[rule.endpoint](**req.view_args)
  File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
    resp = resource(*args, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/flask_login/utils.py", line 228, in decorated_view
    return func(*args, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
    return self.dispatch_request(*args, **kwargs)
  File "/opt/redash/redash.1.0.3.b2850/redash/handlers/base.py", line 28, in dispatch_request
    return super(BaseResource, self).dispatch_request(*args, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
    resp = meth(*args, **kwargs)
  File "/opt/redash/redash.1.0.3.b2850/redash/handlers/queries.py", line 266, in post
    return run_query(query.data_source, parameter_values, query.query_text, query.id)
  File "/opt/redash/redash.1.0.3.b2850/redash/handlers/query_results.py", line 45, in run_query
    job = enqueue_query(query_text, data_source, current_user.id, metadata={"Username": current_user.email, "Query ID": query_id})
  File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 343, in __getattr__
    return getattr(self._get_current_object(), name)
AttributeError: 'ApiUser' object has no attribute 'email'
```

The same request worked in some previous version of Redash. Looking at the current code on master, it seems to still make the same reference to `current_user.email`, so I assume this bug still exists, but I haven't confirmed that yet.

### Technical details:

* Redash Version: 1.0.3+b2850
* Browser/OS: N/A
* How did you install Redash: N/A
Just to be clear, ### in those URL paths is an actual query ID number. I assume you're calling this API with a query API key? Yes, the request is made with an api_key param. This is the reason for the error. We should probably check the API key here and only allow a user API key, because otherwise it's a security concern (in the case of queries with parameters).
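To illustrate the fixed behavior, here is a minimal sketch against the refresh endpoint; the base URL, query id, and key values are hypothetical, and the expected status codes follow the tests above:

```python
import requests

# Hypothetical values for illustration only.
BASE_URL = "https://redash.example.com"
QUERY_ID = 123
USER_API_KEY = "user-scoped-key"    # key from a user's profile; grants full access
QUERY_API_KEY = "query-scoped-key"  # per-query key; read-only by design

# A user API key is accepted and triggers a refresh:
r = requests.post("{}/api/queries/{}/refresh".format(BASE_URL, QUERY_ID),
                  params={"api_key": USER_API_KEY})
print(r.status_code)  # expected: 200

# A query API key is now rejected cleanly instead of raising a 500:
r = requests.post("{}/api/queries/{}/refresh".format(BASE_URL, QUERY_ID),
                  params={"api_key": QUERY_API_KEY})
print(r.status_code)  # expected: 403, "Please use a user API key."
```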
2017-11-01T13:07:15
getredash/redash
2072
getredash__redash-2072
[ "1977" ]
28b4450fa9a3e8c762b32c0c31dbfe3acf63a8cd
diff --git a/redash/query_runner/query_results.py b/redash/query_runner/query_results.py new file mode 100644 --- /dev/null +++ b/redash/query_runner/query_results.py @@ -0,0 +1,167 @@ +import json +import logging +import numbers +import re +import sqlite3 + +from dateutil import parser + +from redash import models +from redash.permissions import has_access, not_view_only +from redash.query_runner import (TYPE_BOOLEAN, TYPE_DATETIME, TYPE_FLOAT, + TYPE_INTEGER, TYPE_STRING, BaseQueryRunner, + register) +from redash.utils import JSONEncoder + +logger = logging.getLogger(__name__) + + +class PermissionError(Exception): + pass + + +def _guess_type(value): + if value == '' or value is None: + return TYPE_STRING + + if isinstance(value, numbers.Integral): + return TYPE_INTEGER + + if isinstance(value, float): + return TYPE_FLOAT + + if unicode(value).lower() in ('true', 'false'): + return TYPE_BOOLEAN + + try: + parser.parse(value) + return TYPE_DATETIME + except (ValueError, OverflowError): + pass + + return TYPE_STRING + + +def extract_query_ids(query): + queries = re.findall(r'(?:join|from) query_(\d+)', query, re.IGNORECASE) + return [int(q) for q in queries] + + +def _load_query(user, query_id): + query = models.Query.get_by_id(query_id) + + if user.org_id != query.org_id: + raise PermissionError("Query id {} not found.".format(query.id)) + + if not has_access(query.data_source.groups, user, not_view_only): + raise PermissionError(u"You are not allowed to execute queries on {} data source (used for query id {}).".format( + query.data_source.name, query.id)) + + return query + + +def create_tables_from_query_ids(user, connection, query_ids): + for query_id in set(query_ids): + query = _load_query(user, query_id) + + results, error = query.data_source.query_runner.run_query( + query.query_text, user) + + if error: + raise Exception( + "Failed loading results for query id {}.".format(query.id)) + + results = json.loads(results) + table_name = 'query_{query_id}'.format(query_id=query_id) + create_table(connection, table_name, results) + + +def fix_column_name(name): + return name.replace(':', '_').replace('.', '_').replace(' ', '_') + + +def create_table(connection, table_name, query_results): + columns = [column['name'] + for column in query_results['columns']] + safe_columns = [fix_column_name(column) for column in columns] + + column_list = ", ".join(safe_columns) + create_table = "CREATE TABLE {table_name} ({column_list})".format( + table_name=table_name, column_list=column_list) + logger.debug("CREATE TABLE query: %s", create_table) + connection.execute(create_table) + + insert_template = u"insert into {table_name} ({column_list}) values ({place_holders})".format( + table_name=table_name, + column_list=column_list, + place_holders=','.join(['?'] * len(columns))) + + for row in query_results['rows']: + values = [row.get(column) for column in columns] + connection.execute(insert_template, values) + + +class Results(BaseQueryRunner): + noop_query = 'SELECT 1' + + @classmethod + def configuration_schema(cls): + return { + "type": "object", + "properties": { + } + } + + @classmethod + def annotate_query(cls): + return False + + @classmethod + def name(cls): + return "Query Results (Beta)" + + def run_query(self, query, user): + connection = sqlite3.connect(':memory:') + + query_ids = extract_query_ids(query) + create_tables_from_query_ids(user, connection, query_ids) + + cursor = connection.cursor() + + try: + cursor.execute(query) + + if cursor.description is not None: + columns = 
self.fetch_columns( + [(i[0], None) for i in cursor.description]) + + rows = [] + column_names = [c['name'] for c in columns] + + for i, row in enumerate(cursor): + for j, col in enumerate(row): + guess = _guess_type(col) + + if columns[j]['type'] is None: + columns[j]['type'] = guess + elif columns[j]['type'] != guess: + columns[j]['type'] = TYPE_STRING + + rows.append(dict(zip(column_names, row))) + + data = {'columns': columns, 'rows': rows} + error = None + json_data = json.dumps(data, cls=JSONEncoder) + else: + error = 'Query completed but it returned no data.' + json_data = None + except KeyboardInterrupt: + connection.cancel() + error = "Query cancelled by user." + json_data = None + finally: + connection.close() + return json_data, error + + +register(Results) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -222,7 +222,8 @@ def all_settings(): 'redash.query_runner.jql', 'redash.query_runner.google_analytics', 'redash.query_runner.axibase_tsd', - 'redash.query_runner.salesforce' + 'redash.query_runner.salesforce', + 'redash.query_runner.query_results' ] enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
diff --git a/tests/query_runner/test_query_results.py b/tests/query_runner/test_query_results.py new file mode 100644 --- /dev/null +++ b/tests/query_runner/test_query_results.py @@ -0,0 +1,87 @@ +import sqlite3 +from unittest import TestCase + +from redash.query_runner.query_results import (PermissionError, _load_query, create_table, + extract_query_ids) +from tests import BaseTestCase + + +class TestExtractQueryIds(TestCase): + def test_works_with_simple_query(self): + query = "SELECT 1" + self.assertEquals([], extract_query_ids(query)) + + def test_finds_queries_to_load(self): + query = "SELECT * FROM query_123" + self.assertEquals([123], extract_query_ids(query)) + + def test_finds_queries_in_joins(self): + query = "SELECT * FROM query_123 JOIN query_4566" + self.assertEquals([123, 4566], extract_query_ids(query)) + + +class TestCreateTable(TestCase): + def test_creates_table_with_colons_in_column_name(self): + connection = sqlite3.connect(':memory:') + results = {'columns': [{'name': 'ga:newUsers'}, { + 'name': 'test2'}], 'rows': [{'ga:newUsers': 123, 'test2': 2}]} + table_name = 'query_123' + create_table(connection, table_name, results) + connection.execute('SELECT 1 FROM query_123') + + def test_creates_table(self): + connection = sqlite3.connect(':memory:') + results = {'columns': [{'name': 'test1'}, + {'name': 'test2'}], 'rows': []} + table_name = 'query_123' + create_table(connection, table_name, results) + connection.execute('SELECT 1 FROM query_123') + + def test_creates_table_with_missing_columns(self): + connection = sqlite3.connect(':memory:') + results = {'columns': [{'name': 'test1'}, {'name': 'test2'}], 'rows': [ + {'test1': 1, 'test2': 2}, {'test1': 3}]} + table_name = 'query_123' + create_table(connection, table_name, results) + connection.execute('SELECT 1 FROM query_123') + + def test_creates_table_with_spaces_in_column_name(self): + connection = sqlite3.connect(':memory:') + results = {'columns': [{'name': 'two words'}, {'name': 'test2'}], 'rows': [ + {'two words': 1, 'test2': 2}, {'test1': 3}]} + table_name = 'query_123' + create_table(connection, table_name, results) + connection.execute('SELECT 1 FROM query_123') + + def test_loads_results(self): + connection = sqlite3.connect(':memory:') + rows = [{'test1': 1, 'test2': 'test'}, {'test1': 2, 'test2': 'test2'}] + results = {'columns': [{'name': 'test1'}, + {'name': 'test2'}], 'rows': rows} + table_name = 'query_123' + create_table(connection, table_name, results) + self.assertEquals( + len(list(connection.execute('SELECT * FROM query_123'))), 2) + + +class TestGetQuery(BaseTestCase): + # test query from different account + def test_raises_exception_for_query_from_different_account(self): + query = self.factory.create_query() + user = self.factory.create_user(org=self.factory.create_org()) + + self.assertRaises(PermissionError, lambda: _load_query(user, query.id)) + + def test_raises_exception_for_query_with_different_groups(self): + ds = self.factory.create_data_source(group=self.factory.create_group()) + query = self.factory.create_query(data_source=ds) + user = self.factory.create_user() + + self.assertRaises(PermissionError, lambda: _load_query(user, query.id)) + + def test_returns_query(self): + query = self.factory.create_query() + user = self.factory.create_user() + + loaded = _load_query(user, query.id) + self.assertEquals(query, loaded)
Queries using other queries as a data source I couldn't find an existing ticket for this, so I made this one. Redash seems to advertise that it supports adding queries as first-class data sources here: https://redash.io/help/queries/using-datasets-as-data-sources.html It would be really cool to have this functionality land in Redash. @arikfr any plans on open-sourcing the preview, or would you accept any PRs that add this functionality directly into Redash?
It will be part of the v3 release.
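To make the idea concrete, a small sketch of the table-name convention the new runner uses; `extract_query_ids` is the helper added in the patch above, and the query ids here are made up:

```python
from redash.query_runner.query_results import extract_query_ids

# Saved query results are exposed to this runner as in-memory SQLite
# tables named query_<id>, referenced directly in FROM/JOIN clauses.
sql = """
SELECT u.name, COUNT(*) AS orders
FROM query_123 u
JOIN query_456 o ON o.user_id = u.id
GROUP BY u.name
"""

print(extract_query_ids(sql))  # [123, 456]
```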
2017-11-01T14:18:04
getredash/redash
2134
getredash__redash-2134
[ "68" ]
868f334471a03f0f4fac721dca4744af377d1726
diff --git a/redash/handlers/api.py b/redash/handlers/api.py --- a/redash/handlers/api.py +++ b/redash/handlers/api.py @@ -79,6 +79,7 @@ def json_representation(data, code, headers=None): api.add_org_resource(QueryResultListResource, '/api/query_results', endpoint='query_results') api.add_org_resource(QueryResultResource, + '/api/query_results/<query_result_id>.<filetype>', '/api/query_results/<query_result_id>', '/api/queries/<query_id>/results.<filetype>', '/api/queries/<query_id>/results/<query_result_id>.<filetype>',
User can't download dataset before saving query Because the query results URL contains the query ID, the user can't download the dataset before saving the query. We need to allow addressing query results without a query ID.
Fixed. I am facing this issue in the latest version.
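A sketch of what the added route enables, with a hypothetical host, result id, and API key: a query result can now be fetched by its own id, without a saved query's id in the path:

```python
import requests

BASE_URL = "https://redash.example.com"  # hypothetical
QUERY_RESULT_ID = 98765                  # hypothetical
API_KEY = "user-scoped-key"              # hypothetical

# New route shape: /api/query_results/<query_result_id>.<filetype>
r = requests.get("{}/api/query_results/{}.csv".format(BASE_URL, QUERY_RESULT_ID),
                 params={"api_key": API_KEY})
with open("result.csv", "wb") as f:
    f.write(r.content)
```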
2017-12-06T16:14:51
getredash/redash
2247
getredash__redash-2247
[ "1668" ]
a4d9ed5418193c7f463734c47af409a785053aaa
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -515,6 +515,7 @@ def update_group_assignments(self, group_names): groups.append(self.org.default_group) self.group_ids = [g.id for g in groups] db.session.add(self) + db.session.commit() def has_access(self, obj, access_type): return AccessPermission.exists(obj, access_type, grantee=self)
diff --git a/tests/models/test_users.py b/tests/models/test_users.py --- a/tests/models/test_users.py +++ b/tests/models/test_users.py @@ -1,12 +1,14 @@ from tests import BaseTestCase -from redash.models import User +from redash.models import User, db class TestUserUpdateGroupAssignments(BaseTestCase): def test_default_group_always_added(self): user = self.factory.create_user() user.update_group_assignments(["g_unknown"]) + db.session.refresh(user) + self.assertItemsEqual([user.org.default_group.id], user.group_ids) def test_update_group_assignments(self): @@ -14,6 +16,8 @@ def test_update_group_assignments(self): new_group = self.factory.create_group(name="g1") user.update_group_assignments(["g1"]) + db.session.refresh(user) + self.assertItemsEqual([user.org.default_group.id, new_group.id], user.group_ids)
SAML group assignment doesn't seem to work According to the documentation, sending a `RedashGroups` attribute should set the user's groups. However, this doesn't seem to work. Adding `pprint` debugging in `saml_auth.py`, I can see the necessary attribute is being sent: ``` [2017-03-13 14:12:58,516][PID:923][INFO][saml2.response] Subject NameID: <?xml version='1.0' encoding='UTF-8'?> <ns0:NameID xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion" Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" NameQualifier="https://login.torchbox.com/idp/shibboleth" SPNameQualifier="https://redash.torchbox.com/">[email protected]</ns0:NameID> {'FirstName': ['Felicity'], 'LastName': ['Tarnell'], 'RedashGroups': ['wagtail', 'tech', 'developers', 'drupal', 'admin', 'sysadmin'], 'mail': ['[email protected]']} [2017-03-13 14:13:00,362][PID:923][INFO][metrics] method=POST path=/saml/callback endpoint=saml_auth.idp_initiated status=302 content_type=text/html; charset=utf-8 content_length=209 duration=2076.67 query_count=4 query_duration=1527.05 ``` However, while the user is logged in correctly, they are not a member of any groups besides `default`. (I tried pre-creating a new group called `sysadmin`, which didn't make any difference; the `admin` group already exists anyway.)
It's a bug introduced by the move to SQLAlchemy. There should be a call to `session.commit()` after [this line](https://github.com/getredash/redash/blob/master/redash/authentication/saml_auth.py#L102). Also need to fix [the tests](https://github.com/getredash/redash/blob/master/tests/models/test_users.py#L5-L17) to catch this. @arikfr: any chance this could get some love and make it into 2.0.2?
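A minimal sketch of the fixed behavior, assuming a Redash shell and a hypothetical user email; with the commit added inside `update_group_assignments`, the new `group_ids` actually persist:

```python
from redash.models import User, db

user = User.query.filter(User.email == "[email protected]").one()  # hypothetical user
user.update_group_assignments(["sysadmin", "developers"])  # now commits internally

db.session.refresh(user)  # re-read the row from the database
print(user.group_ids)     # the default group's id plus ids of any matching groups
```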
2018-01-20T13:31:59
getredash/redash
2267
getredash__redash-2267
[ "2238" ]
817f2ba9af4376c2e7e067bf3c050819f40fbcb6
diff --git a/redash/query_runner/elasticsearch.py b/redash/query_runner/elasticsearch.py --- a/redash/query_runner/elasticsearch.py +++ b/redash/query_runner/elasticsearch.py @@ -420,7 +420,7 @@ def run_query(self, query, user): if error: return None, error - params = {"source": json.dumps(query_dict)} + params = {"source": json.dumps(query_dict), "source_content_type": "application/json"} logger.debug("Using URL: %s", url) logger.debug("Using params : %s", params) r = requests.get(url, params=params, auth=self.auth)
ElasticSearch query fails on ES 6 ### Issue Summary Using the public Docker images, which seem to be based on Redash 4 (beta?), trying to set up a query for an ES 6 backend results in: Error running query: Failed to execute query. Return Code: 500 Reason: {"error":{"root_cause":[{"type":"illegal_state_exception","reason":"source and source_content_type parameters are required"}],"type":"illegal_state_exception","reason":"source and source_content_type parameters are required"},"status":500} ### Steps to Reproduce 1. Deploy Redash using the Redash docker-compose file 2. Deploy an ES 6 backend 3. Set up an ElasticSearch data source in Redash 4. Try a simple query ### Technical details: * Redash Version: 4.0.0 (not sure about the exact build number - can't determine it from the Docker image) * Browser/OS: Redash in Docker on Ubuntu 17.10 * How did you install Redash: Redash docker-compose file from GitHub pulling in redash:latest from Docker Hub Looks like Redash uses the `source` query parameter, but as per https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html this also requires providing a `source_content_type` parameter.
Thanks @chriswue. This sounds like a support issue; have you opened a thread on the [forum](https://discuss.redash.io/)? @RichardLitt No, I assumed this was a bug in Redash. I will open a support thread. Opened support thread: https://discuss.redash.io/t/connecting-redash-4-to-elasticsearch-6/1412 @RichardLitt Is this really a support issue? If I go to the [relevant line](https://github.com/getredash/redash/blob/cf436e45a41d149a8133eca8bb635496e4020df2/redash/query_runner/elasticsearch.py#L423) and add the appropriate content type for my query, i.e. change it to: `params = {"source": json.dumps(query_dict), "source_content_type": "application/json"}`, then my queries execute as expected. @chriswue Did you get around this? @alexdrans Good point. No, I guess it is not. Thanks for the work there. I'll flag this as a bug. @alexdrans I haven't found a workaround; it needs fixing in Redash. Would have submitted a PR but got sidetracked by something else.
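For clarity, a sketch of the fixed request shape outside of Redash, against a hypothetical local ES host; ES 6 rejects a `source` parameter unless `source_content_type` accompanies it:

```python
import json
import requests

query_dict = {"query": {"match_all": {}}}
params = {
    "source": json.dumps(query_dict),
    "source_content_type": "application/json",  # required by ES 6
}
r = requests.get("http://localhost:9200/myindex/_search", params=params)
print(r.status_code, r.json())
```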
2018-01-30T11:03:03
getredash/redash
2278
getredash__redash-2278
[ "2218" ]
a6f38730594e8c7f63445597502352c4aae83926
diff --git a/migrations/versions/969126bd800f_.py b/migrations/versions/969126bd800f_.py new file mode 100644 --- /dev/null +++ b/migrations/versions/969126bd800f_.py @@ -0,0 +1,69 @@ +"""Update widget's position data based on dashboard layout. + +Revision ID: 969126bd800f +Revises: 6b5be7e0a0ef +Create Date: 2018-01-31 15:20:30.396533 + +""" +import json +from alembic import op +import sqlalchemy as sa + +from redash.models import Dashboard, Widget, db + + +# revision identifiers, used by Alembic. +revision = '969126bd800f' +down_revision = '6b5be7e0a0ef' +branch_labels = None +depends_on = None + + +def upgrade(): + # Update widgets position data: + column_size = 3 + print "Updating dashboards position data:" + for dashboard in Dashboard.query: + print " Updating dashboard: {}".format(dashboard.id) + layout = json.loads(dashboard.layout) + + print " Building widgets map:" + widgets = {} + for w in dashboard.widgets: + print " Widget: {}".format(w.id) + widgets[w.id] = w + + print " Iterating over layout:" + for row_index, row in enumerate(layout): + print " Row: {} - {}".format(row_index, row) + if row is None: + continue + + for column_index, widget_id in enumerate(row): + print " Column: {} - {}".format(column_index, widget_id) + widget = widgets.get(widget_id) + + if widget is None: + continue + + options = json.loads(widget.options) or {} + options['position'] = { + "row": row_index, + "col": column_index * column_size, + "sizeX": column_size * widget.width + } + + widget.options = json.dumps(options) + + db.session.add(widget) + + db.session.commit() + + # Remove legacy columns no longer in use. + op.drop_column('widgets', 'type') + op.drop_column('widgets', 'query_id') + + +def downgrade(): + op.add_column('widgets', sa.Column('query_id', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('widgets', sa.Column('type', sa.VARCHAR(length=100), autoincrement=False, nullable=True)) diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -21,9 +21,6 @@ def post(self): :<json number width: Width for widget display :>json object widget: The created widget - :>json array layout: The new layout of the dashboard this widget was added to - :>json boolean new_row: Whether this widget was added on a new row or not - :>json number version: The revision number of the dashboard """ widget_properties = request.get_json(force=True) dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org) @@ -46,25 +43,8 @@ def post(self): models.db.session.add(widget) models.db.session.commit() - layout = json.loads(widget.dashboard.layout) - new_row = True - - if len(layout) == 0 or widget.width == 2: - layout.append([widget.id]) - elif len(layout[-1]) == 1: - neighbour_widget = models.Widget.query.get(layout[-1][0]) - if neighbour_widget.width == 1: - layout[-1].append(widget.id) - new_row = False - else: - layout.append([widget.id]) - else: - layout.append([widget.id]) - - widget.dashboard.layout = json.dumps(layout) - models.db.session.add(widget.dashboard) models.db.session.commit() - return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version} + return {'widget': widget.to_dict()} class WidgetResource(BaseResource): @@ -92,12 +72,8 @@ def delete(self, widget_id): Remove a widget from a dashboard. 
:param number widget_id: ID of widget to remove - - :>json array layout: New layout of dashboard this widget was removed from - :>json number version: Revision number of dashboard """ widget = models.Widget.get_by_id_and_org(widget_id, self.current_org) require_object_modify_permission(widget.dashboard, self.current_user) - widget.delete() + models.db.session.delete(widget) models.db.session.commit() - return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version} diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -1295,8 +1295,7 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model name = Column(db.String(100)) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User) - # TODO: The layout should dynamically be built from position and size information on each widget. - # Will require update in the frontend code to support this. + # layout is no longer used, but kept so we know how to render old dashboards. layout = Column(db.Text) dashboard_filters_enabled = Column(db.Boolean, default=False) is_archived = Column(db.Boolean, default=False, index=True) @@ -1311,39 +1310,22 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model def to_dict(self, with_widgets=False, user=None): layout = json.loads(self.layout) - if with_widgets: - widget_list = Widget.query.filter(Widget.dashboard == self) - - widgets = {} + widgets = [] - for w in widget_list: + if with_widgets: + for w in self.widgets: + pass if w.visualization_id is None: - widgets[w.id] = w.to_dict() + widgets.append(w.to_dict()) elif user and has_access(w.visualization.query_rel.groups, user, view_only): - widgets[w.id] = w.to_dict() + widgets.append(w.to_dict()) else: - widgets[w.id] = project(w.to_dict(), - ('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at')) - widgets[w.id]['restricted'] = True - - # The following is a workaround for cases when the widget object gets deleted without the dashboard layout - # updated. This happens for users with old databases that didn't have a foreign key relationship between - # visualizations and widgets. - # It's temporary until better solution is implemented (we probably should move the position information - # to the widget). 
- widgets_layout = [] - for row in layout: - if not row: - continue - new_row = [] - for widget_id in row: - widget = widgets.get(widget_id, None) - if widget: - new_row.append(widget) - - widgets_layout.append(new_row) + widget = project(w.to_dict(), + ('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at')) + widget['restricted'] = True + widgets.append(widget) else: - widgets_layout = None + widgets = None return { 'id': self.id, @@ -1352,7 +1334,7 @@ def to_dict(self, with_widgets=False, user=None): 'user_id': self.user_id, 'layout': layout, 'dashboard_filters_enabled': self.dashboard_filters_enabled, - 'widgets': widgets_layout, + 'widgets': widgets, 'is_archived': self.is_archived, 'is_draft': self.is_draft, 'updated_at': self.updated_at, @@ -1464,10 +1446,6 @@ class Widget(TimestampMixin, db.Model): options = Column(db.Text) dashboard_id = Column(db.Integer, db.ForeignKey("dashboards.id"), index=True) - # unused; kept for backward compatability: - type = Column(db.String(100), nullable=True) - query_id = Column(db.Integer, nullable=True) - __tablename__ = 'widgets' def to_dict(self): @@ -1486,15 +1464,6 @@ def to_dict(self): return d - def delete(self): - layout = json.loads(self.dashboard.layout) - layout = map(lambda row: filter(lambda w: w != self.id, row), layout) - layout = filter(lambda row: len(row) > 0, layout) - self.dashboard.layout = json.dumps(layout) - - db.session.add(self.dashboard) - db.session.delete(self) - def __unicode__(self): return u"%s" % self.id
diff --git a/tests/factories.py b/tests/factories.py --- a/tests/factories.py +++ b/tests/factories.py @@ -118,7 +118,6 @@ def __call__(self): options='{}') widget_factory = ModelFactory(redash.models.Widget, - type='chart', width=1, options='{}', dashboard=dashboard_factory.create, diff --git a/tests/handlers/test_dashboards.py b/tests/handlers/test_dashboards.py --- a/tests/handlers/test_dashboards.py +++ b/tests/handlers/test_dashboards.py @@ -38,8 +38,8 @@ def test_get_dashboard_filters_unauthorized_widgets(self): rv = self.make_request('get', '/api/dashboards/{0}'.format(dashboard.slug)) self.assertEquals(rv.status_code, 200) - self.assertTrue(rv.json['widgets'][0][1]['restricted']) - self.assertNotIn('restricted', rv.json['widgets'][0][0]) + self.assertTrue(rv.json['widgets'][0]['restricted']) + self.assertNotIn('restricted', rv.json['widgets'][1]) def test_get_non_existing_dashboard(self): rv = self.make_request('get', '/api/dashboards/not_existing') diff --git a/tests/handlers/test_widgets.py b/tests/handlers/test_widgets.py --- a/tests/handlers/test_widgets.py +++ b/tests/handlers/test_widgets.py @@ -22,28 +22,6 @@ def test_create_widget(self): rv = self.create_widget(dashboard, vis) self.assertEquals(rv.status_code, 200) - dashboard = models.Dashboard.query.get(dashboard.id) - self.assertEquals(unicode(rv.json['layout']), dashboard.layout) - - self.assertEquals(dashboard.widgets.count(), 1) - self.assertEquals(rv.json['layout'], [[rv.json['widget']['id']]]) - self.assertEquals(rv.json['new_row'], True) - - rv2 = self.create_widget(dashboard, vis) - self.assertEquals(dashboard.widgets.count(), 2) - self.assertEquals(rv2.json['layout'], - [[rv.json['widget']['id'], rv2.json['widget']['id']]]) - self.assertEquals(rv2.json['new_row'], False) - - rv3 = self.create_widget(dashboard, vis) - self.assertEquals(rv3.json['new_row'], True) - rv4 = self.create_widget(dashboard, vis, width=2) - self.assertEquals(rv4.json['layout'], - [[rv.json['widget']['id'], rv2.json['widget']['id']], - [rv3.json['widget']['id']], - [rv4.json['widget']['id']]]) - self.assertEquals(rv4.json['new_row'], True) - def test_wont_create_widget_for_visualization_you_dont_have_access_to(self): dashboard = self.factory.create_dashboard() vis = self.factory.create_visualization() @@ -86,8 +64,3 @@ def test_delete_widget(self): self.assertEquals(rv.status_code, 200) dashboard = models.Dashboard.get_by_slug_and_org(widget.dashboard.slug, widget.dashboard.org) self.assertEquals(dashboard.widgets.count(), 0) - self.assertEquals(dashboard.layout, '[]') - - # TODO: test how it updates the layout - - diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -476,26 +476,6 @@ def test_records_additional_properties(self): self.assertDictEqual(event.additional_properties, additional_properties) -class TestWidgetDeleteInstance(BaseTestCase): - def test_delete_removes_from_layout(self): - widget = self.factory.create_widget() - widget2 = self.factory.create_widget(dashboard=widget.dashboard) - db.session.flush() - widget.dashboard.layout = json.dumps([[widget.id, widget2.id]]) - widget.delete() - self.assertEquals(json.dumps([[widget2.id]]), widget.dashboard.layout) - - def test_delete_removes_empty_rows(self): - widget = self.factory.create_widget() - widget2 = self.factory.create_widget(dashboard=widget.dashboard) - db.session.flush() - widget.dashboard.layout = json.dumps([[widget.id, widget2.id]]) - db.session.flush() - widget.delete() - widget2.delete() - 
self.assertEquals("[]", widget.dashboard.layout) - - def _set_up_dashboard_test(d): d.g1 = d.factory.create_group(name='First', permissions=['create', 'view']) d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
Remove old dashboard/widget positioning code
I'll fix this bug... the code is from https://github.com/getredash/redash/pull/2025/commits
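A sketch of the conversion the migration performs, with made-up widget ids and widths; each row of the old `layout` JSON becomes a per-widget `options['position']` entry on a grid where each old column spans `column_size = 3` units:

```python
import json

column_size = 3
layout = [[11, 12], [13]]              # made-up widget ids: two on row 0, one on row 1
widget_widths = {11: 1, 12: 1, 13: 2}  # old-style widths (1 = half row, 2 = full row)

positions = {}
for row_index, row in enumerate(layout):
    for column_index, widget_id in enumerate(row):
        positions[widget_id] = {
            "row": row_index,
            "col": column_index * column_size,
            "sizeX": column_size * widget_widths[widget_id],
        }

print(json.dumps(positions, indent=2))
# e.g. widget 13 lands on row 1, col 0, with sizeX 6 (full width)
```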
2018-02-01T12:41:04
getredash/redash
2284
getredash__redash-2284
[ "1413" ]
daea3337b0870f1379bca63108c2840162e9c10e
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py --- a/redash/query_runner/big_query.py +++ b/redash/query_runner/big_query.py @@ -205,7 +205,8 @@ def _get_query_result(self, jobs, query): data = { "columns": columns, - "rows": rows + "rows": rows, + 'metadata': {'data_scanned': query_reply['totalBytesProcessed']} } return data @@ -224,7 +225,14 @@ def get_schema(self, get_stats=False): for table in tables.get('tables', []): table_data = service.tables().get(projectId=project_id, datasetId=dataset_id, tableId=table['tableReference']['tableId']).execute() - schema.append({'name': table_data['id'], 'columns': map(lambda r: r['name'], table_data['schema']['fields'])}) + columns = [] + for column in table_data['schema']['fields']: + if column['type'] == 'RECORD': + for field in column['fields']: + columns.append(u"{}.{}".format(column['name'], field['name'])) + else: + columns.append(column['name']) + schema.append({'name': table_data['id'], 'columns': columns}) return schema
Properly show RECORD fields in BigQuery's schema This RECORD: ![image](https://cloud.githubusercontent.com/assets/71468/20478473/ebeb428e-afe2-11e6-8dde-531415041eda.png) will be shown in Redash just as `location_data`. Instead we should deconstruct it into `location_data.city_name`, `location_data.country_name`, etc.
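A sketch of the flattening the patch applies, run against a hypothetical table schema with one RECORD column (the list mirrors the shape of `table_data['schema']['fields']` from the BigQuery API):

```python
fields = [
    {"name": "user_id", "type": "STRING"},
    {"name": "location_data", "type": "RECORD", "fields": [
        {"name": "city_name", "type": "STRING"},
        {"name": "country_name", "type": "STRING"},
    ]},
]

columns = []
for column in fields:
    if column["type"] == "RECORD":
        # Deconstruct the RECORD into dotted column names.
        for field in column["fields"]:
            columns.append(u"{}.{}".format(column["name"], field["name"]))
    else:
        columns.append(column["name"])

print(columns)  # ['user_id', u'location_data.city_name', u'location_data.country_name']
```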
2018-02-02T21:19:40
getredash/redash
2304
getredash__redash-2304
[ "1199" ]
790ac2ec5423ab8c736d9021119936851aca4eed
diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py --- a/redash/query_runner/mongodb.py +++ b/redash/query_runner/mongodb.py @@ -74,6 +74,49 @@ def parse_query_json(query): return query_data +def _get_column_by_name(columns, column_name): + for c in columns: + if "name" in c and c["name"] == column_name: + return c + + return None + + +def parse_results(results): + rows = [] + columns = [] + + for row in results: + parsed_row = {} + + for key in row: + if isinstance(row[key], dict): + for inner_key in row[key]: + column_name = '{}.{}'.format(key, inner_key) + if _get_column_by_name(columns, column_name) is None: + columns.append({ + "name": column_name, + "friendly_name": column_name, + "type": TYPES_MAP.get(type(row[key][inner_key]), TYPE_STRING) + }) + + parsed_row[column_name] = row[key][inner_key] + + else: + if _get_column_by_name(columns, key) is None: + columns.append({ + "name": key, + "friendly_name": key, + "type": TYPES_MAP.get(type(row[key]), TYPE_STRING) + }) + + parsed_row[key] = row[key] + + rows.append(parsed_row) + + return rows, columns + + class MongoDB(BaseQueryRunner): @classmethod def configuration_schema(cls): @@ -113,13 +156,6 @@ def __init__(self, configuration): self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False - def _get_column_by_name(self, columns, column_name): - for c in columns: - if "name" in c and c["name"] == column_name: - return c - - return None - def _get_db(self): if self.is_replica_set: db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"]) @@ -259,21 +295,14 @@ def run_query(self, query, user): rows.append({ "count" : cursor }) else: - for r in cursor: - for k in r: - if self._get_column_by_name(columns, k) is None: - columns.append({ - "name": k, - "friendly_name": k, - "type": TYPES_MAP.get(type(r[k]), TYPE_STRING) - }) - - rows.append(r) + rows, columns = parse_results(cursor) if f: ordered_columns = [] for k in sorted(f, key=f.get): - ordered_columns.append(self._get_column_by_name(columns, k)) + column = _get_column_by_name(columns, k) + if column: + ordered_columns.append(column) columns = ordered_columns
diff --git a/tests/query_runner/test_mongodb.py b/tests/query_runner/test_mongodb.py --- a/tests/query_runner/test_mongodb.py +++ b/tests/query_runner/test_mongodb.py @@ -2,7 +2,7 @@ import json from unittest import TestCase from pytz import utc -from redash.query_runner.mongodb import parse_query_json +from redash.query_runner.mongodb import parse_query_json, parse_results, _get_column_by_name from redash.utils import parse_human_time @@ -105,4 +105,42 @@ def test_supports_relative_timestamps(self): self.assertEqual(query_data['ts'], one_hour_ago) - +class TestMongoResults(TestCase): + def test_parses_regular_results(self): + raw_results = [ + {'column': 1, 'column2': 'test'}, + {'column': 2, 'column2': 'test', 'column3': 'hello'} + ] + rows, columns = parse_results(raw_results) + + for i, row in enumerate(rows): + self.assertDictEqual(row, raw_results[i]) + + self.assertIsNotNone(_get_column_by_name(columns, 'column')) + self.assertIsNotNone(_get_column_by_name(columns, 'column2')) + self.assertIsNotNone(_get_column_by_name(columns, 'column3')) + + def test_parses_nested_results(self): + raw_results = [ + {'column': 1, 'column2': 'test', 'nested': { + 'a': 1, + 'b': 'str' + }}, + {'column': 2, 'column2': 'test', 'column3': 'hello', 'nested': { + 'a': 2, + 'b': 'str2', + 'c': 'c' + }} + ] + + rows, columns = parse_results(raw_results) + + self.assertDictEqual(rows[0], { 'column': 1, 'column2': 'test', 'nested.a': 1, 'nested.b': 'str' }) + self.assertDictEqual(rows[1], { 'column': 2, 'column2': 'test', 'column3': 'hello', 'nested.a': 2, 'nested.b': 'str2', 'nested.c': 'c' }) + + self.assertIsNotNone(_get_column_by_name(columns, 'column')) + self.assertIsNotNone(_get_column_by_name(columns, 'column2')) + self.assertIsNotNone(_get_column_by_name(columns, 'column3')) + self.assertIsNotNone(_get_column_by_name(columns, 'nested.a')) + self.assertIsNotNone(_get_column_by_name(columns, 'nested.b')) + self.assertIsNotNone(_get_column_by_name(columns, 'nested.c')) \ No newline at end of file
Inner objects of MongoDB results are not handled correctly ### Issue Summary When querying a mongo object that has inner objects and returning only a given field of an inner object, results are not displayed in the UI. ### Steps to Reproduce 1. Have a MongoDB data source that contains an object looking like this: ``` json { "foo": { "bar":0, "foo":1 } } ``` 2. Query this data source like this: ``` json { "collection": "foo", "query": { }, "fields": { "foo.bar":1 } } ``` 3. Notice that the results table is empty. 4. Look at the response using the network tab of your browser's console. 5. Notice that the data is correctly returned but incorrectly handled in the UI. I expect the data to be handled correctly. Since it is not in the table, it is impossible to make any visualization. Inner objects are common in MongoDB. I believe it can be fixed either in the UI or on the server, since an inner field isn't in the same format as the other fields. ### Technical details: - Redash Version: 0.10 - Browser/OS: Firefox/Linux - How did you install Redash: Docker
Can you share a GitHub Gist with the query result of the above (the result you get from the API)? This will help with reproducing and fixing. I unfortunately have no public instance of redash, so the Gist wouldn't actually work. However, here's the results I get from the API (a real life example): ``` json { "query_result": { "retrieved_at": "2016-07-21T13:50:37.155615+00:00", "query_hash": "ac18633652fccb3940d0e752fecb10f2", "query": "{\n \"collection\": \"jenkins_builds\",\n \"query\": {\n \"slave_info.slaveName\": \"optimusprime\"\n },\n \"fields\":{\n \"slave_info.slaveName\":1,\n \"build_url\":1\n }\n}", "runtime": 0.074466, "data": { "rows": [{ "slave_info": { "slaveName": "optimusprime" }, "build_url": "aUrl", "_id": "5784e1161f7a4b0006edebeb" }, { "slave_info": { "slaveName": "optimusprime" }, "build_url": "anotherUrl", "_id": "5784f6ef1f7a4b0006edede3" }], "columns": [null, { "friendly_name": "build_url", "type": "string", "name": "build_url" }] }, "id": 45, "data_source_id": 1 } } ``` Here's how a document looks like in the collection: ``` json { "_id": ObjectId("578f6caac3775823842c9b1a"), "queue_time": 29, "result": "ABORTED", "scm_checkout_end_time": ISODate("2016-07-20T08:20:59.237Z"), "end_time": ISODate("2016-07-20T08:21:07.912Z"), "ci_url": "http://localhost:8080/jenkins/", "context_id": 102490963, "full_job_name": "jenkins_test", "parameters": { }, "build_failure_causes": [{ "name": "abort", "categories": [ "aaa" ], "id": "ccff5d15-1003-4570-b38f-f844255c6be1", "description": "aborted" }], "build_url": "aUrl", "build_cause": "Démarré par l'utilisateur anonymous", "start_time": ISODate("2016-07-20T08:20:58.406Z"), "number": 252, "scm_checkout_start_time": ISODate("2016-07-20T08:20:58.450Z"), "started_username": "anonymous", "scm_checkout_duration": 787, "build_steps": [{ "start_time": ISODate("2016-07-20T08:20:59.242Z"), "end_time": ISODate("2016-07-20T08:21:00.127Z"), "duration": 885, "build_step_type": "hudson.tasks.Shell" }, { "start_time": ISODate("2016-07-20T08:21:00.128Z"), "end_time": ISODate("1969-12-31T19:00:00Z"), "build_step_type": "hudson.tasks.Maven" }, { "start_time": ISODate("2016-07-20T08:21:07.710Z"), "end_time": ISODate("2016-07-20T08:21:07.711Z"), "duration": 1, "build_step_type": "com.sonyericsson.jenkins.plugins.bfa.model.ScannerJobProperty" }, { "start_time": ISODate("2016-07-20T08:21:07.712Z"), "end_time": ISODate("2016-07-20T08:21:07.714Z"), "duration": 2, "build_step_type": "jenkins.model.BuildDiscarderProperty" }], "job_name": "jenkins_test", "slave_info": { "slaveName": "optimusprime", "executor": "1", "label": "master, windows," }, "duration": 9452, "scm_info": { "url": "aGithubUrl", "commit": "aCommitHash", "branch": "origin/master" }, "started_user_id": "anonymous" } ``` And here's the JS errors we get in the UI: ``` TypeError: Cannot read property 'name' of null at http://localhost:5000/scripts/scripts.f1054183.js:1:4880 at Array.forEach (native) at Function.k.each.k.forEach (http://localhost:5000/scripts/plugins.ec2c390a.js:15:7459) at n.o [as update] (http://localhost:5000/scripts/scripts.f1054183.js:1:4829) at http://localhost:5000/scripts/scripts.f1054183.js:1:8914 at http:/localhost:5000/scripts/plugins.ec2c390a.js:15:1819 at c (http://localhost:5000/scripts/plugins.ec2c390a.js:5:16812) at http://localhost:5000/scripts/plugins.ec2c390a.js:5:17489 at c.$eval (http://localhost:5000/scripts/plugins.ec2c390a.js:5:22696) at c.$digest (http://localhost:5000/scripts/plugins.ec2c390a.js:5:21221) ``` and ``` Error: [$interpolate:interr] Can't 
interpolate: {{queryResult.getData().length}} TypeError: Cannot read property 'name' of null http://errors.angularjs.org/1.2.18/$interpolate/interr?p0=%7B%7BqueryResult…ngth%7D%7D&p1=TypeError%3A%20Cannot%20read%20property%20'name'%20of%20null at plugins.ec2c390a.js:3 at Object.f (plugins.ec2c390a.js:5) at c.$digest (plugins.ec2c390a.js:5) at c.$apply (plugins.ec2c390a.js:5) at l (plugins.ec2c390a.js:5) at v (plugins.ec2c390a.js:5) at XMLHttpRequest.w.onreadystatechange (plugins.ec2c390a.js:5) ``` If you need more information or if anything is not clear, let me know. I have the same issue. I'm using `aggregate` instead of `query` to get around the issue.
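To make the fix concrete, a small sketch of `parse_results` (added in the patch above) applied to documents shaped like the report in this thread; note that dict key order, and therefore column order, is not guaranteed:

```python
from redash.query_runner.mongodb import parse_results

docs = [
    {"build_url": "aUrl", "slave_info": {"slaveName": "optimusprime"}},
    {"build_url": "anotherUrl", "slave_info": {"slaveName": "optimusprime"}},
]

rows, columns = parse_results(docs)
print(sorted(c["name"] for c in columns))  # ['build_url', 'slave_info.slaveName']
print(rows[0]["slave_info.slaveName"])     # optimusprime
```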
2018-02-10T19:56:29
getredash/redash
2346
getredash__redash-2346
[ "2235", "1259" ]
11d09b2f0945fb90898b68b730d12e7a52190251
diff --git a/redash/authentication/saml_auth.py b/redash/authentication/saml_auth.py --- a/redash/authentication/saml_auth.py +++ b/redash/authentication/saml_auth.py @@ -20,7 +20,7 @@ def get_saml_client(org): """ metadata_url = org.get_setting("auth_saml_metadata_url") entity_id = org.get_setting("auth_saml_entity_id") - acs_url = url_for("saml_auth.idp_initiated", _external=True) + acs_url = url_for("saml_auth.idp_initiated", org_slug=org.slug, _external=True) saml_settings = { 'metadata': { @@ -61,10 +61,10 @@ def get_saml_client(org): @blueprint.route(org_scoped_rule('/saml/callback'), methods=['POST']) -def idp_initiated(): +def idp_initiated(org_slug=None): if not current_org.get_setting("auth_saml_enabled"): logger.error("SAML Login is not enabled") - return redirect(url_for('redash.index')) + return redirect(url_for('redash.index', org_slug=org_slug)) saml_client = get_saml_client(current_org) authn_response = saml_client.parse_authn_request_response( @@ -84,16 +84,16 @@ def idp_initiated(): group_names = authn_response.ava.get('RedashGroups') user.update_group_assignments(group_names) - url = url_for('redash.index') + url = url_for('redash.index', org_slug=org_slug) return redirect(url) @blueprint.route(org_scoped_rule("/saml/login")) -def sp_initiated(): +def sp_initiated(org_slug=None): if not current_org.get_setting("auth_saml_enabled"): logger.error("SAML Login is not enabled") - return redirect(url_for('redash.index')) + return redirect(url_for('redash.index', org_slug=org_slug)) saml_client = get_saml_client(current_org) nameid_format = current_org.get_setting('auth_saml_nameid_format') diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py --- a/redash/handlers/authentication.py +++ b/redash/handlers/authentication.py @@ -111,16 +111,6 @@ def login(org_slug=None): if current_user.is_authenticated: return redirect(next_path) - if not current_org.get_setting('auth_password_login_enabled'): - if settings.REMOTE_USER_LOGIN_ENABLED: - return redirect(url_for("remote_user_auth.login", next=next_path)) - elif current_org.get_setting('auth_saml_enabled'): # settings.SAML_LOGIN_ENABLED: - return redirect(url_for("saml_auth.sp_initiated", next=next_path)) - elif settings.LDAP_LOGIN_ENABLED: - return redirect(url_for("ldap_auth.login", next=next_path)) - else: - return redirect(get_google_auth_url(next_path)) - if request.method == 'POST': try: org = current_org._get_current_object() @@ -142,6 +132,7 @@ def login(org_slug=None): email=request.form.get('email', ''), show_google_openid=settings.GOOGLE_OAUTH_ENABLED, google_auth_url=google_auth_url, + show_password_login=current_org.get_setting('auth_password_login_enabled'), show_saml_login=current_org.get_setting('auth_saml_enabled'), show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED, show_ldap_login=settings.LDAP_LOGIN_ENABLED) diff --git a/redash/handlers/organization.py b/redash/handlers/organization.py --- a/redash/handlers/organization.py +++ b/redash/handlers/organization.py @@ -9,7 +9,6 @@ @routes.route(org_scoped_rule('/api/organization/status'), methods=['GET']) -@require_admin @login_required def organization_status(org_slug=None): counters = {
diff --git a/tests/test_handlers.py b/tests/test_handlers.py --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -83,14 +83,6 @@ def setUpClass(cls): def tearDownClass(cls): settings.ORG_RESOLVING = "multi_org" - def test_redirects_to_google_login_if_password_disabled(self): - self.factory.org.set_setting('auth_password_login_enabled', False) - with self.app.test_request_context('/default/login'): - rv = self.client.get('/default/login') - self.assertEquals(rv.status_code, 302) - self.assertTrue(rv.location.endswith(url_for('google_oauth.authorize', next='/default/'))) - self.factory.org.set_setting('auth_password_login_enabled', True) - def test_get_login_form(self): rv = self.client.get('/default/login') self.assertEquals(rv.status_code, 200)
LDAP Auth /login_auth/login 404 ### Issue Summary With LDAP auth enabled, the app routes the login URL to '/ldap/login' based on ldap_auth.py in authentication. The template login page redirects to '/ldap_auth/login', which returns a 404 since there is no route pointing to that URL. ### Steps to Reproduce 1. Enable LDAP auth 2. Navigate to localhost and click the link to the SSO login page 3. Observe the 404 Fix: adjust the route in /authentication/ldap_auth.py ### Technical details: * Redash Version: redash.3.0.0.b3134 * Browser/OS: Chrome * How did you install Redash: ami Can't logout when using only Google Apps SSO ### Issue Summary Trying to log out, Redash keeps redirecting me to the login page and signing me in automatically. This happened on a fresh installation with the following settings, which I think are somehow related to the issue: - **REDASH_GOOGLE_APPS_DOMAINS**: "domain1.com,gmail.com" - **REDASH_PASSWORD_LOGIN_ENABLED**: "false" --- the logs: ``` [2016-08-26 03:32:45,014][PID:25][INFO][metrics] method=GET path=/logout endpoint=redash.logout status=302 content_type=text/html; charset=utf-8 content_length=237 duration=6.48 query_count=2 query_duration=2.41 [2016-08-26 03:32:45,016][PID:25][INFO][metrics] method=GET path=/logout endpoint=redash.logout status=500 content_type=? content_length=-1 duration=8.56 query_count=2 query_duration=2.41 [2016-08-26 03:32:45,163][PID:25][INFO][metrics] method=GET path=/login endpoint=redash.login status=302 content_type=text/html; charset=utf-8 content_length=251 duration=2.97 query_count=0 query_duration=0.00 [2016-08-26 03:32:45,165][PID:25][INFO][metrics] method=GET path=/login endpoint=redash.login status=500 content_type=? content_length=-1 duration=5.24 query_count=0 query_duration=0.00 [2016-08-26 03:32:45,438][PID:25][INFO][metrics] method=GET path=/oauth/google endpoint=google_oauth.authorize status=302 content_type=text/html; charset=utf-8 content_length=941 duration=2.95 query_count=0 query_duration=0.00 [2016-08-26 03:32:45,440][PID:25][INFO][metrics] method=GET path=/oauth/google endpoint=google_oauth.authorize status=500 content_type=? content_length=-1 duration=5.40 query_count=0 query_duration=0.00 [2016-08-26 03:32:46,375][PID:25][INFO][requests.packages.urllib3.connectionpool] Starting new HTTPS connection (1): www.googleapis.com [2016-08-26 03:32:46,634][PID:25][INFO][metrics] method=GET path=/oauth/google_callback endpoint=google_oauth.callback status=302 content_type=text/html; charset=utf-8 content_length=209 duration=494.07 query_count=6 query_duration=3.87 [2016-08-26 03:32:46,637][PID:25][INFO][metrics] method=GET path=/oauth/google_callback endpoint=google_oauth.callback status=500 content_type=?
content_length=-1 duration=496.63 query_count=6 query_duration=3.87 [2016-08-26 03:32:46,789][PID:30][INFO][metrics] method=GET path=/ endpoint=redash.index status=200 content_type=text/html; charset=utf-8 content_length=3420 duration=8.75 query_count=4 query_duration=3.33 [2016-08-26 03:32:46,791][PID:30][INFO][metrics] method=GET path=/ endpoint=redash.index status=500 content_type=? content_length=-1 duration=10.59 query_count=4 query_duration=3.33 [2016-08-26 03:32:48,043][PID:30][INFO][metrics] method=GET path=/api/dashboards endpoint=dashboards status=200 content_type=application/json content_length=2 duration=11.50 query_count=5 query_duration=5.29 [2016-08-26 03:32:48,045][PID:30][INFO][metrics] method=GET path=/api/dashboards endpoint=dashboards status=500 content_type=? content_length=-1 duration=13.32 query_count=5 query_duration=5.29 [2016-08-26 03:32:48,222][PID:30][INFO][metrics] method=GET path=/api/queries/recent endpoint=recent_queries status=200 content_type=application/json content_length=2 duration=14.51 query_count=5 query_duration=7.75 [2016-08-26 03:32:48,225][PID:30][INFO][metrics] method=GET path=/api/queries/recent endpoint=recent_queries status=500 content_type=? content_length=-1 duration=17.70 query_count=5 query_duration=7.75 [2016-08-26 03:32:48,226][PID:25][INFO][metrics] method=GET path=/api/dashboards/recent endpoint=recent_dashboards status=200 content_type=application/json content_length=2 duration=15.83 query_count=7 query_duration=7.64 [2016-08-26 03:32:48,228][PID:25][INFO][metrics] method=GET path=/api/dashboards/recent endpoint=recent_dashboards status=500 content_type=? content_length=-1 duration=18.15 query_count=7 query_duration=7.64 [2016-08-26 03:32:49,253][PID:32][INFO][metrics] method=POST path=/api/events endpoint=events status=200 content_type=application/json content_length=4 duration=9.48 query_count=3 query_duration=2.62 [2016-08-26 03:32:49,255][PID:32][INFO][metrics] method=POST path=/api/events endpoint=events status=500 content_type=? content_length=-1 duration=11.85 query_count=3 query_duration=2.62 ``` --- - Redash Version: 0.12.0-rc - Browser/OS: Chrome Incognito Mode - How did you install Redash: docker
This seems fixed in beta version 4, but should still be addressed in v3 I don't think it was fixed in v4, but will try to fix it in one of the next updates of v4. We won't release a patch release for v3 for this bug, but you can patch your version by replacing `ldap_auth/login` with `ldap/login` in the templates. It stopped when I removed the Google cookie. What happens is: 1. You log out and are redirected to `/login`. 2. Because you have only Google Apps enabled, it redirects to Google Apps for authentication. 3. As you already authorized Redash, it redirects back to Redash and logs you in. What we can do (should do?) is show you the login page with the Google Apps button instead of an automatic redirect. If you enable password-based login, this is what will happen. I think we should do this, because sometimes you want to outsource user management by disabling `REDASH_PASSWORD_LOGIN_ENABLED` and allowing only Google Apps login. So if we want to block a user from accessing Redash, we just need to deactivate their Google account.
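For v3 installs, a one-off sketch of the template workaround described above; the templates path is an assumption, so adjust it for your install:

```python
import io
import os

TEMPLATES_DIR = "redash/templates"  # assumed location; adjust as needed

for root, _, files in os.walk(TEMPLATES_DIR):
    for name in files:
        path = os.path.join(root, name)
        with io.open(path, encoding="utf-8") as f:
            text = f.read()
        if "ldap_auth/login" in text:
            with io.open(path, "w", encoding="utf-8") as f:
                f.write(text.replace("ldap_auth/login", "ldap/login"))
```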
2018-02-26T18:48:07
getredash/redash
2426
getredash__redash-2426
[ "2396" ]
fc368ee4253d3c47bf0ac7a7a796937f01ffd67c
diff --git a/migrations/versions/640888ce445d_.py b/migrations/versions/640888ce445d_.py new file mode 100644 --- /dev/null +++ b/migrations/versions/640888ce445d_.py @@ -0,0 +1,107 @@ +""" +Add new scheduling data. + +Revision ID: 640888ce445d +Revises: 71477dadd6ef +Create Date: 2018-09-21 19:35:58.578796 +""" + +import json +from alembic import op +import sqlalchemy as sa +from sqlalchemy.sql import table + +from redash.models import MutableDict, PseudoJSON + + +# revision identifiers, used by Alembic. +revision = '640888ce445d' +down_revision = '71477dadd6ef' +branch_labels = None +depends_on = None + + +def upgrade(): + # Copy "schedule" column into "old_schedule" column + op.add_column('queries', sa.Column('old_schedule', sa.String(length=10), nullable=True)) + + queries = table( + 'queries', + sa.Column('schedule', sa.String(length=10)), + sa.Column('old_schedule', sa.String(length=10))) + + op.execute( + queries + .update() + .values({'old_schedule': queries.c.schedule})) + + # Recreate "schedule" column as a dict type + op.drop_column('queries', 'schedule') + op.add_column('queries', sa.Column('schedule', MutableDict.as_mutable(PseudoJSON), nullable=False, server_default=json.dumps({}))) + + # Move over values from old_schedule + queries = table( + 'queries', + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('schedule', MutableDict.as_mutable(PseudoJSON)), + sa.Column('old_schedule', sa.String(length=10))) + + conn = op.get_bind() + for query in conn.execute(queries.select()): + schedule_json = { + 'interval': None, + 'until': None, + 'day_of_week': None, + 'time': None + } + + if query.old_schedule is not None: + if ":" in query.old_schedule: + schedule_json['interval'] = 86400 + schedule_json['time'] = query.old_schedule + else: + schedule_json['interval'] = query.old_schedule + + conn.execute( + queries + .update() + .where(queries.c.id == query.id) + .values(schedule=MutableDict(schedule_json))) + + op.drop_column('queries', 'old_schedule') + +def downgrade(): + op.add_column('queries', sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON), nullable=False, server_default=json.dumps({}))) + + queries = table( + 'queries', + sa.Column('schedule', MutableDict.as_mutable(PseudoJSON)), + sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON))) + + op.execute( + queries + .update() + .values({'old_schedule': queries.c.schedule})) + + op.drop_column('queries', 'schedule') + op.add_column('queries', sa.Column('schedule', sa.String(length=10), nullable=True)) + + queries = table( + 'queries', + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('schedule', sa.String(length=10)), + sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON))) + + conn = op.get_bind() + for query in conn.execute(queries.select()): + scheduleValue = query.old_schedule['interval'] + if scheduleValue <= 86400: + scheduleValue = query.old_schedule['time'] + + conn.execute( + queries + .update() + .where(queries.c.id == query.id) + .values(schedule=scheduleValue)) + + op.drop_column('queries', 'old_schedule') diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -1,11 +1,13 @@ import cStringIO import csv import datetime +import calendar import functools import hashlib import itertools import logging import time +import pytz from functools import reduce from six import python_2_unicode_compatible, string_types, text_type @@ -857,12 +859,14 @@ def make_excel_content(self): return s.getvalue() -def 
should_schedule_next(previous_iteration, now, schedule, failures): - if schedule.isdigit(): - ttl = int(schedule) +def should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0): + # if time exists then interval > 23 hours (82800s) + # if day_of_week exists then interval > 6 days (518400s) + if (time is None): + ttl = int(interval) next_iteration = previous_iteration + datetime.timedelta(seconds=ttl) else: - hour, minute = schedule.split(':') + hour, minute = time.split(':') hour, minute = int(hour), int(minute) # The following logic is needed for cases like the following: @@ -870,10 +874,18 @@ def should_schedule_next(previous_iteration, now, schedule, failures): # - The scheduler wakes up at 00:01. # - Using naive implementation of comparing timestamps, it will skip the execution. normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute) + if normalized_previous_iteration > previous_iteration: previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1) - next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute) + days_delay = int(interval) / 60 / 60 / 24 + + days_to_add = 0 + if (day_of_week is not None): + days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday() + + next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) + + datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute) if failures: next_iteration += datetime.timedelta(minutes=2**failures) return now > next_iteration @@ -901,7 +913,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model): foreign_keys=[last_modified_by_id]) is_archived = Column(db.Boolean, default=False, index=True) is_draft = Column(db.Boolean, default=True, index=True) - schedule = Column(db.String(10), nullable=True) + schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True) schedule_failures = Column(db.Integer, default=0) visualizations = db.relationship("Visualization", cascade="all, delete-orphan") options = Column(MutableDict.as_mutable(PseudoJSON), default={}) @@ -923,7 +935,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model): def archive(self, user=None): db.session.add(self) self.is_archived = True - self.schedule = None + self.schedule = {} for vis in self.visualizations: for w in vis.widgets: @@ -1026,7 +1038,7 @@ def by_user(cls, user): def outdated_queries(cls): queries = (db.session.query(Query) .options(joinedload(Query.latest_query_data).load_only('retrieved_at')) - .filter(Query.schedule != None) + .filter(Query.schedule != {}) .order_by(Query.id)) now = utils.utcnow() @@ -1034,6 +1046,13 @@ def outdated_queries(cls): scheduled_queries_executions.refresh() for query in queries: + schedule_until = pytz.utc.localize(datetime.datetime.strptime( + query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None + if (query.schedule['interval'] == None or ( + schedule_until != None and ( + schedule_until <= now))): + continue + if query.latest_query_data: retrieved_at = query.latest_query_data.retrieved_at else: @@ -1041,7 +1060,8 @@ def outdated_queries(cls): retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at - if should_schedule_next(retrieved_at, now, query.schedule, query.schedule_failures): + if should_schedule_next(retrieved_at, now, query.schedule['interval'], query.schedule['time'], + query.schedule['day_of_week'], 
query.schedule_failures): key = "{}:{}".format(query.query_hash, query.data_source_id) outdated_queries[key] = query
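A sketch of the old-to-new conversion this migration applies: the old column held either an interval in seconds or a daily "HH:MM" time, and both map onto the new dict shape:

```python
def convert(old_schedule):
    schedule = {"interval": None, "until": None, "day_of_week": None, "time": None}
    if old_schedule is not None:
        if ":" in old_schedule:   # old "HH:MM" meant "daily at this time"
            schedule["interval"] = 86400
            schedule["time"] = old_schedule
        else:                     # old value was an interval in seconds
            schedule["interval"] = old_schedule
    return schedule

s = convert("23:30")
print(s["interval"], s["time"])     # 86400 23:30
print(convert("3600")["interval"])  # 3600
```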
diff --git a/tests/factories.py b/tests/factories.py
--- a/tests/factories.py
+++ b/tests/factories.py
@@ -75,7 +75,7 @@ def __call__(self):
                                user=user_factory.create,
                                is_archived=False,
                                is_draft=False,
-                               schedule=None,
+                               schedule={},
                                data_source=data_source_factory.create,
                                org_id=1)
 
@@ -86,7 +86,7 @@ def __call__(self):
                                user=user_factory.create,
                                is_archived=False,
                                is_draft=False,
-                               schedule=None,
+                               schedule={},
                                data_source=data_source_factory.create,
                                org_id=1)
 
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py
--- a/tests/handlers/test_queries.py
+++ b/tests/handlers/test_queries.py
@@ -168,7 +168,7 @@ def test_create_query(self):
         query_data = {
             'name': 'Testing',
             'query': 'SELECT 1',
-            'schedule': "3600",
+            'schedule': {"interval": "3600"},
             'data_source_id': self.factory.data_source.id
         }
 
diff --git a/tests/tasks/test_queries.py b/tests/tasks/test_queries.py
--- a/tests/tasks/test_queries.py
+++ b/tests/tasks/test_queries.py
@@ -94,7 +94,7 @@ def test_success_scheduled(self):
         """
         cm = mock.patch("celery.app.task.Context.delivery_info",
                         {'routing_key': 'test'})
-        q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+        q = self.factory.create_query(query_text="SELECT 1, 2", schedule={"interval": 300})
         with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
             qr.return_value = ([1, 2], None)
             result_id = execute_query(
@@ -112,7 +112,7 @@ def test_failure_scheduled(self):
         """
         cm = mock.patch("celery.app.task.Context.delivery_info",
                         {'routing_key': 'test'})
-        q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+        q = self.factory.create_query(query_text="SELECT 1, 2", schedule={"interval": 300})
         with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
             qr.side_effect = ValueError("broken")
             with self.assertRaises(QueryExecutionError):
@@ -132,7 +132,7 @@ def test_success_after_failure(self):
         """
         cm = mock.patch("celery.app.task.Context.delivery_info",
                         {'routing_key': 'test'})
-        q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+        q = self.factory.create_query(query_text="SELECT 1, 2", schedule={"interval": 300})
         with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
             qr.side_effect = ValueError("broken")
             with self.assertRaises(QueryExecutionError):
diff --git a/tests/test_models.py b/tests/test_models.py
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,4 +1,5 @@
 #encoding: utf8
+import calendar
 import datetime
 import json
 from unittest import TestCase
@@ -32,58 +33,117 @@ class ShouldScheduleNextTest(TestCase):
     def test_interval_schedule_that_needs_reschedule(self):
         now = utcnow()
         two_hours_ago = now - datetime.timedelta(hours=2)
-        self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
-                                                    0))
+        self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
 
     def test_interval_schedule_that_doesnt_need_reschedule(self):
         now = utcnow()
         half_an_hour_ago = now - datetime.timedelta(minutes=30)
-        self.assertFalse(models.should_schedule_next(half_an_hour_ago, now,
-                                                     "3600", 0))
+        self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
 
     def test_exact_time_that_needs_reschedule(self):
         now = utcnow()
         yesterday = now - datetime.timedelta(days=1)
         scheduled_datetime = now - datetime.timedelta(hours=3)
         scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
-        self.assertTrue(models.should_schedule_next(yesterday, now,
-                                                    scheduled_time, 0))
+        self.assertTrue(models.should_schedule_next(yesterday, now, "86400",
+                                                    scheduled_time))
 
     def test_exact_time_that_doesnt_need_reschedule(self):
         now = date_parse("2015-10-16 20:10")
         yesterday = date_parse("2015-10-15 23:07")
         schedule = "23:00"
-        self.assertFalse(models.should_schedule_next(yesterday, now, schedule,
-                                                     0))
+        self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule))
 
     def test_exact_time_with_day_change(self):
         now = utcnow().replace(hour=0, minute=1)
         previous = (now - datetime.timedelta(days=2)).replace(hour=23, minute=59)
         schedule = "23:59".format(now.hour + 3)
-        self.assertTrue(models.should_schedule_next(previous, now, schedule,
-                                                    0))
+        self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule))
+
+    def test_exact_time_every_x_days_that_needs_reschedule(self):
+        now = utcnow()
+        four_days_ago = now - datetime.timedelta(days=4)
+        three_day_interval = "259200"
+        scheduled_datetime = now - datetime.timedelta(hours=3)
+        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
+        self.assertTrue(models.should_schedule_next(four_days_ago, now, three_day_interval,
+                                                    scheduled_time))
+
+    def test_exact_time_every_x_days_that_doesnt_need_reschedule(self):
+        now = utcnow()
+        four_days_ago = now - datetime.timedelta(days=2)
+        three_day_interval = "259200"
+        scheduled_datetime = now - datetime.timedelta(hours=3)
+        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
+        self.assertFalse(models.should_schedule_next(four_days_ago, now, three_day_interval,
+                                                     scheduled_time))
+
+    def test_exact_time_every_x_days_with_day_change(self):
+        now = utcnow().replace(hour=23, minute=59)
+        previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1)
+        schedule = "23:58"
+        three_day_interval = "259200"
+        self.assertTrue(models.should_schedule_next(previous, now, three_day_interval, schedule))
+
+    def test_exact_time_every_x_weeks_that_needs_reschedule(self):
+        # Setup:
+        #
+        # 1) The query should run every 3 weeks on Tuesday
+        # 2) The last time it ran was 3 weeks ago from this week's Thursday
+        # 3) It is now Wednesday of this week
+        #
+        # Expectation: Even though less than 3 weeks have passed since the
+        #              last run 3 weeks ago on Thursday, it's overdue since
+        #              it should be running on Tuesdays.
+        this_thursday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Thursday") - utcnow().weekday())
+        three_weeks_ago = this_thursday - datetime.timedelta(weeks=3)
+        now = this_thursday - datetime.timedelta(days=1)
+        three_week_interval = "1814400"
+        scheduled_datetime = now - datetime.timedelta(hours=3)
+        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
+        self.assertTrue(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
+                                                    scheduled_time, "Tuesday"))
+
+    def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self):
+        # Setup:
+        #
+        # 1) The query should run every 3 weeks on Thurday
+        # 2) The last time it ran was 3 weeks ago from this week's Tuesday
+        # 3) It is now Wednesday of this week
+        #
+        # Expectation: Even though more than 3 weeks have passed since the
+        #              last run 3 weeks ago on Tuesday, it's not overdue since
+        #              it should be running on Thursdays.
+        this_tuesday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Tuesday") - utcnow().weekday())
+        three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3)
+        now = this_tuesday + datetime.timedelta(days=1)
+        three_week_interval = "1814400"
+        scheduled_datetime = now - datetime.timedelta(hours=3)
+        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
+        self.assertFalse(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
+                                                     scheduled_time, "Thursday"))
 
     def test_backoff(self):
         now = utcnow()
         two_hours_ago = now - datetime.timedelta(hours=2)
         self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
-                                                    5))
+                                                    failures=5))
         self.assertFalse(models.should_schedule_next(two_hours_ago, now,
-                                                     "3600", 10))
+                                                     "3600", failures=10))
 
 
 class QueryOutdatedQueriesTest(BaseTestCase):
     # TODO: this test can be refactored to use mock version of should_schedule_next to simplify it.
     def test_outdated_queries_skips_unscheduled_queries(self):
-        query = self.factory.create_query(schedule=None)
+        query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None})
         queries = models.Query.outdated_queries()
 
         self.assertNotIn(query, queries)
 
     def test_outdated_queries_works_with_ttl_based_schedule(self):
         two_hours_ago = utcnow() - datetime.timedelta(hours=2)
-        query = self.factory.create_query(schedule="3600")
+        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
         query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
         query.latest_query_data = query_result
 
@@ -92,7 +152,7 @@ def test_outdated_queries_works_with_ttl_based_schedule(self):
 
     def test_outdated_queries_works_scheduled_queries_tracker(self):
         two_hours_ago = datetime.datetime.now() - datetime.timedelta(hours=2)
-        query = self.factory.create_query(schedule="3600")
+        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
         query_result = self.factory.create_query_result(query=query, retrieved_at=two_hours_ago)
         query.latest_query_data = query_result
 
@@ -103,7 +163,7 @@ def test_outdated_queries_works_scheduled_queries_tracker(self):
 
     def test_skips_fresh_queries(self):
         half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
-        query = self.factory.create_query(schedule="3600")
+        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
         query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago)
         query.latest_query_data = query_result
 
@@ -112,7 +172,7 @@ def test_skips_fresh_queries(self):
 
     def test_outdated_queries_works_with_specific_time_schedule(self):
         half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
-        query = self.factory.create_query(schedule=half_an_hour_ago.strftime('%H:%M'))
+        query = self.factory.create_query(schedule={'interval':'86400', 'time':half_an_hour_ago.strftime('%H:%M'), 'until':None, 'day_of_week':None})
         query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1))
         query.latest_query_data = query_result
 
@@ -124,9 +184,9 @@ def test_enqueues_query_only_once(self):
         Only one query per data source with the same text will be reported by
         Query.outdated_queries().
         """
-        query = self.factory.create_query(schedule="60")
+        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
         query2 = self.factory.create_query(
-            schedule="60", query_text=query.query_text,
+            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
            query_hash=query.query_hash)
         retrieved_at = utcnow() - datetime.timedelta(minutes=10)
         query_result = self.factory.create_query_result(
@@ -143,9 +203,9 @@ def test_enqueues_query_with_correct_data_source(self):
         Query.outdated_queries() even if they have the same query text.
         """
         query = self.factory.create_query(
-            schedule="60", data_source=self.factory.create_data_source())
+            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, data_source=self.factory.create_data_source())
         query2 = self.factory.create_query(
-            schedule="60", query_text=query.query_text,
+            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
             query_hash=query.query_hash)
         retrieved_at = utcnow() - datetime.timedelta(minutes=10)
         query_result = self.factory.create_query_result(
@@ -162,9 +222,9 @@ def test_enqueues_only_for_relevant_data_source(self):
         If multiple queries with the same text exist, only ones that are
         scheduled to be refreshed are reported by Query.outdated_queries().
         """
-        query = self.factory.create_query(schedule="60")
+        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
         query2 = self.factory.create_query(
-            schedule="3600", query_text=query.query_text,
+            schedule={'interval':'3600', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
             query_hash=query.query_hash)
         retrieved_at = utcnow() - datetime.timedelta(minutes=10)
         query_result = self.factory.create_query_result(
@@ -180,7 +240,7 @@ def test_failure_extends_schedule(self):
         Execution failures recorded for a query result in exponential backoff
         for scheduling future execution.
         """
-        query = self.factory.create_query(schedule="60", schedule_failures=4)
+        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, schedule_failures=4)
         retrieved_at = utcnow() - datetime.timedelta(minutes=16)
         query_result = self.factory.create_query_result(
             retrieved_at=retrieved_at, query_text=query.query_text,
@@ -192,6 +252,34 @@ def test_failure_extends_schedule(self):
         query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
         self.assertEqual(list(models.Query.outdated_queries()), [query])
 
+    def test_schedule_until_after(self):
+        """
+        Queries with non-null ``schedule['until']`` are not reported by
+        Query.outdated_queries() after the given time is past.
+        """
+        one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
+        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
+        query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_ago, 'time':None, 'day_of_week':None})
+        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
+        query.latest_query_data = query_result
+
+        queries = models.Query.outdated_queries()
+        self.assertNotIn(query, queries)
+
+    def test_schedule_until_before(self):
+        """
+        Queries with non-null ``schedule['until']`` are reported by
+        Query.outdated_queries() before the given time is past.
+        """
+        one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
+        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
+        query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_from_now, 'time': None, 'day_of_week':None})
+        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
+        query.latest_query_data = query_result
+
+        queries = models.Query.outdated_queries()
+        self.assertIn(query, queries)
+
 
 class QueryArchiveTest(BaseTestCase):
     def setUp(self):
@@ -205,7 +293,7 @@ def test_archive_query_sets_flag(self):
         self.assertEquals(query.is_archived, True)
 
     def test_archived_query_doesnt_return_in_all(self):
-        query = self.factory.create_query(schedule="1")
+        query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
         yesterday = utcnow() - datetime.timedelta(days=1)
         query_result, _ = models.QueryResult.store_result(
             query.org_id, query.data_source, query.query_hash, query.query_text,
@@ -230,11 +318,11 @@ def test_removes_associated_widgets_from_dashboards(self):
         self.assertEqual(db.session.query(models.Widget).get(widget.id), None)
 
     def test_removes_scheduling(self):
-        query = self.factory.create_query(schedule="1")
+        query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
 
         query.archive()
 
-        self.assertEqual(None, query.schedule)
+        self.assertEqual({}, query.schedule)
 
     def test_deletes_alerts(self):
         subscription = self.factory.create_alert_subscription()
Add Finer-grained scheduling
I've started working on this feature that was initially filed here: https://github.com/mozilla/redash/issues/187

I have some thoughts on a slightly new design that might make query scheduling more flexible. The idea was based on how Google Calendar allows one to schedule an event. Here are some screenshots:

<img width="298" alt="screen shot 2018-03-19 at 3 21 08 pm" src="https://user-images.githubusercontent.com/784781/37617338-44146472-2b89-11e8-9fe7-eea1d3145afd.png">
<img width="295" alt="screen shot 2018-03-19 at 3 25 36 pm" src="https://user-images.githubusercontent.com/784781/37617534-cd42f31c-2b89-11e8-9467-83f6fa058f95.png">
<img width="300" alt="screen shot 2018-03-19 at 3 21 27 pm" src="https://user-images.githubusercontent.com/784781/37617342-46672dfe-2b89-11e8-89ce-98b91949f3d2.png">
<img width="298" alt="screen shot 2018-03-19 at 3 21 40 pm" src="https://user-images.githubusercontent.com/784781/37617345-48a0a64a-2b89-11e8-9c0a-f69a72046036.png">

Essentially the same functionality would still exist, with some additional new functionality:

* It would be possible to schedule a query every X days instead of only every 1 day
* It would be possible to schedule a query every X weeks on a specific day

Would love any thoughts you have on this @arikfr @kocsmy. Thanks!
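For reference, a minimal sketch of the schedule object this design implies; the key names come from the test diff above, while the concrete values are only illustrative:

```python
# Illustrative only: mirrors the schedule dict exercised in the test diff above.
# A query that runs every three weeks on Tuesdays at 23:00, until the end of 2018.
schedule = {
    "interval": "1814400",      # seconds; here, 3 weeks
    "time": "23:00",            # exact time of day, or None for pure intervals
    "day_of_week": "Tuesday",   # only meaningful for multi-week intervals
    "until": "2018-12-31",      # stop rescheduling after this date, or None
}

# The tests above invoke the check positionally in this order:
# models.should_schedule_next(previous_iteration, now,
#                             schedule["interval"], schedule["time"],
#                             schedule["day_of_week"], failures=0)
```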
Looks good! There are a few small changes I would apply to the wording and the way the stop date input works, but the overall idea is great. There was a previous try at adding extra scheduling options and this one is much better, as it adds additional options without making it more complex (on the contrary actually). @kocsmy what do you think?

Nice improvements @emtwo, thank you! A few ideas quickly:

- Maybe use "Refresh every" instead of "Repeat every" in order to stay consistent
- Stop scheduling input format is very complex and uncomfortable to type :) Considering we don't even have that feature now, maybe it'd be enough to just use a simpler (and more limited) 2018-05-28 format or similar. Just an idea; ultimately a proper GUI would be the best.
2018-03-29T21:04:14
getredash/redash
2501
getredash__redash-2501
[ "2398", "2465" ]
df774b03040ebeb97e21d9140bf792fd3af1b5ce
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -44,7 +44,7 @@ def post(self):
             models.db.session.commit()
 
         models.db.session.commit()
-        return {'widget': widget.to_dict()}
+        return widget.to_dict()
 
 
 class WidgetResource(BaseResource):
diff --git a/tests/handlers/test_widgets.py b/tests/handlers/test_widgets.py
--- a/tests/handlers/test_widgets.py
+++ b/tests/handlers/test_widgets.py
@@ -54,7 +54,7 @@ def test_create_text_widget(self):
         rv = self.make_request('post', '/api/widgets', data=data)
 
         self.assertEquals(rv.status_code, 200)
-        self.assertEquals(rv.json['widget']['text'], 'Sample text.')
+        self.assertEquals(rv.json['text'], 'Sample text.')
 
     def test_delete_widget(self):
         widget = self.factory.create_widget()
Non blocking widget refresh indicator
When refreshing a dashboard widget, the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long periods of time.

To solve this we can keep showing the old data until new data is available, while showing some indication that a refresh is in progress.

Is the following animation enough?

![](http://g.recordit.co/CyccMD6dFc.gif)

After refreshing a dashboard, widgets become draggable even when not in edit mode
@kocsmy Yes @arikfr I think this is great and that animation should be enough. I assume the frame rate is low because of the recording/gif and it should be smooth movement.
2018-04-30T08:06:56
getredash/redash
2503
getredash__redash-2503
[ "2500" ]
64783b7f067f1adcf6a22c8f828ec0d0a910bbcc
diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -121,18 +121,20 @@ def __get_schema_from_glue(self):
             region_name=self.configuration['region']
         )
         schema = {}
-        paginator = client.get_paginator('get_tables')
-
-        for database in client.get_databases()['DatabaseList']:
-            iterator = paginator.paginate(DatabaseName=database['Name'])
-            for table in iterator.search('TableList[]'):
-                table_name = '%s.%s' % (database['Name'], table['Name'])
-                if table_name not in schema:
-                    column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
-                    schema[table_name] = {'name': table_name, 'columns': column}
-                    for partition in table.get('PartitionKeys', []):
-                        schema[table_name]['columns'].append(partition['Name'])
+        database_paginator = client.get_paginator('get_databases')
+        table_paginator = client.get_paginator('get_tables')
+
+        for databases in database_paginator.paginate():
+            for database in databases['DatabaseList']:
+                iterator = table_paginator.paginate(DatabaseName=database['Name'])
+                for table in iterator.search('TableList[]'):
+                    table_name = '%s.%s' % (database['Name'], table['Name'])
+                    if table_name not in schema:
+                        column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
+                        schema[table_name] = {'name': table_name, 'columns': column}
+                        for partition in table.get('PartitionKeys', []):
+                            schema[table_name]['columns'].append(partition['Name'])
 
         return schema.values()
 
     def get_schema(self, get_stats=False):
Athena query runner doesn't paginate schemas

### Issue Summary

Hi,

The AWS Athena query runner paginates only tables, not schemas. We have ~170 schemas; however, only the first 100 are shown in the schema browser.

### Steps to Reproduce

1. Connect to Athena with a Glue metastore which has more than 100 schemas
2. In the new query window, only 100 schemas are shown.

I've looked into the code and it looks like you paginate only over tables but not over schemas (https://github.com/getredash/redash/blob/v4.0.0/redash/query_runner/athena.py#L130).

### Technical details:

* Redash Version: 4.0.0
* Browser/OS:
* How did you install Redash: docker

Kind regards,
Ievgen.
Possible fix (haven't tested it)

```python
schema = {}
schema_paginator = client.get_paginator('get_databases')
table_paginator = client.get_paginator('get_tables')

for databases in schema_paginator.paginate():
    for database in databases['DatabaseList']:
        iterator = table_paginator.paginate(DatabaseName=database['Name'])
        for table in iterator.search('TableList[]'):
            table_name = '%s.%s' % (database['Name'], table['Name'])
            if table_name not in schema:
                column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
                schema[table_name] = {'name': table_name, 'columns': column}
                for partition in table['PartitionKeys']:
                    schema[table_name]['columns'].append(partition['Name'])
```
2018-05-01T03:13:45
getredash/redash
2533
getredash__redash-2533
[ "2" ]
414fabadf651f0195bc048e12b457d8dd0f27dc3
diff --git a/redash/query_runner/yandex_metrika.py b/redash/query_runner/yandex_metrika.py
new file mode 100644
--- /dev/null
+++ b/redash/query_runner/yandex_metrika.py
@@ -0,0 +1,166 @@
+import json
+import yaml
+import logging
+from redash.query_runner import *
+from redash.utils import JSONEncoder
+import requests
+from urlparse import parse_qs, urlparse
+logger = logging.getLogger(__name__)
+
+COLUMN_TYPES = {
+    'date': (
+        'firstVisitDate', 'firstVisitStartOfYear', 'firstVisitStartOfQuarter',
+        'firstVisitStartOfMonth', 'firstVisitStartOfWeek',
+    ),
+    'datetime': (
+        'firstVisitStartOfHour', 'firstVisitStartOfDekaminute', 'firstVisitStartOfMinute',
+        'firstVisitDateTime', 'firstVisitHour', 'firstVisitHourMinute'
+
+    ),
+    'int': (
+        'pageViewsInterval', 'pageViews', 'firstVisitYear', 'firstVisitMonth',
+        'firstVisitDayOfMonth', 'firstVisitDayOfWeek', 'firstVisitMinute',
+        'firstVisitDekaminute',
+    )
+}
+
+for type_, elements in COLUMN_TYPES.items():
+    for el in elements:
+        if 'first' in el:
+            el = el.replace('first', 'last')
+            COLUMN_TYPES[type_] += (el, )
+
+
+def parse_ym_response(response):
+    columns = []
+    dimensions_len = len(response['query']['dimensions'])
+
+    for h in response['query']['dimensions'] + response['query']['metrics']:
+        friendly_name = h.split(':')[-1]
+        if friendly_name in COLUMN_TYPES['date']:
+            data_type = TYPE_DATE
+        elif friendly_name in COLUMN_TYPES['datetime']:
+            data_type = TYPE_DATETIME
+        else:
+            data_type = TYPE_STRING
+        columns.append({'name': h, 'friendly_name': friendly_name, 'type': data_type})
+
+    rows = []
+    for num, row in enumerate(response['data']):
+        res = {}
+        for i, d in enumerate(row['dimensions']):
+            res[columns[i]['name']] = d['name']
+        for i, d in enumerate(row['metrics']):
+            res[columns[dimensions_len + i]['name']] = d
+            if num == 0 and isinstance(d, float):
+                columns[dimensions_len + i]['type'] = TYPE_FLOAT
+        rows.append(res)
+
+    return {'columns': columns, 'rows': rows}
+
+
+class YandexMetrika(BaseSQLQueryRunner):
+    @classmethod
+    def annotate_query(cls):
+        return False
+
+    @classmethod
+    def type(cls):
+        return "yandex_metrika"
+
+    @classmethod
+    def name(cls):
+        return "Yandex Metrika"
+
+    @classmethod
+    def configuration_schema(cls):
+        return {
+            "type": "object",
+            "properties": {
+                "token": {
+                    "type": "string",
+                    "title": "OAuth Token"
+                }
+            },
+            "required": ["token"],
+        }
+
+    def __init__(self, configuration):
+        super(YandexMetrika, self).__init__(configuration)
+        self.syntax = 'yaml'
+        self.host = 'https://api-metrika.yandex.ru'
+        self.list_path = 'counters'
+
+    def _get_tables(self, schema):
+
+        counters = self._send_query('management/v1/{0}'.format(self.list_path))
+
+        for row in counters[self.list_path]:
+            owner = row.get('owner_login')
+            counter = '{0} | {1}'.format(
+                row.get('name', 'Unknown').encode('utf-8'), row.get('id', 'Unknown')
+            )
+            if owner not in schema:
+                schema[owner] = {'name': owner, 'columns': []}
+
+            schema[owner]['columns'].append(counter)
+
+        return schema.values()
+
+    def test_connection(self):
+        self._send_query('management/v1/{0}'.format(self.list_path))
+
+    def _send_query(self, path='stat/v1/data', **kwargs):
+        token = kwargs.pop('oauth_token', self.configuration['token'])
+        r = requests.get('{0}/{1}'.format(self.host, path), params=dict(oauth_token=token, **kwargs))
+        if r.status_code != 200:
+            raise Exception(r.text)
+        return r.json()
+
+    def run_query(self, query, user):
+        logger.debug("Metrika is about to execute query: %s", query)
+        data = None
+        query = query.strip()
+        if query == "":
+            error = "Query is empty"
+            return data, error
+        try:
+            params = yaml.load(query)
+        except ValueError as e:
+            logging.exception(e)
+            error = unicode(e)
+            return data, error
+
+        if isinstance(params, dict):
+            if 'url' in params:
+                params = parse_qs(urlparse(params['url']).query, keep_blank_values=True)
+        else:
+            error = 'The query format must be JSON or YAML'
+            return data, error
+
+        try:
+            data = json.dumps(parse_ym_response(self._send_query(**params)), cls=JSONEncoder)
+            error = None
+        except Exception as e:
+            logging.exception(e)
+            error = unicode(e)
+        return data, error
+
+
+class YandexAppMetrika(YandexMetrika):
+    @classmethod
+    def type(cls):
+        return "yandex_appmetrika"
+
+    @classmethod
+    def name(cls):
+        return "Yandex AppMetrika"
+
+    def __init__(self, configuration):
+        super(YandexAppMetrika, self).__init__(configuration)
+        self.host = 'https://api.appmetrica.yandex.ru'
+        self.list_path = 'applications'
+
+
+register(YandexMetrika)
+register(YandexAppMetrika)
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py
--- a/redash/settings/__init__.py
+++ b/redash/settings/__init__.py
@@ -157,6 +157,7 @@ def all_settings():
         'redash.query_runner.impala_ds',
         'redash.query_runner.vertica',
         'redash.query_runner.clickhouse',
+        'redash.query_runner.yandex_metrika',
         'redash.query_runner.treasuredata',
         'redash.query_runner.sqlite',
         'redash.query_runner.dynamodb_sql',
Visualizations workflow & object
Visualizations (widget?) should have an object of their own containing the following data:

- query
- type (chart, cohort, grid, ...)
- options

Tasks:

- [x] Visualization object
- [x] UI to create new visualizations instead of the hardcoded option we have today
- [x] Change the dashboard widgets to use visualizations rather than queries
- [ ] Friendlier selector when adding new widgets to dashboard
There was a case (EvMe's query 607), where the data was unsorted and had too many series, which resulted in HighCharts dying. When we revisit visualizations we should take into account limits for different visualizers, and prevent the user from killing his browser. cc: @shayel.

http://app.raw.densitydesign.org/#/

@amirnissim this requires some changes on the backend too, but let's try to move forward without them and on Sunday we will discuss the needed changes. Below is a "brain dump" about this feature, ask any questions you feel necessary:

Basically the idea is to have different types of visualizations, and the ability to create a new visualization from any dataset (query). Eventually, this will replace the "Chart" and "Cohort" static tabs.

Each visualization will define:

1. Name
2. Description
3. Properties (mandatory & optional + default values)
4. Rendering logic

When creating a new visualization it will have:

1. query_id - reference to queries table
2. visualization_type - string
3. options - JSON
4. ?

Until we add the visualization object, let's start by creating the "infrastructure" for this in the frontend code. In terms of UI, I think we will represent each visualization as a tab where we currently have the "Chart" and "Cohort" tabs and also have a "+" tab, where you define a new visualization.
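A rough sketch of such a visualization record, assuming only the fields listed in this thread; every concrete value here is illustrative:

```python
# Illustrative only: the exact model definition is not part of this thread,
# so field names follow the lists above and values are made up.
visualization = {
    "query_id": 607,                 # reference to the queries table
    "visualization_type": "chart",   # string: "chart", "cohort", "grid", ...
    "name": "Revenue over time",
    "description": "Weekly revenue, stacked by plan",
    "options": {                     # JSON blob of per-type properties
        "series": {"stacking": "stack"},
    },
}
```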
2018-05-14T14:35:12
getredash/redash
2631
getredash__redash-2631
[ "2624" ]
8abaf89394082b41565c3f55a6f6238b91ea1cc6
diff --git a/redash/authentication/account.py b/redash/authentication/account.py
--- a/redash/authentication/account.py
+++ b/redash/authentication/account.py
@@ -71,3 +71,11 @@ def send_password_reset_email(user):
     send_mail.delay([user.email], subject, html_content, text_content)
 
     return reset_link
+
+
+def send_user_disabled_email(user):
+    html_content = render_template('emails/reset_disabled.html', user=user)
+    text_content = render_template('emails/reset_disabled.txt', user=user)
+    subject = u"Your Redash account is disabled"
+
+    send_mail.delay([user.email], subject, html_content, text_content)
diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py
--- a/redash/handlers/authentication.py
+++ b/redash/handlers/authentication.py
@@ -7,6 +7,7 @@ from redash.authentication import current_org, get_login_url, get_next_path
 from redash.authentication.account import (BadSignature, SignatureExpired,
                                            send_password_reset_email,
+                                           send_user_disabled_email,
                                            send_verify_email,
                                            validate_token)
 from redash.handlers import routes
@@ -118,7 +119,10 @@ def forgot_password(org_slug=None):
     try:
         org = current_org._get_current_object()
         user = models.User.get_by_email_and_org(email, org)
-        send_password_reset_email(user)
+        if user.is_disabled:
+            send_user_disabled_email(user)
+        else:
+            send_password_reset_email(user)
     except NoResultFound:
         logging.error("No user found for forgot password: %s", email)
diff --git a/tests/test_authentication.py b/tests/test_authentication.py
--- a/tests/test_authentication.py
+++ b/tests/test_authentication.py
@@ -319,3 +319,26 @@ def test_remote_login_custom_header(self):
         })
 
         self.assert_correct_user_attributes(self.get_test_user())
+
+
+class TestUserForgotPassword(BaseTestCase):
+    def test_user_should_receive_password_reset_link(self):
+        user = self.factory.create_user()
+
+        with patch('redash.handlers.authentication.send_password_reset_email') as send_password_reset_email_mock:
+            response = self.post_request('/forgot', org=user.org, data={'email': user.email})
+            self.assertEqual(response.status_code, 200)
+            send_password_reset_email_mock.assert_called_with(user)
+
+    def test_disabled_user_should_not_receive_password_reset_link(self):
+        user = self.factory.create_user()
+        user.disable()
+        self.db.session.add(user)
+        self.db.session.commit()
+
+        with patch('redash.handlers.authentication.send_password_reset_email') as send_password_reset_email_mock,\
+                patch('redash.handlers.authentication.send_user_disabled_email') as send_user_disabled_email_mock:
+            response = self.post_request('/forgot', org=user.org, data={'email': user.email})
+            self.assertEqual(response.status_code, 200)
+            send_password_reset_email_mock.assert_not_called()
+            send_user_disabled_email_mock.assert_called_with(user)
Password reset form for disabled users
When a disabled user asks to reset their password, we should send them an email saying their account is disabled (instead of sending them a password reset link).
Instead of sending them an email, I'd recommend popping them an error message right there: "Your account is disabled, please contact your administrator (maybe email here)".

I thought of that, but it's a bad idea, as it leaks information about users: anyone can enter any email address to check if it's disabled.
2018-06-27T14:58:16
getredash/redash
2653
getredash__redash-2653
[ "1411" ]
75df88a8ff9a7aa184e41d4f677de76da60bf84c
diff --git a/redash/monitor.py b/redash/monitor.py
--- a/redash/monitor.py
+++ b/redash/monitor.py
@@ -1,39 +1,72 @@
 from redash import redis_connection, models, __version__, settings
 
 
-def get_status():
-    status = {}
+def get_redis_status():
     info = redis_connection.info()
-    status['redis_used_memory'] = info['used_memory']
-    status['redis_used_memory_human'] = info['used_memory_human']
-    status['version'] = __version__
+    return {'redis_used_memory': info['used_memory'], 'redis_used_memory_human': info['used_memory_human']}
+
+
+def get_object_counts():
+    status = {}
     status['queries_count'] = models.db.session.query(models.Query).count()
     if settings.FEATURE_SHOW_QUERY_RESULTS_COUNT:
         status['query_results_count'] = models.db.session.query(models.QueryResult).count()
     status['unused_query_results_count'] = models.QueryResult.unused().count()
     status['dashboards_count'] = models.Dashboard.query.count()
     status['widgets_count'] = models.Widget.query.count()
+    return status
 
-    status['workers'] = []
-
-    status['manager'] = redis_connection.hgetall('redash:status')
 
+def get_queues():
     queues = {}
     for ds in models.DataSource.query:
         for queue in (ds.queue_name, ds.scheduled_queue_name):
             queues.setdefault(queue, set())
             queues[queue].add(ds.name)
 
-    status['manager']['queues'] = {}
+    return queues
+
+
+def get_queues_status():
+    queues = get_queues()
+
     for queue, sources in queues.iteritems():
-        status['manager']['queues'][queue] = {
+        queues[queue] = {
             'data_sources': ', '.join(sources),
             'size': redis_connection.llen(queue)
         }
 
-    status['manager']['queues']['celery'] = {
+    queues['celery'] = {
         'size': redis_connection.llen('celery'),
         'data_sources': ''
     }
 
+    return queues
+
+
+def get_db_sizes():
+    database_metrics = []
+    queries = [
+        ['Query Results Size', "select pg_total_relation_size('query_results') as size from (select 1) as a"],
+        ['Redash DB Size', "select pg_database_size('postgres') as size"]
+    ]
+    for query_name, query in queries:
+        result = models.db.session.execute(query).first()
+        database_metrics.append([query_name, result[0]])
+
+    return database_metrics
+
+
+def get_status():
+    status = {
+        'version': __version__,
+        'workers': []
+    }
+    status.update(get_redis_status())
+    status.update(get_object_counts())
+    status['manager'] = redis_connection.hgetall('redash:status')
+    status['manager']['queues'] = get_queues_status()
+    status['database_metrics'] = {}
+    status['database_metrics']['metrics'] = get_db_sizes()
 
     return status
Add to status page the size of Redash's DB

```sql
SELECT pg_size_pretty(pg_total_relation_size('query_results'));
```

```sql
SELECT pg_size_pretty(pg_database_size('redash'));
```
@arikfr Happy to work on this one but I'm not seeing how status.json gets populated, or even where it is. Could you provide a pointer please?

Here: https://github.com/getredash/redash/blob/2a22b98c77757f378d3a4ed1ec8eceb99f0a8b35/redash/handlers/__init__.py#L15-L20

And it uses the `get_status` method from here: https://github.com/getredash/redash/blob/2a22b98c77757f378d3a4ed1ec8eceb99f0a8b35/redash/monitor.py#L4

Thanks!
2018-06-30T22:24:15
getredash/redash
2722
getredash__redash-2722
[ "2689" ]
0772b12f19c3b249e6fbde006674b305c9895186
diff --git a/redash/query_runner/elasticsearch.py b/redash/query_runner/elasticsearch.py
--- a/redash/query_runner/elasticsearch.py
+++ b/redash/query_runner/elasticsearch.py
@@ -420,10 +420,9 @@ def run_query(self, query, user):
             if error:
                 return None, error
 
-            params = {"source": json.dumps(query_dict), "source_content_type": "application/json"}
             logger.debug("Using URL: %s", url)
-            logger.debug("Using params : %s", params)
-            r = requests.get(url, params=params, auth=self.auth)
+            logger.debug("Using query: %s", query_dict)
+            r = requests.get(url, json=query_dict, auth=self.auth)
             r.raise_for_status()
 
             logger.debug("Result: %s", r.json())
elastic search datasource query error
Redash version 4.1: any query returns this error:

Error running query: Failed to execute query. Return Code: 400 Reason: {"error":{"root_cause":[{"type":"illegal_argument_exception","reason":"request [/app_api_log_2018-07-20/_search] contains unrecognized parameter: [source_content_type]"}],"type":"illegal_argument_exception","reason":"request [/app_api_log_2018-07-20/_search] contains unrecognized parameter: [source_content_type]"},"status":400}

But I never use "source_content_type" in my query. Does Redash pass this unrecognized parameter to Elasticsearch? Will you fix this issue?
Hi @jesusslim. I think #2267 changes this behavior.
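A minimal sketch of what the patch above changes, assuming a reachable Elasticsearch host (the URL and query below are placeholders): the old code URL-encoded the serialized query plus a `source_content_type` parameter into the query string, which the reporter's Elasticsearch rejects, while the patched code sends the query as a JSON request body.

```python
import json
import requests

url = "http://es.example.com/app_api_log_2018-07-20/_search"  # placeholder
query_dict = {"query": {"match_all": {}}}

# Old behaviour: the query and "source_content_type" travel as URL parameters,
# which is the unrecognized parameter from the error above.
# r = requests.get(url, params={"source": json.dumps(query_dict),
#                               "source_content_type": "application/json"})

# New behaviour per the patch: the query travels as a JSON body instead.
r = requests.get(url, json=query_dict)
r.raise_for_status()
```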
2018-08-06T13:08:08
getredash/redash
2747
getredash__redash-2747
[ "2685" ]
1cafcc1eac9572aba870d95225a8ec56e186f15b
diff --git a/redash/query_runner/databricks.py b/redash/query_runner/databricks.py
new file mode 100644
--- /dev/null
+++ b/redash/query_runner/databricks.py
@@ -0,0 +1,23 @@
+import base64
+from .hive_ds import Hive
+from redash.query_runner import register
+
+try:
+    from pyhive import hive
+    from thrift.transport import THttpClient
+    enabled = True
+except ImportError:
+    enabled = False
+
+
+class DataBricks(Hive):
+
+    @classmethod
+    def type(cls):
+        return "databricks"
+
+    @classmethod
+    def enabled(cls):
+        return enabled
+
+register(DataBricks)
diff --git a/redash/query_runner/hive_ds.py b/redash/query_runner/hive_ds.py
--- a/redash/query_runner/hive_ds.py
+++ b/redash/query_runner/hive_ds.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import sys
+import base64
 
 from redash.query_runner import *
 from redash.utils import JSONEncoder
@@ -53,7 +54,24 @@ def configuration_schema(cls):
             },
             "username": {
                 "type": "string"
-            }
+            },
+            "use_http": {
+                "type": "boolean",
+                "title": "Use HTTP transport"
+            },
+            "http_scheme": {
+                "type": "string",
+                "title": "Scheme when using HTTP transport",
+                "default": "https"
+            },
+            "http_path": {
+                "type": "string",
+                "title": "Path when using HTTP transport"
+            },
+            "http_password": {
+                "type": "string",
+                "title": "Password when using HTTP transport"
+            },
         },
         "required": ["host"]
     }
@@ -97,7 +115,43 @@ def run_query(self, query, user):
 
         connection = None
         try:
-            connection = hive.connect(**self.configuration.to_dict())
+            host = self.configuration['host']
+
+            if self.configuration.get('use_http', False):
+                # default to https
+                scheme = self.configuration.get('http_scheme', 'https')
+
+                # if path is set but is missing initial slash, append it
+                path = self.configuration.get('http_path', '')
+                if path and path[0] != '/':
+                    path = '/' + path
+
+                # if port is set prepend colon
+                port = self.configuration.get('port', '')
+                if port:
+                    port = ':' + port
+
+                http_uri = "{}://{}{}{}".format(scheme, host, port, path)
+
+                # create transport
+                transport = THttpClient.THttpClient(http_uri)
+
+                # if username or password is set, add Authorization header
+                username = self.configuration.get('username', '')
+                password = self.configuration.get('http_password', '')
+                if username or password:
+                    auth = base64.b64encode(username + ':' + password)
+                    transport.setCustomHeaders({'Authorization': 'Basic ' + auth})
+
+                # create connection
+                connection = hive.connect(thrift_transport=transport)
+            else:
+                connection = hive.connect(
+                    host=host,
+                    port=self.configuration.get('port', None),
+                    database=self.configuration.get('database', 'default'),
+                    username=self.configuration.get('username', None),
+                )
 
             cursor = connection.cursor()
Add Databricks query runner

### Issue Summary

[Databricks](https://databricks.com/) offers a [Hive-like API](https://docs.databricks.com/user-guide/bi/jdbc-odbc-bi.html) that could be used to connect Redash to it to query tables over there.

### Steps to Reproduce

Mozilla has worked on a [POC based on the existing Hive query runner](https://gist.github.com/robotblake/d0eff602664ab62959772dbb4f0cca22) (courtesy of @robotblake) that could be the base for the code.
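Stripped of the runner's configuration plumbing, the HTTP-transport path in the patch above boils down to roughly the following sketch; the host, path, and credentials are placeholders, and the string handling follows the Python 2 style of the codebase:

```python
import base64

from pyhive import hive
from thrift.transport import THttpClient

# Placeholders: a real deployment supplies its own HTTPS endpoint.
http_uri = "https://example.cloud.databricks.com:443/sql/protocolv1/o/0/cluster-id"

transport = THttpClient.THttpClient(http_uri)

# HTTP basic auth, built the same way as in the hive_ds.py hunk above.
auth = base64.b64encode("username:password")
transport.setCustomHeaders({'Authorization': 'Basic ' + auth})

connection = hive.connect(thrift_transport=transport)
cursor = connection.cursor()
cursor.execute("SELECT 1")
print(cursor.fetchall())
```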
2018-08-13T00:23:34
getredash/redash
2760
getredash__redash-2760
[ "2771" ]
4d2df72f1f158d8152f6ba77fa6cd5bb8877337c
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py
--- a/redash/handlers/dashboards.py
+++ b/redash/handlers/dashboards.py
@@ -1,11 +1,11 @@
-from itertools import chain
-
 from flask import request, url_for
-from funcy import distinct, project, take
+from funcy import project, rpartial
 
 from flask_restful import abort
-from redash import models, serializers, settings
-from redash.handlers.base import BaseResource, get_object_or_404, paginate, filter_by_tags
+from redash import models, serializers
+from redash.handlers.base import (BaseResource, get_object_or_404, paginate,
+                                  filter_by_tags,
+                                  order_results as _order_results)
 from redash.serializers import serialize_dashboard
 from redash.permissions import (can_modify, require_admin_or_owner,
                                 require_object_modify_permission,
@@ -13,24 +13,61 @@
 from sqlalchemy.orm.exc import StaleDataError
 
 
+# Ordering map for relationships
+order_map = {
+    'name': 'lowercase_name',
+    '-name': '-lowercase_name',
+    'created_at': 'created_at',
+    '-created_at': '-created_at',
+}
+
+order_results = rpartial(_order_results, '-created_at', order_map)
+
+
 class DashboardListResource(BaseResource):
     @require_permission('list_dashboards')
     def get(self):
         """
         Lists all accessible dashboards.
+
+        :qparam number page_size: Number of queries to return per page
+        :qparam number page: Page number to retrieve
+        :qparam number order: Name of column to order by
+        :qparam number q: Full text search term
+
+        Responds with an array of :ref:`dashboard <dashboard-response-label>`
+        objects.
         """
         search_term = request.args.get('q')
 
         if search_term:
-            results = models.Dashboard.search(self.current_org, self.current_user.group_ids, self.current_user.id, search_term)
+            results = models.Dashboard.search(
+                self.current_org,
+                self.current_user.group_ids,
+                self.current_user.id,
+                search_term,
+            )
         else:
-            results = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id)
+            results = models.Dashboard.all(
+                self.current_org,
+                self.current_user.group_ids,
+                self.current_user.id,
+            )
 
         results = filter_by_tags(results, models.Dashboard.tags)
 
+        # order results according to passed order parameter
+        ordered_results = order_results(results)
+
         page = request.args.get('page', 1, type=int)
         page_size = request.args.get('page_size', 25, type=int)
-        response = paginate(results, page, page_size, serialize_dashboard)
+
+        response = paginate(
+            ordered_results,
+            page=page,
+            page_size=page_size,
+            serializer=serialize_dashboard,
+        )
 
         return response
 
@@ -120,7 +157,7 @@ def post(self, dashboard_slug):
 
         require_object_modify_permission(dashboard, self.current_user)
 
-        updates = project(dashboard_properties, ('name', 'layout', 'version', 'tags',
+        updates = project(dashboard_properties, ('name', 'layout', 'version', 'tags', 'is_draft',
                                                  'dashboard_filters_enabled'))
 
         # SQLAlchemy handles the case where a concurrent transaction beats us
diff --git a/redash/models.py b/redash/models.py
--- a/redash/models.py
+++ b/redash/models.py
@@ -1405,6 +1405,16 @@ def favorites(cls, user, base_query=None):
     def get_by_slug_and_org(cls, slug, org):
         return cls.query.filter(cls.slug == slug, cls.org == org).one()
 
+    @hybrid_property
+    def lowercase_name(self):
+        "Optional property useful for sorting purposes."
+        return self.name.lower()
+
+    @lowercase_name.expression
+    def lowercase_name(cls):
+        "The SQLAlchemy expression for the property above."
+        return func.lower(cls.name)
+
     def __unicode__(self):
         return u"%s=%s" % (self.id, self.name)
Add server-side sorting for dashboard list

### Issue Summary

The dashboard list view doesn't currently support server-side sorting like the new query list view does.

### Steps to Reproduce

1. Open the dashboard list view and try to sort
2. Find the table column headers not able to sort

### Technical details:

* Redash Version: Redash master (bc15c0b6d1f4fc79e10b6920ad6117f118a8c820)
* Browser/OS: Firefox/macOS
* How did you install Redash: Local development
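Conceptually, the whitelisted ordering in the patch above works like the sketch below; `apply_order` is an illustrative stand-in, since the real `order_results` helper lives in `redash.handlers.base` and is not shown in this diff:

```python
# Illustrative sketch only: maps client-supplied order names to model
# attributes, falling back to a default for anything unrecognized.
order_map = {
    'name': 'lowercase_name',
    '-name': '-lowercase_name',
    'created_at': 'created_at',
    '-created_at': '-created_at',
}

def apply_order(query, model, requested_order, default='-created_at'):
    # Unknown column names from the client fall back to the default order.
    order = order_map.get(requested_order, default)
    column = getattr(model, order.lstrip('-'))
    return query.order_by(column.desc() if order.startswith('-') else column.asc())
```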
2018-08-23T18:08:40
getredash/redash
2799
getredash__redash-2799
[ "2565" ]
1661553da78cd24a70c06c592d7897b30af5141d
diff --git a/redash/extensions.py b/redash/extensions.py
--- a/redash/extensions.py
+++ b/redash/extensions.py
@@ -1,4 +1,5 @@
-from pkg_resources import iter_entry_points
+import os
+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir
 
 
 def init_extensions(app):
@@ -10,5 +11,20 @@ def init_extensions(app):
 
     for entry_point in iter_entry_points('redash.extensions'):
         app.logger.info('Loading Redash extension %s.', entry_point.name)
-        extension = entry_point.load()
-        app.redash_extensions[entry_point.name] = extension(app)
+        try:
+            extension = entry_point.load()
+            app.redash_extensions[entry_point.name] = {
+                "entry_function": extension(app),
+                "resources_list": []
+            }
+        except ImportError:
+            app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)
+            (root_module, _) = os.path.splitext(entry_point.module_name)
+            content_folder_relative = os.path.join(entry_point.name, 'bundle')
+
+            # If it's a frontend extension only, store a list of files in the bundle directory.
+            if resource_isdir(root_module, content_folder_relative):
+                app.redash_extensions[entry_point.name] = {
+                    "entry_function": None,
+                    "resources_list": resource_listdir(root_module, content_folder_relative)
+                }
Add front-end extension capability
This is similar in concept to https://github.com/getredash/redash/pull/2354/files
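The backend half of this is what the patch above implements; a toy version of the setuptools entry-point discovery it relies on might look like the following (the group name matches the patch, the rest is illustrative):

```python
from pkg_resources import iter_entry_points

# Toy illustration of the discovery loop in redash/extensions.py above:
# each installed package can advertise a callable under the
# "redash.extensions" entry-point group in its setup.py.
def load_extensions(app):
    extensions = {}
    for entry_point in iter_entry_points('redash.extensions'):
        extension = entry_point.load()                 # import the advertised object
        extensions[entry_point.name] = extension(app)  # call it with the app
    return extensions
```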
Some thoughts to start the discussion:

1. We already have some dynamic functionality around adding custom pages, visualizations and components by auto loading any file in `client/app/{components,pages,visualizations,services}` and executing the exported function (while passing the app module to it). `webpack`'s `require.context` function can't take a variable for a path, but we can use webpack's `alias` configuration to be able to specify an additional folder to load code from in a similar way. This folder path can be defined using an environment variable, so we no longer have to put it in the existing folder structure (which gets overridden when deploying a new version).
2. The above will allow for adding new functionality to Redash (like new pages/components/visualizations), but we also need extension points in the code, to be able to modify and extend existing functionality. This is what we did in the [Policy pull request](https://github.com/getredash/redash/pull/2560). There are two types of extensions there:
   * The `Policy` object which you can replace with a different implementation, that allows you to set custom logic around whether certain operations are permitted.
   * References to non existing components (like `<users-list-exta>` [here](https://github.com/getredash/redash/pull/2560/files#diff-c07c2352a3162819862c318998aed2cbR7)) which you can implement to show additional information in various places in the application. We can add similar "extension points" to implement some of the custom UI Mozilla has (like data source version or documentation link).

This is what I have so far; some additional half-baked thoughts I have:

* It might be interesting to have the ability to load extensions at "runtime" (specially visualizations), but I'm not sure how useful that can be.
* It can be useful to be able to replace components with a different implementation. Not sure how to achieve this though.

Also we need to keep in mind that any solution we implement needs to keep working when we migrate to React...

I stumbled at [flask-webpackext](https://github.com/inveniosoftware/flask-webpackext), which in their README lists the following as one of the features:

> Collect bundles: If you Webpack project is spread over multiple Python packages, Flask-WebpackExt can help you dynamically assemble the files into a Webpack project. This is useful if you don't know until runtime which packages are installed.

Might be interesting for us as well.

@arikfr Interesting indeed since that'd allow us to ship bundle configurations based on Python entrypoints and have that bridge to the backend extensions that we're looking for. Behind the scenes this uses [pywebpack](https://github.com/inveniosoftware/pywebpack) and the flask package is just the integration layer for Flask best practices/patterns. In fact there seems to be a stub already to do the [entrypoint based loading there](https://pywebpack.readthedocs.io/en/latest/_modules/pywebpack/helpers.html#bundles_from_entry_point). Just not used anywhere it seems. This smells like not fully baked but maybe we can talk to @lnielsen and see if he's able to shed some light for the plans of the library?

I'm happy to provide more info. https://getindico.io is using the plugin, and next week we are starting a three-week sprint to integrate it into Invenio, and push the pynpm, pywebpack and flask-webpack out into final releases.
We have an initial prototype of the implementation available [here](https://github.com/inveniosoftware/invenio-assets/commit/89be089c5b5cc9e67f1ec1c5e589bfc77f9f2082)

There's also the [bundle test case](https://github.com/inveniosoftware/pywebpack/blob/master/tests/test_pywebpack.py#L136)

E.g. to load bundles from entry points:

```python
setup(
    entry_points={'my_bundles': ['module.bundles:abundle']}
)

# ...

project = WebpackBundleProject(
    # ...
    bundles=bundles_from_entry_point('my_bundles'),
)
```

Thanks @lnielsen, that looks super useful, thanks!
2018-09-07T16:53:17
getredash/redash
2833
getredash__redash-2833
[ "2819" ]
2b0e6e9e790c1f1c9b782887e1ca871726437bf5
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py
--- a/redash/handlers/dashboards.py
+++ b/redash/handlers/dashboards.py
@@ -291,10 +291,20 @@ def delete(self, dashboard_id):
             'object_type': 'dashboard',
         })
 
+
 class DashboardTagsResource(BaseResource):
     @require_permission('list_dashboards')
     def get(self):
         """
         Lists all accessible dashboards.
         """
-        return {t[0]: t[1] for t in models.Dashboard.all_tags(self.current_org, self.current_user)}
+        tags = models.Dashboard.all_tags(self.current_org, self.current_user)
+        return {
+            'tags': [
+                {
+                    'name': name,
+                    'count': count,
+                }
+                for name, count in tags
+            ]
+        }
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -382,4 +382,16 @@ def post(self, query_id):
 
 class QueryTagsResource(BaseResource):
     def get(self):
-        return {t[0]: t[1] for t in models.Query.all_tags(self.current_user, True)}
+        """
+        Returns all query tags including those for drafts.
+        """
+        tags = models.Query.all_tags(self.current_user, include_drafts=True)
+        return {
+            'tags': [
+                {
+                    'name': name,
+                    'count': count,
+                }
+                for name, count in tags
+            ]
+        }
Improve tag list ordering on query and dashboard list

### Issue Summary

On the queries and dashboard list views the tags are ordered by usage count, but the sort order is ascending, meaning the tags with the *least* usage are shown at the top and the tags with the most tagged items at the bottom of the list. I would suggest changing that to descending.

In terms of visualization, I wonder if it would make sense to show the number of items that individual tags have been assigned to?

Our QA team has filed a related issue to sort the tags alphabetically: https://github.com/mozilla/redash/issues/481

### Steps to Reproduce

1. Go to either the queries or dashboard list with some tags assigned
2. See the list of tags being shown with the least tagged items first, not last.

### Technical details:

* Redash Version: 5.0beta
* Browser/OS: Firefox 62 / macOS 10.13.6
* How did you install Redash: Docker
"I wonder if it would make sense to show the number of items that individual tags have been assigned to?" We could just use tag (x) to show how many items has this tag. I like the idea but now sure if this would be actually useful. WDYT @arikfr I think it's useful to show the number of uses of a tag, to help direct users into what tags to use. Because if two tags have similar meaning but one is more popular, it will direct people in the right direction. About whether to use popularity order or alphabetical one -- I'm not sure. Both makes sense, but for sure we should at least reverse the current order (major fail on my part 🤦‍♂️ ).
2018-09-17T15:57:40
getredash/redash
2884
getredash__redash-2884
[ "2874" ]
e3a63899d35a981ff1b1aad0ff192ba19b816aed
diff --git a/redash/query_runner/yandex_metrika.py b/redash/query_runner/yandex_metrica.py
similarity index 87%
rename from redash/query_runner/yandex_metrika.py
rename to redash/query_runner/yandex_metrica.py
--- a/redash/query_runner/yandex_metrika.py
+++ b/redash/query_runner/yandex_metrica.py
@@ -59,18 +59,19 @@ def parse_ym_response(response):
     return {'columns': columns, 'rows': rows}
 
 
-class YandexMetrika(BaseSQLQueryRunner):
+class YandexMetrica(BaseSQLQueryRunner):
     @classmethod
     def annotate_query(cls):
         return False
 
     @classmethod
     def type(cls):
+        # This is written with a "k" for backward-compatibility. See #2874.
         return "yandex_metrika"
 
     @classmethod
     def name(cls):
-        return "Yandex Metrika"
+        return "Yandex Metrica"
 
     @classmethod
     def configuration_schema(cls):
@@ -86,9 +87,9 @@ def configuration_schema(cls):
         }
 
     def __init__(self, configuration):
-        super(YandexMetrika, self).__init__(configuration)
+        super(YandexMetrica, self).__init__(configuration)
         self.syntax = 'yaml'
-        self.host = 'https://api-metrika.yandex.ru'
+        self.host = 'https://api-metrica.yandex.com'
         self.list_path = 'counters'
 
     def _get_tables(self, schema):
@@ -118,7 +119,7 @@ def _send_query(self, path='stat/v1/data', **kwargs):
         return r.json()
 
     def run_query(self, query, user):
-        logger.debug("Metrika is about to execute query: %s", query)
+        logger.debug("Metrica is about to execute query: %s", query)
         data = None
         query = query.strip()
         if query == "":
@@ -147,20 +148,21 @@ def run_query(self, query, user):
         return data, error
 
 
-class YandexAppMetrika(YandexMetrika):
+class YandexAppMetrica(YandexMetrica):
     @classmethod
     def type(cls):
+        # This is written with a "k" for backward-compatibility. See #2874.
         return "yandex_appmetrika"
 
     @classmethod
     def name(cls):
-        return "Yandex AppMetrika"
+        return "Yandex AppMetrica"
 
     def __init__(self, configuration):
-        super(YandexAppMetrika, self).__init__(configuration)
-        self.host = 'https://api.appmetrica.yandex.ru'
+        super(YandexAppMetrica, self).__init__(configuration)
+        self.host = 'https://api.appmetrica.yandex.com'
        self.list_path = 'applications'
 
 
-register(YandexMetrika)
-register(YandexAppMetrika)
+register(YandexMetrica)
+register(YandexAppMetrica)
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py
--- a/redash/settings/__init__.py
+++ b/redash/settings/__init__.py
@@ -169,7 +169,7 @@ def all_settings():
         'redash.query_runner.impala_ds',
         'redash.query_runner.vertica',
         'redash.query_runner.clickhouse',
-        'redash.query_runner.yandex_metrika',
+        'redash.query_runner.yandex_metrica',
         'redash.query_runner.treasuredata',
         'redash.query_runner.sqlite',
         'redash.query_runner.dynamodb_sql',
Fix Yandex Metrica name
Should be Yandex Metrica. Need to apply a fix, but keep the type name with a `k` to avoid breaking existing deployments.
FWIW, it seems it has been rebranded for the global market, e.g. see https://metrika.yandex.ru/promo. The API URL is also specified as [`api-metrica.yandex.com`](https://tech.yandex.com/metrika/doc/api2/api_v1/examples-docpage/) (instead of our `api-metrika.yandex.ru`) now, I think we should update that as well.
2018-10-01T16:13:45
getredash/redash
2931
getredash__redash-2931
[ "2924", "2924" ]
be7f601d21cf2b8748ec918b72323caab8da30ab
diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py
--- a/redash/query_runner/google_spreadsheets.py
+++ b/redash/query_runner/google_spreadsheets.py
@@ -147,6 +147,7 @@ def request(self, *args, **kwargs):
 
 
 class GoogleSpreadsheet(BaseQueryRunner):
+
     @classmethod
     def annotate_query(cls):
         return False
@@ -190,13 +191,22 @@ def _get_spreadsheet_service(self):
     def test_connection(self):
         self._get_spreadsheet_service()
 
+    def is_url_key(self, key):
+        if key.startswith('https://'):
+            return True
+        return False
+
     def run_query(self, query, user):
         logger.debug("Spreadsheet is about to execute query: %s", query)
         key, worksheet_num = parse_query(query)
 
         try:
             spreadsheet_service = self._get_spreadsheet_service()
-            spreadsheet = spreadsheet_service.open_by_key(key)
+
+            if self.is_url_key(key):
+                spreadsheet = spreadsheet_service.open_by_url(key)
+            else:
+                spreadsheet = spreadsheet_service.open_by_key(key)
 
             data = parse_spreadsheet(spreadsheet, worksheet_num)
Google Spreadsheets: support for open by url
We currently use [gspread](https://github.com/burnash/gspread)'s [`open_by_key`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_key) method to load spreadsheets. But it also has the [`open_by_url`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_url) method. Using the latter we can add support for just providing the spreadsheet URL, which will be much more friendly to end users.

To remain backward compatible we can check if the query is of format `http...` -> use the open by url method, otherwise use the open by key one.
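A minimal sketch of the dispatch described above, using gspread's documented `open_by_key`/`open_by_url` client methods; the key and URL values are placeholders:

```python
import gspread

def open_spreadsheet(gc, key):
    # Backward compatible: URLs go through open_by_url, while bare keys
    # keep using open_by_key, as described above.
    if key.startswith('https://'):
        return gc.open_by_url(key)
    return gc.open_by_key(key)

# gc = gspread.authorize(credentials)  # credentials setup omitted
# sheet = open_spreadsheet(gc, 'https://docs.google.com/spreadsheets/d/<id>')
```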
2018-10-11T15:09:26
getredash/redash
2951
getredash__redash-2951
[ "2950", "2950" ]
02e919c39bd495cb9dd2ec1021b3225dcf42b213
diff --git a/redash/cli/users.py b/redash/cli/users.py --- a/redash/cli/users.py +++ b/redash/cli/users.py @@ -245,5 +245,5 @@ def list(organization=None): if i > 0: print("-" * 20) - print("Id: {}\nName: {}\nEmail: {}\nOrganization: {}".format( - user.id, user.name.encode('utf-8'), user.email, user.org.name)) + print("Id: {}\nName: {}\nEmail: {}\nOrganization: {}\nActive: {}".format( + user.id, user.name.encode('utf-8'), user.email, user.org.name, not(user.is_disabled)))
diff --git a/tests/test_cli.py b/tests/test_cli.py --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -391,6 +391,7 @@ def test_list(self): Name: Fred Foobar Email: [email protected] Organization: Default + Active: True """ self.assertMultiLineEqual(result.output, textwrap.dedent(output).lstrip())
Users who are already disabled still appear on CLI
### Issue Summary
When I list users with the Redash CLI, users I have disabled still show up in the list, with nothing marking them as disabled.
### Steps to Reproduce
```
docker exec -it redash_server_1 ./manage.py users list
```
```
Id: 2
Name: sumito
Email: [email protected]
Organization: sumito
--------------------
```
### Technical details:
* Redash Version: 5.0.1+b4850
* Browser/OS: Chrome
* How did you install Redash: docker
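A minimal sketch of surfacing the flag in the CLI output, assuming a user object with an `is_disabled` attribute (this mirrors the `Active` line the patch above adds):

```python
def format_user(user):
    # Include an Active flag so disabled accounts are distinguishable
    # in the listing instead of looking like regular users.
    return "Id: {}\nName: {}\nEmail: {}\nActive: {}".format(
        user.id, user.name, user.email, not user.is_disabled)
```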
2018-10-15T03:19:47
getredash/redash
3,008
getredash__redash-3008
[ "2965", "2965" ]
ab6ed7da34dd2797914382c435981450b7bf9d7a
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py --- a/redash/query_runner/google_analytics.py +++ b/redash/query_runner/google_analytics.py @@ -43,7 +43,7 @@ def parse_ga_response(response): }) rows = [] - for r in response['rows']: + for r in response.get('rows', []): d = {} for c, value in enumerate(r): column_name = response['columnHeaders'][c]['name']
GA Data Source throws an error when no rows returned
### Issue Summary
Google Analytics Data Source throws `Error running query: 'rows'` when the query result is empty. I have a pretty simple query with dimensions and filters, like:
```json
{
  "ids": "ga:177xxxxxx",
  "start_date": "2018-10-08",
  "end_date": "2018-10-12",
  "metrics": "ga:uniqueEvents",
  "dimensions": "ga:dimension1,ga:dimension3",
  "filters": "ga:dimension2==userrole;ga:eventCategory==eventcategory;ga:eventAction==enentaction;ga:dimension1!=demo"
}
```
Sometimes it returns an empty result because there is no data, which results in an error in Redash.
### Steps to Reproduce
1. Create the Google Analytics Data Source
2. Make some query returning zero rows
3. Execute it in the query editor
`Error running query: 'rows'` will be thrown. While this might be considered not a bug, I'd expect just an empty result with no errors.
### Technical details:
* Redash Version: 5.0.1
* Browser/OS: Chrome/macOS
* How did you install Redash: docker-compose
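A minimal sketch of the defensive parsing, assuming the raw Google Analytics API response dict:

```python
def parse_rows(response):
    # The API omits the 'rows' key entirely when a query matches no
    # data, so fall back to an empty list instead of raising KeyError.
    headers = [h['name'] for h in response.get('columnHeaders', [])]
    return [dict(zip(headers, row)) for row in response.get('rows', [])]
```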
2018-10-27T22:43:06
getredash/redash
3,078
getredash__redash-3078
[ "2919" ]
42b05cee00dab3b73e6275bf50df91796a8968eb
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py --- a/redash/query_runner/__init__.py +++ b/redash/query_runner/__init__.py @@ -146,6 +146,7 @@ def _get_tables_stats(self, tables_dict): class BaseHTTPQueryRunner(BaseQueryRunner): response_error = "Endpoint returned unexpected status code" requires_authentication = False + requires_url = True url_title = 'URL base path' username_title = 'HTTP Basic Auth Username' password_title = 'HTTP Basic Auth Password' @@ -168,9 +169,15 @@ def configuration_schema(cls): 'title': cls.password_title, }, }, - 'required': ['url'], 'secret': ['password'] } + + if cls.requires_url or cls.requires_authentication: + schema['required'] = [] + + if cls.requires_url: + schema['required'] += ['url'] + if cls.requires_authentication: schema['required'] += ['username', 'password'] return schema diff --git a/redash/query_runner/url.py b/redash/query_runner/url.py --- a/redash/query_runner/url.py +++ b/redash/query_runner/url.py @@ -2,6 +2,7 @@ class Url(BaseHTTPQueryRunner): + requires_url = False @classmethod def annotate_query(cls):
URL query runner: URL base path doesn't need to be a required field
We would appreciate it if you could provide us with more info about this issue/pr!
Seems like a regression from #2318.
Wait, hm, there is no other field to build the request URL from, though:
https://github.com/getredash/redash/blob/c2429e92d2a5dd58cbe378b9cc06f3be969f747e/redash/query_runner/__init__.py#L146-L176
The field is kind of badly named; it's not really the "URL base path" but the actual "URL" that the field contains. I had a feeling using a generic base class would come back to bite us here :)
You pass the URL you want to query as the query text. The idea of using a base path was to allow limiting the URLs the end user might use and/or to save some typing if you happen to load many URLs from the same base path.
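A sketch of assembling the `required` list per subclass, along the lines of the patch above (the class attributes are illustrative):

```python
class BaseHTTPQueryRunner(object):
    requires_url = True
    requires_authentication = False

    @classmethod
    def required_fields(cls):
        # Build the JSON-schema "required" list per subclass, so the URL
        # runner can simply set requires_url = False to drop the field.
        required = []
        if cls.requires_url:
            required.append('url')
        if cls.requires_authentication:
            required.extend(['username', 'password'])
        return required


class Url(BaseHTTPQueryRunner):
    requires_url = False
```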
2018-11-14T12:28:28
getredash/redash
3,085
getredash__redash-3085
[ "2770" ]
bf85ddaaffa125a7474d87690f5a2cd97be1613b
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py --- a/redash/query_runner/big_query.py +++ b/redash/query_runner/big_query.py @@ -110,7 +110,8 @@ def configuration_schema(cls): }, 'useStandardSql': { "type": "boolean", - 'title': "Use Standard SQL (Beta)", + 'title': "Use Standard SQL", + "default": True, }, 'location': { "type": "string", @@ -238,7 +239,9 @@ def _get_columns_schema(self, table_data): for column in table_data['schema']['fields']: columns.extend(self._get_columns_schema_column(column)) - return {'name': table_data['id'], 'columns': columns} + project_id = self._get_project_id() + table_name = table_data['id'].replace("%s:" % project_id, "") + return {'name': table_name, 'columns': columns} def _get_columns_schema_column(self, column): columns = [] @@ -339,7 +342,8 @@ def configuration_schema(cls): }, 'useStandardSql': { "type": "boolean", - 'title': "Use Standard SQL (Beta)", + 'title': "Use Standard SQL", + "default": True, }, 'location': { "type": "string",
Switch to Standard SQL for BigQuery as the default I think it's time to switch to use Standard SQL as the default syntax for BigQuery. Also, we should show table names in the schema browser in the Standard SQL format instead of the legacy one (`.` as separator instead of `:`).
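A hedged sketch of the second part (showing Standard SQL style names in the schema browser), assuming table ids from the API look like `project:dataset.table`:

```python
def standard_sql_table_name(table_id, project_id):
    # Legacy SQL ids look like "project:dataset.table"; Standard SQL
    # references them as "dataset.table", so drop the project prefix.
    return table_id.replace('%s:' % project_id, '')


print(standard_sql_table_name('myproject:sales.orders', 'myproject'))  # sales.orders
```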
2018-11-15T10:57:37
getredash/redash
3,088
getredash__redash-3088
[ "1646" ]
d43b35ba6f9e3b8ee013062cc25ce6c9c165ea25
diff --git a/redash/query_runner/python.py b/redash/query_runner/python.py --- a/redash/query_runner/python.py +++ b/redash/query_runner/python.py @@ -211,10 +211,15 @@ def get_query_result(query_id): return json_loads(query.latest_query_data.data) + def get_current_user(self): + return self._current_user.to_dict() + def test_connection(self): pass def run_query(self, query, user): + self._current_user = user + try: error = None @@ -239,6 +244,7 @@ def run_query(self, query, user): restricted_globals = dict(__builtins__=builtins) restricted_globals["get_query_result"] = self.get_query_result restricted_globals["get_source_schema"] = self.get_source_schema + restricted_globals["get_current_user"] = self.get_current_user restricted_globals["execute_query"] = self.execute_query restricted_globals["add_result_column"] = self.add_result_column restricted_globals["add_result_row"] = self.add_result_row
Access current user id/details from Python data source
First of all, thanks to the authors and contributors for a very interesting and promising tool.
### Question:
Is it possible to access the current user's id/details from a Python data source?
I would like to be able to control access to the viewed data at row level without the need to create and maintain multiple (in my case about 80) data sources and dashboards.
My desired scenario:
1. Store A logs into their account and accesses a dashboard with KPIs/metrics.
2. The dashboard calls the Python data source(s).
3. The Python code loads data from a db table with all stores, identifies the current user and filters out all records where store != Store A.
4. The dashboard is dynamically loaded with entries for Store A only.
Also, if there are any other ways to achieve the above, please advise.
### Technical details:
* Redash Version: 0.12.0
Also interested in something along these lines!
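With the helper added by the patch above, a Python data source script could filter rows by the signed-in user. A hedged sketch; the data source, table, and column names are made up:

```python
# get_current_user() returns the logged-in user's details as a dict.
user = get_current_user()

data = execute_query('stores_db', "SELECT store_email, kpi, value FROM store_kpis")

# Keep only the rows that belong to the signed-in user's store.
result = {
    'columns': data['columns'],
    'rows': [row for row in data['rows'] if row['store_email'] == user['email']],
}
```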
2018-11-17T08:30:06
getredash/redash
3,120
getredash__redash-3120
[ "3108" ]
07c0bba56849dfa902a9785cf1b1c16b59dc2ec4
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -31,7 +31,7 @@ from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.ext.mutable import Mutable from sqlalchemy.inspection import inspect -from sqlalchemy.orm import backref, contains_eager, joinedload, object_session +from sqlalchemy.orm import backref, contains_eager, joinedload, object_session, load_only from sqlalchemy.orm.exc import NoResultFound # noqa: F401 from sqlalchemy.types import TypeDecorator from sqlalchemy.orm.attributes import flag_modified @@ -990,20 +990,23 @@ def favorites(cls, user, base_query=None): @classmethod def all_tags(cls, user, include_drafts=False): - where = cls.is_archived == False - - if not include_drafts: - where &= cls.is_draft == False - - where &= DataSourceGroup.group_id.in_(user.group_ids) + queries = cls.all_queries( + group_ids=user.group_ids, + user_id=user.id, + drafts=include_drafts, + ) tag_column = func.unnest(cls.tags).label('tag') usage_count = func.count(1).label('usage_count') - return db.session.query(tag_column, usage_count).join( - DataSourceGroup, - cls.data_source_id == DataSourceGroup.data_source_id - ).filter(where).distinct().group_by(tag_column).order_by(usage_count.desc()) # .limit(limit) + query = ( + db.session + .query(tag_column, usage_count) + .group_by(tag_column) + .filter(Query.id.in_(queries.options(load_only('id')))) + .order_by(usage_count.desc()) + ) + return query @classmethod def by_user(cls, user): @@ -1360,7 +1363,7 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model __tablename__ = 'dashboards' __mapper_args__ = { "version_id_col": version - } + } @classmethod def all(cls, org, group_ids, user_id): @@ -1390,26 +1393,19 @@ def search(cls, org, groups_ids, user_id, search_term): @classmethod def all_tags(cls, org, user): + dashboards = cls.all(org, user.group_ids, user.id) + tag_column = func.unnest(cls.tags).label('tag') usage_count = func.count(1).label('usage_count') query = ( - db.session.query(tag_column, usage_count) - .outerjoin(Widget) - .outerjoin(Visualization) - .outerjoin(Query) - .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) - .filter( - Dashboard.is_archived == False, - (DataSourceGroup.group_id.in_(user.group_ids) | - (Dashboard.user_id == user.id) | - ((Widget.dashboard != None) & (Widget.visualization == None))), - Dashboard.org == org) - .group_by(tag_column)) - - query = query.filter(or_(Dashboard.user_id == user.id, Dashboard.is_draft == False)) - - return query.order_by(usage_count.desc()) + db.session + .query(tag_column, usage_count) + .group_by(tag_column) + .filter(Dashboard.id.in_(dashboards.options(load_only('id')))) + .order_by(usage_count.desc()) + ) + return query @classmethod def favorites(cls, user, base_query=None):
diff --git a/tests/models/test_dashboards.py b/tests/models/test_dashboards.py new file mode 100644 --- /dev/null +++ b/tests/models/test_dashboards.py @@ -0,0 +1,30 @@ +from tests import BaseTestCase +from redash.models import db, Dashboard + + +class DashboardTest(BaseTestCase): + def create_tagged_dashboard(self, tags): + dashboard = self.factory.create_dashboard(tags=tags) + ds = self.factory.create_data_source(group=self.factory.default_group) + query = self.factory.create_query(data_source=ds) + # We need a bunch of visualizations and widgets configured + # to trigger wrong counts via the left outer joins. + vis1 = self.factory.create_visualization(query_rel=query) + vis2 = self.factory.create_visualization(query_rel=query) + vis3 = self.factory.create_visualization(query_rel=query) + widget1 = self.factory.create_widget(visualization=vis1, dashboard=dashboard) + widget2 = self.factory.create_widget(visualization=vis2, dashboard=dashboard) + widget3 = self.factory.create_widget(visualization=vis3, dashboard=dashboard) + dashboard.layout = '[[{}, {}, {}]]'.format(widget1.id, widget2.id, widget3.id) + db.session.commit() + return dashboard + + def test_all_tags(self): + self.create_tagged_dashboard(tags=['tag1']) + self.create_tagged_dashboard(tags=['tag1', 'tag2']) + self.create_tagged_dashboard(tags=['tag1', 'tag2', 'tag3']) + + self.assertEqual( + list(Dashboard.all_tags(self.factory.org, self.factory.user)), + [('tag1', 3), ('tag2', 2), ('tag3', 1)] + ) diff --git a/tests/models/test_queries.py b/tests/models/test_queries.py --- a/tests/models/test_queries.py +++ b/tests/models/test_queries.py @@ -15,6 +15,21 @@ def test_changing_query_text_changes_hash(self): db.session.flush() self.assertNotEquals(old_hash, q.query_hash) + def create_tagged_query(self, tags): + ds = self.factory.create_data_source(group=self.factory.default_group) + query = self.factory.create_query(data_source=ds, tags=tags) + return query + + def test_all_tags(self): + self.create_tagged_query(tags=['tag1']) + self.create_tagged_query(tags=['tag1', 'tag2']) + self.create_tagged_query(tags=['tag1', 'tag2', 'tag3']) + + self.assertEqual( + list(Query.all_tags(self.factory.user)), + [('tag1', 3), ('tag2', 2), ('tag3', 1)] + ) + def test_search_finds_in_name(self): q1 = self.factory.create_query(name=u"Testing seåřċħ") q2 = self.factory.create_query(name=u"Testing seåřċħing")
Tags count for dashboards is wrong ![image](https://user-images.githubusercontent.com/71468/48824355-b395f400-ed6c-11e8-8fcb-7a04d6f2c2a4.png)
Huh! @arikfr I'll take a look.
2018-11-26T09:45:02
getredash/redash
3,187
getredash__redash-3187
[ "3181" ]
8481dacff43b767ccc129ea068a09d888c130db2
diff --git a/redash/__init__.py b/redash/__init__.py --- a/redash/__init__.py +++ b/redash/__init__.py @@ -3,7 +3,7 @@ import urlparse import urllib import redis -from flask import Flask +from flask import Flask, current_app from flask_sslify import SSLify from werkzeug.contrib.fixers import ProxyFix from werkzeug.routing import BaseConverter @@ -141,3 +141,11 @@ def create_app(load_admin=True): chrome_logger.init_app(app) return app + + +def safe_create_app(): + """Return current_app or create a new one.""" + if current_app: + return current_app + + return create_app() \ No newline at end of file diff --git a/redash/worker.py b/redash/worker.py --- a/redash/worker.py +++ b/redash/worker.py @@ -8,7 +8,7 @@ from celery import Celery from celery.schedules import crontab from celery.signals import worker_process_init -from redash import __version__, create_app, settings +from redash import __version__, safe_create_app, settings from redash.metrics import celery as celery_metrics celery = Celery('redash', @@ -77,14 +77,14 @@ def __call__(self, *args, **kwargs): # Create Flask app after forking a new worker, to make sure no resources are shared between processes. @worker_process_init.connect def init_celery_flask_app(**kwargs): - app = create_app() + app = safe_create_app() app.app_context().push() # Hook for extensions to add periodic tasks. @celery.on_after_configure.connect def add_periodic_tasks(sender, **kwargs): - app = create_app() + app = safe_create_app() periodic_tasks = getattr(app, 'periodic_tasks', {}) for params in periodic_tasks.values(): sender.add_periodic_task(**params)
RuntimeError: Working outside of application context.
After deploying v6-beta to demo.redash.io we started seeing this error, which we haven't seen before:
https://sentry.io/share/issue/08f84c11d6d04474b642b2d1435f9fb8/
By guessing at what might be the cause of the issue, I commented out the following code:
https://github.com/getredash/redash/blob/cfe12c5a5d37b8ec01c8a26fd9c8c8ad97e12d07/redash/worker.py#L85-L90
Once I did, the issue stopped happening.
The reason I suspected this code is that we call `create_app` here for the second time during the initialization process of Celery. I'm not sure why it causes the error, though, or how to fix it (aside from removing this code). 🤔
@jezdez @emtwo If I'm not mistaken, you have been running this code in your environment for a long time now. Have you seen this exception?
Hm, I have personally not come across this and I'm having trouble reproducing it locally in the docker dev environment. Can you reproduce it locally or does this only show up on sentry? One idea we could try in `add_periodic_tasks()` is similar to what's done here to avoid creating a new context if it's not needed: https://github.com/getredash/redash/blob/master/redash/cli/__init__.py#L13 `app = current_app or create_app()` I couldn't reproduce it locally, although I didn't invest much time in it. I will give the `current_app or create_app()` option a try and see if the problem stops or persists. But can you check in your production's Sentry reports if there was a report of a similar error?
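For reference, the `current_app or create_app()` pattern works because Werkzeug's `LocalProxy` evaluates as falsy when no app context is bound. A minimal sketch (the helper name is illustrative):

```python
from flask import current_app

from redash import create_app


def get_or_create_app():
    # current_app is a proxy; it evaluates as falsy outside an app
    # context, so only build a fresh app when none is bound yet.
    return current_app or create_app()
```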
2018-12-13T12:32:26
getredash/redash
3,277
getredash__redash-3277
[ "3294" ]
7847cf7d63ad680d7fb3915dc8b41005305161c0
diff --git a/migrations/versions/73beceabb948_bring_back_null_schedule.py b/migrations/versions/73beceabb948_bring_back_null_schedule.py new file mode 100644 --- /dev/null +++ b/migrations/versions/73beceabb948_bring_back_null_schedule.py @@ -0,0 +1,56 @@ +"""bring_back_null_schedule + +Revision ID: 73beceabb948 +Revises: e7f8a917aa8e +Create Date: 2019-01-17 13:22:21.729334 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql +from sqlalchemy.sql import table + +from redash.models import MutableDict, PseudoJSON + +# revision identifiers, used by Alembic. +revision = '73beceabb948' +down_revision = 'e7f8a917aa8e' +branch_labels = None +depends_on = None + + +def is_empty_schedule(schedule): + if schedule is None: + return False + + if schedule == {}: + return True + + if schedule.get('interval') is None and schedule.get('until') is None and schedule.get('day_of_week') is None and schedule.get('time') is None: + return True + + return False + + +def upgrade(): + op.alter_column('queries', 'schedule', + nullable=True, + server_default=None) + + queries = table( + 'queries', + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('schedule', MutableDict.as_mutable(PseudoJSON))) + + conn = op.get_bind() + for query in conn.execute(queries.select()): + if is_empty_schedule(query.schedule): + conn.execute( + queries + .update() + .where(queries.c.id == query.id) + .values(schedule=None)) + + +def downgrade(): + pass diff --git a/redash/models/__init__.py b/redash/models/__init__.py --- a/redash/models/__init__.py +++ b/redash/models/__init__.py @@ -443,7 +443,7 @@ def __str__(self): def archive(self, user=None): db.session.add(self) self.is_archived = True - self.schedule = {} + self.schedule = None for vis in self.visualizations: for w in vis.widgets: @@ -550,11 +550,11 @@ def by_user(cls, user): @classmethod def outdated_queries(cls): - queries = (db.session.query(Query) - .options(joinedload(Query.latest_query_data).load_only('retrieved_at')) - .filter(Query.schedule != {}) - .order_by(Query.id)) - + queries = (Query.query + .options(joinedload(Query.latest_query_data).load_only('retrieved_at')) + .filter(Query.schedule.isnot(None)) + .order_by(Query.id)) + now = utils.utcnow() outdated_queries = {} scheduled_queries_executions.refresh() diff --git a/redash/models/types.py b/redash/models/types.py --- a/redash/models/types.py +++ b/redash/models/types.py @@ -24,6 +24,9 @@ class PseudoJSON(TypeDecorator): impl = db.Text def process_bind_param(self, value, dialect): + if value is None: + return value + return json_dumps(value) def process_result_value(self, value, dialect):
diff --git a/client/app/components/queries/ScheduleDialog.test.js b/client/app/components/queries/ScheduleDialog.test.js --- a/client/app/components/queries/ScheduleDialog.test.js +++ b/client/app/components/queries/ScheduleDialog.test.js @@ -1,17 +1,11 @@ import React from 'react'; import { mount } from 'enzyme'; import { ScheduleDialog } from './ScheduleDialog'; +import RefreshScheduleDefault from '../proptypes'; const defaultProps = { show: true, - query: { - schedule: { - time: null, - until: null, - interval: null, - day_of_week: null, - }, - }, + schedule: RefreshScheduleDefault, refreshOptions: [ 60, 300, 600, // 1, 5 ,10 mins 3600, 36000, 82800, // 1, 10, 23 hours @@ -23,12 +17,11 @@ const defaultProps = { }; function getWrapper(schedule = {}, props = {}) { - const defaultSchedule = defaultProps.query.schedule; props = Object.assign( {}, defaultProps, props, - { query: { schedule: Object.assign({}, defaultSchedule, schedule) } }, + { schedule: Object.assign({}, RefreshScheduleDefault, schedule) }, ); return [mount(<ScheduleDialog {...props} />), props]; } @@ -78,7 +71,7 @@ describe('ScheduleDialog', () => { const [wrapper] = getWrapper({ interval: 1209600, time: '22:15', - day_of_week: 2, + day_of_week: 'Monday', }); test('Sets to correct interval', () => { diff --git a/client/app/components/queries/__snapshots__/ScheduleDialog.test.js.snap b/client/app/components/queries/__snapshots__/ScheduleDialog.test.js.snap --- a/client/app/components/queries/__snapshots__/ScheduleDialog.test.js.snap +++ b/client/app/components/queries/__snapshots__/ScheduleDialog.test.js.snap @@ -1632,6 +1632,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "2 Weeks 22:15 Tu > <RadioGroup buttonStyle="outline" + defaultValue="Mon" disabled={false} onChange={[Function]} size="medium" @@ -1700,7 +1701,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "2 Weeks 22:15 Tu value="Mon" > <Radio - checked={false} + checked={true} className="input" disabled={false} onChange={[Function]} @@ -1709,10 +1710,10 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "2 Weeks 22:15 Tu value="Mon" > <label - className="input ant-radio-button-wrapper" + className="input ant-radio-button-wrapper ant-radio-button-wrapper-checked" > <Checkbox - checked={false} + checked={true} className="" defaultChecked={false} disabled={false} @@ -1725,11 +1726,11 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "2 Weeks 22:15 Tu value="Mon" > <span - className="ant-radio-button" + className="ant-radio-button ant-radio-button-checked" style={Object {}} > <input - checked={false} + checked={true} className="ant-radio-button-input" disabled={false} onBlur={[Function]} @@ -2375,7 +2376,6 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` onChange={[Function]} showSearch={false} transitionName="slide-up" - value={null} > <Select allowClear={false} @@ -2445,7 +2445,6 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` tags={false} tokenSeparators={Array []} transitionName="slide-up" - value={null} > <SelectTrigger ariaId="test-uuid" @@ -2478,11 +2477,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` } showSearch={false} transitionName="slide-up" - value={ - Array [ - null, - ] - } + value={Array []} visible={false} > <Trigger @@ -2577,11 +2572,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` onMenuSelect={[Function]} onPopupFocus={[Function]} 
prefixCls="ant-select-dropdown" - value={ - Array [ - null, - ] - } + value={Array []} visible={false} /> } @@ -2599,11 +2590,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` } showSearch={false} transitionName="slide-up" - value={ - Array [ - null, - ] - } + value={Array []} visible={false} > <div @@ -2631,21 +2618,7 @@ exports[`ScheduleDialog Sets correct schedule settings Sets to "Never" 1`] = ` > <div className="ant-select-selection__rendered" - > - <div - className="ant-select-selection-selected-value" - key="value" - style={ - Object { - "display": "block", - "opacity": 1, - } - } - title="Never" - > - Never - </div> - </div> + /> <span className="ant-select-arrow" key="arrow" diff --git a/tests/factories.py b/tests/factories.py --- a/tests/factories.py +++ b/tests/factories.py @@ -75,7 +75,7 @@ def __call__(self): user=user_factory.create, is_archived=False, is_draft=False, - schedule={}, + schedule=None, data_source=data_source_factory.create, org_id=1) diff --git a/tests/models/test_dashboards.py b/tests/models/test_dashboards.py --- a/tests/models/test_dashboards.py +++ b/tests/models/test_dashboards.py @@ -20,11 +20,11 @@ def create_tagged_dashboard(self, tags): return dashboard def test_all_tags(self): - self.create_tagged_dashboard(tags=['tag1']) - self.create_tagged_dashboard(tags=['tag1', 'tag2']) - self.create_tagged_dashboard(tags=['tag1', 'tag2', 'tag3']) + self.create_tagged_dashboard(tags=[u'tag1']) + self.create_tagged_dashboard(tags=[u'tag1', u'tag2']) + self.create_tagged_dashboard(tags=[u'tag1', u'tag2', u'tag3']) self.assertEqual( list(Dashboard.all_tags(self.factory.org, self.factory.user)), - [('tag1', 3), ('tag2', 2), ('tag3', 1)] + [(u'tag1', 3), (u'tag2', 2), (u'tag3', 1)] ) diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -136,9 +136,12 @@ class QueryOutdatedQueriesTest(BaseTestCase): # TODO: this test can be refactored to use mock version of should_schedule_next to simplify it. def test_outdated_queries_skips_unscheduled_queries(self): query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None}) + query_with_none = self.factory.create_query(schedule=None) + queries = models.Query.outdated_queries() self.assertNotIn(query, queries) + self.assertNotIn(query_with_none, queries) def test_outdated_queries_works_with_ttl_based_schedule(self): two_hours_ago = utcnow() - datetime.timedelta(hours=2) @@ -318,7 +321,7 @@ def test_removes_scheduling(self): query.archive() - self.assertEqual({}, query.schedule) + self.assertIsNone(query.schedule) def test_deletes_alerts(self): subscription = self.factory.create_alert_subscription()
Forked queries cannot be scheduled to recur As described here: https://github.com/getredash/redash/pull/2426#issuecomment-454827177 This is due to forked queries having a `None` value for `schedule` but the UI expects the schedule format to be like ``` { 'interval': ..., 'until': ..., 'day_of_week': ..., 'time': ... } ```
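A sketch of the normalization this implies, assuming `schedule` is either `None` or a plain dict with these four keys (the helper name is illustrative):

```python
SCHEDULE_KEYS = ('interval', 'until', 'day_of_week', 'time')


def normalize_schedule(schedule):
    # Map {} and an all-None dict to None, so consumers only ever see
    # either "no schedule" or a meaningful schedule dict.
    if not schedule:
        return None
    if all(schedule.get(key) is None for key in SCHEDULE_KEYS):
        return None
    return schedule
```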
2019-01-13T13:51:21
getredash/redash
3,298
getredash__redash-3298
[ "3297" ]
121a44ef15fd4c8b18161cbd58f1de6244e38898
diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py --- a/redash/handlers/authentication.py +++ b/redash/handlers/authentication.py @@ -38,7 +38,7 @@ def render_token_login_page(template, org_slug, token): return render_template("error.html", error_message="Your invite link has expired. Please ask for a new one."), 400 - if not user.is_invitation_pending: + if user.details.get('is_invitation_pending') is False: return render_template("error.html", error_message=("This invitation has already been accepted. " "Please try resetting your password instead.")), 400
diff --git a/tests/handlers/test_authentication.py b/tests/handlers/test_authentication.py --- a/tests/handlers/test_authentication.py +++ b/tests/handlers/test_authentication.py @@ -50,6 +50,12 @@ def test_bad_token(self): response = self.post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org) self.assertEqual(response.status_code, 400) + def test_user_invited_before_invitation_pending_check(self): + user = self.factory.create_user(details={}) + token = invite_token(user) + response = self.post_request('/invite/{}'.format(token), data={'password': 'test1234'}, org=self.factory.org) + self.assertEqual(response.status_code, 302) + def test_already_active_user(self): token = invite_token(self.factory.user) self.post_request('/invite/{}'.format(token), data={'password': 'test1234'}, org=self.factory.org)
Pending invitation links broken in latest version <!-- We use GitHub only for bug reports 🐛 Anything else should be posted to https://discuss.redash.io 👫 🚨For support, help & questions use https://discuss.redash.io/c/support 💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests **Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key. --> ### Issue Summary It looks like the change in #3261 doesn't handle the case of existing users that haven't accepted their invitations yet, so users trying to sign up using invitations created in a previous version incorrectly get the "This invitation has already been accepted" message. https://github.com/getredash/redash/pull/3229/files#diff-a0c5448134fdb627ae48d25bad76393fR40 I was able to fix this in our org with: ```sql UPDATE users SET details = '{"is_invitation_pending": true}'::json WHERE password_hash IS NULL ``` Maybe this case should be handled in the migration here? https://github.com/getredash/redash/blob/master/migrations/versions/e7f8a917aa8e_add_user_details_json_column.py ### Steps to Reproduce 1. Send invitation from a version prior to #3229 and #3261 2. Upgrade to current master 3. Try to access the invitation link Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead? ### Technical details: * Redash Version: * Browser/OS: * How did you install Redash:
Good catch, @chang! We can't do a migration in this case, because prior to this change there was no information on whether the user accepted the invitation or not. I think the solution can be to assume the invitation is pending if there is no `is_invitation_pending` key in the `details` property. To avoid code changes, I would rename the db property to `_is_invitation_pending`, and have a class property that does this check.
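A sketch of the backward-compatible check, assuming `details` is a dict: a missing key is treated as pending, so only an explicit `False` blocks the invite (this is the approach the patch above takes):

```python
def invitation_already_accepted(details):
    # Users invited before the flag existed have no key at all, which
    # must count as "still pending"; only an explicit False blocks them.
    return details.get('is_invitation_pending') is False
```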
2019-01-17T08:14:58
getredash/redash
3,304
getredash__redash-3304
[ "1720", "1720" ]
40c6a2621cae6ea0e09b2868b57d4d728fe6e040
diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py --- a/redash/query_runner/jql.py +++ b/redash/query_runner/jql.py @@ -24,6 +24,8 @@ def add_column(self, column, column_type=TYPE_STRING): def to_json(self): return json_dumps({'rows': self.rows, 'columns': self.columns.values()}) + def merge(self, set): + self.rows = self.rows + set.rows def parse_issue(issue, field_mapping): result = OrderedDict() @@ -179,6 +181,19 @@ def run_query(self, query, user): results = parse_count(data) else: results = parse_issues(data, field_mapping) + index = data['startAt'] + data['maxResults'] + + while data['total'] > index: + query['startAt'] = index + response, error = self.get_response(jql_url, params=query) + if error is not None: + return None, error + + data = response.json() + index = data['startAt'] + data['maxResults'] + + addl_results = parse_issues(data, field_mapping) + results.merge(addl_results) return results.to_json(), None except KeyboardInterrupt:
JQL: add support for fetching all the results by way of pagination
### Issue Summary
The JQL integration returns only the first 50 issues. This is the default number of issues returned via the JIRA REST API. A mechanism should be implemented where a query is executed multiple times to fetch subsequent issues from JIRA.
### Steps to Reproduce
1. Configure JIRA integration
2. Create any JQL query which returns more than 50 issues
3. Execute the query
Expected: More than 50 issues returned.
Actual: Only 50 issues returned.
### Technical details:
* Redash Version: 0.12.0+b2449
* Browser/OS: Ubuntu 16.4
* How did you install Redash: Provisioning script from https://redash.io/help-onpremise/setup/setting-up-redash-instance.html
You can set `"maxResults"` option to get more than 50 rows. It is not a bug, it's default JQL limitation. Merged #1721. While @denisov-vlad is right that you can just specify this in a query, it looks like 1K is a reasonable limit. It doesn't look like the 1K bump resolves this issue. The API [limits the results to 100](https://jira.atlassian.com/browse/JRACLOUD-67570?_ga=2.214036247.1681779146.1547612561-1966547513.1547612561) so setting `maxResults` above that is effectively ignored. To return more than 100 results, we would have to [paginate ](https://developer.atlassian.com/cloud/jira/platform/rest/v3/?_ga=2.15756085.2024100639.1505889530-892835306.1502362635#pagination) through the API. Could we re-open or should I create another issue to implement pagination? @justmiles I reopened this. You can set `"maxResults"` option to get more than 50 rows. It is not a bug, it's default JQL limitation. Merged #1721. While @denisov-vlad is right that you can just specify this in a query, it looks like 1K is a reasonable limit. It doesn't look like the 1K bump resolves this issue. The API [limits the results to 100](https://jira.atlassian.com/browse/JRACLOUD-67570?_ga=2.214036247.1681779146.1547612561-1966547513.1547612561) so setting `maxResults` above that is effectively ignored. To return more than 100 results, we would have to [paginate ](https://developer.atlassian.com/cloud/jira/platform/rest/v3/?_ga=2.15756085.2024100639.1505889530-892835306.1502362635#pagination) through the API. Could we re-open or should I create another issue to implement pagination? @justmiles I reopened this.
2019-01-18T01:22:18
getredash/redash
3,319
getredash__redash-3319
[ "2932" ]
d5afa1815e7575b194ea18c4e01129ce9429a3cc
diff --git a/redash/query_runner/uptycs.py b/redash/query_runner/uptycs.py
new file mode 100644
--- /dev/null
+++ b/redash/query_runner/uptycs.py
@@ -0,0 +1,140 @@
+from redash.query_runner import *
+from redash.utils import json_dumps
+
+import json
+import jwt
+import datetime
+import requests
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class Uptycs(BaseSQLQueryRunner):
+    noop_query = "SELECT 1"
+
+    @classmethod
+    def configuration_schema(cls):
+        return {
+            "type": "object",
+            "properties": {
+                "url": {
+                    "type": "string"
+                },
+                "customer_id": {
+                    "type": "string"
+                },
+                "key": {
+                    "type": "string"
+                },
+                "verify_ssl": {
+                    "type": "boolean",
+                    "default": True,
+                    "title": "Verify SSL Certificates",
+                },
+                "secret": {
+                    "type": "string",
+                },
+            },
+            "order": ['url', 'customer_id', 'key', 'secret'],
+            "required": ["url", "customer_id", "key", "secret"],
+            "secret": ["secret", "key"]
+        }
+
+    @classmethod
+    def annotate_query(cls):
+        return False
+
+    def generate_header(self, key, secret):
+        header = {}
+        utcnow = datetime.datetime.utcnow()
+        date = utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
+        auth_var = jwt.encode({'iss': key}, secret, algorithm='HS256')
+        authorization = "Bearer %s" % (auth_var)
+        header['date'] = date
+        header['Authorization'] = authorization
+        return header
+
+    def transformed_to_redash_json(self, data):
+        transformed_columns = []
+        rows = []
+        # convert all type to JSON string
+        # In future we correct data type mapping later
+        if 'columns' in data:
+            for json_each in data['columns']:
+                name = json_each['name']
+                new_json = {"name": name,
+                            "type": "string",
+                            "friendly_name": name}
+                transformed_columns.append(new_json)
+        # Transfored items into rows.
+        if 'items' in data:
+            rows = data['items']
+
+        redash_json_data = {"columns": transformed_columns,
+                            "rows": rows}
+        return redash_json_data
+
+    def api_call(self, sql):
+        # JWT encoded header
+        header = self.generate_header(self.configuration.get('key'),
+                                      self.configuration.get('secret'))
+
+        # URL form using API key file based on GLOBAL
+        url = ("%s/public/api/customers/%s/query" %
+               (self.configuration.get('url'),
+                self.configuration.get('customer_id')))
+
+        # post data base sql
+        post_data_json = {"query": sql}
+
+        response = requests.post(url, headers=header, json=post_data_json,
+                                 verify=self.configuration.get('verify_ssl',
+                                                               True))
+
+        if response.status_code == 200:
+            response_output = json.loads(response.content)
+        else:
+            error = 'status_code ' + str(response.status_code) + '\n'
+            error = error + "failed to connect"
+            json_data = {}
+            return json_data, error
+        # if we get right status code then call transfored_to_redash
+        json_data = self.transformed_to_redash_json(response_output)
+        error = None
+        # if we got error from Uptycs include error information
+        if 'error' in response_output:
+            error = response_output['error']['message']['brief']
+            error = error + '\n' + response_output['error']['message']['detail']
+        return json_data, error
+
+    def run_query(self, query, user):
+        data, error = self.api_call(query)
+        json_data = json_dumps(data)
+        logger.debug("%s", json_data)
+        return json_data, error
+
+    def get_schema(self, get_stats=False):
+        header = self.generate_header(self.configuration.get('key'),
+                                      self.configuration.get('secret'))
+        url = ("%s/public/api/customers/%s/schema/global" %
+               (self.configuration.get('url'),
+                self.configuration.get('customer_id')))
+        response = requests.get(url, headers=header,
+                                verify=self.configuration.get('verify_ssl',
+                                                              True))
+        redash_json = []
+        schema = json.loads(response.content)
+        for each_def in schema['tables']:
+            table_name = each_def['name']
+            columns = []
+            for col in each_def['columns']:
+                columns.append(col['name'])
+            table_json = {"name": table_name, "columns": columns}
+            redash_json.append(table_json)
+
+        logger.debug("%s", schema.values())
+        return redash_json
+
+
+register(Uptycs)
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py
--- a/redash/settings/__init__.py
+++ b/redash/settings/__init__.py
@@ -190,6 +190,7 @@ def all_settings():
     'redash.query_runner.druid',
     'redash.query_runner.kylin',
     'redash.query_runner.drill',
+    'redash.query_runner.uptycs',
 ]
 enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
Uptycs Query_runner for Redash
Hi Team,
I want to add a new query runner for Uptycs to Redash. I did some testing with the Uptycs query runner. However, before I share the code, please let me know the process for posting a query runner.
Thanks,
Vibhor
The process is to create a pull request with the new query runner and whatever other changes are needed. Are you familiar with Pull Requests and GitHub? Yes. Will create a pull request and will include the changes for Uptycs. Thank you @arikfr !
2019-01-21T21:16:00
getredash/redash
3,323
getredash__redash-3323
[ "3321" ]
8bdcfb06c5fcdfb24acc2eb757ffe91bf23f4f45
diff --git a/redash/models/__init__.py b/redash/models/__init__.py --- a/redash/models/__init__.py +++ b/redash/models/__init__.py @@ -351,7 +351,7 @@ def make_excel_content(self): for (r, row) in enumerate(query_data['rows']): for (c, name) in enumerate(column_names): v = row.get(name) - if isinstance(v, list): + if isinstance(v, list) or isinstance(v, dict): v = str(v).encode('utf-8') sheet.write(r + 1, c, v)
Exporting to Excel file fails when one of the columns is a dictionary
### Issue Summary
I get an error when exporting query results to an Excel file:
![image](https://user-images.githubusercontent.com/33534430/51517880-30217d80-1e57-11e9-9cbf-0ef7ad187218.png)
Environment: EC2 on ECS
### Steps to Reproduce
1. Create a new query
2. Execute the query, save it, then download it as an Excel file
### Technical details:
* Redash Version: 6.0.0+b8537
* Browser/OS: Chrome
* How did you install Redash: run Redash by ECS on AWS
Can you show the error log in more detail? And can you download a CSV successfully?
> Can you show the error log in more detail? And can you download a CSV successfully?

![image](https://user-images.githubusercontent.com/33534430/51521827-b7282300-1e62-11e9-9375-d5a119fd29ab.png)
I can download a CSV successfully, but I get an error when downloading an Excel file. I found that there is a field whose type is JSON; that may be the cause, because I can export a query without a JSON attribute to Excel successfully. BTW, the data source is Postgres.
> Can you show the error log in more detail? And can you download a CSV successfully?

I see a similar problem has been solved in issue #2060; is this the same problem?
Indeed it looks like a similar issue.
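A minimal sketch of the coercion fix, assuming xlwt-style cells as in the patch above (Python 2 era, hence the `encode`):

```python
def to_cell(value):
    # xlwt's sheet.write() only handles scalars; stringify lists and
    # dicts (e.g. PostgreSQL json columns) instead of letting it raise.
    if isinstance(value, (list, dict)):
        return str(value).encode('utf-8')
    return value
```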
2019-01-22T12:27:25
getredash/redash
3,337
getredash__redash-3337
[ "3316" ]
e21bbcc6fe75fa1e289f86f1bd75e264fc147316
diff --git a/redash/handlers/api.py b/redash/handlers/api.py
--- a/redash/handlers/api.py
+++ b/redash/handlers/api.py
@@ -10,7 +10,7 @@
 from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource, DataSourceTestResource
 from redash.handlers.events import EventsResource
 from redash.handlers.queries import QueryArchiveResource, QueryForkResource, QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource
-from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource
+from redash.handlers.query_results import QueryResultListResource, QueryResultDropdownResource, QueryResultResource, JobResource
 from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource, UserDisableResource, UserRegenerateApiKeyResource
 from redash.handlers.visualizations import VisualizationListResource
 from redash.handlers.visualizations import VisualizationResource
@@ -69,10 +69,10 @@ def json_representation(data, code, headers=None):
 api.add_org_resource(EventsResource, '/api/events', endpoint='events')
 
-api.add_org_resource(QueryFavoriteListResource, '/api/queries/favorites', endpoint='query_fovorites')
-api.add_org_resource(QueryFavoriteResource, '/api/queries/<query_id>/favorite', endpoint='query_fovorite')
-api.add_org_resource(DashboardFavoriteListResource, '/api/dashboards/favorites', endpoint='dashboard_fovorites')
-api.add_org_resource(DashboardFavoriteResource, '/api/dashboards/<object_id>/favorite', endpoint='dashboard_fovorite')
+api.add_org_resource(QueryFavoriteListResource, '/api/queries/favorites', endpoint='query_favorites')
+api.add_org_resource(QueryFavoriteResource, '/api/queries/<query_id>/favorite', endpoint='query_favorite')
+api.add_org_resource(DashboardFavoriteListResource, '/api/dashboards/favorites', endpoint='dashboard_favorites')
+api.add_org_resource(DashboardFavoriteResource, '/api/dashboards/<object_id>/favorite', endpoint='dashboard_favorite')
 
 api.add_org_resource(QueryTagsResource, '/api/queries/tags', endpoint='query_tags')
 api.add_org_resource(DashboardTagsResource, '/api/dashboards/tags', endpoint='dashboard_tags')
@@ -90,6 +90,7 @@ def json_representation(data, code, headers=None):
 api.add_org_resource(CheckPermissionResource, '/api/<object_type>/<object_id>/acl/<access_type>', endpoint='check_permissions')
 
 api.add_org_resource(QueryResultListResource, '/api/query_results', endpoint='query_results')
+api.add_org_resource(QueryResultDropdownResource, '/api/queries/<query_id>/dropdown', endpoint='query_result_dropdown')
 api.add_org_resource(QueryResultResource,
                      '/api/query_results/<query_result_id>.<filetype>',
                      '/api/query_results/<query_result_id>',
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py
--- a/redash/handlers/query_results.py
+++ b/redash/handlers/query_results.py
@@ -10,8 +10,8 @@
                                 require_permission, view_only)
 from redash.tasks import QueryTask
 from redash.tasks.queries import enqueue_query
-from redash.utils import (collect_parameters_from_request, gen_query_hash, json_dumps, json_loads, utcnow)
+from redash.utils import (collect_parameters_from_request, gen_query_hash, json_dumps, utcnow)
-from redash.utils.parameterized_query import ParameterizedQuery
+from redash.utils.parameterized_query import ParameterizedQuery, dropdown_values
 
 
 def error_response(message):
@@ -131,6 +131,11 @@ def post(self):
 ONE_YEAR = 60 * 60 * 24 * 365.25
 
 
+class QueryResultDropdownResource(BaseResource):
+    def get(self, query_id):
+        return dropdown_values(query_id)
+
+
 class QueryResultResource(BaseResource):
     @staticmethod
     def add_cors_headers(headers):
@@ -188,8 +193,7 @@ def post(self, query_id):
         max_age = int(params.get('max_age', 0))
 
         query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
-        parameter_schema = map(self._convert_queries_to_enums,
-                               query.options.get("parameters", []))
+        parameter_schema = query.options.get("parameters", [])
 
         if not has_access(query.data_source.groups, self.current_user, not_view_only):
             return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403
diff --git a/redash/utils/parameterized_query.py b/redash/utils/parameterized_query.py
--- a/redash/utils/parameterized_query.py
+++ b/redash/utils/parameterized_query.py
@@ -1,9 +1,38 @@
 import pystache
+from functools import partial
+from flask_login import current_user
+from redash.authentication.org_resolving import current_org
 from numbers import Number
-from redash.utils import mustache_render
+from redash import models
+from redash.utils import mustache_render, json_loads
+from redash.permissions import require_access, view_only
 from funcy import distinct
 from dateutil.parser import parse
 
 
+def _pluck_name_and_value(default_column, row):
+    row = {k.lower(): v for k, v in row.items()}
+    name_column = "name" if "name" in row.keys() else default_column
+    value_column = "value" if "value" in row.keys() else default_column
+
+    return {"name": row[name_column], "value": row[value_column]}
+
+
+def _load_result(query_id):
+    query = models.Query.get_by_id_and_org(query_id, current_org)
+    require_access(query.data_source.groups, current_user, view_only)
+    query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
+
+    return json_loads(query_result.data)
+
+
+def dropdown_values(query_id):
+    data = _load_result(query_id)
+    first_column = data["columns"][0]["name"]
+    pluck = partial(_pluck_name_and_value, first_column)
+    return map(pluck, data["rows"])
+
+
 def _collect_key_names(nodes):
     keys = []
     for node in nodes._parse_tree:
@@ -76,6 +105,7 @@ def _valid(self, name, value):
             "text": lambda value: isinstance(value, basestring),
             "number": lambda value: isinstance(value, Number),
             "enum": lambda value: value in definition["enumOptions"],
+            "query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
             "date": _is_date,
             "datetime-local": _is_date,
             "datetime-with-seconds": _is_date,
diff --git a/tests/handlers/test_query_results.py b/tests/handlers/test_query_results.py --- a/tests/handlers/test_query_results.py +++ b/tests/handlers/test_query_results.py @@ -170,6 +170,16 @@ def test_signed_in_user_and_different_query_result(self): self.assertEquals(rv.status_code, 403) +class TestQueryResultDropdownResource(BaseTestCase): + def test_checks_for_access_to_the_query(self): + ds2 = self.factory.create_data_source(group=self.factory.org.admin_group, view_only=False) + query = self.factory.create_query(data_source=ds2) + + rv = self.make_request('get', '/api/queries/{}/dropdown'.format(query.id)) + + self.assertEquals(rv.status_code, 403) + + class TestQueryResultExcelResponse(BaseTestCase): def test_renders_excel_file(self): query = self.factory.create_query() diff --git a/tests/utils/test_parameterized_query.py b/tests/utils/test_parameterized_query.py --- a/tests/utils/test_parameterized_query.py +++ b/tests/utils/test_parameterized_query.py @@ -1,7 +1,9 @@ from unittest import TestCase +from mock import patch +from collections import namedtuple import pytest -from redash.utils.parameterized_query import ParameterizedQuery, InvalidParameterError +from redash.utils.parameterized_query import ParameterizedQuery, InvalidParameterError, dropdown_values class TestParameterizedQuery(TestCase): @@ -110,6 +112,31 @@ def test_validates_enum_parameters(self): self.assertEquals("foo baz", query.text) + @patch('redash.utils.parameterized_query.dropdown_values') + def test_raises_on_invalid_query_parameters(self, _): + schema = [{"name": "bar", "type": "query", "queryId": 1}] + query = ParameterizedQuery("foo", schema) + + with pytest.raises(InvalidParameterError): + query.apply({"bar": 7}) + + @patch('redash.utils.parameterized_query.dropdown_values', return_value=[{"value": "baz"}]) + def test_raises_on_unlisted_query_value_parameters(self, _): + schema = [{"name": "bar", "type": "query", "queryId": 1}] + query = ParameterizedQuery("foo", schema) + + with pytest.raises(InvalidParameterError): + query.apply({"bar": "shlomo"}) + + @patch('redash.utils.parameterized_query.dropdown_values', return_value=[{"value": "baz"}]) + def test_validates_query_parameters(self, _): + schema = [{"name": "bar", "type": "query", "queryId": 1}] + query = ParameterizedQuery("foo {{bar}}", schema) + + query.apply({"bar": "baz"}) + + self.assertEquals("foo baz", query.text) + def test_raises_on_invalid_date_range_parameters(self): schema = [{"name": "bar", "type": "date-range"}] query = ParameterizedQuery("foo", schema) @@ -131,3 +158,17 @@ def test_raises_on_unexpected_param_types(self): with pytest.raises(InvalidParameterError): query.apply({"bar": "baz"}) + + @patch('redash.utils.parameterized_query._load_result', return_value={ + "columns": [{"name": "id"}, {"name": "Name"}, {"name": "Value"}], + "rows": [{"id": 5, "Name": "John", "Value": "John Doe"}]}) + def test_dropdown_values_prefers_name_and_value_columns(self, _): + values = dropdown_values(1) + self.assertEquals(values, [{"name": "John", "value": "John Doe"}]) + + @patch('redash.utils.parameterized_query._load_result', return_value={ + "columns": [{"name": "id"}, {"name": "fish"}, {"name": "poultry"}], + "rows": [{"fish": "Clown", "id": 5, "poultry": "Hen"}]}) + def test_dropdown_values_compromises_for_first_column(self, _): + values = dropdown_values(1) + self.assertEquals(values, [{"name": 5, "value": 5}])
Combine frontend/backend query-based dropdown parameters
Currently, query-based dropdown lists are populated with an API call that fetches the values plus some logic implemented in the frontend. The same kind of logic is introduced in #3315, so it's probably best to unite the two and create a dedicated endpoint that returns query-based dropdown list values.
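A hedged sketch of the column-picking rule such an endpoint could apply, mirroring the patch above: prefer explicit `name`/`value` columns, case-insensitively, and fall back to the first column:

```python
def pluck_name_and_value(fallback_column, row):
    # Case-insensitive lookup: use explicit "name"/"value" columns when
    # present, otherwise reuse the fallback (first) column for both.
    row = {key.lower(): value for key, value in row.items()}
    fallback = fallback_column.lower()
    name_column = 'name' if 'name' in row else fallback
    value_column = 'value' if 'value' in row else fallback
    return {'name': row[name_column], 'value': row[value_column]}
```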
2019-01-24T19:44:52
getredash/redash
3,362
getredash__redash-3362
[ "3289" ]
61e7cdaa8107443f0cc59d9dadf575278f4aea7a
diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py --- a/redash/query_runner/clickhouse.py +++ b/redash/query_runner/clickhouse.py @@ -68,13 +68,13 @@ def _get_tables(self, schema): def _send_query(self, data, stream=False): r = requests.post( - self.configuration['url'], + self.configuration.get('url', "http://127.0.0.1:8123"), data=data.encode("utf-8"), stream=stream, timeout=self.configuration.get('timeout', 30), params={ - 'user': self.configuration['user'], - 'password': self.configuration['password'], + 'user': self.configuration.get('user', "default"), + 'password': self.configuration.get('password', ""), 'database': self.configuration['dbname'] } )
Clickhouse: password is optional but we try to access it anyway For Clickhouse type data sources, we don't require a password. But the code does require it by trying to directly access the value in the options dictionary, instead of using `get`: https://github.com/getredash/redash/blob/823e4ccdd6fcfee5d0df0d919d87af3100876549/redash/query_runner/clickhouse.py#L77
2019-01-29T20:52:31
getredash/redash
3,421
getredash__redash-3421
[ "3372" ]
eee77a1c9b851a61c776294fee62575977939998
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -228,7 +228,7 @@ def post(self): 'object_type': 'query' }) - return QuerySerializer(query).serialize() + return QuerySerializer(query, with_visualizations=True).serialize() class QueryArchiveResource(BaseQueryListResource):
When creating a visualization after running a query, the query results are deleted

We are testing the latest stable version of Redash (6.0.0+b10818 (7fa66654)) and found a regression when creating a new visualization. To reproduce:

0. Open a new browser tab and log into Redash
1. Run a query
2. Create a new visualization
3. The X & Y dropdowns are empty
4. When you cancel the visualization creation, you see that the query results disappeared

Note: the issue occurs only when you run Redash for the first time in a new session.
The issue isn't with a specific visualization, but with the query losing its reference to the query result. I think it happens because we don't take a reference to the query result from the UI, so when first saving the query it gets saved without a query result reference (`latest_query_data_id`): https://github.com/getredash/redash/blob/933dd753a8197708fe37432ee037a7f6bba258d5/redash/handlers/queries.py#L213-L214
2019-02-11T11:28:32
getredash/redash
3423
getredash__redash-3423
[ "3257" ]
cb22764d68c4f150194e87388d00dbed90e5a684
diff --git a/redash/models/__init__.py b/redash/models/__init__.py --- a/redash/models/__init__.py +++ b/redash/models/__init__.py @@ -922,7 +922,7 @@ def copy(self): class Widget(TimestampMixin, BelongsToOrgMixin, db.Model): id = Column(db.Integer, primary_key=True) visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True) - visualization = db.relationship(Visualization, backref='widgets') + visualization = db.relationship(Visualization, backref=backref('widgets', cascade='delete')) text = Column(db.Text, nullable=True) width = Column(db.Integer) options = Column(db.Text)
diff --git a/tests/handlers/test_visualizations.py b/tests/handlers/test_visualizations.py --- a/tests/handlers/test_visualizations.py +++ b/tests/handlers/test_visualizations.py @@ -128,3 +128,11 @@ def test_only_owner_collaborator_or_admin_can_delete_visualization(self): rv = self.make_request('delete', path, user=admin_from_diff_org) self.assertEquals(rv.status_code, 404) + + def test_deleting_a_visualization_deletes_dashboard_widgets(self): + vis = self.factory.create_visualization() + widget = self.factory.create_widget(visualization=vis) + + rv = self.make_request('delete', '/api/visualizations/{}'.format(vis.id)) + + self.assertIsNone(models.Widget.query.filter(models.Widget.id == widget.id).first())
Widget turns into a textbox when a visualization is removed

Current behavior: when deleting a visualization that is in use on some dashboard, the widget using it turns into a text box.
Expected behavior: the widget should be removed.
Bonus behavior: warn the user that the visualization is currently in use before deleting it.
@arikfr Are you sure that this is a frontend-related bug? I think the backend should search for dashboards that use the visualization and update them. On the frontend we can show a placeholder instead of the visualization ("Visualization is not available" or something like that); it's the most we can do with JS.

It's indeed a backend issue, I miscategorized.
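A self-contained sketch of the cascade behavior the patch adds, using hypothetical minimal models and SQLAlchemy 1.4-style imports (the real models carry many more columns):

```python
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, backref, declarative_base, relationship

Base = declarative_base()

class Visualization(Base):
    __tablename__ = "visualizations"
    id = Column(Integer, primary_key=True)

class Widget(Base):
    __tablename__ = "widgets"
    id = Column(Integer, primary_key=True)
    visualization_id = Column(Integer, ForeignKey("visualizations.id"), nullable=True)
    # cascade='delete' on the backref: deleting a Visualization deletes the
    # Widgets that reference it instead of leaving them behind as text boxes.
    visualization = relationship(Visualization, backref=backref("widgets", cascade="delete"))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    vis = Visualization()
    session.add_all([vis, Widget(visualization=vis)])
    session.commit()

    session.delete(vis)
    session.commit()
    assert session.query(Widget).count() == 0  # the widget went with it
```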
2019-02-11T12:02:40
getredash/redash
3442
getredash__redash-3442
[ "1764" ]
8fc2ecf55c2d9ecd94b391d6d80d58c33fed9373
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -3,9 +3,10 @@ import select import psycopg2 +from psycopg2.extras import Range from redash.query_runner import * -from redash.utils import json_dumps, json_loads +from redash.utils import JSONEncoder, json_dumps, json_loads logger = logging.getLogger(__name__) @@ -28,6 +29,26 @@ } +class PostgreSQLJSONEncoder(JSONEncoder): + def default(self, o): + if isinstance(o, Range): + # From: https://github.com/psycopg/psycopg2/pull/779 + if o._bounds is None: + return '' + + items = [ + o._bounds[0], + str(o._lower), + ', ', + str(o._upper), + o._bounds[1] + ] + + return ''.join(items) + + return super(PostgreSQLJSONEncoder, self).default(o) + + def _wait(conn, timeout=None): while 1: try: @@ -165,7 +186,7 @@ def run_query(self, query, user): data = {'columns': columns, 'rows': rows} error = None - json_data = json_dumps(data, ignore_nan=True) + json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder) else: error = 'Query completed but it returned no data.' json_data = None
PostgreSQL queries returning a tsrange fail

### Issue Summary
Running a query which includes a tsrange column fails with a JSON error like this:

```
DateTimeRange(datetime.datetime(2016, 8, 25, 15, 21, 45, 760521), datetime.datetime(2016, 10, 14, 5, 56, 52, 452297), '[)') is not JSON serializable
```

### Steps to Reproduce
1. Create a table with a tsrange column
2. Insert some data into it
3. Create a `SELECT * FROM table` query
4. Run the query

This is turning out to be confusing for our users: I'm getting regular complaints about queries failing with this error because some of our tables have range columns.

### Technical details:
* Redash Version: 1.0.1+b2845
* Browser/OS: Linux
* How did you install Redash: Docker image
We need to implement a custom JSON encoder, as we did for MongoDB and other data sources, that will properly handle this class type.
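A sketch of that approach, assuming psycopg2 is installed. The formatting of the range string follows the patch; the encoder name here is illustrative, and in Redash the class would be passed to `json_dumps` via the `cls` argument:

```python
import json
from psycopg2.extras import Range

class RangeAwareJSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, Range):
            # From: https://github.com/psycopg/psycopg2/pull/779
            if o._bounds is None:  # an empty range has no bounds
                return ""
            # e.g. '[2016-08-25 15:21:45, 2016-10-14 05:56:52)'
            return "".join([o._bounds[0], str(o._lower), ", ", str(o._upper), o._bounds[1]])
        return super(RangeAwareJSONEncoder, self).default(o)

print(json.dumps({"period": Range(1, 5, "[)")}, cls=RangeAwareJSONEncoder))
# {"period": "[1, 5)"}
```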
2019-02-15T20:44:54
getredash/redash
3504
getredash__redash-3504
[ "3503" ]
fbaded45484e9dfff656041237b68fe3fe4b4e80
diff --git a/redash/handlers/users.py b/redash/handlers/users.py --- a/redash/handlers/users.py +++ b/redash/handlers/users.py @@ -13,7 +13,7 @@ require_permission_or_owner, require_admin from redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results -from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email +from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email from redash.settings import parse_boolean @@ -225,10 +225,17 @@ def post(self, user_id): if domain.lower() in blacklist or domain.lower() == 'qq.com': abort(400, message='Bad email address.') + email_changed = 'email' in params and params['email'] != user.email + if email_changed: + user.is_email_verified = False + try: self.update_model(user, params) models.db.session.commit() + if email_changed: + send_verify_email(user, self.current_org) + # The user has updated their email or password. This should invalidate all _other_ sessions, # forcing them to log in again. Since we don't want to force _this_ session to have to go # through login again, we call `login_user` in order to update the session with the new identity details.
diff --git a/tests/handlers/test_users.py b/tests/handlers/test_users.py --- a/tests/handlers/test_users.py +++ b/tests/handlers/test_users.py @@ -202,6 +202,12 @@ def test_returns_200_for_non_admin_changing_his_own(self): rv = self.make_request('post', "/api/users/{}".format(self.factory.user.id), data={"name": "New Name"}) self.assertEqual(rv.status_code, 200) + def test_marks_email_as_not_verified_when_changed(self): + user = self.factory.user + user.is_email_verified = True + rv = self.make_request('post', "/api/users/{}".format(user.id), data={"email": "[email protected]"}) + self.assertFalse(user.is_email_verified) + def test_returns_200_for_admin_changing_other_user(self): admin = self.factory.create_admin()
Verify address when users change their e-mail

### Issue Summary
Currently, e-mail addresses are verified automatically by the fact that users receive an e-mail invitation and accept it. However, users can later change their e-mail address, and no verification is currently done in that scenario.
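A toy sketch of the flow the patch introduces, with a dict standing in for the real user model and `print` for the mailer:

```python
def update_user(user, params, send_verify_email=print):
    # If the address changes, the old verification no longer applies.
    email_changed = "email" in params and params["email"] != user["email"]
    user.update(params)
    if email_changed:
        user["is_email_verified"] = False
        send_verify_email(user)  # the real handler emails a verification link
    return user

user = {"email": "[email protected]", "is_email_verified": True}
update_user(user, {"email": "[email protected]"})
assert user["is_email_verified"] is False
```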
2019-02-27T09:49:44
getredash/redash
3506
getredash__redash-3506
[ "3507" ]
34da15fd6a67cac25fa5c8907297bce12a0a4db0
diff --git a/redash/handlers/users.py b/redash/handlers/users.py --- a/redash/handlers/users.py +++ b/redash/handlers/users.py @@ -15,6 +15,7 @@ from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email from redash.settings import parse_boolean +from redash import settings # Ordering map for relationships @@ -36,9 +37,17 @@ ) -def invite_user(org, inviter, user): +def invite_user(org, inviter, user, send_email=True): + email_configured = settings.MAIL_DEFAULT_SENDER is not None + d = user.to_dict() + invite_url = invite_link_for_user(user) - send_invite_email(inviter, user, invite_url, org) + if email_configured and send_email: + send_invite_email(inviter, user, invite_url, org) + else: + d['invite_link'] = invite_url + + return d class UserListResource(BaseResource): @@ -137,19 +146,14 @@ def post(self): }) should_send_invitation = 'no_invite' not in request.args - if should_send_invitation: - invite_user(self.current_org, self.current_user, user) - - return user.to_dict() + return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation) class UserInviteResource(BaseResource): @require_admin def post(self, user_id): user = models.User.get_by_id_and_org(user_id, self.current_org) - invite_url = invite_user(self.current_org, self.current_user, user) - - return user.to_dict() + return invite_user(self.current_org, self.current_user, user) class UserResetPasswordResource(BaseResource):
diff --git a/client/app/components/users/UserShow.test.js b/client/app/components/users/UserShow.test.js --- a/client/app/components/users/UserShow.test.js +++ b/client/app/components/users/UserShow.test.js @@ -1,6 +1,6 @@ import React from 'react'; import renderer from 'react-test-renderer'; -import { UserShow } from './UserShow'; +import UserShow from './UserShow'; test('renders correctly', () => { const user = { diff --git a/tests/handlers/test_users.py b/tests/handlers/test_users.py --- a/tests/handlers/test_users.py +++ b/tests/handlers/test_users.py @@ -1,4 +1,4 @@ -from redash import models +from redash import models, settings from tests import BaseTestCase from mock import patch @@ -40,6 +40,34 @@ def test_creates_user(self): self.assertEqual(rv.json['name'], test_user['name']) self.assertEqual(rv.json['email'], test_user['email']) + def test_shows_invite_link_when_email_is_not_configured(self): + previous = settings.MAIL_DEFAULT_SENDER + settings.MAIL_DEFAULT_SENDER = None + + admin = self.factory.create_admin() + + test_user = {'name': 'User', 'email': '[email protected]'} + rv = self.make_request('post', '/api/users', data=test_user, user=admin) + + self.assertEqual(rv.status_code, 200) + self.assertTrue('invite_link' in rv.json) + + settings.MAIL_DEFAULT_SENDER = previous + + def test_does_not_show_invite_link_when_email_is_configured(self): + previous = settings.MAIL_DEFAULT_SENDER + settings.MAIL_DEFAULT_SENDER = "[email protected]" + + admin = self.factory.create_admin() + + test_user = {'name': 'User', 'email': '[email protected]'} + rv = self.make_request('post', '/api/users', data=test_user, user=admin) + + self.assertEqual(rv.status_code, 200) + self.assertFalse('invite_link' in rv.json) + + settings.MAIL_DEFAULT_SENDER = previous + def test_creates_user_case_insensitive_email(self): admin = self.factory.create_admin()
No invitation link after adding a new user when email server is not configured

### Issue Summary
After #3267 and #3229, admins no longer receive a link to invite users when the server doesn't have an email server configured.

### Steps to Reproduce
1. Open `/users/new` in a server without email configured (e.g.: [preview](https://redash-preview.netlify.com/users/new))
2. Create a new user and check that no link is provided.
3. Open the pending invitation User Profile and notice there's only the "Resend" option (again it leads to nowhere)

The admin should receive a link to send to the user manually.

### Technical details:
* Redash Version: Latest (Redash Preview)
* Browser/OS: --
* How did you install Redash: --
2019-02-27T23:44:17
getredash/redash
3532
getredash__redash-3532
[ "3438" ]
bc22797009d345b706bdf24c8618790880e674fc
diff --git a/redash/__init__.py b/redash/__init__.py --- a/redash/__init__.py +++ b/redash/__init__.py @@ -97,8 +97,8 @@ def to_url(self, value): return value -def create_app(load_admin=True): - from redash import admin, authentication, extensions, handlers +def create_app(): + from redash import authentication, extensions, handlers from redash.handlers.webpack import configure_webpack from redash.handlers import chrome_logger from redash.models import db, users @@ -126,8 +126,6 @@ def create_app(load_admin=True): provision_app(app) db.init_app(app) migrate.init_app(app, db) - if load_admin: - admin.init_admin(app) mail.init_app(app) authentication.init_app(app) limiter.init_app(app) diff --git a/redash/admin.py b/redash/admin.py deleted file mode 100644 --- a/redash/admin.py +++ /dev/null @@ -1,78 +0,0 @@ -from flask_admin import Admin -from flask_admin.base import MenuLink -from flask_admin.contrib.sqla import ModelView -from flask_admin.contrib.sqla.form import AdminModelConverter -from wtforms import fields -from wtforms.widgets import TextInput - -from redash import models -from redash.permissions import require_super_admin -from redash.utils import json_loads - - -class ArrayListField(fields.Field): - widget = TextInput() - - def _value(self): - if self.data: - return u', '.join(self.data) - else: - return u'' - - def process_formdata(self, valuelist): - if valuelist: - self.data = [x.strip() for x in valuelist[0].split(',')] - else: - self.data = [] - - -class JSONTextAreaField(fields.TextAreaField): - def process_formdata(self, valuelist): - if valuelist: - try: - json_loads(valuelist[0]) - except ValueError: - raise ValueError(self.gettext(u'Invalid JSON')) - self.data = valuelist[0] - else: - self.data = '' - - -class BaseModelView(ModelView): - column_display_pk = True - model_form_converter = AdminModelConverter - form_excluded_columns = ('created_at', 'updated_at') - - @require_super_admin - def is_accessible(self): - return True - - -class QueryResultModelView(BaseModelView): - column_exclude_list = ('data',) - - -class QueryModelView(BaseModelView): - column_exclude_list = ('latest_query_data',) - form_excluded_columns = ('version', 'visualizations', 'alerts', 'org', 'created_at', - 'updated_at', 'latest_query_data', 'search_vector') - - -class DashboardModelView(BaseModelView): - column_searchable_list = ('name', 'slug') - column_exclude_list = ('version', ) - form_excluded_columns = ('version', 'widgets', 'org', 'created_at', 'updated_at') - - -def init_admin(app): - admin = Admin(app, name='Redash Admin', template_mode='bootstrap3') - - admin.add_view(QueryModelView(models.Query, models.db.session)) - admin.add_view(QueryResultModelView(models.QueryResult, models.db.session)) - admin.add_view(DashboardModelView(models.Dashboard, models.db.session)) - logout_link = MenuLink('Logout', '/logout', 'logout') - - for m in (models.Visualization, models.Widget, models.Event, models.Organization): - admin.add_view(BaseModelView(m, models.db.session)) - - admin.add_link(logout_link)
Remove Flask-Admin

Flask-Admin was a nice stopgap solution when our UI wasn't as evolved, but at this point it's more of a liability than a benefit. Unless someone has a good use case for keeping it, we're going to remove it in the upcoming release or the one after.
2019-03-05T15:51:28
getredash/redash
3,534
getredash__redash-3534
[ "3325", "3325" ]
bc22797009d345b706bdf24c8618790880e674fc
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py --- a/redash/settings/__init__.py +++ b/redash/settings/__init__.py @@ -47,6 +47,7 @@ def all_settings(): QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7")) SCHEMAS_REFRESH_SCHEDULE = int(os.environ.get("REDASH_SCHEMAS_REFRESH_SCHEDULE", 30)) +SCHEMAS_REFRESH_QUEUE = os.environ.get("REDASH_SCHEMAS_REFRESH_QUEUE", "celery") AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key") ENFORCE_HTTPS = parse_boolean(os.environ.get("REDASH_ENFORCE_HTTPS", "false")) diff --git a/redash/tasks/queries.py b/redash/tasks/queries.py --- a/redash/tasks/queries.py +++ b/redash/tasks/queries.py @@ -409,7 +409,7 @@ def refresh_schemas(): elif ds.org.is_disabled: logger.info(u"task=refresh_schema state=skip ds_id=%s reason=org_disabled", ds.id) else: - refresh_schema.apply_async(args=(ds.id,), queue="schemas") + refresh_schema.apply_async(args=(ds.id,), queue=settings.SCHEMAS_REFRESH_QUEUE) logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
diff --git a/tests/tasks/test_refresh_schemas.py b/tests/tasks/test_refresh_schemas.py --- a/tests/tasks/test_refresh_schemas.py +++ b/tests/tasks/test_refresh_schemas.py @@ -1,6 +1,4 @@ -import datetime - -from mock import ANY, call, patch +from mock import patch from tests import BaseTestCase from redash.tasks import refresh_schemas @@ -8,7 +6,7 @@ class TestRefreshSchemas(BaseTestCase): def test_calls_refresh_of_all_data_sources(self): - self.factory.data_source # trigger creation + self.factory.data_source # trigger creation with patch('redash.tasks.queries.refresh_schema.apply_async') as refresh_job: refresh_schemas() refresh_job.assert_called()
schemas queue is missing in default Docker configurations

Because there are existing deployments without it, we will change the queue the schema refresh job is using to the default queue (`celery`) & make it configurable so whoever wants can change it to `schemas` again.
2019-03-05T16:21:51
getredash/redash
3563
getredash__redash-3563
[ "3562" ]
4cfa26a55ea7797f864a50cc1390fefbad33b684
diff --git a/redash/utils/parameterized_query.py b/redash/utils/parameterized_query.py --- a/redash/utils/parameterized_query.py +++ b/redash/utils/parameterized_query.py @@ -13,7 +13,7 @@ def _pluck_name_and_value(default_column, row): name_column = "name" if "name" in row.keys() else default_column.lower() value_column = "value" if "value" in row.keys() else default_column.lower() - return {"name": row[name_column], "value": row[value_column]} + return {"name": row[name_column], "value": unicode(row[value_column])} def _load_result(query_id):
diff --git a/tests/utils/test_parameterized_query.py b/tests/utils/test_parameterized_query.py --- a/tests/utils/test_parameterized_query.py +++ b/tests/utils/test_parameterized_query.py @@ -196,11 +196,11 @@ def test_dropdown_values_prefers_name_and_value_columns(self, _): "rows": [{"fish": "Clown", "id": 5, "poultry": "Hen"}]}) def test_dropdown_values_compromises_for_first_column(self, _): values = dropdown_values(1) - self.assertEquals(values, [{"name": 5, "value": 5}]) + self.assertEquals(values, [{"name": 5, "value": "5"}]) @patch('redash.utils.parameterized_query._load_result', return_value={ "columns": [{"name": "ID"}, {"name": "fish"}, {"name": "poultry"}], "rows": [{"fish": "Clown", "ID": 5, "poultry": "Hen"}]}) def test_dropdown_supports_upper_cased_columns(self, _): values = dropdown_values(1) - self.assertEquals(values, [{"name": 5, "value": 5}]) + self.assertEquals(values, [{"name": 5, "value": "5"}])
Issues with Query Based Dropdown when the value is numeric

When the values the query returns for the dropdown are numeric, we have two issues:

1. We don't load the correct value, because the value from the URL is passed as a string while the actual value is a number.
2. We don't load results at all, because they fail parameter schema validation (the string doesn't match the values the dropdown has, which are numbers).
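A small standalone illustration of issue 1 and the coercion fix (the patch uses `unicode()`, which corresponds to `str()` on Python 3):

```python
rows = [{"id": 5}, {"id": 7}]

# Without coercion the dropdown carries numbers...
uncoerced = [{"name": r["id"], "value": r["id"]} for r in rows]
# ...with coercion it carries strings, like the patched _pluck_name_and_value.
coerced = [{"name": r["id"], "value": str(r["id"])} for r in rows]

url_value = "5"  # parameter values always arrive from the URL as text
assert not any(v["value"] == url_value for v in uncoerced)  # never matches
assert any(v["value"] == url_value for v in coerced)        # matches now
```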
2019-03-10T13:20:09
getredash/redash
3569
getredash__redash-3569
[ "3466", "3466" ]
aa9d2466cdfc1972390a80b7c4bba2121e49f11c
diff --git a/redash/extensions.py b/redash/extensions.py --- a/redash/extensions.py +++ b/redash/extensions.py @@ -1,30 +1,103 @@ -import os -from pkg_resources import iter_entry_points, resource_isdir, resource_listdir +# -*- coding: utf-8 -*- +import logging +from collections import OrderedDict as odict +from importlib_metadata import entry_points -def init_app(app): - """ - Load the Redash extensions for the given Redash Flask app. - """ - if not hasattr(app, 'redash_extensions'): - app.redash_extensions = {} +# The global Redash extension registry +extensions = odict() + +# The periodic Celery tasks as provided by Redash extensions. +# This is separate from the internal periodic Celery tasks in +# celery_schedule since the extension task discovery phase is +# after the configuration has already happened. +periodic_tasks = odict() + +logger = logging.getLogger(__name__) + + +def load_extensions(app): + """Load the Redash extensions for the given Redash Flask app. - for entry_point in iter_entry_points('redash.extensions'): - app.logger.info('Loading Redash extension %s.', entry_point.name) + The extension entry point can return any type of value but + must take a Flask application object. + + E.g.:: + + def extension(app): + app.logger.info("Loading the Foobar extenions") + Foobar(app) + + """ + for entry_point in entry_points().get("redash.extensions", []): + app.logger.info('Loading Redash extension "%s".', entry_point.name) try: - extension = entry_point.load() - app.redash_extensions[entry_point.name] = { - "entry_function": extension(app), - "resources_list": [] + # Then try to load the entry point (import and getattr) + obj = entry_point.load() + except (ImportError, AttributeError): + # or move on + app.logger.error( + 'Redash extension "%s" could not be found.', entry_point.name + ) + continue + + if not callable(obj): + app.logger.error( + 'Redash extension "%s" is not a callable.', entry_point.name + ) + continue + + # then simply call the loaded entry point. + extensions[entry_point.name] = obj(app) + + +def load_periodic_tasks(logger): + """Load the periodic tasks as defined in Redash extensions. + + The periodic task entry point needs to return a set of parameters + that can be passed to Celery's add_periodic_task: + + https://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html#entries + + E.g.:: + + def add_two_and_two(): + return { + 'name': 'add 2 and 2 every 10 seconds' + 'sig': add.s(2, 2), + 'schedule': 10.0, # in seconds or a timedelta } - except ImportError: - app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name) - (root_module, _) = os.path.splitext(entry_point.module_name) - content_folder_relative = os.path.join(entry_point.name, 'bundle') - - # If it's a frontend extension only, store a list of files in the bundle directory. - if resource_isdir(root_module, content_folder_relative): - app.redash_extensions[entry_point.name] = { - "entry_function": None, - "resources_list": resource_listdir(root_module, content_folder_relative) - } + + and then registered with an entry point under the "redash.periodic_tasks" + group, e.g. in your setup.py:: + + setup( + # ... + entry_points={ + "redash.periodic_tasks": [ + "add_two_and_two = calculus.addition:add_two_and_two", + ] + # ... + }, + # ... 
+ ) + """ + for entry_point in entry_points().get("redash.periodic_tasks", []): + logger.info( + 'Loading periodic Redash tasks "%s" from "%s".', + entry_point.name, + entry_point.value, + ) + try: + periodic_tasks[entry_point.name] = entry_point.load() + except (ImportError, AttributeError): + # and move on if it couldn't load it + logger.error( + 'Periodic Redash task "%s" could not be found at "%s".', + entry_point.name, + entry_point.value, + ) + + +def init_app(app): + load_extensions(app) diff --git a/redash/worker.py b/redash/worker.py --- a/redash/worker.py +++ b/redash/worker.py @@ -1,5 +1,4 @@ from __future__ import absolute_import - from datetime import timedelta from random import randint @@ -8,15 +7,20 @@ from celery import Celery from celery.schedules import crontab from celery.signals import worker_process_init +from celery.utils.log import get_logger -from redash import create_app, settings +from redash import create_app, extensions, settings from redash.metrics import celery as celery_metrics # noqa +logger = get_logger(__name__) + + celery = Celery('redash', broker=settings.CELERY_BROKER, include='redash.tasks') +# The internal periodic Celery tasks to automatically schedule. celery_schedule = { 'refresh_queries': { 'task': 'redash.tasks.refresh_queries', @@ -69,18 +73,21 @@ def __call__(self, *args, **kwargs): celery.Task = ContextTask -# Create Flask app after forking a new worker, to make sure no resources are shared between processes. @worker_process_init.connect def init_celery_flask_app(**kwargs): + """Create the Flask app after forking a new worker. + + This is to make sure no resources are shared between processes. + """ app = create_app() app.app_context().push() -# Commented until https://github.com/getredash/redash/issues/3466 is implemented. -# Hook for extensions to add periodic tasks. -# @celery.on_after_configure.connect -# def add_periodic_tasks(sender, **kwargs): -# app = safe_create_app() -# periodic_tasks = getattr(app, 'periodic_tasks', {}) -# for params in periodic_tasks.values(): -# sender.add_periodic_task(**params) [email protected]_after_configure.connect +def add_periodic_tasks(sender, **kwargs): + """Load all periodic tasks from extensions and add them to Celery.""" + # Populate the redash.extensions.periodic_tasks dictionary + extensions.load_periodic_tasks(logger) + for params in extensions.periodic_tasks.values(): + # Add it to Celery's periodic task registry, too. + sender.add_periodic_task(**params)
Rewrite Extensions registry not to depend on Flask app

- [ ] Rewrite Extensions registry not to depend on Flask app
- [ ] Bring back support for `add_periodic_tasks` (disabled in #3465)
Please allow me to specify again why the backend extensions need the Flask app instance (at the moment) passed directly (and can't use `flask.current_app`):

- traditional Flask extensions require manual initialization of extension instances, Flask blueprints are not enough to cover the full life cycle of Flask apps
- our Docker based deployment workflow (contrary to e.g. a Python package based one) requires us to have a mechanism in place that extends Redash via separate dependencies (refs #2810) that are installed into the container
- to be able to extend the Redash backend system with our own API endpoints, we're currently requiring the use of Flask-Restful's API endpoint registry
- once we've moved to something else (#2981) the app instance is still needed, since the request handler API in anything based on Flask uses the app instance to store the handlers
- `flask.current_app` isn't set yet when extensions are loaded in `redash.create_app`, since that's where the Flask runtime looks for the app instance

Some ideas how to solve this:

- move the extension initialization into a signal handler for [Flask.before_first_request](http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_first_request) which is called [just before the first request is handled](https://github.com/pallets/flask/blob/a74864ec229141784374f1998324d2cbac837295/flask/app.py#L1826)
- simply rely on `flask.current_app` in extension entry point callbacks
- move to using a Flask integration layer for initializing the Celery app, e.g. CERN's [flask-celeryext](https://github.com/inveniosoftware/flask-celeryext)

For some examples, feel free to check out https://github.com/mozilla/redash-stmo which contains a few extensions already, including examples that:

- extend a built-in query runner
- extend built-in API endpoints
- add new API endpoints
- bundle additional React components
- use a 3rd party Flask extension
- additional Celery tasks

I would be extremely happy if we were not to break any of those features 😬 But I'm also happy to work on this if you'd like and make sure the extension API becomes rock solid.

> Please allow me to specify again why the backend extensions need the Flask app instance (at the moment) passed directly (and can't use `flask.current_app`):

I should've elaborated more in the issue description... The idea here is not to prevent existing use cases, but to improve the code architecture. The registry not being coupled with Flask doesn't mean we can't use it with Flask, it just means we don't need Flask to make it work in Celery.

Currently `redash.extensions.init_extensions` both iterates on `iter_entry_points` and updates the Flask app object. Once it's done, the only way to get the list of extensions is to query Flask's app. We just need to introduce some other object in the middle which we can query for extensions.
2019-03-11T22:57:07
getredash/redash
3,613
getredash__redash-3613
[ "3608" ]
c47dd05095b449fe6c7648bf9d0625aef1be9ac6
diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py --- a/redash/handlers/authentication.py +++ b/redash/handlers/authentication.py @@ -224,7 +224,7 @@ def client_config(): 'showPermissionsControl': current_org.get_setting("feature_show_permissions_control"), 'allowCustomJSVisualizations': settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS, 'autoPublishNamedQueries': settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES, - 'mailSettingsMissing': settings.MAIL_DEFAULT_SENDER is None, + 'mailSettingsMissing': not settings.email_server_is_configured(), 'dashboardRefreshIntervals': settings.DASHBOARD_REFRESH_INTERVALS, 'queryRefreshIntervals': settings.QUERY_REFRESH_INTERVALS, 'googleLoginEnabled': settings.GOOGLE_OAUTH_ENABLED, diff --git a/redash/handlers/users.py b/redash/handlers/users.py --- a/redash/handlers/users.py +++ b/redash/handlers/users.py @@ -38,11 +38,10 @@ def invite_user(org, inviter, user, send_email=True): - email_configured = settings.MAIL_DEFAULT_SENDER is not None d = user.to_dict() invite_url = invite_link_for_user(user) - if email_configured and send_email: + if settings.email_server_is_configured() and send_email: send_invite_email(inviter, user, invite_url, org) else: d['invite_link'] = invite_url @@ -229,15 +228,16 @@ def post(self, user_id): if domain.lower() in blacklist or domain.lower() == 'qq.com': abort(400, message='Bad email address.') - email_changed = 'email' in params and params['email'] != user.email - if email_changed: + email_address_changed = 'email' in params and params['email'] != user.email + needs_to_verify_email = email_address_changed and settings.email_server_is_configured() + if needs_to_verify_email: user.is_email_verified = False try: self.update_model(user, params) models.db.session.commit() - if email_changed: + if needs_to_verify_email: send_verify_email(user, self.current_org) # The user has updated their email or password. This should invalidate all _other_ sessions, diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py --- a/redash/settings/__init__.py +++ b/redash/settings/__init__.py @@ -141,6 +141,11 @@ def all_settings(): MAIL_MAX_EMAILS = os.environ.get('REDASH_MAIL_MAX_EMAILS', None) MAIL_ASCII_ATTACHMENTS = parse_boolean(os.environ.get('REDASH_MAIL_ASCII_ATTACHMENTS', 'false')) + +def email_server_is_configured(): + return MAIL_DEFAULT_SENDER is not None + + HOST = os.environ.get('REDASH_HOST', '') ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE = os.environ.get('REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE', "({state}) {alert_name}")
diff --git a/tests/handlers/test_users.py b/tests/handlers/test_users.py --- a/tests/handlers/test_users.py +++ b/tests/handlers/test_users.py @@ -40,10 +40,8 @@ def test_creates_user(self): self.assertEqual(rv.json['name'], test_user['name']) self.assertEqual(rv.json['email'], test_user['email']) - def test_shows_invite_link_when_email_is_not_configured(self): - previous = settings.MAIL_DEFAULT_SENDER - settings.MAIL_DEFAULT_SENDER = None - + @patch('redash.settings.email_server_is_configured', return_value=False) + def test_shows_invite_link_when_email_is_not_configured(self, _): admin = self.factory.create_admin() test_user = {'name': 'User', 'email': '[email protected]'} @@ -52,12 +50,8 @@ def test_shows_invite_link_when_email_is_not_configured(self): self.assertEqual(rv.status_code, 200) self.assertTrue('invite_link' in rv.json) - settings.MAIL_DEFAULT_SENDER = previous - - def test_does_not_show_invite_link_when_email_is_configured(self): - previous = settings.MAIL_DEFAULT_SENDER - settings.MAIL_DEFAULT_SENDER = "[email protected]" - + @patch('redash.settings.email_server_is_configured', return_value=True) + def test_does_not_show_invite_link_when_email_is_configured(self, _): admin = self.factory.create_admin() test_user = {'name': 'User', 'email': '[email protected]'} @@ -66,8 +60,6 @@ def test_does_not_show_invite_link_when_email_is_configured(self): self.assertEqual(rv.status_code, 200) self.assertFalse('invite_link' in rv.json) - settings.MAIL_DEFAULT_SENDER = previous - def test_creates_user_case_insensitive_email(self): admin = self.factory.create_admin() @@ -230,12 +222,20 @@ def test_returns_200_for_non_admin_changing_his_own(self): rv = self.make_request('post', "/api/users/{}".format(self.factory.user.id), data={"name": "New Name"}) self.assertEqual(rv.status_code, 200) - def test_marks_email_as_not_verified_when_changed(self): + @patch('redash.settings.email_server_is_configured', return_value=True) + def test_marks_email_as_not_verified_when_changed(self, _): user = self.factory.user user.is_email_verified = True rv = self.make_request('post', "/api/users/{}".format(user.id), data={"email": "[email protected]"}) self.assertFalse(user.is_email_verified) + @patch('redash.settings.email_server_is_configured', return_value=False) + def test_doesnt_mark_email_as_not_verified_when_changed_and_email_server_is_not_configured(self, _): + user = self.factory.user + user.is_email_verified = True + rv = self.make_request('post', "/api/users/{}".format(user.id), data={"email": "[email protected]"}) + self.assertTrue(user.is_email_verified) + def test_returns_200_for_admin_changing_other_user(self): admin = self.factory.create_admin()
"Please verify your email address" message shows up when changing email but having no email server configured "Please verify your email address" message shows up when changing email but having no email server configured. Expected behavior: not show this message without an email server configured.
2019-03-20T11:18:16
getredash/redash
3619
getredash__redash-3619
[ "2925" ]
c47dd05095b449fe6c7648bf9d0625aef1be9ac6
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py --- a/redash/query_runner/presto.py +++ b/redash/query_runner/presto.py @@ -56,8 +56,11 @@ def configuration_schema(cls): 'username': { 'type': 'string' }, + 'password': { + 'type': 'string' + }, }, - 'order': ['host', 'protocol', 'port', 'username', 'schema', 'catalog'], + 'order': ['host', 'protocol', 'port', 'username', 'password', 'schema', 'catalog'], 'required': ['host'] } @@ -100,6 +103,7 @@ def run_query(self, query, user): port=self.configuration.get('port', 8080), protocol=self.configuration.get('protocol', 'http'), username=self.configuration.get('username', 'redash'), + password=self.configuration.get('password', ''), catalog=self.configuration.get('catalog', 'hive'), schema=self.configuration.get('schema', 'default'))
Support for Presto username and password

Currently the Presto query runner supports username only. We should support password as well. This probably requires upgrading the PyHive library.
2019-03-21T12:28:21
getredash/redash
3634
getredash__redash-3634
[ "3602" ]
ff0967f0d8b7dbd5d6169e021f7b5b690e20aa29
diff --git a/redash/cli/database.py b/redash/cli/database.py --- a/redash/cli/database.py +++ b/redash/cli/database.py @@ -1,5 +1,6 @@ import time +import sqlalchemy from flask.cli import AppGroup from flask_migrate import stamp from sqlalchemy.exc import DatabaseError @@ -25,6 +26,8 @@ def create_tables(): from redash.models import db _wait_for_db_connection(db) + # To create triggers for searchable models, we need to call configure_mappers(). + sqlalchemy.orm.configure_mappers() db.create_all() # Need to mark current DB as up to date
Queries search is not working on release/7.0.x

### Issue Summary
I tried building a Docker image with the Redash Dockerfile and starting it with docker-compose, but queries search is not working.

### Steps to Reproduce
![Untitled](https://user-images.githubusercontent.com/6704470/54521288-93d2ad80-49a5-11e9-9794-94572b6698d0.gif)

Check out the queries table and find that `search_vector` is null.

![1](https://user-images.githubusercontent.com/6704470/54521423-de542a00-49a5-11e9-9f37-5b5923042973.png)

### Technical details:
* Redash Version: release/7.0.x
* Browser/OS: Chrome Mac
* How did you install Redash: redash setup
How did you build the database?

> How did you build the database?

Using redash/setup/docker-compose.yml:

```
postgres:
  image: postgres:9.5.6-alpine
  env_file: /opt/redash/env
  volumes:
    - /opt/redash/postgres-data:/var/lib/postgresql/data
  restart: always
```

Yes, but how did you create the database tables?

> Yes, but how did you create the database tables?

Replace the original image with the built image in redash/setup/docker-compose.yml, and run in redash/setup:

```
docker-compose -f docker-compose.yml -p redash run --rm server create_db
```

Just to make sure: this is a new install, not an upgrade, correct?

> Just to make sure: this is a new install, not an upgrade, correct?

Yes, just to test the new version.

Thanks. I managed to confirm this :-( @jezdez maybe we need to set up something for the trigger to be created when creating the DB from scratch (using the `create_db` command)?

Ah, right, we simply have to run the migrations in the `create_tables` command so this gets picked up. I don't think there is a better way to cover ongoing schema changes than the migration system, it's built for that exact purpose. I don't recall why `create_tables` was written like this, to be honest.

Because from my experience migrations don't age well. Usually over time they tend to break. When creating a database from scratch, the ORM usually does a better job by creating the tables from their definitions than trying to mutate the DB over all the different versions we ever had.

Yeah, I've had that experience as well, especially when migrations are leaky and codify subtle data model structure or use specific code paths. I don't have enough experience with it to say if that's the case for Alembic though. One thing we could consider is resetting the migrations to reduce the risk of decay, basically squashing them to just a few migration files. Alternatively we could create a command to hook up the search update trigger manually if needed.
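A toy demonstration of the ordering problem discussed above, using SQLAlchemy 1.4-style imports: hooks that run during mapper configuration, which libraries like sqlalchemy-searchable use to attach their trigger DDL, never fire for a fresh database unless `configure_mappers()` is called before `create_all()`:

```python
import sqlalchemy
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.event import listens_for
from sqlalchemy.orm import Mapper, declarative_base

Base = declarative_base()
configured = []

@listens_for(Mapper, "mapper_configured")
def on_mapper_configured(mapper, class_):
    configured.append(class_.__name__)  # trigger DDL would be attached here

class Query(Base):
    __tablename__ = "queries"
    id = Column(Integer, primary_key=True)

# Mappers normally configure lazily on first use -- too late for create_all().
sqlalchemy.orm.configure_mappers()
assert configured == ["Query"]

Base.metadata.create_all(create_engine("sqlite://"))  # DDL now includes the hooks' work
```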
2019-03-24T13:28:38
getredash/redash
3684
getredash__redash-3684
[ "3481" ]
595af3bce86eb37e09fc472b648e18fa7709fb1d
diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py --- a/redash/query_runner/mongodb.py +++ b/redash/query_runner/mongodb.py @@ -13,6 +13,7 @@ import pymongo from bson.objectid import ObjectId from bson.timestamp import Timestamp + from bson.decimal128 import Decimal128 from bson.son import SON from bson.json_util import object_hook as bson_object_hook enabled = True @@ -38,7 +39,8 @@ def default(self, o): return str(o) elif isinstance(o, Timestamp): return super(MongoDBJSONEncoder, self).default(o.as_datetime()) - + elif isinstance(o, Decimal128): + return o.to_decimal() return super(MongoDBJSONEncoder, self).default(o)
Error running query: Decimal128('0') is not JSON serializable

ANY Mongo query on a table with a Decimal128 field type fails. Every Mongo query must be aggregated and projected to not return Decimal128 fields. We are using Decimal128 to work with e.g. transaction amounts, so it's critical to us.

### Technical details:
* Redash Version: Redash 6.0.0+b8537 (4780bd9c)
* Browser/OS: Safari
* How did you install Redash: Cloud
Need to extend the [JSON encoder](https://github.com/getredash/redash/blob/e9c88ea176387cad6b5188ec7c6061eb78d765fd/redash/query_runner/mongodb.py#L35-L42) to support this object type as well.
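A sketch of that extension, assuming pymongo/bson is installed. The patch returns `o.to_decimal()` and lets Redash's base encoder handle the resulting `Decimal`; a standalone encoder has to finish the job itself, so this sketch converts to `str`:

```python
import json
from bson.decimal128 import Decimal128

class MongoDBJSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, Decimal128):
            # to_decimal() yields decimal.Decimal; stringify to stay lossless.
            return str(o.to_decimal())
        return super(MongoDBJSONEncoder, self).default(o)

print(json.dumps({"amount": Decimal128("0.01")}, cls=MongoDBJSONEncoder))
# {"amount": "0.01"}
```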
2019-04-07T12:49:36
getredash/redash
3716
getredash__redash-3716
[ "3711" ]
9fec3ca9eaeedafc3bec7c66063e427bbd157c57
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py --- a/redash/handlers/query_results.py +++ b/redash/handlers/query_results.py @@ -108,18 +108,22 @@ def post(self): class QueryResultDropdownResource(BaseResource): def get(self, query_id): + query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) + require_access(query.data_source, current_user, view_only) return dropdown_values(query_id) class QueryDropdownsResource(BaseResource): def get(self, query_id, dropdown_query_id): query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org) + require_access(query, current_user, view_only) related_queries_ids = [p['queryId'] for p in query.parameters if p['type'] == 'query'] if int(dropdown_query_id) not in related_queries_ids: - abort(403) + dropdown_query = get_object_or_404(models.Query.get_by_id_and_org, dropdown_query_id, self.current_org) + require_access(dropdown_query.data_source, current_user, view_only) - return dropdown_values(dropdown_query_id, should_require_access=False) + return dropdown_values(dropdown_query_id) class QueryResultResource(BaseResource): diff --git a/redash/models/parameterized_query.py b/redash/models/parameterized_query.py --- a/redash/models/parameterized_query.py +++ b/redash/models/parameterized_query.py @@ -1,6 +1,5 @@ import pystache from functools import partial -from flask_login import current_user from flask_restful import abort from numbers import Number from redash.utils import mustache_render, json_loads @@ -17,27 +16,21 @@ def _pluck_name_and_value(default_column, row): return {"name": row[name_column], "value": unicode(row[value_column])} -def _load_result(query_id, should_require_access): +def _load_result(query_id): from redash.authentication.org_resolving import current_org from redash import models query = models.Query.get_by_id_and_org(query_id, current_org) - if should_require_access: - require_access(query.data_source, current_user, view_only) - - query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org) - if query.data_source: - require_access(query.data_source.groups, current_user, view_only) query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org) return json_loads(query_result.data) else: abort(400, message="This query is detached from any data source. Please select a different query.") -def dropdown_values(query_id, should_require_access=True): - data = _load_result(query_id, should_require_access) +def dropdown_values(query_id): + data = _load_result(query_id) first_column = data["columns"][0]["name"] pluck = partial(_pluck_name_and_value, first_column) return map(pluck, data["rows"])
diff --git a/tests/handlers/test_query_results.py b/tests/handlers/test_query_results.py --- a/tests/handlers/test_query_results.py +++ b/tests/handlers/test_query_results.py @@ -201,26 +201,39 @@ def test_checks_for_access_to_the_query(self): self.assertEquals(rv.status_code, 403) + class TestQueryDropdownsResource(BaseTestCase): - def test_prevents_access_if_query_isnt_associated_with_parent(self): + def test_prevents_access_if_unassociated_and_doesnt_have_access(self): query = self.factory.create_query() - unrelated_dropdown_query = self.factory.create_query() + ds2 = self.factory.create_data_source(group=self.factory.org.admin_group, view_only=False) + unrelated_dropdown_query = self.factory.create_query(data_source=ds2) + + # unrelated_dropdown_query has not been associated with query + # user does not have direct access to unrelated_dropdown_query rv = self.make_request('get', '/api/queries/{}/dropdowns/{}'.format(query.id, unrelated_dropdown_query.id)) self.assertEquals(rv.status_code, 403) - -class TestQueryDropdownsResource(BaseTestCase): - def test_prevents_access_if_query_isnt_associated_with_parent(self): + def test_allows_access_if_unassociated_but_user_has_access(self): query = self.factory.create_query() - unrelated_dropdown_query = self.factory.create_query() + + query_result = self.factory.create_query_result() + data = { + 'rows': [], + 'columns': [{'name': 'whatever'}] + } + query_result = self.factory.create_query_result(data=json_dumps(data)) + unrelated_dropdown_query = self.factory.create_query(latest_query_data=query_result) + + # unrelated_dropdown_query has not been associated with query + # user has direct access to unrelated_dropdown_query rv = self.make_request('get', '/api/queries/{}/dropdowns/{}'.format(query.id, unrelated_dropdown_query.id)) - self.assertEquals(rv.status_code, 403) + self.assertEquals(rv.status_code, 200) - def test_allows_access_if_user_has_access_to_parent_query(self): + def test_allows_access_if_associated_and_has_access_to_parent(self): query_result = self.factory.create_query_result() data = { 'rows': [], @@ -237,22 +250,28 @@ def test_allows_access_if_user_has_access_to_parent_query(self): } query = self.factory.create_query(options=options) + # dropdown_query has been associated with query + # user has access to query + rv = self.make_request('get', '/api/queries/{}/dropdowns/{}'.format(query.id, dropdown_query.id)) self.assertEquals(rv.status_code, 200) - def test_prevents_access_if_user_doesnt_have_access_to_parent_query(self): - related_dropdown_query = self.factory.create_query() - unrelated_dropdown_query = self.factory.create_query() + def test_prevents_access_if_associated_and_doesnt_have_access_to_parent(self): + ds2 = self.factory.create_data_source(group=self.factory.org.admin_group, view_only=False) + dropdown_query = self.factory.create_query(data_source=ds2) options = { 'parameters': [{ 'type': 'query', - 'queryId': related_dropdown_query.id + 'queryId': dropdown_query.id }] } - query = self.factory.create_query(options=options) + query = self.factory.create_query(data_source=ds2, options=options) - rv = self.make_request('get', '/api/queries/{}/dropdowns/{}'.format(query.id, unrelated_dropdown_query.id)) + # dropdown_query has been associated with query + # user doesnt have access to either query + + rv = self.make_request('get', '/api/queries/{}/dropdowns/{}'.format(query.id, dropdown_query.id)) self.assertEquals(rv.status_code, 403)
Cannot Add Query-based Dropdown Parameter to a Saved Query

### Issue Summary
On a new query, query-based dropdown parameters get their values from `/api/queries/:id/dropdown`, and on saved queries they get them by association from their parent query id (`/api/queries/:parent_id/dropdowns/:id`). When adding a query-based dropdown parameter to a saved query, the association is not yet created, which results in a 403.

### Steps to Reproduce
1. Create a new query without a parameter
2. Save it
3. Add a query-based parameter to the query.
4. The dropdown is not populated with values (a 403 occurs)
2019-04-17T09:20:42
getredash/redash
3717
getredash__redash-3717
[ "3710" ]
a1e75d2f0b6d1bc10fd931419a6607f8a2751eb1
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py --- a/redash/handlers/queries.py +++ b/redash/handlers/queries.py @@ -174,13 +174,13 @@ def get(self): def require_access_to_dropdown_queries(user, query_def): parameters = query_def.get('options', {}).get('parameters', []) - dropdown_query_ids = [str(p['queryId']) for p in parameters if p['type'] == 'query'] + dropdown_query_ids = set([str(p['queryId']) for p in parameters if p['type'] == 'query']) if dropdown_query_ids: groups = models.Query.all_groups_for_query_ids(dropdown_query_ids) if len(groups) < len(dropdown_query_ids): - abort(400, message="You are trying to associate a dropdown query that does not have a matching group." + abort(400, message="You are trying to associate a dropdown query that does not have a matching group. " "Please verify the dropdown query id you are trying to associate with this query.") require_access(dict(groups), user, view_only)
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py --- a/tests/handlers/test_queries.py +++ b/tests/handlers/test_queries.py @@ -122,6 +122,11 @@ def test_allows_association_with_authorized_dropdown_queries(self): options = { 'parameters': [{ + 'name': 'foo', + 'type': 'query', + 'queryId': other_query.id + }, { + 'name': 'bar', 'type': 'query', 'queryId': other_query.id }] @@ -251,6 +256,11 @@ def test_allows_association_with_authorized_dropdown_queries(self): 'data_source_id': self.factory.data_source.id, 'options': { 'parameters': [{ + 'name': 'foo', + 'type': 'query', + 'queryId': other_query.id + }, { + 'name': 'bar', 'type': 'query', 'queryId': other_query.id }]
Can't save queries with more than one Dropdown Parameter from the same query

### Issue Summary
It's not possible to save a query that contains more than one Query Based Dropdown Parameter from the same query.

### Steps to Reproduce
1. Create a Query and add a Query Based Dropdown Parameter (you can use [this query](https://redash-preview.netlify.com/queries/123) to test - use query "v7 Authors" for the new parameter) and save it
2. Add another Query Based Dropdown Parameter using the same query as the first one
3. Try to save the query

It should save the query successfully.

![issue-dropdown-parameters-1](https://user-images.githubusercontent.com/3356951/56169940-35a1e600-5fb6-11e9-81ce-6fd1eb0b219b.gif)

### Technical details:
* Redash Version: Latest
* Browser/OS: Any
* How did you install Redash: --
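The heart of the fix is the `set(...)` in the patch: duplicate references collapse to one id, so the count comparison against the matching groups no longer trips. A standalone illustration with made-up parameter data:

```python
parameters = [
    {"name": "foo", "type": "query", "queryId": 123},
    {"name": "bar", "type": "query", "queryId": 123},  # same dropdown query twice
]

# Before the fix, a list kept both entries, so len(ids) == 2 while only
# one group matched, and the save was rejected.
dropdown_query_ids = set(str(p["queryId"]) for p in parameters if p["type"] == "query")
assert dropdown_query_ids == {"123"}  # one unique id -> one required group
```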
2019-04-17T09:50:40
getredash/redash
3828
getredash__redash-3828
[ "3787" ]
d7b03bac02d946134b816bcf815f3a2bbb76bd98
diff --git a/redash/handlers/data_sources.py b/redash/handlers/data_sources.py --- a/redash/handlers/data_sources.py +++ b/redash/handlers/data_sources.py @@ -60,6 +60,12 @@ def post(self, data_source_id): abort(400) + self.record_event({ + 'action': 'edit', + 'object_id': data_source.id, + 'object_type': 'datasource', + }) + return data_source.to_dict(all=True) @require_admin @@ -202,15 +208,18 @@ class DataSourceTestResource(BaseResource): def post(self, data_source_id): data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org) + response = {} + try: + data_source.query_runner.test_connection() + except Exception as e: + response = {"message": text_type(e), "ok": False} + else: + response = {"message": "success", "ok": True} + self.record_event({ 'action': 'test', 'object_id': data_source_id, 'object_type': 'datasource', + 'result': response, }) - - try: - data_source.query_runner.test_connection() - except Exception as e: - return {"message": text_type(e), "ok": False} - else: - return {"message": "success", "ok": True} + return response
Data Source Lifecycle Events

1. Add event when someone opens the new data source dialog.
2. Add event when someone edits a data source (on the backend).
3. Update Test Connection event to include result of test.
2019-05-23T21:14:06
getredash/redash
3877
getredash__redash-3877
[ "3770" ]
05f6ef0fb6518b751c1973841e429677ec5415ab
diff --git a/redash/authentication/__init__.py b/redash/authentication/__init__.py --- a/redash/authentication/__init__.py +++ b/redash/authentication/__init__.py @@ -41,6 +41,10 @@ def sign(key, path, expires): @login_manager.user_loader def load_user(user_id_with_identity): + user = api_key_load_user_from_request(request) + if user: + return user + org = current_org._get_current_object() try:
diff --git a/tests/test_authentication.py b/tests/test_authentication.py --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -127,6 +127,19 @@ def test_user_api_key(self): self.assertEqual(user.id, hmac_load_user_from_request(request).id) +class TestSessionAuthentication(BaseTestCase): + def test_prefers_api_key_over_session_user_id(self): + user = self.factory.create_user() + query = self.factory.create_query(user=user) + + other_org = self.factory.create_org() + other_user = self.factory.create_user(org=other_org) + models.db.session.flush() + + rv = self.make_request('get', '/api/queries/{}?api_key={}'.format(query.id, query.api_key), user=other_user) + self.assertEqual(rv.status_code, 200) + + class TestCreateAndLoginUser(BaseTestCase): def test_logins_valid_user(self): user = self.factory.create_user(email=u'[email protected]')
API Key authentication should take precedence over cookies

Currently, when you're logged in as a user, you can't use embeds or shared dashboards to which you do not have access -- even if you provide the API key. This is because the current user is defined by the session cookie and the API key is ignored. We need to change this behavior so that the API key takes precedence.

From a quick look at [Flask-Login](https://flask-login.readthedocs.io)'s documentation, it seems that it calls `user_loader` first and only if this fails will it try the `request_loader`. Unless this is configurable, we might need to change `user_loader` to always return `None` and implement our own logic in `request_loader`.
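A minimal, self-contained toy app (illustrative names only, not Redash's actual code) showing the Flask-Login precedence described above:

```python
from flask import Flask
from flask_login import LoginManager, UserMixin

app = Flask(__name__)
app.secret_key = "dev"  # needed for session cookies
login_manager = LoginManager(app)

class User(UserMixin):
    def __init__(self, user_id):
        self.id = user_id

@login_manager.user_loader
def load_user(user_id):
    # Flask-Login consults this first whenever the session cookie carries a
    # user id; if it returns a user, the request_loader below never runs.
    return User(user_id)

@login_manager.request_loader
def load_user_from_request(req):
    # Only reached when there is no session user -- which is why an
    # `api_key` parameter is ignored while you are logged in.
    api_key = req.args.get("api_key")
    if api_key:
        return User("api:" + api_key)
    return None
```

This is why the fix in the patch above performs the API-key lookup inside `user_loader` itself, before falling back to the session user.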
2019-06-04T10:04:57
getredash/redash
3894
getredash__redash-3894
[ "3893" ]
dda75cce24437d5c308b2ca8c42acaa164a62667
diff --git a/redash/models/__init__.py b/redash/models/__init__.py
--- a/redash/models/__init__.py
+++ b/redash/models/__init__.py
@@ -140,41 +140,47 @@ def delete(self):
         QueryResult.query.filter(QueryResult.data_source == self).delete()
         res = db.session.delete(self)
         db.session.commit()
+
+        redis_connection.delete(self._schema_key)
+
         return res
 
     def get_schema(self, refresh=False):
-        key = "data_source:schema:{}".format(self.id)
-
         cache = None
         if not refresh:
-            cache = redis_connection.get(key)
+            cache = redis_connection.get(self._schema_key)
 
         if cache is None:
             query_runner = self.query_runner
             schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])
 
-            redis_connection.set(key, json_dumps(schema))
+            redis_connection.set(self._schema_key, json_dumps(schema))
         else:
             schema = json_loads(cache)
 
         return schema
 
+    @property
+    def _schema_key(self):
+        return "data_source:schema:{}".format(self.id)
+
+    @property
     def _pause_key(self):
         return 'ds:{}:pause'.format(self.id)
 
     @property
     def paused(self):
-        return redis_connection.exists(self._pause_key())
+        return redis_connection.exists(self._pause_key)
 
     @property
     def pause_reason(self):
-        return redis_connection.get(self._pause_key())
+        return redis_connection.get(self._pause_key)
 
     def pause(self, reason=None):
-        redis_connection.set(self._pause_key(), reason or '')
+        redis_connection.set(self._pause_key, reason or '')
 
     def resume(self):
-        redis_connection.delete(self._pause_key())
+        redis_connection.delete(self._pause_key)
 
     def add_group(self, group, view_only=False):
         dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)
diff --git a/tests/models/test_data_sources.py b/tests/models/test_data_sources.py
--- a/tests/models/test_data_sources.py
+++ b/tests/models/test_data_sources.py
@@ -1,4 +1,5 @@
 import mock
+from mock import patch
 
 from tests import BaseTestCase
 from redash.models import DataSource, Query, QueryResult
@@ -96,3 +97,10 @@ def test_deletes_child_models(self):
         data_source.delete()
         self.assertIsNone(DataSource.query.get(data_source.id))
         self.assertEqual(0, QueryResult.query.filter(QueryResult.data_source == data_source).count())
+
+    @patch('redash.redis_connection.delete')
+    def test_deletes_schema(self, mock_redis):
+        data_source = self.factory.create_data_source()
+        data_source.delete()
+
+        mock_redis.assert_called_with(data_source._schema_key)
When deleting a data source, it should remove the cached schema from Redis

When deleting a data source ([DataSource#delete](https://github.com/getredash/redash/blob/dda75cce24437d5c308b2ca8c42acaa164a62667/redash/models/__init__.py#L138-L143)), we need to remove the cached schema from Redis as well. (The schema is being cached in the [DataSource#get_schema](https://github.com/getredash/redash/blob/dda75cce24437d5c308b2ca8c42acaa164a62667/redash/models/__init__.py#L145-L160) method.)
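A minimal sketch of the intended cleanup, assuming the `data_source:schema:<id>` key scheme used by `get_schema` and the plain redis-py client; `delete_data_source` is a stand-in for the model's `delete` method, not the actual implementation.

```python
import redis

redis_connection = redis.Redis()

def delete_data_source(data_source_id):
    # ...delete the DataSource row and its query results here...
    # Then drop the cached schema so a later data source can't pick up
    # a stale schema left behind in Redis.
    redis_connection.delete("data_source:schema:{}".format(data_source_id))
```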
2019-06-10T17:51:43