| repo (stringlengths 7-55) | path (stringlengths 4-127) | func_name (stringlengths 1-88) | original_string (stringlengths 75-19.8k) | language (stringclasses: 1 value) | code (stringlengths 75-19.8k) | code_tokens (sequence) | docstring (stringlengths 3-17.3k) | docstring_tokens (sequence) | sha (stringlengths 40) | url (stringlengths 87-242) | partition (stringclasses: 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---|
opendatateam/udata | udata/core/discussions/actions.py | discussions_for | def discussions_for(user, only_open=True):
'''
Build a queryset to query discussions related to a given user's assets.
It includes discussions coming from the user's organizations
:param bool only_open: whether to include closed discussions or not.
'''
# Only fetch required fields for discussion filtering (id and slug)
# Greatly improves performance and memory usage
datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
if only_open:
qs = qs(closed__exists=False)
return qs | python | def discussions_for(user, only_open=True):
'''
Build a queryset to query discussions related to a given user's assets.
It includes discussions coming from the user's organizations
:param bool only_open: whether to include closed discussions or not.
'''
# Only fetch required fields for discussion filtering (id and slug)
# Greatly improves performance and memory usage
datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
if only_open:
qs = qs(closed__exists=False)
return qs | [
"def",
"discussions_for",
"(",
"user",
",",
"only_open",
"=",
"True",
")",
":",
"# Only fetch required fields for discussion filtering (id and slug)",
"# Greatly improve performances and memory usage",
"datasets",
"=",
"Dataset",
".",
"objects",
".",
"owned_by",
"(",
"user",
".",
"id",
",",
"*",
"user",
".",
"organizations",
")",
".",
"only",
"(",
"'id'",
",",
"'slug'",
")",
"reuses",
"=",
"Reuse",
".",
"objects",
".",
"owned_by",
"(",
"user",
".",
"id",
",",
"*",
"user",
".",
"organizations",
")",
".",
"only",
"(",
"'id'",
",",
"'slug'",
")",
"qs",
"=",
"Discussion",
".",
"objects",
"(",
"subject__in",
"=",
"list",
"(",
"datasets",
")",
"+",
"list",
"(",
"reuses",
")",
")",
"if",
"only_open",
":",
"qs",
"=",
"qs",
"(",
"closed__exists",
"=",
"False",
")",
"return",
"qs"
] | Build a queryset to query discussions related to a given user's assets.
It includes discussions coming from the user's organizations
:param bool only_open: whether to include closed discussions or not. | [
"Build",
"a",
"queryset",
"to",
"query",
"discussions",
"related",
"to",
"a",
"given",
"user",
"s",
"assets",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/actions.py#L9-L25 | train |
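A minimal usage sketch for this row's function. It assumes a Flask app context with the MongoEngine connection initialized and an already-loaded `user` document; the `title` attribute on `Discussion` is an assumption for illustration only:

```python
from udata.core.discussions.actions import discussions_for

open_count = discussions_for(user).count()                # open discussions only
everything = discussions_for(user, only_open=False)        # include closed ones
for discussion in everything:
    print(discussion.id, discussion.title)  # `title` assumed for illustration
```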
opendatateam/udata | udata/frontend/markdown.py | nofollow_callback | def nofollow_callback(attrs, new=False):
"""
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
The callback is not split up, so that the URL is parsed only once.
"""
parsed_url = urlparse(attrs[(None, 'href')])
if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
scheme='https' if request.is_secure else 'http',
netloc=current_app.config['SERVER_NAME'],
path=parsed_url.path)
return attrs
else:
rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
if 'nofollow' not in [x.lower() for x in rel]:
rel.append('nofollow')
attrs[(None, 'rel')] = ' '.join(rel)
return attrs | python | def nofollow_callback(attrs, new=False):
"""
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
The callback is not split up, so that the URL is parsed only once.
"""
parsed_url = urlparse(attrs[(None, 'href')])
if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
scheme='https' if request.is_secure else 'http',
netloc=current_app.config['SERVER_NAME'],
path=parsed_url.path)
return attrs
else:
rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
if 'nofollow' not in [x.lower() for x in rel]:
rel.append('nofollow')
attrs[(None, 'rel')] = ' '.join(rel)
return attrs | [
"def",
"nofollow_callback",
"(",
"attrs",
",",
"new",
"=",
"False",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
")",
"if",
"parsed_url",
".",
"netloc",
"in",
"(",
"''",
",",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
")",
":",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
"=",
"'{scheme}://{netloc}{path}'",
".",
"format",
"(",
"scheme",
"=",
"'https'",
"if",
"request",
".",
"is_secure",
"else",
"'http'",
",",
"netloc",
"=",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
",",
"path",
"=",
"parsed_url",
".",
"path",
")",
"return",
"attrs",
"else",
":",
"rel",
"=",
"[",
"x",
"for",
"x",
"in",
"attrs",
".",
"get",
"(",
"(",
"None",
",",
"'rel'",
")",
",",
"''",
")",
".",
"split",
"(",
"' '",
")",
"if",
"x",
"]",
"if",
"'nofollow'",
"not",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"rel",
"]",
":",
"rel",
".",
"append",
"(",
"'nofollow'",
")",
"attrs",
"[",
"(",
"None",
",",
"'rel'",
")",
"]",
"=",
"' '",
".",
"join",
"(",
"rel",
")",
"return",
"attrs"
] | Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
The callback is not split up, so that the URL is parsed only once. | [
"Turn",
"relative",
"links",
"into",
"external",
"ones",
"and",
"avoid",
"nofollow",
"for",
"us"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/markdown.py#L40-L59 | train |
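For context, this function follows the `bleach.linkify` callback protocol: each link's attribute dict is threaded through the callback chain. A hedged sketch of how it would be wired up (the wrapper function is illustrative, and it must run inside a Flask request context so `current_app` and `request` resolve):

```python
import bleach  # the library whose callback protocol nofollow_callback implements

def render_user_html(html):
    # Illustrative wrapper: applies the callback to every link found.
    return bleach.linkify(html, callbacks=[nofollow_callback])
```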
opendatateam/udata | udata/frontend/markdown.py | bleach_clean | def bleach_clean(stream):
"""
Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`.
"""
return bleach.clean(
stream,
tags=current_app.config['MD_ALLOWED_TAGS'],
attributes=current_app.config['MD_ALLOWED_ATTRIBUTES'],
styles=current_app.config['MD_ALLOWED_STYLES'],
strip_comments=False) | python | def bleach_clean(stream):
"""
Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`.
"""
return bleach.clean(
stream,
tags=current_app.config['MD_ALLOWED_TAGS'],
attributes=current_app.config['MD_ALLOWED_ATTRIBUTES'],
styles=current_app.config['MD_ALLOWED_STYLES'],
strip_comments=False) | [
"def",
"bleach_clean",
"(",
"stream",
")",
":",
"return",
"bleach",
".",
"clean",
"(",
"stream",
",",
"tags",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_TAGS'",
"]",
",",
"attributes",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_ATTRIBUTES'",
"]",
",",
"styles",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_STYLES'",
"]",
",",
"strip_comments",
"=",
"False",
")"
] | Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`. | [
"Sanitize",
"malicious",
"attempts",
"but",
"keep",
"the",
"EXCERPT_TOKEN",
".",
"By",
"default",
"only",
"keeps",
"bleach",
".",
"ALLOWED_TAGS",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/markdown.py#L94-L104 | train |
opendatateam/udata | udata/core/badges/commands.py | toggle | def toggle(path_or_id, badge_kind):
'''Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs.
'''
if exists(path_or_id):
with open(path_or_id) as open_file:
for id_or_slug in open_file.readlines():
toggle_badge(id_or_slug.strip(), badge_kind)
else:
toggle_badge(path_or_id, badge_kind) | python | def toggle(path_or_id, badge_kind):
'''Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs.
'''
if exists(path_or_id):
with open(path_or_id) as open_file:
for id_or_slug in open_file.readlines():
toggle_badge(id_or_slug.strip(), badge_kind)
else:
toggle_badge(path_or_id, badge_kind) | [
"def",
"toggle",
"(",
"path_or_id",
",",
"badge_kind",
")",
":",
"if",
"exists",
"(",
"path_or_id",
")",
":",
"with",
"open",
"(",
"path_or_id",
")",
"as",
"open_file",
":",
"for",
"id_or_slug",
"in",
"open_file",
".",
"readlines",
"(",
")",
":",
"toggle_badge",
"(",
"id_or_slug",
".",
"strip",
"(",
")",
",",
"badge_kind",
")",
"else",
":",
"toggle_badge",
"(",
"path_or_id",
",",
"badge_kind",
")"
] | Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs. | [
"Toggle",
"a",
"badge_kind",
"for",
"a",
"given",
"path_or_id"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/commands.py#L35-L46 | train |
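Two equivalent invocations, sketched below; the badge kind `'certified'` and the identifiers are only examples (valid kinds come from each model's `__badges__` registry, see `badge_form` further down in this dump):

```python
# Toggle one object by id or slug:
toggle('my-dataset-slug', 'certified')

# Or toggle many objects at once from a file with one id/slug per line:
toggle('/tmp/slugs.txt', 'certified')
```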
opendatateam/udata | udata/core/storages/views.py | upload | def upload(name):
'''Handle upload on POST if authorized.'''
storage = fs.by_name(name)
return jsonify(success=True, **handle_upload(storage)) | python | def upload(name):
'''Handle upload on POST if authorized.'''
storage = fs.by_name(name)
return jsonify(success=True, **handle_upload(storage)) | [
"def",
"upload",
"(",
"name",
")",
":",
"storage",
"=",
"fs",
".",
"by_name",
"(",
"name",
")",
"return",
"jsonify",
"(",
"success",
"=",
"True",
",",
"*",
"*",
"handle_upload",
"(",
"storage",
")",
")"
] | Handle upload on POST if authorized. | [
"Handle",
"upload",
"on",
"POST",
"if",
"authorized",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/storages/views.py#L22-L25 | train |
opendatateam/udata | udata/search/__init__.py | unindex_model_on_delete | def unindex_model_on_delete(sender, document, **kwargs):
'''Unindex Mongo document on post_delete'''
if current_app.config.get('AUTO_INDEX'):
unindex.delay(document) | python | def unindex_model_on_delete(sender, document, **kwargs):
'''Unindex Mongo document on post_delete'''
if current_app.config.get('AUTO_INDEX'):
unindex.delay(document) | [
"def",
"unindex_model_on_delete",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_app",
".",
"config",
".",
"get",
"(",
"'AUTO_INDEX'",
")",
":",
"unindex",
".",
"delay",
"(",
"document",
")"
] | Unindex Mongo document on post_delete | [
"Unindex",
"Mongo",
"document",
"on",
"post_delete"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L167-L170 | train |
opendatateam/udata | udata/search/__init__.py | register | def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter | python | def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter | [
"def",
"register",
"(",
"adapter",
")",
":",
"# register the class in the catalog",
"if",
"adapter",
".",
"model",
"and",
"adapter",
".",
"model",
"not",
"in",
"adapter_catalog",
":",
"adapter_catalog",
"[",
"adapter",
".",
"model",
"]",
"=",
"adapter",
"# Automatically (re|un)index objects on save/delete",
"post_save",
".",
"connect",
"(",
"reindex_model_on_save",
",",
"sender",
"=",
"adapter",
".",
"model",
")",
"post_delete",
".",
"connect",
"(",
"unindex_model_on_delete",
",",
"sender",
"=",
"adapter",
".",
"model",
")",
"return",
"adapter"
] | Register a search adapter | [
"Register",
"a",
"search",
"adapter"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L173-L181 | train |
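`register` is meant to be used as a class decorator on adapters. A hypothetical registration, assuming `Dataset` and `ModelSearchAdapter` are already imported from udata:

```python
@register
class DatasetSearch(ModelSearchAdapter):
    # Binding the adapter to its model is what wires up the automatic
    # (re|un)indexation signals shown above.
    model = Dataset
```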
opendatateam/udata | udata/forms/__init__.py | CommonFormMixin.process | def process(self, formdata=None, obj=None, data=None, **kwargs):
'''Wrap the process method to store the current object instance'''
self._obj = obj
super(CommonFormMixin, self).process(formdata, obj, data, **kwargs) | python | def process(self, formdata=None, obj=None, data=None, **kwargs):
'''Wrap the process method to store the current object instance'''
self._obj = obj
super(CommonFormMixin, self).process(formdata, obj, data, **kwargs) | [
"def",
"process",
"(",
"self",
",",
"formdata",
"=",
"None",
",",
"obj",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_obj",
"=",
"obj",
"super",
"(",
"CommonFormMixin",
",",
"self",
")",
".",
"process",
"(",
"formdata",
",",
"obj",
",",
"data",
",",
"*",
"*",
"kwargs",
")"
] | Wrap the process method to store the current object instance | [
"Wrap",
"the",
"process",
"method",
"to",
"store",
"the",
"current",
"object",
"instance"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/__init__.py#L19-L22 | train |
opendatateam/udata | udata/linkchecker/backends.py | get | def get(name):
'''Get a linkchecker by its name, or fall back to the default'''
linkcheckers = get_enabled(ENTRYPOINT, current_app)
linkcheckers.update(no_check=NoCheckLinkchecker) # no_check always enabled
selected_linkchecker = linkcheckers.get(name)
if not selected_linkchecker:
default_linkchecker = current_app.config.get(
'LINKCHECKING_DEFAULT_LINKCHECKER')
selected_linkchecker = linkcheckers.get(default_linkchecker)
if not selected_linkchecker:
log.error('No linkchecker found ({} requested and no fallback)'.format(
name))
return selected_linkchecker | python | def get(name):
'''Get a linkchecker by its name, or fall back to the default'''
linkcheckers = get_enabled(ENTRYPOINT, current_app)
linkcheckers.update(no_check=NoCheckLinkchecker) # no_check always enabled
selected_linkchecker = linkcheckers.get(name)
if not selected_linkchecker:
default_linkchecker = current_app.config.get(
'LINKCHECKING_DEFAULT_LINKCHECKER')
selected_linkchecker = linkcheckers.get(default_linkchecker)
if not selected_linkchecker:
log.error('No linkchecker found ({} requested and no fallback)'.format(
name))
return selected_linkchecker | [
"def",
"get",
"(",
"name",
")",
":",
"linkcheckers",
"=",
"get_enabled",
"(",
"ENTRYPOINT",
",",
"current_app",
")",
"linkcheckers",
".",
"update",
"(",
"no_check",
"=",
"NoCheckLinkchecker",
")",
"# no_check always enabled",
"selected_linkchecker",
"=",
"linkcheckers",
".",
"get",
"(",
"name",
")",
"if",
"not",
"selected_linkchecker",
":",
"default_linkchecker",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'LINKCHECKING_DEFAULT_LINKCHECKER'",
")",
"selected_linkchecker",
"=",
"linkcheckers",
".",
"get",
"(",
"default_linkchecker",
")",
"if",
"not",
"selected_linkchecker",
":",
"log",
".",
"error",
"(",
"'No linkchecker found ({} requested and no fallback)'",
".",
"format",
"(",
"name",
")",
")",
"return",
"selected_linkchecker"
] | Get a linkchecker by its name, or fall back to the default | [
"Get",
"a",
"linkchecker",
"given",
"its",
"name",
"or",
"fallback",
"on",
"default"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/backends.py#L28-L40 | train |
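Resolution order sketch: the named plugin first, then the configured default, then `None` with an error logged. `'croquemort'` is just an example plugin name, and the checker's instantiation/`check` API is an assumption for illustration:

```python
checker = get('croquemort')  # enabled plugin of that name, if any
if checker is None:
    # Neither the requested name nor LINKCHECKING_DEFAULT_LINKCHECKER
    # matched an enabled linkchecker; callers must handle this case.
    ...
else:
    result = checker().check(resource)  # API assumed, for illustration only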
opendatateam/udata | udata/features/notifications/actions.py | get_notifications | def get_notifications(user):
'''List notifications for a given user'''
notifications = []
for name, func in _providers.items():
notifications.extend([{
'type': name,
'created_on': dt,
'details': details
} for dt, details in func(user)])
return notifications | python | def get_notifications(user):
'''List notifications for a given user'''
notifications = []
for name, func in _providers.items():
notifications.extend([{
'type': name,
'created_on': dt,
'details': details
} for dt, details in func(user)])
return notifications | [
"def",
"get_notifications",
"(",
"user",
")",
":",
"notifications",
"=",
"[",
"]",
"for",
"name",
",",
"func",
"in",
"_providers",
".",
"items",
"(",
")",
":",
"notifications",
".",
"extend",
"(",
"[",
"{",
"'type'",
":",
"name",
",",
"'created_on'",
":",
"dt",
",",
"'details'",
":",
"details",
"}",
"for",
"dt",
",",
"details",
"in",
"func",
"(",
"user",
")",
"]",
")",
"return",
"notifications"
] | List notifications for a given user | [
"List",
"notification",
"for",
"a",
"given",
"user"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/notifications/actions.py#L29-L40 | train |
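A provider here is any callable taking a user and yielding `(created_on, details)` pairs; `get_notifications` tags each pair with the provider's registered name. A hypothetical provider, registered directly into the module-level `_providers` registry the function iterates over (udata presumably ships a registration decorator, which is not visible in this row):

```python
from datetime import datetime

def demo_provider(user):
    # Hypothetical provider yielding a single notification.
    yield datetime.utcnow(), {'subject': 'demo', 'message': 'hello'}

_providers['demo'] = demo_provider

# get_notifications(user) would now include an entry like:
# {'type': 'demo', 'created_on': <datetime>, 'details': {...}}
```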
opendatateam/udata | udata/core/tags/tasks.py | count_tags | def count_tags(self):
'''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save() | python | def count_tags(self):
'''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save() | [
"def",
"count_tags",
"(",
"self",
")",
":",
"for",
"key",
",",
"model",
"in",
"TAGGED",
".",
"items",
"(",
")",
":",
"collection",
"=",
"'{0}_tags'",
".",
"format",
"(",
"key",
")",
"results",
"=",
"(",
"model",
".",
"objects",
"(",
"tags__exists",
"=",
"True",
")",
".",
"map_reduce",
"(",
"map_tags",
",",
"reduce_tags",
",",
"collection",
")",
")",
"for",
"result",
"in",
"results",
":",
"tag",
",",
"created",
"=",
"Tag",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"result",
".",
"key",
",",
"auto_save",
"=",
"False",
")",
"tag",
".",
"counts",
"[",
"key",
"]",
"=",
"int",
"(",
"result",
".",
"value",
")",
"if",
"result",
".",
"value",
"else",
"0",
"tag",
".",
"save",
"(",
")"
] | Count tag occurrences by type and update the tag collection | [
"Count",
"tag",
"occurences",
"by",
"type",
"and",
"update",
"the",
"tag",
"collection"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/tags/tasks.py#L38-L48 | train |
opendatateam/udata | udata/search/adapter.py | ModelSearchAdapter.from_model | def from_model(cls, document):
"""By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields
"""
return cls(meta={'id': document.id}, **cls.serialize(document)) | python | def from_model(cls, document):
"""By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields
"""
return cls(meta={'id': document.id}, **cls.serialize(document)) | [
"def",
"from_model",
"(",
"cls",
",",
"document",
")",
":",
"return",
"cls",
"(",
"meta",
"=",
"{",
"'id'",
":",
"document",
".",
"id",
"}",
",",
"*",
"*",
"cls",
".",
"serialize",
"(",
"document",
")",
")"
] | By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields | [
"By",
"default",
"use",
"the",
"to_dict",
"method"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L38-L43 | train |
opendatateam/udata | udata/search/adapter.py | ModelSearchAdapter.completer_tokenize | def completer_tokenize(cls, value, min_length=3):
'''Quick and dirty tokenizer for completion suggester'''
tokens = list(itertools.chain(*[
[m for m in n.split("'") if len(m) > min_length]
for n in value.split(' ')
]))
return list(set([value] + tokens + [' '.join(tokens)])) | python | def completer_tokenize(cls, value, min_length=3):
'''Quick and dirty tokenizer for completion suggester'''
tokens = list(itertools.chain(*[
[m for m in n.split("'") if len(m) > min_length]
for n in value.split(' ')
]))
return list(set([value] + tokens + [' '.join(tokens)])) | [
"def",
"completer_tokenize",
"(",
"cls",
",",
"value",
",",
"min_length",
"=",
"3",
")",
":",
"tokens",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"[",
"m",
"for",
"m",
"in",
"n",
".",
"split",
"(",
"\"'\"",
")",
"if",
"len",
"(",
"m",
")",
">",
"min_length",
"]",
"for",
"n",
"in",
"value",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"return",
"list",
"(",
"set",
"(",
"[",
"value",
"]",
"+",
"tokens",
"+",
"[",
"' '",
".",
"join",
"(",
"tokens",
")",
"]",
")",
")"
] | Quick and dirty tokenizer for completion suggester | [
"Quick",
"and",
"dirty",
"tokenizer",
"for",
"completion",
"suggester"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L54-L60 | train |
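A worked example of the tokenizer above: the value is split on spaces, then on apostrophes; fragments of `min_length` characters or fewer are dropped, and the original value plus the space-joined tokens are kept. Order is unspecified because a set is used internally:

```python
tokens = ModelSearchAdapter.completer_tokenize("Jeux d'aventure")
# 'Jeux' (4 chars) survives, 'd' is dropped, 'aventure' survives:
assert sorted(tokens) == sorted(
    ["Jeux d'aventure", 'Jeux', 'aventure', 'Jeux aventure'])
```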
opendatateam/udata | udata/search/adapter.py | ModelSearchAdapter.facet_search | def facet_search(cls, *facets):
'''
Build a FacetedSearch class for a given list of facets.
Elasticsearch DSL doesn't allow facets to be declared
once and for all and then selected later:
they are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested.
'''
f = dict((k, v) for k, v in cls.facets.items() if k in facets)
class TempSearch(SearchQuery):
adapter = cls
analyzer = cls.analyzer
boosters = cls.boosters
doc_types = cls
facets = f
fields = cls.fields
fuzzy = cls.fuzzy
match_type = cls.match_type
model = cls.model
return TempSearch | python | def facet_search(cls, *facets):
'''
Build a FacetedSearch class for a given list of facets.
Elasticsearch DSL doesn't allow facets to be declared
once and for all and then selected later:
they are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested.
'''
f = dict((k, v) for k, v in cls.facets.items() if k in facets)
class TempSearch(SearchQuery):
adapter = cls
analyzer = cls.analyzer
boosters = cls.boosters
doc_types = cls
facets = f
fields = cls.fields
fuzzy = cls.fuzzy
match_type = cls.match_type
model = cls.model
return TempSearch | [
"def",
"facet_search",
"(",
"cls",
",",
"*",
"facets",
")",
":",
"f",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"facets",
".",
"items",
"(",
")",
"if",
"k",
"in",
"facets",
")",
"class",
"TempSearch",
"(",
"SearchQuery",
")",
":",
"adapter",
"=",
"cls",
"analyzer",
"=",
"cls",
".",
"analyzer",
"boosters",
"=",
"cls",
".",
"boosters",
"doc_types",
"=",
"cls",
"facets",
"=",
"f",
"fields",
"=",
"cls",
".",
"fields",
"fuzzy",
"=",
"cls",
".",
"fuzzy",
"match_type",
"=",
"cls",
".",
"match_type",
"model",
"=",
"cls",
".",
"model",
"return",
"TempSearch"
] | Build a FacetedSearch class for a given list of facets.
Elasticsearch DSL doesn't allow facets to be declared
once and for all and then selected later:
they are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested. | [
"Build",
"a",
"FacetSearch",
"for",
"a",
"given",
"list",
"of",
"facets"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L63-L88 | train |
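Usage sketch: the classmethod returns a new search class restricted to the named facets. `DatasetSearch` stands for any registered adapter, and `'tag'`/`'organization'` are illustrative facet names that must be keys of the adapter's `facets` dict:

```python
TagOrgSearch = DatasetSearch.facet_search('tag', 'organization')
assert set(TagOrgSearch.facets).issubset(DatasetSearch.facets)
```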
opendatateam/udata | udata/models/slug_fields.py | populate_slug | def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug | python | def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug | [
"def",
"populate_slug",
"(",
"instance",
",",
"field",
")",
":",
"value",
"=",
"getattr",
"(",
"instance",
",",
"field",
".",
"db_field",
")",
"try",
":",
"previous",
"=",
"instance",
".",
"__class__",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"instance",
".",
"id",
")",
"except",
"Exception",
":",
"previous",
"=",
"None",
"# Field value has changed",
"changed",
"=",
"field",
".",
"db_field",
"in",
"instance",
".",
"_get_changed_fields",
"(",
")",
"# Field initial value has been manually set",
"manual",
"=",
"not",
"previous",
"and",
"value",
"or",
"changed",
"if",
"not",
"manual",
"and",
"field",
".",
"populate_from",
":",
"# value to slugify is extracted from populate_from parameter",
"value",
"=",
"getattr",
"(",
"instance",
",",
"field",
".",
"populate_from",
")",
"if",
"previous",
"and",
"value",
"==",
"getattr",
"(",
"previous",
",",
"field",
".",
"populate_from",
")",
":",
"return",
"value",
"if",
"previous",
"and",
"getattr",
"(",
"previous",
",",
"field",
".",
"db_field",
")",
"==",
"value",
":",
"# value is unchanged from DB",
"return",
"value",
"if",
"previous",
"and",
"not",
"changed",
"and",
"not",
"field",
".",
"update",
":",
"# Field is not manually set and slug should not update on change",
"return",
"value",
"slug",
"=",
"field",
".",
"slugify",
"(",
"value",
")",
"# This can happen when serializing an object which does not contain",
"# the properties used to generate the slug. Typically, when such",
"# an object is passed to one of the Celery workers (see issue #20).",
"if",
"slug",
"is",
"None",
":",
"return",
"old_slug",
"=",
"getattr",
"(",
"previous",
",",
"field",
".",
"db_field",
",",
"None",
")",
"if",
"slug",
"==",
"old_slug",
":",
"return",
"slug",
"# Ensure uniqueness",
"if",
"field",
".",
"unique",
":",
"base_slug",
"=",
"slug",
"index",
"=",
"1",
"qs",
"=",
"instance",
".",
"__class__",
".",
"objects",
"if",
"previous",
":",
"qs",
"=",
"qs",
"(",
"id__ne",
"=",
"previous",
".",
"id",
")",
"def",
"exists",
"(",
"s",
")",
":",
"return",
"qs",
"(",
"class_check",
"=",
"False",
",",
"*",
"*",
"{",
"field",
".",
"db_field",
":",
"s",
"}",
")",
".",
"limit",
"(",
"1",
")",
".",
"count",
"(",
"True",
")",
">",
"0",
"while",
"exists",
"(",
"slug",
")",
":",
"slug",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"base_slug",
",",
"index",
")",
"index",
"+=",
"1",
"# Track old slugs for this class",
"if",
"field",
".",
"follow",
"and",
"old_slug",
"!=",
"slug",
":",
"ns",
"=",
"instance",
".",
"__class__",
".",
"__name__",
"# Destroy redirections from this new slug",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"ns",
",",
"old_slug",
"=",
"slug",
")",
".",
"delete",
"(",
")",
"if",
"old_slug",
":",
"# Create a redirect for previous slug",
"slug_follower",
",",
"created",
"=",
"SlugFollow",
".",
"objects",
".",
"get_or_create",
"(",
"namespace",
"=",
"ns",
",",
"old_slug",
"=",
"old_slug",
",",
"auto_save",
"=",
"False",
",",
")",
"slug_follower",
".",
"new_slug",
"=",
"slug",
"slug_follower",
".",
"save",
"(",
")",
"# Maintain previous redirects",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"ns",
",",
"new_slug",
"=",
"old_slug",
")",
".",
"update",
"(",
"new_slug",
"=",
"slug",
")",
"setattr",
"(",
"instance",
",",
"field",
".",
"db_field",
",",
"slug",
")",
"return",
"slug"
] | Populate a slug field if needed. | [
"Populate",
"a",
"slug",
"field",
"if",
"needed",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L113-L193 | train |
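The behaviour is easiest to see on a toy model. A sketch assuming MongoEngine-style field declarations; the `Article` model and its fields are illustrative, not udata's real ones, while the `populate_from`, `unique` and `follow` options come straight from the code above:

```python
from mongoengine import Document, StringField

class Article(Document):  # illustrative model
    title = StringField()
    slug = SlugField(populate_from='title', unique=True, follow=True)

# Saving three articles titled "My data" yields unique slugs in turn:
#   'my-data', 'my-data-1', 'my-data-2'
# Renaming one later (with follow=True) records a SlugFollow redirect
# from the old slug to the new one.
```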
opendatateam/udata | udata/models/slug_fields.py | SlugField.slugify | def slugify(self, value):
'''
Apply slugification according to specified field rules
'''
if value is None:
return
return slugify.slugify(value, max_length=self.max_length,
separator=self.separator,
to_lower=self.lower_case) | python | def slugify(self, value):
'''
Apply slugification according to specified field rules
'''
if value is None:
return
return slugify.slugify(value, max_length=self.max_length,
separator=self.separator,
to_lower=self.lower_case) | [
"def",
"slugify",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"return",
"slugify",
".",
"slugify",
"(",
"value",
",",
"max_length",
"=",
"self",
".",
"max_length",
",",
"separator",
"=",
"self",
".",
"separator",
",",
"to_lower",
"=",
"self",
".",
"lower_case",
")"
] | Apply slugification according to specified field rules | [
"Apply",
"slugification",
"according",
"to",
"specified",
"field",
"rules"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L55-L64 | train |
opendatateam/udata | udata/models/slug_fields.py | SlugField.cleanup_on_delete | def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete() | python | def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete() | [
"def",
"cleanup_on_delete",
"(",
"self",
",",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"follow",
"or",
"sender",
"is",
"not",
"self",
".",
"owner_document",
":",
"return",
"slug",
"=",
"getattr",
"(",
"document",
",",
"self",
".",
"db_field",
")",
"namespace",
"=",
"self",
".",
"owner_document",
".",
"__name__",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"namespace",
",",
"new_slug",
"=",
"slug",
")",
".",
"delete",
"(",
")"
] | Clean up slug redirections on object deletion | [
"Clean",
"up",
"slug",
"redirections",
"on",
"object",
"deletion"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L76-L84 | train |
opendatateam/udata | udata/core/badges/forms.py | badge_form | def badge_form(model):
'''A form factory for a given model's badges'''
class BadgeForm(ModelForm):
model_class = Badge
kind = fields.RadioField(
_('Kind'), [validators.DataRequired()],
choices=model.__badges__.items(),
description=_('Kind of badge (certified, etc)'))
return BadgeForm | python | def badge_form(model):
'''A form factory for a given model's badges'''
class BadgeForm(ModelForm):
model_class = Badge
kind = fields.RadioField(
_('Kind'), [validators.DataRequired()],
choices=model.__badges__.items(),
description=_('Kind of badge (certified, etc)'))
return BadgeForm | [
"def",
"badge_form",
"(",
"model",
")",
":",
"class",
"BadgeForm",
"(",
"ModelForm",
")",
":",
"model_class",
"=",
"Badge",
"kind",
"=",
"fields",
".",
"RadioField",
"(",
"_",
"(",
"'Kind'",
")",
",",
"[",
"validators",
".",
"DataRequired",
"(",
")",
"]",
",",
"choices",
"=",
"model",
".",
"__badges__",
".",
"items",
"(",
")",
",",
"description",
"=",
"_",
"(",
"'Kind of badge (certified, etc)'",
")",
")",
"return",
"BadgeForm"
] | A form factory for a given model's badges | [
"A",
"form",
"factory",
"for",
"a",
"given",
"model",
"badges"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/forms.py#L11-L21 | train |
opendatateam/udata | udata/core/jobs/actions.py | delay | def delay(name, args, kwargs):
'''Run a job asynchronously'''
args = args or []
kwargs = dict(k.split() for k in kwargs) if kwargs else {}
if name not in celery.tasks:
log.error('Job %s not found', name)
return
job = celery.tasks[name]
log.info('Sending job %s', name)
async_result = job.delay(*args, **kwargs)
log.info('Job %s sent to workers', async_result.id) | python | def delay(name, args, kwargs):
'''Run a job asynchronously'''
args = args or []
kwargs = dict(k.split() for k in kwargs) if kwargs else {}
if name not in celery.tasks:
log.error('Job %s not found', name)
return
job = celery.tasks[name]
log.info('Sending job %s', name)
async_result = job.delay(*args, **kwargs)
log.info('Job %s sent to workers', async_result.id) | [
"def",
"delay",
"(",
"name",
",",
"args",
",",
"kwargs",
")",
":",
"args",
"=",
"args",
"or",
"[",
"]",
"kwargs",
"=",
"dict",
"(",
"k",
".",
"split",
"(",
")",
"for",
"k",
"in",
"kwargs",
")",
"if",
"kwargs",
"else",
"{",
"}",
"if",
"name",
"not",
"in",
"celery",
".",
"tasks",
":",
"log",
".",
"error",
"(",
"'Job %s not found'",
",",
"name",
")",
"job",
"=",
"celery",
".",
"tasks",
"[",
"name",
"]",
"log",
".",
"info",
"(",
"'Sending job %s'",
",",
"name",
")",
"async_result",
"=",
"job",
".",
"delay",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"log",
".",
"info",
"(",
"'Job %s sended to workers'",
",",
"async_result",
".",
"id",
")"
] | Run a job asynchronously | [
"Run",
"a",
"job",
"asynchronously"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/jobs/actions.py#L22-L31 | train |
opendatateam/udata | udata/harvest/filters.py | is_url | def is_url(default_scheme='http', **kwargs):
"""Return a converter that converts a clean string to an URL."""
def converter(value):
if value is None:
return value
if '://' not in value and default_scheme:
value = '://'.join((default_scheme, value.strip()))
try:
return uris.validate(value)
except uris.ValidationError as e:
raise Invalid(e.message)
return converter | python | def is_url(default_scheme='http', **kwargs):
"""Return a converter that converts a clean string to an URL."""
def converter(value):
if value is None:
return value
if '://' not in value and default_scheme:
value = '://'.join((default_scheme, value.strip()))
try:
return uris.validate(value)
except uris.ValidationError as e:
raise Invalid(e.message)
return converter | [
"def",
"is_url",
"(",
"default_scheme",
"=",
"'http'",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"converter",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"if",
"'://'",
"not",
"in",
"value",
"and",
"default_scheme",
":",
"value",
"=",
"'://'",
".",
"join",
"(",
"(",
"default_scheme",
",",
"value",
".",
"strip",
"(",
")",
")",
")",
"try",
":",
"return",
"uris",
".",
"validate",
"(",
"value",
")",
"except",
"uris",
".",
"ValidationError",
"as",
"e",
":",
"raise",
"Invalid",
"(",
"e",
".",
"message",
")",
"return",
"converter"
] | Return a converter that converts a clean string to a URL. | [
"Return",
"a",
"converter",
"that",
"converts",
"a",
"clean",
"string",
"to",
"an",
"URL",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/filters.py#L89-L100 | train |
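Usage sketch, assuming `uris.validate` returns the validated string, as the code above implies:

```python
to_url = is_url()  # default_scheme='http'
assert to_url('example.com/data') == 'http://example.com/data'
assert to_url('https://example.com') == 'https://example.com'
assert to_url(None) is None  # None passes through untouched
```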
opendatateam/udata | udata/harvest/filters.py | hash | def hash(value):
'''Detect a hash type'''
if not value:
return
elif len(value) == 32:
type = 'md5'
elif len(value) == 40:
type = 'sha1'
elif len(value) == 64:
type = 'sha256'
else:
return None
return {'type': type, 'value': value} | python | def hash(value):
'''Detect a hash type'''
if not value:
return
elif len(value) == 32:
type = 'md5'
elif len(value) == 40:
type = 'sha1'
elif len(value) == 64:
type = 'sha256'
else:
return None
return {'type': type, 'value': value} | [
"def",
"hash",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"elif",
"len",
"(",
"value",
")",
"==",
"32",
":",
"type",
"=",
"'md5'",
"elif",
"len",
"(",
"value",
")",
"==",
"40",
":",
"type",
"=",
"'sha1'",
"elif",
"len",
"(",
"value",
")",
"==",
"64",
":",
"type",
"=",
"'sha256'",
"else",
":",
"return",
"None",
"return",
"{",
"'type'",
":",
"type",
",",
"'value'",
":",
"value",
"}"
] | Detect a hash type | [
"Detect",
"an",
"hash",
"type"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/filters.py#L103-L115 | train |
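The type is inferred purely from digest length, as a worked example shows. Note that this harvest filter shadows the built-in `hash` within its module; the 32-character value below is the MD5 of the empty string:

```python
digest = 'd41d8cd98f00b204e9800998ecf8427e'   # 32 hex chars -> md5
assert hash(digest) == {'type': 'md5', 'value': digest}
assert hash('a' * 40)['type'] == 'sha1'       # 40 chars
assert hash('a' * 64)['type'] == 'sha256'     # 64 chars
assert hash('abc') is None                    # unrecognized length
```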
opendatateam/udata | udata/search/commands.py | iter_adapters | def iter_adapters():
'''Iterate over adapters in a predictable way'''
adapters = adapter_catalog.values()
return sorted(adapters, key=lambda a: a.model.__name__) | python | def iter_adapters():
'''Iterate over adapters in a predictable way'''
adapters = adapter_catalog.values()
return sorted(adapters, key=lambda a: a.model.__name__) | [
"def",
"iter_adapters",
"(",
")",
":",
"adapters",
"=",
"adapter_catalog",
".",
"values",
"(",
")",
"return",
"sorted",
"(",
"adapters",
",",
"key",
"=",
"lambda",
"a",
":",
"a",
".",
"model",
".",
"__name__",
")"
] | Iterate over adapters in a predictable way | [
"Iter",
"over",
"adapter",
"in",
"predictable",
"way"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L38-L41 | train |
opendatateam/udata | udata/search/commands.py | iter_qs | def iter_qs(qs, adapter):
'''Safely iterate over a DB QuerySet yielding ES documents'''
for obj in qs.no_cache().no_dereference().timeout(False):
if adapter.is_indexable(obj):
try:
doc = adapter.from_model(obj).to_dict(include_meta=True)
yield doc
except Exception as e:
model = adapter.model.__name__
log.error('Unable to index %s "%s": %s', model, str(obj.id),
str(e), exc_info=True) | python | def iter_qs(qs, adapter):
'''Safely iterate over a DB QuerySet yielding ES documents'''
for obj in qs.no_cache().no_dereference().timeout(False):
if adapter.is_indexable(obj):
try:
doc = adapter.from_model(obj).to_dict(include_meta=True)
yield doc
except Exception as e:
model = adapter.model.__name__
log.error('Unable to index %s "%s": %s', model, str(obj.id),
str(e), exc_info=True) | [
"def",
"iter_qs",
"(",
"qs",
",",
"adapter",
")",
":",
"for",
"obj",
"in",
"qs",
".",
"no_cache",
"(",
")",
".",
"no_dereference",
"(",
")",
".",
"timeout",
"(",
"False",
")",
":",
"if",
"adapter",
".",
"is_indexable",
"(",
"obj",
")",
":",
"try",
":",
"doc",
"=",
"adapter",
".",
"from_model",
"(",
"obj",
")",
".",
"to_dict",
"(",
"include_meta",
"=",
"True",
")",
"yield",
"doc",
"except",
"Exception",
"as",
"e",
":",
"model",
"=",
"adapter",
".",
"model",
".",
"__name__",
"log",
".",
"error",
"(",
"'Unable to index %s \"%s\": %s'",
",",
"model",
",",
"str",
"(",
"obj",
".",
"id",
")",
",",
"str",
"(",
"e",
")",
",",
"exc_info",
"=",
"True",
")"
] | Safely iterate over a DB QuerySet yielding ES documents | [
"Safely",
"iterate",
"over",
"a",
"DB",
"QuerySet",
"yielding",
"ES",
"documents"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L44-L54 | train |
opendatateam/udata | udata/search/commands.py | index_model | def index_model(index_name, adapter):
'''Index all objects of a given model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
info['index']['_id'], info['index']['error']) | python | def index_model(index_name, adapter):
'''Index all objects of a given model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
info['index']['_id'], info['index']['error']) | [
"def",
"index_model",
"(",
"index_name",
",",
"adapter",
")",
":",
"model",
"=",
"adapter",
".",
"model",
"log",
".",
"info",
"(",
"'Indexing {0} objects'",
".",
"format",
"(",
"model",
".",
"__name__",
")",
")",
"qs",
"=",
"model",
".",
"objects",
"if",
"hasattr",
"(",
"model",
".",
"objects",
",",
"'visible'",
")",
":",
"qs",
"=",
"qs",
".",
"visible",
"(",
")",
"if",
"adapter",
".",
"exclude_fields",
":",
"qs",
"=",
"qs",
".",
"exclude",
"(",
"*",
"adapter",
".",
"exclude_fields",
")",
"docs",
"=",
"iter_qs",
"(",
"qs",
",",
"adapter",
")",
"docs",
"=",
"iter_for_index",
"(",
"docs",
",",
"index_name",
")",
"for",
"ok",
",",
"info",
"in",
"streaming_bulk",
"(",
"es",
".",
"client",
",",
"docs",
",",
"raise_on_error",
"=",
"False",
")",
":",
"if",
"not",
"ok",
":",
"log",
".",
"error",
"(",
"'Unable to index %s \"%s\": %s'",
",",
"model",
".",
"__name__",
",",
"info",
"[",
"'index'",
"]",
"[",
"'_id'",
"]",
",",
"info",
"[",
"'index'",
"]",
"[",
"'error'",
"]",
")"
] | Index all objects of a given model | [
"Indel",
"all",
"objects",
"given",
"a",
"model"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L64-L80 | train |
opendatateam/udata | udata/search/commands.py | enable_refresh | def enable_refresh(index_name):
'''
Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk
''' # noqa
refresh_interval = current_app.config['ELASTICSEARCH_REFRESH_INTERVAL']
es.indices.put_settings(index=index_name, body={
'index': {'refresh_interval': refresh_interval}
})
es.indices.forcemerge(index=index_name, request_timeout=30) | python | def enable_refresh(index_name):
'''
Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk
''' # noqa
refresh_interval = current_app.config['ELASTICSEARCH_REFRESH_INTERVAL']
es.indices.put_settings(index=index_name, body={
'index': {'refresh_interval': refresh_interval}
})
es.indices.forcemerge(index=index_name, request_timeout=30) | [
"def",
"enable_refresh",
"(",
"index_name",
")",
":",
"# noqa",
"refresh_interval",
"=",
"current_app",
".",
"config",
"[",
"'ELASTICSEARCH_REFRESH_INTERVAL'",
"]",
"es",
".",
"indices",
".",
"put_settings",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"{",
"'index'",
":",
"{",
"'refresh_interval'",
":",
"refresh_interval",
"}",
"}",
")",
"es",
".",
"indices",
".",
"forcemerge",
"(",
"index",
"=",
"index_name",
",",
"request_timeout",
"=",
"30",
")"
] | Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk | [
"Enable",
"refresh",
"and",
"force",
"merge",
".",
"To",
"be",
"used",
"after",
"indexing",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L96-L106 | train |
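This pairs with a `disable_refresh` counterpart used by the `index` command later in this dump, which presumably sets `refresh_interval` to `-1` during bulk loading (the standard Elasticsearch bulk-indexing practice). The intended pattern, sketched:

```python
disable_refresh(index_name)      # suspend refresh while bulk-indexing
try:
    for adapter in iter_adapters():
        index_model(index_name, adapter)
finally:
    enable_refresh(index_name)   # restore the interval, then force-merge
```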
opendatateam/udata | udata/search/commands.py | set_alias | def set_alias(index_name, delete=True):
'''
Properly finish an indexing run by creating an alias.
Previous alias is deleted if needed.
'''
log.info('Creating alias "{0}" on index "{1}"'.format(
es.index_name, index_name))
if es.indices.exists_alias(name=es.index_name):
alias = es.indices.get_alias(name=es.index_name)
previous_indices = alias.keys()
if index_name not in previous_indices:
es.indices.put_alias(index=index_name, name=es.index_name)
for index in previous_indices:
if index != index_name:
es.indices.delete_alias(index=index, name=es.index_name)
if delete:
es.indices.delete(index=index)
else:
es.indices.put_alias(index=index_name, name=es.index_name) | python | def set_alias(index_name, delete=True):
'''
Properly finish an indexing run by creating an alias.
Previous alias is deleted if needed.
'''
log.info('Creating alias "{0}" on index "{1}"'.format(
es.index_name, index_name))
if es.indices.exists_alias(name=es.index_name):
alias = es.indices.get_alias(name=es.index_name)
previous_indices = alias.keys()
if index_name not in previous_indices:
es.indices.put_alias(index=index_name, name=es.index_name)
for index in previous_indices:
if index != index_name:
es.indices.delete_alias(index=index, name=es.index_name)
if delete:
es.indices.delete(index=index)
else:
es.indices.put_alias(index=index_name, name=es.index_name) | [
"def",
"set_alias",
"(",
"index_name",
",",
"delete",
"=",
"True",
")",
":",
"log",
".",
"info",
"(",
"'Creating alias \"{0}\" on index \"{1}\"'",
".",
"format",
"(",
"es",
".",
"index_name",
",",
"index_name",
")",
")",
"if",
"es",
".",
"indices",
".",
"exists_alias",
"(",
"name",
"=",
"es",
".",
"index_name",
")",
":",
"alias",
"=",
"es",
".",
"indices",
".",
"get_alias",
"(",
"name",
"=",
"es",
".",
"index_name",
")",
"previous_indices",
"=",
"alias",
".",
"keys",
"(",
")",
"if",
"index_name",
"not",
"in",
"previous_indices",
":",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"es",
".",
"index_name",
")",
"for",
"index",
"in",
"previous_indices",
":",
"if",
"index",
"!=",
"index_name",
":",
"es",
".",
"indices",
".",
"delete_alias",
"(",
"index",
"=",
"index",
",",
"name",
"=",
"es",
".",
"index_name",
")",
"if",
"delete",
":",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"index",
")",
"else",
":",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"es",
".",
"index_name",
")"
] | Properly finish an indexing run by creating an alias.
Previous alias is deleted if needed. | [
"Properly",
"end",
"an",
"indexation",
"by",
"creating",
"an",
"alias",
".",
"Previous",
"alias",
"is",
"deleted",
"if",
"needed",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L109-L127 | train |
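Together with the other helpers in this module, this enables a zero-downtime reindex: build a fresh index under a new name, populate it, then atomically swing the alias. A condensed sketch using names from the surrounding rows (the shape of `default_index_name()`'s output is assumed to be a timestamped name):

```python
new_index = default_index_name()   # e.g. a timestamped index name
es.initialize(new_index)           # create mappings on the new index
for adapter in iter_adapters():
    index_model(new_index, adapter)
set_alias(new_index)               # repoint the alias, drop old indices
```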
opendatateam/udata | udata/search/commands.py | handle_error | def handle_error(index_name, keep=False):
'''
Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted.
'''
# Handle keyboard interrupt
signal.signal(signal.SIGINT, signal.default_int_handler)
signal.signal(signal.SIGTERM, signal.default_int_handler)
has_error = False
try:
yield
except KeyboardInterrupt:
print('') # Proper warning message under the "^C" display
log.warning('Interrupted by signal')
has_error = True
except Exception as e:
log.error(e)
has_error = True
if has_error:
if not keep:
log.info('Removing index %s', index_name)
es.indices.delete(index=index_name)
sys.exit(-1) | python | def handle_error(index_name, keep=False):
'''
Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted.
'''
# Handle keyboard interrupt
signal.signal(signal.SIGINT, signal.default_int_handler)
signal.signal(signal.SIGTERM, signal.default_int_handler)
has_error = False
try:
yield
except KeyboardInterrupt:
print('') # Proper warning message under the "^C" display
log.warning('Interrupted by signal')
has_error = True
except Exception as e:
log.error(e)
has_error = True
if has_error:
if not keep:
log.info('Removing index %s', index_name)
es.indices.delete(index=index_name)
sys.exit(-1) | [
"def",
"handle_error",
"(",
"index_name",
",",
"keep",
"=",
"False",
")",
":",
"# Handle keyboard interrupt",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"default_int_handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"default_int_handler",
")",
"has_error",
"=",
"False",
"try",
":",
"yield",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"''",
")",
"# Proper warning message under the \"^C\" display",
"log",
".",
"warning",
"(",
"'Interrupted by signal'",
")",
"has_error",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"e",
")",
"has_error",
"=",
"True",
"if",
"has_error",
":",
"if",
"not",
"keep",
":",
"log",
".",
"info",
"(",
"'Removing index %s'",
",",
"index_name",
")",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"index_name",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")"
] | Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted. | [
"Handle",
"errors",
"while",
"indexing",
".",
"In",
"case",
"of",
"error",
"properly",
"log",
"it",
"remove",
"the",
"index",
"and",
"exit",
".",
"If",
"keep",
"is",
"True",
"index",
"is",
"not",
"deleted",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L131-L154 | train |
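`handle_error` is a generator-based context manager (note the bare `yield`; the `@contextmanager` decorator is presumably applied in the source, though it is not visible in this row). Typical use wraps the whole indexing phase so a failed run deletes its half-built index, exactly as the `index` command below does:

```python
with handle_error(index_name, keep=False):
    disable_refresh(index_name)
    for adapter in iter_adapters():
        index_model(index_name, adapter)
    enable_refresh(index_name)
```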
opendatateam/udata | udata/search/commands.py | index | def index(models=None, name=None, force=False, keep=False):
'''
Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed.
'''
index_name = name or default_index_name()
doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
models = [model.lower().rstrip('s') for model in (models or [])]
for model in models:
if model not in doc_types_names:
log.error('Unknown model %s', model)
sys.exit(-1)
log.info('Initializing index "{0}"'.format(index_name))
if es.indices.exists(index_name):
if IS_TTY and not force:
msg = 'Index {0} will be deleted, are you sure?'
click.confirm(msg.format(index_name), abort=True)
es.indices.delete(index_name)
es.initialize(index_name)
with handle_error(index_name, keep):
disable_refresh(index_name)
for adapter in iter_adapters():
if not models or adapter.doc_type().lower() in models:
index_model(index_name, adapter)
else:
log.info('Copying {0} objects to the new index'.format(
adapter.model.__name__))
# Need upgrade to Elasticsearch-py 5.0.0 to write:
# es.reindex({
# 'source': {'index': es.index_name, 'type': adapter.doc_type()},
# 'dest': {'index': index_name}
# })
#
# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
# triggers a server-side documents copy.
# Instead we use this helper, meant for backward compatibility,
# but with poor performance as copy is client-side (scan+bulk)
es_reindex(es.client, es.index_name, index_name, scan_kwargs={
'doc_type': adapter.doc_type()
})
enable_refresh(index_name)
# At this step, we don't want error handler to delete the index
# in case of error
set_alias(index_name, delete=not keep) | python | def index(models=None, name=None, force=False, keep=False):
'''
Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed.
'''
index_name = name or default_index_name()
doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
models = [model.lower().rstrip('s') for model in (models or [])]
for model in models:
if model not in doc_types_names:
log.error('Unknown model %s', model)
sys.exit(-1)
log.info('Initializing index "{0}"'.format(index_name))
if es.indices.exists(index_name):
if IS_TTY and not force:
msg = 'Index {0} will be deleted, are you sure?'
click.confirm(msg.format(index_name), abort=True)
es.indices.delete(index_name)
es.initialize(index_name)
with handle_error(index_name, keep):
disable_refresh(index_name)
for adapter in iter_adapters():
if not models or adapter.doc_type().lower() in models:
index_model(index_name, adapter)
else:
log.info('Copying {0} objects to the new index'.format(
adapter.model.__name__))
# Need upgrade to Elasticsearch-py 5.0.0 to write:
# es.reindex({
# 'source': {'index': es.index_name, 'type': adapter.doc_type()},
# 'dest': {'index': index_name}
# })
#
# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
# triggers a server-side documents copy.
# Instead we use this helper, meant for backward compatibility,
# but with poor performance as copy is client-side (scan+bulk)
es_reindex(es.client, es.index_name, index_name, scan_kwargs={
'doc_type': adapter.doc_type()
})
enable_refresh(index_name)
# At this step, we don't want error handler to delete the index
# in case of error
set_alias(index_name, delete=not keep) | [
"def",
"index",
"(",
"models",
"=",
"None",
",",
"name",
"=",
"None",
",",
"force",
"=",
"False",
",",
"keep",
"=",
"False",
")",
":",
"index_name",
"=",
"name",
"or",
"default_index_name",
"(",
")",
"doc_types_names",
"=",
"[",
"m",
".",
"__name__",
".",
"lower",
"(",
")",
"for",
"m",
"in",
"adapter_catalog",
".",
"keys",
"(",
")",
"]",
"models",
"=",
"[",
"model",
".",
"lower",
"(",
")",
".",
"rstrip",
"(",
"'s'",
")",
"for",
"model",
"in",
"(",
"models",
"or",
"[",
"]",
")",
"]",
"for",
"model",
"in",
"models",
":",
"if",
"model",
"not",
"in",
"doc_types_names",
":",
"log",
".",
"error",
"(",
"'Unknown model %s'",
",",
"model",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"log",
".",
"info",
"(",
"'Initiliazing index \"{0}\"'",
".",
"format",
"(",
"index_name",
")",
")",
"if",
"es",
".",
"indices",
".",
"exists",
"(",
"index_name",
")",
":",
"if",
"IS_TTY",
"and",
"not",
"force",
":",
"msg",
"=",
"'Index {0} will be deleted, are you sure?'",
"click",
".",
"confirm",
"(",
"msg",
".",
"format",
"(",
"index_name",
")",
",",
"abort",
"=",
"True",
")",
"es",
".",
"indices",
".",
"delete",
"(",
"index_name",
")",
"es",
".",
"initialize",
"(",
"index_name",
")",
"with",
"handle_error",
"(",
"index_name",
",",
"keep",
")",
":",
"disable_refresh",
"(",
"index_name",
")",
"for",
"adapter",
"in",
"iter_adapters",
"(",
")",
":",
"if",
"not",
"models",
"or",
"adapter",
".",
"doc_type",
"(",
")",
".",
"lower",
"(",
")",
"in",
"models",
":",
"index_model",
"(",
"index_name",
",",
"adapter",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Copying {0} objects to the new index'",
".",
"format",
"(",
"adapter",
".",
"model",
".",
"__name__",
")",
")",
"# Need upgrade to Elasticsearch-py 5.0.0 to write:",
"# es.reindex({",
"# 'source': {'index': es.index_name, 'type': adapter.doc_type()},",
"# 'dest': {'index': index_name}",
"# })",
"#",
"# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex",
"# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)",
"# triggers a server-side documents copy.",
"# Instead we use this helper for meant for backward compatibility",
"# but with poor performance as copy is client-side (scan+bulk)",
"es_reindex",
"(",
"es",
".",
"client",
",",
"es",
".",
"index_name",
",",
"index_name",
",",
"scan_kwargs",
"=",
"{",
"'doc_type'",
":",
"adapter",
".",
"doc_type",
"(",
")",
"}",
")",
"enable_refresh",
"(",
"index_name",
")",
"# At this step, we don't want error handler to delete the index",
"# in case of error",
"set_alias",
"(",
"index_name",
",",
"delete",
"=",
"not",
"keep",
")"
] | Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed. | [
"Initialize",
"or",
"rebuild",
"the",
"search",
"index"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L162-L214 | train |
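Note: a minimal, self-contained sketch of the model-name normalization this command applies before validating its arguments (extracted logic only; the CLI wiring, the `es` client and the adapters are not reproduced here):

# 'Datasets' normalizes to 'dataset'; names not in the catalog are rejected.
doc_types_names = ['dataset', 'reuse', 'organization']  # example catalog
models = [model.lower().rstrip('s') for model in ('Datasets', 'reuse')]
unknown = [m for m in models if m not in doc_types_names]
assert models == ['dataset', 'reuse'] and not unknown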
opendatateam/udata | udata/app.py | create_app | def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app | python | def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app | [
"def",
"create_app",
"(",
"config",
"=",
"'udata.settings.Defaults'",
",",
"override",
"=",
"None",
",",
"init_logging",
"=",
"init_logging",
")",
":",
"app",
"=",
"UDataApp",
"(",
"APP_NAME",
")",
"app",
".",
"config",
".",
"from_object",
"(",
"config",
")",
"settings",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'UDATA_SETTINGS'",
",",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'udata.cfg'",
")",
")",
"if",
"exists",
"(",
"settings",
")",
":",
"app",
".",
"settings_file",
"=",
"settings",
"# Keep track of loaded settings for diagnostic",
"app",
".",
"config",
".",
"from_pyfile",
"(",
"settings",
")",
"if",
"override",
":",
"app",
".",
"config",
".",
"from_object",
"(",
"override",
")",
"# Loads defaults from plugins",
"for",
"pkg",
"in",
"entrypoints",
".",
"get_roots",
"(",
"app",
")",
":",
"if",
"pkg",
"==",
"'udata'",
":",
"continue",
"# Defaults are already loaded",
"module",
"=",
"'{}.settings'",
".",
"format",
"(",
"pkg",
")",
"if",
"pkgutil",
".",
"find_loader",
"(",
"module",
")",
":",
"settings",
"=",
"pkgutil",
".",
"get_loader",
"(",
"module",
")",
"for",
"key",
",",
"default",
"in",
"settings",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"app",
".",
"config",
".",
"setdefault",
"(",
"key",
",",
"default",
")",
"app",
".",
"json_encoder",
"=",
"UDataJsonEncoder",
"app",
".",
"debug",
"=",
"app",
".",
"config",
"[",
"'DEBUG'",
"]",
"and",
"not",
"app",
".",
"config",
"[",
"'TESTING'",
"]",
"app",
".",
"wsgi_app",
"=",
"ProxyFix",
"(",
"app",
".",
"wsgi_app",
")",
"init_logging",
"(",
"app",
")",
"register_extensions",
"(",
"app",
")",
"return",
"app"
] | Factory for a minimal application | [
"Factory",
"for",
"a",
"minimal",
"application"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/app.py#L155-L188 | train |
opendatateam/udata | udata/app.py | standalone | def standalone(app):
'''Factory for an all in one application'''
from udata import api, core, frontend
core.init_app(app)
frontend.init_app(app)
api.init_app(app)
register_features(app)
return app | python | def standalone(app):
'''Factory for an all in one application'''
from udata import api, core, frontend
core.init_app(app)
frontend.init_app(app)
api.init_app(app)
register_features(app)
return app | [
"def",
"standalone",
"(",
"app",
")",
":",
"from",
"udata",
"import",
"api",
",",
"core",
",",
"frontend",
"core",
".",
"init_app",
"(",
"app",
")",
"frontend",
".",
"init_app",
"(",
"app",
")",
"api",
".",
"init_app",
"(",
"app",
")",
"register_features",
"(",
"app",
")",
"return",
"app"
] | Factory for an all in one application | [
"Factory",
"for",
"an",
"all",
"in",
"one",
"application"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/app.py#L191-L201 | train |
opendatateam/udata | udata/commands/db.py | get_migration | def get_migration(plugin, filename):
'''Get an existing migration record if it exists'''
db = get_db()
return db.migrations.find_one({'plugin': plugin, 'filename': filename}) | python | def get_migration(plugin, filename):
'''Get an existing migration record if it exists'''
db = get_db()
return db.migrations.find_one({'plugin': plugin, 'filename': filename}) | [
"def",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
":",
"db",
"=",
"get_db",
"(",
")",
"return",
"db",
".",
"migrations",
".",
"find_one",
"(",
"{",
"'plugin'",
":",
"plugin",
",",
"'filename'",
":",
"filename",
"}",
")"
] | Get an existing migration record if it exists | [
"Get",
"an",
"existing",
"migration",
"record",
"if",
"exists"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L85-L88 | train |
opendatateam/udata | udata/commands/db.py | record_migration | def record_migration(plugin, filename, script, **kwargs):
'''Only record a migration without applying it'''
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True | python | def record_migration(plugin, filename, script, **kwargs):
'''Only record a migration without applying it'''
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True | [
"def",
"record_migration",
"(",
"plugin",
",",
"filename",
",",
"script",
",",
"*",
"*",
"kwargs",
")",
":",
"db",
"=",
"get_db",
"(",
")",
"db",
".",
"eval",
"(",
"RECORD_WRAPPER",
",",
"plugin",
",",
"filename",
",",
"script",
")",
"return",
"True"
] | Only record a migration without applying it | [
"Only",
"record",
"a",
"migration",
"without",
"applying",
"it"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L115-L119 | train |
opendatateam/udata | udata/commands/db.py | available_migrations | def available_migrations():
'''
List available migrations for udata and enabled plugins
Each row is a tuple with the following signature:
(plugin, package, filename)
'''
migrations = []
for filename in resource_listdir('udata', 'migrations'):
if filename.endswith('.js'):
migrations.append(('udata', 'udata', filename))
plugins = entrypoints.get_enabled('udata.models', current_app)
for plugin, module in plugins.items():
if resource_isdir(module.__name__, 'migrations'):
for filename in resource_listdir(module.__name__, 'migrations'):
if filename.endswith('.js'):
migrations.append((plugin, module.__name__, filename))
return sorted(migrations, key=lambda r: r[2]) | python | def available_migrations():
'''
List available migrations for udata and enabled plugins
Each row is a tuple with the following signature:
(plugin, package, filename)
'''
migrations = []
for filename in resource_listdir('udata', 'migrations'):
if filename.endswith('.js'):
migrations.append(('udata', 'udata', filename))
plugins = entrypoints.get_enabled('udata.models', current_app)
for plugin, module in plugins.items():
if resource_isdir(module.__name__, 'migrations'):
for filename in resource_listdir(module.__name__, 'migrations'):
if filename.endswith('.js'):
migrations.append((plugin, module.__name__, filename))
return sorted(migrations, key=lambda r: r[2]) | [
"def",
"available_migrations",
"(",
")",
":",
"migrations",
"=",
"[",
"]",
"for",
"filename",
"in",
"resource_listdir",
"(",
"'udata'",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"'udata'",
",",
"'udata'",
",",
"filename",
")",
")",
"plugins",
"=",
"entrypoints",
".",
"get_enabled",
"(",
"'udata.models'",
",",
"current_app",
")",
"for",
"plugin",
",",
"module",
"in",
"plugins",
".",
"items",
"(",
")",
":",
"if",
"resource_isdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"for",
"filename",
"in",
"resource_listdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"plugin",
",",
"module",
".",
"__name__",
",",
"filename",
")",
")",
"return",
"sorted",
"(",
"migrations",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
"[",
"2",
"]",
")"
] | List available migrations for udata and enabled plugins
Each row is a tuple with the following signature:
(plugin, package, filename) | [
"List",
"available",
"migrations",
"for",
"udata",
"and",
"enabled",
"plugins"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L122-L141 | train |
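Note: an illustrative consumer of the (plugin, package, filename) rows returned by `available_migrations`; the sample rows below are made up:

rows = [('udata', 'udata', '2016-01-01-add-slugs.js'),
        ('myplugin', 'udata_myplugin', '2017-05-12-fix-ids.js')]
# Same ordering rule as the function itself: sort on the filename column.
for plugin, package, filename in sorted(rows, key=lambda r: r[2]):
    print('{0}:{1} (shipped in package {2})'.format(plugin, filename, package))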
opendatateam/udata | udata/commands/db.py | log_status | def log_status(plugin, filename, status):
'''Properly display a migration status line'''
display = ':'.join((plugin, filename)) + ' '
log.info('%s [%s]', '{:.<70}'.format(display), status) | python | def log_status(plugin, filename, status):
'''Properly display a migration status line'''
display = ':'.join((plugin, filename)) + ' '
log.info('%s [%s]', '{:.<70}'.format(display), status) | [
"def",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")",
":",
"display",
"=",
"':'",
".",
"join",
"(",
"(",
"plugin",
",",
"filename",
")",
")",
"+",
"' '",
"log",
".",
"info",
"(",
"'%s [%s]'",
",",
"'{:.<70}'",
".",
"format",
"(",
"display",
")",
",",
"status",
")"
] | Properly display a migration status line | [
"Properly",
"display",
"a",
"migration",
"status",
"line"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L144-L147 | train |
opendatateam/udata | udata/commands/db.py | status | def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status) | python | def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status) | [
"def",
"status",
"(",
")",
":",
"for",
"plugin",
",",
"package",
",",
"filename",
"in",
"available_migrations",
"(",
")",
":",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
":",
"status",
"=",
"green",
"(",
"migration",
"[",
"'date'",
"]",
".",
"strftime",
"(",
"DATE_FORMAT",
")",
")",
"else",
":",
"status",
"=",
"yellow",
"(",
"'Not applied'",
")",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")"
] | Display the database migrations status | [
"Display",
"the",
"database",
"migrations",
"status"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L151-L159 | train |
opendatateam/udata | udata/commands/db.py | migrate | def migrate(record, dry_run=False):
'''Perform database migrations'''
handler = record_migration if record else execute_migration
success = True
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration or not success:
log_status(plugin, filename, cyan('Skipped'))
else:
status = magenta('Recorded') if record else yellow('Apply')
log_status(plugin, filename, status)
script = resource_string(package, join('migrations', filename))
success &= handler(plugin, filename, script, dryrun=dry_run) | python | def migrate(record, dry_run=False):
'''Perform database migrations'''
handler = record_migration if record else execute_migration
success = True
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration or not success:
log_status(plugin, filename, cyan('Skipped'))
else:
status = magenta('Recorded') if record else yellow('Apply')
log_status(plugin, filename, status)
script = resource_string(package, join('migrations', filename))
success &= handler(plugin, filename, script, dryrun=dry_run) | [
"def",
"migrate",
"(",
"record",
",",
"dry_run",
"=",
"False",
")",
":",
"handler",
"=",
"record_migration",
"if",
"record",
"else",
"execute_migration",
"success",
"=",
"True",
"for",
"plugin",
",",
"package",
",",
"filename",
"in",
"available_migrations",
"(",
")",
":",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
"or",
"not",
"success",
":",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"cyan",
"(",
"'Skipped'",
")",
")",
"else",
":",
"status",
"=",
"magenta",
"(",
"'Recorded'",
")",
"if",
"record",
"else",
"yellow",
"(",
"'Apply'",
")",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")",
"script",
"=",
"resource_string",
"(",
"package",
",",
"join",
"(",
"'migrations'",
",",
"filename",
")",
")",
"success",
"&=",
"handler",
"(",
"plugin",
",",
"filename",
",",
"script",
",",
"dryrun",
"=",
"dry_run",
")"
] | Perform database migrations | [
"Perform",
"database",
"migrations"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L167-L179 | train |
opendatateam/udata | udata/commands/db.py | unrecord | def unrecord(plugin_or_specs, filename):
'''
Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
- plugin filename.js
- plugin:filename
- plugin:filename.js
'''
plugin, filename = normalize_migration(plugin_or_specs, filename)
migration = get_migration(plugin, filename)
if migration:
log.info('Removing migration %s:%s', plugin, filename)
db = get_db()
db.eval(UNRECORD_WRAPPER, migration['_id'])
else:
log.error('Migration not found %s:%s', plugin, filename) | python | def unrecord(plugin_or_specs, filename):
'''
Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
- plugin filename.js
- plugin:filename
- plugin:filename.js
'''
plugin, filename = normalize_migration(plugin_or_specs, filename)
migration = get_migration(plugin, filename)
if migration:
log.info('Removing migration %s:%s', plugin, filename)
db = get_db()
db.eval(UNRECORD_WRAPPER, migration['_id'])
else:
log.error('Migration not found %s:%s', plugin, filename) | [
"def",
"unrecord",
"(",
"plugin_or_specs",
",",
"filename",
")",
":",
"plugin",
",",
"filename",
"=",
"normalize_migration",
"(",
"plugin_or_specs",
",",
"filename",
")",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
":",
"log",
".",
"info",
"(",
"'Removing migration %s:%s'",
",",
"plugin",
",",
"filename",
")",
"db",
"=",
"get_db",
"(",
")",
"db",
".",
"eval",
"(",
"UNRECORD_WRAPPER",
",",
"migration",
"[",
"'_id'",
"]",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Migration not found %s:%s'",
",",
"plugin",
",",
"filename",
")"
] | Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
- plugin fliename.js
- plugin:filename
- plugin:fliename.js | [
"Remove",
"a",
"database",
"migration",
"record",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L185-L203 | train |
opendatateam/udata | udata/uris.py | validate | def validate(url, schemes=None, tlds=None, private=None, local=None,
credentials=None):
'''
Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate
'''
url = url.strip()
private = config_for(private, 'URLS_ALLOW_PRIVATE')
local = config_for(local, 'URLS_ALLOW_LOCAL')
credentials = config_for(credentials, 'URLS_ALLOW_CREDENTIALS')
schemes = config_for(schemes, 'URLS_ALLOWED_SCHEMES')
tlds = config_for(tlds, 'URLS_ALLOWED_TLDS')
match = URL_REGEX.match(url)
if not match:
error(url)
scheme = (match.group('scheme') or '').lower()
if scheme and scheme not in schemes:
error(url, 'Invalid scheme {0}'.format(scheme))
if not credentials and match.group('credentials'):
error(url, 'Credentials in URL are not allowed')
tld = match.group('tld')
if tld and tld not in tlds and tld.encode('idna') not in tlds:
error(url, 'Invalid TLD {0}'.format(tld))
ip = match.group('ipv6') or match.group('ipv4')
if ip:
try:
ip = IPAddress(ip)
except AddrFormatError:
error(url)
if ip.is_multicast():
error(url, '{0} is a multicast IP'.format(ip))
elif not ip.is_loopback() and ip.is_hostmask() or ip.is_netmask():
error(url, '{0} is a mask IP'.format(ip))
if not local:
if ip and ip.is_loopback() or match.group('localhost'):
error(url, 'is a local URL')
if not private and ip and ip.is_private():
error(url, 'is a private URL')
return url | python | def validate(url, schemes=None, tlds=None, private=None, local=None,
credentials=None):
'''
Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate
'''
url = url.strip()
private = config_for(private, 'URLS_ALLOW_PRIVATE')
local = config_for(local, 'URLS_ALLOW_LOCAL')
credentials = config_for(credentials, 'URLS_ALLOW_CREDENTIALS')
schemes = config_for(schemes, 'URLS_ALLOWED_SCHEMES')
tlds = config_for(tlds, 'URLS_ALLOWED_TLDS')
match = URL_REGEX.match(url)
if not match:
error(url)
scheme = (match.group('scheme') or '').lower()
if scheme and scheme not in schemes:
error(url, 'Invalid scheme {0}'.format(scheme))
if not credentials and match.group('credentials'):
error(url, 'Credentials in URL are not allowed')
tld = match.group('tld')
if tld and tld not in tlds and tld.encode('idna') not in tlds:
error(url, 'Invalid TLD {0}'.format(tld))
ip = match.group('ipv6') or match.group('ipv4')
if ip:
try:
ip = IPAddress(ip)
except AddrFormatError:
error(url)
if ip.is_multicast():
error(url, '{0} is a multicast IP'.format(ip))
elif not ip.is_loopback() and ip.is_hostmask() or ip.is_netmask():
error(url, '{0} is a mask IP'.format(ip))
if not local:
if ip and ip.is_loopback() or match.group('localhost'):
error(url, 'is a local URL')
if not private and ip and ip.is_private():
error(url, 'is a private URL')
return url | [
"def",
"validate",
"(",
"url",
",",
"schemes",
"=",
"None",
",",
"tlds",
"=",
"None",
",",
"private",
"=",
"None",
",",
"local",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"url",
"=",
"url",
".",
"strip",
"(",
")",
"private",
"=",
"config_for",
"(",
"private",
",",
"'URLS_ALLOW_PRIVATE'",
")",
"local",
"=",
"config_for",
"(",
"local",
",",
"'URLS_ALLOW_LOCAL'",
")",
"credentials",
"=",
"config_for",
"(",
"credentials",
",",
"'URLS_ALLOW_CREDENTIALS'",
")",
"schemes",
"=",
"config_for",
"(",
"schemes",
",",
"'URLS_ALLOWED_SCHEMES'",
")",
"tlds",
"=",
"config_for",
"(",
"tlds",
",",
"'URLS_ALLOWED_TLDS'",
")",
"match",
"=",
"URL_REGEX",
".",
"match",
"(",
"url",
")",
"if",
"not",
"match",
":",
"error",
"(",
"url",
")",
"scheme",
"=",
"(",
"match",
".",
"group",
"(",
"'scheme'",
")",
"or",
"''",
")",
".",
"lower",
"(",
")",
"if",
"scheme",
"and",
"scheme",
"not",
"in",
"schemes",
":",
"error",
"(",
"url",
",",
"'Invalid scheme {0}'",
".",
"format",
"(",
"scheme",
")",
")",
"if",
"not",
"credentials",
"and",
"match",
".",
"group",
"(",
"'credentials'",
")",
":",
"error",
"(",
"url",
",",
"'Credentials in URL are not allowed'",
")",
"tld",
"=",
"match",
".",
"group",
"(",
"'tld'",
")",
"if",
"tld",
"and",
"tld",
"not",
"in",
"tlds",
"and",
"tld",
".",
"encode",
"(",
"'idna'",
")",
"not",
"in",
"tlds",
":",
"error",
"(",
"url",
",",
"'Invalid TLD {0}'",
".",
"format",
"(",
"tld",
")",
")",
"ip",
"=",
"match",
".",
"group",
"(",
"'ipv6'",
")",
"or",
"match",
".",
"group",
"(",
"'ipv4'",
")",
"if",
"ip",
":",
"try",
":",
"ip",
"=",
"IPAddress",
"(",
"ip",
")",
"except",
"AddrFormatError",
":",
"error",
"(",
"url",
")",
"if",
"ip",
".",
"is_multicast",
"(",
")",
":",
"error",
"(",
"url",
",",
"'{0} is a multicast IP'",
".",
"format",
"(",
"ip",
")",
")",
"elif",
"not",
"ip",
".",
"is_loopback",
"(",
")",
"and",
"ip",
".",
"is_hostmask",
"(",
")",
"or",
"ip",
".",
"is_netmask",
"(",
")",
":",
"error",
"(",
"url",
",",
"'{0} is a mask IP'",
".",
"format",
"(",
"ip",
")",
")",
"if",
"not",
"local",
":",
"if",
"ip",
"and",
"ip",
".",
"is_loopback",
"(",
")",
"or",
"match",
".",
"group",
"(",
"'localhost'",
")",
":",
"error",
"(",
"url",
",",
"'is a local URL'",
")",
"if",
"not",
"private",
"and",
"ip",
"and",
"ip",
".",
"is_private",
"(",
")",
":",
"error",
"(",
"url",
",",
"'is a private URL'",
")",
"return",
"url"
] | Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate | [
"Validate",
"and",
"normalize",
"an",
"URL"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/uris.py#L69-L119 | train |
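Note: a hedged usage sketch for `validate`. The docstring declares that a ValidationError is raised on failure; its import location below is an assumption, and the explicit keyword values stand in for the config-driven defaults:

from udata.uris import validate, ValidationError  # import path assumed

try:
    url = validate('https://example.com/data.csv',
                   schemes=['http', 'https'],
                   tlds=['com', 'org'],
                   private=False, local=False, credentials=False)
except ValidationError:
    url = None  # rejected: bad scheme, unknown TLD, private/local IP, ...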
opendatateam/udata | udata/core/dataset/models.py | get_json_ld_extra | def get_json_ld_extra(key, value):
'''Serialize an extras key, value pair into JSON-LD'''
value = value.serialize() if hasattr(value, 'serialize') else value
return {
'@type': 'http://schema.org/PropertyValue',
'name': key,
'value': value,
} | python | def get_json_ld_extra(key, value):
'''Serialize an extras key, value pair into JSON-LD'''
value = value.serialize() if hasattr(value, 'serialize') else value
return {
'@type': 'http://schema.org/PropertyValue',
'name': key,
'value': value,
} | [
"def",
"get_json_ld_extra",
"(",
"key",
",",
"value",
")",
":",
"value",
"=",
"value",
".",
"serialize",
"(",
")",
"if",
"hasattr",
"(",
"value",
",",
"'serialize'",
")",
"else",
"value",
"return",
"{",
"'@type'",
":",
"'http://schema.org/PropertyValue'",
",",
"'name'",
":",
"key",
",",
"'value'",
":",
"value",
",",
"}"
] | Serialize an extras key, value pair into JSON-LD | [
"Serialize",
"an",
"extras",
"key",
"value",
"pair",
"into",
"JSON",
"-",
"LD"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L105-L112 | train |
opendatateam/udata | udata/core/dataset/models.py | get_resource | def get_resource(id):
'''Fetch a resource given its UUID'''
dataset = Dataset.objects(resources__id=id).first()
if dataset:
return get_by(dataset.resources, 'id', id)
else:
return CommunityResource.objects(id=id).first() | python | def get_resource(id):
'''Fetch a resource given its UUID'''
dataset = Dataset.objects(resources__id=id).first()
if dataset:
return get_by(dataset.resources, 'id', id)
else:
return CommunityResource.objects(id=id).first() | [
"def",
"get_resource",
"(",
"id",
")",
":",
"dataset",
"=",
"Dataset",
".",
"objects",
"(",
"resources__id",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"dataset",
":",
"return",
"get_by",
"(",
"dataset",
".",
"resources",
",",
"'id'",
",",
"id",
")",
"else",
":",
"return",
"CommunityResource",
".",
"objects",
"(",
"id",
"=",
"id",
")",
".",
"first",
"(",
")"
] | Fetch a resource given its UUID | [
"Fetch",
"a",
"resource",
"given",
"its",
"UUID"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L692-L698 | train |
opendatateam/udata | udata/core/dataset/models.py | License.guess | def guess(cls, *strings, **kwargs):
'''
Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license.
'''
license = None
for string in strings:
license = cls.guess_one(string)
if license:
break
return license or kwargs.get('default') | python | def guess(cls, *strings, **kwargs):
'''
Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license.
'''
license = None
for string in strings:
license = cls.guess_one(string)
if license:
break
return license or kwargs.get('default') | [
"def",
"guess",
"(",
"cls",
",",
"*",
"strings",
",",
"*",
"*",
"kwargs",
")",
":",
"license",
"=",
"None",
"for",
"string",
"in",
"strings",
":",
"license",
"=",
"cls",
".",
"guess_one",
"(",
"string",
")",
"if",
"license",
":",
"break",
"return",
"license",
"or",
"kwargs",
".",
"get",
"(",
"'default'",
")"
] | Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license. | [
"Try",
"to",
"guess",
"a",
"license",
"from",
"a",
"list",
"of",
"strings",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L134-L146 | train |
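Note: a minimal usage sketch for the fallback behaviour of `License.guess`; the import path and the license identifiers are assumptions:

from udata.models import License  # import path assumed

default = License.objects(id='notspecified').first()  # example identifier
license = License.guess('ODbL-1.0', 'Open Database License', default=default)
# -> the first string that resolves to a known license, else `default`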
opendatateam/udata | udata/core/dataset/models.py | License.guess_one | def guess_one(cls, text):
'''
Try to guess license from a string.
Try an exact match on the identifier, then the slugified title,
and fall back on edit distance ranking (after slugification)
'''
if not text:
return
qs = cls.objects
text = text.strip().lower() # Stored identifiers are lower case
slug = cls.slug.slugify(text) # Use slug as it normalize string
license = qs(
db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text)
| db.Q(alternate_urls=text)
).first()
if license is None:
# Try to find a single match with a low Damerau-Levenshtein distance
computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more than one match, we cannot determine
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
if license is None:
# Try to find a single match with a low Damerau-Levenshtein distance
computed = (
(l, rdlevenshtein(cls.slug.slugify(t), slug))
for l in cls.objects
for t in l.alternate_titles
)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more than one match, we cannot determine
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
return license | python | def guess_one(cls, text):
'''
Try to guess license from a string.
Try an exact match on the identifier, then the slugified title,
and fall back on edit distance ranking (after slugification)
'''
if not text:
return
qs = cls.objects
text = text.strip().lower() # Stored identifiers are lower case
slug = cls.slug.slugify(text) # Use slug as it normalize string
license = qs(
db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text)
| db.Q(alternate_urls=text)
).first()
if license is None:
# Try to find a single match with a low Damerau-Levenshtein distance
computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more than one match, we cannot determine
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
if license is None:
# Try to find a single match with a low Damerau-Levenshtein distance
computed = (
(l, rdlevenshtein(cls.slug.slugify(t), slug))
for l in cls.objects
for t in l.alternate_titles
)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more than one match, we cannot determine
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
return license | [
"def",
"guess_one",
"(",
"cls",
",",
"text",
")",
":",
"if",
"not",
"text",
":",
"return",
"qs",
"=",
"cls",
".",
"objects",
"text",
"=",
"text",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"# Stored identifiers are lower case",
"slug",
"=",
"cls",
".",
"slug",
".",
"slugify",
"(",
"text",
")",
"# Use slug as it normalize string",
"license",
"=",
"qs",
"(",
"db",
".",
"Q",
"(",
"id",
"=",
"text",
")",
"|",
"db",
".",
"Q",
"(",
"slug",
"=",
"slug",
")",
"|",
"db",
".",
"Q",
"(",
"url",
"=",
"text",
")",
"|",
"db",
".",
"Q",
"(",
"alternate_urls",
"=",
"text",
")",
")",
".",
"first",
"(",
")",
"if",
"license",
"is",
"None",
":",
"# Try to single match with a low Damerau-Levenshtein distance",
"computed",
"=",
"(",
"(",
"l",
",",
"rdlevenshtein",
"(",
"l",
".",
"slug",
",",
"slug",
")",
")",
"for",
"l",
"in",
"cls",
".",
"objects",
")",
"candidates",
"=",
"[",
"l",
"for",
"l",
",",
"d",
"in",
"computed",
"if",
"d",
"<=",
"MAX_DISTANCE",
"]",
"# If there is more that one match, we cannot determinate",
"# which one is closer to safely choose between candidates",
"if",
"len",
"(",
"candidates",
")",
"==",
"1",
":",
"license",
"=",
"candidates",
"[",
"0",
"]",
"if",
"license",
"is",
"None",
":",
"# Try to single match with a low Damerau-Levenshtein distance",
"computed",
"=",
"(",
"(",
"l",
",",
"rdlevenshtein",
"(",
"cls",
".",
"slug",
".",
"slugify",
"(",
"t",
")",
",",
"slug",
")",
")",
"for",
"l",
"in",
"cls",
".",
"objects",
"for",
"t",
"in",
"l",
".",
"alternate_titles",
")",
"candidates",
"=",
"[",
"l",
"for",
"l",
",",
"d",
"in",
"computed",
"if",
"d",
"<=",
"MAX_DISTANCE",
"]",
"# If there is more that one match, we cannot determinate",
"# which one is closer to safely choose between candidates",
"if",
"len",
"(",
"candidates",
")",
"==",
"1",
":",
"license",
"=",
"candidates",
"[",
"0",
"]",
"return",
"license"
] | Try to guess license from a string.
Try an exact match on the identifier, then the slugified title,
and fall back on edit distance ranking (after slugification) | [
"Try",
"to",
"guess",
"license",
"from",
"a",
"string",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L149-L185 | train |
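Note: an illustration of the ambiguity rule used by both fuzzy passes above, with a stand-in distance function (the real code relies on a Damerau-Levenshtein implementation imported as `rdlevenshtein`):

MAX_DISTANCE = 2  # example threshold; the real constant is defined elsewhere

def distance(a, b):
    # Stand-in only: any edit-distance function plays the same role here.
    return sum(1 for x, y in zip(a, b) if x != y) + abs(len(a) - len(b))

slugs = ['cc-by', 'cc-by-sa', 'odbl-10']
candidates = [s for s in slugs if distance(s, 'cc-by') <= MAX_DISTANCE]
# A single close candidate is accepted; several would be ambiguous.
license = candidates[0] if len(candidates) == 1 else None
assert license == 'cc-by'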
opendatateam/udata | udata/core/dataset/models.py | ResourceMixin.need_check | def need_check(self):
'''Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability.
'''
min_cache_duration, max_cache_duration, ko_threshold = [
current_app.config.get(k) for k in (
'LINKCHECKING_MIN_CACHE_DURATION',
'LINKCHECKING_MAX_CACHE_DURATION',
'LINKCHECKING_UNAVAILABLE_THRESHOLD',
)
]
count_availability = self.extras.get('check:count-availability', 1)
is_available = self.check_availability()
if is_available == 'unknown':
return True
elif is_available or count_availability > ko_threshold:
delta = min(min_cache_duration * count_availability,
max_cache_duration)
else:
delta = min_cache_duration
if self.extras.get('check:date'):
limit_date = datetime.now() - timedelta(minutes=delta)
check_date = self.extras['check:date']
if not isinstance(check_date, datetime):
try:
check_date = parse_dt(check_date)
except (ValueError, TypeError):
return True
if check_date >= limit_date:
return False
return True | python | def need_check(self):
'''Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability.
'''
min_cache_duration, max_cache_duration, ko_threshold = [
current_app.config.get(k) for k in (
'LINKCHECKING_MIN_CACHE_DURATION',
'LINKCHECKING_MAX_CACHE_DURATION',
'LINKCHECKING_UNAVAILABLE_THRESHOLD',
)
]
count_availability = self.extras.get('check:count-availability', 1)
is_available = self.check_availability()
if is_available == 'unknown':
return True
elif is_available or count_availability > ko_threshold:
delta = min(min_cache_duration * count_availability,
max_cache_duration)
else:
delta = min_cache_duration
if self.extras.get('check:date'):
limit_date = datetime.now() - timedelta(minutes=delta)
check_date = self.extras['check:date']
if not isinstance(check_date, datetime):
try:
check_date = parse_dt(check_date)
except (ValueError, TypeError):
return True
if check_date >= limit_date:
return False
return True | [
"def",
"need_check",
"(",
"self",
")",
":",
"min_cache_duration",
",",
"max_cache_duration",
",",
"ko_threshold",
"=",
"[",
"current_app",
".",
"config",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"(",
"'LINKCHECKING_MIN_CACHE_DURATION'",
",",
"'LINKCHECKING_MAX_CACHE_DURATION'",
",",
"'LINKCHECKING_UNAVAILABLE_THRESHOLD'",
",",
")",
"]",
"count_availability",
"=",
"self",
".",
"extras",
".",
"get",
"(",
"'check:count-availability'",
",",
"1",
")",
"is_available",
"=",
"self",
".",
"check_availability",
"(",
")",
"if",
"is_available",
"==",
"'unknown'",
":",
"return",
"True",
"elif",
"is_available",
"or",
"count_availability",
">",
"ko_threshold",
":",
"delta",
"=",
"min",
"(",
"min_cache_duration",
"*",
"count_availability",
",",
"max_cache_duration",
")",
"else",
":",
"delta",
"=",
"min_cache_duration",
"if",
"self",
".",
"extras",
".",
"get",
"(",
"'check:date'",
")",
":",
"limit_date",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"minutes",
"=",
"delta",
")",
"check_date",
"=",
"self",
".",
"extras",
"[",
"'check:date'",
"]",
"if",
"not",
"isinstance",
"(",
"check_date",
",",
"datetime",
")",
":",
"try",
":",
"check_date",
"=",
"parse_dt",
"(",
"check_date",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"True",
"if",
"check_date",
">=",
"limit_date",
":",
"return",
"False",
"return",
"True"
] | Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability. | [
"Does",
"the",
"resource",
"needs",
"to",
"be",
"checked",
"against",
"its",
"linkchecker?"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L258-L291 | train |
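Note: a worked example of the cache backoff arithmetic in `need_check` (minute values are illustrative; the real ones come from application config):

min_cache_duration, max_cache_duration = 60, 1440  # example config, minutes
# A resource seen available twice is rechecked after 120 minutes...
assert min(min_cache_duration * 2, max_cache_duration) == 120
# ...while a long-available one (30 checks) is capped at the maximum.
assert min(min_cache_duration * 30, max_cache_duration) == 1440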
opendatateam/udata | udata/core/dataset/models.py | Dataset.check_availability | def check_availability(self):
"""Check if resources from that dataset are available.
Return a list of (boolean or 'unknown')
"""
# Only check remote resources.
remote_resources = [resource
for resource in self.resources
if resource.filetype == 'remote']
if not remote_resources:
return []
return [resource.check_availability() for resource in remote_resources] | python | def check_availability(self):
"""Check if resources from that dataset are available.
Return a list of (boolean or 'unknown')
"""
# Only check remote resources.
remote_resources = [resource
for resource in self.resources
if resource.filetype == 'remote']
if not remote_resources:
return []
return [resource.check_availability() for resource in remote_resources] | [
"def",
"check_availability",
"(",
"self",
")",
":",
"# Only check remote resources.",
"remote_resources",
"=",
"[",
"resource",
"for",
"resource",
"in",
"self",
".",
"resources",
"if",
"resource",
".",
"filetype",
"==",
"'remote'",
"]",
"if",
"not",
"remote_resources",
":",
"return",
"[",
"]",
"return",
"[",
"resource",
".",
"check_availability",
"(",
")",
"for",
"resource",
"in",
"remote_resources",
"]"
] | Check if resources from that dataset are available.
Return a list of (boolean or 'unknown') | [
"Check",
"if",
"resources",
"from",
"that",
"dataset",
"are",
"available",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L471-L482 | train |
opendatateam/udata | udata/core/dataset/models.py | Dataset.next_update | def next_update(self):
"""Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
"""
delta = None
if self.frequency == 'daily':
delta = timedelta(days=1)
elif self.frequency == 'weekly':
delta = timedelta(weeks=1)
elif self.frequency == 'fortnighly':
delta = timedelta(weeks=2)
elif self.frequency == 'monthly':
delta = timedelta(weeks=4)
elif self.frequency == 'bimonthly':
delta = timedelta(weeks=4 * 2)
elif self.frequency == 'quarterly':
delta = timedelta(weeks=52 / 4)
elif self.frequency == 'biannual':
delta = timedelta(weeks=52 / 2)
elif self.frequency == 'annual':
delta = timedelta(weeks=52)
elif self.frequency == 'biennial':
delta = timedelta(weeks=52 * 2)
elif self.frequency == 'triennial':
delta = timedelta(weeks=52 * 3)
elif self.frequency == 'quinquennial':
delta = timedelta(weeks=52 * 5)
if delta is None:
return
else:
return self.last_update + delta | python | def next_update(self):
"""Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
"""
delta = None
if self.frequency == 'daily':
delta = timedelta(days=1)
elif self.frequency == 'weekly':
delta = timedelta(weeks=1)
elif self.frequency == 'fortnighly':
delta = timedelta(weeks=2)
elif self.frequency == 'monthly':
delta = timedelta(weeks=4)
elif self.frequency == 'bimonthly':
delta = timedelta(weeks=4 * 2)
elif self.frequency == 'quarterly':
delta = timedelta(weeks=52 / 4)
elif self.frequency == 'biannual':
delta = timedelta(weeks=52 / 2)
elif self.frequency == 'annual':
delta = timedelta(weeks=52)
elif self.frequency == 'biennial':
delta = timedelta(weeks=52 * 2)
elif self.frequency == 'triennial':
delta = timedelta(weeks=52 * 3)
elif self.frequency == 'quinquennial':
delta = timedelta(weeks=52 * 5)
if delta is None:
return
else:
return self.last_update + delta | [
"def",
"next_update",
"(",
"self",
")",
":",
"delta",
"=",
"None",
"if",
"self",
".",
"frequency",
"==",
"'daily'",
":",
"delta",
"=",
"timedelta",
"(",
"days",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'weekly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'fortnighly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'monthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'bimonthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quarterly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biannual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'annual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'triennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"3",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quinquennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"5",
")",
"if",
"delta",
"is",
"None",
":",
"return",
"else",
":",
"return",
"self",
".",
"last_update",
"+",
"delta"
] | Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled. | [
"Compute",
"the",
"next",
"expected",
"update",
"date"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L492-L524 | train |
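Note: the same frequency table expressed as a dict lookup, behaviour kept identical (including the misspelled 'fortnighly' key, which reproduces the identifier actually used by the code):

from datetime import datetime, timedelta

DELTAS = {
    'daily': timedelta(days=1),
    'weekly': timedelta(weeks=1),
    'fortnighly': timedelta(weeks=2),
    'monthly': timedelta(weeks=4),
    'bimonthly': timedelta(weeks=4 * 2),
    'quarterly': timedelta(weeks=52 / 4),
    'biannual': timedelta(weeks=52 / 2),
    'annual': timedelta(weeks=52),
    'biennial': timedelta(weeks=52 * 2),
    'triennial': timedelta(weeks=52 * 3),
    'quinquennial': timedelta(weeks=52 * 5),
}

last_update = datetime(2019, 1, 1)  # example value
delta = DELTAS.get('monthly')
next_update = last_update + delta if delta else None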
opendatateam/udata | udata/core/dataset/models.py | Dataset.quality | def quality(self):
"""Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on
"""
from udata.models import Discussion # noqa: Prevent circular imports
result = {}
if not self.id:
# Quality is only relevant on saved Datasets
return result
if self.next_update:
result['frequency'] = self.frequency
result['update_in'] = -(self.next_update - datetime.now()).days
if self.tags:
result['tags_count'] = len(self.tags)
if self.description:
result['description_length'] = len(self.description)
if self.resources:
result['has_resources'] = True
result['has_only_closed_or_no_formats'] = all(
resource.closed_or_no_format for resource in self.resources)
result['has_unavailable_resources'] = not all(
self.check_availability())
discussions = Discussion.objects(subject=self)
if discussions:
result['discussions'] = len(discussions)
result['has_untreated_discussions'] = not all(
discussion.person_involved(self.owner)
for discussion in discussions)
result['score'] = self.compute_quality_score(result)
return result | python | def quality(self):
"""Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on
"""
from udata.models import Discussion # noqa: Prevent circular imports
result = {}
if not self.id:
# Quality is only relevant on saved Datasets
return result
if self.next_update:
result['frequency'] = self.frequency
result['update_in'] = -(self.next_update - datetime.now()).days
if self.tags:
result['tags_count'] = len(self.tags)
if self.description:
result['description_length'] = len(self.description)
if self.resources:
result['has_resources'] = True
result['has_only_closed_or_no_formats'] = all(
resource.closed_or_no_format for resource in self.resources)
result['has_unavailable_resources'] = not all(
self.check_availability())
discussions = Discussion.objects(subject=self)
if discussions:
result['discussions'] = len(discussions)
result['has_untreated_discussions'] = not all(
discussion.person_involved(self.owner)
for discussion in discussions)
result['score'] = self.compute_quality_score(result)
return result | [
"def",
"quality",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Discussion",
"# noqa: Prevent circular imports",
"result",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"id",
":",
"# Quality is only relevant on saved Datasets",
"return",
"result",
"if",
"self",
".",
"next_update",
":",
"result",
"[",
"'frequency'",
"]",
"=",
"self",
".",
"frequency",
"result",
"[",
"'update_in'",
"]",
"=",
"-",
"(",
"self",
".",
"next_update",
"-",
"datetime",
".",
"now",
"(",
")",
")",
".",
"days",
"if",
"self",
".",
"tags",
":",
"result",
"[",
"'tags_count'",
"]",
"=",
"len",
"(",
"self",
".",
"tags",
")",
"if",
"self",
".",
"description",
":",
"result",
"[",
"'description_length'",
"]",
"=",
"len",
"(",
"self",
".",
"description",
")",
"if",
"self",
".",
"resources",
":",
"result",
"[",
"'has_resources'",
"]",
"=",
"True",
"result",
"[",
"'has_only_closed_or_no_formats'",
"]",
"=",
"all",
"(",
"resource",
".",
"closed_or_no_format",
"for",
"resource",
"in",
"self",
".",
"resources",
")",
"result",
"[",
"'has_unavailable_resources'",
"]",
"=",
"not",
"all",
"(",
"self",
".",
"check_availability",
"(",
")",
")",
"discussions",
"=",
"Discussion",
".",
"objects",
"(",
"subject",
"=",
"self",
")",
"if",
"discussions",
":",
"result",
"[",
"'discussions'",
"]",
"=",
"len",
"(",
"discussions",
")",
"result",
"[",
"'has_untreated_discussions'",
"]",
"=",
"not",
"all",
"(",
"discussion",
".",
"person_involved",
"(",
"self",
".",
"owner",
")",
"for",
"discussion",
"in",
"discussions",
")",
"result",
"[",
"'score'",
"]",
"=",
"self",
".",
"compute_quality_score",
"(",
"result",
")",
"return",
"result"
] | Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on | [
"Return",
"a",
"dict",
"filled",
"with",
"metrics",
"related",
"to",
"the",
"inner"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L527-L561 | train |
opendatateam/udata | udata/core/dataset/models.py | Dataset.compute_quality_score | def compute_quality_score(self, quality):
"""Compute the score related to the quality of that dataset."""
score = 0
UNIT = 2
if 'frequency' in quality:
# TODO: should be related to frequency.
if quality['update_in'] < 0:
score += UNIT
else:
score -= UNIT
if 'tags_count' in quality:
if quality['tags_count'] > 3:
score += UNIT
if 'description_length' in quality:
if quality['description_length'] > 100:
score += UNIT
if 'has_resources' in quality:
if quality['has_only_closed_or_no_formats']:
score -= UNIT
else:
score += UNIT
if quality['has_unavailable_resources']:
score -= UNIT
else:
score += UNIT
if 'discussions' in quality:
if quality['has_untreated_discussions']:
score -= UNIT
else:
score += UNIT
if score < 0:
return 0
return score | python | def compute_quality_score(self, quality):
"""Compute the score related to the quality of that dataset."""
score = 0
UNIT = 2
if 'frequency' in quality:
# TODO: should be related to frequency.
if quality['update_in'] < 0:
score += UNIT
else:
score -= UNIT
if 'tags_count' in quality:
if quality['tags_count'] > 3:
score += UNIT
if 'description_length' in quality:
if quality['description_length'] > 100:
score += UNIT
if 'has_resources' in quality:
if quality['has_only_closed_or_no_formats']:
score -= UNIT
else:
score += UNIT
if quality['has_unavailable_resources']:
score -= UNIT
else:
score += UNIT
if 'discussions' in quality:
if quality['has_untreated_discussions']:
score -= UNIT
else:
score += UNIT
if score < 0:
return 0
return score | [
"def",
"compute_quality_score",
"(",
"self",
",",
"quality",
")",
":",
"score",
"=",
"0",
"UNIT",
"=",
"2",
"if",
"'frequency'",
"in",
"quality",
":",
"# TODO: should be related to frequency.",
"if",
"quality",
"[",
"'update_in'",
"]",
"<",
"0",
":",
"score",
"+=",
"UNIT",
"else",
":",
"score",
"-=",
"UNIT",
"if",
"'tags_count'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'tags_count'",
"]",
">",
"3",
":",
"score",
"+=",
"UNIT",
"if",
"'description_length'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'description_length'",
"]",
">",
"100",
":",
"score",
"+=",
"UNIT",
"if",
"'has_resources'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'has_only_closed_or_no_formats'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"quality",
"[",
"'has_unavailable_resources'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"'discussions'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'has_untreated_discussions'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"score",
"<",
"0",
":",
"return",
"0",
"return",
"score"
] | Compute the score related to the quality of that dataset. | [
"Compute",
"the",
"score",
"related",
"to",
"the",
"quality",
"of",
"that",
"dataset",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L563-L595 | train |
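Note: a worked run of the scoring arithmetic above (UNIT = 2) on a made-up quality dict:

quality = {'frequency': 'monthly', 'update_in': -3, 'tags_count': 5,
           'description_length': 250, 'has_resources': True,
           'has_only_closed_or_no_formats': False,
           'has_unavailable_resources': False}
UNIT = 2
score = UNIT            # fresh enough: update_in < 0
score += UNIT           # more than 3 tags
score += UNIT           # description longer than 100 characters
score += UNIT + UNIT    # open formats, and no unavailable resources
assert max(score, 0) == 10  # negative totals would be floored at zero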
opendatateam/udata | udata/core/dataset/models.py | Dataset.add_resource | def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id) | python | def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id) | [
"def",
"add_resource",
"(",
"self",
",",
"resource",
")",
":",
"resource",
".",
"validate",
"(",
")",
"self",
".",
"update",
"(",
"__raw__",
"=",
"{",
"'$push'",
":",
"{",
"'resources'",
":",
"{",
"'$each'",
":",
"[",
"resource",
".",
"to_mongo",
"(",
")",
"]",
",",
"'$position'",
":",
"0",
"}",
"}",
"}",
")",
"self",
".",
"reload",
"(",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
",",
"resource_added",
"=",
"resource",
".",
"id",
")"
] | Perform an atomic prepend for a new resource | [
"Perform",
"an",
"atomic",
"prepend",
"for",
"a",
"new",
"resource"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L602-L615 | train |
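Note: the raw-update trick above in isolation: `$push` with `$each` plus `$position: 0` prepends to the array instead of appending. Plain-pymongo illustration; the database, collection and payload names are made up:

from pymongo import MongoClient

collection = MongoClient().udata_demo.dataset  # made-up database/collection
new_resource = {'id': 'r1', 'title': 'demo resource'}  # made-up payload
collection.update_one(
    {'_id': 'some-dataset-id'},
    {'$push': {'resources': {'$each': [new_resource], '$position': 0}}},
)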
opendatateam/udata | udata/core/dataset/models.py | Dataset.update_resource | def update_resource(self, resource):
'''Perform an atomic update for an existing resource'''
index = self.resources.index(resource)
data = {
'resources__{index}'.format(index=index): resource
}
self.update(**data)
self.reload()
post_save.send(self.__class__, document=self) | python | def update_resource(self, resource):
'''Perform an atomic update for an existing resource'''
index = self.resources.index(resource)
data = {
'resources__{index}'.format(index=index): resource
}
self.update(**data)
self.reload()
post_save.send(self.__class__, document=self) | [
"def",
"update_resource",
"(",
"self",
",",
"resource",
")",
":",
"index",
"=",
"self",
".",
"resources",
".",
"index",
"(",
"resource",
")",
"data",
"=",
"{",
"'resources__{index}'",
".",
"format",
"(",
"index",
"=",
"index",
")",
":",
"resource",
"}",
"self",
".",
"update",
"(",
"*",
"*",
"data",
")",
"self",
".",
"reload",
"(",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
")"
] | Perform an atomic update for an existing resource | [
"Perform",
"an",
"atomic",
"update",
"for",
"an",
"existing",
"resource"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L617-L625 | train |
opendatateam/udata | udata/search/result.py | SearchResult.get_aggregation | def get_aggregation(self, name):
'''
Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled
'''
agg = self.aggregations[name]
if 'buckets' in agg:
return agg['buckets']
else:
return agg | python | def get_aggregation(self, name):
'''
Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled
'''
agg = self.aggregations[name]
if 'buckets' in agg:
return agg['buckets']
else:
return agg | [
"def",
"get_aggregation",
"(",
"self",
",",
"name",
")",
":",
"agg",
"=",
"self",
".",
"aggregations",
"[",
"name",
"]",
"if",
"'buckets'",
"in",
"agg",
":",
"return",
"agg",
"[",
"'buckets'",
"]",
"else",
":",
"return",
"agg"
] | Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled | [
"Fetch",
"an",
"aggregation",
"result",
"given",
"its",
"name"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/result.py#L98-L111 | train |
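Note: the two response shapes this method distinguishes, with made-up payloads; bucket aggregations expose a 'buckets' list, metric aggregations are returned as-is:

bucket_agg = {'buckets': [{'key': 'csv', 'doc_count': 12}]}
metric_agg = {'value': 42.0}

def unwrap(agg):
    return agg['buckets'] if 'buckets' in agg else agg

assert unwrap(bucket_agg) == [{'key': 'csv', 'doc_count': 12}]
assert unwrap(metric_agg) == {'value': 42.0}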
opendatateam/udata | udata/i18n.py | language | def language(lang_code):
'''Force a given language'''
ctx = None
if not request:
ctx = current_app.test_request_context()
ctx.push()
backup = g.get('lang_code')
g.lang_code = lang_code
refresh()
yield
g.lang_code = backup
if ctx:
ctx.pop()
refresh() | python | def language(lang_code):
'''Force a given language'''
ctx = None
if not request:
ctx = current_app.test_request_context()
ctx.push()
backup = g.get('lang_code')
g.lang_code = lang_code
refresh()
yield
g.lang_code = backup
if ctx:
ctx.pop()
refresh() | [
"def",
"language",
"(",
"lang_code",
")",
":",
"ctx",
"=",
"None",
"if",
"not",
"request",
":",
"ctx",
"=",
"current_app",
".",
"test_request_context",
"(",
")",
"ctx",
".",
"push",
"(",
")",
"backup",
"=",
"g",
".",
"get",
"(",
"'lang_code'",
")",
"g",
".",
"lang_code",
"=",
"lang_code",
"refresh",
"(",
")",
"yield",
"g",
".",
"lang_code",
"=",
"backup",
"if",
"ctx",
":",
"ctx",
".",
"pop",
"(",
")",
"refresh",
"(",
")"
] | Force a given language | [
"Force",
"a",
"given",
"language"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L145-L158 | train |
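Note: typical use of the `language` context manager, e.g. forcing a locale while rendering a notification; the translation helper import is an assumption:

from udata.i18n import language, lazy_gettext as _  # `lazy_gettext` assumed

with language('fr'):
    subject = str(_('New dataset published'))  # resolved with lang forced to 'fr'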
opendatateam/udata | udata/i18n.py | redirect_to_lang | def redirect_to_lang(*args, **kwargs):
'''Redirect non lang-prefixed urls to the default language.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs['lang_code'] = default_lang
return redirect(url_for(endpoint, **kwargs)) | python | def redirect_to_lang(*args, **kwargs):
'''Redirect non lang-prefixed urls to the default language.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs['lang_code'] = default_lang
return redirect(url_for(endpoint, **kwargs)) | [
"def",
"redirect_to_lang",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"request",
".",
"endpoint",
".",
"replace",
"(",
"'_redirect'",
",",
"''",
")",
"kwargs",
"=",
"multi_to_dict",
"(",
"request",
".",
"args",
")",
"kwargs",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"kwargs",
"[",
"'lang_code'",
"]",
"=",
"default_lang",
"return",
"redirect",
"(",
"url_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
")"
] | Redirect non lang-prefixed urls to default language. | [
"Redirect",
"non",
"lang",
"-",
"prefixed",
"urls",
"to",
"default",
"language",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L203-L209 | train |
opendatateam/udata | udata/i18n.py | redirect_to_unlocalized | def redirect_to_unlocalized(*args, **kwargs):
'''Redirect lang-prefixed urls to the unprefixed URL.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs.pop('lang_code', None)
return redirect(url_for(endpoint, **kwargs)) | python | def redirect_to_unlocalized(*args, **kwargs):
'''Redirect lang-prefixed urls to the unprefixed URL.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs.pop('lang_code', None)
return redirect(url_for(endpoint, **kwargs)) | [
"def",
"redirect_to_unlocalized",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"request",
".",
"endpoint",
".",
"replace",
"(",
"'_redirect'",
",",
"''",
")",
"kwargs",
"=",
"multi_to_dict",
"(",
"request",
".",
"args",
")",
"kwargs",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"kwargs",
".",
"pop",
"(",
"'lang_code'",
",",
"None",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
")"
] | Redirect lang-prefixed urls to the unprefixed URL. | [
"Redirect",
"lang",
"-",
"prefixed",
"urls",
"to",
"no",
"prefixed",
"URL",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L212-L218 | train |
opendatateam/udata | udata/i18n.py | PluggableDomain.get_translations | def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = stack.top
if ctx is None:
return NullTranslations()
locale = get_locale()
cache = self.get_translations_cache(ctx)
translations = cache.get(str(locale))
if translations is None:
translations_dir = self.get_translations_path(ctx)
translations = Translations.load(translations_dir, locale,
domain=self.domain)
# Load plugins translations
if isinstance(translations, Translations):
# Load core extensions translations
from wtforms.i18n import messages_path
wtforms_translations = Translations.load(messages_path(),
locale,
domain='wtforms')
translations.merge(wtforms_translations)
import flask_security
flask_security_translations = Translations.load(
join(flask_security.__path__[0], 'translations'),
locale,
domain='flask_security'
)
translations.merge(flask_security_translations)
for pkg in entrypoints.get_roots(current_app):
package = pkgutil.get_loader(pkg)
path = join(package.filename, 'translations')
domains = [f.replace(path, '').replace('.pot', '')[1:]
for f in iglob(join(path, '*.pot'))]
for domain in domains:
translations.merge(Translations.load(path, locale,
domain=domain))
# Allows the theme to provide or override translations
from . import theme
theme_translations_dir = join(theme.current.path, 'translations')
if exists(theme_translations_dir):
domain = theme.current.identifier
theme_translations = Translations.load(theme_translations_dir,
locale,
domain=domain)
translations.merge(theme_translations)
cache[str(locale)] = translations
return translations | python | def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = stack.top
if ctx is None:
return NullTranslations()
locale = get_locale()
cache = self.get_translations_cache(ctx)
translations = cache.get(str(locale))
if translations is None:
translations_dir = self.get_translations_path(ctx)
translations = Translations.load(translations_dir, locale,
domain=self.domain)
# Load plugins translations
if isinstance(translations, Translations):
# Load core extensions translations
from wtforms.i18n import messages_path
wtforms_translations = Translations.load(messages_path(),
locale,
domain='wtforms')
translations.merge(wtforms_translations)
import flask_security
flask_security_translations = Translations.load(
join(flask_security.__path__[0], 'translations'),
locale,
domain='flask_security'
)
translations.merge(flask_security_translations)
for pkg in entrypoints.get_roots(current_app):
package = pkgutil.get_loader(pkg)
path = join(package.filename, 'translations')
domains = [f.replace(path, '').replace('.pot', '')[1:]
for f in iglob(join(path, '*.pot'))]
for domain in domains:
translations.merge(Translations.load(path, locale,
domain=domain))
# Allows the theme to provide or override translations
from . import theme
theme_translations_dir = join(theme.current.path, 'translations')
if exists(theme_translations_dir):
domain = theme.current.identifier
theme_translations = Translations.load(theme_translations_dir,
locale,
domain=domain)
translations.merge(theme_translations)
cache[str(locale)] = translations
return translations | [
"def",
"get_translations",
"(",
"self",
")",
":",
"ctx",
"=",
"stack",
".",
"top",
"if",
"ctx",
"is",
"None",
":",
"return",
"NullTranslations",
"(",
")",
"locale",
"=",
"get_locale",
"(",
")",
"cache",
"=",
"self",
".",
"get_translations_cache",
"(",
"ctx",
")",
"translations",
"=",
"cache",
".",
"get",
"(",
"str",
"(",
"locale",
")",
")",
"if",
"translations",
"is",
"None",
":",
"translations_dir",
"=",
"self",
".",
"get_translations_path",
"(",
"ctx",
")",
"translations",
"=",
"Translations",
".",
"load",
"(",
"translations_dir",
",",
"locale",
",",
"domain",
"=",
"self",
".",
"domain",
")",
"# Load plugins translations",
"if",
"isinstance",
"(",
"translations",
",",
"Translations",
")",
":",
"# Load core extensions translations",
"from",
"wtforms",
".",
"i18n",
"import",
"messages_path",
"wtforms_translations",
"=",
"Translations",
".",
"load",
"(",
"messages_path",
"(",
")",
",",
"locale",
",",
"domain",
"=",
"'wtforms'",
")",
"translations",
".",
"merge",
"(",
"wtforms_translations",
")",
"import",
"flask_security",
"flask_security_translations",
"=",
"Translations",
".",
"load",
"(",
"join",
"(",
"flask_security",
".",
"__path__",
"[",
"0",
"]",
",",
"'translations'",
")",
",",
"locale",
",",
"domain",
"=",
"'flask_security'",
")",
"translations",
".",
"merge",
"(",
"flask_security_translations",
")",
"for",
"pkg",
"in",
"entrypoints",
".",
"get_roots",
"(",
"current_app",
")",
":",
"package",
"=",
"pkgutil",
".",
"get_loader",
"(",
"pkg",
")",
"path",
"=",
"join",
"(",
"package",
".",
"filename",
",",
"'translations'",
")",
"domains",
"=",
"[",
"f",
".",
"replace",
"(",
"path",
",",
"''",
")",
".",
"replace",
"(",
"'.pot'",
",",
"''",
")",
"[",
"1",
":",
"]",
"for",
"f",
"in",
"iglob",
"(",
"join",
"(",
"path",
",",
"'*.pot'",
")",
")",
"]",
"for",
"domain",
"in",
"domains",
":",
"translations",
".",
"merge",
"(",
"Translations",
".",
"load",
"(",
"path",
",",
"locale",
",",
"domain",
"=",
"domain",
")",
")",
"# Allows the theme to provide or override translations",
"from",
".",
"import",
"theme",
"theme_translations_dir",
"=",
"join",
"(",
"theme",
".",
"current",
".",
"path",
",",
"'translations'",
")",
"if",
"exists",
"(",
"theme_translations_dir",
")",
":",
"domain",
"=",
"theme",
".",
"current",
".",
"identifier",
"theme_translations",
"=",
"Translations",
".",
"load",
"(",
"theme_translations_dir",
",",
"locale",
",",
"domain",
"=",
"domain",
")",
"translations",
".",
"merge",
"(",
"theme_translations",
")",
"cache",
"[",
"str",
"(",
"locale",
")",
"]",
"=",
"translations",
"return",
"translations"
] | Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found. | [
"Returns",
"the",
"correct",
"gettext",
"translations",
"that",
"should",
"be",
"used",
"for",
"this",
"request",
".",
"This",
"will",
"never",
"fail",
"and",
"return",
"a",
"dummy",
"translation",
"object",
"if",
"used",
"outside",
"of",
"the",
"request",
"or",
"if",
"a",
"translation",
"cannot",
"be",
"found",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L37-L96 | train |
opendatateam/udata | udata/core/discussions/models.py | Discussion.person_involved | def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion) | python | def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion) | [
"def",
"person_involved",
"(",
"self",
",",
"person",
")",
":",
"return",
"any",
"(",
"message",
".",
"posted_by",
"==",
"person",
"for",
"message",
"in",
"self",
".",
"discussion",
")"
] | Return True if the given person has been involved in the
discussion, False otherwise. | [
"Return",
"True",
"if",
"the",
"given",
"person",
"has",
"been",
"involved",
"in",
"the"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/models.py#L37-L42 | train |
opendatateam/udata | udata/linkchecker/checker.py | is_ignored | def is_ignored(resource):
'''Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS'''
ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
url = resource.url
if url:
parsed_url = urlparse(url)
return parsed_url.netloc in ignored_domains
return True | python | def is_ignored(resource):
'''Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS'''
ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
url = resource.url
if url:
parsed_url = urlparse(url)
return parsed_url.netloc in ignored_domains
return True | [
"def",
"is_ignored",
"(",
"resource",
")",
":",
"ignored_domains",
"=",
"current_app",
".",
"config",
"[",
"'LINKCHECKING_IGNORE_DOMAINS'",
"]",
"url",
"=",
"resource",
".",
"url",
"if",
"url",
":",
"parsed_url",
"=",
"urlparse",
"(",
"url",
")",
"return",
"parsed_url",
".",
"netloc",
"in",
"ignored_domains",
"return",
"True"
] | Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS | [
"Check",
"of",
"the",
"resource",
"s",
"URL",
"is",
"part",
"of",
"LINKCHECKING_IGNORE_DOMAINS"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/checker.py#L25-L32 | train |
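A runnable restatement of the decision logic above, with the Flask app context factored out so the three cases are easy to see; the domain names are illustrative.

from urlparse import urlparse  # Python 2, matching the surrounding code

def is_ignored_standalone(url, ignored_domains):
    # Same branching as the record, minus current_app.config
    if url:
        return urlparse(url).netloc in ignored_domains
    return True

assert is_ignored_standalone('http://static.example.com/x.csv', ['static.example.com'])
assert not is_ignored_standalone('http://other.org/x.csv', ['static.example.com'])
assert is_ignored_standalone(None, ['static.example.com'])  # no URL: nothing to check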
opendatateam/udata | udata/linkchecker/checker.py | check_resource | def check_resource(resource):
'''
Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fallback on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error).
'''
linkchecker_type = resource.extras.get('check:checker')
LinkChecker = get_linkchecker(linkchecker_type)
if not LinkChecker:
return {'error': 'No linkchecker configured.'}, 503
if is_ignored(resource):
return dummy_check_response()
result = LinkChecker().check(resource)
if not result:
return {'error': 'No response from linkchecker'}, 503
elif result.get('check:error'):
return {'error': result['check:error']}, 500
elif not result.get('check:status'):
return {'error': 'No status in response from linkchecker'}, 503
# store the check result in the resource's extras
# XXX maybe this logic should be in the `Resource` model?
previous_status = resource.extras.get('check:available')
check_keys = _get_check_keys(result, resource, previous_status)
resource.extras.update(check_keys)
resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset
return result | python | def check_resource(resource):
'''
Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fallback on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error).
'''
linkchecker_type = resource.extras.get('check:checker')
LinkChecker = get_linkchecker(linkchecker_type)
if not LinkChecker:
return {'error': 'No linkchecker configured.'}, 503
if is_ignored(resource):
return dummy_check_response()
result = LinkChecker().check(resource)
if not result:
return {'error': 'No response from linkchecker'}, 503
elif result.get('check:error'):
return {'error': result['check:error']}, 500
elif not result.get('check:status'):
return {'error': 'No status in response from linkchecker'}, 503
# store the check result in the resource's extras
# XXX maybe this logic should be in the `Resource` model?
previous_status = resource.extras.get('check:available')
check_keys = _get_check_keys(result, resource, previous_status)
resource.extras.update(check_keys)
resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset
return result | [
"def",
"check_resource",
"(",
"resource",
")",
":",
"linkchecker_type",
"=",
"resource",
".",
"extras",
".",
"get",
"(",
"'check:checker'",
")",
"LinkChecker",
"=",
"get_linkchecker",
"(",
"linkchecker_type",
")",
"if",
"not",
"LinkChecker",
":",
"return",
"{",
"'error'",
":",
"'No linkchecker configured.'",
"}",
",",
"503",
"if",
"is_ignored",
"(",
"resource",
")",
":",
"return",
"dummy_check_response",
"(",
")",
"result",
"=",
"LinkChecker",
"(",
")",
".",
"check",
"(",
"resource",
")",
"if",
"not",
"result",
":",
"return",
"{",
"'error'",
":",
"'No response from linkchecker'",
"}",
",",
"503",
"elif",
"result",
".",
"get",
"(",
"'check:error'",
")",
":",
"return",
"{",
"'error'",
":",
"result",
"[",
"'check:error'",
"]",
"}",
",",
"500",
"elif",
"not",
"result",
".",
"get",
"(",
"'check:status'",
")",
":",
"return",
"{",
"'error'",
":",
"'No status in response from linkchecker'",
"}",
",",
"503",
"# store the check result in the resource's extras",
"# XXX maybe this logic should be in the `Resource` model?",
"previous_status",
"=",
"resource",
".",
"extras",
".",
"get",
"(",
"'check:available'",
")",
"check_keys",
"=",
"_get_check_keys",
"(",
"result",
",",
"resource",
",",
"previous_status",
")",
"resource",
".",
"extras",
".",
"update",
"(",
"check_keys",
")",
"resource",
".",
"save",
"(",
"signal_kwargs",
"=",
"{",
"'ignores'",
":",
"[",
"'post_save'",
"]",
"}",
")",
"# Prevent signal triggering on dataset",
"return",
"result"
] | Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fall back on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error). | [
"Check",
"a",
"resource",
"availability",
"against",
"a",
"linkchecker",
"backend"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/checker.py#L40-L74 | train |
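A calling-convention sketch: the function returns either a plain dict (success) or a (dict, status_code) tuple (error), so callers must distinguish the two shapes. The `resource` object and the logger are assumed to exist.

outcome = check_resource(resource)
if isinstance(outcome, tuple):
    payload, status = outcome  # error path, e.g. ({'error': 'No response from linkchecker'}, 503)
    log.warning('link check failed (%s): %s', status, payload['error'])
else:
    # success path: the linkchecker result dict, also persisted in resource.extras
    check_status = outcome.get('check:status')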
opendatateam/udata | udata/models/owned.py | owned_pre_save | def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erases it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner | python | def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erases it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner | [
"def",
"owned_pre_save",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"document",
",",
"Owned",
")",
":",
"return",
"changed_fields",
"=",
"getattr",
"(",
"document",
",",
"'_changed_fields'",
",",
"[",
"]",
")",
"if",
"'organization'",
"in",
"changed_fields",
":",
"if",
"document",
".",
"owner",
":",
"# Change from owner to organization",
"document",
".",
"_previous_owner",
"=",
"document",
".",
"owner",
"document",
".",
"owner",
"=",
"None",
"else",
":",
"# Change from org to another",
"# Need to fetch previous value in base",
"original",
"=",
"sender",
".",
"objects",
".",
"only",
"(",
"'organization'",
")",
".",
"get",
"(",
"pk",
"=",
"document",
".",
"pk",
")",
"document",
".",
"_previous_owner",
"=",
"original",
".",
"organization",
"elif",
"'owner'",
"in",
"changed_fields",
":",
"if",
"document",
".",
"organization",
":",
"# Change from organization to owner",
"document",
".",
"_previous_owner",
"=",
"document",
".",
"organization",
"document",
".",
"organization",
"=",
"None",
"else",
":",
"# Change from owner to another",
"# Need to fetch previous value in base",
"original",
"=",
"sender",
".",
"objects",
".",
"only",
"(",
"'owner'",
")",
".",
"get",
"(",
"pk",
"=",
"document",
".",
"pk",
")",
"document",
".",
"_previous_owner",
"=",
"original",
".",
"owner"
] | Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erases it. | [
"Owned",
"mongoengine",
".",
"pre_save",
"signal",
"handler",
"Need",
"to",
"fetch",
"original",
"owner",
"before",
"the",
"new",
"one",
"erase",
"it",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/owned.py#L41-L68 | train |
opendatateam/udata | udata/models/owned.py | owned_post_save | def owned_post_save(sender, document, **kwargs):
'''
Owned mongoengine.post_save signal handler
Dispatch the `Owned.on_owner_change` signal
once the document has been saved, including the previous owner.
The signal handler should have the following signature:
``def handler(document, previous)``
'''
if isinstance(document, Owned) and hasattr(document, '_previous_owner'):
Owned.on_owner_change.send(document, previous=document._previous_owner) | python | def owned_post_save(sender, document, **kwargs):
'''
Owned mongoengine.post_save signal handler
Dispatch the `Owned.on_owner_change` signal
once the document has been saved, including the previous owner.
The signal handler should have the following signature:
``def handler(document, previous)``
'''
if isinstance(document, Owned) and hasattr(document, '_previous_owner'):
Owned.on_owner_change.send(document, previous=document._previous_owner) | [
"def",
"owned_post_save",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"document",
",",
"Owned",
")",
"and",
"hasattr",
"(",
"document",
",",
"'_previous_owner'",
")",
":",
"Owned",
".",
"on_owner_change",
".",
"send",
"(",
"document",
",",
"previous",
"=",
"document",
".",
"_previous_owner",
")"
] | Owned mongoengine.post_save signal handler
Dispatch the `Owned.on_owner_change` signal
once the document has been saved, including the previous owner.
The signal handler should have the following signature:
``def handler(document, previous)`` | [
"Owned",
"mongoengine",
".",
"post_save",
"signal",
"handler",
"Dispatch",
"the",
"Owned",
".",
"on_owner_change",
"signal",
"once",
"the",
"document",
"has",
"been",
"saved",
"including",
"the",
"previous",
"owner",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/owned.py#L71-L81 | train |
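A sketch of subscribing to the ownership-change signal dispatched by the two handlers above. `on_owner_change` is a blinker-style signal (it is sent with `send(document, previous=...)`), so a two-argument receiver matches; the `reindex` callback is a made-up example.

from udata.models.owned import Owned  # module path taken from the records' URLs

def reindex(document, previous):
    # `previous` is the old owner/organization captured in owned_pre_save
    print('ownership changed on %r (was %r)' % (document, previous))

Owned.on_owner_change.connect(reindex)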
opendatateam/udata | udata/core/dataset/preview.py | get_enabled_plugins | def get_enabled_plugins():
'''
Returns enabled preview plugins.
Plugins are sorted; defaults come last
'''
plugins = entrypoints.get_enabled('udata.preview', current_app).values()
valid = [p for p in plugins if issubclass(p, PreviewPlugin)]
for plugin in plugins:
if plugin not in valid:
clsname = plugin.__name__
msg = '{0} is not a valid preview plugin'.format(clsname)
warnings.warn(msg, PreviewWarning)
return [p() for p in sorted(valid, key=lambda p: 1 if p.fallback else 0)] | python | def get_enabled_plugins():
'''
Returns enabled preview plugins.
Plugins are sorted; defaults come last
'''
plugins = entrypoints.get_enabled('udata.preview', current_app).values()
valid = [p for p in plugins if issubclass(p, PreviewPlugin)]
for plugin in plugins:
if plugin not in valid:
clsname = plugin.__name__
msg = '{0} is not a valid preview plugin'.format(clsname)
warnings.warn(msg, PreviewWarning)
return [p() for p in sorted(valid, key=lambda p: 1 if p.fallback else 0)] | [
"def",
"get_enabled_plugins",
"(",
")",
":",
"plugins",
"=",
"entrypoints",
".",
"get_enabled",
"(",
"'udata.preview'",
",",
"current_app",
")",
".",
"values",
"(",
")",
"valid",
"=",
"[",
"p",
"for",
"p",
"in",
"plugins",
"if",
"issubclass",
"(",
"p",
",",
"PreviewPlugin",
")",
"]",
"for",
"plugin",
"in",
"plugins",
":",
"if",
"plugin",
"not",
"in",
"valid",
":",
"clsname",
"=",
"plugin",
".",
"__name__",
"msg",
"=",
"'{0} is not a valid preview plugin'",
".",
"format",
"(",
"clsname",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"PreviewWarning",
")",
"return",
"[",
"p",
"(",
")",
"for",
"p",
"in",
"sorted",
"(",
"valid",
",",
"key",
"=",
"lambda",
"p",
":",
"1",
"if",
"p",
".",
"fallback",
"else",
"0",
")",
"]"
] | Returns enabled preview plugins.
Plugins are sorted; defaults come last | [
"Returns",
"enabled",
"preview",
"plugins",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/preview.py#L64-L77 | train |
opendatateam/udata | udata/core/dataset/preview.py | get_preview_url | def get_preview_url(resource):
'''
Returns the most pertinent preview URL associated with the resource, if any.
:param ResourceMixin resource: the (community) resource to preview
:return: a preview url to be displayed in an iframe or a new window
:rtype: HttpResponse
'''
candidates = (p.preview_url(resource)
for p in get_enabled_plugins()
if p.can_preview(resource))
return next(iter(candidates), None) | python | def get_preview_url(resource):
'''
Returns the most pertinent preview URL associated with the resource, if any.
:param ResourceMixin resource: the (community) resource to preview
:return: a preview url to be displayed in an iframe or a new window
:rtype: HttpResponse
'''
candidates = (p.preview_url(resource)
for p in get_enabled_plugins()
if p.can_preview(resource))
return next(iter(candidates), None) | [
"def",
"get_preview_url",
"(",
"resource",
")",
":",
"candidates",
"=",
"(",
"p",
".",
"preview_url",
"(",
"resource",
")",
"for",
"p",
"in",
"get_enabled_plugins",
"(",
")",
"if",
"p",
".",
"can_preview",
"(",
"resource",
")",
")",
"return",
"next",
"(",
"iter",
"(",
"candidates",
")",
",",
"None",
")"
] | Returns the most pertinent preview URL associated with the resource, if any.
:param ResourceMixin resource: the (community) resource to preview
:return: a preview url to be displayed in an iframe or a new window
:rtype: HttpResponse | [
"Returns",
"the",
"most",
"pertinent",
"preview",
"URL",
"associated",
"to",
"the",
"resource",
"if",
"any",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/preview.py#L80-L91 | train |
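A minimal plugin sketch inferred from the two functions above, which only rely on a `fallback` attribute and `can_preview`/`preview_url` methods. The class name, the `resource.format` check and the preview-service URL are assumptions, not udata API guarantees.

class CsvPreview(PreviewPlugin):
    fallback = False  # non-fallback plugins sort first in get_enabled_plugins()

    def can_preview(self, resource):
        return (resource.format or '').lower() == 'csv'  # `format` attribute assumed

    def preview_url(self, resource):
        return 'https://preview.example.com/?url=' + resource.url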
opendatateam/udata | udata/utils.py | get_by | def get_by(lst, field, value):
'''Find an object in a list given a field value'''
for row in lst:
if ((isinstance(row, dict) and row.get(field) == value) or
(getattr(row, field, None) == value)):
return row | python | def get_by(lst, field, value):
'''Find an object in a list given a field value'''
for row in lst:
if ((isinstance(row, dict) and row.get(field) == value) or
(getattr(row, field, None) == value)):
return row | [
"def",
"get_by",
"(",
"lst",
",",
"field",
",",
"value",
")",
":",
"for",
"row",
"in",
"lst",
":",
"if",
"(",
"(",
"isinstance",
"(",
"row",
",",
"dict",
")",
"and",
"row",
".",
"get",
"(",
"field",
")",
"==",
"value",
")",
"or",
"(",
"getattr",
"(",
"row",
",",
"field",
",",
"None",
")",
"==",
"value",
")",
")",
":",
"return",
"row"
] | Find an object in a list given a field value | [
"Find",
"an",
"object",
"in",
"a",
"list",
"given",
"a",
"field",
"value"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L22-L27 | train |
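A runnable usage sketch; the function works for both dicts and plain objects because it tries `row.get(field)` first and falls back to `getattr(row, field)`.

rows = [{'name': 'a', 'size': 1}, {'name': 'b', 'size': 2}]
assert get_by(rows, 'name', 'b') == {'name': 'b', 'size': 2}
assert get_by(rows, 'name', 'zzz') is None  # no match: falls off the loop

class Row(object):
    def __init__(self, name):
        self.name = name

assert get_by([Row('x'), Row('y')], 'name', 'y').name == 'y'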
opendatateam/udata | udata/utils.py | multi_to_dict | def multi_to_dict(multi):
'''Transform a Werkzeug multidict into a flat dictionary'''
return dict(
(key, value[0] if len(value) == 1 else value)
for key, value in multi.to_dict(False).items()
) | python | def multi_to_dict(multi):
'''Transform a Werkzeug multidict into a flat dictionary'''
return dict(
(key, value[0] if len(value) == 1 else value)
for key, value in multi.to_dict(False).items()
) | [
"def",
"multi_to_dict",
"(",
"multi",
")",
":",
"return",
"dict",
"(",
"(",
"key",
",",
"value",
"[",
"0",
"]",
"if",
"len",
"(",
"value",
")",
"==",
"1",
"else",
"value",
")",
"for",
"key",
",",
"value",
"in",
"multi",
".",
"to_dict",
"(",
"False",
")",
".",
"items",
"(",
")",
")"
] | Transform a Werkzeug multidict into a flat dictionary | [
"Transform",
"a",
"Werkzeug",
"multidictionnary",
"into",
"a",
"flat",
"dictionnary"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L30-L35 | train |
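A usage sketch with Werkzeug's MultiDict: multi-valued keys stay lists, single-valued keys are flattened to scalars.

from werkzeug.datastructures import MultiDict

args = MultiDict([('tag', 'a'), ('tag', 'b'), ('q', 'water')])
assert multi_to_dict(args) == {'tag': ['a', 'b'], 'q': 'water'}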
opendatateam/udata | udata/utils.py | daterange_start | def daterange_start(value):
'''Parse a date range start boundary'''
if not value:
return None
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
return result
elif dashes == 1:
# Year/Month only
return result.replace(day=1)
else:
# Year only
return result.replace(day=1, month=1) | python | def daterange_start(value):
'''Parse a date range start boundary'''
if not value:
return None
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
return result
elif dashes == 1:
# Year/Month only
return result.replace(day=1)
else:
# Year only
return result.replace(day=1, month=1) | [
"def",
"daterange_start",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"None",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
")",
":",
"return",
"value",
".",
"date",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"date",
")",
":",
"return",
"value",
"result",
"=",
"parse_dt",
"(",
"value",
")",
".",
"date",
"(",
")",
"dashes",
"=",
"value",
".",
"count",
"(",
"'-'",
")",
"if",
"dashes",
">=",
"2",
":",
"return",
"result",
"elif",
"dashes",
"==",
"1",
":",
"# Year/Month only",
"return",
"result",
".",
"replace",
"(",
"day",
"=",
"1",
")",
"else",
":",
"# Year only",
"return",
"result",
".",
"replace",
"(",
"day",
"=",
"1",
",",
"month",
"=",
"1",
")"
] | Parse a date range start boundary | [
"Parse",
"a",
"date",
"range",
"start",
"boundary"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L100-L119 | train |
opendatateam/udata | udata/utils.py | daterange_end | def daterange_end(value):
'''Parse a date range end boundary'''
if not value:
return None
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
# Full date
return result
elif dashes == 1:
# Year/Month
return result + relativedelta(months=+1, days=-1, day=1)
else:
# Year only
return result.replace(month=12, day=31) | python | def daterange_end(value):
'''Parse a date range end boundary'''
if not value:
return None
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
# Full date
return result
elif dashes == 1:
# Year/Month
return result + relativedelta(months=+1, days=-1, day=1)
else:
# Year only
return result.replace(month=12, day=31) | [
"def",
"daterange_end",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"None",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
")",
":",
"return",
"value",
".",
"date",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"date",
")",
":",
"return",
"value",
"result",
"=",
"parse_dt",
"(",
"value",
")",
".",
"date",
"(",
")",
"dashes",
"=",
"value",
".",
"count",
"(",
"'-'",
")",
"if",
"dashes",
">=",
"2",
":",
"# Full date",
"return",
"result",
"elif",
"dashes",
"==",
"1",
":",
"# Year/Month",
"return",
"result",
"+",
"relativedelta",
"(",
"months",
"=",
"+",
"1",
",",
"days",
"=",
"-",
"1",
",",
"day",
"=",
"1",
")",
"else",
":",
"# Year only",
"return",
"result",
".",
"replace",
"(",
"month",
"=",
"12",
",",
"day",
"=",
"31",
")"
] | Parse a date range end boundary | [
"Parse",
"a",
"date",
"range",
"end",
"boundary"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L122-L142 | train |
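Expected boundary values for the two functions above, following the dash-counting logic (full date, year/month, year only). In daterange_end, relativedelta(months=+1, days=-1, day=1) first snaps to the 1st, then moves one month forward and one day back, i.e. the last day of the parsed month.

from datetime import date

assert daterange_start('2014') == date(2014, 1, 1)       # year only
assert daterange_start('2014-06') == date(2014, 6, 1)    # year/month
assert daterange_end('2014') == date(2014, 12, 31)       # year only
assert daterange_end('2014-06') == date(2014, 6, 30)     # last day of June
assert daterange_end('2014-06-15') == date(2014, 6, 15)  # full date, unchanged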
opendatateam/udata | udata/utils.py | to_iso | def to_iso(dt):
'''
Format a date or datetime into an ISO-8601 string
Supports dates before 1900.
'''
if isinstance(dt, datetime):
return to_iso_datetime(dt)
elif isinstance(dt, date):
return to_iso_date(dt) | python | def to_iso(dt):
'''
Format a date or datetime into an ISO-8601 string
Supports dates before 1900.
'''
if isinstance(dt, datetime):
return to_iso_datetime(dt)
elif isinstance(dt, date):
return to_iso_date(dt) | [
"def",
"to_iso",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"return",
"to_iso_datetime",
"(",
"dt",
")",
"elif",
"isinstance",
"(",
"dt",
",",
"date",
")",
":",
"return",
"to_iso_date",
"(",
"dt",
")"
] | Format a date or datetime into an ISO-8601 string
Supports dates before 1900. | [
"Format",
"a",
"date",
"or",
"datetime",
"into",
"an",
"ISO",
"-",
"8601",
"string"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L145-L154 | train |
opendatateam/udata | udata/utils.py | to_iso_datetime | def to_iso_datetime(dt):
'''
Format a date or datetime into an ISO-8601 datetime string.
Time is set to 00:00:00 for dates.
Supports dates before 1900.
'''
if dt:
date_str = to_iso_date(dt)
time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}'.format(
dt=dt) if isinstance(dt, datetime) else '00:00:00'
return 'T'.join((date_str, time_str)) | python | def to_iso_datetime(dt):
'''
Format a date or datetime into an ISO-8601 datetime string.
Time is set to 00:00:00 for dates.
Supports dates before 1900.
'''
if dt:
date_str = to_iso_date(dt)
time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}'.format(
dt=dt) if isinstance(dt, datetime) else '00:00:00'
return 'T'.join((date_str, time_str)) | [
"def",
"to_iso_datetime",
"(",
"dt",
")",
":",
"if",
"dt",
":",
"date_str",
"=",
"to_iso_date",
"(",
"dt",
")",
"time_str",
"=",
"'{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}'",
".",
"format",
"(",
"dt",
"=",
"dt",
")",
"if",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
"else",
"'00:00:00'",
"return",
"'T'",
".",
"join",
"(",
"(",
"date_str",
",",
"time_str",
")",
")"
] | Format a date or datetime into an ISO-8601 datetime string.
Time is set to 00:00:00 for dates.
Supports dates before 1900. | [
"Format",
"a",
"date",
"or",
"datetime",
"into",
"an",
"ISO",
"-",
"8601",
"datetime",
"string",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L167-L179 | train |
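Expected output for the two formatters above. `to_iso_date` is defined alongside them in the same module but outside the extracted records, so its zero-padded output is an assumption consistent with the `{:02d}` time formatting shown.

from datetime import date, datetime

to_iso(date(1855, 7, 8))                        # -> '1855-07-08' (pre-1900 date branch)
to_iso_datetime(date(1855, 7, 8))               # -> '1855-07-08T00:00:00'
to_iso_datetime(datetime(2019, 1, 2, 3, 4, 5))  # -> '2019-01-02T03:04:05'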
opendatateam/udata | udata/utils.py | recursive_get | def recursive_get(obj, key):
'''
Get an attribute or a key recursively.
:param obj: The object to fetch attribute or key on
:type obj: object|dict
:param key: Either a string in dotted-notation or an array of strings
:type key: string|list|tuple
'''
if not obj or not key:
return
parts = key.split('.') if isinstance(key, basestring) else key
key = parts.pop(0)
if isinstance(obj, dict):
value = obj.get(key, None)
else:
value = getattr(obj, key, None)
return recursive_get(value, parts) if parts else value | python | def recursive_get(obj, key):
'''
Get an attribute or a key recursively.
:param obj: The object to fetch attribute or key on
:type obj: object|dict
:param key: Either a string in dotted-notation or an array of strings
:type key: string|list|tuple
'''
if not obj or not key:
return
parts = key.split('.') if isinstance(key, basestring) else key
key = parts.pop(0)
if isinstance(obj, dict):
value = obj.get(key, None)
else:
value = getattr(obj, key, None)
return recursive_get(value, parts) if parts else value | [
"def",
"recursive_get",
"(",
"obj",
",",
"key",
")",
":",
"if",
"not",
"obj",
"or",
"not",
"key",
":",
"return",
"parts",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"if",
"isinstance",
"(",
"key",
",",
"basestring",
")",
"else",
"key",
"key",
"=",
"parts",
".",
"pop",
"(",
"0",
")",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"value",
"=",
"obj",
".",
"get",
"(",
"key",
",",
"None",
")",
"else",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"key",
",",
"None",
")",
"return",
"recursive_get",
"(",
"value",
",",
"parts",
")",
"if",
"parts",
"else",
"value"
] | Get an attribute or a key recursively.
:param obj: The object to fetch attribute or key on
:type obj: object|dict
:param key: Either a string in dotted-notation or an array of strings
:type key: string|list|tuple | [
"Get",
"an",
"attribute",
"or",
"a",
"key",
"recursively",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L218-L235 | train |
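A runnable sketch of the dotted-path lookup; missing segments and falsy inputs short-circuit to None.

doc = {'owner': {'name': 'jane'}}

assert recursive_get(doc, 'owner.name') == 'jane'
assert recursive_get(doc, ['owner', 'name']) == 'jane'  # list form works too
assert recursive_get(doc, 'owner.missing') is None
assert recursive_get(None, 'owner.name') is None        # falsy object: bail out early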
opendatateam/udata | udata/utils.py | unique_string | def unique_string(length=UUID_LENGTH):
'''Generate a unique string'''
# We need a string at least as long as length
string = str(uuid4()) * int(math.ceil(length / float(UUID_LENGTH)))
return string[:length] if length else string | python | def unique_string(length=UUID_LENGTH):
'''Generate a unique string'''
# We need a string at least as long as length
string = str(uuid4()) * int(math.ceil(length / float(UUID_LENGTH)))
return string[:length] if length else string | [
"def",
"unique_string",
"(",
"length",
"=",
"UUID_LENGTH",
")",
":",
"# We need a string at least as long as length",
"string",
"=",
"str",
"(",
"uuid4",
"(",
")",
")",
"*",
"int",
"(",
"math",
".",
"ceil",
"(",
"length",
"/",
"float",
"(",
"UUID_LENGTH",
")",
")",
")",
"return",
"string",
"[",
":",
"length",
"]",
"if",
"length",
"else",
"string"
] | Generate a unique string | [
"Generate",
"a",
"unique",
"string"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L238-L242 | train |
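A behavioural sketch, assuming UUID_LENGTH == 36 (the length of str(uuid4())). Note that for lengths above 36 the same UUID is repeated before truncation, so only the first 36 characters carry the full randomness.

s = unique_string(8)
assert len(s) == 8

s = unique_string(50)  # one uuid4 string doubled, then sliced to 50 chars
assert len(s) == 50
assert s[36:50] == s[:14]  # the repetition is visible past position 36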
opendatateam/udata | udata/utils.py | safe_unicode | def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string | python | def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string | [
"def",
"safe_unicode",
"(",
"string",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"basestring",
")",
":",
"string",
"=",
"unicode",
"(",
"string",
")",
"if",
"isinstance",
"(",
"string",
",",
"unicode",
")",
":",
"string",
"=",
"string",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"string"
] | Safely transform any object into utf8 encoded bytes | [
"Safely",
"transform",
"any",
"object",
"into",
"utf8",
"encoded",
"bytes"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L277-L283 | train |
opendatateam/udata | udata/features/territories/views.py | redirect_territory | def redirect_territory(level, code):
"""
Implicit redirect given the INSEE code.
Optimistically redirect to the latest valid/known INSEE code.
"""
territory = GeoZone.objects.valid_at(datetime.now()).filter(
code=code, level='fr:{level}'.format(level=level)).first()
return redirect(url_for('territories.territory', territory=territory)) | python | def redirect_territory(level, code):
"""
Implicit redirect given the INSEE code.
Optimistically redirect to the latest valid/known INSEE code.
"""
territory = GeoZone.objects.valid_at(datetime.now()).filter(
code=code, level='fr:{level}'.format(level=level)).first()
return redirect(url_for('territories.territory', territory=territory)) | [
"def",
"redirect_territory",
"(",
"level",
",",
"code",
")",
":",
"territory",
"=",
"GeoZone",
".",
"objects",
".",
"valid_at",
"(",
"datetime",
".",
"now",
"(",
")",
")",
".",
"filter",
"(",
"code",
"=",
"code",
",",
"level",
"=",
"'fr:{level}'",
".",
"format",
"(",
"level",
"=",
"level",
")",
")",
".",
"first",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'territories.territory'",
",",
"territory",
"=",
"territory",
")",
")"
] | Implicit redirect given the INSEE code.
Optimistically redirect to the latest valid/known INSEE code. | [
"Implicit",
"redirect",
"given",
"the",
"INSEE",
"code",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/territories/views.py#L86-L94 | train |
opendatateam/udata | udata/core/jobs/commands.py | scheduled | def scheduled():
'''
List scheduled jobs.
'''
for job in sorted(schedulables(), key=lambda s: s.name):
for task in PeriodicTask.objects(task=job.name):
label = job_label(task.task, task.args, task.kwargs)
echo(SCHEDULE_LINE.format(
name=white(task.name.encode('utf8')),
label=label,
schedule=task.schedule_display
).encode('utf8')) | python | def scheduled():
'''
List scheduled jobs.
'''
for job in sorted(schedulables(), key=lambda s: s.name):
for task in PeriodicTask.objects(task=job.name):
label = job_label(task.task, task.args, task.kwargs)
echo(SCHEDULE_LINE.format(
name=white(task.name.encode('utf8')),
label=label,
schedule=task.schedule_display
).encode('utf8')) | [
"def",
"scheduled",
"(",
")",
":",
"for",
"job",
"in",
"sorted",
"(",
"schedulables",
"(",
")",
",",
"key",
"=",
"lambda",
"s",
":",
"s",
".",
"name",
")",
":",
"for",
"task",
"in",
"PeriodicTask",
".",
"objects",
"(",
"task",
"=",
"job",
".",
"name",
")",
":",
"label",
"=",
"job_label",
"(",
"task",
".",
"task",
",",
"task",
".",
"args",
",",
"task",
".",
"kwargs",
")",
"echo",
"(",
"SCHEDULE_LINE",
".",
"format",
"(",
"name",
"=",
"white",
"(",
"task",
".",
"name",
".",
"encode",
"(",
"'utf8'",
")",
")",
",",
"label",
"=",
"label",
",",
"schedule",
"=",
"task",
".",
"schedule_display",
")",
".",
"encode",
"(",
"'utf8'",
")",
")"
] | List scheduled jobs. | [
"List",
"scheduled",
"jobs",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/jobs/commands.py#L140-L151 | train |
opendatateam/udata | udata/commands/purge.py | purge | def purge(datasets, reuses, organizations):
'''
Permanently remove data flagged as deleted.
If no model flag is given, all models are purged.
'''
purge_all = not any((datasets, reuses, organizations))
if purge_all or datasets:
log.info('Purging datasets')
purge_datasets()
if purge_all or reuses:
log.info('Purging reuses')
purge_reuses()
if purge_all or organizations:
log.info('Purging organizations')
purge_organizations()
success('Done') | python | def purge(datasets, reuses, organizations):
'''
Permanently remove data flagged as deleted.
If no model flag is given, all models are purged.
'''
purge_all = not any((datasets, reuses, organizations))
if purge_all or datasets:
log.info('Purging datasets')
purge_datasets()
if purge_all or reuses:
log.info('Purging reuses')
purge_reuses()
if purge_all or organizations:
log.info('Purging organizations')
purge_organizations()
success('Done') | [
"def",
"purge",
"(",
"datasets",
",",
"reuses",
",",
"organizations",
")",
":",
"purge_all",
"=",
"not",
"any",
"(",
"(",
"datasets",
",",
"reuses",
",",
"organizations",
")",
")",
"if",
"purge_all",
"or",
"datasets",
":",
"log",
".",
"info",
"(",
"'Purging datasets'",
")",
"purge_datasets",
"(",
")",
"if",
"purge_all",
"or",
"reuses",
":",
"log",
".",
"info",
"(",
"'Purging reuses'",
")",
"purge_reuses",
"(",
")",
"if",
"purge_all",
"or",
"organizations",
":",
"log",
".",
"info",
"(",
"'Purging organizations'",
")",
"purge_organizations",
"(",
")",
"success",
"(",
"'Done'",
")"
] | Permanently remove data flagged as deleted.
If no model flag is given, all models are purged. | [
"Permanently",
"remove",
"data",
"flagged",
"as",
"deleted",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/purge.py#L21-L41 | train |
opendatateam/udata | udata/search/query.py | SearchQuery.clean_parameters | def clean_parameters(self, params):
'''Only keep known parameters'''
return {k: v for k, v in params.items() if k in self.adapter.facets} | python | def clean_parameters(self, params):
'''Only keep known parameters'''
return {k: v for k, v in params.items() if k in self.adapter.facets} | [
"def",
"clean_parameters",
"(",
"self",
",",
"params",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"k",
"in",
"self",
".",
"adapter",
".",
"facets",
"}"
] | Only keep known parameters | [
"Only",
"keep",
"known",
"parameters"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L40-L42 | train |
opendatateam/udata | udata/search/query.py | SearchQuery.extract_sort | def extract_sort(self, params):
'''Extract and build sort query from parameters'''
sorts = params.pop('sort', [])
sorts = [sorts] if isinstance(sorts, basestring) else sorts
sorts = [(s[1:], 'desc')
if s.startswith('-') else (s, 'asc')
for s in sorts]
self.sorts = [
{self.adapter.sorts[s]: d}
for s, d in sorts if s in self.adapter.sorts
] | python | def extract_sort(self, params):
'''Extract and build sort query from parameters'''
sorts = params.pop('sort', [])
sorts = [sorts] if isinstance(sorts, basestring) else sorts
sorts = [(s[1:], 'desc')
if s.startswith('-') else (s, 'asc')
for s in sorts]
self.sorts = [
{self.adapter.sorts[s]: d}
for s, d in sorts if s in self.adapter.sorts
] | [
"def",
"extract_sort",
"(",
"self",
",",
"params",
")",
":",
"sorts",
"=",
"params",
".",
"pop",
"(",
"'sort'",
",",
"[",
"]",
")",
"sorts",
"=",
"[",
"sorts",
"]",
"if",
"isinstance",
"(",
"sorts",
",",
"basestring",
")",
"else",
"sorts",
"sorts",
"=",
"[",
"(",
"s",
"[",
"1",
":",
"]",
",",
"'desc'",
")",
"if",
"s",
".",
"startswith",
"(",
"'-'",
")",
"else",
"(",
"s",
",",
"'asc'",
")",
"for",
"s",
"in",
"sorts",
"]",
"self",
".",
"sorts",
"=",
"[",
"{",
"self",
".",
"adapter",
".",
"sorts",
"[",
"s",
"]",
":",
"d",
"}",
"for",
"s",
",",
"d",
"in",
"sorts",
"if",
"s",
"in",
"self",
".",
"adapter",
".",
"sorts",
"]"
] | Extract and build sort query from parameters | [
"Extract",
"and",
"build",
"sort",
"query",
"from",
"parameters"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L44-L54 | train |
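What the sort extraction above produces, sketched as comments since it needs an adapter instance; the field names are illustrative.

# params = {'sort': ['-created', 'title', 'unknown']}
# with adapter.sorts == {'created': 'created', 'title': 'title.raw'}:
#
#   '-created' -> ('created', 'desc'), 'title' -> ('title', 'asc')
#   'unknown' is silently dropped (not in adapter.sorts)
#   self.sorts == [{'created': 'desc'}, {'title.raw': 'asc'}]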
opendatateam/udata | udata/search/query.py | SearchQuery.extract_pagination | def extract_pagination(self, params):
'''Extract and build pagination from parameters'''
try:
params_page = int(params.pop('page', 1) or 1)
self.page = max(params_page, 1)
except:
# Failsafe, if page cannot be parsed, we fall back on the first page
self.page = 1
try:
params_page_size = params.pop('page_size', DEFAULT_PAGE_SIZE)
self.page_size = int(params_page_size or DEFAULT_PAGE_SIZE)
except:
# Failsafe, if page_size cannot be parsed, we fall back on the default
self.page_size = DEFAULT_PAGE_SIZE
self.page_start = (self.page - 1) * self.page_size
self.page_end = self.page_start + self.page_size | python | def extract_pagination(self, params):
'''Extract and build pagination from parameters'''
try:
params_page = int(params.pop('page', 1) or 1)
self.page = max(params_page, 1)
except:
# Failsafe, if page cannot be parsed, we fall back on the first page
self.page = 1
try:
params_page_size = params.pop('page_size', DEFAULT_PAGE_SIZE)
self.page_size = int(params_page_size or DEFAULT_PAGE_SIZE)
except:
# Failsafe, if page_size cannot be parsed, we fall back on the default
self.page_size = DEFAULT_PAGE_SIZE
self.page_start = (self.page - 1) * self.page_size
self.page_end = self.page_start + self.page_size | [
"def",
"extract_pagination",
"(",
"self",
",",
"params",
")",
":",
"try",
":",
"params_page",
"=",
"int",
"(",
"params",
".",
"pop",
"(",
"'page'",
",",
"1",
")",
"or",
"1",
")",
"self",
".",
"page",
"=",
"max",
"(",
"params_page",
",",
"1",
")",
"except",
":",
"# Failsafe, if page cannot be parsed, we falback on first page",
"self",
".",
"page",
"=",
"1",
"try",
":",
"params_page_size",
"=",
"params",
".",
"pop",
"(",
"'page_size'",
",",
"DEFAULT_PAGE_SIZE",
")",
"self",
".",
"page_size",
"=",
"int",
"(",
"params_page_size",
"or",
"DEFAULT_PAGE_SIZE",
")",
"except",
":",
"# Failsafe, if page_size cannot be parsed, we falback on default",
"self",
".",
"page_size",
"=",
"DEFAULT_PAGE_SIZE",
"self",
".",
"page_start",
"=",
"(",
"self",
".",
"page",
"-",
"1",
")",
"*",
"self",
".",
"page_size",
"self",
".",
"page_end",
"=",
"self",
".",
"page_start",
"+",
"self",
".",
"page_size"
] | Extract and build pagination from parameters | [
"Extract",
"and",
"build",
"pagination",
"from",
"parameters"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L56-L71 | train |
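The pagination arithmetic spelled out standalone, with the same fallbacks as the method; the default value is an assumption (the real DEFAULT_PAGE_SIZE constant lives elsewhere in udata.search).

DEFAULT_PAGE_SIZE = 20  # assumed value for the sketch

params = {'page': '3', 'page_size': '25'}
page = max(int(params.pop('page', 1) or 1), 1)                                    # 3
page_size = int(params.pop('page_size', DEFAULT_PAGE_SIZE) or DEFAULT_PAGE_SIZE)  # 25
page_start = (page - 1) * page_size  # 50: offset of the first hit
page_end = page_start + page_size    # 75: exclusive upper bound of the slice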
opendatateam/udata | udata/search/query.py | SearchQuery.aggregate | def aggregate(self, search):
"""
Add aggregations representing the facets selected
"""
for f, facet in self.facets.items():
agg = facet.get_aggregation()
if isinstance(agg, Bucket):
search.aggs.bucket(f, agg)
elif isinstance(agg, Pipeline):
search.aggs.pipeline(f, agg)
else:
search.aggs.metric(f, agg) | python | def aggregate(self, search):
"""
Add aggregations representing the facets selected
"""
for f, facet in self.facets.items():
agg = facet.get_aggregation()
if isinstance(agg, Bucket):
search.aggs.bucket(f, agg)
elif isinstance(agg, Pipeline):
search.aggs.pipeline(f, agg)
else:
search.aggs.metric(f, agg) | [
"def",
"aggregate",
"(",
"self",
",",
"search",
")",
":",
"for",
"f",
",",
"facet",
"in",
"self",
".",
"facets",
".",
"items",
"(",
")",
":",
"agg",
"=",
"facet",
".",
"get_aggregation",
"(",
")",
"if",
"isinstance",
"(",
"agg",
",",
"Bucket",
")",
":",
"search",
".",
"aggs",
".",
"bucket",
"(",
"f",
",",
"agg",
")",
"elif",
"isinstance",
"(",
"agg",
",",
"Pipeline",
")",
":",
"search",
".",
"aggs",
".",
"pipeline",
"(",
"f",
",",
"agg",
")",
"else",
":",
"search",
".",
"aggs",
".",
"metric",
"(",
"f",
",",
"agg",
")"
] | Add aggregations representing the facets selected | [
"Add",
"aggregations",
"representing",
"the",
"facets",
"selected"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L73-L84 | train |
opendatateam/udata | udata/search/query.py | SearchQuery.filter | def filter(self, search):
'''
Perform filtering instead of default post-filtering.
'''
if not self._filters:
return search
filters = Q('match_all')
for f in self._filters.values():
filters &= f
return search.filter(filters) | python | def filter(self, search):
'''
Perform filtering instead of default post-filtering.
'''
if not self._filters:
return search
filters = Q('match_all')
for f in self._filters.values():
filters &= f
return search.filter(filters) | [
"def",
"filter",
"(",
"self",
",",
"search",
")",
":",
"if",
"not",
"self",
".",
"_filters",
":",
"return",
"search",
"filters",
"=",
"Q",
"(",
"'match_all'",
")",
"for",
"f",
"in",
"self",
".",
"_filters",
".",
"values",
"(",
")",
":",
"filters",
"&=",
"f",
"return",
"search",
".",
"filter",
"(",
"filters",
")"
] | Perform filtering instead of default post-filtering. | [
"Perform",
"filtering",
"instead",
"of",
"default",
"post",
"-",
"filtering",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L86-L95 | train |
opendatateam/udata | udata/search/query.py | SearchQuery.query | def query(self, search, query):
'''
Customize the search query if necessary.
It handles the following features:
- negation support
- optional fuzziness
- optional analyzer
- optional match_type
'''
if not query:
return search
included, excluded = [], []
for term in query.split(' '):
if not term.strip():
continue
if term.startswith('-'):
excluded.append(term[1:])
else:
included.append(term)
if included:
search = search.query(self.multi_match(included))
for term in excluded:
search = search.query(~self.multi_match([term]))
return search | python | def query(self, search, query):
'''
Customize the search query if necessary.
It handles the following features:
- negation support
- optional fuzziness
- optional analyzer
- optional match_type
'''
if not query:
return search
included, excluded = [], []
for term in query.split(' '):
if not term.strip():
continue
if term.startswith('-'):
excluded.append(term[1:])
else:
included.append(term)
if included:
search = search.query(self.multi_match(included))
for term in excluded:
search = search.query(~self.multi_match([term]))
return search | [
"def",
"query",
"(",
"self",
",",
"search",
",",
"query",
")",
":",
"if",
"not",
"query",
":",
"return",
"search",
"included",
",",
"excluded",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"term",
"in",
"query",
".",
"split",
"(",
"' '",
")",
":",
"if",
"not",
"term",
".",
"strip",
"(",
")",
":",
"continue",
"if",
"term",
".",
"startswith",
"(",
"'-'",
")",
":",
"excluded",
".",
"append",
"(",
"term",
"[",
"1",
":",
"]",
")",
"else",
":",
"included",
".",
"append",
"(",
"term",
")",
"if",
"included",
":",
"search",
"=",
"search",
".",
"query",
"(",
"self",
".",
"multi_match",
"(",
"included",
")",
")",
"for",
"term",
"in",
"excluded",
":",
"search",
"=",
"search",
".",
"query",
"(",
"~",
"self",
".",
"multi_match",
"(",
"[",
"term",
"]",
")",
")",
"return",
"search"
] | Customize the search query if necessary.
It handles the following features:
- negation support
- optional fuzziness
- optional analyzer
- optional match_type | [
"Customize",
"the",
"search",
"query",
"if",
"necessary",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L114-L139 | train |
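The term-splitting half of the method, shown standalone: a leading '-' excludes a term, everything else is included, and blank fragments are skipped.

query = 'climate  -brazil energy'
included = [t for t in query.split(' ') if t.strip() and not t.startswith('-')]
excluded = [t[1:] for t in query.split(' ') if t.strip() and t.startswith('-')]
assert included == ['climate', 'energy']
assert excluded == ['brazil']
# included terms feed one multi_match query; each excluded term is wrapped in ~ (negated)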
opendatateam/udata | udata/search/query.py | SearchQuery.to_url | def to_url(self, url=None, replace=False, **kwargs):
'''Serialize the query into a URL'''
params = copy.deepcopy(self.filter_values)
if self._query:
params['q'] = self._query
if self.page_size != DEFAULT_PAGE_SIZE:
params['page_size'] = self.page_size
if kwargs:
for key, value in kwargs.items():
if not replace and key in params:
if not isinstance(params[key], (list, tuple)):
params[key] = [params[key], value]
else:
params[key].append(value)
else:
params[key] = value
else:
params['page'] = self.page
href = Href(url or request.base_url)
return href(params) | python | def to_url(self, url=None, replace=False, **kwargs):
'''Serialize the query into a URL'''
params = copy.deepcopy(self.filter_values)
if self._query:
params['q'] = self._query
if self.page_size != DEFAULT_PAGE_SIZE:
params['page_size'] = self.page_size
if kwargs:
for key, value in kwargs.items():
if not replace and key in params:
if not isinstance(params[key], (list, tuple)):
params[key] = [params[key], value]
else:
params[key].append(value)
else:
params[key] = value
else:
params['page'] = self.page
href = Href(url or request.base_url)
return href(params) | [
"def",
"to_url",
"(",
"self",
",",
"url",
"=",
"None",
",",
"replace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"filter_values",
")",
"if",
"self",
".",
"_query",
":",
"params",
"[",
"'q'",
"]",
"=",
"self",
".",
"_query",
"if",
"self",
".",
"page_size",
"!=",
"DEFAULT_PAGE_SIZE",
":",
"params",
"[",
"'page_size'",
"]",
"=",
"self",
".",
"page_size",
"if",
"kwargs",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"replace",
"and",
"key",
"in",
"params",
":",
"if",
"not",
"isinstance",
"(",
"params",
"[",
"key",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"params",
"[",
"key",
"]",
"=",
"[",
"params",
"[",
"key",
"]",
",",
"value",
"]",
"else",
":",
"params",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"params",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"params",
"[",
"'page'",
"]",
"=",
"self",
".",
"page",
"href",
"=",
"Href",
"(",
"url",
"or",
"request",
".",
"base_url",
")",
"return",
"href",
"(",
"params",
")"
] | Serialize the query into a URL | [
"Serialize",
"the",
"query",
"into",
"an",
"URL"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/query.py#L169-L188 | train |
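Expected merge semantics for the serializer above, sketched as comments since a real SearchQuery needs an adapter and a request context; the parameter values are illustrative.

# With filter_values == {'tag': 'health'} and _query == 'water':
#
#   query.to_url()                          -> .../?q=water&tag=health&page=<current>
#   query.to_url(tag='food')                -> tag becomes ['health', 'food'] (appended)
#   query.to_url(tag='food', replace=True)  -> tag is overwritten with 'food'
#
# Note: passing kwargs skips the `page` parameter; without kwargs the current page is kept.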
opendatateam/udata | udata/frontend/csv.py | safestr | def safestr(value):
'''Ensure type to string serialization'''
if not value or isinstance(value, (int, float, bool, long)):
return value
elif isinstance(value, (date, datetime)):
return value.isoformat()
else:
return unicode(value) | python | def safestr(value):
'''Ensure type to string serialization'''
if not value or isinstance(value, (int, float, bool, long)):
return value
elif isinstance(value, (date, datetime)):
return value.isoformat()
else:
return unicode(value) | [
"def",
"safestr",
"(",
"value",
")",
":",
"if",
"not",
"value",
"or",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"float",
",",
"bool",
",",
"long",
")",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"date",
",",
"datetime",
")",
")",
":",
"return",
"value",
".",
"isoformat",
"(",
")",
"else",
":",
"return",
"unicode",
"(",
"value",
")"
] | Ensure type to string serialization | [
"Ensure",
"type",
"to",
"string",
"serialization"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L30-L37 | train |
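
safestr targets Python 2, hence the long and unicode built-ins. A rough Python 3 equivalent, shown here only to make the three branches explicit, would be:

from datetime import date, datetime

def safestr3(value):
    # Falsy values and plain numbers pass through untouched...
    if not value or isinstance(value, (int, float, bool)):
        return value
    # ...dates and datetimes become ISO 8601 strings...
    elif isinstance(value, (date, datetime)):
        return value.isoformat()
    # ...and anything else is stringified.
    return str(value)

print(safestr3(None))               # None
print(safestr3(42))                 # 42
print(safestr3(date(2017, 1, 31)))  # 2017-01-31
print(safestr3({'a': 1}))           # {'a': 1}
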
opendatateam/udata | udata/frontend/csv.py | yield_rows | def yield_rows(adapter):
'''Yield a dataset catalog line by line'''
csvfile = StringIO()
writer = get_writer(csvfile)
# Generate header
writer.writerow(adapter.header())
yield csvfile.getvalue()
del csvfile
for row in adapter.rows():
csvfile = StringIO()
writer = get_writer(csvfile)
writer.writerow(row)
yield csvfile.getvalue()
del csvfile | python | def yield_rows(adapter):
'''Yield a dataset catalog line by line'''
csvfile = StringIO()
writer = get_writer(csvfile)
# Generate header
writer.writerow(adapter.header())
yield csvfile.getvalue()
del csvfile
for row in adapter.rows():
csvfile = StringIO()
writer = get_writer(csvfile)
writer.writerow(row)
yield csvfile.getvalue()
del csvfile | [
"def",
"yield_rows",
"(",
"adapter",
")",
":",
"csvfile",
"=",
"StringIO",
"(",
")",
"writer",
"=",
"get_writer",
"(",
"csvfile",
")",
"# Generate header",
"writer",
".",
"writerow",
"(",
"adapter",
".",
"header",
"(",
")",
")",
"yield",
"csvfile",
".",
"getvalue",
"(",
")",
"del",
"csvfile",
"for",
"row",
"in",
"adapter",
".",
"rows",
"(",
")",
":",
"csvfile",
"=",
"StringIO",
"(",
")",
"writer",
"=",
"get_writer",
"(",
"csvfile",
")",
"writer",
".",
"writerow",
"(",
"row",
")",
"yield",
"csvfile",
".",
"getvalue",
"(",
")",
"del",
"csvfile"
] | Yield a dataset catalog line by line | [
"Yield",
"a",
"dataset",
"catalog",
"line",
"by",
"line"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L206-L220 | train |
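
The one-StringIO-per-row pattern keeps memory flat no matter how many rows the adapter yields. A self-contained sketch of the same generator, with a stub standing in for udata's Adapter class and with the assumption that get_writer is a thin wrapper around csv.writer:

import csv
import itertools
import sys
from io import StringIO

class StubAdapter(object):
    def header(self):
        return ['id', 'title']
    def rows(self):
        yield [1, 'First dataset']
        yield [2, 'Second dataset']

def yield_rows(adapter):
    # Only one CSV line is ever buffered at a time.
    for row in itertools.chain([adapter.header()], adapter.rows()):
        buf = StringIO()
        csv.writer(buf).writerow(row)
        yield buf.getvalue()

for line in yield_rows(StubAdapter()):
    sys.stdout.write(line)  # id,title / 1,First dataset / 2,Second dataset
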
opendatateam/udata | udata/frontend/csv.py | stream | def stream(queryset_or_adapter, basename=None):
"""Stream a csv file from an object list,
    a queryset or an instantiated adapter.
"""
if isinstance(queryset_or_adapter, Adapter):
adapter = queryset_or_adapter
elif isinstance(queryset_or_adapter, (list, tuple)):
if not queryset_or_adapter:
raise ValueError(
'Type detection is not possible with an empty list')
cls = _adapters.get(queryset_or_adapter[0].__class__)
adapter = cls(queryset_or_adapter)
elif isinstance(queryset_or_adapter, db.BaseQuerySet):
cls = _adapters.get(queryset_or_adapter._document)
adapter = cls(queryset_or_adapter)
else:
raise ValueError('Unsupported object type')
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
headers = {
b'Content-Disposition': 'attachment; filename={0}-{1}.csv'.format(
basename or 'export', timestamp),
}
streamer = stream_with_context(yield_rows(adapter))
return Response(streamer, mimetype="text/csv", headers=headers) | python | def stream(queryset_or_adapter, basename=None):
"""Stream a csv file from an object list,
    a queryset or an instantiated adapter.
"""
if isinstance(queryset_or_adapter, Adapter):
adapter = queryset_or_adapter
elif isinstance(queryset_or_adapter, (list, tuple)):
if not queryset_or_adapter:
raise ValueError(
'Type detection is not possible with an empty list')
cls = _adapters.get(queryset_or_adapter[0].__class__)
adapter = cls(queryset_or_adapter)
elif isinstance(queryset_or_adapter, db.BaseQuerySet):
cls = _adapters.get(queryset_or_adapter._document)
adapter = cls(queryset_or_adapter)
else:
raise ValueError('Unsupported object type')
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
headers = {
b'Content-Disposition': 'attachment; filename={0}-{1}.csv'.format(
basename or 'export', timestamp),
}
streamer = stream_with_context(yield_rows(adapter))
return Response(streamer, mimetype="text/csv", headers=headers) | [
"def",
"stream",
"(",
"queryset_or_adapter",
",",
"basename",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"queryset_or_adapter",
",",
"Adapter",
")",
":",
"adapter",
"=",
"queryset_or_adapter",
"elif",
"isinstance",
"(",
"queryset_or_adapter",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"not",
"queryset_or_adapter",
":",
"raise",
"ValueError",
"(",
"'Type detection is not possible with an empty list'",
")",
"cls",
"=",
"_adapters",
".",
"get",
"(",
"queryset_or_adapter",
"[",
"0",
"]",
".",
"__class__",
")",
"adapter",
"=",
"cls",
"(",
"queryset_or_adapter",
")",
"elif",
"isinstance",
"(",
"queryset_or_adapter",
",",
"db",
".",
"BaseQuerySet",
")",
":",
"cls",
"=",
"_adapters",
".",
"get",
"(",
"queryset_or_adapter",
".",
"_document",
")",
"adapter",
"=",
"cls",
"(",
"queryset_or_adapter",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported object type'",
")",
"timestamp",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d-%H-%M'",
")",
"headers",
"=",
"{",
"b'Content-Disposition'",
":",
"'attachment; filename={0}-{1}.csv'",
".",
"format",
"(",
"basename",
"or",
"'export'",
",",
"timestamp",
")",
",",
"}",
"streamer",
"=",
"stream_with_context",
"(",
"yield_rows",
"(",
"adapter",
")",
")",
"return",
"Response",
"(",
"streamer",
",",
"mimetype",
"=",
"\"text/csv\"",
",",
"headers",
"=",
"headers",
")"
] | Stream a csv file from an object list,
a queryset or an instantiated adapter. | [
"Stream",
"a",
"csv",
"file",
"from",
"an",
"object",
"list"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L223-L248 | train |
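
stream() dispatches on the type of its argument: an adapter is used as-is, while lists and querysets are mapped to an adapter class through the _adapters registry. The registry idiom, reduced to a runnable stub (Dataset and DatasetAdapter below are placeholders, not the udata classes):

_adapters = {}

def adapts(model):
    # Decorator registering an adapter class for a given model class.
    def decorator(adapter_cls):
        _adapters[model] = adapter_cls
        return adapter_cls
    return decorator

class Dataset(object):
    pass

@adapts(Dataset)
class DatasetAdapter(object):
    def __init__(self, queryset):
        self.queryset = queryset

def pick_adapter(objects):
    if not objects:
        raise ValueError('Type detection is not possible with an empty list')
    return _adapters[objects[0].__class__](objects)

print(type(pick_adapter([Dataset()])).__name__)  # DatasetAdapter
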
opendatateam/udata | udata/frontend/csv.py | NestedAdapter.header | def header(self):
'''Generate the CSV header row'''
return (super(NestedAdapter, self).header() +
[name for name, getter in self.get_nested_fields()]) | python | def header(self):
'''Generate the CSV header row'''
return (super(NestedAdapter, self).header() +
[name for name, getter in self.get_nested_fields()]) | [
"def",
"header",
"(",
"self",
")",
":",
"return",
"(",
"super",
"(",
"NestedAdapter",
",",
"self",
")",
".",
"header",
"(",
")",
"+",
"[",
"name",
"for",
"name",
",",
"getter",
"in",
"self",
".",
"get_nested_fields",
"(",
")",
"]",
")"
] | Generate the CSV header row | [
"Generate",
"the",
"CSV",
"header",
"row"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L114-L117 | train |
opendatateam/udata | udata/frontend/csv.py | NestedAdapter.rows | def rows(self):
'''Iterate over queryset objects'''
return (self.nested_row(o, n)
for o in self.queryset
for n in getattr(o, self.attribute, [])) | python | def rows(self):
'''Iterate over queryset objects'''
return (self.nested_row(o, n)
for o in self.queryset
for n in getattr(o, self.attribute, [])) | [
"def",
"rows",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"nested_row",
"(",
"o",
",",
"n",
")",
"for",
"o",
"in",
"self",
".",
"queryset",
"for",
"n",
"in",
"getattr",
"(",
"o",
",",
"self",
".",
"attribute",
",",
"[",
"]",
")",
")"
] | Iterate over queryset objects | [
"Iterate",
"over",
"queryset",
"objects"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L148-L152 | train |
opendatateam/udata | udata/frontend/csv.py | NestedAdapter.nested_row | def nested_row(self, obj, nested):
'''Convert an object into a flat csv row'''
row = self.to_row(obj)
for name, getter in self.get_nested_fields():
content = ''
if getter is not None:
try:
content = safestr(getter(nested))
except Exception, e: # Catch all errors intentionally.
log.error('Error exporting CSV for {name}: {error}'.format(
name=self.__class__.__name__, error=e))
row.append(content)
return row | python | def nested_row(self, obj, nested):
'''Convert an object into a flat csv row'''
row = self.to_row(obj)
for name, getter in self.get_nested_fields():
content = ''
if getter is not None:
try:
content = safestr(getter(nested))
except Exception, e: # Catch all errors intentionally.
log.error('Error exporting CSV for {name}: {error}'.format(
name=self.__class__.__name__, error=e))
row.append(content)
return row | [
"def",
"nested_row",
"(",
"self",
",",
"obj",
",",
"nested",
")",
":",
"row",
"=",
"self",
".",
"to_row",
"(",
"obj",
")",
"for",
"name",
",",
"getter",
"in",
"self",
".",
"get_nested_fields",
"(",
")",
":",
"content",
"=",
"''",
"if",
"getter",
"is",
"not",
"None",
":",
"try",
":",
"content",
"=",
"safestr",
"(",
"getter",
"(",
"nested",
")",
")",
"except",
"Exception",
",",
"e",
":",
"# Catch all errors intentionally.",
"log",
".",
"error",
"(",
"'Error exporting CSV for {name}: {error}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"error",
"=",
"e",
")",
")",
"row",
".",
"append",
"(",
"content",
")",
"return",
"row"
] | Convert an object into a flat csv row | [
"Convert",
"an",
"object",
"into",
"a",
"flat",
"csv",
"row"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L154-L166 | train |
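
Taken together, the three NestedAdapter methods above emit one flat CSV row per (parent object, nested item) pair, with the parent columns repeated on every line. The shape of that output, sketched with plain callables standing in for udata's declarative field getters:

parent_fields = [('title', lambda o: o['title'])]
nested_fields = [('resource', lambda n: n['name'])]

def nested_rows(objects, attribute='resources'):
    for obj in objects:
        base = [getter(obj) for _, getter in parent_fields]
        for nested in obj.get(attribute, []):
            yield base + [getter(nested) for _, getter in nested_fields]

datasets = [{'title': 'Roads',
             'resources': [{'name': 'a.csv'}, {'name': 'b.csv'}]}]
for row in nested_rows(datasets):
    print(row)
# ['Roads', 'a.csv']
# ['Roads', 'b.csv']
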
opendatateam/udata | udata/features/transfer/notifications.py | transfer_request_notifications | def transfer_request_notifications(user):
'''Notify user about pending transfer requests'''
orgs = [o for o in user.organizations if o.is_member(user)]
notifications = []
qs = Transfer.objects(recipient__in=[user] + orgs, status='pending')
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = qs.only('id', 'created', 'subject')
# Do not dereference subject (so it's a DBRef)
# Also improve performances and memory usage
for transfer in qs.no_dereference():
notifications.append((transfer.created, {
'id': transfer.id,
'subject': {
'class': transfer.subject['_cls'].lower(),
'id': transfer.subject['_ref'].id
}
}))
return notifications | python | def transfer_request_notifications(user):
'''Notify user about pending transfer requests'''
orgs = [o for o in user.organizations if o.is_member(user)]
notifications = []
qs = Transfer.objects(recipient__in=[user] + orgs, status='pending')
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = qs.only('id', 'created', 'subject')
# Do not dereference subject (so it's a DBRef)
# Also improve performances and memory usage
for transfer in qs.no_dereference():
notifications.append((transfer.created, {
'id': transfer.id,
'subject': {
'class': transfer.subject['_cls'].lower(),
'id': transfer.subject['_ref'].id
}
}))
return notifications | [
"def",
"transfer_request_notifications",
"(",
"user",
")",
":",
"orgs",
"=",
"[",
"o",
"for",
"o",
"in",
"user",
".",
"organizations",
"if",
"o",
".",
"is_member",
"(",
"user",
")",
"]",
"notifications",
"=",
"[",
"]",
"qs",
"=",
"Transfer",
".",
"objects",
"(",
"recipient__in",
"=",
"[",
"user",
"]",
"+",
"orgs",
",",
"status",
"=",
"'pending'",
")",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
"qs",
"=",
"qs",
".",
"only",
"(",
"'id'",
",",
"'created'",
",",
"'subject'",
")",
"# Do not dereference subject (so it's a DBRef)",
"# Also improve performances and memory usage",
"for",
"transfer",
"in",
"qs",
".",
"no_dereference",
"(",
")",
":",
"notifications",
".",
"append",
"(",
"(",
"transfer",
".",
"created",
",",
"{",
"'id'",
":",
"transfer",
".",
"id",
",",
"'subject'",
":",
"{",
"'class'",
":",
"transfer",
".",
"subject",
"[",
"'_cls'",
"]",
".",
"lower",
"(",
")",
",",
"'id'",
":",
"transfer",
".",
"subject",
"[",
"'_ref'",
"]",
".",
"id",
"}",
"}",
")",
")",
"return",
"notifications"
] | Notify user about pending transfer requests | [
"Notify",
"user",
"about",
"pending",
"transfer",
"requests"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/notifications.py#L14-L35 | train |
opendatateam/udata | udata/mail.py | send | def send(subject, recipients, template_base, **kwargs):
'''
Send a given email to multiple recipients.
    User preferred language is taken into account.
    To translate the subject in the right language, you should use ugettext_lazy
'''
sender = kwargs.pop('sender', None)
if not isinstance(recipients, (list, tuple)):
recipients = [recipients]
debug = current_app.config.get('DEBUG', False)
send_mail = current_app.config.get('SEND_MAIL', not debug)
connection = send_mail and mail.connect or dummyconnection
with connection() as conn:
for recipient in recipients:
lang = i18n._default_lang(recipient)
with i18n.language(lang):
log.debug(
'Sending mail "%s" to recipient "%s"', subject, recipient)
msg = Message(subject, sender=sender,
recipients=[recipient.email])
msg.body = theme.render(
'mail/{0}.txt'.format(template_base), subject=subject,
sender=sender, recipient=recipient, **kwargs)
msg.html = theme.render(
'mail/{0}.html'.format(template_base), subject=subject,
sender=sender, recipient=recipient, **kwargs)
conn.send(msg) | python | def send(subject, recipients, template_base, **kwargs):
'''
Send a given email to multiple recipients.
User prefered language is taken in account.
To translate the subject in the right language, you should ugettext_lazy
'''
sender = kwargs.pop('sender', None)
if not isinstance(recipients, (list, tuple)):
recipients = [recipients]
debug = current_app.config.get('DEBUG', False)
send_mail = current_app.config.get('SEND_MAIL', not debug)
connection = send_mail and mail.connect or dummyconnection
with connection() as conn:
for recipient in recipients:
lang = i18n._default_lang(recipient)
with i18n.language(lang):
log.debug(
'Sending mail "%s" to recipient "%s"', subject, recipient)
msg = Message(subject, sender=sender,
recipients=[recipient.email])
msg.body = theme.render(
'mail/{0}.txt'.format(template_base), subject=subject,
sender=sender, recipient=recipient, **kwargs)
msg.html = theme.render(
'mail/{0}.html'.format(template_base), subject=subject,
sender=sender, recipient=recipient, **kwargs)
conn.send(msg) | [
"def",
"send",
"(",
"subject",
",",
"recipients",
",",
"template_base",
",",
"*",
"*",
"kwargs",
")",
":",
"sender",
"=",
"kwargs",
".",
"pop",
"(",
"'sender'",
",",
"None",
")",
"if",
"not",
"isinstance",
"(",
"recipients",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"recipients",
"=",
"[",
"recipients",
"]",
"debug",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'DEBUG'",
",",
"False",
")",
"send_mail",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'SEND_MAIL'",
",",
"not",
"debug",
")",
"connection",
"=",
"send_mail",
"and",
"mail",
".",
"connect",
"or",
"dummyconnection",
"with",
"connection",
"(",
")",
"as",
"conn",
":",
"for",
"recipient",
"in",
"recipients",
":",
"lang",
"=",
"i18n",
".",
"_default_lang",
"(",
"recipient",
")",
"with",
"i18n",
".",
"language",
"(",
"lang",
")",
":",
"log",
".",
"debug",
"(",
"'Sending mail \"%s\" to recipient \"%s\"'",
",",
"subject",
",",
"recipient",
")",
"msg",
"=",
"Message",
"(",
"subject",
",",
"sender",
"=",
"sender",
",",
"recipients",
"=",
"[",
"recipient",
".",
"email",
"]",
")",
"msg",
".",
"body",
"=",
"theme",
".",
"render",
"(",
"'mail/{0}.txt'",
".",
"format",
"(",
"template_base",
")",
",",
"subject",
"=",
"subject",
",",
"sender",
"=",
"sender",
",",
"recipient",
"=",
"recipient",
",",
"*",
"*",
"kwargs",
")",
"msg",
".",
"html",
"=",
"theme",
".",
"render",
"(",
"'mail/{0}.html'",
".",
"format",
"(",
"template_base",
")",
",",
"subject",
"=",
"subject",
",",
"sender",
"=",
"sender",
",",
"recipient",
"=",
"recipient",
",",
"*",
"*",
"kwargs",
")",
"conn",
".",
"send",
"(",
"msg",
")"
] | Send a given email to multiple recipients.
User preferred language is taken into account.
To translate the subject in the right language, you should use ugettext_lazy | [
"Send",
"a",
"given",
"email",
"to",
"multiple",
"recipients",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/mail.py#L40-L69 | train |
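
When mail sending is disabled, connection resolves to dummyconnection through the old and/or ternary idiom, so the `with connection() as conn:` shape stays identical whether mail actually goes out. A stand-in for what dummyconnection has to provide (the real one lives elsewhere in udata; this is an assumption about its contract):

from contextlib import contextmanager

@contextmanager
def dummyconnection(*args, **kwargs):
    class DummyConnection(object):
        def send(self, msg):
            print('[mail suppressed] %r' % (msg,))
    yield DummyConnection()

mail_connect = None  # placeholder for Flask-Mail's mail.connect
send_mail = False    # e.g. DEBUG=True without SEND_MAIL set
connection = send_mail and mail_connect or dummyconnection

with connection() as conn:
    conn.send('subject: Hello')  # printed, never sent
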
opendatateam/udata | udata/sentry.py | public_dsn | def public_dsn(dsn):
'''Transform a standard Sentry DSN into a public one'''
m = RE_DSN.match(dsn)
if not m:
log.error('Unable to parse Sentry DSN')
public = '{scheme}://{client_id}@{domain}/{site_id}'.format(
**m.groupdict())
return public | python | def public_dsn(dsn):
'''Transform a standard Sentry DSN into a public one'''
m = RE_DSN.match(dsn)
if not m:
log.error('Unable to parse Sentry DSN')
public = '{scheme}://{client_id}@{domain}/{site_id}'.format(
**m.groupdict())
return public | [
"def",
"public_dsn",
"(",
"dsn",
")",
":",
"m",
"=",
"RE_DSN",
".",
"match",
"(",
"dsn",
")",
"if",
"not",
"m",
":",
"log",
".",
"error",
"(",
"'Unable to parse Sentry DSN'",
")",
"public",
"=",
"'{scheme}://{client_id}@{domain}/{site_id}'",
".",
"format",
"(",
"*",
"*",
"m",
".",
"groupdict",
"(",
")",
")",
"return",
"public"
] | Transform a standard Sentry DSN into a public one | [
"Transform",
"a",
"standard",
"Sentry",
"DSN",
"into",
"a",
"public",
"one"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/sentry.py#L24-L31 | train |
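
A worked example of the DSN rewrite. RE_DSN is not shown in this excerpt, so the pattern below is an assumption that captures the named groups the format string needs; the point is that the secret between ':' and '@' is dropped, leaving a DSN safe to expose client-side. Note also that when the regex does not match, the original only logs before calling m.groupdict(), so an invalid DSN would raise AttributeError.

import re

RE_DSN = re.compile(
    r'(?P<scheme>https?)://(?P<client_id>\w+):(?P<secret>\w+)'
    r'@(?P<domain>[^/]+)/(?P<site_id>\d+)')

dsn = 'https://abc123:s3cret@sentry.example.com/42'
m = RE_DSN.match(dsn)
print('{scheme}://{client_id}@{domain}/{site_id}'.format(**m.groupdict()))
# https://abc123@sentry.example.com/42
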
opendatateam/udata | tasks.py | update | def update(ctx, migrate=False):
'''Perform a development update'''
msg = 'Update all dependencies'
if migrate:
msg += ' and migrate data'
header(msg)
info('Updating Python dependencies')
lrun('pip install -r requirements/develop.pip')
lrun('pip install -e .')
info('Updating JavaScript dependencies')
lrun('npm install')
if migrate:
info('Migrating database')
lrun('udata db migrate') | python | def update(ctx, migrate=False):
'''Perform a development update'''
msg = 'Update all dependencies'
if migrate:
msg += ' and migrate data'
header(msg)
info('Updating Python dependencies')
lrun('pip install -r requirements/develop.pip')
lrun('pip install -e .')
info('Updating JavaScript dependencies')
lrun('npm install')
if migrate:
info('Migrating database')
lrun('udata db migrate') | [
"def",
"update",
"(",
"ctx",
",",
"migrate",
"=",
"False",
")",
":",
"msg",
"=",
"'Update all dependencies'",
"if",
"migrate",
":",
"msg",
"+=",
"' and migrate data'",
"header",
"(",
"msg",
")",
"info",
"(",
"'Updating Python dependencies'",
")",
"lrun",
"(",
"'pip install -r requirements/develop.pip'",
")",
"lrun",
"(",
"'pip install -e .'",
")",
"info",
"(",
"'Updating JavaScript dependencies'",
")",
"lrun",
"(",
"'npm install'",
")",
"if",
"migrate",
":",
"info",
"(",
"'Migrating database'",
")",
"lrun",
"(",
"'udata db migrate'",
")"
] | Perform a development update | [
"Perform",
"a",
"development",
"update"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/tasks.py#L42-L55 | train |
opendatateam/udata | tasks.py | i18n | def i18n(ctx, update=False):
'''Extract translatable strings'''
header('Extract translatable strings')
info('Extract Python strings')
lrun('python setup.py extract_messages')
# Fix crowdin requiring Language with `2-digit` iso code in potfile
# to produce 2-digit iso code pofile
# Opening the catalog also allows to set extra metadata
potfile = join(ROOT, 'udata', 'translations', '{}.pot'.format(I18N_DOMAIN))
with open(potfile, 'rb') as infile:
catalog = read_po(infile, 'en')
catalog.copyright_holder = 'Open Data Team'
catalog.msgid_bugs_address = '[email protected]'
catalog.language_team = 'Open Data Team <[email protected]>'
catalog.last_translator = 'Open Data Team <[email protected]>'
catalog.revision_date = datetime.now(LOCALTZ)
with open(potfile, 'wb') as outfile:
write_po(outfile, catalog, width=80)
if update:
lrun('python setup.py update_catalog')
info('Extract JavaScript strings')
keys = set()
catalog = {}
catalog_filename = join(ROOT, 'js', 'locales',
'{}.en.json'.format(I18N_DOMAIN))
if exists(catalog_filename):
with codecs.open(catalog_filename, encoding='utf8') as f:
catalog = json.load(f)
globs = '*.js', '*.vue', '*.hbs'
regexps = [
re.compile(r'(?:|\.|\s|\{)_\(\s*(?:"|\')(.*?)(?:"|\')\s*(?:\)|,)'), # JS _('trad')
re.compile(r'v-i18n="(.*?)"'), # Vue.js directive v-i18n="trad"
re.compile(r'"\{\{\{?\s*\'(.*?)\'\s*\|\s*i18n\}\}\}?"'), # Vue.js filter {{ 'trad'|i18n }}
re.compile(r'{{_\s*"(.*?)"\s*}}'), # Handlebars {{_ "trad" }}
re.compile(r'{{_\s*\'(.*?)\'\s*}}'), # Handlebars {{_ 'trad' }}
re.compile(r'\:[a-z0-9_\-]+="\s*_\(\'(.*?)\'\)\s*"'), # Vue.js binding :prop="_('trad')"
]
for directory, _, _ in os.walk(join(ROOT, 'js')):
glob_patterns = (iglob(join(directory, g)) for g in globs)
for filename in itertools.chain(*glob_patterns):
print('Extracting messages from {0}'.format(green(filename)))
content = codecs.open(filename, encoding='utf8').read()
for regexp in regexps:
for match in regexp.finditer(content):
key = match.group(1)
key = key.replace('\\n', '\n')
keys.add(key)
if key not in catalog:
catalog[key] = key
# Remove old/not found translations
for key in catalog.keys():
if key not in keys:
del catalog[key]
with codecs.open(catalog_filename, 'w', encoding='utf8') as f:
json.dump(catalog, f, sort_keys=True, indent=4, ensure_ascii=False,
encoding='utf8', separators=(',', ': ')) | python | def i18n(ctx, update=False):
'''Extract translatable strings'''
header('Extract translatable strings')
info('Extract Python strings')
lrun('python setup.py extract_messages')
# Fix crowdin requiring Language with `2-digit` iso code in potfile
# to produce 2-digit iso code pofile
# Opening the catalog also allows to set extra metadata
potfile = join(ROOT, 'udata', 'translations', '{}.pot'.format(I18N_DOMAIN))
with open(potfile, 'rb') as infile:
catalog = read_po(infile, 'en')
catalog.copyright_holder = 'Open Data Team'
catalog.msgid_bugs_address = '[email protected]'
catalog.language_team = 'Open Data Team <[email protected]>'
catalog.last_translator = 'Open Data Team <[email protected]>'
catalog.revision_date = datetime.now(LOCALTZ)
with open(potfile, 'wb') as outfile:
write_po(outfile, catalog, width=80)
if update:
lrun('python setup.py update_catalog')
info('Extract JavaScript strings')
keys = set()
catalog = {}
catalog_filename = join(ROOT, 'js', 'locales',
'{}.en.json'.format(I18N_DOMAIN))
if exists(catalog_filename):
with codecs.open(catalog_filename, encoding='utf8') as f:
catalog = json.load(f)
globs = '*.js', '*.vue', '*.hbs'
regexps = [
re.compile(r'(?:|\.|\s|\{)_\(\s*(?:"|\')(.*?)(?:"|\')\s*(?:\)|,)'), # JS _('trad')
re.compile(r'v-i18n="(.*?)"'), # Vue.js directive v-i18n="trad"
re.compile(r'"\{\{\{?\s*\'(.*?)\'\s*\|\s*i18n\}\}\}?"'), # Vue.js filter {{ 'trad'|i18n }}
re.compile(r'{{_\s*"(.*?)"\s*}}'), # Handlebars {{_ "trad" }}
re.compile(r'{{_\s*\'(.*?)\'\s*}}'), # Handlebars {{_ 'trad' }}
re.compile(r'\:[a-z0-9_\-]+="\s*_\(\'(.*?)\'\)\s*"'), # Vue.js binding :prop="_('trad')"
]
for directory, _, _ in os.walk(join(ROOT, 'js')):
glob_patterns = (iglob(join(directory, g)) for g in globs)
for filename in itertools.chain(*glob_patterns):
print('Extracting messages from {0}'.format(green(filename)))
content = codecs.open(filename, encoding='utf8').read()
for regexp in regexps:
for match in regexp.finditer(content):
key = match.group(1)
key = key.replace('\\n', '\n')
keys.add(key)
if key not in catalog:
catalog[key] = key
# Remove old/not found translations
for key in catalog.keys():
if key not in keys:
del catalog[key]
with codecs.open(catalog_filename, 'w', encoding='utf8') as f:
json.dump(catalog, f, sort_keys=True, indent=4, ensure_ascii=False,
encoding='utf8', separators=(',', ': ')) | [
"def",
"i18n",
"(",
"ctx",
",",
"update",
"=",
"False",
")",
":",
"header",
"(",
"'Extract translatable strings'",
")",
"info",
"(",
"'Extract Python strings'",
")",
"lrun",
"(",
"'python setup.py extract_messages'",
")",
"# Fix crowdin requiring Language with `2-digit` iso code in potfile",
"# to produce 2-digit iso code pofile",
"# Opening the catalog also allows to set extra metadata",
"potfile",
"=",
"join",
"(",
"ROOT",
",",
"'udata'",
",",
"'translations'",
",",
"'{}.pot'",
".",
"format",
"(",
"I18N_DOMAIN",
")",
")",
"with",
"open",
"(",
"potfile",
",",
"'rb'",
")",
"as",
"infile",
":",
"catalog",
"=",
"read_po",
"(",
"infile",
",",
"'en'",
")",
"catalog",
".",
"copyright_holder",
"=",
"'Open Data Team'",
"catalog",
".",
"msgid_bugs_address",
"=",
"'[email protected]'",
"catalog",
".",
"language_team",
"=",
"'Open Data Team <[email protected]>'",
"catalog",
".",
"last_translator",
"=",
"'Open Data Team <[email protected]>'",
"catalog",
".",
"revision_date",
"=",
"datetime",
".",
"now",
"(",
"LOCALTZ",
")",
"with",
"open",
"(",
"potfile",
",",
"'wb'",
")",
"as",
"outfile",
":",
"write_po",
"(",
"outfile",
",",
"catalog",
",",
"width",
"=",
"80",
")",
"if",
"update",
":",
"lrun",
"(",
"'python setup.py update_catalog'",
")",
"info",
"(",
"'Extract JavaScript strings'",
")",
"keys",
"=",
"set",
"(",
")",
"catalog",
"=",
"{",
"}",
"catalog_filename",
"=",
"join",
"(",
"ROOT",
",",
"'js'",
",",
"'locales'",
",",
"'{}.en.json'",
".",
"format",
"(",
"I18N_DOMAIN",
")",
")",
"if",
"exists",
"(",
"catalog_filename",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"catalog_filename",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"catalog",
"=",
"json",
".",
"load",
"(",
"f",
")",
"globs",
"=",
"'*.js'",
",",
"'*.vue'",
",",
"'*.hbs'",
"regexps",
"=",
"[",
"re",
".",
"compile",
"(",
"r'(?:|\\.|\\s|\\{)_\\(\\s*(?:\"|\\')(.*?)(?:\"|\\')\\s*(?:\\)|,)'",
")",
",",
"# JS _('trad')",
"re",
".",
"compile",
"(",
"r'v-i18n=\"(.*?)\"'",
")",
",",
"# Vue.js directive v-i18n=\"trad\"",
"re",
".",
"compile",
"(",
"r'\"\\{\\{\\{?\\s*\\'(.*?)\\'\\s*\\|\\s*i18n\\}\\}\\}?\"'",
")",
",",
"# Vue.js filter {{ 'trad'|i18n }}",
"re",
".",
"compile",
"(",
"r'{{_\\s*\"(.*?)\"\\s*}}'",
")",
",",
"# Handlebars {{_ \"trad\" }}",
"re",
".",
"compile",
"(",
"r'{{_\\s*\\'(.*?)\\'\\s*}}'",
")",
",",
"# Handlebars {{_ 'trad' }}",
"re",
".",
"compile",
"(",
"r'\\:[a-z0-9_\\-]+=\"\\s*_\\(\\'(.*?)\\'\\)\\s*\"'",
")",
",",
"# Vue.js binding :prop=\"_('trad')\"",
"]",
"for",
"directory",
",",
"_",
",",
"_",
"in",
"os",
".",
"walk",
"(",
"join",
"(",
"ROOT",
",",
"'js'",
")",
")",
":",
"glob_patterns",
"=",
"(",
"iglob",
"(",
"join",
"(",
"directory",
",",
"g",
")",
")",
"for",
"g",
"in",
"globs",
")",
"for",
"filename",
"in",
"itertools",
".",
"chain",
"(",
"*",
"glob_patterns",
")",
":",
"print",
"(",
"'Extracting messages from {0}'",
".",
"format",
"(",
"green",
"(",
"filename",
")",
")",
")",
"content",
"=",
"codecs",
".",
"open",
"(",
"filename",
",",
"encoding",
"=",
"'utf8'",
")",
".",
"read",
"(",
")",
"for",
"regexp",
"in",
"regexps",
":",
"for",
"match",
"in",
"regexp",
".",
"finditer",
"(",
"content",
")",
":",
"key",
"=",
"match",
".",
"group",
"(",
"1",
")",
"key",
"=",
"key",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"keys",
".",
"add",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"catalog",
":",
"catalog",
"[",
"key",
"]",
"=",
"key",
"# Remove old/not found translations",
"for",
"key",
"in",
"catalog",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"keys",
":",
"del",
"catalog",
"[",
"key",
"]",
"with",
"codecs",
".",
"open",
"(",
"catalog_filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"catalog",
",",
"f",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"ensure_ascii",
"=",
"False",
",",
"encoding",
"=",
"'utf8'",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")"
] | Extract translatable strings | [
"Extract",
"translatable",
"strings"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/tasks.py#L134-L197 | train |
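
One of the extraction regexps above in action: the JS `_('trad')` pattern. New keys found in the sources are added to the catalog verbatim, and keys no longer found anywhere are pruned, exactly as the tail of the task does:

import re

js_call = re.compile(r'(?:|\.|\s|\{)_\(\s*(?:"|\')(.*?)(?:"|\')\s*(?:\)|,)')
content = 'var label = _("Download"); notify(_(\'Saved\'), label);'

keys = set()
for match in js_call.finditer(content):
    keys.add(match.group(1).replace('\\n', '\n'))

catalog = {'Download': 'Download', 'Obsolete key': 'Old translation'}
catalog.update((key, key) for key in keys if key not in catalog)
catalog = dict((key, value) for key, value in catalog.items() if key in keys)
print(sorted(catalog))  # ['Download', 'Saved']
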
opendatateam/udata | udata/api/__init__.py | output_json | def output_json(data, code, headers=None):
'''Use Flask JSON to serialize'''
resp = make_response(json.dumps(data), code)
resp.headers.extend(headers or {})
return resp | python | def output_json(data, code, headers=None):
'''Use Flask JSON to serialize'''
resp = make_response(json.dumps(data), code)
resp.headers.extend(headers or {})
return resp | [
"def",
"output_json",
"(",
"data",
",",
"code",
",",
"headers",
"=",
"None",
")",
":",
"resp",
"=",
"make_response",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"code",
")",
"resp",
".",
"headers",
".",
"extend",
"(",
"headers",
"or",
"{",
"}",
")",
"return",
"resp"
] | Use Flask JSON to serialize | [
"Use",
"Flask",
"JSON",
"to",
"serialize"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L191-L195 | train |
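
output_json is a representation transformer: the REST framework calls it to turn (data, code, headers) into a response, and routing serialization through Flask's own json module keeps app-level encoder customizations in effect. Exercised standalone (the registration line is how such transformers are typically hooked up, assumed rather than shown in this excerpt):

from flask import Flask, json, make_response

app = Flask(__name__)

def output_json(data, code, headers=None):
    resp = make_response(json.dumps(data), code)
    resp.headers.extend(headers or {})
    return resp

# Typical hookup (assumed): api.representations['application/json'] = output_json

with app.test_request_context():
    resp = output_json({'status': 'ok'}, 201, {'X-Total-Count': '1'})
    print(resp.status_code, resp.headers['X-Total-Count'])  # 201 1
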
opendatateam/udata | udata/api/__init__.py | extract_name_from_path | def extract_name_from_path(path):
"""Return a readable name from a URL path.
Useful to log requests on Piwik with categories tree structure.
See: http://piwik.org/faq/how-to/#faq_62
"""
base_path, query_string = path.split('?')
infos = base_path.strip('/').split('/')[2:] # Removes api/version.
if len(infos) > 1: # This is an object.
name = '{category} / {name}'.format(
category=infos[0].title(),
name=infos[1].replace('-', ' ').title()
)
else: # This is a collection.
name = '{category}'.format(category=infos[0].title())
return safe_unicode(name) | python | def extract_name_from_path(path):
"""Return a readable name from a URL path.
Useful to log requests on Piwik with categories tree structure.
See: http://piwik.org/faq/how-to/#faq_62
"""
base_path, query_string = path.split('?')
infos = base_path.strip('/').split('/')[2:] # Removes api/version.
if len(infos) > 1: # This is an object.
name = '{category} / {name}'.format(
category=infos[0].title(),
name=infos[1].replace('-', ' ').title()
)
else: # This is a collection.
name = '{category}'.format(category=infos[0].title())
return safe_unicode(name) | [
"def",
"extract_name_from_path",
"(",
"path",
")",
":",
"base_path",
",",
"query_string",
"=",
"path",
".",
"split",
"(",
"'?'",
")",
"infos",
"=",
"base_path",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"2",
":",
"]",
"# Removes api/version.",
"if",
"len",
"(",
"infos",
")",
">",
"1",
":",
"# This is an object.",
"name",
"=",
"'{category} / {name}'",
".",
"format",
"(",
"category",
"=",
"infos",
"[",
"0",
"]",
".",
"title",
"(",
")",
",",
"name",
"=",
"infos",
"[",
"1",
"]",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
".",
"title",
"(",
")",
")",
"else",
":",
"# This is a collection.",
"name",
"=",
"'{category}'",
".",
"format",
"(",
"category",
"=",
"infos",
"[",
"0",
"]",
".",
"title",
"(",
")",
")",
"return",
"safe_unicode",
"(",
"name",
")"
] | Return a readable name from a URL path.
Useful to log requests on Piwik with categories tree structure.
See: http://piwik.org/faq/how-to/#faq_62 | [
"Return",
"a",
"readable",
"name",
"from",
"a",
"URL",
"path",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L206-L221 | train |
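
Two worked examples for extract_name_from_path, inlined so they run standalone. One caveat the docstring does not mention: the hard two-way split on '?' means the path must contain exactly one question mark, otherwise the unpacking raises ValueError.

def extract_name_from_path(path):
    base_path, query_string = path.split('?')
    infos = base_path.strip('/').split('/')[2:]  # drops 'api' and the version
    if len(infos) > 1:
        return '{0} / {1}'.format(infos[0].title(),
                                  infos[1].replace('-', ' ').title())
    return infos[0].title()

print(extract_name_from_path('/api/1/datasets/?page=2'))
# Datasets
print(extract_name_from_path('/api/1/datasets/my-fine-dataset/?'))
# Datasets / My Fine Dataset
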
opendatateam/udata | udata/api/__init__.py | handle_unauthorized_file_type | def handle_unauthorized_file_type(error):
    '''Error occurring when the user tries to upload a non-allowed file type'''
url = url_for('api.allowed_extensions', _external=True)
msg = (
        'This file type is not allowed. '
'The allowed file type list is available at {url}'
).format(url=url)
return {'message': msg}, 400 | python | def handle_unauthorized_file_type(error):
    '''Error occurring when the user tries to upload a non-allowed file type'''
url = url_for('api.allowed_extensions', _external=True)
msg = (
        'This file type is not allowed. '
'The allowed file type list is available at {url}'
).format(url=url)
return {'message': msg}, 400 | [
"def",
"handle_unauthorized_file_type",
"(",
"error",
")",
":",
"url",
"=",
"url_for",
"(",
"'api.allowed_extensions'",
",",
"_external",
"=",
"True",
")",
"msg",
"=",
"(",
"'This file type is not allowed.'",
"'The allowed file type list is available at {url}'",
")",
".",
"format",
"(",
"url",
"=",
"url",
")",
"return",
"{",
"'message'",
":",
"msg",
"}",
",",
"400"
] | Error occurring when the user tries to upload a non-allowed file type | [
"Error",
"occuring",
"when",
"the",
"user",
"try",
"to",
"upload",
"a",
"non",
"-",
"allowed",
"file",
"type"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L259-L266 | train |
opendatateam/udata | udata/api/__init__.py | UDataApi.authentify | def authentify(self, func):
'''Authentify the user if credentials are given'''
@wraps(func)
def wrapper(*args, **kwargs):
if current_user.is_authenticated:
return func(*args, **kwargs)
apikey = request.headers.get(HEADER_API_KEY)
if apikey:
try:
user = User.objects.get(apikey=apikey)
except User.DoesNotExist:
self.abort(401, 'Invalid API Key')
if not login_user(user, False):
self.abort(401, 'Inactive user')
else:
oauth2.check_credentials()
return func(*args, **kwargs)
return wrapper | python | def authentify(self, func):
'''Authentify the user if credentials are given'''
@wraps(func)
def wrapper(*args, **kwargs):
if current_user.is_authenticated:
return func(*args, **kwargs)
apikey = request.headers.get(HEADER_API_KEY)
if apikey:
try:
user = User.objects.get(apikey=apikey)
except User.DoesNotExist:
self.abort(401, 'Invalid API Key')
if not login_user(user, False):
self.abort(401, 'Inactive user')
else:
oauth2.check_credentials()
return func(*args, **kwargs)
return wrapper | [
"def",
"authentify",
"(",
"self",
",",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_user",
".",
"is_authenticated",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"apikey",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"HEADER_API_KEY",
")",
"if",
"apikey",
":",
"try",
":",
"user",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"apikey",
"=",
"apikey",
")",
"except",
"User",
".",
"DoesNotExist",
":",
"self",
".",
"abort",
"(",
"401",
",",
"'Invalid API Key'",
")",
"if",
"not",
"login_user",
"(",
"user",
",",
"False",
")",
":",
"self",
".",
"abort",
"(",
"401",
",",
"'Inactive user'",
")",
"else",
":",
"oauth2",
".",
"check_credentials",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | Authentify the user if credentials are given | [
"Authentify",
"the",
"user",
"if",
"credentials",
"are",
"given"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L118-L137 | train |
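
From the client side, the API-key branch above means an authenticated call only needs one extra header. The header name comes from HEADER_API_KEY, which this excerpt does not show; 'X-API-KEY' below is an assumption, as are the host and key. A hedged sketch with the third-party requests library:

import requests  # third-party: pip install requests

resp = requests.get('https://www.example.com/api/1/me/',
                    headers={'X-API-KEY': 'your-api-key'})
print(resp.status_code)  # 401 for an invalid key or inactive user, per the checks above
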
opendatateam/udata | udata/api/__init__.py | UDataApi.validate | def validate(self, form_cls, obj=None):
'''Validate a form from the request and handle errors'''
if 'application/json' not in request.headers.get('Content-Type'):
errors = {'Content-Type': 'expecting application/json'}
self.abort(400, errors=errors)
form = form_cls.from_json(request.json, obj=obj, instance=obj,
csrf_enabled=False)
if not form.validate():
self.abort(400, errors=form.errors)
return form | python | def validate(self, form_cls, obj=None):
'''Validate a form from the request and handle errors'''
if 'application/json' not in request.headers.get('Content-Type'):
errors = {'Content-Type': 'expecting application/json'}
self.abort(400, errors=errors)
form = form_cls.from_json(request.json, obj=obj, instance=obj,
csrf_enabled=False)
if not form.validate():
self.abort(400, errors=form.errors)
return form | [
"def",
"validate",
"(",
"self",
",",
"form_cls",
",",
"obj",
"=",
"None",
")",
":",
"if",
"'application/json'",
"not",
"in",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
":",
"errors",
"=",
"{",
"'Content-Type'",
":",
"'expecting application/json'",
"}",
"self",
".",
"abort",
"(",
"400",
",",
"errors",
"=",
"errors",
")",
"form",
"=",
"form_cls",
".",
"from_json",
"(",
"request",
".",
"json",
",",
"obj",
"=",
"obj",
",",
"instance",
"=",
"obj",
",",
"csrf_enabled",
"=",
"False",
")",
"if",
"not",
"form",
".",
"validate",
"(",
")",
":",
"self",
".",
"abort",
"(",
"400",
",",
"errors",
"=",
"form",
".",
"errors",
")",
"return",
"form"
] | Validate a form from the request and handle errors | [
"Validate",
"a",
"form",
"from",
"the",
"request",
"and",
"handle",
"errors"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L139-L148 | train |
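
The contract validate() enforces, shown with a stub in place of udata's WTForms classes: non-JSON requests and invalid forms both become 400s, with per-field errors in the body. (Edge case worth noting in the original: request.headers.get('Content-Type') returns None when the header is absent, so the `in` test would raise TypeError there.)

class StubForm(object):
    def __init__(self, data):
        self.data = data or {}
        self.errors = {}

    def validate(self):
        if not self.data.get('title'):
            self.errors['title'] = ['This field is required.']
        return not self.errors

form = StubForm({'description': 'no title given'})
if not form.validate():
    print(400, {'errors': form.errors})  # mirrors self.abort(400, errors=...)
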
opendatateam/udata | udata/api/__init__.py | UDataApi.unauthorized | def unauthorized(self, response):
'''Override to change the WWW-Authenticate challenge'''
realm = current_app.config.get('HTTP_OAUTH_REALM', 'uData')
challenge = 'Bearer realm="{0}"'.format(realm)
response.headers['WWW-Authenticate'] = challenge
return response | python | def unauthorized(self, response):
'''Override to change the WWW-Authenticate challenge'''
realm = current_app.config.get('HTTP_OAUTH_REALM', 'uData')
challenge = 'Bearer realm="{0}"'.format(realm)
response.headers['WWW-Authenticate'] = challenge
return response | [
"def",
"unauthorized",
"(",
"self",
",",
"response",
")",
":",
"realm",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'HTTP_OAUTH_REALM'",
",",
"'uData'",
")",
"challenge",
"=",
"'Bearer realm=\"{0}\"'",
".",
"format",
"(",
"realm",
")",
"response",
".",
"headers",
"[",
"'WWW-Authenticate'",
"]",
"=",
"challenge",
"return",
"response"
] | Override to change the WWW-Authenticate challenge | [
"Override",
"to",
"change",
"the",
"WWW",
"-",
"Authenticate",
"challenge"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/__init__.py#L153-L159 | train |
opendatateam/udata | udata/core/followers/api.py | FollowAPI.get | def get(self, id):
'''List all followers for a given object'''
args = parser.parse_args()
model = self.model.objects.only('id').get_or_404(id=id)
qs = Follow.objects(following=model, until=None)
return qs.paginate(args['page'], args['page_size']) | python | def get(self, id):
'''List all followers for a given object'''
args = parser.parse_args()
model = self.model.objects.only('id').get_or_404(id=id)
qs = Follow.objects(following=model, until=None)
return qs.paginate(args['page'], args['page_size']) | [
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"model",
"=",
"self",
".",
"model",
".",
"objects",
".",
"only",
"(",
"'id'",
")",
".",
"get_or_404",
"(",
"id",
"=",
"id",
")",
"qs",
"=",
"Follow",
".",
"objects",
"(",
"following",
"=",
"model",
",",
"until",
"=",
"None",
")",
"return",
"qs",
".",
"paginate",
"(",
"args",
"[",
"'page'",
"]",
",",
"args",
"[",
"'page_size'",
"]",
")"
] | List all followers for a given object | [
"List",
"all",
"followers",
"for",
"a",
"given",
"object"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/followers/api.py#L47-L52 | train |
opendatateam/udata | udata/core/followers/api.py | FollowAPI.post | def post(self, id):
'''Follow an object given its ID'''
model = self.model.objects.only('id').get_or_404(id=id)
follow, created = Follow.objects.get_or_create(
follower=current_user.id, following=model, until=None)
count = Follow.objects.followers(model).count()
if not current_app.config['TESTING']:
tracking.send_signal(on_new_follow, request, current_user)
return {'followers': count}, 201 if created else 200 | python | def post(self, id):
'''Follow an object given its ID'''
model = self.model.objects.only('id').get_or_404(id=id)
follow, created = Follow.objects.get_or_create(
follower=current_user.id, following=model, until=None)
count = Follow.objects.followers(model).count()
if not current_app.config['TESTING']:
tracking.send_signal(on_new_follow, request, current_user)
return {'followers': count}, 201 if created else 200 | [
"def",
"post",
"(",
"self",
",",
"id",
")",
":",
"model",
"=",
"self",
".",
"model",
".",
"objects",
".",
"only",
"(",
"'id'",
")",
".",
"get_or_404",
"(",
"id",
"=",
"id",
")",
"follow",
",",
"created",
"=",
"Follow",
".",
"objects",
".",
"get_or_create",
"(",
"follower",
"=",
"current_user",
".",
"id",
",",
"following",
"=",
"model",
",",
"until",
"=",
"None",
")",
"count",
"=",
"Follow",
".",
"objects",
".",
"followers",
"(",
"model",
")",
".",
"count",
"(",
")",
"if",
"not",
"current_app",
".",
"config",
"[",
"'TESTING'",
"]",
":",
"tracking",
".",
"send_signal",
"(",
"on_new_follow",
",",
"request",
",",
"current_user",
")",
"return",
"{",
"'followers'",
":",
"count",
"}",
",",
"201",
"if",
"created",
"else",
"200"
] | Follow an object given its ID | [
"Follow",
"an",
"object",
"given",
"its",
"ID"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/followers/api.py#L56-L64 | train |
opendatateam/udata | udata/core/followers/api.py | FollowAPI.delete | def delete(self, id):
'''Unfollow an object given its ID'''
model = self.model.objects.only('id').get_or_404(id=id)
follow = Follow.objects.get_or_404(follower=current_user.id,
following=model,
until=None)
follow.until = datetime.now()
follow.save()
count = Follow.objects.followers(model).count()
return {'followers': count}, 200 | python | def delete(self, id):
'''Unfollow an object given its ID'''
model = self.model.objects.only('id').get_or_404(id=id)
follow = Follow.objects.get_or_404(follower=current_user.id,
following=model,
until=None)
follow.until = datetime.now()
follow.save()
count = Follow.objects.followers(model).count()
return {'followers': count}, 200 | [
"def",
"delete",
"(",
"self",
",",
"id",
")",
":",
"model",
"=",
"self",
".",
"model",
".",
"objects",
".",
"only",
"(",
"'id'",
")",
".",
"get_or_404",
"(",
"id",
"=",
"id",
")",
"follow",
"=",
"Follow",
".",
"objects",
".",
"get_or_404",
"(",
"follower",
"=",
"current_user",
".",
"id",
",",
"following",
"=",
"model",
",",
"until",
"=",
"None",
")",
"follow",
".",
"until",
"=",
"datetime",
".",
"now",
"(",
")",
"follow",
".",
"save",
"(",
")",
"count",
"=",
"Follow",
".",
"objects",
".",
"followers",
"(",
"model",
")",
".",
"count",
"(",
")",
"return",
"{",
"'followers'",
":",
"count",
"}",
",",
"200"
] | Unfollow an object given its ID | [
"Unfollow",
"an",
"object",
"given",
"its",
"ID"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/followers/api.py#L68-L77 | train |
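
The follow/unfollow pair above never deletes anything: unfollowing stamps `until`, and every follower count filters on `until=None`, so history is preserved. The same soft-close pattern on plain dicts:

from datetime import datetime

follows = [{'follower': 'alice', 'until': None}]

def unfollow(follower):
    for follow in follows:
        if follow['follower'] == follower and follow['until'] is None:
            follow['until'] = datetime.now()  # close, do not delete

def follower_count():
    return sum(1 for follow in follows if follow['until'] is None)

print(follower_count())  # 1
unfollow('alice')
print(follower_count())  # 0
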