Dataset schema (one row per extracted function; length figures are the viewer's min/max string lengths, and `stringclasses` columns contain a single distinct value):

| column           | dtype    | stats                      |
|------------------|----------|----------------------------|
| repo             | string   | lengths 7-55               |
| path             | string   | lengths 4-127              |
| func_name        | string   | lengths 1-88               |
| original_string  | string   | lengths 75-19.8k           |
| language         | string   | 1 distinct value (python)  |
| code             | string   | lengths 75-19.8k           |
| code_tokens      | sequence | tokenized copy of code     |
| docstring        | string   | lengths 3-17.3k            |
| docstring_tokens | sequence | tokenized copy of docstring|
| sha              | string   | lengths 40-40              |
| url              | string   | lengths 87-242             |
| partition        | string   | 1 distinct value (train)   |

In the sample rows below, `original_string` and `code` are identical, and the token columns are tokenized copies of those fields, so each row is shown once as repo, path, function name, the function's code, then sha, url, and partition.
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableJSONResponseMixin.get_json_response_object
```python
def get_json_response_object(self, datatable):
    """
    Returns the JSON-compatible dictionary that will be serialized for an AJAX response.

    The value names are in the form "s~" for strings, "i~" for integers, and "a~" for
    arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js.
    "aa~" means "array of arrays".  In some instances, the author uses "ao~" for
    "array of objects", an object being a javascript dictionary.
    """
    # Ensure the object list is calculated.
    # Calling get_records() will do this implicitly, but we want simultaneous access to
    # the 'total_initial_record_count' and 'unpaged_record_count' values.
    datatable.populate_records()

    draw = getattr(self.request, self.request.method).get('draw', None)
    if draw is not None:
        draw = escape_uri_path(draw)

    response_data = {
        'draw': draw,
        'recordsFiltered': datatable.unpaged_record_count,
        'recordsTotal': datatable.total_initial_record_count,
        'data': [dict(record, **{
            'DT_RowId': record.pop('pk'),
            'DT_RowData': record.pop('_extra_data'),
        }) for record in datatable.get_records()],
    }
    return response_data
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L28-L55
train
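For orientation, here is a sketch of the payload this method assembles, following the DataTables server-side protocol field names used above; the row values and counts are invented for illustration:

```python
# Hypothetical output of get_json_response_object for a two-row page.
response_data = {
    'draw': '3',            # echoed back (URI-escaped) from the client request
    'recordsFiltered': 2,   # row count after search filtering, before paging
    'recordsTotal': 57,     # row count before any filtering
    'data': [
        {'DT_RowId': 1, 'DT_RowData': {}, 'name': "Alice"},
        {'DT_RowId': 2, 'DT_RowData': {}, 'name': "Bob"},
    ],
}
```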
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableJSONResponseMixin.serialize_to_json
```python
def serialize_to_json(self, response_data):
    """ Returns the JSON string for the compiled data object. """
    indent = None
    if settings.DEBUG:
        indent = 4

    # Serialize to JSON with Django's encoder: adds date/time, decimal,
    # and UUID support.
    return json.dumps(response_data, indent=indent, cls=DjangoJSONEncoder)
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L57-L66
train
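The comment about DjangoJSONEncoder can be checked with a small standalone snippet (assuming Django is installed; the encoder itself needs no settings configuration):

```python
import json
from datetime import date
from decimal import Decimal
from uuid import UUID

from django.core.serializers.json import DjangoJSONEncoder

# Types the stdlib encoder would reject are serialized to strings.
payload = {
    'when': date(2024, 1, 31),
    'price': Decimal('9.99'),
    'token': UUID('12345678-1234-5678-1234-567812345678'),
}
print(json.dumps(payload, indent=4, cls=DjangoJSONEncoder))
```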
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableMixin.get_ajax
```python
def get_ajax(self, request, *args, **kwargs):
    """
    Called when accessed via AJAX on the request method specified by the Datatable.
    """
    response_data = self.get_json_response_object(self._datatable)
    response = HttpResponse(self.serialize_to_json(response_data),
                            content_type="application/json")
    return response
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L79-L86
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_active_ajax_datatable
```python
def get_active_ajax_datatable(self):
    """ Returns a single datatable according to the hint GET variable from an AJAX request. """
    data = getattr(self.request, self.request.method)
    datatables_dict = self.get_datatables(only=data['datatable'])
    return list(datatables_dict.values())[0]
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L199-L203
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_datatables
```python
def get_datatables(self, only=None):
    """ Returns a dict of the datatables served by this view. """
    if not hasattr(self, '_datatables'):
        self._datatables = {}

        datatable_classes = self.get_datatable_classes()
        for name, datatable_class in datatable_classes.items():
            if only and name != only:
                continue

            queryset_getter_name = 'get_%s_datatable_queryset' % (name,)
            queryset_getter = getattr(self, queryset_getter_name, None)
            if queryset_getter is None:
                raise ValueError("%r must declare a method %r." % (
                    self.__class__.__name__, queryset_getter_name))
            queryset = queryset_getter()

            if datatable_class is None:
                class AutoMeta:
                    model = queryset.model
                opts = AutoMeta()
                datatable_class = Datatable
            else:
                opts = datatable_class.options_class(datatable_class._meta)

            kwargs = self.get_default_datatable_kwargs(object_list=queryset)
            kwargs_getter_name = 'get_%s_datatable_kwargs' % (name,)
            kwargs_getter = getattr(self, kwargs_getter_name, None)
            if kwargs_getter:
                kwargs = kwargs_getter(**kwargs)
            if 'url' in kwargs:
                kwargs['url'] = kwargs['url'] + "?datatable=%s" % (name,)

            for meta_opt in opts.__dict__:
                if meta_opt in kwargs:
                    setattr(opts, meta_opt, kwargs.pop(meta_opt))

            datatable_class = type('%s_Synthesized' % (datatable_class.__name__,),
                                   (datatable_class,), {
                '__module__': datatable_class.__module__,
                'Meta': opts,
            })
            self._datatables[name] = datatable_class(**kwargs)
    return self._datatables
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L205-L246
train
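A minimal sketch of the naming convention get_datatables() depends on; the view, models, and table names below are hypothetical, the import path is assumed, and get_datatable_classes() is overridden directly rather than assuming the backing class attribute:

```python
from django.views.generic import TemplateView
from datatableview.views import MultipleDatatableMixin

from myapp.models import Author, Book  # hypothetical models


class DashboardView(MultipleDatatableMixin, TemplateView):
    template_name = "dashboard.html"

    def get_datatable_classes(self):
        # None means "synthesize a plain Datatable from the queryset's model".
        return {'authors': None, 'books': None}

    # One 'get_<name>_datatable_queryset' method per table name is required,
    # or get_datatables() raises ValueError.
    def get_authors_datatable_queryset(self):
        return Author.objects.all()

    def get_books_datatable_queryset(self):
        return Book.objects.all()
```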
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_default_datatable_kwargs
```python
def get_default_datatable_kwargs(self, **kwargs):
    """
    Builds the default set of kwargs for initializing a Datatable class.

    Note that by default the MultipleDatatableMixin does not support any configuration
    via the view's class attributes, and instead relies completely on the Datatable
    class itself to declare its configuration details.
    """
    kwargs['view'] = self

    # This is provided by default, but if the view is instantiated outside of the
    # request cycle (such as for the purposes of embedding that view's datatable
    # elsewhere), the request may not be required, so the user may not have a
    # compelling reason to go through the trouble of putting it on self.
    if hasattr(self, 'request'):
        kwargs['url'] = self.request.path
        kwargs['query_config'] = getattr(self.request, self.request.method)
    else:
        kwargs['query_config'] = {}

    return kwargs
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L254-L274
train
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
get_column_for_modelfield
```python
def get_column_for_modelfield(model_field):
    """ Return the built-in Column class for a model field class. """
    # If the field points to another model, we want to get the pk field of that other
    # model and use that as the real field.  It is possible that a ForeignKey points to
    # a model with table inheritance, however, so we need to traverse the internal
    # OneToOneField as well, so this will climb the 'pk' field chain until we have
    # something real.
    while model_field.related_model:
        model_field = model_field.related_model._meta.pk
    for ColumnClass, modelfield_classes in COLUMN_CLASSES:
        if isinstance(model_field, tuple(modelfield_classes)):
            return ColumnClass
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L52-L63
train
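The loop's pk-chain climb is easiest to see with a multi-table inheritance example (models hypothetical):

```python
# Hypothetical models illustrating the 'pk' field chain:
#
#   class Place(models.Model): ...        # pk: AutoField
#   class Restaurant(Place): ...          # pk: parent-link OneToOneField -> Place
#   class Review(models.Model):
#       restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
#
# Resolving Review.restaurant: the first iteration lands on Restaurant's pk,
# which is itself a OneToOneField (its related_model is Place), so the loop
# runs again and finally stops at Place's AutoField; an integer-backed column
# class is then chosen from COLUMN_CLASSES.
```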
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn.get_source_value
```python
def get_source_value(self, obj, source, **kwargs):
    """
    Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the
    object to which term coercions and the query type lookup are delegated.
    """
    result = []
    for sub_source in self.expand_source(source):
        # Call super() to get default logic, but send it the 'sub_source'
        sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs)
        result.extend(sub_result)
    return result
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L544-L554
train
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn._get_flat_db_sources
```python
def _get_flat_db_sources(self, model):
    """ Return a flattened representation of the individual ``sources`` lists. """
    sources = []
    for source in self.sources:
        for sub_source in self.expand_source(source):
            target_field = self.resolve_source(model, sub_source)
            if target_field:
                sources.append(sub_source)
    return sources
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L562-L570
train
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn.get_source_handler
```python
def get_source_handler(self, model, source):
    """ Allow the nested Column source to be its own handler. """
    if isinstance(source, Column):
        return source

    # Generate a generic handler for the source
    modelfield = resolve_orm_path(model, source)
    column_class = get_column_for_modelfield(modelfield)
    return column_class()
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L572-L580
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.dispatch
```python
def dispatch(self, request, *args, **kwargs):
    """
    Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax.
    """
    if request.GET.get(self.xeditable_fieldname_param):
        return self.get_ajax_xeditable_choices(request, *args, **kwargs)
    return super(XEditableMixin, self).dispatch(request, *args, **kwargs)
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L26-L30
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_ajax_xeditable_choices
```python
def get_ajax_xeditable_choices(self, request, *args, **kwargs):
    """ AJAX GET handler for xeditable queries asking for field choice lists. """
    field_name = request.GET.get(self.xeditable_fieldname_param)
    if not field_name:
        return HttpResponseBadRequest("Field name must be given")

    queryset = self.get_queryset()
    if not self.model:
        self.model = queryset.model

    # Sanitize the requested field name by limiting valid names to the
    # datatable_options columns
    from datatableview.views import legacy
    if isinstance(self, legacy.LegacyDatatableMixin):
        columns = self._get_datatable_options()['columns']
        for name in columns:
            if isinstance(name, (list, tuple)):
                name = name[1]
            if name == field_name:
                break
        else:
            return HttpResponseBadRequest("Invalid field name")
    else:
        datatable = self.get_datatable()
        if not hasattr(datatable, 'config'):
            datatable.configure()
        if field_name not in datatable.config['columns']:
            return HttpResponseBadRequest("Invalid field name")

    field = self.model._meta.get_field(field_name)
    choices = self.get_field_choices(field, field_name)
    return HttpResponse(json.dumps(choices))
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L32-L62
train
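A hedged sketch of the exchange this handler implements; the URL, field, and choice values are invented, and the actual query parameter name is whatever ``xeditable_fieldname_param`` is set to on the view:

```python
# GET /entries/?<xeditable_fieldname_param>=status
# -> 200: [{"value": "d", "text": "Draft"}, {"value": "p", "text": "Published"}]
#
# With ?select2=1 also present, get_field_choices() (further below) switches
# the key names to "id"/"text" for select2-style widgets.
#
# GET /entries/?<xeditable_fieldname_param>=not_a_column
# -> 400: "Invalid field name"
```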
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.post
```python
def post(self, request, *args, **kwargs):
    """
    Builds a dynamic form that targets only the field in question, and saves the
    modification.
    """
    self.object_list = None
    form = self.get_xeditable_form(self.get_xeditable_form_class())
    if form.is_valid():
        obj = self.get_update_object(form)
        if obj is None:
            data = json.dumps({
                'status': 'error',
                'message': "Object does not exist."
            })
            return HttpResponse(data, content_type="application/json", status=404)
        return self.update_object(form, obj)
    else:
        data = json.dumps({
            'status': 'error',
            'message': "Invalid request",
            'form_errors': form.errors,
        })
        return HttpResponse(data, content_type="application/json", status=400)
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L64-L85
train
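The three response bodies post() can produce, reconstructed from the branches above (the form_errors content is a hypothetical example):

```python
# Success (delegated to update_object(), HTTP 200):
#   {"status": "success"}
#
# Object lookup failed (HTTP 404):
#   {"status": "error", "message": "Object does not exist."}
#
# Form validation failed (HTTP 400):
#   {"status": "error", "message": "Invalid request",
#    "form_errors": {"value": ["This field is required."]}}
```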
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_xeditable_form_kwargs
```python
def get_xeditable_form_kwargs(self):
    """ Returns a dict of keyword arguments to be sent to the xeditable form class. """
    kwargs = {
        'model': self.get_queryset().model,
    }
    if self.request.method in ('POST', 'PUT'):
        kwargs.update({
            'data': self.request.POST,
        })
    return kwargs
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L91-L100
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_update_object
```python
def get_update_object(self, form):
    """
    Retrieves the target object based on the update form's ``pk`` and the table's
    queryset.
    """
    pk = form.cleaned_data['pk']
    queryset = self.get_queryset()
    try:
        obj = queryset.get(pk=pk)
    except queryset.model.DoesNotExist:
        obj = None
    return obj
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L106-L117
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.update_object
```python
def update_object(self, form, obj):
    """ Saves the new value to the target object. """
    field_name = form.cleaned_data['name']
    value = form.cleaned_data['value']
    setattr(obj, field_name, value)
    save_kwargs = {}
    if CAN_UPDATE_FIELDS:
        save_kwargs['update_fields'] = [field_name]
    obj.save(**save_kwargs)

    data = json.dumps({
        'status': 'success',
    })
    return HttpResponse(data, content_type="application/json")
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L119-L132
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_field_choices
```python
def get_field_choices(self, field, field_name):
    """
    Returns the valid choices for ``field``.  The ``field_name`` argument is given for
    convenience.
    """
    if self.request.GET.get('select2'):
        names = ['id', 'text']
    else:
        names = ['value', 'text']

    # Note: the source as recorded omitted the interpolation, looking up the literal
    # attribute name 'get_field_%s_choices'; the field name is interpolated here so
    # that per-field override hooks are actually reachable.
    choices_getter = getattr(self, 'get_field_%s_choices' % (field_name,), None)
    if choices_getter is None:
        if isinstance(field, ForeignKey):
            choices_getter = self._get_foreignkey_choices
        else:
            choices_getter = self._get_default_choices
    return [dict(zip(names, choice)) for choice in choices_getter(field, field_name)]
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L134-L149
train
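With the interpolated lookup fixed above, a per-field override hook becomes reachable; the method name pattern follows the format string, and the field and choices here are hypothetical:

```python
# On the view: consulted before the ForeignKey/default fallbacks.
def get_field_status_choices(self, field, field_name):
    # Each (value, text) pair is zipped against names ('value'/'text', or
    # 'id'/'text' when ?select2 is passed).
    return [("d", "Draft"), ("p", "Published")]
```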
pivotal-energy-solutions/django-datatable-view
datatableview/datatables.py
ValuesDatatable.preload_record_data
```python
def preload_record_data(self, obj):
    """
    Modifies the ``obj`` values dict to alias the selected values to the column name
    that asked for its selection.

    For example, a datatable that declares a column ``'blog'`` which has a related
    lookup source ``'blog__name'`` will ensure that the selected value exists in
    ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how
    it was selected, the latter because that was the column name used to select it).

    :Example:

        ``{'pk': 1, 'blog__name': "My Blog"}`` becomes
        ``{'pk': 1, 'blog__name': "My Blog", 'blog': "My Blog"}``

    When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`,
    the column name's entry in ``obj`` will be a list of each of those values.

    :Example:

        ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` becomes
        ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}``

    In every situation, the original selected values will always be retained in
    ``obj``.
    """
    data = {}
    for orm_path, column_name in self.value_queries.items():
        value = obj[orm_path]
        if column_name not in data:
            data[column_name] = value
        else:
            if not isinstance(data[column_name], (tuple, list)):
                data[column_name] = [data[column_name]]
            data[column_name].append(value)
    obj.update(data)

    return super(ValuesDatatable, self).preload_record_data(obj)
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/datatables.py#L962-L998
train
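A pure-Python rerun of the docstring's second example, with a hypothetical value_queries mapping, confirms the aliasing behavior:

```python
value_queries = {'pk': 'pk', 'blog__name': 'blog', 'blog__id': 'blog'}
obj = {'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}

data = {}
for orm_path, column_name in value_queries.items():
    value = obj[orm_path]
    if column_name not in data:
        data[column_name] = value
    else:
        if not isinstance(data[column_name], (tuple, list)):
            data[column_name] = [data[column_name]]
        data[column_name].append(value)
obj.update(data)

# Original keys are retained; 'blog' collects both of its sources in order.
assert obj == {'pk': 1, 'blog__name': "My Blog", 'blog__id': 5,
               'blog': ["My Blog", 5]}
```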
pivotal-energy-solutions/django-datatable-view
datatableview/datatables.py
LegacyDatatable.resolve_virtual_columns
```python
def resolve_virtual_columns(self, *names):
    """
    Assume that all ``names`` are legacy-style tuple declarations, and generate modern
    column instances to match the behavior of the old syntax.
    """
    from .views.legacy import get_field_definition
    virtual_columns = {}
    for name in names:
        field = get_field_definition(name)
        column = TextColumn(sources=field.fields, label=field.pretty_name,
                            processor=field.callback)
        column.name = field.pretty_name if field.pretty_name else field.fields[0]
        virtual_columns[name] = column

    # Make sure it's in the same order as originally defined
    new_columns = OrderedDict()
    for name in self._meta.columns:  # Can't use self.config yet, hasn't been generated
        if self.columns.get(name):
            column = self.columns[name]
        else:
            column = virtual_columns[name]
        new_columns[column.name] = column
    self.columns = new_columns
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/datatables.py#L1010-L1032
train
pivotal-energy-solutions/django-datatable-view
datatableview/forms.py
XEditableUpdateForm.set_value_field
```python
def set_value_field(self, model, field_name):
    """
    Adds a ``value`` field to this form that uses the appropriate formfield for the
    named target field.  This will help to ensure that the value is correctly
    validated.
    """
    fields = fields_for_model(model, fields=[field_name])
    self.fields['value'] = fields[field_name]
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/forms.py#L30-L36
train
pivotal-energy-solutions/django-datatable-view
datatableview/forms.py
XEditableUpdateForm.clean_name
```python
def clean_name(self):
    """ Validates that the ``name`` field corresponds to a field on the model. """
    field_name = self.cleaned_data['name']

    # get_all_field_names is deprecated in Django 1.8, this also fixes proxied models
    if hasattr(self.model._meta, 'get_fields'):
        field_names = [field.name for field in self.model._meta.get_fields()]
    else:
        field_names = self.model._meta.get_all_field_names()

    if field_name not in field_names:
        raise ValidationError("%r is not a valid field." % field_name)
    return field_name
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/forms.py#L38-L48
train
pivotal-energy-solutions/django-datatable-view
datatableview/views/legacy.py
get_field_definition
```python
def get_field_definition(field_definition):
    """ Normalizes a field definition into its component parts, even if some are missing. """
    if not isinstance(field_definition, (tuple, list)):
        field_definition = [field_definition]
    else:
        field_definition = list(field_definition)

    if len(field_definition) == 1:
        field = [None, field_definition, None]
    elif len(field_definition) == 2:
        field = field_definition + [None]
    elif len(field_definition) == 3:
        field = field_definition
    else:
        raise ValueError("Invalid field definition format.")

    if not isinstance(field[1], (tuple, list)):
        field[1] = (field[1],)
    field[1] = tuple(name for name in field[1] if name is not None)

    return FieldDefinitionTuple(*field)
```
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/legacy.py#L31-L51
train
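Assuming FieldDefinitionTuple is a namedtuple of (pretty_name, fields, callback), the normalization behaves as follows (results shown as comments; format_blog is a hypothetical processor callback):

```python
from datatableview.views.legacy import get_field_definition

def format_blog(instance, **kwargs):  # hypothetical processor callback
    ...

get_field_definition('headline')
# -> FieldDefinitionTuple(pretty_name=None, fields=('headline',), callback=None)

get_field_definition(("Headline", "headline"))
# -> FieldDefinitionTuple(pretty_name='Headline', fields=('headline',), callback=None)

get_field_definition(("Blog", ("blog__name", "blog__id"), format_blog))
# -> FieldDefinitionTuple(pretty_name='Blog', fields=('blog__name', 'blog__id'),
#                         callback=format_blog)
```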
pivotal-energy-solutions/django-datatable-view
datatableview/cache.py
get_cached_data
```python
def get_cached_data(datatable, **kwargs):
    """ Returns the cached object list under the appropriate key, or None if not set. """
    cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    data = cache.get(cache_key)
    log.debug("Reading data from cache at %r: %r", cache_key, data)
    return data
```
Returns the cached object list under the appropriate key, or None if not set.
[ "Returns", "the", "cached", "object", "list", "under", "the", "appropriate", "key", "or", "None", "if", "not", "set", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/cache.py#L101-L106
train
pivotal-energy-solutions/django-datatable-view
datatableview/cache.py
cache_data
def cache_data(datatable, data, **kwargs):
    """ Stores the object list in the cache under the appropriate key. """
    cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    log.debug("Setting data to cache at %r: %r", cache_key, data)
    cache.set(cache_key, data)
python
def cache_data(datatable, data, **kwargs):
    """ Stores the object list in the cache under the appropriate key. """
    cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    log.debug("Setting data to cache at %r: %r", cache_key, data)
    cache.set(cache_key, data)
[ "def", "cache_data", "(", "datatable", ",", "data", ",", "*", "*", "kwargs", ")", ":", "cache_key", "=", "'%s%s'", "%", "(", "CACHE_PREFIX", ",", "datatable", ".", "get_cache_key", "(", "*", "*", "kwargs", ")", ")", "log", ".", "debug", "(", "\"Setting data to cache at %r: %r\"", ",", "cache_key", ",", "data", ")", "cache", ".", "set", "(", "cache_key", ",", "data", ")" ]
Stores the object list in the cache under the appropriate key.
[ "Stores", "the", "object", "list", "in", "the", "cache", "under", "the", "appropriate", "key", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/cache.py#L109-L113
train
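The two cache helpers above are symmetric, so a typical call site is a read-through pattern. This is only a hedged sketch: `datatable`, the extra kwargs, and `build_object_list()` are placeholders, not names from the library.

# Read-through caching sketch; get_cached_data/cache_data share the same
# key derivation via datatable.get_cache_key(**kwargs).
records = get_cached_data(datatable, view=view)
if records is None:
    records = build_object_list()  # placeholder for the expensive work
    cache_data(datatable, records, view=view)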
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
keyed_helper
def keyed_helper(helper):
    """
    Decorator for helper functions that operate on direct values instead of model instances.

    A keyed helper is one that can be used normally in the view's own custom callbacks, but also
    supports direct access in the column declaration, such as in the example:

        datatable_options = {
            'columns': [
                ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))),
            ],
        }

    With the help of a ``sort``-style ``key`` argument, the helper can receive all the information
    it requires in advance, so that the view doesn't have to go through the trouble of declaring
    a custom callback method that simply returns the value of the ``make_boolean_checkmark()``
    helper.

    If the attribute being fetched is identical to the one pointed to in the column declaration,
    even the ``key`` argument can be omitted:

        ('Field Name', 'fieldname', make_boolean_checkmark)),
    """

    @wraps(helper)
    def wrapper(instance=None, key=None, attr=None, *args, **kwargs):
        if set((instance, key, attr)) == {None}:
            # helper was called in place with neither important arg
            raise ValueError("If called directly, helper function '%s' requires either a model"
                             " instance, or a 'key' or 'attr' keyword argument." % helper.__name__)

        if instance is not None:
            return helper(instance, *args, **kwargs)

        if key is None and attr is None:
            attr = 'self'

        if attr:
            if attr == 'self':
                key = lambda obj: obj
            else:
                key = operator.attrgetter(attr)

        # Helper is used directly in the columns declaration. A new callable is
        # returned to take the place of a callback.
        @wraps(helper)
        def helper_wrapper(instance, *args, **kwargs):
            return helper(key(instance), *args, **kwargs)

        return helper_wrapper

    wrapper._is_wrapped = True
    return wrapper
python
def keyed_helper(helper):
    """
    Decorator for helper functions that operate on direct values instead of model instances.

    A keyed helper is one that can be used normally in the view's own custom callbacks, but also
    supports direct access in the column declaration, such as in the example:

        datatable_options = {
            'columns': [
                ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))),
            ],
        }

    With the help of a ``sort``-style ``key`` argument, the helper can receive all the information
    it requires in advance, so that the view doesn't have to go through the trouble of declaring
    a custom callback method that simply returns the value of the ``make_boolean_checkmark()``
    helper.

    If the attribute being fetched is identical to the one pointed to in the column declaration,
    even the ``key`` argument can be omitted:

        ('Field Name', 'fieldname', make_boolean_checkmark)),
    """

    @wraps(helper)
    def wrapper(instance=None, key=None, attr=None, *args, **kwargs):
        if set((instance, key, attr)) == {None}:
            # helper was called in place with neither important arg
            raise ValueError("If called directly, helper function '%s' requires either a model"
                             " instance, or a 'key' or 'attr' keyword argument." % helper.__name__)

        if instance is not None:
            return helper(instance, *args, **kwargs)

        if key is None and attr is None:
            attr = 'self'

        if attr:
            if attr == 'self':
                key = lambda obj: obj
            else:
                key = operator.attrgetter(attr)

        # Helper is used directly in the columns declaration. A new callable is
        # returned to take the place of a callback.
        @wraps(helper)
        def helper_wrapper(instance, *args, **kwargs):
            return helper(key(instance), *args, **kwargs)

        return helper_wrapper

    wrapper._is_wrapped = True
    return wrapper
[ "def", "keyed_helper", "(", "helper", ")", ":", "@", "wraps", "(", "helper", ")", "def", "wrapper", "(", "instance", "=", "None", ",", "key", "=", "None", ",", "attr", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "set", "(", "(", "instance", ",", "key", ",", "attr", ")", ")", "==", "{", "None", "}", ":", "# helper was called in place with neither important arg", "raise", "ValueError", "(", "\"If called directly, helper function '%s' requires either a model\"", "\" instance, or a 'key' or 'attr' keyword argument.\"", "%", "helper", ".", "__name__", ")", "if", "instance", "is", "not", "None", ":", "return", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "key", "is", "None", "and", "attr", "is", "None", ":", "attr", "=", "'self'", "if", "attr", ":", "if", "attr", "==", "'self'", ":", "key", "=", "lambda", "obj", ":", "obj", "else", ":", "key", "=", "operator", ".", "attrgetter", "(", "attr", ")", "# Helper is used directly in the columns declaration. A new callable is", "# returned to take the place of a callback.", "@", "wraps", "(", "helper", ")", "def", "helper_wrapper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "helper", "(", "key", "(", "instance", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "helper_wrapper", "wrapper", ".", "_is_wrapped", "=", "True", "return", "wrapper" ]
Decorator for helper functions that operate on direct values instead of model instances.

A keyed helper is one that can be used normally in the view's own custom callbacks, but also
supports direct access in the column declaration, such as in the example:

    datatable_options = {
        'columns': [
            ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))),
        ],
    }

With the help of a ``sort``-style ``key`` argument, the helper can receive all the information
it requires in advance, so that the view doesn't have to go through the trouble of declaring
a custom callback method that simply returns the value of the ``make_boolean_checkmark()``
helper.

If the attribute being fetched is identical to the one pointed to in the column declaration,
even the ``key`` argument can be omitted:

    ('Field Name', 'fieldname', make_boolean_checkmark)),
[ "Decorator", "for", "helper", "functions", "that", "operate", "on", "direct", "values", "instead", "of", "model", "instances", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L32-L84
train
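A minimal sketch of what the decorator enables, using a toy helper (the helper name and model are illustrative): once wrapped, the helper can be pre-keyed in a column declaration, and the returned callable extracts the keyed value from each instance before the helper body runs.

import operator
from datatableview.helpers import keyed_helper

@keyed_helper
def make_upper(value, *args, **kwargs):
    # Toy helper body: operates on the already-extracted value.
    return str(value).upper()

class Person(object):
    name = 'ada'

callback = make_upper(attr='name')                      # pre-keyed by attribute name
print(callback(Person()))                               # -> 'ADA'

callback = make_upper(key=operator.attrgetter('name'))  # or with an explicit key function
print(callback(Person()))                               # -> 'ADA'

# make_upper()  # would raise ValueError: no instance, key, or attr given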
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
itemgetter
def itemgetter(k, ellipsis=False, key=None):
    """
    Looks up ``k`` as an index of the column's value.

    If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to
    indicate truncation.  Alternatively, ``ellipsis`` can be set to ``True`` to use a default
    ``'...'``.

    If a ``key`` is given, it may be a function which maps the target value to something else
    before the item lookup takes place.

    Examples::

        # Choose an item from a list source.
        winner = columns.TextColumn("Winner", sources=['get_rankings'],
                                    processor=itemgetter(0))

        # Take instance.description[:30] and append "..." to the end if truncation occurs.
        description = columns.TextColumn("Description", sources=['description'],
                                         processor=itemgetter(slice(None, 30), ellipsis=True))
    """

    def helper(instance, *args, **kwargs):
        default_value = kwargs.get('default_value')
        if default_value is None:
            default_value = instance
        value = default_value[k]
        if ellipsis and isinstance(k, slice) and isinstance(value, six.string_types) and \
                len(default_value) > len(value):
            if ellipsis is True:
                value += "..."
            else:
                value += ellipsis
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
python
def itemgetter(k, ellipsis=False, key=None):
    """
    Looks up ``k`` as an index of the column's value.

    If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to
    indicate truncation.  Alternatively, ``ellipsis`` can be set to ``True`` to use a default
    ``'...'``.

    If a ``key`` is given, it may be a function which maps the target value to something else
    before the item lookup takes place.

    Examples::

        # Choose an item from a list source.
        winner = columns.TextColumn("Winner", sources=['get_rankings'],
                                    processor=itemgetter(0))

        # Take instance.description[:30] and append "..." to the end if truncation occurs.
        description = columns.TextColumn("Description", sources=['description'],
                                         processor=itemgetter(slice(None, 30), ellipsis=True))
    """

    def helper(instance, *args, **kwargs):
        default_value = kwargs.get('default_value')
        if default_value is None:
            default_value = instance
        value = default_value[k]
        if ellipsis and isinstance(k, slice) and isinstance(value, six.string_types) and \
                len(default_value) > len(value):
            if ellipsis is True:
                value += "..."
            else:
                value += ellipsis
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
[ "def", "itemgetter", "(", "k", ",", "ellipsis", "=", "False", ",", "key", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "default_value", "=", "kwargs", ".", "get", "(", "'default_value'", ")", "if", "default_value", "is", "None", ":", "default_value", "=", "instance", "value", "=", "default_value", "[", "k", "]", "if", "ellipsis", "and", "isinstance", "(", "k", ",", "slice", ")", "and", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "len", "(", "default_value", ")", ">", "len", "(", "value", ")", ":", "if", "ellipsis", "is", "True", ":", "value", "+=", "\"...\"", "else", ":", "value", "+=", "ellipsis", "return", "value", "if", "key", ":", "helper", "=", "keyed_helper", "(", "helper", ")", "(", "key", "=", "key", ")", "return", "helper" ]
Looks up ``k`` as an index of the column's value.

If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to
indicate truncation.  Alternatively, ``ellipsis`` can be set to ``True`` to use a default
``'...'``.

If a ``key`` is given, it may be a function which maps the target value to something else
before the item lookup takes place.

Examples::

    # Choose an item from a list source.
    winner = columns.TextColumn("Winner", sources=['get_rankings'],
                                processor=itemgetter(0))

    # Take instance.description[:30] and append "..." to the end if truncation occurs.
    description = columns.TextColumn("Description", sources=['description'],
                                     processor=itemgetter(slice(None, 30), ellipsis=True))
[ "Looks", "up", "k", "as", "an", "index", "of", "the", "column", "s", "value", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L150-L187
train
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
attrgetter
def attrgetter(attr, key=None):
    """
    Looks up ``attr`` on the target value.  If the result is a callable, it will be called in
    place without arguments.

    If a ``key`` is given, it may be a function which maps the target value to something else
    before the attribute lookup takes place.

    Examples::

        # Explicitly selecting the sources and then using a processor to allow the model
        # method to organize the data itself, you can still provide all the necessary
        # ORM hints to the column.
        # This is definitely superior to having sources=['get_address'].
        address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'],
                                     processor=attrgetter('get_address'))
    """

    def helper(instance, *args, **kwargs):
        value = instance
        for bit in attr.split('.'):
            value = getattr(value, bit)
            if callable(value):
                value = value()
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
python
def attrgetter(attr, key=None):
    """
    Looks up ``attr`` on the target value.  If the result is a callable, it will be called in
    place without arguments.

    If a ``key`` is given, it may be a function which maps the target value to something else
    before the attribute lookup takes place.

    Examples::

        # Explicitly selecting the sources and then using a processor to allow the model
        # method to organize the data itself, you can still provide all the necessary
        # ORM hints to the column.
        # This is definitely superior to having sources=['get_address'].
        address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'],
                                     processor=attrgetter('get_address'))
    """

    def helper(instance, *args, **kwargs):
        value = instance
        for bit in attr.split('.'):
            value = getattr(value, bit)
            if callable(value):
                value = value()
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
[ "def", "attrgetter", "(", "attr", ",", "key", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "instance", "for", "bit", "in", "attr", ".", "split", "(", "'.'", ")", ":", "value", "=", "getattr", "(", "value", ",", "bit", ")", "if", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "return", "value", "if", "key", ":", "helper", "=", "keyed_helper", "(", "helper", ")", "(", "key", "=", "key", ")", "return", "helper" ]
Looks up ``attr`` on the target value.  If the result is a callable, it will be called in
place without arguments.

If a ``key`` is given, it may be a function which maps the target value to something else
before the attribute lookup takes place.

Examples::

    # Explicitly selecting the sources and then using a processor to allow the model
    # method to organize the data itself, you can still provide all the necessary
    # ORM hints to the column.
    # This is definitely superior to having sources=['get_address'].
    address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'],
                                 processor=attrgetter('get_address'))
[ "Looks", "up", "attr", "on", "the", "target", "value", ".", "If", "the", "result", "is", "a", "callable", "it", "will", "be", "called", "in", "place", "without", "arguments", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L190-L218
train
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
make_processor
def make_processor(func, arg=None):
    """
    A pre-called processor that wraps the execution of the target callable ``func``.

    This is useful for when ``func`` is a third party mapping function that can take your column's
    value and return an expected result, but doesn't understand all of the extra kwargs that get
    sent to processor callbacks.  Because this helper proxies access to ``func``, it can hold back
    the extra kwargs for a successful call.

    ``func`` will be called once per object record, a single positional argument being the column
    data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources`

    An optional ``arg`` may be given, which will be forwarded as a second positional argument to
    ``func``.  This was originally intended to simplify using Django template filter functions as
    ``func``.  If you need to send more arguments, consider wrapping your ``func`` in a
    ``functools.partial``, and use that as ``func`` instead.
    """

    def helper(instance, *args, **kwargs):
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        if arg is not None:
            extra_arg = [arg]
        else:
            extra_arg = []
        return func(value, *extra_arg)

    return helper
python
def make_processor(func, arg=None):
    """
    A pre-called processor that wraps the execution of the target callable ``func``.

    This is useful for when ``func`` is a third party mapping function that can take your column's
    value and return an expected result, but doesn't understand all of the extra kwargs that get
    sent to processor callbacks.  Because this helper proxies access to ``func``, it can hold back
    the extra kwargs for a successful call.

    ``func`` will be called once per object record, a single positional argument being the column
    data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources`

    An optional ``arg`` may be given, which will be forwarded as a second positional argument to
    ``func``.  This was originally intended to simplify using Django template filter functions as
    ``func``.  If you need to send more arguments, consider wrapping your ``func`` in a
    ``functools.partial``, and use that as ``func`` instead.
    """

    def helper(instance, *args, **kwargs):
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        if arg is not None:
            extra_arg = [arg]
        else:
            extra_arg = []
        return func(value, *extra_arg)

    return helper
[ "def", "make_processor", "(", "func", ",", "arg", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "kwargs", ".", "get", "(", "'default_value'", ")", "if", "value", "is", "None", ":", "value", "=", "instance", "if", "arg", "is", "not", "None", ":", "extra_arg", "=", "[", "arg", "]", "else", ":", "extra_arg", "=", "[", "]", "return", "func", "(", "value", ",", "*", "extra_arg", ")", "return", "helper" ]
A pre-called processor that wraps the execution of the target callable ``func``.

This is useful for when ``func`` is a third party mapping function that can take your column's
value and return an expected result, but doesn't understand all of the extra kwargs that get
sent to processor callbacks.  Because this helper proxies access to ``func``, it can hold back
the extra kwargs for a successful call.

``func`` will be called once per object record, a single positional argument being the column
data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources`

An optional ``arg`` may be given, which will be forwarded as a second positional argument to
``func``.  This was originally intended to simplify using Django template filter functions as
``func``.  If you need to send more arguments, consider wrapping your ``func`` in a
``functools.partial``, and use that as ``func`` instead.
[ "A", "pre", "-", "called", "processor", "that", "wraps", "the", "execution", "of", "the", "target", "callable", "func", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L402-L428
train
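A hedged example of the intended use: handing a Django template filter to a column as its processor. The column declaration and field names are illustrative, not taken from the record above.

from django.template.defaultfilters import linebreaksbr
from datatableview import columns
from datatableview.helpers import make_processor

# The filter receives only (value,); make_processor absorbs the extra
# kwargs (localize, view, etc.) that datatableview sends to processors.
notes = columns.TextColumn("Notes", sources=['notes'],
                           processor=make_processor(linebreaksbr))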
Imgur/imgurpython
examples/upload.py
upload_kitten
def upload_kitten(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''

    # Here's the metadata for the upload. All of these are optional, including
    # this config dict itself.
    config = {
        'album': album,
        'name': 'Catastrophe!',
        'title': 'Catastrophe!',
        'description': 'Cute kitten being cute on {0}'.format(datetime.now())
    }

    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()

    return image
python
def upload_kitten(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''

    # Here's the metadata for the upload. All of these are optional, including
    # this config dict itself.
    config = {
        'album': album,
        'name': 'Catastrophe!',
        'title': 'Catastrophe!',
        'description': 'Cute kitten being cute on {0}'.format(datetime.now())
    }

    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()

    return image
[ "def", "upload_kitten", "(", "client", ")", ":", "# Here's the metadata for the upload. All of these are optional, including", "# this config dict itself.", "config", "=", "{", "'album'", ":", "album", ",", "'name'", ":", "'Catastrophe!'", ",", "'title'", ":", "'Catastrophe!'", ",", "'description'", ":", "'Cute kitten being cute on {0}'", ".", "format", "(", "datetime", ".", "now", "(", ")", ")", "}", "print", "(", "\"Uploading image... \"", ")", "image", "=", "client", ".", "upload_from_path", "(", "image_path", ",", "config", "=", "config", ",", "anon", "=", "False", ")", "print", "(", "\"Done\"", ")", "print", "(", ")", "return", "image" ]
Upload a picture of a kitten. We don't ship one, so get creative!
[ "Upload", "a", "picture", "of", "a", "kitten", ".", "We", "don", "t", "ship", "one", "so", "get", "creative!" ]
48abc45a143ee9d2485c22a63b7cd55701d8163c
https://github.com/Imgur/imgurpython/blob/48abc45a143ee9d2485c22a63b7cd55701d8163c/examples/upload.py#L19-L38
train
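For context, a hedged sketch of driving the example function: upload.py defines `album` and `image_path` at module level, and since the upload passes anon=False, the client presumably needs user credentials in addition to the app's client id/secret. All four values below are placeholders.

from imgurpython import ImgurClient

client = ImgurClient('YOUR_CLIENT_ID', 'YOUR_CLIENT_SECRET',
                     'YOUR_ACCESS_TOKEN', 'YOUR_REFRESH_TOKEN')
image = upload_kitten(client)
print(image['link'])  # URL of the freshly uploaded image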
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_isdst
def _isdst(dt):
    """Check if date is in dst.
    """
    if type(dt) == datetime.date:
        dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
    dtc = dt.replace(year=datetime.datetime.now().year)
    if time.localtime(dtc.timestamp()).tm_isdst == 1:
        return True
    return False
python
def _isdst(dt):
    """Check if date is in dst.
    """
    if type(dt) == datetime.date:
        dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
    dtc = dt.replace(year=datetime.datetime.now().year)
    if time.localtime(dtc.timestamp()).tm_isdst == 1:
        return True
    return False
[ "def", "_isdst", "(", "dt", ")", ":", "if", "type", "(", "dt", ")", "==", "datetime", ".", "date", ":", "dt", "=", "datetime", ".", "datetime", ".", "combine", "(", "dt", ",", "datetime", ".", "datetime", ".", "min", ".", "time", "(", ")", ")", "dtc", "=", "dt", ".", "replace", "(", "year", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", ")", "if", "time", ".", "localtime", "(", "dtc", ".", "timestamp", "(", ")", ")", ".", "tm_isdst", "==", "1", ":", "return", "True", "return", "False" ]
Check if date is in dst.
[ "Check", "if", "date", "is", "in", "dst", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L13-L21
train
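A quick demonstration. Results depend on the machine's local timezone; the values shown assume a northern-hemisphere zone that observes DST. Note the year is normalized to the current one first, so dates outside the tz database's range still get an answer:

import datetime

print(_isdst(datetime.date(1900, 7, 1)))  # True in e.g. Europe/Berlin
print(_isdst(datetime.date(1900, 1, 1)))  # False there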
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_mktime
def _mktime(time_struct):
    """Custom mktime because Windows can't be arsed to properly do pre-Epoch
    dates, probably because it's busy counting all its chromosomes.
    """
    try:
        return time.mktime(time_struct)
    except OverflowError:
        dt = datetime.datetime(*time_struct[:6])
        ep = datetime.datetime(1970, 1, 1)
        diff = dt - ep
        ts = diff.days * 24 * 3600 + diff.seconds + time.timezone
        if time_struct.tm_isdst == 1:
            ts -= 3600
        # Guess if DST is in effect for -1
        if time_struct.tm_isdst == -1 and _isdst(dt):
            ts -= 3600
        return ts
python
def _mktime(time_struct):
    """Custom mktime because Windows can't be arsed to properly do pre-Epoch
    dates, probably because it's busy counting all its chromosomes.
    """
    try:
        return time.mktime(time_struct)
    except OverflowError:
        dt = datetime.datetime(*time_struct[:6])
        ep = datetime.datetime(1970, 1, 1)
        diff = dt - ep
        ts = diff.days * 24 * 3600 + diff.seconds + time.timezone
        if time_struct.tm_isdst == 1:
            ts -= 3600
        # Guess if DST is in effect for -1
        if time_struct.tm_isdst == -1 and _isdst(dt):
            ts -= 3600
        return ts
[ "def", "_mktime", "(", "time_struct", ")", ":", "try", ":", "return", "time", ".", "mktime", "(", "time_struct", ")", "except", "OverflowError", ":", "dt", "=", "datetime", ".", "datetime", "(", "*", "time_struct", "[", ":", "6", "]", ")", "ep", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "diff", "=", "dt", "-", "ep", "ts", "=", "diff", ".", "days", "*", "24", "*", "3600", "+", "diff", ".", "seconds", "+", "time", ".", "timezone", "if", "time_struct", ".", "tm_isdst", "==", "1", ":", "ts", "-=", "3600", "# Guess if DST is in effect for -1", "if", "time_struct", ".", "tm_isdst", "==", "-", "1", "and", "_isdst", "(", "dt", ")", ":", "ts", "-=", "3600", "return", "ts" ]
Custom mktime because Windows can't be arsed to properly do pre-Epoch dates, probably because it's busy counting all its chromosomes.
[ "Custom", "mktime", "because", "Windows", "can", "t", "be", "arsed", "to", "properly", "do", "pre", "-", "Epoch", "dates", "probably", "because", "it", "s", "busy", "counting", "all", "its", "chromosomes", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L24-L40
train
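A pre-epoch struct_time exercises the fallback branch on Windows, where time.mktime() raises OverflowError for such inputs; on other platforms the try branch simply succeeds, so both paths should agree up to the DST guess:

import time

# 1969-12-31 23:00:00, tm_isdst=-1 so _isdst() is consulted for the DST guess.
struct = time.struct_time((1969, 12, 31, 23, 0, 0, 2, 365, -1))
print(_mktime(struct))  # a small negative timestamp, shifted by the local timezone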
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_strftime
def _strftime(pattern, time_struct=time.localtime()):
    """Custom strftime because Windows is shit again.
    """
    try:
        return time.strftime(pattern, time_struct)
    except OSError:
        dt = datetime.datetime.fromtimestamp(_mktime(time_struct))
        # This is incredibly hacky and will probably break with leap
        # year overlaps and shit. Any complaints should go here:
        # https://support.microsoft.com/
        original = dt.year
        current = datetime.datetime.now().year
        dt = dt.replace(year=current)
        ts = dt.timestamp()
        if _isdst(dt):
            ts -= 3600
        string = time.strftime(pattern, time.localtime(ts))
        string = string.replace(str(current), str(original))
        return string
python
def _strftime(pattern, time_struct=time.localtime()):
    """Custom strftime because Windows is shit again.
    """
    try:
        return time.strftime(pattern, time_struct)
    except OSError:
        dt = datetime.datetime.fromtimestamp(_mktime(time_struct))
        # This is incredibly hacky and will probably break with leap
        # year overlaps and shit. Any complaints should go here:
        # https://support.microsoft.com/
        original = dt.year
        current = datetime.datetime.now().year
        dt = dt.replace(year=current)
        ts = dt.timestamp()
        if _isdst(dt):
            ts -= 3600
        string = time.strftime(pattern, time.localtime(ts))
        string = string.replace(str(current), str(original))
        return string
[ "def", "_strftime", "(", "pattern", ",", "time_struct", "=", "time", ".", "localtime", "(", ")", ")", ":", "try", ":", "return", "time", ".", "strftime", "(", "pattern", ",", "time_struct", ")", "except", "OSError", ":", "dt", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "_mktime", "(", "time_struct", ")", ")", "# This is incredibly hacky and will probably break with leap", "# year overlaps and shit. Any complaints should go here:", "# https://support.microsoft.com/", "original", "=", "dt", ".", "year", "current", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "dt", "=", "dt", ".", "replace", "(", "year", "=", "current", ")", "ts", "=", "dt", ".", "timestamp", "(", ")", "if", "_isdst", "(", "dt", ")", ":", "ts", "-=", "3600", "string", "=", "time", ".", "strftime", "(", "pattern", ",", "time", ".", "localtime", "(", "ts", ")", ")", "string", "=", "string", ".", "replace", "(", "str", "(", "current", ")", ",", "str", "(", "original", ")", ")", "return", "string" ]
Custom strftime because Windows is shit again.
[ "Custom", "strftime", "because", "Windows", "is", "shit", "again", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L43-L61
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_gmtime
def _gmtime(timestamp):
    """Custom gmtime because yada yada.
    """
    try:
        return time.gmtime(timestamp)
    except OSError:
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        dst = int(_isdst(dt))
        return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
python
def _gmtime(timestamp):
    """Custom gmtime because yada yada.
    """
    try:
        return time.gmtime(timestamp)
    except OSError:
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        dst = int(_isdst(dt))
        return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
[ "def", "_gmtime", "(", "timestamp", ")", ":", "try", ":", "return", "time", ".", "gmtime", "(", "timestamp", ")", "except", "OSError", ":", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "dst", "=", "int", "(", "_isdst", "(", "dt", ")", ")", "return", "time", ".", "struct_time", "(", "dt", ".", "timetuple", "(", ")", "[", ":", "8", "]", "+", "tuple", "(", "[", "dst", "]", ")", ")" ]
Custom gmtime because yada yada.
[ "Custom", "gmtime", "because", "yada", "yada", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L64-L72
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_dtfromtimestamp
def _dtfromtimestamp(timestamp):
    """Custom datetime timestamp constructor. because Windows. again.
    """
    try:
        return datetime.datetime.fromtimestamp(timestamp)
    except OSError:
        timestamp -= time.timezone
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        if _isdst(dt):
            timestamp += 3600
            dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        return dt
python
def _dtfromtimestamp(timestamp):
    """Custom datetime timestamp constructor. because Windows. again.
    """
    try:
        return datetime.datetime.fromtimestamp(timestamp)
    except OSError:
        timestamp -= time.timezone
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        if _isdst(dt):
            timestamp += 3600
            dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        return dt
[ "def", "_dtfromtimestamp", "(", "timestamp", ")", ":", "try", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", "except", "OSError", ":", "timestamp", "-=", "time", ".", "timezone", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "if", "_isdst", "(", "dt", ")", ":", "timestamp", "+=", "3600", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "return", "dt" ]
Custom datetime timestamp constructor. because Windows. again.
[ "Custom", "datetime", "timestamp", "constructor", ".", "because", "Windows", ".", "again", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L75-L86
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_dfromtimestamp
def _dfromtimestamp(timestamp):
    """Custom date timestamp constructor. ditto
    """
    try:
        return datetime.date.fromtimestamp(timestamp)
    except OSError:
        timestamp -= time.timezone
        d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        if _isdst(d):
            timestamp += 3600
            d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        return d
python
def _dfromtimestamp(timestamp):
    """Custom date timestamp constructor. ditto
    """
    try:
        return datetime.date.fromtimestamp(timestamp)
    except OSError:
        timestamp -= time.timezone
        d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        if _isdst(d):
            timestamp += 3600
            d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        return d
[ "def", "_dfromtimestamp", "(", "timestamp", ")", ":", "try", ":", "return", "datetime", ".", "date", ".", "fromtimestamp", "(", "timestamp", ")", "except", "OSError", ":", "timestamp", "-=", "time", ".", "timezone", "d", "=", "datetime", ".", "date", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "if", "_isdst", "(", "d", ")", ":", "timestamp", "+=", "3600", "d", "=", "datetime", ".", "date", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "return", "d" ]
Custom date timestamp constructor. ditto
[ "Custom", "date", "timestamp", "constructor", ".", "ditto" ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L89-L100
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
guesstype
def guesstype(timestr):
    """Tries to guess whether a string represents a time or a time delta and
    returns the appropriate object.

    :param timestr (required)
        The string to be analyzed
    """
    timestr_full = " {} ".format(timestr)
    if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1:
        return Chronyk(timestr)

    comps = ["second", "minute", "hour", "day", "week", "month", "year"]
    for comp in comps:
        if timestr_full.find(comp) != -1:
            return ChronykDelta(timestr)

    return Chronyk(timestr)
python
def guesstype(timestr):
    """Tries to guess whether a string represents a time or a time delta and
    returns the appropriate object.

    :param timestr (required)
        The string to be analyzed
    """
    timestr_full = " {} ".format(timestr)
    if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1:
        return Chronyk(timestr)

    comps = ["second", "minute", "hour", "day", "week", "month", "year"]
    for comp in comps:
        if timestr_full.find(comp) != -1:
            return ChronykDelta(timestr)

    return Chronyk(timestr)
[ "def", "guesstype", "(", "timestr", ")", ":", "timestr_full", "=", "\" {} \"", ".", "format", "(", "timestr", ")", "if", "timestr_full", ".", "find", "(", "\" in \"", ")", "!=", "-", "1", "or", "timestr_full", ".", "find", "(", "\" ago \"", ")", "!=", "-", "1", ":", "return", "Chronyk", "(", "timestr", ")", "comps", "=", "[", "\"second\"", ",", "\"minute\"", ",", "\"hour\"", ",", "\"day\"", ",", "\"week\"", ",", "\"month\"", ",", "\"year\"", "]", "for", "comp", "in", "comps", ":", "if", "timestr_full", ".", "find", "(", "comp", ")", "!=", "-", "1", ":", "return", "ChronykDelta", "(", "timestr", ")", "return", "Chronyk", "(", "timestr", ")" ]
Tries to guess whether a string represents a time or a time delta and
returns the appropriate object.

:param timestr (required)
    The string to be analyzed
[ "Tries", "to", "guess", "whether", "a", "string", "represents", "a", "time", "or", "a", "time", "delta", "and", "returns", "the", "appropriate", "object", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L112-L128
train
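The dispatch rule is easy to see with a few inputs: " in "/" ago " forces an absolute time, bare duration words force a delta, and everything else falls through to Chronyk:

guesstype("in 2 hours")      # -> Chronyk: contains " in "
guesstype("5 minutes ago")   # -> Chronyk: contains " ago "
guesstype("2 hours")         # -> ChronykDelta: bare duration component
guesstype("2015-05-12")      # -> Chronyk: no duration words at all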
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_round
def _round(num):
    """A custom rounding function that's a bit more 'strict'.
    """
    deci = num - math.floor(num)
    if deci > 0.8:
        return int(math.floor(num) + 1)
    else:
        return int(math.floor(num))
python
def _round(num):
    """A custom rounding function that's a bit more 'strict'.
    """
    deci = num - math.floor(num)
    if deci > 0.8:
        return int(math.floor(num) + 1)
    else:
        return int(math.floor(num))
[ "def", "_round", "(", "num", ")", ":", "deci", "=", "num", "-", "math", ".", "floor", "(", "num", ")", "if", "deci", ">", "0.8", ":", "return", "int", "(", "math", ".", "floor", "(", "num", ")", "+", "1", ")", "else", ":", "return", "int", "(", "math", ".", "floor", "(", "num", ")", ")" ]
A custom rounding function that's a bit more 'strict'.
[ "A", "custom", "rounding", "function", "that", "s", "a", "bit", "more", "strict", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L131-L138
train
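The 0.8 threshold means a value only rounds up once its fractional part is most of the way to the next integer, presumably so human-readable deltas err toward the lower unit:

print(_round(2.79))  # 2 -- fractional part 0.79 does not pass the threshold
print(_round(2.81))  # 3 -- 0.81 > 0.8, so round up
print(_round(3.0))   # 3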
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.datetime
def datetime(self, timezone=None):
    """Returns a datetime object.

    This object retains all information, including timezones.

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    return _dtfromtimestamp(self.__timestamp__ - timezone)
python
def datetime(self, timezone=None):
    """Returns a datetime object.

    This object retains all information, including timezones.

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    return _dtfromtimestamp(self.__timestamp__ - timezone)
[ "def", "datetime", "(", "self", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "return", "_dtfromtimestamp", "(", "self", ".", "__timestamp__", "-", "timezone", ")" ]
Returns a datetime object.

This object retains all information, including timezones.

:param timezone = self.timezone
    The timezone (in seconds west of UTC) to return the value in. By
    default, the timezone used when constructing the class is used
    (local one by default). To use UTC, use timezone = 0. To use the
    local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "datetime", "object", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L499-L512
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.ctime
def ctime(self, timezone=None):
    """Returns a ctime string.

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    return time.ctime(self.__timestamp__ - timezone)
python
def ctime(self, timezone=None):
    """Returns a ctime string.

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    return time.ctime(self.__timestamp__ - timezone)
[ "def", "ctime", "(", "self", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "return", "time", ".", "ctime", "(", "self", ".", "__timestamp__", "-", "timezone", ")" ]
Returns a ctime string.

:param timezone = self.timezone
    The timezone (in seconds west of UTC) to return the value in. By
    default, the timezone used when constructing the class is used
    (local one by default). To use UTC, use timezone = 0. To use the
    local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "ctime", "string", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L541-L552
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.timestring
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
    """Returns a time string.

    :param pattern = "%Y-%m-%d %H:%M:%S"
        The format used. By default, an ISO-type format is used. The
        syntax here is identical to the one used by time.strftime() and
        time.strptime().

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    timestamp = self.__timestamp__ - timezone
    timestamp -= LOCALTZ
    return _strftime(pattern, _gmtime(timestamp))
python
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
    """Returns a time string.

    :param pattern = "%Y-%m-%d %H:%M:%S"
        The format used. By default, an ISO-type format is used. The
        syntax here is identical to the one used by time.strftime() and
        time.strptime().

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    timestamp = self.__timestamp__ - timezone
    timestamp -= LOCALTZ
    return _strftime(pattern, _gmtime(timestamp))
[ "def", "timestring", "(", "self", ",", "pattern", "=", "\"%Y-%m-%d %H:%M:%S\"", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "timestamp", "=", "self", ".", "__timestamp__", "-", "timezone", "timestamp", "-=", "LOCALTZ", "return", "_strftime", "(", "pattern", ",", "_gmtime", "(", "timestamp", ")", ")" ]
Returns a time string.

:param pattern = "%Y-%m-%d %H:%M:%S"
    The format used. By default, an ISO-type format is used. The
    syntax here is identical to the one used by time.strftime() and
    time.strptime().

:param timezone = self.timezone
    The timezone (in seconds west of UTC) to return the value in. By
    default, the timezone used when constructing the class is used
    (local one by default). To use UTC, use timezone = 0. To use the
    local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "time", "string", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L554-L572
train
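Taken together, the three accessors above render the same stored timestamp in different shapes. A hedged usage sketch (the parse and the outputs assume the string is accepted and the local timezone is used):

from chronyk import Chronyk

t = Chronyk("2015-05-12 14:30:00")
t.datetime()                       # datetime.datetime(2015, 5, 12, 14, 30) in local time
t.ctime()                          # 'Tue May 12 14:30:00 2015'
t.timestring("%Y-%m-%d")           # '2015-05-12'
t.timestring("%H:%M", timezone=0)  # the same instant rendered as UTC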
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.get_ticket
def get_ticket(self, ticket_id):
    """Fetches the ticket for the given ticket ID"""
    url = 'tickets/%d' % ticket_id
    ticket = self._api._get(url)
    return Ticket(**ticket)
python
def get_ticket(self, ticket_id):
    """Fetches the ticket for the given ticket ID"""
    url = 'tickets/%d' % ticket_id
    ticket = self._api._get(url)
    return Ticket(**ticket)
[ "def", "get_ticket", "(", "self", ",", "ticket_id", ")", ":", "url", "=", "'tickets/%d'", "%", "ticket_id", "ticket", "=", "self", ".", "_api", ".", "_get", "(", "url", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Fetches the ticket for the given ticket ID
[ "Fetches", "the", "ticket", "for", "the", "given", "ticket", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L11-L15
train
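Usage follows the v2 client's attribute layout, where the TicketAPI is exposed as `tickets` on the top-level API object; the domain and key below are placeholders:

from freshdesk.v2.api import API

a = API('company.freshdesk.com', 'your-api-key')
ticket = a.tickets.get_ticket(1)
print(ticket.subject)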
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.create_outbound_email
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
    """Creates an outbound email"""
    url = 'tickets/outbound_email'
    priority = kwargs.get('priority', 1)
    data = {
        'subject': subject,
        'description': description,
        'priority': priority,
        'email': email,
        'email_config_id': email_config_id,
    }
    data.update(kwargs)
    ticket = self._api._post(url, data=json.dumps(data))
    return Ticket(**ticket)
python
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
    """Creates an outbound email"""
    url = 'tickets/outbound_email'
    priority = kwargs.get('priority', 1)
    data = {
        'subject': subject,
        'description': description,
        'priority': priority,
        'email': email,
        'email_config_id': email_config_id,
    }
    data.update(kwargs)
    ticket = self._api._post(url, data=json.dumps(data))
    return Ticket(**ticket)
[ "def", "create_outbound_email", "(", "self", ",", "subject", ",", "description", ",", "email", ",", "email_config_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'tickets/outbound_email'", "priority", "=", "kwargs", ".", "get", "(", "'priority'", ",", "1", ")", "data", "=", "{", "'subject'", ":", "subject", ",", "'description'", ":", "description", ",", "'priority'", ":", "priority", ",", "'email'", ":", "email", ",", "'email_config_id'", ":", "email_config_id", ",", "}", "data", ".", "update", "(", "kwargs", ")", "ticket", "=", "self", ".", "_api", ".", "_post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Creates an outbound email
[ "Creates", "an", "outbound", "email" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L53-L66
train
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.update_ticket
def update_ticket(self, ticket_id, **kwargs):
    """Updates a ticket from a given ticket ID"""
    url = 'tickets/%d' % ticket_id
    ticket = self._api._put(url, data=json.dumps(kwargs))
    return Ticket(**ticket)
python
def update_ticket(self, ticket_id, **kwargs):
    """Updates a ticket from a given ticket ID"""
    url = 'tickets/%d' % ticket_id
    ticket = self._api._put(url, data=json.dumps(kwargs))
    return Ticket(**ticket)
[ "def", "update_ticket", "(", "self", ",", "ticket_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'tickets/%d'", "%", "ticket_id", "ticket", "=", "self", ".", "_api", ".", "_put", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "kwargs", ")", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Updates a ticket from a given ticket ID
[ "Updates", "a", "ticket", "from", "a", "given", "ticket", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L68-L72
train
sjkingo/python-freshdesk
freshdesk/v2/api.py
AgentAPI.get_agent
def get_agent(self, agent_id):
    """Fetches the agent for the given agent ID"""
    url = 'agents/%s' % agent_id
    return Agent(**self._api._get(url))
python
def get_agent(self, agent_id):
    """Fetches the agent for the given agent ID"""
    url = 'agents/%s' % agent_id
    return Agent(**self._api._get(url))
[ "def", "get_agent", "(", "self", ",", "agent_id", ")", ":", "url", "=", "'agents/%s'", "%", "agent_id", "return", "Agent", "(", "*", "*", "self", ".", "_api", ".", "_get", "(", "url", ")", ")" ]
Fetches the agent for the given agent ID
[ "Fetches", "the", "agent", "for", "the", "given", "agent", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L360-L363
train
sjkingo/python-freshdesk
freshdesk/v2/api.py
AgentAPI.update_agent
def update_agent(self, agent_id, **kwargs):
    """Updates an agent"""
    url = 'agents/%s' % agent_id
    agent = self._api._put(url, data=json.dumps(kwargs))
    return Agent(**agent)
python
def update_agent(self, agent_id, **kwargs):
    """Updates an agent"""
    url = 'agents/%s' % agent_id
    agent = self._api._put(url, data=json.dumps(kwargs))
    return Agent(**agent)
[ "def", "update_agent", "(", "self", ",", "agent_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'agents/%s'", "%", "agent_id", "agent", "=", "self", ".", "_api", ".", "_put", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "kwargs", ")", ")", "return", "Agent", "(", "*", "*", "agent", ")" ]
Updates an agent
[ "Updates", "an", "agent" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L365-L369
train
sjkingo/python-freshdesk
freshdesk/v1/api.py
API._action
def _action(self, res):
    """Returns JSON response or raise exception if errors are present"""
    try:
        j = res.json()
    except:
        res.raise_for_status()
        j = {}
    if 'Retry-After' in res.headers:
        raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
                        'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
    if 'require_login' in j:
        raise HTTPError('403 Forbidden: API key is incorrect for this domain')
    if 'error' in j:
        raise HTTPError('{}: {}'.format(j.get('description'), j.get('errors')))

    # Catch any other errors
    try:
        res.raise_for_status()
    except Exception as e:
        raise HTTPError("{}: {}".format(e, j))

    return j
python
def _action(self, res):
    """Returns JSON response or raise exception if errors are present"""
    try:
        j = res.json()
    except:
        res.raise_for_status()
        j = {}
    if 'Retry-After' in res.headers:
        raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
                        'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
    if 'require_login' in j:
        raise HTTPError('403 Forbidden: API key is incorrect for this domain')
    if 'error' in j:
        raise HTTPError('{}: {}'.format(j.get('description'), j.get('errors')))

    # Catch any other errors
    try:
        res.raise_for_status()
    except Exception as e:
        raise HTTPError("{}: {}".format(e, j))

    return j
[ "def", "_action", "(", "self", ",", "res", ")", ":", "try", ":", "j", "=", "res", ".", "json", "(", ")", "except", ":", "res", ".", "raise_for_status", "(", ")", "j", "=", "{", "}", "if", "'Retry-After'", "in", "res", ".", "headers", ":", "raise", "HTTPError", "(", "'403 Forbidden: API rate-limit has been reached until {}.'", "'See http://freshdesk.com/api#ratelimit'", ".", "format", "(", "res", ".", "headers", "[", "'Retry-After'", "]", ")", ")", "if", "'require_login'", "in", "j", ":", "raise", "HTTPError", "(", "'403 Forbidden: API key is incorrect for this domain'", ")", "if", "'error'", "in", "j", ":", "raise", "HTTPError", "(", "'{}: {}'", ".", "format", "(", "j", ".", "get", "(", "'description'", ")", ",", "j", ".", "get", "(", "'errors'", ")", ")", ")", "# Catch any other errors", "try", ":", "res", ".", "raise_for_status", "(", ")", "except", "Exception", "as", "e", ":", "raise", "HTTPError", "(", "\"{}: {}\"", ".", "format", "(", "e", ",", "j", ")", ")", "return", "j" ]
Returns JSON response or raise exception if errors are present
[ "Returns", "JSON", "response", "or", "raise", "exception", "if", "errors", "are", "present" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L293-L318
train
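Every v1 request funnels its requests.Response through _action, so a stub response is enough to see the rate-limit branch fire. FakeResponse below is purely illustrative, and this assumes HTTPError in that module is requests' exception class:

import requests

class FakeResponse(object):
    """Minimal stand-in for requests.Response -- illustration only."""
    headers = {'Retry-After': '3600'}
    status_code = 403
    def json(self):
        return {}
    def raise_for_status(self):
        pass

try:
    api._action(FakeResponse())  # `api` is an instantiated v1 API client
except requests.HTTPError as e:
    print(e)  # 403 Forbidden: API rate-limit has been reached until 3600...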
pysal/mapclassify
mapclassify/classifiers.py
headTail_breaks
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    if len(values) > 1:
        return headTail_breaks(values[values >= mean], cuts)
    return cuts
python
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    if len(values) > 1:
        return headTail_breaks(values[values >= mean], cuts)
    return cuts
[ "def", "headTail_breaks", "(", "values", ",", "cuts", ")", ":", "values", "=", "np", ".", "array", "(", "values", ")", "mean", "=", "np", ".", "mean", "(", "values", ")", "cuts", ".", "append", "(", "mean", ")", "if", "len", "(", "values", ")", ">", "1", ":", "return", "headTail_breaks", "(", "values", "[", "values", ">=", "mean", "]", ",", "cuts", ")", "return", "cuts" ]
head tail breaks helper function
[ "head", "tail", "breaks", "helper", "function" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L35-L44
train
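Head/tail breaks targets heavy-tailed distributions: each cut is the mean of the remaining "head", recursing until a single value is left. A small worked run:

import numpy as np

values = np.array([1, 1, 1, 1, 2, 2, 3, 5, 8, 40])
print(headTail_breaks(values, []))
# [6.4, 24.0, 40.0] -- mean of all, mean of [8, 40], then the lone maximum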
pysal/mapclassify
mapclassify/classifiers.py
quantile
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999. ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles in which case the number of classes will be
    less than k

    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    w = 100. / k
    p = np.arange(w, 100 + w, w)
    if p[-1] > 100.0:
        p[-1] = 100.0
    q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
    q = np.unique(q)
    k_q = len(q)
    if k_q < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % k_q, UserWarning)
    return q
python
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999. ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles in which case the number of classes will be
    less than k

    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    w = 100. / k
    p = np.arange(w, 100 + w, w)
    if p[-1] > 100.0:
        p[-1] = 100.0
    q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
    q = np.unique(q)
    k_q = len(q)
    if k_q < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % k_q, UserWarning)
    return q
[ "def", "quantile", "(", "y", ",", "k", "=", "4", ")", ":", "w", "=", "100.", "/", "k", "p", "=", "np", ".", "arange", "(", "w", ",", "100", "+", "w", ",", "w", ")", "if", "p", "[", "-", "1", "]", ">", "100.0", ":", "p", "[", "-", "1", "]", "=", "100.0", "q", "=", "np", ".", "array", "(", "[", "stats", ".", "scoreatpercentile", "(", "y", ",", "pct", ")", "for", "pct", "in", "p", "]", ")", "q", "=", "np", ".", "unique", "(", "q", ")", "k_q", "=", "len", "(", "q", ")", "if", "k_q", "<", "k", ":", "Warn", "(", "'Warning: Not enough unique values in array to form k classes'", ",", "UserWarning", ")", "Warn", "(", "'Warning: setting k to %d'", "%", "k_q", ",", "UserWarning", ")", "return", "q" ]
Calculates the quantiles for an array

Parameters
----------
y : array
    (n,1), values to classify
k : int
    number of quantiles

Returns
-------
q : array
    (n,1), quantile values

Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(1000)
>>> mc.classifiers.quantile(x)
array([249.75, 499.5 , 749.25, 999. ])
>>> mc.classifiers.quantile(x, k = 3)
array([333., 666., 999.])

Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles in which case the number of classes will be
less than k

>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> mc.classifiers.quantile(y)
array([1., 3.])
[ "Calculates", "the", "quantiles", "for", "an", "array" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L47-L97
train
pysal/mapclassify
mapclassify/classifiers.py
bin1d
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
python
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : array number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
[ "def", "bin1d", "(", "x", ",", "bins", ")", ":", "left", "=", "[", "-", "float", "(", "\"inf\"", ")", "]", "left", ".", "extend", "(", "bins", "[", "0", ":", "-", "1", "]", ")", "right", "=", "bins", "cuts", "=", "list", "(", "zip", "(", "left", ",", "right", ")", ")", "k", "=", "len", "(", "bins", ")", "binIds", "=", "np", ".", "zeros", "(", "x", ".", "shape", ",", "dtype", "=", "'int'", ")", "while", "cuts", ":", "k", "-=", "1", "l", ",", "r", "=", "cuts", ".", "pop", "(", "-", "1", ")", "binIds", "+=", "(", "x", ">", "l", ")", "*", "(", "x", "<=", "r", ")", "*", "k", "counts", "=", "np", ".", "bincount", "(", "binIds", ",", "minlength", "=", "len", "(", "bins", ")", ")", "return", "(", "binIds", ",", "counts", ")" ]
Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : array number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25])
[ "Place", "values", "of", "a", "1", "-", "d", "array", "into", "bins", "and", "determine", "counts", "of", "values", "in", "each", "bin" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L231-L278
train
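The bin1d record above assigns bins with a cumulative masked-sum loop rather than calling numpy's binning directly. The following is a minimal sketch (not part of the record; variable names are illustrative) checking that the loop agrees with numpy.digitize's right-closed binning on the docstring's own example.

import numpy as np

x = np.arange(100, dtype=float)
bins = np.array([25, 74, 100])

# bin1d-style: each half-open interval (l, r] contributes k to its members.
left = np.concatenate(([-np.inf], bins[:-1]))
ids = np.zeros(x.shape, dtype=int)
for k, (l, r) in enumerate(zip(left, bins)):
    ids += ((x > l) & (x <= r)) * k

assert np.array_equal(ids, np.digitize(x, bins, right=True))
print(np.bincount(ids, minlength=len(bins)))   # -> [26 49 25]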
pysal/mapclassify
mapclassify/classifiers.py
_kmeans
def _kmeans(y, k=5): """ Helper function to do kmeans in one dimension """ y = y * 1. # KMEANS needs float or double dtype centroids = KMEANS(y, k)[0] centroids.sort() try: class_ids = np.abs(y - centroids).argmin(axis=1) except Exception: class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1) uc = np.unique(class_ids) cuts = np.array([y[class_ids == c].max() for c in uc]) y_cent = np.zeros_like(y) for c in uc: y_cent[class_ids == c] = centroids[c] diffs = y - y_cent diffs *= diffs return class_ids, cuts, diffs.sum(), centroids
python
def _kmeans(y, k=5): """ Helper function to do kmeans in one dimension """ y = y * 1. # KMEANS needs float or double dtype centroids = KMEANS(y, k)[0] centroids.sort() try: class_ids = np.abs(y - centroids).argmin(axis=1) except Exception: class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1) uc = np.unique(class_ids) cuts = np.array([y[class_ids == c].max() for c in uc]) y_cent = np.zeros_like(y) for c in uc: y_cent[class_ids == c] = centroids[c] diffs = y - y_cent diffs *= diffs return class_ids, cuts, diffs.sum(), centroids
[ "def", "_kmeans", "(", "y", ",", "k", "=", "5", ")", ":", "y", "=", "y", "*", "1.", "# KMEANS needs float or double dtype", "centroids", "=", "KMEANS", "(", "y", ",", "k", ")", "[", "0", "]", "centroids", ".", "sort", "(", ")", "try", ":", "class_ids", "=", "np", ".", "abs", "(", "y", "-", "centroids", ")", ".", "argmin", "(", "axis", "=", "1", ")", "except", ":", "class_ids", "=", "np", ".", "abs", "(", "y", "[", ":", ",", "np", ".", "newaxis", "]", "-", "centroids", ")", ".", "argmin", "(", "axis", "=", "1", ")", "uc", "=", "np", ".", "unique", "(", "class_ids", ")", "cuts", "=", "np", ".", "array", "(", "[", "y", "[", "class_ids", "==", "c", "]", ".", "max", "(", ")", "for", "c", "in", "uc", "]", ")", "y_cent", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "c", "in", "uc", ":", "y_cent", "[", "class_ids", "==", "c", "]", "=", "centroids", "[", "c", "]", "diffs", "=", "y", "-", "y_cent", "diffs", "*=", "diffs", "return", "class_ids", ",", "cuts", ",", "diffs", ".", "sum", "(", ")", ",", "centroids" ]
Helper function to do kmeans in one dimension
[ "Helper", "function", "to", "do", "kmeans", "in", "one", "dimension" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L289-L310
train
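The _kmeans record carries no example in its docstring. The following sketch reproduces its flow on toy data; it assumes KMEANS refers to scipy.cluster.vq.kmeans, which the record itself does not confirm, and the data are illustrative. k-means is randomly initialized, so exact output can vary between runs.

import numpy as np
from scipy.cluster.vq import kmeans   # assumed stand-in for KMEANS

y = np.array([1.0, 1.1, 1.2, 5.0, 5.1, 9.7, 9.9, 10.0])
centroids = kmeans(y, 3)[0]            # kmeans returns (codebook, distortion)
centroids.sort()

# Assign each value to its nearest centroid; read off class upper bounds.
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
cuts = np.array([y[class_ids == c].max() for c in np.unique(class_ids)])

# "Fit": squared deviation of each value from its assigned centroid.
fit = ((y - centroids[class_ids]) ** 2).sum()
print(class_ids, cuts, fit)            # exact values depend on initialization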
pysal/mapclassify
mapclassify/classifiers.py
natural_breaks
def natural_breaks(values, k=5): """ natural breaks helper function Jenks natural breaks is kmeans in one dimension """ values = np.array(values) uv = np.unique(values) uvk = len(uv) if uvk < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % uvk, UserWarning) k = uvk kres = _kmeans(values, k) sids = kres[-1] # centroids fit = kres[-2] class_ids = kres[0] cuts = kres[1] return (sids, class_ids, fit, cuts)
python
def natural_breaks(values, k=5): """ natural breaks helper function Jenks natural breaks is kmeans in one dimension """ values = np.array(values) uv = np.unique(values) uvk = len(uv) if uvk < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % uvk, UserWarning) k = uvk kres = _kmeans(values, k) sids = kres[-1] # centroids fit = kres[-2] class_ids = kres[0] cuts = kres[1] return (sids, class_ids, fit, cuts)
[ "def", "natural_breaks", "(", "values", ",", "k", "=", "5", ")", ":", "values", "=", "np", ".", "array", "(", "values", ")", "uv", "=", "np", ".", "unique", "(", "values", ")", "uvk", "=", "len", "(", "uv", ")", "if", "uvk", "<", "k", ":", "Warn", "(", "'Warning: Not enough unique values in array to form k classes'", ",", "UserWarning", ")", "Warn", "(", "'Warning: setting k to %d'", "%", "uvk", ",", "UserWarning", ")", "k", "=", "uvk", "kres", "=", "_kmeans", "(", "values", ",", "k", ")", "sids", "=", "kres", "[", "-", "1", "]", "# centroids", "fit", "=", "kres", "[", "-", "2", "]", "class_ids", "=", "kres", "[", "0", "]", "cuts", "=", "kres", "[", "1", "]", "return", "(", "sids", ",", "class_ids", ",", "fit", ",", "cuts", ")" ]
natural breaks helper function Jenks natural breaks is kmeans in one dimension
[ "natural", "breaks", "helper", "function" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L313-L332
train
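natural_breaks guards against requesting more classes than there are unique values. A minimal, standalone sketch of just that guard on toy data (the Warn alias is assumed to be warnings.warn, as suggested by the records):

import numpy as np
from warnings import warn as Warn   # assumed alias used throughout the module

values = np.array([1.0] * 5 + [3.0] * 5)   # only two unique values
k = 4
uvk = len(np.unique(values))
if uvk < k:
    Warn('Warning: setting k to %d' % uvk, UserWarning)
    k = uvk
print(k)   # -> 2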
pysal/mapclassify
mapclassify/classifiers.py
_fit
def _fit(y, classes): """Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership """ tss = 0 for class_def in classes: yc = y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
python
def _fit(y, classes): """Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership """ tss = 0 for class_def in classes: yc = y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
[ "def", "_fit", "(", "y", ",", "classes", ")", ":", "tss", "=", "0", "for", "class_def", "in", "classes", ":", "yc", "=", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "tss", "+=", "sum", "(", "css", ")", "return", "tss" ]
Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership
[ "Calculate", "the", "total", "sum", "of", "squares", "for", "a", "vector", "y", "classified", "into", "classes" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2226-L2245
train
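_fit's measure is easy to verify by hand. Below is a self-contained version on toy data, where classes are index sets exactly as in the record:

import numpy as np

y = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
classes = [np.array([0, 1, 2]), np.array([3, 4, 5])]   # index sets per class

tss = 0.0
for class_def in classes:
    yc = y[class_def]
    tss += ((yc - yc.mean()) ** 2).sum()   # squared deviations from class mean
print(tss)   # -> 4.0 (each class contributes 2.0)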
pysal/mapclassify
mapclassify/classifiers.py
gadf
def gadf(y, method="Quantiles", maxk=15, pct=0.8): """ Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf>pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles, 'Fisher_Jenks', 'Maximum_Breaks', 'Natrual_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 >>> Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers """ y = np.array(y) adam = (np.abs(y - np.median(y))).sum() for k in range(2, maxk + 1): cl = kmethods[method](y, k) gadf = 1 - cl.adcm / adam if gadf > pct: break return (k, cl, gadf)
python
def gadf(y, method="Quantiles", maxk=15, pct=0.8): """ Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf>pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles, 'Fisher_Jenks', 'Maximum_Breaks', 'Natrual_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 >>> Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers """ y = np.array(y) adam = (np.abs(y - np.median(y))).sum() for k in range(2, maxk + 1): cl = kmethods[method](y, k) gadf = 1 - cl.adcm / adam if gadf > pct: break return (k, cl, gadf)
[ "def", "gadf", "(", "y", ",", "method", "=", "\"Quantiles\"", ",", "maxk", "=", "15", ",", "pct", "=", "0.8", ")", ":", "y", "=", "np", ".", "array", "(", "y", ")", "adam", "=", "(", "np", ".", "abs", "(", "y", "-", "np", ".", "median", "(", "y", ")", ")", ")", ".", "sum", "(", ")", "for", "k", "in", "range", "(", "2", ",", "maxk", "+", "1", ")", ":", "cl", "=", "kmethods", "[", "method", "]", "(", "y", ",", "k", ")", "gadf", "=", "1", "-", "cl", ".", "adcm", "/", "adam", "if", "gadf", ">", "pct", ":", "break", "return", "(", "k", ",", "cl", ",", "gadf", ")" ]
Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf > pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers
[ "Evaluate", "the", "Goodness", "of", "Absolute", "Deviation", "Fit", "of", "a", "Classifier", "Finds", "the", "minimum", "value", "of", "k", "for", "which", "gadf", ">", "pct" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2255-L2325
train
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.make
def make(cls, *args, **kwargs): """ Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5]) """ # only flag overrides return flag to_annotate = copy.deepcopy(kwargs) return_object = kwargs.pop('return_object', False) return_bins = kwargs.pop('return_bins', False) return_counts = kwargs.pop('return_counts', False) rolling = kwargs.pop('rolling', False) if rolling: # just initialize a fake classifier data = list(range(10)) cls_instance = cls(data, *args, **kwargs) # and empty it, since we'll be using the update cls_instance.y = np.array([]) else: cls_instance = None # wrap init in a closure to make a consumer. # Qc Na: "Objects/Closures are poor man's Closures/Objects" def classifier(data, cls_instance=cls_instance): if rolling: cls_instance.update(data, inplace=True, **kwargs) yb = cls_instance.find_bin(data) else: cls_instance = cls(data, *args, **kwargs) yb = cls_instance.yb outs = [yb, None, None, None] outs[1] = cls_instance if return_object else None outs[2] = cls_instance.bins if return_bins else None outs[3] = cls_instance.counts if return_counts else None outs = [a for a in outs if a is not None] if len(outs) == 1: return outs[0] else: return outs # for debugging/jic, keep around the kwargs. # in future, we might want to make this a thin class, so that we can # set a custom repr. Call the class `Binner` or something, that's a # pre-configured Classifier that just consumes data, bins it, & # possibly updates the bins. classifier._options = to_annotate return classifier
python
def make(cls, *args, **kwargs): """ Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5]) """ # only flag overrides return flag to_annotate = copy.deepcopy(kwargs) return_object = kwargs.pop('return_object', False) return_bins = kwargs.pop('return_bins', False) return_counts = kwargs.pop('return_counts', False) rolling = kwargs.pop('rolling', False) if rolling: # just initialize a fake classifier data = list(range(10)) cls_instance = cls(data, *args, **kwargs) # and empty it, since we'll be using the update cls_instance.y = np.array([]) else: cls_instance = None # wrap init in a closure to make a consumer. # Qc Na: "Objects/Closures are poor man's Closures/Objects" def classifier(data, cls_instance=cls_instance): if rolling: cls_instance.update(data, inplace=True, **kwargs) yb = cls_instance.find_bin(data) else: cls_instance = cls(data, *args, **kwargs) yb = cls_instance.yb outs = [yb, None, None, None] outs[1] = cls_instance if return_object else None outs[2] = cls_instance.bins if return_bins else None outs[3] = cls_instance.counts if return_counts else None outs = [a for a in outs if a is not None] if len(outs) == 1: return outs[0] else: return outs # for debugging/jic, keep around the kwargs. # in future, we might want to make this a thin class, so that we can # set a custom repr. Call the class `Binner` or something, that's a # pre-configured Classifier that just consumes data, bins it, & # possibly updates the bins. classifier._options = to_annotate return classifier
[ "def", "make", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# only flag overrides return flag", "to_annotate", "=", "copy", ".", "deepcopy", "(", "kwargs", ")", "return_object", "=", "kwargs", ".", "pop", "(", "'return_object'", ",", "False", ")", "return_bins", "=", "kwargs", ".", "pop", "(", "'return_bins'", ",", "False", ")", "return_counts", "=", "kwargs", ".", "pop", "(", "'return_counts'", ",", "False", ")", "rolling", "=", "kwargs", ".", "pop", "(", "'rolling'", ",", "False", ")", "if", "rolling", ":", "# just initialize a fake classifier", "data", "=", "list", "(", "range", "(", "10", ")", ")", "cls_instance", "=", "cls", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# and empty it, since we'll be using the update", "cls_instance", ".", "y", "=", "np", ".", "array", "(", "[", "]", ")", "else", ":", "cls_instance", "=", "None", "# wrap init in a closure to make a consumer.", "# Qc Na: \"Objects/Closures are poor man's Closures/Objects\"", "def", "classifier", "(", "data", ",", "cls_instance", "=", "cls_instance", ")", ":", "if", "rolling", ":", "cls_instance", ".", "update", "(", "data", ",", "inplace", "=", "True", ",", "*", "*", "kwargs", ")", "yb", "=", "cls_instance", ".", "find_bin", "(", "data", ")", "else", ":", "cls_instance", "=", "cls", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "yb", "=", "cls_instance", ".", "yb", "outs", "=", "[", "yb", ",", "None", ",", "None", ",", "None", "]", "outs", "[", "1", "]", "=", "cls_instance", "if", "return_object", "else", "None", "outs", "[", "2", "]", "=", "cls_instance", ".", "bins", "if", "return_bins", "else", "None", "outs", "[", "3", "]", "=", "cls_instance", ".", "counts", "if", "return_counts", "else", "None", "outs", "=", "[", "a", "for", "a", "in", "outs", "if", "a", "is", "not", "None", "]", "if", "len", "(", "outs", ")", "==", "1", ":", "return", "outs", "[", "0", "]", "else", ":", "return", "outs", "# for debugging/jic, keep around the kwargs.", "# in future, we might want to make this a thin class, so that we can", "# set a custom repr. Call the class `Binner` or something, that's a", "# pre-configured Classifier that just consumes data, bins it, &", "# possibly updates the bins.", "classifier", ".", "_options", "=", "to_annotate", "return", "classifier" ]
Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
[ "Configure", "and", "create", "a", "classifier", "that", "will", "consume", "data", "and", "produce", "classifications", "given", "the", "configuration", "options", "specified", "by", "this", "function", "." ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L476-L618
train
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.get_tss
def get_tss(self): """ Total sum of squares around class means Returns sum of squares over all class means """ tss = 0 for class_def in self.classes: if len(class_def) > 0: yc = self.y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
python
def get_tss(self): """ Total sum of squares around class means Returns sum of squares over all class means """ tss = 0 for class_def in self.classes: if len(class_def) > 0: yc = self.y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
[ "def", "get_tss", "(", "self", ")", ":", "tss", "=", "0", "for", "class_def", "in", "self", ".", "classes", ":", "if", "len", "(", "class_def", ")", ">", "0", ":", "yc", "=", "self", ".", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "tss", "+=", "sum", "(", "css", ")", "return", "tss" ]
Total sum of squares around class means Returns sum of squares over all class means
[ "Total", "sum", "of", "squares", "around", "class", "means" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L663-L676
train
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.get_gadf
def get_gadf(self): """ Goodness of absolute deviation of fit """ adam = (np.abs(self.y - np.median(self.y))).sum() gadf = 1 - self.adcm / adam return gadf
python
def get_gadf(self): """ Goodness of absolute deviation of fit """ adam = (np.abs(self.y - np.median(self.y))).sum() gadf = 1 - self.adcm / adam return gadf
[ "def", "get_gadf", "(", "self", ")", ":", "adam", "=", "(", "np", ".", "abs", "(", "self", ".", "y", "-", "np", ".", "median", "(", "self", ".", "y", ")", ")", ")", ".", "sum", "(", ")", "gadf", "=", "1", "-", "self", ".", "adcm", "/", "adam", "return", "gadf" ]
Goodness of absolute deviation of fit
[ "Goodness", "of", "absolute", "deviation", "of", "fit" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L699-L705
train
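get_gadf depends on an adcm attribute that is not shown in this record. The following sketch computes the same quantity from scratch on toy data, with class membership chosen by hand; the variable names are illustrative, not the library's:

import numpy as np

y = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
classes = [y[:3], y[3:]]   # hand-picked class membership

adam = np.abs(y - np.median(y)).sum()                        # global deviations
adcm = sum(np.abs(c - np.median(c)).sum() for c in classes)  # class-wise
print(1 - adcm / adam)   # -> 1 - 4/27, about 0.852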
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.find_bin
def find_bin(self, x): """ Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin. """ x = np.asarray(x).flatten() right = np.digitize(x, self.bins, right=True) if right.max() == len(self.bins): right[right == len(self.bins)] = len(self.bins) - 1 return right
python
def find_bin(self, x): """ Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin. """ x = np.asarray(x).flatten() right = np.digitize(x, self.bins, right=True) if right.max() == len(self.bins): right[right == len(self.bins)] = len(self.bins) - 1 return right
[ "def", "find_bin", "(", "self", ",", "x", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", ".", "flatten", "(", ")", "right", "=", "np", ".", "digitize", "(", "x", ",", "self", ".", "bins", ",", "right", "=", "True", ")", "if", "right", ".", "max", "(", ")", "==", "len", "(", "self", ".", "bins", ")", ":", "right", "[", "right", "==", "len", "(", "self", ".", "bins", ")", "]", "=", "len", "(", "self", ".", "bins", ")", "-", "1", "return", "right" ]
Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin.
[ "Sort", "input", "or", "inputs", "according", "to", "the", "current", "bin", "estimate" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L751-L779
train
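find_bin's docstring contrasts its clamping behavior with numpy.digitize. A short standalone demonstration of exactly that difference (toy bins and values):

import numpy as np

bins = np.array([10.0, 20.0, 30.0])
x = np.array([5.0, 15.0, 30.0, 99.0])

right = np.digitize(x, bins, right=True)
print(right)                              # -> [0 1 2 3]; 99.0 is past the last bin
right[right == len(bins)] = len(bins) - 1
print(right)                              # -> [0 1 2 2]; clamped into the top bin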
pysal/mapclassify
mapclassify/classifiers.py
Fisher_Jenks_Sampled.update
def update(self, y=None, inplace=False, **kwargs): """ Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor. """ kwargs.update({'k': kwargs.pop('k', self.k)}) kwargs.update({'pct': kwargs.pop('pct', self.pct)}) kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)}) if inplace: self._update(y, **kwargs) else: new = copy.deepcopy(self) new._update(y, **kwargs) return new
python
def update(self, y=None, inplace=False, **kwargs): """ Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor. """ kwargs.update({'k': kwargs.pop('k', self.k)}) kwargs.update({'pct': kwargs.pop('pct', self.pct)}) kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)}) if inplace: self._update(y, **kwargs) else: new = copy.deepcopy(self) new._update(y, **kwargs) return new
[ "def", "update", "(", "self", ",", "y", "=", "None", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'k'", ":", "kwargs", ".", "pop", "(", "'k'", ",", "self", ".", "k", ")", "}", ")", "kwargs", ".", "update", "(", "{", "'pct'", ":", "kwargs", ".", "pop", "(", "'pct'", ",", "self", ".", "pct", ")", "}", ")", "kwargs", ".", "update", "(", "{", "'truncate'", ":", "kwargs", ".", "pop", "(", "'truncate'", ",", "self", ".", "_truncated", ")", "}", ")", "if", "inplace", ":", "self", ".", "_update", "(", "y", ",", "*", "*", "kwargs", ")", "else", ":", "new", "=", "copy", ".", "deepcopy", "(", "self", ")", "new", ".", "_update", "(", "y", ",", "*", "*", "kwargs", ")", "return", "new" ]
Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor.
[ "Add", "data", "or", "change", "classification", "parameters", "." ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L1586-L1609
train
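The update method above shows a pattern that recurs across these classifier records: keyword arguments default to the object's current settings, and non-inplace updates go through a deep copy. A stripped-down sketch with a hypothetical Box class (not part of the library; the re-fit step is elided):

import copy

class Box:
    def __init__(self, k=4):
        self.k = k
    def _update(self, y=None, **kwargs):
        self.k = kwargs.get('k', self.k)   # a real classifier would re-fit here
    def update(self, y=None, inplace=False, **kwargs):
        kwargs.update({'k': kwargs.pop('k', self.k)})   # default to current k
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

b = Box(k=4)
c = b.update(k=6)   # returns a reconfigured copy
print(b.k, c.k)     # -> 4 6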
pysal/mapclassify
mapclassify/classifiers.py
Max_P_Classifier._ss
def _ss(self, class_def): """calculates sum of squares for a class""" yc = self.y[class_def] css = yc - yc.mean() css *= css return sum(css)
python
def _ss(self, class_def): """calculates sum of squares for a class""" yc = self.y[class_def] css = yc - yc.mean() css *= css return sum(css)
[ "def", "_ss", "(", "self", ",", "class_def", ")", ":", "yc", "=", "self", ".", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "return", "sum", "(", "css", ")" ]
calculates sum of squares for a class
[ "calculates", "sum", "of", "squares", "for", "a", "class" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2178-L2183
train
pysal/mapclassify
mapclassify/classifiers.py
Max_P_Classifier._swap
def _swap(self, class1, class2, a): """evaluate cost of moving a from class1 to class2""" ss1 = self._ss(class1) ss2 = self._ss(class2) tss1 = ss1 + ss2 class1c = copy.copy(class1) class2c = copy.copy(class2) class1c.remove(a) class2c.append(a) ss1 = self._ss(class1c) ss2 = self._ss(class2c) tss2 = ss1 + ss2 if tss1 < tss2: return False else: return True
python
def _swap(self, class1, class2, a): """evaluate cost of moving a from class1 to class2""" ss1 = self._ss(class1) ss2 = self._ss(class2) tss1 = ss1 + ss2 class1c = copy.copy(class1) class2c = copy.copy(class2) class1c.remove(a) class2c.append(a) ss1 = self._ss(class1c) ss2 = self._ss(class2c) tss2 = ss1 + ss2 if tss1 < tss2: return False else: return True
[ "def", "_swap", "(", "self", ",", "class1", ",", "class2", ",", "a", ")", ":", "ss1", "=", "self", ".", "_ss", "(", "class1", ")", "ss2", "=", "self", ".", "_ss", "(", "class2", ")", "tss1", "=", "ss1", "+", "ss2", "class1c", "=", "copy", ".", "copy", "(", "class1", ")", "class2c", "=", "copy", ".", "copy", "(", "class2", ")", "class1c", ".", "remove", "(", "a", ")", "class2c", ".", "append", "(", "a", ")", "ss1", "=", "self", ".", "_ss", "(", "class1c", ")", "ss2", "=", "self", ".", "_ss", "(", "class2c", ")", "tss2", "=", "ss1", "+", "ss2", "if", "tss1", "<", "tss2", ":", "return", "False", "else", ":", "return", "True" ]
evaluate cost of moving a from class1 to class2
[ "evaluate", "cost", "of", "moving", "a", "from", "class1", "to", "class2" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2185-L2200
train
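_swap accepts a move only if it does not increase the combined within-class sum of squares. A self-contained numeric check of that criterion on toy classes:

import numpy as np

def ss(vals):
    vals = np.asarray(vals, dtype=float)
    return ((vals - vals.mean()) ** 2).sum()

class1, class2 = [1.0, 2.0, 9.0], [10.0, 11.0]
a = 9.0
before = ss(class1) + ss(class2)
after = ss([v for v in class1 if v != a]) + ss(class2 + [a])
print(before, after, after <= before)   # moving 9.0 next to 10 and 11 helps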
abarker/pdfCropMargins
src/pdfCropMargins/calculate_bounding_boxes.py
get_bounding_box_list_render_image
def get_bounding_box_list_render_image(pdf_file_name, input_doc): """Calculate the bounding box list by directly rendering each page of the PDF as an image file. The MediaBox and CropBox values in input_doc should have already been set to the chosen page size before the rendering.""" program_to_use = "pdftoppm" # default to pdftoppm if args.gsRender: program_to_use = "Ghostscript" # Threshold value set in range 0-255, where 0 is black, with 191 default. if not args.threshold: args.threshold = 191 threshold = args.threshold if not args.numSmooths: args.numSmooths = 0 if not args.numBlurs: args.numBlurs = 0 temp_dir = ex.program_temp_directory # use the program default; don't delete dir! temp_image_file_root = os.path.join(temp_dir, ex.temp_file_prefix + "PageImage") if args.verbose: print("\nRendering the PDF to images using the " + program_to_use + " program," "\nthis may take a while...") # Do the rendering of all the files. render_pdf_file_to_image_files(pdf_file_name, temp_image_file_root, program_to_use) # Currently assuming that sorting the output will always put them in correct order. outfiles = sorted(glob.glob(temp_image_file_root + "*")) if args.verbose: print("\nAnalyzing the page images with PIL to find bounding boxes," "\nusing the threshold " + str(args.threshold) + "." " Finding the bounding box for page:\n") bounding_box_list = [] for page_num, tmp_image_file_name in enumerate(outfiles): curr_page = input_doc.getPage(page_num) # Open the image in PIL. Retry a few times on failure in case of race conditions. max_num_tries = 3 time_between_tries = 1 curr_num_tries = 0 while True: try: # PIL for some reason fails in Python 3.4 if you open the image # from a file you opened yourself. Works in Python 2 and earlier # Python 3. So original code is commented out, and path passed. # # tmpImageFile = open(tmpImageFileName) # im = Image.open(tmpImageFile) im = Image.open(tmp_image_file_name) break except (IOError, UnicodeDecodeError) as e: curr_num_tries += 1 if args.verbose: print("Warning: Exception opening image", tmp_image_file_name, "on try", curr_num_tries, "\nError is", e, file=sys.stderr) # tmpImageFile.close() # see above comment if curr_num_tries > max_num_tries: raise # re-raise exception time.sleep(time_between_tries) # Apply any blur or smooth operations specified by the user. for i in range(args.numBlurs): im = im.filter(ImageFilter.BLUR) for i in range(args.numSmooths): im = im.filter(ImageFilter.SMOOTH_MORE) # Convert the image to black and white, according to a threshold. # Make a negative image, because that works with the PIL getbbox routine. if args.verbose: print(page_num+1, end=" ") # page num numbering from 1 # Note that the point method calls the function on each pixel, replacing it. #im = im.point(lambda p: p > threshold and 255) # create a positive image #im = im.point(lambda p: p < threshold and 255) # create a negative image # Below code is easier to understand than tricky use of "and" in evaluation. im = im.point(lambda p: 255 if p < threshold else 0) # create a negative image if args.showImages: im.show() # usually for debugging or param-setting # Calculate the bounding box of the negative image, and append to list. bounding_box = calculate_bounding_box_from_image(im, curr_page) bounding_box_list.append(bounding_box) # Clean up the image files after they are no longer needed. # tmpImageFile.close() # see above comment os.remove(tmp_image_file_name) if args.verbose: print() return bounding_box_list
python
def get_bounding_box_list_render_image(pdf_file_name, input_doc): """Calculate the bounding box list by directly rendering each page of the PDF as an image file. The MediaBox and CropBox values in input_doc should have already been set to the chosen page size before the rendering.""" program_to_use = "pdftoppm" # default to pdftoppm if args.gsRender: program_to_use = "Ghostscript" # Threshold value set in range 0-255, where 0 is black, with 191 default. if not args.threshold: args.threshold = 191 threshold = args.threshold if not args.numSmooths: args.numSmooths = 0 if not args.numBlurs: args.numBlurs = 0 temp_dir = ex.program_temp_directory # use the program default; don't delete dir! temp_image_file_root = os.path.join(temp_dir, ex.temp_file_prefix + "PageImage") if args.verbose: print("\nRendering the PDF to images using the " + program_to_use + " program," "\nthis may take a while...") # Do the rendering of all the files. render_pdf_file_to_image_files(pdf_file_name, temp_image_file_root, program_to_use) # Currently assuming that sorting the output will always put them in correct order. outfiles = sorted(glob.glob(temp_image_file_root + "*")) if args.verbose: print("\nAnalyzing the page images with PIL to find bounding boxes," "\nusing the threshold " + str(args.threshold) + "." " Finding the bounding box for page:\n") bounding_box_list = [] for page_num, tmp_image_file_name in enumerate(outfiles): curr_page = input_doc.getPage(page_num) # Open the image in PIL. Retry a few times on failure in case of race conditions. max_num_tries = 3 time_between_tries = 1 curr_num_tries = 0 while True: try: # PIL for some reason fails in Python 3.4 if you open the image # from a file you opened yourself. Works in Python 2 and earlier # Python 3. So original code is commented out, and path passed. # # tmpImageFile = open(tmpImageFileName) # im = Image.open(tmpImageFile) im = Image.open(tmp_image_file_name) break except (IOError, UnicodeDecodeError) as e: curr_num_tries += 1 if args.verbose: print("Warning: Exception opening image", tmp_image_file_name, "on try", curr_num_tries, "\nError is", e, file=sys.stderr) # tmpImageFile.close() # see above comment if curr_num_tries > max_num_tries: raise # re-raise exception time.sleep(time_between_tries) # Apply any blur or smooth operations specified by the user. for i in range(args.numBlurs): im = im.filter(ImageFilter.BLUR) for i in range(args.numSmooths): im = im.filter(ImageFilter.SMOOTH_MORE) # Convert the image to black and white, according to a threshold. # Make a negative image, because that works with the PIL getbbox routine. if args.verbose: print(page_num+1, end=" ") # page num numbering from 1 # Note that the point method calls the function on each pixel, replacing it. #im = im.point(lambda p: p > threshold and 255) # create a positive image #im = im.point(lambda p: p < threshold and 255) # create a negative image # Below code is easier to understand than tricky use of "and" in evaluation. im = im.point(lambda p: 255 if p < threshold else 0) # create a negative image if args.showImages: im.show() # usually for debugging or param-setting # Calculate the bounding box of the negative image, and append to list. bounding_box = calculate_bounding_box_from_image(im, curr_page) bounding_box_list.append(bounding_box) # Clean up the image files after they are no longer needed. # tmpImageFile.close() # see above comment os.remove(tmp_image_file_name) if args.verbose: print() return bounding_box_list
[ "def", "get_bounding_box_list_render_image", "(", "pdf_file_name", ",", "input_doc", ")", ":", "program_to_use", "=", "\"pdftoppm\"", "# default to pdftoppm", "if", "args", ".", "gsRender", ":", "program_to_use", "=", "\"Ghostscript\"", "# Threshold value set in range 0-255, where 0 is black, with 191 default.", "if", "not", "args", ".", "threshold", ":", "args", ".", "threshold", "=", "191", "threshold", "=", "args", ".", "threshold", "if", "not", "args", ".", "numSmooths", ":", "args", ".", "numSmooths", "=", "0", "if", "not", "args", ".", "numBlurs", ":", "args", ".", "numBlurs", "=", "0", "temp_dir", "=", "ex", ".", "program_temp_directory", "# use the program default; don't delete dir!", "temp_image_file_root", "=", "os", ".", "path", ".", "join", "(", "temp_dir", ",", "ex", ".", "temp_file_prefix", "+", "\"PageImage\"", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nRendering the PDF to images using the \"", "+", "program_to_use", "+", "\" program,\"", "\"\\nthis may take a while...\"", ")", "# Do the rendering of all the files.", "render_pdf_file_to_image_files", "(", "pdf_file_name", ",", "temp_image_file_root", ",", "program_to_use", ")", "# Currently assuming that sorting the output will always put them in correct order.", "outfiles", "=", "sorted", "(", "glob", ".", "glob", "(", "temp_image_file_root", "+", "\"*\"", ")", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nAnalyzing the page images with PIL to find bounding boxes,\"", "\"\\nusing the threshold \"", "+", "str", "(", "args", ".", "threshold", ")", "+", "\".\"", "\" Finding the bounding box for page:\\n\"", ")", "bounding_box_list", "=", "[", "]", "for", "page_num", ",", "tmp_image_file_name", "in", "enumerate", "(", "outfiles", ")", ":", "curr_page", "=", "input_doc", ".", "getPage", "(", "page_num", ")", "# Open the image in PIL. Retry a few times on fail in case race conditions.", "max_num_tries", "=", "3", "time_between_tries", "=", "1", "curr_num_tries", "=", "0", "while", "True", ":", "try", ":", "# PIL for some reason fails in Python 3.4 if you open the image", "# from a file you opened yourself. Works in Python 2 and earlier", "# Python 3. 
So original code is commented out, and path passed.", "#", "# tmpImageFile = open(tmpImageFileName)", "# im = Image.open(tmpImageFile)", "im", "=", "Image", ".", "open", "(", "tmp_image_file_name", ")", "break", "except", "(", "IOError", ",", "UnicodeDecodeError", ")", "as", "e", ":", "curr_num_tries", "+=", "1", "if", "args", ".", "verbose", ":", "print", "(", "\"Warning: Exception opening image\"", ",", "tmp_image_file_name", ",", "\"on try\"", ",", "curr_num_tries", ",", "\"\\nError is\"", ",", "e", ",", "file", "=", "sys", ".", "stderr", ")", "# tmpImageFile.close() # see above comment", "if", "curr_num_tries", ">", "max_num_tries", ":", "raise", "# re-raise exception", "time", ".", "sleep", "(", "time_between_tries", ")", "# Apply any blur or smooth operations specified by the user.", "for", "i", "in", "range", "(", "args", ".", "numBlurs", ")", ":", "im", "=", "im", ".", "filter", "(", "ImageFilter", ".", "BLUR", ")", "for", "i", "in", "range", "(", "args", ".", "numSmooths", ")", ":", "im", "=", "im", ".", "filter", "(", "ImageFilter", ".", "SMOOTH_MORE", ")", "# Convert the image to black and white, according to a threshold.", "# Make a negative image, because that works with the PIL getbbox routine.", "if", "args", ".", "verbose", ":", "print", "(", "page_num", "+", "1", ",", "end", "=", "\" \"", ")", "# page num numbering from 1", "# Note that the point method calls the function on each pixel, replacing it.", "#im = im.point(lambda p: p > threshold and 255) # create a positive image", "#im = im.point(lambda p: p < threshold and 255) # create a negative image", "# Below code is easier to understand than tricky use of \"and\" in evaluation.", "im", "=", "im", ".", "point", "(", "lambda", "p", ":", "255", "if", "p", "<", "threshold", "else", "0", ")", "# create a negative image", "if", "args", ".", "showImages", ":", "im", ".", "show", "(", ")", "# usually for debugging or param-setting", "# Calculate the bounding box of the negative image, and append to list.", "bounding_box", "=", "calculate_bounding_box_from_image", "(", "im", ",", "curr_page", ")", "bounding_box_list", ".", "append", "(", "bounding_box", ")", "# Clean up the image files after they are no longer needed.", "# tmpImageFile.close() # see above comment", "os", ".", "remove", "(", "tmp_image_file_name", ")", "if", "args", ".", "verbose", ":", "print", "(", ")", "return", "bounding_box_list" ]
Calculate the bounding box list by directly rendering each page of the PDF as an image file. The MediaBox and CropBox values in input_doc should have already been set to the chosen page size before the rendering.
[ "Calculate", "the", "bounding", "box", "list", "by", "directly", "rendering", "each", "page", "of", "the", "PDF", "as", "an", "image", "file", ".", "The", "MediaBox", "and", "CropBox", "values", "in", "input_doc", "should", "have", "already", "been", "set", "to", "the", "chosen", "page", "size", "before", "the", "rendering", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L116-L206
train
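The thresholding step in the record above converts each rendered page to a negative image so that PIL's getbbox() can locate the content. A minimal standalone sketch using a synthetic page in place of a rendered PDF (sizes and coordinates are illustrative):

from PIL import Image, ImageDraw

threshold = 191
page = Image.new('L', (200, 100), 255)                      # blank white page
ImageDraw.Draw(page).rectangle([50, 20, 120, 70], fill=0)   # dark "content"

# Negative image: pixels darker than the threshold become white (255),
# everything else becomes zero, which getbbox() treats as background.
neg = page.point(lambda p: 255 if p < threshold else 0)
print(neg.getbbox())   # -> (50, 20, 121, 71): left, upper, right, lower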
abarker/pdfCropMargins
src/pdfCropMargins/calculate_bounding_boxes.py
render_pdf_file_to_image_files
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use): """Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root. Any directories must have already been created, and the calling program is responsible for deleting any directories or image files. The program program_to_use, currently either the string "pdftoppm" or the string "Ghostscript", will be called externally. The image type that the PDF is converted into must be directly openable by PIL.""" res_x = str(args.resX) res_y = str(args.resY) if program_to_use == "Ghostscript": if ex.system_os == "Windows": # Windows PIL is more likely to know BMP ex.render_pdf_file_to_image_files__ghostscript_bmp( pdf_file_name, output_filename_root, res_x, res_y) else: # Linux and Cygwin should be fine with PNG ex.render_pdf_file_to_image_files__ghostscript_png( pdf_file_name, output_filename_root, res_x, res_y) elif program_to_use == "pdftoppm": use_gray = False # this is currently hardcoded, but can be changed to use pgm if use_gray: ex.render_pdf_file_to_image_files_pdftoppm_pgm( pdf_file_name, output_filename_root, res_x, res_y) else: ex.render_pdf_file_to_image_files_pdftoppm_ppm( pdf_file_name, output_filename_root, res_x, res_y) else: print("Error in render_pdf_file_to_image_files: Unrecognized external program.", file=sys.stderr) ex.cleanup_and_exit(1)
python
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use): """Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root. Any directories must have already been created, and the calling program is responsible for deleting any directories or image files. The program program_to_use, currently either the string "pdftoppm" or the string "Ghostscript", will be called externally. The image type that the PDF is converted into must be directly openable by PIL.""" res_x = str(args.resX) res_y = str(args.resY) if program_to_use == "Ghostscript": if ex.system_os == "Windows": # Windows PIL is more likely to know BMP ex.render_pdf_file_to_image_files__ghostscript_bmp( pdf_file_name, output_filename_root, res_x, res_y) else: # Linux and Cygwin should be fine with PNG ex.render_pdf_file_to_image_files__ghostscript_png( pdf_file_name, output_filename_root, res_x, res_y) elif program_to_use == "pdftoppm": use_gray = False # this is currently hardcoded, but can be changed to use pgm if use_gray: ex.render_pdf_file_to_image_files_pdftoppm_pgm( pdf_file_name, output_filename_root, res_x, res_y) else: ex.render_pdf_file_to_image_files_pdftoppm_ppm( pdf_file_name, output_filename_root, res_x, res_y) else: print("Error in render_pdf_file_to_image_files: Unrecognized external program.", file=sys.stderr) ex.cleanup_and_exit(1)
[ "def", "render_pdf_file_to_image_files", "(", "pdf_file_name", ",", "output_filename_root", ",", "program_to_use", ")", ":", "res_x", "=", "str", "(", "args", ".", "resX", ")", "res_y", "=", "str", "(", "args", ".", "resY", ")", "if", "program_to_use", "==", "\"Ghostscript\"", ":", "if", "ex", ".", "system_os", "==", "\"Windows\"", ":", "# Windows PIL is more likely to know BMP", "ex", ".", "render_pdf_file_to_image_files__ghostscript_bmp", "(", "pdf_file_name", ",", "output_filename_root", ",", "res_x", ",", "res_y", ")", "else", ":", "# Linux and Cygwin should be fine with PNG", "ex", ".", "render_pdf_file_to_image_files__ghostscript_png", "(", "pdf_file_name", ",", "output_filename_root", ",", "res_x", ",", "res_y", ")", "elif", "program_to_use", "==", "\"pdftoppm\"", ":", "use_gray", "=", "False", "# this is currently hardcoded, but can be changed to use pgm", "if", "use_gray", ":", "ex", ".", "render_pdf_file_to_image_files_pdftoppm_pgm", "(", "pdf_file_name", ",", "output_filename_root", ",", "res_x", ",", "res_y", ")", "else", ":", "ex", ".", "render_pdf_file_to_image_files_pdftoppm_ppm", "(", "pdf_file_name", ",", "output_filename_root", ",", "res_x", ",", "res_y", ")", "else", ":", "print", "(", "\"Error in renderPdfFileToImageFile: Unrecognized external program.\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")" ]
Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root. Any directories must have already been created, and the calling program is responsible for deleting any directories or image files. The program program_to_use, currently either the string "pdftoppm" or the string "Ghostscript", will be called externally. The image type that the PDF is converted into must be directly openable by PIL.
[ "Render", "all", "the", "pages", "of", "the", "PDF", "file", "at", "pdf_file_name", "to", "image", "files", "with", "path", "and", "filename", "prefix", "given", "by", "output_filename_root", ".", "Any", "directories", "must", "have", "already", "been", "created", "and", "the", "calling", "program", "is", "responsible", "for", "deleting", "any", "directories", "or", "image", "files", ".", "The", "program", "program_to_use", "currently", "either", "the", "string", "pdftoppm", "or", "the", "string", "Ghostscript", "will", "be", "called", "externally", ".", "The", "image", "type", "that", "the", "PDF", "is", "converted", "into", "must", "to", "be", "directly", "openable", "by", "PIL", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L208-L237
train
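A hedged usage sketch for the dispatcher above; the input file name and output prefix are made up, and the module-level args and ex globals are assumed to have been initialized by the command-line front end:

# Hypothetical call; writes numbered image files starting with the given prefix.
render_pdf_file_to_image_files("input.pdf", "/tmp/pdfcrop/page", "pdftoppm")
# Each rendered page can then be opened directly with PIL's Image.open().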
abarker/pdfCropMargins
src/pdfCropMargins/calculate_bounding_boxes.py
calculate_bounding_box_from_image
def calculate_bounding_box_from_image(im, curr_page): """This function uses a PIL routine to get the bounding box of the rendered image.""" xMax, y_max = im.size bounding_box = im.getbbox() # note this uses ltrb convention if not bounding_box: #print("\nWarning: could not calculate a bounding box for this page." # "\nAn empty page is assumed.", file=sys.stderr) bounding_box = (xMax/2, y_max/2, xMax/2, y_max/2) bounding_box = list(bounding_box) # make temporarily mutable # Compensate for reversal of the image y convention versus PDF. bounding_box[1] = y_max - bounding_box[1] bounding_box[3] = y_max - bounding_box[3] full_page_box = curr_page.mediaBox # should have been set already to chosen box # Convert pixel units to PDF's bp units. convert_x = float(full_page_box.getUpperRight_x() - full_page_box.getLowerLeft_x()) / xMax convert_y = float(full_page_box.getUpperRight_y() - full_page_box.getLowerLeft_y()) / y_max # Get final box; note conversion to lower-left point, upper-right point format. final_box = [ bounding_box[0] * convert_x, bounding_box[3] * convert_y, bounding_box[2] * convert_x, bounding_box[1] * convert_y] return final_box
python
def calculate_bounding_box_from_image(im, curr_page): """This function uses a PIL routine to get the bounding box of the rendered image.""" xMax, y_max = im.size bounding_box = im.getbbox() # note this uses ltrb convention if not bounding_box: #print("\nWarning: could not calculate a bounding box for this page." # "\nAn empty page is assumed.", file=sys.stderr) bounding_box = (xMax/2, y_max/2, xMax/2, y_max/2) bounding_box = list(bounding_box) # make temporarily mutable # Compensate for reversal of the image y convention versus PDF. bounding_box[1] = y_max - bounding_box[1] bounding_box[3] = y_max - bounding_box[3] full_page_box = curr_page.mediaBox # should have been set already to chosen box # Convert pixel units to PDF's bp units. convert_x = float(full_page_box.getUpperRight_x() - full_page_box.getLowerLeft_x()) / xMax convert_y = float(full_page_box.getUpperRight_y() - full_page_box.getLowerLeft_y()) / y_max # Get final box; note conversion to lower-left point, upper-right point format. final_box = [ bounding_box[0] * convert_x, bounding_box[3] * convert_y, bounding_box[2] * convert_x, bounding_box[1] * convert_y] return final_box
[ "def", "calculate_bounding_box_from_image", "(", "im", ",", "curr_page", ")", ":", "xMax", ",", "y_max", "=", "im", ".", "size", "bounding_box", "=", "im", ".", "getbbox", "(", ")", "# note this uses ltrb convention", "if", "not", "bounding_box", ":", "#print(\"\\nWarning: could not calculate a bounding box for this page.\"", "# \"\\nAn empty page is assumed.\", file=sys.stderr)", "bounding_box", "=", "(", "xMax", "/", "2", ",", "y_max", "/", "2", ",", "xMax", "/", "2", ",", "y_max", "/", "2", ")", "bounding_box", "=", "list", "(", "bounding_box", ")", "# make temporarily mutable", "# Compensate for reversal of the image y convention versus PDF.", "bounding_box", "[", "1", "]", "=", "y_max", "-", "bounding_box", "[", "1", "]", "bounding_box", "[", "3", "]", "=", "y_max", "-", "bounding_box", "[", "3", "]", "full_page_box", "=", "curr_page", ".", "mediaBox", "# should have been set already to chosen box", "# Convert pixel units to PDF's bp units.", "convert_x", "=", "float", "(", "full_page_box", ".", "getUpperRight_x", "(", ")", "-", "full_page_box", ".", "getLowerLeft_x", "(", ")", ")", "/", "xMax", "convert_y", "=", "float", "(", "full_page_box", ".", "getUpperRight_y", "(", ")", "-", "full_page_box", ".", "getLowerLeft_y", "(", ")", ")", "/", "y_max", "# Get final box; note conversion to lower-left point, upper-right point format.", "final_box", "=", "[", "bounding_box", "[", "0", "]", "*", "convert_x", ",", "bounding_box", "[", "3", "]", "*", "convert_y", ",", "bounding_box", "[", "2", "]", "*", "convert_x", ",", "bounding_box", "[", "1", "]", "*", "convert_y", "]", "return", "final_box" ]
This function uses a PIL routine to get the bounding box of the rendered image.
[ "This", "function", "uses", "a", "PIL", "routine", "to", "get", "the", "bounding", "box", "of", "the", "rendered", "image", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L239-L270
train
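A worked numeric example of the coordinate handling in calculate_bounding_box_from_image, with made-up numbers: a 1000x1500 pixel render of a 500x750 bp page, and a PIL getbbox() result in left-top-right-bottom pixel coordinates.

x_max, y_max = 1000, 1500
l, t, r, b = 100, 150, 900, 1350           # PIL bbox, top-left pixel origin
t, b = y_max - t, y_max - b                # flip y to PDF's bottom-left origin
cx, cy = 500 / x_max, 750 / y_max          # pixels -> big points (bp)
print([l * cx, b * cy, r * cx, t * cy])    # [50.0, 75.0, 450.0, 675.0]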
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
samefile
def samefile(path1, path2): """Test if paths refer to the same file or directory.""" if system_os == "Linux" or system_os == "Cygwin": return os.path.samefile(path1, path2) return (get_canonical_absolute_expanded_path(path1) == get_canonical_absolute_expanded_path(path2))
python
def samefile(path1, path2): """Test if paths refer to the same file or directory.""" if system_os == "Linux" or system_os == "Cygwin": return os.path.samefile(path1, path2) return (get_canonical_absolute_expanded_path(path1) == get_canonical_absolute_expanded_path(path2))
[ "def", "samefile", "(", "path1", ",", "path2", ")", ":", "if", "system_os", "==", "\"Linux\"", "or", "system_os", "==", "\"Cygwin\"", ":", "return", "os", ".", "path", ".", "samefile", "(", "path1", ",", "path2", ")", "return", "(", "get_canonical_absolute_expanded_path", "(", "path1", ")", "==", "get_canonical_absolute_expanded_path", "(", "path2", ")", ")" ]
Test if paths refer to the same file or directory.
[ "Test", "if", "paths", "refer", "to", "the", "same", "file", "or", "directory", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L135-L140
train
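A self-contained sketch of the non-Linux fallback in samefile, assuming (this is a guess at its behavior, not the module's actual code) that get_canonical_absolute_expanded_path expands ~ and environment variables, absolutizes, and resolves symlinks:

import os

def canonical(path):
    # Assumed equivalent of get_canonical_absolute_expanded_path.
    return os.path.realpath(os.path.abspath(
        os.path.expanduser(os.path.expandvars(path))))

print(canonical("~/doc.pdf") == canonical("~/./doc.pdf"))  # True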
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
convert_windows_path_to_cygwin
def convert_windows_path_to_cygwin(path): """Convert a Windows path to a Cygwin path. Just handles the basic case.""" if len(path) > 2 and path[1] == ":" and path[2] == "\\": newpath = cygwin_full_path_prefix + "/" + path[0] if len(path) > 3: newpath += "/" + path[3:] path = newpath path = path.replace("\\", "/") return path
python
def convert_windows_path_to_cygwin(path): """Convert a Windows path to a Cygwin path. Just handles the basic case.""" if len(path) > 2 and path[1] == ":" and path[2] == "\\": newpath = cygwin_full_path_prefix + "/" + path[0] if len(path) > 3: newpath += "/" + path[3:] path = newpath path = path.replace("\\", "/") return path
[ "def", "convert_windows_path_to_cygwin", "(", "path", ")", ":", "if", "len", "(", "path", ")", ">", "2", "and", "path", "[", "1", "]", "==", "\":\"", "and", "path", "[", "2", "]", "==", "\"\\\\\"", ":", "newpath", "=", "cygwin_full_path_prefix", "+", "\"/\"", "+", "path", "[", "0", "]", "if", "len", "(", "path", ")", ">", "3", ":", "newpath", "+=", "\"/\"", "+", "path", "[", "3", ":", "]", "path", "=", "newpath", "path", "=", "path", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "return", "path" ]
Convert a Windows path to a Cygwin path. Just handles the basic case.
[ "Convert", "a", "Windows", "path", "to", "a", "Cygwin", "path", ".", "Just", "handles", "the", "basic", "case", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L167-L174
train
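A worked example of convert_windows_path_to_cygwin, assuming the module global cygwin_full_path_prefix is "/cygdrive" (the usual Cygwin mount prefix, but an assumption here):

cygwin_full_path_prefix = "/cygdrive"  # assumed value of the module global

def convert(path):
    if len(path) > 2 and path[1] == ":" and path[2] == "\\":
        newpath = cygwin_full_path_prefix + "/" + path[0]
        if len(path) > 3:
            newpath += "/" + path[3:]
        path = newpath
    return path.replace("\\", "/")

print(convert(r"C:\Users\me\doc.pdf"))  # /cygdrive/C/Users/me/doc.pdf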
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
remove_program_temp_directory
def remove_program_temp_directory(): """Remove the global temp directory and all its contents.""" if os.path.exists(program_temp_directory): max_retries = 3 curr_retries = 0 time_between_retries = 1 while True: try: shutil.rmtree(program_temp_directory) break except IOError: curr_retries += 1 if curr_retries > max_retries: raise # re-raise the exception time.sleep(time_between_retries) except: print("Cleaning up temp dir...", file=sys.stderr) raise
python
def remove_program_temp_directory(): """Remove the global temp directory and all its contents.""" if os.path.exists(program_temp_directory): max_retries = 3 curr_retries = 0 time_between_retries = 1 while True: try: shutil.rmtree(program_temp_directory) break except IOError: curr_retries += 1 if curr_retries > max_retries: raise # re-raise the exception time.sleep(time_between_retries) except: print("Cleaning up temp dir...", file=sys.stderr) raise
[ "def", "remove_program_temp_directory", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "program_temp_directory", ")", ":", "max_retries", "=", "3", "curr_retries", "=", "0", "time_between_retries", "=", "1", "while", "True", ":", "try", ":", "shutil", ".", "rmtree", "(", "program_temp_directory", ")", "break", "except", "IOError", ":", "curr_retries", "+=", "1", "if", "curr_retries", ">", "max_retries", ":", "raise", "# re-raise the exception", "time", ".", "sleep", "(", "time_between_retries", ")", "except", ":", "print", "(", "\"Cleaning up temp dir...\"", ",", "file", "=", "sys", ".", "stderr", ")", "raise" ]
Remove the global temp directory and all its contents.
[ "Remove", "the", "global", "temp", "directory", "and", "all", "its", "contents", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L191-L208
train
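The retry loop in remove_program_temp_directory is a general pattern for deletions that can fail transiently (an indexer or virus scanner briefly holding a file open). A minimal standalone version, with parameter defaults matching the constants above:

import shutil
import time

def rmtree_with_retries(path, max_retries=3, delay=1):
    for attempt in range(max_retries + 1):
        try:
            shutil.rmtree(path)
            return
        except IOError:
            if attempt == max_retries:
                raise          # out of retries; propagate the error
            time.sleep(delay)  # give the other process time to let go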
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
call_external_subprocess
def call_external_subprocess(command_list, stdin_filename=None, stdout_filename=None, stderr_filename=None, env=None): """Run the command and arguments in the command_list. Will search the system PATH for commands to execute, but no shell is started. Redirects any selected outputs to the given filename. Waits for command completion.""" if stdin_filename: stdin = open(stdin_filename, "r") else: stdin = None if stdout_filename: stdout = open(stdout_filename, "w") else: stdout = None if stderr_filename: stderr = open(stderr_filename, "w") else: stderr = None subprocess.check_call(command_list, stdin=stdin, stdout=stdout, stderr=stderr, env=env) if stdin_filename: stdin.close() if stdout_filename: stdout.close() if stderr_filename: stderr.close() # The older way to do the above with os.system is below, just for reference. # command = " ".join(command_list) # if stdin_filename: command += " < " + stdin_filename # if stdout_filename: command += " > " + stdout_filename # if stderr_filename: command += " 2> " + stderr_filename # os.system(command) return
python
def call_external_subprocess(command_list, stdin_filename=None, stdout_filename=None, stderr_filename=None, env=None): """Run the command and arguments in the command_list. Will search the system PATH for commands to execute, but no shell is started. Redirects any selected outputs to the given filename. Waits for command completion.""" if stdin_filename: stdin = open(stdin_filename, "r") else: stdin = None if stdout_filename: stdout = open(stdout_filename, "w") else: stdout = None if stderr_filename: stderr = open(stderr_filename, "w") else: stderr = None subprocess.check_call(command_list, stdin=stdin, stdout=stdout, stderr=stderr, env=env) if stdin_filename: stdin.close() if stdout_filename: stdout.close() if stderr_filename: stderr.close() # The older way to do the above with os.system is below, just for reference. # command = " ".join(command_list) # if stdin_filename: command += " < " + stdin_filename # if stdout_filename: command += " > " + stdout_filename # if stderr_filename: command += " 2> " + stderr_filename # os.system(command) return
[ "def", "call_external_subprocess", "(", "command_list", ",", "stdin_filename", "=", "None", ",", "stdout_filename", "=", "None", ",", "stderr_filename", "=", "None", ",", "env", "=", "None", ")", ":", "if", "stdin_filename", ":", "stdin", "=", "open", "(", "stdin_filename", ",", "\"r\"", ")", "else", ":", "stdin", "=", "None", "if", "stdout_filename", ":", "stdout", "=", "open", "(", "stdout_filename", ",", "\"w\"", ")", "else", ":", "stdout", "=", "None", "if", "stderr_filename", ":", "stderr", "=", "open", "(", "stderr_filename", ",", "\"w\"", ")", "else", ":", "stderr", "=", "None", "subprocess", ".", "check_call", "(", "command_list", ",", "stdin", "=", "stdin", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "env", "=", "env", ")", "if", "stdin_filename", ":", "stdin", ".", "close", "(", ")", "if", "stdout_filename", ":", "stdout", ".", "close", "(", ")", "if", "stderr_filename", ":", "stderr", ".", "close", "(", ")", "# The older way to do the above with os.system is below, just for reference.", "# command = \" \".join(command_list)", "# if stdin_filename: command += \" < \" + stdin_filename", "# if stdout_filename: command += \" > \" + stdout_filename", "# if stderr_filename: command += \" 2> \" + stderr_filename", "# os.system(command)", "return" ]
Run the command and arguments in the command_list. Will search the system PATH for commands to execute, but no shell is started. Redirects any selected outputs to the given filename. Waits for command completion.
[ "Run", "the", "command", "and", "arguments", "in", "the", "command_list", ".", "Will", "search", "the", "system", "PATH", "for", "commands", "to", "execute", "but", "no", "shell", "is", "started", ".", "Redirects", "any", "selected", "outputs", "to", "the", "given", "filename", ".", "Waits", "for", "command", "completion", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L279-L306
train
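One design note on call_external_subprocess: the open() handles are not closed if check_call raises. A minimal sketch of the same redirection using ExitStack, so the files are released on any exit path (the function name here is illustrative, not the module's API):

import subprocess
from contextlib import ExitStack

def call_with_redirect(command_list, stdout_filename=None):
    # Same idea as above, but the file is closed even on exceptions.
    with ExitStack() as stack:
        stdout = (stack.enter_context(open(stdout_filename, "w"))
                  if stdout_filename else None)
        subprocess.check_call(command_list, stdout=stdout)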
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
run_external_subprocess_in_background
def run_external_subprocess_in_background(command_list, env=None): """Runs the command and arguments in the list as a background process.""" if system_os == "Windows": DETACHED_PROCESS = 0x00000008 p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None, stderr=None, close_fds=True, creationflags=DETACHED_PROCESS, env=env) else: p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None, stderr=None, close_fds=True, env=env) return p
python
def run_external_subprocess_in_background(command_list, env=None): """Runs the command and arguments in the list as a background process.""" if system_os == "Windows": DETACHED_PROCESS = 0x00000008 p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None, stderr=None, close_fds=True, creationflags=DETACHED_PROCESS, env=env) else: p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None, stderr=None, close_fds=True, env=env) return p
[ "def", "run_external_subprocess_in_background", "(", "command_list", ",", "env", "=", "None", ")", ":", "if", "system_os", "==", "\"Windows\"", ":", "DETACHED_PROCESS", "=", "0x00000008", "p", "=", "subprocess", ".", "Popen", "(", "command_list", ",", "shell", "=", "False", ",", "stdin", "=", "None", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "close_fds", "=", "True", ",", "creationflags", "=", "DETACHED_PROCESS", ",", "env", "=", "env", ")", "else", ":", "p", "=", "subprocess", ".", "Popen", "(", "command_list", ",", "shell", "=", "False", ",", "stdin", "=", "None", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "close_fds", "=", "True", ",", "env", "=", "env", ")", "return", "p" ]
Runs the command and arguments in the list as a background process.
[ "Runs", "the", "command", "and", "arguments", "in", "the", "list", "as", "a", "background", "process", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L308-L317
train
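A hedged usage sketch for run_external_subprocess_in_background: launch a PDF viewer detached so the caller keeps running (the viewer path is hypothetical):

p = run_external_subprocess_in_background(["evince", "cropped.pdf"])
# p is a subprocess.Popen handle; the caller intentionally never waits on it.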
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
function_call_with_timeout
def function_call_with_timeout(fun_name, fun_args, secs=5): """Run a Python function with a timeout. No interprocess communication or return values are handled. Setting secs to 0 gives infinite timeout.""" from multiprocessing import Process, Queue p = Process(target=fun_name, args=tuple(fun_args)) p.start() curr_secs = 0 no_timeout = False if secs == 0: no_timeout = True else: timeout = secs while p.is_alive() and not no_timeout: if curr_secs > timeout: print("Process time has exceeded timeout, terminating it.") p.terminate() return False time.sleep(0.1) curr_secs += 0.1 p.join() # Blocks if process hasn't terminated. return True
python
def function_call_with_timeout(fun_name, fun_args, secs=5): """Run a Python function with a timeout. No interprocess communication or return values are handled. Setting secs to 0 gives infinite timeout.""" from multiprocessing import Process, Queue p = Process(target=fun_name, args=tuple(fun_args)) p.start() curr_secs = 0 no_timeout = False if secs == 0: no_timeout = True else: timeout = secs while p.is_alive() and not no_timeout: if curr_secs > timeout: print("Process time has exceeded timeout, terminating it.") p.terminate() return False time.sleep(0.1) curr_secs += 0.1 p.join() # Blocks if process hasn't terminated. return True
[ "def", "function_call_with_timeout", "(", "fun_name", ",", "fun_args", ",", "secs", "=", "5", ")", ":", "from", "multiprocessing", "import", "Process", ",", "Queue", "p", "=", "Process", "(", "target", "=", "fun_name", ",", "args", "=", "tuple", "(", "fun_args", ")", ")", "p", ".", "start", "(", ")", "curr_secs", "=", "0", "no_timeout", "=", "False", "if", "secs", "==", "0", ":", "no_timeout", "=", "True", "else", ":", "timeout", "=", "secs", "while", "p", ".", "is_alive", "(", ")", "and", "not", "no_timeout", ":", "if", "curr_secs", ">", "timeout", ":", "print", "(", "\"Process time has exceeded timeout, terminating it.\"", ")", "p", ".", "terminate", "(", ")", "return", "False", "time", ".", "sleep", "(", "0.1", ")", "curr_secs", "+=", "0.1", "p", ".", "join", "(", ")", "# Blocks if process hasn't terminated.", "return", "True" ]
Run a Python function with a timeout. No interprocess communication or return values are handled. Setting secs to 0 gives infinite timeout.
[ "Run", "a", "Python", "function", "with", "a", "timeout", ".", "No", "interprocess", "communication", "or", "return", "values", "are", "handled", ".", "Setting", "secs", "to", "0", "gives", "infinite", "timeout", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L331-L349
train
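A standalone sketch of the same polling-timeout idea as function_call_with_timeout, runnable on its own; the __main__ guard matters because multiprocessing re-imports the module on Windows:

import time
from multiprocessing import Process

def slow():
    time.sleep(10)

if __name__ == "__main__":
    p = Process(target=slow)
    p.start()
    deadline = time.time() + 1.0                   # one-second timeout
    while p.is_alive() and time.time() < deadline:
        time.sleep(0.1)
    timed_out = p.is_alive()
    if timed_out:
        p.terminate()                              # kill the straggler
    p.join()
    print("timed out:", timed_out)                 # timed out: True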
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
fix_pdf_with_ghostscript_to_tmp_file
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname): """Attempt to fix a bad PDF file with a Ghostscript command, writing the output PDF to a temporary file and returning the filename. Caller is responsible for deleting the file.""" if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) temp_file_name = get_temporary_filename(extension=".pdf") gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name, "-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite", input_doc_fname] try: gs_output = get_external_subprocess_output(gs_run_command, print_output=True, indent_string=" ", env=gs_environment) except subprocess.CalledProcessError: print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit" "\nstatus when attempting to fix the file:\n ", input_doc_fname, file=sys.stderr) cleanup_and_exit(1) except UnicodeDecodeError: print("\nWarning in pdfCropMargins: In attempting to repair the PDF file" "\nGhostscript produced a message containing characters which cannot" "\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.", file=sys.stderr) return temp_file_name
python
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname): """Attempt to fix a bad PDF file with a Ghostscript command, writing the output PDF to a temporary file and returning the filename. Caller is responsible for deleting the file.""" if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) temp_file_name = get_temporary_filename(extension=".pdf") gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name, "-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite", input_doc_fname] try: gs_output = get_external_subprocess_output(gs_run_command, print_output=True, indent_string=" ", env=gs_environment) except subprocess.CalledProcessError: print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit" "\nstatus when attempting to fix the file:\n ", input_doc_fname, file=sys.stderr) cleanup_and_exit(1) except UnicodeDecodeError: print("\nWarning in pdfCropMargins: In attempting to repair the PDF file" "\nGhostscript produced a message containing characters which cannot" "\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.", file=sys.stderr) return temp_file_name
[ "def", "fix_pdf_with_ghostscript_to_tmp_file", "(", "input_doc_fname", ")", ":", "if", "not", "gs_executable", ":", "init_and_test_gs_executable", "(", "exit_on_fail", "=", "True", ")", "temp_file_name", "=", "get_temporary_filename", "(", "extension", "=", "\".pdf\"", ")", "gs_run_command", "=", "[", "gs_executable", ",", "\"-dSAFER\"", ",", "\"-o\"", ",", "temp_file_name", ",", "\"-dPDFSETTINGS=/prepress\"", ",", "\"-sDEVICE=pdfwrite\"", ",", "input_doc_fname", "]", "try", ":", "gs_output", "=", "get_external_subprocess_output", "(", "gs_run_command", ",", "print_output", "=", "True", ",", "indent_string", "=", "\" \"", ",", "env", "=", "gs_environment", ")", "except", "subprocess", ".", "CalledProcessError", ":", "print", "(", "\"\\nError in pdfCropMargins: Ghostscript returned a non-zero exit\"", "\"\\nstatus when attempting to fix the file:\\n \"", ",", "input_doc_fname", ",", "file", "=", "sys", ".", "stderr", ")", "cleanup_and_exit", "(", "1", ")", "except", "UnicodeDecodeError", ":", "print", "(", "\"\\nWarning in pdfCropMargins: In attempting to repair the PDF file\"", "\"\\nGhostscript produced a message containing characters which cannot\"", "\"\\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "temp_file_name" ]
Attempt to fix a bad PDF file with a Ghostscript command, writing the output PDF to a temporary file and returning the filename. Caller is responsible for deleting the file.
[ "Attempt", "to", "fix", "a", "bad", "PDF", "file", "with", "a", "Ghostscript", "command", "writing", "the", "output", "PDF", "to", "a", "temporary", "file", "and", "returning", "the", "filename", ".", "Caller", "is", "responsible", "for", "deleting", "the", "file", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L532-L554
train
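For reference, the repair command assembled by fix_pdf_with_ghostscript_to_tmp_file is equivalent to running gs -dSAFER -o fixed.pdf -dPDFSETTINGS=/prepress -sDEVICE=pdfwrite input.pdf on the shell, where fixed.pdf stands in for the generated temporary name and input.pdf for the document being repaired.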
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
get_bounding_box_list_ghostscript
def get_bounding_box_list_ghostscript(input_doc_fname, res_x, res_y, full_page_box): """Call Ghostscript to get the bounding box list. Cannot set a threshold with this method.""" if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) res = str(res_x) + "x" + str(res_y) box_arg = "-dUseMediaBox" # should be default, but set anyway if "c" in full_page_box: box_arg = "-dUseCropBox" if "t" in full_page_box: box_arg = "-dUseTrimBox" if "a" in full_page_box: box_arg = "-dUseArtBox" if "b" in full_page_box: box_arg = "-dUseBleedBox" # may not be defined in gs gs_run_command = [gs_executable, "-dSAFER", "-dNOPAUSE", "-dBATCH", "-sDEVICE=bbox", box_arg, "-r"+res, input_doc_fname] # Set printOutput to True for debugging or extra-verbose with Ghostscript's output. # Note Ghostscript writes the data to stderr, so the command below must capture it. try: gs_output = get_external_subprocess_output(gs_run_command, print_output=False, indent_string=" ", env=gs_environment) except UnicodeDecodeError: print("\nError in pdfCropMargins: In attempting to get the bounding boxes" "\nGhostscript encountered characters which cannot be decoded by the" "\n'utf-8' codec.", file=sys.stderr) cleanup_and_exit(1) bounding_box_list = [] for line in gs_output: split_line = line.split() if split_line and split_line[0] == r"%%HiResBoundingBox:": del split_line[0] if len(split_line) != 4: print("\nWarning from pdfCropMargins: Ignoring this unparsable line" "\nwhen finding the bounding boxes with Ghostscript:", line, "\n", file=sys.stderr) continue # Note gs reports values in order left, bottom, right, top, # i.e., lower left point followed by top right point. bounding_box_list.append([float(split_line[0]), float(split_line[1]), float(split_line[2]), float(split_line[3])]) if not bounding_box_list: print("\nError in pdfCropMargins: Ghostscript failed to find any bounding" "\nboxes in the document.", file=sys.stderr) cleanup_and_exit(1) return bounding_box_list
python
def get_bounding_box_list_ghostscript(input_doc_fname, res_x, res_y, full_page_box): """Call Ghostscript to get the bounding box list. Cannot set a threshold with this method.""" if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) res = str(res_x) + "x" + str(res_y) box_arg = "-dUseMediaBox" # should be default, but set anyway if "c" in full_page_box: box_arg = "-dUseCropBox" if "t" in full_page_box: box_arg = "-dUseTrimBox" if "a" in full_page_box: box_arg = "-dUseArtBox" if "b" in full_page_box: box_arg = "-dUseBleedBox" # may not be defined in gs gs_run_command = [gs_executable, "-dSAFER", "-dNOPAUSE", "-dBATCH", "-sDEVICE=bbox", box_arg, "-r"+res, input_doc_fname] # Set printOutput to True for debugging or extra-verbose with Ghostscript's output. # Note Ghostscript writes the data to stderr, so the command below must capture it. try: gs_output = get_external_subprocess_output(gs_run_command, print_output=False, indent_string=" ", env=gs_environment) except UnicodeDecodeError: print("\nError in pdfCropMargins: In attempting to get the bounding boxes" "\nGhostscript encountered characters which cannot be decoded by the" "\n'utf-8' codec.", file=sys.stderr) cleanup_and_exit(1) bounding_box_list = [] for line in gs_output: split_line = line.split() if split_line and split_line[0] == r"%%HiResBoundingBox:": del split_line[0] if len(split_line) != 4: print("\nWarning from pdfCropMargins: Ignoring this unparsable line" "\nwhen finding the bounding boxes with Ghostscript:", line, "\n", file=sys.stderr) continue # Note gs reports values in order left, bottom, right, top, # i.e., lower left point followed by top right point. bounding_box_list.append([float(split_line[0]), float(split_line[1]), float(split_line[2]), float(split_line[3])]) if not bounding_box_list: print("\nError in pdfCropMargins: Ghostscript failed to find any bounding" "\nboxes in the document.", file=sys.stderr) cleanup_and_exit(1) return bounding_box_list
[ "def", "get_bounding_box_list_ghostscript", "(", "input_doc_fname", ",", "res_x", ",", "res_y", ",", "full_page_box", ")", ":", "if", "not", "gs_executable", ":", "init_and_test_gs_executable", "(", "exit_on_fail", "=", "True", ")", "res", "=", "str", "(", "res_x", ")", "+", "\"x\"", "+", "str", "(", "res_y", ")", "box_arg", "=", "\"-dUseMediaBox\"", "# should be default, but set anyway", "if", "\"c\"", "in", "full_page_box", ":", "box_arg", "=", "\"-dUseCropBox\"", "if", "\"t\"", "in", "full_page_box", ":", "box_arg", "=", "\"-dUseTrimBox\"", "if", "\"a\"", "in", "full_page_box", ":", "box_arg", "=", "\"-dUseArtBox\"", "if", "\"b\"", "in", "full_page_box", ":", "box_arg", "=", "\"-dUseBleedBox\"", "# may not be defined in gs", "gs_run_command", "=", "[", "gs_executable", ",", "\"-dSAFER\"", ",", "\"-dNOPAUSE\"", ",", "\"-dBATCH\"", ",", "\"-sDEVICE=bbox\"", ",", "box_arg", ",", "\"-r\"", "+", "res", ",", "input_doc_fname", "]", "# Set printOutput to True for debugging or extra-verbose with Ghostscript's output.", "# Note Ghostscript writes the data to stderr, so the command below must capture it.", "try", ":", "gs_output", "=", "get_external_subprocess_output", "(", "gs_run_command", ",", "print_output", "=", "False", ",", "indent_string", "=", "\" \"", ",", "env", "=", "gs_environment", ")", "except", "UnicodeDecodeError", ":", "print", "(", "\"\\nError in pdfCropMargins: In attempting to get the bounding boxes\"", "\"\\nGhostscript encountered characters which cannot be decoded by the\"", "\"\\n'utf-8' codec.\"", ",", "file", "=", "sys", ".", "stderr", ")", "cleanup_and_exit", "(", "1", ")", "bounding_box_list", "=", "[", "]", "for", "line", "in", "gs_output", ":", "split_line", "=", "line", ".", "split", "(", ")", "if", "split_line", "and", "split_line", "[", "0", "]", "==", "r\"%%HiResBoundingBox:\"", ":", "del", "split_line", "[", "0", "]", "if", "len", "(", "split_line", ")", "!=", "4", ":", "print", "(", "\"\\nWarning from pdfCropMargins: Ignoring this unparsable line\"", "\"\\nwhen finding the bounding boxes with Ghostscript:\"", ",", "line", ",", "\"\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "# Note gs reports values in order left, bottom, right, top,", "# i.e., lower left point followed by top right point.", "bounding_box_list", ".", "append", "(", "[", "float", "(", "split_line", "[", "0", "]", ")", ",", "float", "(", "split_line", "[", "1", "]", ")", ",", "float", "(", "split_line", "[", "2", "]", ")", ",", "float", "(", "split_line", "[", "3", "]", ")", "]", ")", "if", "not", "bounding_box_list", ":", "print", "(", "\"\\nError in pdfCropMargins: Ghostscript failed to find any bounding\"", "\"\\nboxes in the document.\"", ",", "file", "=", "sys", ".", "stderr", ")", "cleanup_and_exit", "(", "1", ")", "return", "bounding_box_list" ]
Call Ghostscript to get the bounding box list. Cannot set a threshold with this method.
[ "Call", "Ghostscript", "to", "get", "the", "bounding", "box", "list", ".", "Cannot", "set", "a", "threshold", "with", "this", "method", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L556-L602
train
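A standalone parsing sketch for the bbox-device output scanned above; the sample lines imitate what gs -sDEVICE=bbox typically writes (to stderr), one pair per page:

sample = [
    "%%BoundingBox: 54 72 558 720",
    "%%HiResBoundingBox: 53.877300 71.820000 557.937820 719.820000",
]
boxes = []
for line in sample:
    parts = line.split()
    if parts and parts[0] == "%%HiResBoundingBox:":
        boxes.append([float(v) for v in parts[1:]])
print(boxes)  # [[53.8773, 71.82, 557.93782, 719.82]]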
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
render_pdf_file_to_image_files_pdftoppm_ppm
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150, extra_args=None):
    """Use the pdftoppm program to render a PDF file to .ppm images. The
    root_output_file_path is prepended to all the output files, which have
    numbers and extensions added. Extra arguments can be passed as a list in
    extra_args. Return the command output."""
    if extra_args is None:
        extra_args = []
    if not pdftoppm_executable:
        init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
    if old_pdftoppm_version: # We only have -r, not -rx and -ry.
        command = [pdftoppm_executable] + extra_args + ["-r", res_x,
                   pdf_file_name, root_output_file_path]
    else:
        command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y,
                   pdf_file_name, root_output_file_path]
    comm_output = get_external_subprocess_output(command)
    return comm_output
python
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150, extra_args=None):
    """Use the pdftoppm program to render a PDF file to .ppm images. The
    root_output_file_path is prepended to all the output files, which have
    numbers and extensions added. Extra arguments can be passed as a list in
    extra_args. Return the command output."""
    if extra_args is None:
        extra_args = []
    if not pdftoppm_executable:
        init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
    if old_pdftoppm_version: # We only have -r, not -rx and -ry.
        command = [pdftoppm_executable] + extra_args + ["-r", res_x,
                   pdf_file_name, root_output_file_path]
    else:
        command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y,
                   pdf_file_name, root_output_file_path]
    comm_output = get_external_subprocess_output(command)
    return comm_output
[ "def", "render_pdf_file_to_image_files_pdftoppm_ppm", "(", "pdf_file_name", ",", "root_output_file_path", ",", "res_x", "=", "150", ",", "res_y", "=", "150", ",", "extra_args", "=", "None", ")", ":", "if", "extra_args", "is", "None", ":", "extra_args", "=", "[", "]", "if", "not", "pdftoppm_executable", ":", "init_and_test_pdftoppm_executable", "(", "prefer_local", "=", "False", ",", "exit_on_fail", "=", "True", ")", "if", "old_pdftoppm_version", ":", "# We only have -r, not -rx and -ry.", "command", "=", "[", "pdftoppm_executable", "]", "+", "extra_args", "+", "[", "\"-r\"", ",", "res_x", ",", "pdf_file_name", ",", "root_output_file_path", "]", "else", ":", "command", "=", "[", "pdftoppm_executable", "]", "+", "extra_args", "+", "[", "\"-rx\"", ",", "res_x", ",", "\"-ry\"", ",", "res_y", ",", "pdf_file_name", ",", "root_output_file_path", "]", "comm_output", "=", "get_external_subprocess_output", "(", "command", ")", "return", "comm_output" ]
Use the pdftoppm program to render a PDF file to .ppm images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Extra arguments can be passed as a list in extra_args. Return the command output.
[ "Use", "the", "pdftoppm", "program", "to", "render", "a", "PDF", "file", "to", ".", "png", "images", ".", "The", "root_output_file_path", "is", "prepended", "to", "all", "the", "output", "files", "which", "have", "numbers", "and", "extensions", "added", ".", "Extra", "arguments", "can", "be", "passed", "as", "a", "list", "in", "extra_args", ".", "Return", "the", "command", "output", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L604-L624
train
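For reference, with the default resolutions the command built above is equivalent to pdftoppm -rx 150 -ry 150 input.pdf /tmp/page, which writes one numbered .ppm file per page starting with the /tmp/page prefix (file name and prefix hypothetical).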
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
render_pdf_file_to_image_files_pdftoppm_pgm
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150):
    """Same as render_pdf_file_to_image_files_pdftoppm_ppm but with -gray option
    for pgm."""
    comm_output = render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name,
                          root_output_file_path, res_x, res_y, ["-gray"])
    return comm_output
python
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150):
    """Same as render_pdf_file_to_image_files_pdftoppm_ppm but with -gray option
    for pgm."""
    comm_output = render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name,
                          root_output_file_path, res_x, res_y, ["-gray"])
    return comm_output
[ "def", "render_pdf_file_to_image_files_pdftoppm_pgm", "(", "pdf_file_name", ",", "root_output_file_path", ",", "res_x", "=", "150", ",", "res_y", "=", "150", ")", ":", "comm_output", "=", "render_pdf_file_to_image_files_pdftoppm_ppm", "(", "pdf_file_name", ",", "root_output_file_path", ",", "res_x", ",", "res_y", ",", "[", "\"-gray\"", "]", ")", "return", "comm_output" ]
Same as render_pdf_file_to_image_files_pdftoppm_ppm but with -gray option for pgm.
[ "Same", "as", "renderPdfFileToImageFile_pdftoppm_ppm", "but", "with", "-", "gray", "option", "for", "pgm", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L626-L632
train
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
render_pdf_file_to_image_files__ghostscript_png
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name, root_output_file_path, res_x=150, res_y=150): """Use Ghostscript to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Return the command output.""" # For gs commands see # http://ghostscript.com/doc/current/Devices.htm#File_formats # http://ghostscript.com/doc/current/Devices.htm#PNG if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray", "-r"+res_x+"x"+res_y, "-sOutputFile="+root_output_file_path+"-%06d.png", pdf_file_name] comm_output = get_external_subprocess_output(command, env=gs_environment) return comm_output
python
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name, root_output_file_path, res_x=150, res_y=150): """Use Ghostscript to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Return the command output.""" # For gs commands see # http://ghostscript.com/doc/current/Devices.htm#File_formats # http://ghostscript.com/doc/current/Devices.htm#PNG if not gs_executable: init_and_test_gs_executable(exit_on_fail=True) command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray", "-r"+res_x+"x"+res_y, "-sOutputFile="+root_output_file_path+"-%06d.png", pdf_file_name] comm_output = get_external_subprocess_output(command, env=gs_environment) return comm_output
[ "def", "render_pdf_file_to_image_files__ghostscript_png", "(", "pdf_file_name", ",", "root_output_file_path", ",", "res_x", "=", "150", ",", "res_y", "=", "150", ")", ":", "# For gs commands see", "# http://ghostscript.com/doc/current/Devices.htm#File_formats", "# http://ghostscript.com/doc/current/Devices.htm#PNG", "if", "not", "gs_executable", ":", "init_and_test_gs_executable", "(", "exit_on_fail", "=", "True", ")", "command", "=", "[", "gs_executable", ",", "\"-dBATCH\"", ",", "\"-dNOPAUSE\"", ",", "\"-sDEVICE=pnggray\"", ",", "\"-r\"", "+", "res_x", "+", "\"x\"", "+", "res_y", ",", "\"-sOutputFile=\"", "+", "root_output_file_path", "+", "\"-%06d.png\"", ",", "pdf_file_name", "]", "comm_output", "=", "get_external_subprocess_output", "(", "command", ",", "env", "=", "gs_environment", ")", "return", "comm_output" ]
Use Ghostscript to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Return the command output.
[ "Use", "Ghostscript", "to", "render", "a", "PDF", "file", "to", ".", "png", "images", ".", "The", "root_output_file_path", "is", "prepended", "to", "all", "the", "output", "files", "which", "have", "numbers", "and", "extensions", "added", ".", "Return", "the", "command", "output", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L634-L648
train
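For reference, with the default resolutions the command above reduces to gs -dBATCH -dNOPAUSE -sDEVICE=pnggray -r150x150 -sOutputFile=prefix-%06d.png input.pdf, so pages come out as prefix-000001.png, prefix-000002.png, and so on (file name and prefix hypothetical).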
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
show_preview
def show_preview(viewer_path, pdf_file_name): """Run the PDF viewer at the path viewer_path on the file pdf_file_name.""" try: cmd = [viewer_path, pdf_file_name] run_external_subprocess_in_background(cmd) except (subprocess.CalledProcessError, OSError, IOError) as e: print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:" "\n ", viewer_path, "\nwas not found or failed to execute correctly.\n", file=sys.stderr) return
python
def show_preview(viewer_path, pdf_file_name): """Run the PDF viewer at the path viewer_path on the file pdf_file_name.""" try: cmd = [viewer_path, pdf_file_name] run_external_subprocess_in_background(cmd) except (subprocess.CalledProcessError, OSError, IOError) as e: print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:" "\n ", viewer_path, "\nwas not found or failed to execute correctly.\n", file=sys.stderr) return
[ "def", "show_preview", "(", "viewer_path", ",", "pdf_file_name", ")", ":", "try", ":", "cmd", "=", "[", "viewer_path", ",", "pdf_file_name", "]", "run_external_subprocess_in_background", "(", "cmd", ")", "except", "(", "subprocess", ".", "CalledProcessError", ",", "OSError", ",", "IOError", ")", "as", "e", ":", "print", "(", "\"\\nWarning from pdfCropMargins: The argument to the '--viewer' option:\"", "\"\\n \"", ",", "viewer_path", ",", "\"\\nwas not found or failed to execute correctly.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "return" ]
Run the PDF viewer at the path viewer_path on the file pdf_file_name.
[ "Run", "the", "PDF", "viewer", "at", "the", "path", "viewer_path", "on", "the", "file", "pdf_file_name", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L673-L682
train
abarker/pdfCropMargins
src/pdfCropMargins/pdfCropMargins.py
main
def main(): """Run main, catching any exceptions and cleaning up the temp directories.""" cleanup_and_exit = sys.exit # Function to do cleanup and exit before the import. exit_code = 0 # Imports are done here inside the try block so some ugly (and useless) # traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows). try: from . import external_program_calls as ex # Creates tmp dir as side effect. cleanup_and_exit = ex.cleanup_and_exit # Switch to the real one, deletes temp dir. from . import main_pdfCropMargins # Imports external_program_calls, don't do first. main_pdfCropMargins.main_crop() # Run the actual program. except (KeyboardInterrupt, EOFError): # Windows raises EOFError on ^C. print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n", file=sys.stderr) except SystemExit: exit_code = sys.exc_info()[1] print() except: # Echo back the unexpected error so the user can see it. print("\nCaught an unexpected exception in the pdfCropMargins program.", file=sys.stderr) print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr) print("Error message : ", sys.exc_info()[1], file=sys.stderr) print() exit_code = 1 import traceback max_traceback_length = 30 traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length) # raise # Re-raise the error. finally: # Some people like to hit multiple ^C chars, which kills cleanup. # Call cleanup again each time. for i in range(30): # Give up after 30 tries. try: cleanup_and_exit(exit_code) except (KeyboardInterrupt, EOFError): continue
python
def main(): """Run main, catching any exceptions and cleaning up the temp directories.""" cleanup_and_exit = sys.exit # Function to do cleanup and exit before the import. exit_code = 0 # Imports are done here inside the try block so some ugly (and useless) # traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows). try: from . import external_program_calls as ex # Creates tmp dir as side effect. cleanup_and_exit = ex.cleanup_and_exit # Switch to the real one, deletes temp dir. from . import main_pdfCropMargins # Imports external_program_calls, don't do first. main_pdfCropMargins.main_crop() # Run the actual program. except (KeyboardInterrupt, EOFError): # Windows raises EOFError on ^C. print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n", file=sys.stderr) except SystemExit: exit_code = sys.exc_info()[1] print() except: # Echo back the unexpected error so the user can see it. print("\nCaught an unexpected exception in the pdfCropMargins program.", file=sys.stderr) print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr) print("Error message : ", sys.exc_info()[1], file=sys.stderr) print() exit_code = 1 import traceback max_traceback_length = 30 traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length) # raise # Re-raise the error. finally: # Some people like to hit multiple ^C chars, which kills cleanup. # Call cleanup again each time. for i in range(30): # Give up after 30 tries. try: cleanup_and_exit(exit_code) except (KeyboardInterrupt, EOFError): continue
[ "def", "main", "(", ")", ":", "cleanup_and_exit", "=", "sys", ".", "exit", "# Function to do cleanup and exit before the import.", "exit_code", "=", "0", "# Imports are done here inside the try block so some ugly (and useless)", "# traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows).", "try", ":", "from", ".", "import", "external_program_calls", "as", "ex", "# Creates tmp dir as side effect.", "cleanup_and_exit", "=", "ex", ".", "cleanup_and_exit", "# Switch to the real one, deletes temp dir.", "from", ".", "import", "main_pdfCropMargins", "# Imports external_program_calls, don't do first.", "main_pdfCropMargins", ".", "main_crop", "(", ")", "# Run the actual program.", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "# Windows raises EOFError on ^C.", "print", "(", "\"\\nGot a KeyboardInterrupt, cleaning up and exiting...\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "except", "SystemExit", ":", "exit_code", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "print", "(", ")", "except", ":", "# Echo back the unexpected error so the user can see it.", "print", "(", "\"\\nCaught an unexpected exception in the pdfCropMargins program.\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Unexpected error: \"", ",", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Error message : \"", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", ")", "exit_code", "=", "1", "import", "traceback", "max_traceback_length", "=", "30", "traceback", ".", "print_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ",", "limit", "=", "max_traceback_length", ")", "# raise # Re-raise the error.", "finally", ":", "# Some people like to hit multiple ^C chars, which kills cleanup.", "# Call cleanup again each time.", "for", "i", "in", "range", "(", "30", ")", ":", "# Give up after 30 tries.", "try", ":", "cleanup_and_exit", "(", "exit_code", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "continue" ]
Run main, catching any exceptions and cleaning up the temp directories.
[ "Run", "main", "catching", "any", "exceptions", "and", "cleaning", "up", "the", "temp", "directories", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/pdfCropMargins.py#L70-L113
train
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
get_full_page_box_list_assigning_media_and_crop
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False): """Get a list of all the full-page box values for each page. The argument input_doc should be a PdfFileReader object. The boxes on the list are in the simple 4-float list format used by this program, not RectangleObject format.""" full_page_box_list = [] rotation_list = [] if args.verbose and not quiet: print("\nOriginal full page sizes, in PDF format (lbrt):") for page_num in range(input_doc.getNumPages()): # Get the current page and find the full-page box. curr_page = input_doc.getPage(page_num) full_page_box = get_full_page_box_assigning_media_and_crop(curr_page) if args.verbose and not quiet: # want to display page num numbering from 1, so add one print("\t"+str(page_num+1), " rot =", curr_page.rotationAngle, "\t", full_page_box) # Convert the RectangleObject to floats in an ordinary list and append. ordinary_box = [float(b) for b in full_page_box] full_page_box_list.append(ordinary_box) # Append the rotation value to the rotation_list. rotation_list.append(curr_page.rotationAngle) return full_page_box_list, rotation_list
python
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False): """Get a list of all the full-page box values for each page. The argument input_doc should be a PdfFileReader object. The boxes on the list are in the simple 4-float list format used by this program, not RectangleObject format.""" full_page_box_list = [] rotation_list = [] if args.verbose and not quiet: print("\nOriginal full page sizes, in PDF format (lbrt):") for page_num in range(input_doc.getNumPages()): # Get the current page and find the full-page box. curr_page = input_doc.getPage(page_num) full_page_box = get_full_page_box_assigning_media_and_crop(curr_page) if args.verbose and not quiet: # want to display page num numbering from 1, so add one print("\t"+str(page_num+1), " rot =", curr_page.rotationAngle, "\t", full_page_box) # Convert the RectangleObject to floats in an ordinary list and append. ordinary_box = [float(b) for b in full_page_box] full_page_box_list.append(ordinary_box) # Append the rotation value to the rotation_list. rotation_list.append(curr_page.rotationAngle) return full_page_box_list, rotation_list
[ "def", "get_full_page_box_list_assigning_media_and_crop", "(", "input_doc", ",", "quiet", "=", "False", ")", ":", "full_page_box_list", "=", "[", "]", "rotation_list", "=", "[", "]", "if", "args", ".", "verbose", "and", "not", "quiet", ":", "print", "(", "\"\\nOriginal full page sizes, in PDF format (lbrt):\"", ")", "for", "page_num", "in", "range", "(", "input_doc", ".", "getNumPages", "(", ")", ")", ":", "# Get the current page and find the full-page box.", "curr_page", "=", "input_doc", ".", "getPage", "(", "page_num", ")", "full_page_box", "=", "get_full_page_box_assigning_media_and_crop", "(", "curr_page", ")", "if", "args", ".", "verbose", "and", "not", "quiet", ":", "# want to display page num numbering from 1, so add one", "print", "(", "\"\\t\"", "+", "str", "(", "page_num", "+", "1", ")", ",", "\" rot =\"", ",", "curr_page", ".", "rotationAngle", ",", "\"\\t\"", ",", "full_page_box", ")", "# Convert the RectangleObject to floats in an ordinary list and append.", "ordinary_box", "=", "[", "float", "(", "b", ")", "for", "b", "in", "full_page_box", "]", "full_page_box_list", ".", "append", "(", "ordinary_box", ")", "# Append the rotation value to the rotation_list.", "rotation_list", ".", "append", "(", "curr_page", ".", "rotationAngle", ")", "return", "full_page_box_list", ",", "rotation_list" ]
Get a list of all the full-page box values for each page. The argument input_doc should be a PdfFileReader object. The boxes on the list are in the simple 4-float list format used by this program, not RectangleObject format.
[ "Get", "a", "list", "of", "all", "the", "full", "-", "page", "box", "values", "for", "each", "page", ".", "The", "argument", "input_doc", "should", "be", "a", "PdfFileReader", "object", ".", "The", "boxes", "on", "the", "list", "are", "in", "the", "simple", "4", "-", "float", "list", "format", "used", "by", "this", "program", "not", "RectangleObject", "format", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L207-L236
train
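A minimal sketch of the page loop above using the same old PyPDF2 1.x calls (the file name is hypothetical; rotation handling is omitted since rotationAngle is an attribute this program sets elsewhere, not a stock PyPDF2 field):

from PyPDF2 import PdfFileReader

reader = PdfFileReader("input.pdf")
for page_num in range(reader.getNumPages()):
    page = reader.getPage(page_num)
    box = [float(b) for b in page.mediaBox]  # RectangleObject -> 4 floats (lbrt)
    print(page_num + 1, box)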
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
set_cropped_metadata
def set_cropped_metadata(input_doc, output_doc, metadata_info):
    """Set the metadata for the output document. Mostly just copied over, but
    "Producer" has a string appended to indicate that this program modified the
    file. That allows for the undo operation to make sure that this program
    cropped the file in the first place."""

    # Setting metadata with pyPdf requires low-level pyPdf operations, see
    # http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf
    if not metadata_info:
        # In case it's null, just set values to empty strings. This class just holds
        # data temporarily in the same format; this is not sent into PyPDF2.
        class MetadataInfo(object):
            author = ""
            creator = ""
            producer = ""
            subject = ""
            title = ""
        metadata_info = MetadataInfo()

    output_info_dict = output_doc._info.getObject()

    # Check Producer metadata attribute to see if this program cropped document before.
    producer_mod = PRODUCER_MODIFIER
    already_cropped_by_this_program = False
    old_producer_string = metadata_info.producer
    if old_producer_string and old_producer_string.endswith(producer_mod):
        if args.verbose:
            print("\nThe document was already cropped at least once by this program.")
        already_cropped_by_this_program = True
        producer_mod = "" # No need to pile up suffixes each time on Producer.

    # Note that all None metadata attributes are currently set to the empty string
    # when passing along the metadata information.
    def st(item):
        if item is None:
            return ""
        else:
            return item

    output_info_dict.update({
        NameObject("/Author"): createStringObject(st(metadata_info.author)),
        NameObject("/Creator"): createStringObject(st(metadata_info.creator)),
        NameObject("/Producer"): createStringObject(st(metadata_info.producer)
                                                    + producer_mod),
        NameObject("/Subject"): createStringObject(st(metadata_info.subject)),
        NameObject("/Title"): createStringObject(st(metadata_info.title))
        })

    return already_cropped_by_this_program
python
def set_cropped_metadata(input_doc, output_doc, metadata_info):
    """Set the metadata for the output document. Mostly just copied over, but
    "Producer" has a string appended to indicate that this program modified the
    file. That allows for the undo operation to make sure that this program
    cropped the file in the first place."""

    # Setting metadata with pyPdf requires low-level pyPdf operations, see
    # http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf
    if not metadata_info:
        # In case it's null, just set values to empty strings. This class just holds
        # data temporarily in the same format; this is not sent into PyPDF2.
        class MetadataInfo(object):
            author = ""
            creator = ""
            producer = ""
            subject = ""
            title = ""
        metadata_info = MetadataInfo()

    output_info_dict = output_doc._info.getObject()

    # Check Producer metadata attribute to see if this program cropped document before.
    producer_mod = PRODUCER_MODIFIER
    already_cropped_by_this_program = False
    old_producer_string = metadata_info.producer
    if old_producer_string and old_producer_string.endswith(producer_mod):
        if args.verbose:
            print("\nThe document was already cropped at least once by this program.")
        already_cropped_by_this_program = True
        producer_mod = "" # No need to pile up suffixes each time on Producer.

    # Note that all None metadata attributes are currently set to the empty string
    # when passing along the metadata information.
    def st(item):
        if item is None:
            return ""
        else:
            return item

    output_info_dict.update({
        NameObject("/Author"): createStringObject(st(metadata_info.author)),
        NameObject("/Creator"): createStringObject(st(metadata_info.creator)),
        NameObject("/Producer"): createStringObject(st(metadata_info.producer)
                                                    + producer_mod),
        NameObject("/Subject"): createStringObject(st(metadata_info.subject)),
        NameObject("/Title"): createStringObject(st(metadata_info.title))
        })

    return already_cropped_by_this_program
[ "def", "set_cropped_metadata", "(", "input_doc", ",", "output_doc", ",", "metadata_info", ")", ":", "# Setting metadata with pyPdf requires low-level pyPdf operations, see", "# http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf", "if", "not", "metadata_info", ":", "# In case it's null, just set values to empty strings. This class just holds", "# data temporary in the same format; this is not sent into PyPDF2.", "class", "MetadataInfo", "(", "object", ")", ":", "author", "=", "\"\"", "creator", "=", "\"\"", "producer", "=", "\"\"", "subject", "=", "\"\"", "title", "=", "\"\"", "metadata_info", "=", "MetadataInfo", "(", ")", "output_info_dict", "=", "output_doc", ".", "_info", ".", "getObject", "(", ")", "# Check Producer metadata attribute to see if this program cropped document before.", "producer_mod", "=", "PRODUCER_MODIFIER", "already_cropped_by_this_program", "=", "False", "old_producer_string", "=", "metadata_info", ".", "producer", "if", "old_producer_string", "and", "old_producer_string", ".", "endswith", "(", "producer_mod", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe document was already cropped at least once by this program.\"", ")", "already_cropped_by_this_program", "=", "True", "producer_mod", "=", "\"\"", "# No need to pile up suffixes each time on Producer.", "# Note that all None metadata attributes are currently set to the empty string", "# when passing along the metadata information.", "def", "st", "(", "item", ")", ":", "if", "item", "is", "None", ":", "return", "\"\"", "else", ":", "return", "item", "output_info_dict", ".", "update", "(", "{", "NameObject", "(", "\"/Author\"", ")", ":", "createStringObject", "(", "st", "(", "metadata_info", ".", "author", ")", ")", ",", "NameObject", "(", "\"/Creator\"", ")", ":", "createStringObject", "(", "st", "(", "metadata_info", ".", "creator", ")", ")", ",", "NameObject", "(", "\"/Producer\"", ")", ":", "createStringObject", "(", "st", "(", "metadata_info", ".", "producer", ")", "+", "producer_mod", ")", ",", "NameObject", "(", "\"/Subject\"", ")", ":", "createStringObject", "(", "st", "(", "metadata_info", ".", "subject", ")", ")", ",", "NameObject", "(", "\"/Title\"", ")", ":", "createStringObject", "(", "st", "(", "metadata_info", ".", "title", ")", ")", "}", ")", "return", "already_cropped_by_this_program" ]
Set the metadata for the output document. Mostly just copied over, but "Producer" has a string appended to indicate that this program modified the file. That allows for the undo operation to make sure that this program cropped the file in the first place.
[ "Set", "the", "metadata", "for", "the", "output", "document", ".", "Mostly", "just", "copied", "over", "but", "Producer", "has", "a", "string", "appended", "to", "indicate", "that", "this", "program", "modified", "the", "file", ".", "That", "allows", "for", "the", "undo", "operation", "to", "make", "sure", "that", "this", "program", "cropped", "the", "file", "in", "the", "first", "place", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L448-L494
train
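A quick way to see the Producer-suffix convention that set_cropped_metadata relies on, in isolation. This is a minimal sketch: the PRODUCER_MODIFIER text below is an assumed stand-in for the real constant defined elsewhere in main_pdfCropMargins.py, and plain Python strings stand in for PyPDF2 string objects.

# Sketch of the Producer-suffix marking scheme (assumed constant value).
PRODUCER_MODIFIER = " (Cropped by pdfCropMargins.)"  # stand-in, not the real constant

def mark_producer(producer):
    # Append the marker only once, so repeated crops don't pile up suffixes.
    if producer.endswith(PRODUCER_MODIFIER):
        return producer
    return producer + PRODUCER_MODIFIER

def was_cropped_by_this_program(producer):
    # The undo operation checks for exactly this suffix.
    return producer.endswith(PRODUCER_MODIFIER)

marked = mark_producer("LaTeX with hyperref")
print(was_cropped_by_this_program(marked))   # True
print(mark_producer(marked) == marked)       # True: no double suffix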
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
apply_crop_list
def apply_crop_list(crop_list, input_doc, page_nums_to_crop, already_cropped_by_this_program): """Apply the crop list to the pages of the input PdfFileReader object.""" if args.restore and not already_cropped_by_this_program: print("\nWarning from pdfCropMargins: The Producer string indicates that" "\neither this document was not previously cropped by pdfCropMargins" "\nor else it was modified by another program after that. Trying the" "\nundo anyway...", file=sys.stderr) if args.restore and args.verbose: print("\nRestoring the document to margins saved for each page in the ArtBox.") if args.verbose and not args.restore: print("\nNew full page sizes after cropping, in PDF format (lbrt):") # Copy over each page, after modifying the appropriate PDF boxes. for page_num in range(input_doc.getNumPages()): curr_page = input_doc.getPage(page_num) # Restore any rotation which was originally on the page. curr_page.rotateClockwise(curr_page.rotationAngle) # Only do the restore from ArtBox if '--restore' option was selected. if args.restore: if not curr_page.artBox: print("\nWarning from pdfCropMargins: Attempting to restore pages from" "\nthe ArtBox in each page, but page", page_num, "has no readable" "\nArtBox. Leaving that page unchanged.", file=sys.stderr) continue curr_page.mediaBox = curr_page.artBox curr_page.cropBox = curr_page.artBox continue # Do the save to ArtBox if that option is chosen and Producer is set. if not args.noundosave and not already_cropped_by_this_program: curr_page.artBox = intersect_boxes(curr_page.mediaBox, curr_page.cropBox) # Reset the CropBox and MediaBox to their saved original values # (which were set in getFullPageBox, in the curr_page object's namespace). curr_page.mediaBox = curr_page.originalMediaBox curr_page.cropBox = curr_page.originalCropBox # Copy the original page without further mods if it wasn't in the range # selected for cropping. if page_num not in page_nums_to_crop: continue # Convert the computed "box to crop to" into a RectangleObject (for pyPdf). new_cropped_box = RectangleObject(crop_list[page_num]) if args.verbose: print("\t"+str(page_num+1)+"\t", new_cropped_box) # page numbering from 1 if not args.boxesToSet: args.boxesToSet = ["m", "c"] # Now set any boxes which were selected to be set via the --boxesToSet option. if "m" in args.boxesToSet: curr_page.mediaBox = new_cropped_box if "c" in args.boxesToSet: curr_page.cropBox = new_cropped_box if "t" in args.boxesToSet: curr_page.trimBox = new_cropped_box if "a" in args.boxesToSet: curr_page.artBox = new_cropped_box if "b" in args.boxesToSet: curr_page.bleedBox = new_cropped_box return
python
def apply_crop_list(crop_list, input_doc, page_nums_to_crop, already_cropped_by_this_program): """Apply the crop list to the pages of the input PdfFileReader object.""" if args.restore and not already_cropped_by_this_program: print("\nWarning from pdfCropMargins: The Producer string indicates that" "\neither this document was not previously cropped by pdfCropMargins" "\nor else it was modified by another program after that. Trying the" "\nundo anyway...", file=sys.stderr) if args.restore and args.verbose: print("\nRestoring the document to margins saved for each page in the ArtBox.") if args.verbose and not args.restore: print("\nNew full page sizes after cropping, in PDF format (lbrt):") # Copy over each page, after modifying the appropriate PDF boxes. for page_num in range(input_doc.getNumPages()): curr_page = input_doc.getPage(page_num) # Restore any rotation which was originally on the page. curr_page.rotateClockwise(curr_page.rotationAngle) # Only do the restore from ArtBox if '--restore' option was selected. if args.restore: if not curr_page.artBox: print("\nWarning from pdfCropMargins: Attempting to restore pages from" "\nthe ArtBox in each page, but page", page_num, "has no readable" "\nArtBox. Leaving that page unchanged.", file=sys.stderr) continue curr_page.mediaBox = curr_page.artBox curr_page.cropBox = curr_page.artBox continue # Do the save to ArtBox if that option is chosen and Producer is set. if not args.noundosave and not already_cropped_by_this_program: curr_page.artBox = intersect_boxes(curr_page.mediaBox, curr_page.cropBox) # Reset the CropBox and MediaBox to their saved original values # (which were set in getFullPageBox, in the curr_page object's namespace). curr_page.mediaBox = curr_page.originalMediaBox curr_page.cropBox = curr_page.originalCropBox # Copy the original page without further mods if it wasn't in the range # selected for cropping. if page_num not in page_nums_to_crop: continue # Convert the computed "box to crop to" into a RectangleObject (for pyPdf). new_cropped_box = RectangleObject(crop_list[page_num]) if args.verbose: print("\t"+str(page_num+1)+"\t", new_cropped_box) # page numbering from 1 if not args.boxesToSet: args.boxesToSet = ["m", "c"] # Now set any boxes which were selected to be set via the --boxesToSet option. if "m" in args.boxesToSet: curr_page.mediaBox = new_cropped_box if "c" in args.boxesToSet: curr_page.cropBox = new_cropped_box if "t" in args.boxesToSet: curr_page.trimBox = new_cropped_box if "a" in args.boxesToSet: curr_page.artBox = new_cropped_box if "b" in args.boxesToSet: curr_page.bleedBox = new_cropped_box return
[ "def", "apply_crop_list", "(", "crop_list", ",", "input_doc", ",", "page_nums_to_crop", ",", "already_cropped_by_this_program", ")", ":", "if", "args", ".", "restore", "and", "not", "already_cropped_by_this_program", ":", "print", "(", "\"\\nWarning from pdfCropMargins: The Producer string indicates that\"", "\"\\neither this document was not previously cropped by pdfCropMargins\"", "\"\\nor else it was modified by another program after that. Trying the\"", "\"\\nundo anyway...\"", ",", "file", "=", "sys", ".", "stderr", ")", "if", "args", ".", "restore", "and", "args", ".", "verbose", ":", "print", "(", "\"\\nRestoring the document to margins saved for each page in the ArtBox.\"", ")", "if", "args", ".", "verbose", "and", "not", "args", ".", "restore", ":", "print", "(", "\"\\nNew full page sizes after cropping, in PDF format (lbrt):\"", ")", "# Copy over each page, after modifying the appropriate PDF boxes.", "for", "page_num", "in", "range", "(", "input_doc", ".", "getNumPages", "(", ")", ")", ":", "curr_page", "=", "input_doc", ".", "getPage", "(", "page_num", ")", "# Restore any rotation which was originally on the page.", "curr_page", ".", "rotateClockwise", "(", "curr_page", ".", "rotationAngle", ")", "# Only do the restore from ArtBox if '--restore' option was selected.", "if", "args", ".", "restore", ":", "if", "not", "curr_page", ".", "artBox", ":", "print", "(", "\"\\nWarning from pdfCropMargins: Attempting to restore pages from\"", "\"\\nthe ArtBox in each page, but page\"", ",", "page_num", ",", "\"has no readable\"", "\"\\nArtBox. Leaving that page unchanged.\"", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "curr_page", ".", "mediaBox", "=", "curr_page", ".", "artBox", "curr_page", ".", "cropBox", "=", "curr_page", ".", "artBox", "continue", "# Do the save to ArtBox if that option is chosen and Producer is set.", "if", "not", "args", ".", "noundosave", "and", "not", "already_cropped_by_this_program", ":", "curr_page", ".", "artBox", "=", "intersect_boxes", "(", "curr_page", ".", "mediaBox", ",", "curr_page", ".", "cropBox", ")", "# Reset the CropBox and MediaBox to their saved original values", "# (which were set in getFullPageBox, in the curr_page object's namespace).", "curr_page", ".", "mediaBox", "=", "curr_page", ".", "originalMediaBox", "curr_page", ".", "cropBox", "=", "curr_page", ".", "originalCropBox", "# Copy the original page without further mods if it wasn't in the range", "# selected for cropping.", "if", "page_num", "not", "in", "page_nums_to_crop", ":", "continue", "# Convert the computed \"box to crop to\" into a RectangleObject (for pyPdf).", "new_cropped_box", "=", "RectangleObject", "(", "crop_list", "[", "page_num", "]", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\t\"", "+", "str", "(", "page_num", "+", "1", ")", "+", "\"\\t\"", ",", "new_cropped_box", ")", "# page numbering from 1", "if", "not", "args", ".", "boxesToSet", ":", "args", ".", "boxesToSet", "=", "[", "\"m\"", ",", "\"c\"", "]", "# Now set any boxes which were selected to be set via the --boxesToSet option.", "if", "\"m\"", "in", "args", ".", "boxesToSet", ":", "curr_page", ".", "mediaBox", "=", "new_cropped_box", "if", "\"c\"", "in", "args", ".", "boxesToSet", ":", "curr_page", ".", "cropBox", "=", "new_cropped_box", "if", "\"t\"", "in", "args", ".", "boxesToSet", ":", "curr_page", ".", "trimBox", "=", "new_cropped_box", "if", "\"a\"", "in", "args", ".", "boxesToSet", ":", "curr_page", ".", "artBox", "=", "new_cropped_box", "if", "\"b\"", "in", "args", ".", 
"boxesToSet", ":", "curr_page", ".", "bleedBox", "=", "new_cropped_box", "return" ]
Apply the crop list to the pages of the input PdfFileReader object.
[ "Apply", "the", "crop", "list", "to", "the", "pages", "of", "the", "input", "PdfFileReader", "object", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L497-L562
train
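For readers unfamiliar with the box objects assigned in apply_crop_list, here is a minimal sketch of the same per-page operation, assuming the old PyPDF2 1.x API that this module targets; the page is created blank here rather than read from a file.

from PyPDF2.pdf import PageObject
from PyPDF2.generic import RectangleObject

# A blank US Letter page (612 x 792 points) instead of one read from disk.
page = PageObject.createBlankPage(width=612, height=792)

# A crop-list entry is an (l, b, r, t) box in PDF units; here, one-inch margins.
new_cropped_box = RectangleObject([72, 72, 540, 720])

# The default --boxesToSet is ["m", "c"], i.e. MediaBox and CropBox.
page.mediaBox = new_cropped_box
page.cropBox = new_cropped_box
print(page.mediaBox)   # RectangleObject([72, 72, 540, 720])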
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
setup_output_document
def setup_output_document(input_doc, tmp_input_doc, metadata_info, copy_document_catalog=True): """Create the output `PdfFileWriter` objects and copy over the relevant info.""" # NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters # seems to cause problems (writer can hang on write), so only one is used. # This is why the tmp_input_doc file was created earlier, to get copies of # the page objects which are independent of those in input_doc. An ugly # hack for a nasty bug to track down. # NOTE: You can get the _root_object attribute (dict for the document # catalog) from the output document after calling cloneReaderDocumentRoot # or else you can just directly get it from the input_doc.trailer dict, as # below (which is from the code for cloneReaderDocumentRoot), but you # CANNOT set the full _root_object to be the _root_object attribute for the # actual output_doc or else only blank pages show up in acroread (whether # or not there is any attempt to explicitly copy the pages over). The same # is true for using cloneDocumentFromReader (which just calls # cloneReaderDocumentRoot followed by appendPagesFromReader). At least the # '/Pages' key and value in _root_object cause problems, so they are # skipped in the partial copy. Probably a bug in PyPDF2. See the original # code for the routines on the github pages below. # # https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py # https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py # # Files still can change zoom mode on clicking outline links, but that is # an Adobe implementation problem, and happens even in the uncropped files: # https://superuser.com/questions/278302/ output_doc = PdfFileWriter() def root_objects_not_indirect(input_doc, root_object): """This can expand some of the `IndirectObject` objects in a root object to see the actual values. Currently only used for debugging. May mess up the input doc and require a temporary one.""" if isinstance(root_object, dict): return {root_objects_not_indirect(input_doc, key): root_objects_not_indirect(input_doc, value) for key, value in root_object.items()} elif isinstance(root_object, list): return [root_objects_not_indirect(input_doc, item) for item in root_object] elif isinstance(root_object, IndirectObject): return input_doc.getObject(root_object) else: return root_object doc_cat_whitelist = args.docCatWhitelist.split() if "ALL" in doc_cat_whitelist: doc_cat_whitelist = ["ALL"] doc_cat_blacklist = args.docCatBlacklist.split() if "ALL" in doc_cat_blacklist: doc_cat_blacklist = ["ALL"] # Partially copy over document catalog data from input_doc to output_doc. if not copy_document_catalog or ( not doc_cat_whitelist and doc_cat_blacklist == ["ALL"]): # Check this first, to completely skip the possibly problematic code getting # document catalog items when possible. Does not print a skipped list, though. if args.verbose: print("\nNot copying any document catalog items to the cropped document.") else: try: root_object = input_doc.trailer["/Root"] copied_items = [] skipped_items = [] for key, value in root_object.items(): # Some possible keys can be: # # /Type -- required, must have value /Catalog # /Pages -- required, indirect ref to page tree; skip, will change # /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen, # /UseOC, or /UseAttachments, with /UseNone default. 
# /OpenAction -- action to take when document is opened, like zooming # /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft, # /TwoColumnRight, /TwoPageLeft, /TwoPageRight # /Names -- a name dictionary to avoid having to use object numbers # /Outlines -- indirect ref to document outline, i.e., bookmarks # /Dests -- a dict of destinations in the PDF # /ViewerPreferences -- a viewer preferences dict # /MetaData -- XMP metadata, as opposed to other metadata # /PageLabels -- alternate numbering for pages, only affect PDF viewers if key == "/Pages": skipped_items.append(key) continue if doc_cat_whitelist != ["ALL"] and key not in doc_cat_whitelist: if doc_cat_blacklist == ["ALL"] or key in doc_cat_blacklist: skipped_items.append(key) continue copied_items.append(key) output_doc._root_object[NameObject(key)] = value if args.verbose: print("\nCopied these items from the document catalog:\n ", end="") print(*copied_items) print("Skipped copy of these items from the document catalog:\n ", end="") print(*skipped_items) except (KeyboardInterrupt, EOFError): raise except: # Just catch any errors here; don't know which might be raised. # On exception just warn and get a new PdfFileWriter object, to be safe. print("\nWarning: The document catalog data could not be copied to the" "\nnew, cropped document. Try fixing the PDF document using" "\n'--gsFix' if you have Ghostscript installed.", file=sys.stderr) output_doc = PdfFileWriter() #output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more. for page in [input_doc.getPage(i) for i in range(input_doc.getNumPages())]: output_doc.addPage(page) tmp_output_doc = PdfFileWriter() #tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more. for page in [tmp_input_doc.getPage(i) for i in range(tmp_input_doc.getNumPages())]: tmp_output_doc.addPage(page) ## ## Copy the metadata from input_doc to output_doc, modifying the Producer string ## if this program didn't already set it. Get bool for whether this program ## cropped the document already. ## already_cropped_by_this_program = set_cropped_metadata(input_doc, output_doc, metadata_info) return output_doc, tmp_output_doc, already_cropped_by_this_program
python
def setup_output_document(input_doc, tmp_input_doc, metadata_info, copy_document_catalog=True): """Create the output `PdfFileWriter` objects and copy over the relevant info.""" # NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters # seems to cause problems (writer can hang on write), so only one is used. # This is why the tmp_input_doc file was created earlier, to get copies of # the page objects which are independent of those in input_doc. An ugly # hack for a nasty bug to track down. # NOTE: You can get the _root_object attribute (dict for the document # catalog) from the output document after calling cloneReaderDocumentRoot # or else you can just directly get it from the input_doc.trailer dict, as # below (which is from the code for cloneReaderDocumentRoot), but you # CANNOT set the full _root_object to be the _root_object attribute for the # actual output_doc or else only blank pages show up in acroread (whether # or not there is any attempt to explicitly copy the pages over). The same # is true for using cloneDocumentFromReader (which just calls # cloneReaderDocumentRoot followed by appendPagesFromReader). At least the # '/Pages' key and value in _root_object cause problems, so they are # skipped in the partial copy. Probably a bug in PyPDF2. See the original # code for the routines on the github pages below. # # https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py # https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py # # Files still can change zoom mode on clicking outline links, but that is # an Adobe implementation problem, and happens even in the uncropped files: # https://superuser.com/questions/278302/ output_doc = PdfFileWriter() def root_objects_not_indirect(input_doc, root_object): """This can expand some of the `IndirectObject` objects in a root object to see the actual values. Currently only used for debugging. May mess up the input doc and require a temporary one.""" if isinstance(root_object, dict): return {root_objects_not_indirect(input_doc, key): root_objects_not_indirect(input_doc, value) for key, value in root_object.items()} elif isinstance(root_object, list): return [root_objects_not_indirect(input_doc, item) for item in root_object] elif isinstance(root_object, IndirectObject): return input_doc.getObject(root_object) else: return root_object doc_cat_whitelist = args.docCatWhitelist.split() if "ALL" in doc_cat_whitelist: doc_cat_whitelist = ["ALL"] doc_cat_blacklist = args.docCatBlacklist.split() if "ALL" in doc_cat_blacklist: doc_cat_blacklist = ["ALL"] # Partially copy over document catalog data from input_doc to output_doc. if not copy_document_catalog or ( not doc_cat_whitelist and doc_cat_blacklist == ["ALL"]): # Check this first, to completely skip the possibly problematic code getting # document catalog items when possible. Does not print a skipped list, though. if args.verbose: print("\nNot copying any document catalog items to the cropped document.") else: try: root_object = input_doc.trailer["/Root"] copied_items = [] skipped_items = [] for key, value in root_object.items(): # Some possible keys can be: # # /Type -- required, must have value /Catalog # /Pages -- required, indirect ref to page tree; skip, will change # /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen, # /UseOC, or /UseAttachments, with /UseNone default. 
# /OpenAction -- action to take when document is opened, like zooming # /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft, # /TwoColumnRight, /TwoPageLeft, /TwoPageRight # /Names -- a name dictionary to avoid having to use object numbers # /Outlines -- indirect ref to document outline, i.e., bookmarks # /Dests -- a dict of destinations in the PDF # /ViewerPreferences -- a viewer preferences dict # /MetaData -- XMP metadata, as opposed to other metadata # /PageLabels -- alternate numbering for pages, only affect PDF viewers if key == "/Pages": skipped_items.append(key) continue if doc_cat_whitelist != ["ALL"] and key not in doc_cat_whitelist: if doc_cat_blacklist == ["ALL"] or key in doc_cat_blacklist: skipped_items.append(key) continue copied_items.append(key) output_doc._root_object[NameObject(key)] = value if args.verbose: print("\nCopied these items from the document catalog:\n ", end="") print(*copied_items) print("Skipped copy of these items from the document catalog:\n ", end="") print(*skipped_items) except (KeyboardInterrupt, EOFError): raise except: # Just catch any errors here; don't know which might be raised. # On exception just warn and get a new PdfFileWriter object, to be safe. print("\nWarning: The document catalog data could not be copied to the" "\nnew, cropped document. Try fixing the PDF document using" "\n'--gsFix' if you have Ghostscript installed.", file=sys.stderr) output_doc = PdfFileWriter() #output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more. for page in [input_doc.getPage(i) for i in range(input_doc.getNumPages())]: output_doc.addPage(page) tmp_output_doc = PdfFileWriter() #tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more. for page in [tmp_input_doc.getPage(i) for i in range(tmp_input_doc.getNumPages())]: tmp_output_doc.addPage(page) ## ## Copy the metadata from input_doc to output_doc, modifying the Producer string ## if this program didn't already set it. Get bool for whether this program ## cropped the document already. ## already_cropped_by_this_program = set_cropped_metadata(input_doc, output_doc, metadata_info) return output_doc, tmp_output_doc, already_cropped_by_this_program
[ "def", "setup_output_document", "(", "input_doc", ",", "tmp_input_doc", ",", "metadata_info", ",", "copy_document_catalog", "=", "True", ")", ":", "# NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters", "# seems to cause problems (writer can hang on write), so only one is used.", "# This is why the tmp_input_doc file was created earlier, to get copies of", "# the page objects which are independent of those in input_doc. An ugly", "# hack for a nasty bug to track down.", "# NOTE: You can get the _root_object attribute (dict for the document", "# catalog) from the output document after calling cloneReaderDocumentRoot", "# or else you can just directly get it from the input_doc.trailer dict, as", "# below (which is from the code for cloneReaderDocumentRoot), but you", "# CANNOT set the full _root_object to be the _root_object attribute for the", "# actual output_doc or else only blank pages show up in acroread (whether", "# or not there is any attempt to explicitly copy the pages over). The same", "# is true for using cloneDocumentFromReader (which just calls", "# cloneReaderDocumentRoot followed by appendPagesFromReader). At least the", "# '/Pages' key and value in _root_object cause problems, so they are", "# skipped in the partial copy. Probably a bug in PyPDF2. See the original", "# code for the routines on the github pages below.", "#", "# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py", "# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py", "#", "# Files still can change zoom mode on clicking outline links, but that is", "# an Adobe implementation problem, and happens even in the uncropped files:", "# https://superuser.com/questions/278302/", "output_doc", "=", "PdfFileWriter", "(", ")", "def", "root_objects_not_indirect", "(", "input_doc", ",", "root_object", ")", ":", "\"\"\"This can expand some of the `IndirectObject` objects in a root object to\n see the actual values. Currently only used for debugging. May mess up the\n input doc and require a temporary one.\"\"\"", "if", "isinstance", "(", "root_object", ",", "dict", ")", ":", "return", "{", "root_objects_not_indirect", "(", "input_doc", ",", "key", ")", ":", "root_objects_not_indirect", "(", "input_doc", ",", "value", ")", "for", "key", ",", "value", "in", "root_object", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "root_object", ",", "list", ")", ":", "return", "[", "root_objects_not_indirect", "(", "input_doc", ",", "item", ")", "for", "item", "in", "root_object", "]", "elif", "isinstance", "(", "root_object", ",", "IndirectObject", ")", ":", "return", "input_doc", ".", "getObject", "(", "root_object", ")", "else", ":", "return", "root_object", "doc_cat_whitelist", "=", "args", ".", "docCatWhitelist", ".", "split", "(", ")", "if", "\"ALL\"", "in", "doc_cat_whitelist", ":", "doc_cat_whitelist", "=", "[", "\"ALL\"", "]", "doc_cat_blacklist", "=", "args", ".", "docCatBlacklist", ".", "split", "(", ")", "if", "\"ALL\"", "in", "doc_cat_blacklist", ":", "doc_cat_blacklist", "=", "[", "\"ALL\"", "]", "# Partially copy over document catalog data from input_doc to output_doc.", "if", "not", "copy_document_catalog", "or", "(", "not", "doc_cat_whitelist", "and", "doc_cat_blacklist", "==", "[", "\"ALL\"", "]", ")", ":", "# Check this first, to completely skip the possibly problematic code getting", "# document catalog items when possible. 
Does not print a skipped list, though.", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nNot copying any document catalog items to the cropped document.\"", ")", "else", ":", "try", ":", "root_object", "=", "input_doc", ".", "trailer", "[", "\"/Root\"", "]", "copied_items", "=", "[", "]", "skipped_items", "=", "[", "]", "for", "key", ",", "value", "in", "root_object", ".", "items", "(", ")", ":", "# Some possible keys can be:", "#", "# /Type -- required, must have value /Catalog", "# /Pages -- required, indirect ref to page tree; skip, will change", "# /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen,", "# /UseOC, or /UseAttachments, with /UseNone default.", "# /OpenAction -- action to take when document is opened, like zooming", "# /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft,", "# /TwoColumnRight, /TwoPageLeft, /TwoPageRight", "# /Names -- a name dictionary to avoid having to use object numbers", "# /Outlines -- indirect ref to document outline, i.e., bookmarks", "# /Dests -- a dict of destinations in the PDF", "# /ViewerPreferences -- a viewer preferences dict", "# /MetaData -- XMP metadata, as opposed to other metadata", "# /PageLabels -- alternate numbering for pages, only affect PDF viewers", "if", "key", "==", "\"/Pages\"", ":", "skipped_items", ".", "append", "(", "key", ")", "continue", "if", "doc_cat_whitelist", "!=", "[", "\"ALL\"", "]", "and", "key", "not", "in", "doc_cat_whitelist", ":", "if", "doc_cat_blacklist", "==", "[", "\"ALL\"", "]", "or", "key", "in", "doc_cat_blacklist", ":", "skipped_items", ".", "append", "(", "key", ")", "continue", "copied_items", ".", "append", "(", "key", ")", "output_doc", ".", "_root_object", "[", "NameObject", "(", "key", ")", "]", "=", "value", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nCopied these items from the document catalog:\\n \"", ",", "end", "=", "\"\"", ")", "print", "(", "*", "copied_items", ")", "print", "(", "\"Skipped copy of these items from the document catalog:\\n \"", ",", "end", "=", "\"\"", ")", "print", "(", "*", "skipped_items", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "raise", "except", ":", "# Just catch any errors here; don't know which might be raised.", "# On exception just warn and get a new PdfFileWriter object, to be safe.", "print", "(", "\"\\nWarning: The document catalog data could not be copied to the\"", "\"\\nnew, cropped document. Try fixing the PDF document using\"", "\"\\n'--gsFix' if you have Ghostscript installed.\"", ",", "file", "=", "sys", ".", "stderr", ")", "output_doc", "=", "PdfFileWriter", "(", ")", "#output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more.", "for", "page", "in", "[", "input_doc", ".", "getPage", "(", "i", ")", "for", "i", "in", "range", "(", "input_doc", ".", "getNumPages", "(", ")", ")", "]", ":", "output_doc", ".", "addPage", "(", "page", ")", "tmp_output_doc", "=", "PdfFileWriter", "(", ")", "#tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more.", "for", "page", "in", "[", "tmp_input_doc", ".", "getPage", "(", "i", ")", "for", "i", "in", "range", "(", "tmp_input_doc", ".", "getNumPages", "(", ")", ")", "]", ":", "tmp_output_doc", ".", "addPage", "(", "page", ")", "##", "## Copy the metadata from input_doc to output_doc, modifying the Producer string", "## if this program didn't already set it. 
Get bool for whether this program", "## cropped the document already.", "##", "already_cropped_by_this_program", "=", "set_cropped_metadata", "(", "input_doc", ",", "output_doc", ",", "metadata_info", ")", "return", "output_doc", ",", "tmp_output_doc", ",", "already_cropped_by_this_program" ]
Create the output `PdfFileWriter` objects and copy over the relevant info.
[ "Create", "the", "output", "PdfFileWriter", "objects", "and", "copy", "over", "the", "relevant", "info", "." ]
55aca874613750ebf4ae69fd8851bdbb7696d6ac
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L564-L689
train
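The catalog-copying logic in setup_output_document reduces to a small loop; a condensed sketch follows, with `reader` and `writer` assumed to be an already-open PdfFileReader and PdfFileWriter (old PyPDF2 API) and no whitelist/blacklist handling.

from PyPDF2.generic import NameObject

def copy_catalog_except_pages(reader, writer):
    # Carry every document-catalog entry over except /Pages, which the
    # writer maintains itself (copying it blanks the pages in some viewers).
    copied, skipped = [], []
    for key, value in reader.trailer["/Root"].items():
        if key == "/Pages":
            skipped.append(key)
            continue
        writer._root_object[NameObject(key)] = value
        copied.append(key)
    return copied, skipped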
miracle2k/flask-assets
src/flask_assets.py
FlaskConfigStorage.setdefault
def setdefault(self, key, value): """We may not always be connected to an app, but we still need to provide a way to the base environment to set its defaults. """ try: super(FlaskConfigStorage, self).setdefault(key, value) except RuntimeError: self._defaults.__setitem__(key, value)
python
def setdefault(self, key, value): """We may not always be connected to an app, but we still need to provide a way to the base environment to set its defaults. """ try: super(FlaskConfigStorage, self).setdefault(key, value) except RuntimeError: self._defaults.__setitem__(key, value)
[ "def", "setdefault", "(", "self", ",", "key", ",", "value", ")", ":", "try", ":", "super", "(", "FlaskConfigStorage", ",", "self", ")", ".", "setdefault", "(", "key", ",", "value", ")", "except", "RuntimeError", ":", "self", ".", "_defaults", ".", "__setitem__", "(", "key", ",", "value", ")" ]
We may not always be connected to an app, but we still need to provide a way to the base environment to set its defaults.
[ "We", "may", "not", "always", "be", "connected", "to", "an", "app", "but", "we", "still", "need", "to", "provide", "a", "way", "to", "the", "base", "environment", "to", "set", "it", "s", "defaults", "." ]
ea9ff985bc96b79edb12ad4bed69403173f75562
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L75-L82
train
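The try/except in setdefault is a general defer-until-bound pattern; this standalone sketch (not Flask code) shows the same idea, with the RuntimeError standing in for "no app is connected yet".

class DeferredDefaults:
    def __init__(self):
        self._defaults = {}
        self._store = None          # the real config dict, once bound

    def setdefault(self, key, value):
        try:
            if self._store is None:
                raise RuntimeError("no app bound")
            self._store.setdefault(key, value)
        except RuntimeError:
            self._defaults[key] = value   # buffer until an app appears

    def bind(self, store):
        self._store = store
        for key, value in self._defaults.items():
            store.setdefault(key, value)

cfg = DeferredDefaults()
cfg.setdefault("ASSETS_DEBUG", False)    # buffered; no store yet
real_config = {}
cfg.bind(real_config)
print(real_config)                       # {'ASSETS_DEBUG': False}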
miracle2k/flask-assets
src/flask_assets.py
Environment._app
def _app(self): """The application object to work with; this is either the app that we have been bound to, or the current application. """ if self.app is not None: return self.app ctx = _request_ctx_stack.top if ctx is not None: return ctx.app try: from flask import _app_ctx_stack app_ctx = _app_ctx_stack.top if app_ctx is not None: return app_ctx.app except ImportError: pass raise RuntimeError('assets instance not bound to an application, '+ 'and no application in current context')
python
def _app(self): """The application object to work with; this is either the app that we have been bound to, or the current application. """ if self.app is not None: return self.app ctx = _request_ctx_stack.top if ctx is not None: return ctx.app try: from flask import _app_ctx_stack app_ctx = _app_ctx_stack.top if app_ctx is not None: return app_ctx.app except ImportError: pass raise RuntimeError('assets instance not bound to an application, '+ 'and no application in current context')
[ "def", "_app", "(", "self", ")", ":", "if", "self", ".", "app", "is", "not", "None", ":", "return", "self", ".", "app", "ctx", "=", "_request_ctx_stack", ".", "top", "if", "ctx", "is", "not", "None", ":", "return", "ctx", ".", "app", "try", ":", "from", "flask", "import", "_app_ctx_stack", "app_ctx", "=", "_app_ctx_stack", ".", "top", "if", "app_ctx", "is", "not", "None", ":", "return", "app_ctx", ".", "app", "except", "ImportError", ":", "pass", "raise", "RuntimeError", "(", "'assets instance not bound to an application, '", "+", "'and no application in current context'", ")" ]
The application object to work with; this is either the app that we have been bound to, or the current application.
[ "The", "application", "object", "to", "work", "with", ";", "this", "is", "either", "the", "app", "that", "we", "have", "been", "bound", "to", "or", "the", "current", "application", "." ]
ea9ff985bc96b79edb12ad4bed69403173f75562
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L310-L330
train
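A usage sketch of the lookup order _app implements, assuming Flask and Flask-Assets are installed: an explicitly bound app is returned directly, otherwise the current application context is consulted, and with neither available a RuntimeError is raised.

from flask import Flask
from flask_assets import Environment

app = Flask(__name__)

bound = Environment(app)            # bound at construction
assert bound._app is app            # returned without touching any context

unbound = Environment()             # no app bound
with app.app_context():
    assert unbound._app is app      # resolved from the application context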
miracle2k/flask-assets
src/flask_assets.py
Environment.from_yaml
def from_yaml(self, path): """Register bundles from a YAML configuration file""" bundles = YAMLLoader(path).load_bundles() for name in bundles: self.register(name, bundles[name])
python
def from_yaml(self, path): """Register bundles from a YAML configuration file""" bundles = YAMLLoader(path).load_bundles() for name in bundles: self.register(name, bundles[name])
[ "def", "from_yaml", "(", "self", ",", "path", ")", ":", "bundles", "=", "YAMLLoader", "(", "path", ")", ".", "load_bundles", "(", ")", "for", "name", "in", "bundles", ":", "self", ".", "register", "(", "name", ",", "bundles", "[", "name", "]", ")" ]
Register bundles from a YAML configuration file
[ "Register", "bundles", "from", "a", "YAML", "configuration", "file" ]
ea9ff985bc96b79edb12ad4bed69403173f75562
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L361-L365
train
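A usage sketch for from_yaml, assuming PyYAML is installed and following the webassets YAML conventions (each top-level key names a bundle, with contents, optional filters, and an output path); the file name is illustrative.

from flask import Flask
from flask_assets import Environment

BUNDLES_YAML = """\
js-all:
    contents:
        - js/jquery.js
        - js/app.js
    filters: rjsmin
    output: gen/packed.js
"""

with open("bundles.yaml", "w") as f:
    f.write(BUNDLES_YAML)

app = Flask(__name__)
assets = Environment(app)
assets.from_yaml("bundles.yaml")     # registers the "js-all" bundle
print(assets["js-all"].output)       # gen/packed.js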
miracle2k/flask-assets
src/flask_assets.py
Environment.from_module
def from_module(self, path): """Register bundles from a Python module""" bundles = PythonLoader(path).load_bundles() for name in bundles: self.register(name, bundles[name])
python
def from_module(self, path): """Register bundles from a Python module""" bundles = PythonLoader(path).load_bundles() for name in bundles: self.register(name, bundles[name])
[ "def", "from_module", "(", "self", ",", "path", ")", ":", "bundles", "=", "PythonLoader", "(", "path", ")", ".", "load_bundles", "(", ")", "for", "name", "in", "bundles", ":", "self", ".", "register", "(", "name", ",", "bundles", "[", "name", "]", ")" ]
Register bundles from a Python module
[ "Register", "bundles", "from", "a", "Python", "module" ]
ea9ff985bc96b79edb12ad4bed69403173f75562
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L367-L371
train
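from_module is the Python-side analogue: PythonLoader collects Bundle objects defined at the top level of the named module. A hypothetical myapp/bundles.py might contain:

from flask_assets import Bundle

# Picked up by assets.from_module("myapp.bundles") and registered
# under its attribute name, "js_all".
js_all = Bundle("js/jquery.js", "js/app.js",
                filters="rjsmin", output="gen/packed.js")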
persephone-tools/persephone
persephone/__init__.py
handle_unhandled_exception
def handle_unhandled_exception(exc_type, exc_value, exc_traceback): """Handler for unhandled exceptions that will write to the logs""" if issubclass(exc_type, KeyboardInterrupt): # call the default excepthook saved at __excepthook__ sys.__excepthook__(exc_type, exc_value, exc_traceback) return logger = logging.getLogger(__name__) # type: ignore logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
python
def handle_unhandled_exception(exc_type, exc_value, exc_traceback): """Handler for unhandled exceptions that will write to the logs""" if issubclass(exc_type, KeyboardInterrupt): # call the default excepthook saved at __excepthook__ sys.__excepthook__(exc_type, exc_value, exc_traceback) return logger = logging.getLogger(__name__) # type: ignore logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
[ "def", "handle_unhandled_exception", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ":", "if", "issubclass", "(", "exc_type", ",", "KeyboardInterrupt", ")", ":", "# call the default excepthook saved at __excepthook__", "sys", ".", "__excepthook__", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", "return", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# type: ignore", "logger", ".", "critical", "(", "\"Unhandled exception\"", ",", "exc_info", "=", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ")" ]
Handler for unhandled exceptions that will write to the logs
[ "Handler", "for", "unhandled", "exceptions", "that", "will", "write", "to", "the", "logs" ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/__init__.py#L6-L13
train
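A usage sketch for installing the handler, with an illustrative logging setup (the package configures its own logging; the file name here is an assumption):

import logging
import sys

import persephone   # defines handle_unhandled_exception in __init__.py

logging.basicConfig(filename="persephone.log", level=logging.INFO)
sys.excepthook = persephone.handle_unhandled_exception

# From here on, any uncaught exception is written to persephone.log with
# its full traceback, while Ctrl-C (KeyboardInterrupt) still reaches the
# default hook and terminates quietly.
raise ValueError("demo crash that ends up in the log")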
persephone-tools/persephone
persephone/utterance.py
write_transcriptions
def write_transcriptions(utterances: List[Utterance], tgt_dir: Path, ext: str, lazy: bool) -> None: """ Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones". """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: out_path = tgt_dir / "{}.{}".format(utter.prefix, ext) if lazy and out_path.is_file(): continue with out_path.open("w") as f: print(utter.text, file=f)
python
def write_transcriptions(utterances: List[Utterance], tgt_dir: Path, ext: str, lazy: bool) -> None: """ Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones". """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: out_path = tgt_dir / "{}.{}".format(utter.prefix, ext) if lazy and out_path.is_file(): continue with out_path.open("w") as f: print(utter.text, file=f)
[ "def", "write_transcriptions", "(", "utterances", ":", "List", "[", "Utterance", "]", ",", "tgt_dir", ":", "Path", ",", "ext", ":", "str", ",", "lazy", ":", "bool", ")", "->", "None", ":", "tgt_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "for", "utter", "in", "utterances", ":", "out_path", "=", "tgt_dir", "/", "\"{}.{}\"", ".", "format", "(", "utter", ".", "prefix", ",", "ext", ")", "if", "lazy", "and", "out_path", ".", "is_file", "(", ")", ":", "continue", "with", "out_path", ".", "open", "(", "\"w\"", ")", "as", "f", ":", "print", "(", "utter", ".", "text", ",", "file", "=", "f", ")" ]
Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones".
[ "Write", "the", "utterance", "transcriptions", "to", "files", "in", "the", "tgt_dir", ".", "Is", "lazy", "and", "checks", "if", "the", "file", "already", "exists", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L45-L65
train
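A usage sketch with a minimal stand-in for the Utterance type (the real NamedTuple in persephone.utterance carries more fields); write_transcriptions only reads .prefix and .text:

from collections import namedtuple
from pathlib import Path

from persephone.utterance import write_transcriptions

Utt = namedtuple("Utt", ["prefix", "text"])   # stand-in, not the real type

utterances = [Utt("rec001.0", "a b c"), Utt("rec001.1", "d e f")]
write_transcriptions(utterances, Path("label"), ext="phonemes", lazy=True)
# Creates label/rec001.0.phonemes and label/rec001.1.phonemes, skipping
# any file that already exists because lazy=True.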
persephone-tools/persephone
persephone/utterance.py
remove_duplicates
def remove_duplicates(utterances: List[Utterance]) -> List[Utterance]: """ Removes utterances with the same start_time, end_time and text. Other metadata isn't considered. """ filtered_utters = [] utter_set = set() # type: Set[Tuple[int, int, str]] for utter in utterances: if (utter.start_time, utter.end_time, utter.text) in utter_set: continue filtered_utters.append(utter) utter_set.add((utter.start_time, utter.end_time, utter.text)) return filtered_utters
python
def remove_duplicates(utterances: List[Utterance]) -> List[Utterance]: """ Removes utterances with the same start_time, end_time and text. Other metadata isn't considered. """ filtered_utters = [] utter_set = set() # type: Set[Tuple[int, int, str]] for utter in utterances: if (utter.start_time, utter.end_time, utter.text) in utter_set: continue filtered_utters.append(utter) utter_set.add((utter.start_time, utter.end_time, utter.text)) return filtered_utters
[ "def", "remove_duplicates", "(", "utterances", ":", "List", "[", "Utterance", "]", ")", "->", "List", "[", "Utterance", "]", ":", "filtered_utters", "=", "[", "]", "utter_set", "=", "set", "(", ")", "# type: Set[Tuple[int, int, str]]", "for", "utter", "in", "utterances", ":", "if", "(", "utter", ".", "start_time", ",", "utter", ".", "end_time", ",", "utter", ".", "text", ")", "in", "utter_set", ":", "continue", "filtered_utters", ".", "append", "(", "utter", ")", "utter_set", ".", "add", "(", "(", "utter", ".", "start_time", ",", "utter", ".", "end_time", ",", "utter", ".", "text", ")", ")", "return", "filtered_utters" ]
Removes utterances with the same start_time, end_time and text. Other metadata isn't considered.
[ "Removes", "utterances", "with", "the", "same", "start_time", "end_time", "and", "text", ".", "Other", "metadata", "isn", "t", "considered", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L67-L80
train
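A usage sketch showing that only (start_time, end_time, text) participate in the duplicate check; again a minimal stand-in type is used:

from collections import namedtuple

from persephone.utterance import remove_duplicates

Utt = namedtuple("Utt", ["start_time", "end_time", "text", "speaker"])

utters = [
    Utt(0, 1000, "hello", "spk1"),
    Utt(0, 1000, "hello", "spk2"),     # duplicate on the three keyed fields
    Utt(1000, 2000, "world", "spk1"),
]
print(len(remove_duplicates(utters)))  # 2: the second "hello" is dropped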
persephone-tools/persephone
persephone/utterance.py
make_speaker_utters
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]: """ Creates a dictionary mapping from speakers to their utterances. """ speaker_utters = defaultdict(list) # type: DefaultDict[str, List[Utterance]] for utter in utterances: speaker_utters[utter.speaker].append(utter) return speaker_utters
python
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]: """ Creates a dictionary mapping from speakers to their utterances. """ speaker_utters = defaultdict(list) # type: DefaultDict[str, List[Utterance]] for utter in utterances: speaker_utters[utter.speaker].append(utter) return speaker_utters
[ "def", "make_speaker_utters", "(", "utterances", ":", "List", "[", "Utterance", "]", ")", "->", "Dict", "[", "str", ",", "List", "[", "Utterance", "]", "]", ":", "speaker_utters", "=", "defaultdict", "(", "list", ")", "# type: DefaultDict[str, List[Utterance]]", "for", "utter", "in", "utterances", ":", "speaker_utters", "[", "utter", ".", "speaker", "]", ".", "append", "(", "utter", ")", "return", "speaker_utters" ]
Creates a dictionary mapping from speakers to their utterances.
[ "Creates", "a", "dictionary", "mapping", "from", "speakers", "to", "their", "utterances", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L106-L113
train
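And the grouping helper, with the same kind of stand-in type:

from collections import namedtuple

from persephone.utterance import make_speaker_utters

Utt = namedtuple("Utt", ["start_time", "end_time", "text", "speaker"])
utters = [Utt(0, 1, "hello", "spk1"), Utt(1, 2, "world", "spk1"),
          Utt(2, 3, "hi", "spk2")]

by_speaker = make_speaker_utters(utters)
print([u.text for u in by_speaker["spk1"]])   # ['hello', 'world']
print([u.text for u in by_speaker["spk2"]])   # ['hi']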
persephone-tools/persephone
persephone/utterance.py
remove_too_short
def remove_too_short(utterances: List[Utterance], _winlen=25, winstep=10) -> List[Utterance]: """ Removes utterances that will probably have issues with CTC because of the number of frames being less than the number of tokens in the transcription. Assuming char tokenization to minimize false negatives. """ def is_too_short(utterance: Utterance) -> bool: charlen = len(utterance.text) if (duration(utterance) / winstep) < charlen: return True else: return False return [utter for utter in utterances if not is_too_short(utter)]
python
def remove_too_short(utterances: List[Utterance], _winlen=25, winstep=10) -> List[Utterance]: """ Removes utterances that will probably have issues with CTC because of the number of frames being less than the number of tokens in the transcription. Assuming char tokenization to minimize false negatives. """ def is_too_short(utterance: Utterance) -> bool: charlen = len(utterance.text) if (duration(utterance) / winstep) < charlen: return True else: return False return [utter for utter in utterances if not is_too_short(utter)]
[ "def", "remove_too_short", "(", "utterances", ":", "List", "[", "Utterance", "]", ",", "_winlen", "=", "25", ",", "winstep", "=", "10", ")", "->", "List", "[", "Utterance", "]", ":", "def", "is_too_short", "(", "utterance", ":", "Utterance", ")", "->", "bool", ":", "charlen", "=", "len", "(", "utterance", ".", "text", ")", "if", "(", "duration", "(", "utterance", ")", "/", "winstep", ")", "<", "charlen", ":", "return", "True", "else", ":", "return", "False", "return", "[", "utter", "for", "utter", "in", "utterances", "if", "not", "is_too_short", "(", "utter", ")", "]" ]
Removes utterances that will probably have issues with CTC because of the number of frames being less than the number of tokens in the transcription. Assuming char tokenization to minimize false negatives.
[ "Removes", "utterances", "that", "will", "probably", "have", "issues", "with", "CTC", "because", "of", "the", "number", "of", "frames", "being", "less", "than", "the", "number", "of", "tokens", "in", "the", "transcription", ".", "Assuming", "char", "tokenization", "to", "minimize", "false", "negatives", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L128-L141
train
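A worked check of the frame-budget arithmetic behind remove_too_short, assuming (as the defaults suggest) that times are in milliseconds, so duration()/winstep approximates the number of feature frames; CTC can emit at most one label per frame:

duration_ms = 400     # a 0.4 s utterance
winstep_ms = 10       # default window step
charlen = 45          # characters in the transcription

frames = duration_ms / winstep_ms   # ~40 feature frames
print(frames < charlen)             # True -> utterance would be removed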
persephone-tools/persephone
persephone/distance.py
min_edit_distance
def min_edit_distance( source: Sequence[T], target: Sequence[T], ins_cost: Callable[..., int] = lambda _x: 1, del_cost: Callable[..., int] = lambda _x: 1, sub_cost: Callable[..., int] = lambda x, y: 0 if x == y else 1) -> int: """Calculates the minimum edit distance between two sequences. Uses the Levenshtein weighting as a default, but offers keyword arguments to supply functions to measure the costs for editing with different elements. Args: ins_cost: A function describing the cost of inserting a given char del_cost: A function describing the cost of deleting a given char sub_cost: A function describing the cost of substituting one char for another Returns: The edit distance between the two input sequences. """ # Initialize an m+1 by n+1 array. Note that the strings start from index 1, # with index 0 being used to denote the empty string. n = len(target) m = len(source) distance = np.zeros((m+1, n+1), dtype=np.int16) # Initialize the zeroth row and column to be the distance from the empty # string. for i in range(1, m+1): distance[i, 0] = distance[i-1, 0] + ins_cost(source[i-1]) for j in range(1, n+1): distance[0, j] = distance[0, j-1] + ins_cost(target[j-1]) # Do the dynamic programming to fill in the matrix with the edit distances. for j in range(1, n+1): for i in range(1, m+1): distance[i, j] = min( distance[i-1, j] + ins_cost(source[i-1]), distance[i-1, j-1] + sub_cost(source[i-1], target[j-1]), distance[i, j-1] + del_cost(target[j-1])) return int(distance[len(source), len(target)])
python
def min_edit_distance( source: Sequence[T], target: Sequence[T], ins_cost: Callable[..., int] = lambda _x: 1, del_cost: Callable[..., int] = lambda _x: 1, sub_cost: Callable[..., int] = lambda x, y: 0 if x == y else 1) -> int: """Calculates the minimum edit distance between two sequences. Uses the Levenshtein weighting as a default, but offers keyword arguments to supply functions to measure the costs for editing with different elements. Args: ins_cost: A function describing the cost of inserting a given char del_cost: A function describing the cost of deleting a given char sub_cost: A function describing the cost of substituting one char for another Returns: The edit distance between the two input sequences. """ # Initialize an m+1 by n+1 array. Note that the strings start from index 1, # with index 0 being used to denote the empty string. n = len(target) m = len(source) distance = np.zeros((m+1, n+1), dtype=np.int16) # Initialize the zeroth row and column to be the distance from the empty # string. for i in range(1, m+1): distance[i, 0] = distance[i-1, 0] + ins_cost(source[i-1]) for j in range(1, n+1): distance[0, j] = distance[0, j-1] + ins_cost(target[j-1]) # Do the dynamic programming to fill in the matrix with the edit distances. for j in range(1, n+1): for i in range(1, m+1): distance[i, j] = min( distance[i-1, j] + ins_cost(source[i-1]), distance[i-1, j-1] + sub_cost(source[i-1], target[j-1]), distance[i, j-1] + del_cost(target[j-1])) return int(distance[len(source), len(target)])
[ "def", "min_edit_distance", "(", "source", ":", "Sequence", "[", "T", "]", ",", "target", ":", "Sequence", "[", "T", "]", ",", "ins_cost", ":", "Callable", "[", "...", ",", "int", "]", "=", "lambda", "_x", ":", "1", ",", "del_cost", ":", "Callable", "[", "...", ",", "int", "]", "=", "lambda", "_x", ":", "1", ",", "sub_cost", ":", "Callable", "[", "...", ",", "int", "]", "=", "lambda", "x", ",", "y", ":", "0", "if", "x", "==", "y", "else", "1", ")", "->", "int", ":", "# Initialize an m+1 by n+1 array. Note that the strings start from index 1,", "# with index 0 being used to denote the empty string.", "n", "=", "len", "(", "target", ")", "m", "=", "len", "(", "source", ")", "distance", "=", "np", ".", "zeros", "(", "(", "m", "+", "1", ",", "n", "+", "1", ")", ",", "dtype", "=", "np", ".", "int16", ")", "# Initialize the zeroth row and column to be the distance from the empty", "# string.", "for", "i", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "distance", "[", "i", ",", "0", "]", "=", "distance", "[", "i", "-", "1", ",", "0", "]", "+", "ins_cost", "(", "source", "[", "i", "-", "1", "]", ")", "for", "j", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "distance", "[", "0", ",", "j", "]", "=", "distance", "[", "0", ",", "j", "-", "1", "]", "+", "ins_cost", "(", "target", "[", "j", "-", "1", "]", ")", "# Do the dynamic programming to fill in the matrix with the edit distances.", "for", "j", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "for", "i", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "distance", "[", "i", ",", "j", "]", "=", "min", "(", "distance", "[", "i", "-", "1", ",", "j", "]", "+", "ins_cost", "(", "source", "[", "i", "-", "1", "]", ")", ",", "distance", "[", "i", "-", "1", ",", "j", "-", "1", "]", "+", "sub_cost", "(", "source", "[", "i", "-", "1", "]", ",", "target", "[", "j", "-", "1", "]", ")", ",", "distance", "[", "i", ",", "j", "-", "1", "]", "+", "del_cost", "(", "target", "[", "j", "-", "1", "]", ")", ")", "return", "int", "(", "distance", "[", "len", "(", "source", ")", ",", "len", "(", "target", ")", "]", ")" ]
Calculates the minimum edit distance between two sequences. Uses the Levenshtein weighting as a default, but offers keyword arguments to supply functions to measure the costs for editing with different elements. Args: ins_cost: A function describing the cost of inserting a given char del_cost: A function describing the cost of deleting a given char sub_cost: A function describing the cost of substituting one char for another Returns: The edit distance between the two input sequences.
[ "Calculates", "the", "minimum", "edit", "distance", "between", "two", "sequences", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/distance.py#L9-L51
train
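A usage sketch: with the default costs this is plain Levenshtein distance, and the keyword arguments allow custom weightings.

from persephone.distance import min_edit_distance

print(min_edit_distance("kitten", "sitting"))   # 3 (classic Levenshtein)

# Make substitutions twice as expensive as insertions/deletions; the
# cheapest edit of "abc" -> "axc" now costs 2 either way.
print(min_edit_distance("abc", "axc",
                        sub_cost=lambda x, y: 0 if x == y else 2))  # 2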
persephone-tools/persephone
persephone/distance.py
word_error_rate
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float: """ Calculate the word error rate of a sequence against a reference. Args: ref: The gold-standard reference sequence hyp: The hypothesis to be evaluated against the reference. Returns: The word error rate of the supplied hypothesis with respect to the reference string. Raises: persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0. """ if len(ref) == 0: raise EmptyReferenceException( "Cannot calculate word error rate against a length 0 "\ "reference sequence.") distance = min_edit_distance(ref, hyp) return 100 * float(distance) / len(ref)
python
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float: """ Calculate the word error rate of a sequence against a reference. Args: ref: The gold-standard reference sequence hyp: The hypothesis to be evaluated against the reference. Returns: The word error rate of the supplied hypothesis with respect to the reference string. Raises: persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0. """ if len(ref) == 0: raise EmptyReferenceException( "Cannot calculate word error rate against a length 0 "\ "reference sequence.") distance = min_edit_distance(ref, hyp) return 100 * float(distance) / len(ref)
[ "def", "word_error_rate", "(", "ref", ":", "Sequence", "[", "T", "]", ",", "hyp", ":", "Sequence", "[", "T", "]", ")", "->", "float", ":", "if", "len", "(", "ref", ")", "==", "0", ":", "raise", "EmptyReferenceException", "(", "\"Cannot calculating word error rate against a length 0 \"", "\"reference sequence.\"", ")", "distance", "=", "min_edit_distance", "(", "ref", ",", "hyp", ")", "return", "100", "*", "float", "(", "distance", ")", "/", "len", "(", "ref", ")" ]
Calculate the word error rate of a sequence against a reference. Args: ref: The gold-standard reference sequence hyp: The hypothesis to be evaluated against the reference. Returns: The word error rate of the supplied hypothesis with respect to the reference string. Raises: persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
[ "Calculate", "the", "word", "error", "rate", "of", "a", "sequence", "against", "a", "reference", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/distance.py#L178-L200
train
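A usage sketch: the caller tokenizes into words; the function just compares sequences and scales by the reference length.

from persephone.distance import word_error_rate

ref = "the cat sat on the mat".split()
hyp = "the cat sat on mat".split()     # one deletion

print(word_error_rate(ref, hyp))       # 16.66...: 1 error / 6 words * 100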
persephone-tools/persephone
persephone/model.py
dense_to_human_readable
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]: """ Converts a dense representation of model-decoded output into a human-readable form, using a mapping from indices to labels. """ transcripts = [] for dense_r in dense_repr: non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0] transcript = [index_to_label[index] for index in non_empty_phonemes] transcripts.append(transcript) return transcripts
python
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]: """ Converts a dense representation of model-decoded output into a human-readable form, using a mapping from indices to labels. """ transcripts = [] for dense_r in dense_repr: non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0] transcript = [index_to_label[index] for index in non_empty_phonemes] transcripts.append(transcript) return transcripts
[ "def", "dense_to_human_readable", "(", "dense_repr", ":", "Sequence", "[", "Sequence", "[", "int", "]", "]", ",", "index_to_label", ":", "Dict", "[", "int", ",", "str", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "transcripts", "=", "[", "]", "for", "dense_r", "in", "dense_repr", ":", "non_empty_phonemes", "=", "[", "phn_i", "for", "phn_i", "in", "dense_r", "if", "phn_i", "!=", "0", "]", "transcript", "=", "[", "index_to_label", "[", "index", "]", "for", "index", "in", "non_empty_phonemes", "]", "transcripts", ".", "append", "(", "transcript", ")", "return", "transcripts" ]
Converts a dense representation of model-decoded output into a human-readable form, using a mapping from indices to labels.
[ "Converts", "a", "dense", "representation", "of", "model", "decoded", "output", "into", "human", "readable", "using", "a", "mapping", "from", "indices", "to", "labels", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L36-L46
train
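A usage sketch: index 0 is treated as empty (the padding in the dense decoder output here) and dropped; everything else is mapped through the dictionary.

from persephone.model import dense_to_human_readable

index_to_label = {1: "a", 2: "b", 3: "c"}
dense = [[1, 0, 2], [3, 3, 0]]

print(dense_to_human_readable(dense, index_to_label))
# [['a', 'b'], ['c', 'c']]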
persephone-tools/persephone
persephone/model.py
decode
def decode(model_path_prefix: Union[str, Path], input_paths: Sequence[Path], label_set: Set[str], *, feature_type: str = "fbank", #TODO Make this None and infer feature_type from dimension of NN input layer. batch_size: int = 64, feat_dir: Optional[Path]=None, batch_x_name: str="batch_x:0", batch_x_lens_name: str="batch_x_lens:0", output_name: str="hyp_dense_decoded:0") -> List[List[str]]: """Use an existing tensorflow model that exists on disk to decode WAV files. Args: model_path_prefix: The path to the saved tensorflow model. This is the full prefix to the ".ckpt" file. input_paths: A sequence of `pathlib.Path`s to WAV files to put through the model provided. label_set: The set of all the labels this model uses. feature_type: The type of features this model uses. Note that this MUST match the type of features that the model was trained on initially. feat_dir: Any files that require preprocessing will be saved to the path specified by this. batch_x_name: The name of the tensorflow input for batch_x batch_x_lens_name: The name of the tensorflow input for batch_x_lens output_name: The name of the tensorflow output """ if not input_paths: raise PersephoneException("No untranscribed WAVs to transcribe.") model_path_prefix = str(model_path_prefix) for p in input_paths: if not p.exists(): raise PersephoneException( "The WAV file path {} does not exist".format(p) ) preprocessed_file_paths = [] for p in input_paths: prefix = p.stem # Check the "feat" directory as per the filesystem conventions of a Corpus feature_file_ext = ".{}.npy".format(feature_type) conventional_npy_location = p.parent.parent / "feat" / (Path(prefix + feature_file_ext)) if conventional_npy_location.exists(): # don't need to preprocess it preprocessed_file_paths.append(conventional_npy_location) else: if not feat_dir: feat_dir = p.parent.parent / "feat" if not feat_dir.is_dir(): os.makedirs(str(feat_dir)) mono16k_wav_path = feat_dir / "{}.wav".format(prefix) feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type) feat_extract.convert_wav(p, mono16k_wav_path) preprocessed_file_paths.append(feat_path) # preprocess the file that weren't found in the features directory # as per the filesystem conventions if feat_dir: feat_extract.from_dir(feat_dir, feature_type) fn_batches = utils.make_batches(preprocessed_file_paths, batch_size) # Load the model and perform decoding. metagraph = load_metagraph(model_path_prefix) with tf.Session() as sess: metagraph.restore(sess, model_path_prefix) for fn_batch in fn_batches: batch_x, batch_x_lens = utils.load_batch_x(fn_batch) # TODO These placeholder names should be a backup if names from a newer # naming scheme aren't present. Otherwise this won't generalize to # different architectures. feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens} dense_decoded = sess.run(output_name, feed_dict=feed_dict) # Create a human-readable representation of the decoded. indices_to_labels = labels.make_indices_to_labels(label_set) human_readable = dense_to_human_readable(dense_decoded, indices_to_labels) return human_readable
python
def decode(model_path_prefix: Union[str, Path], input_paths: Sequence[Path], label_set: Set[str], *, feature_type: str = "fbank", #TODO Make this None and infer feature_type from dimension of NN input layer. batch_size: int = 64, feat_dir: Optional[Path]=None, batch_x_name: str="batch_x:0", batch_x_lens_name: str="batch_x_lens:0", output_name: str="hyp_dense_decoded:0") -> List[List[str]]: """Use an existing tensorflow model that exists on disk to decode WAV files. Args: model_path_prefix: The path to the saved tensorflow model. This is the full prefix to the ".ckpt" file. input_paths: A sequence of `pathlib.Path`s to WAV files to put through the model provided. label_set: The set of all the labels this model uses. feature_type: The type of features this model uses. Note that this MUST match the type of features that the model was trained on initially. feat_dir: Any files that require preprocessing will be saved to the path specified by this. batch_x_name: The name of the tensorflow input for batch_x batch_x_lens_name: The name of the tensorflow input for batch_x_lens output_name: The name of the tensorflow output """ if not input_paths: raise PersephoneException("No untranscribed WAVs to transcribe.") model_path_prefix = str(model_path_prefix) for p in input_paths: if not p.exists(): raise PersephoneException( "The WAV file path {} does not exist".format(p) ) preprocessed_file_paths = [] for p in input_paths: prefix = p.stem # Check the "feat" directory as per the filesystem conventions of a Corpus feature_file_ext = ".{}.npy".format(feature_type) conventional_npy_location = p.parent.parent / "feat" / (Path(prefix + feature_file_ext)) if conventional_npy_location.exists(): # don't need to preprocess it preprocessed_file_paths.append(conventional_npy_location) else: if not feat_dir: feat_dir = p.parent.parent / "feat" if not feat_dir.is_dir(): os.makedirs(str(feat_dir)) mono16k_wav_path = feat_dir / "{}.wav".format(prefix) feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type) feat_extract.convert_wav(p, mono16k_wav_path) preprocessed_file_paths.append(feat_path) # preprocess the file that weren't found in the features directory # as per the filesystem conventions if feat_dir: feat_extract.from_dir(feat_dir, feature_type) fn_batches = utils.make_batches(preprocessed_file_paths, batch_size) # Load the model and perform decoding. metagraph = load_metagraph(model_path_prefix) with tf.Session() as sess: metagraph.restore(sess, model_path_prefix) for fn_batch in fn_batches: batch_x, batch_x_lens = utils.load_batch_x(fn_batch) # TODO These placeholder names should be a backup if names from a newer # naming scheme aren't present. Otherwise this won't generalize to # different architectures. feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens} dense_decoded = sess.run(output_name, feed_dict=feed_dict) # Create a human-readable representation of the decoded. indices_to_labels = labels.make_indices_to_labels(label_set) human_readable = dense_to_human_readable(dense_decoded, indices_to_labels) return human_readable
[ "def", "decode", "(", "model_path_prefix", ":", "Union", "[", "str", ",", "Path", "]", ",", "input_paths", ":", "Sequence", "[", "Path", "]", ",", "label_set", ":", "Set", "[", "str", "]", ",", "*", ",", "feature_type", ":", "str", "=", "\"fbank\"", ",", "#TODO Make this None and infer feature_type from dimension of NN input layer.", "batch_size", ":", "int", "=", "64", ",", "feat_dir", ":", "Optional", "[", "Path", "]", "=", "None", ",", "batch_x_name", ":", "str", "=", "\"batch_x:0\"", ",", "batch_x_lens_name", ":", "str", "=", "\"batch_x_lens:0\"", ",", "output_name", ":", "str", "=", "\"hyp_dense_decoded:0\"", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "if", "not", "input_paths", ":", "raise", "PersephoneException", "(", "\"No untranscribed WAVs to transcribe.\"", ")", "model_path_prefix", "=", "str", "(", "model_path_prefix", ")", "for", "p", "in", "input_paths", ":", "if", "not", "p", ".", "exists", "(", ")", ":", "raise", "PersephoneException", "(", "\"The WAV file path {} does not exist\"", ".", "format", "(", "p", ")", ")", "preprocessed_file_paths", "=", "[", "]", "for", "p", "in", "input_paths", ":", "prefix", "=", "p", ".", "stem", "# Check the \"feat\" directory as per the filesystem conventions of a Corpus", "feature_file_ext", "=", "\".{}.npy\"", ".", "format", "(", "feature_type", ")", "conventional_npy_location", "=", "p", ".", "parent", ".", "parent", "/", "\"feat\"", "/", "(", "Path", "(", "prefix", "+", "feature_file_ext", ")", ")", "if", "conventional_npy_location", ".", "exists", "(", ")", ":", "# don't need to preprocess it", "preprocessed_file_paths", ".", "append", "(", "conventional_npy_location", ")", "else", ":", "if", "not", "feat_dir", ":", "feat_dir", "=", "p", ".", "parent", ".", "parent", "/", "\"feat\"", "if", "not", "feat_dir", ".", "is_dir", "(", ")", ":", "os", ".", "makedirs", "(", "str", "(", "feat_dir", ")", ")", "mono16k_wav_path", "=", "feat_dir", "/", "\"{}.wav\"", ".", "format", "(", "prefix", ")", "feat_path", "=", "feat_dir", "/", "\"{}.{}.npy\"", ".", "format", "(", "prefix", ",", "feature_type", ")", "feat_extract", ".", "convert_wav", "(", "p", ",", "mono16k_wav_path", ")", "preprocessed_file_paths", ".", "append", "(", "feat_path", ")", "# preprocess the file that weren't found in the features directory", "# as per the filesystem conventions", "if", "feat_dir", ":", "feat_extract", ".", "from_dir", "(", "feat_dir", ",", "feature_type", ")", "fn_batches", "=", "utils", ".", "make_batches", "(", "preprocessed_file_paths", ",", "batch_size", ")", "# Load the model and perform decoding.", "metagraph", "=", "load_metagraph", "(", "model_path_prefix", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "metagraph", ".", "restore", "(", "sess", ",", "model_path_prefix", ")", "for", "fn_batch", "in", "fn_batches", ":", "batch_x", ",", "batch_x_lens", "=", "utils", ".", "load_batch_x", "(", "fn_batch", ")", "# TODO These placeholder names should be a backup if names from a newer", "# naming scheme aren't present. 
Otherwise this won't generalize to", "# different architectures.", "feed_dict", "=", "{", "batch_x_name", ":", "batch_x", ",", "batch_x_lens_name", ":", "batch_x_lens", "}", "dense_decoded", "=", "sess", ".", "run", "(", "output_name", ",", "feed_dict", "=", "feed_dict", ")", "# Create a human-readable representation of the decoded.", "indices_to_labels", "=", "labels", ".", "make_indices_to_labels", "(", "label_set", ")", "human_readable", "=", "dense_to_human_readable", "(", "dense_decoded", ",", "indices_to_labels", ")", "return", "human_readable" ]
Use a tensorflow model saved on disk to decode WAV files. Args: model_path_prefix: The path to the saved tensorflow model; the full prefix of the ".ckpt" file. input_paths: A sequence of `pathlib.Path`s to the WAV files to put through the model. label_set: The set of all labels this model uses. feature_type: The type of features this model uses. Note that this MUST match the type of features the model was originally trained on. feat_dir: The directory where any files that require preprocessing will be saved. batch_x_name: The name of the tensorflow input for batch_x. batch_x_lens_name: The name of the tensorflow input for batch_x_lens. output_name: The name of the tensorflow output.
[ "Use", "a", "tensorflow", "model", "saved", "on", "disk", "to", "decode", "WAV", "files", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L68-L153
train
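One thing worth flagging in the `decode` record above: `dense_decoded` is reassigned on every pass through `fn_batches`, so `human_readable` is built from the final batch only and earlier batches are silently dropped. A hedged sketch of the loop with accumulation, reusing the names in scope inside the function (the fix is a suggestion, not upstream behaviour):

all_dense_decoded = []
for fn_batch in fn_batches:
    batch_x, batch_x_lens = utils.load_batch_x(fn_batch)
    feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens}
    # Extend with every batch's decoded rows instead of overwriting them.
    all_dense_decoded.extend(sess.run(output_name, feed_dict=feed_dict))

indices_to_labels = labels.make_indices_to_labels(label_set)
human_readable = dense_to_human_readable(all_dense_decoded, indices_to_labels)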
persephone-tools/persephone
persephone/model.py
Model.eval
def eval(self, restore_model_path: Optional[str]=None) -> None: """ Evaluates the model on a test set.""" saver = tf.train.Saver() with tf.Session(config=allow_growth_config) as sess: if restore_model_path: logger.info("restoring model from %s", restore_model_path) saver.restore(sess, restore_model_path) else: assert self.saved_model_path, "{}".format(self.saved_model_path) logger.info("restoring model from %s", self.saved_model_path) saver.restore(sess, self.saved_model_path) test_x, test_x_lens, test_y = self.corpus_reader.test_batch() feed_dict = {self.batch_x: test_x, self.batch_x_lens: test_x_lens, self.batch_y: test_y} test_ler, dense_decoded, dense_ref = sess.run( [self.ler, self.dense_decoded, self.dense_ref], feed_dict=feed_dict) hyps, refs = self.corpus_reader.human_readable_hyp_ref( dense_decoded, dense_ref) # Log hypotheses hyps_dir = os.path.join(self.exp_dir, "test") if not os.path.isdir(hyps_dir): os.mkdir(hyps_dir) with open(os.path.join(hyps_dir, "hyps"), "w", encoding=ENCODING) as hyps_f: for hyp in hyps: print(" ".join(hyp), file=hyps_f) with open(os.path.join(hyps_dir, "refs"), "w", encoding=ENCODING) as refs_f: for ref in refs: print(" ".join(ref), file=refs_f) test_per = utils.batch_per(hyps, refs) assert test_per == test_ler with open(os.path.join(hyps_dir, "test_per"), "w", encoding=ENCODING) as per_f: print("LER: %f" % (test_ler), file=per_f)
python
def eval(self, restore_model_path: Optional[str]=None) -> None: """ Evaluates the model on a test set.""" saver = tf.train.Saver() with tf.Session(config=allow_growth_config) as sess: if restore_model_path: logger.info("restoring model from %s", restore_model_path) saver.restore(sess, restore_model_path) else: assert self.saved_model_path, "{}".format(self.saved_model_path) logger.info("restoring model from %s", self.saved_model_path) saver.restore(sess, self.saved_model_path) test_x, test_x_lens, test_y = self.corpus_reader.test_batch() feed_dict = {self.batch_x: test_x, self.batch_x_lens: test_x_lens, self.batch_y: test_y} test_ler, dense_decoded, dense_ref = sess.run( [self.ler, self.dense_decoded, self.dense_ref], feed_dict=feed_dict) hyps, refs = self.corpus_reader.human_readable_hyp_ref( dense_decoded, dense_ref) # Log hypotheses hyps_dir = os.path.join(self.exp_dir, "test") if not os.path.isdir(hyps_dir): os.mkdir(hyps_dir) with open(os.path.join(hyps_dir, "hyps"), "w", encoding=ENCODING) as hyps_f: for hyp in hyps: print(" ".join(hyp), file=hyps_f) with open(os.path.join(hyps_dir, "refs"), "w", encoding=ENCODING) as refs_f: for ref in refs: print(" ".join(ref), file=refs_f) test_per = utils.batch_per(hyps, refs) assert test_per == test_ler with open(os.path.join(hyps_dir, "test_per"), "w", encoding=ENCODING) as per_f: print("LER: %f" % (test_ler), file=per_f)
[ "def", "eval", "(", "self", ",", "restore_model_path", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "with", "tf", ".", "Session", "(", "config", "=", "allow_growth_config", ")", "as", "sess", ":", "if", "restore_model_path", ":", "logger", ".", "info", "(", "\"restoring model from %s\"", ",", "restore_model_path", ")", "saver", ".", "restore", "(", "sess", ",", "restore_model_path", ")", "else", ":", "assert", "self", ".", "saved_model_path", ",", "\"{}\"", ".", "format", "(", "self", ".", "saved_model_path", ")", "logger", ".", "info", "(", "\"restoring model from %s\"", ",", "self", ".", "saved_model_path", ")", "saver", ".", "restore", "(", "sess", ",", "self", ".", "saved_model_path", ")", "test_x", ",", "test_x_lens", ",", "test_y", "=", "self", ".", "corpus_reader", ".", "test_batch", "(", ")", "feed_dict", "=", "{", "self", ".", "batch_x", ":", "test_x", ",", "self", ".", "batch_x_lens", ":", "test_x_lens", ",", "self", ".", "batch_y", ":", "test_y", "}", "test_ler", ",", "dense_decoded", ",", "dense_ref", "=", "sess", ".", "run", "(", "[", "self", ".", "ler", ",", "self", ".", "dense_decoded", ",", "self", ".", "dense_ref", "]", ",", "feed_dict", "=", "feed_dict", ")", "hyps", ",", "refs", "=", "self", ".", "corpus_reader", ".", "human_readable_hyp_ref", "(", "dense_decoded", ",", "dense_ref", ")", "# Log hypotheses", "hyps_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "exp_dir", ",", "\"test\"", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "hyps_dir", ")", ":", "os", ".", "mkdir", "(", "hyps_dir", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "hyps_dir", ",", "\"hyps\"", ")", ",", "\"w\"", ",", "encoding", "=", "ENCODING", ")", "as", "hyps_f", ":", "for", "hyp", "in", "hyps", ":", "print", "(", "\" \"", ".", "join", "(", "hyp", ")", ",", "file", "=", "hyps_f", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "hyps_dir", ",", "\"refs\"", ")", ",", "\"w\"", ",", "encoding", "=", "ENCODING", ")", "as", "refs_f", ":", "for", "ref", "in", "refs", ":", "print", "(", "\" \"", ".", "join", "(", "ref", ")", ",", "file", "=", "refs_f", ")", "test_per", "=", "utils", ".", "batch_per", "(", "hyps", ",", "refs", ")", "assert", "test_per", "==", "test_ler", "with", "open", "(", "os", ".", "path", ".", "join", "(", "hyps_dir", ",", "\"test_per\"", ")", ",", "\"w\"", ",", "encoding", "=", "ENCODING", ")", "as", "per_f", ":", "print", "(", "\"LER: %f\"", "%", "(", "test_ler", ")", ",", "file", "=", "per_f", ")" ]
Evaluates the model on a test set.
[ "Evaluates", "the", "model", "on", "a", "test", "set", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L258-L299
train
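The `assert test_per == test_ler` in the record above compares two floats for exact equality, which only holds if both sides compute the identical quantity. For reference, a self-contained sketch of the conventional edit-distance-based error rate; whether `utils.batch_per` normalises in exactly this way is an assumption:

from typing import List, Sequence

def edit_distance(hyp: Sequence[str], ref: Sequence[str]) -> int:
    # Dynamic-programming Levenshtein distance over label sequences.
    prev = list(range(len(ref) + 1))
    for i, h in enumerate(hyp, start=1):
        curr = [i]
        for j, r in enumerate(ref, start=1):
            curr.append(min(prev[j] + 1,               # deletion
                            curr[j - 1] + 1,           # insertion
                            prev[j - 1] + (h != r)))   # substitution
        prev = curr
    return prev[-1]

def batch_error_rate(hyps: List[Sequence[str]], refs: List[Sequence[str]]) -> float:
    # Total edits over total reference length: the usual definition of
    # a label/phoneme error rate.
    total_edits = sum(edit_distance(h, r) for h, r in zip(hyps, refs))
    return total_edits / sum(len(r) for r in refs)

print(batch_error_rate([["a", "b"]], [["a", "c", "b"]]))  # 1 edit / 3 labels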
persephone-tools/persephone
persephone/model.py
Model.output_best_scores
def output_best_scores(self, best_epoch_str: str) -> None: """Output best scores to the filesystem""" BEST_SCORES_FILENAME = "best_scores.txt" with open(os.path.join(self.exp_dir, BEST_SCORES_FILENAME), "w", encoding=ENCODING) as best_f: print(best_epoch_str, file=best_f, flush=True)
python
def output_best_scores(self, best_epoch_str: str) -> None: """Output best scores to the filesystem""" BEST_SCORES_FILENAME = "best_scores.txt" with open(os.path.join(self.exp_dir, BEST_SCORES_FILENAME), "w", encoding=ENCODING) as best_f: print(best_epoch_str, file=best_f, flush=True)
[ "def", "output_best_scores", "(", "self", ",", "best_epoch_str", ":", "str", ")", "->", "None", ":", "BEST_SCORES_FILENAME", "=", "\"best_scores.txt\"", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "exp_dir", ",", "BEST_SCORES_FILENAME", ")", ",", "\"w\"", ",", "encoding", "=", "ENCODING", ")", "as", "best_f", ":", "print", "(", "best_epoch_str", ",", "file", "=", "best_f", ",", "flush", "=", "True", ")" ]
Output best scores to the filesystem
[ "Output", "best", "scores", "to", "the", "filesystem" ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L301-L306
train
persephone-tools/persephone
persephone/corpus.py
ensure_no_set_overlap
def ensure_no_set_overlap(train: Sequence[str], valid: Sequence[str], test: Sequence[str]) -> None: """ Ensures no test set data has creeped into the training set.""" logger.debug("Ensuring that the training, validation and test data sets have no overlap") train_s = set(train) valid_s = set(valid) test_s = set(test) if train_s & valid_s: logger.warning("train and valid have overlapping items: {}".format(train_s & valid_s)) raise PersephoneException("train and valid have overlapping items: {}".format(train_s & valid_s)) if train_s & test_s: logger.warning("train and test have overlapping items: {}".format(train_s & test_s)) raise PersephoneException("train and test have overlapping items: {}".format(train_s & test_s)) if valid_s & test_s: logger.warning("valid and test have overlapping items: {}".format(valid_s & test_s)) raise PersephoneException("valid and test have overlapping items: {}".format(valid_s & test_s))
python
def ensure_no_set_overlap(train: Sequence[str], valid: Sequence[str], test: Sequence[str]) -> None: """ Ensures no test set data has creeped into the training set.""" logger.debug("Ensuring that the training, validation and test data sets have no overlap") train_s = set(train) valid_s = set(valid) test_s = set(test) if train_s & valid_s: logger.warning("train and valid have overlapping items: {}".format(train_s & valid_s)) raise PersephoneException("train and valid have overlapping items: {}".format(train_s & valid_s)) if train_s & test_s: logger.warning("train and test have overlapping items: {}".format(train_s & test_s)) raise PersephoneException("train and test have overlapping items: {}".format(train_s & test_s)) if valid_s & test_s: logger.warning("valid and test have overlapping items: {}".format(valid_s & test_s)) raise PersephoneException("valid and test have overlapping items: {}".format(valid_s & test_s))
[ "def", "ensure_no_set_overlap", "(", "train", ":", "Sequence", "[", "str", "]", ",", "valid", ":", "Sequence", "[", "str", "]", ",", "test", ":", "Sequence", "[", "str", "]", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"Ensuring that the training, validation and test data sets have no overlap\"", ")", "train_s", "=", "set", "(", "train", ")", "valid_s", "=", "set", "(", "valid", ")", "test_s", "=", "set", "(", "test", ")", "if", "train_s", "&", "valid_s", ":", "logger", ".", "warning", "(", "\"train and valid have overlapping items: {}\"", ".", "format", "(", "train_s", "&", "valid_s", ")", ")", "raise", "PersephoneException", "(", "\"train and valid have overlapping items: {}\"", ".", "format", "(", "train_s", "&", "valid_s", ")", ")", "if", "train_s", "&", "test_s", ":", "logger", ".", "warning", "(", "\"train and test have overlapping items: {}\"", ".", "format", "(", "train_s", "&", "test_s", ")", ")", "raise", "PersephoneException", "(", "\"train and test have overlapping items: {}\"", ".", "format", "(", "train_s", "&", "test_s", ")", ")", "if", "valid_s", "&", "test_s", ":", "logger", ".", "warning", "(", "\"valid and test have overlapping items: {}\"", ".", "format", "(", "valid_s", "&", "test_s", ")", ")", "raise", "PersephoneException", "(", "\"valid and test have overlapping items: {}\"", ".", "format", "(", "valid_s", "&", "test_s", ")", ")" ]
Ensures no test set data has crept into the training set.
[ "Ensures", "no", "test", "set", "data", "has", "crept", "into", "the", "training", "set", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L31-L47
train
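A quick usage sketch for the record above, with hypothetical utterance prefixes; the import paths are assumptions about the package layout:

from persephone.corpus import ensure_no_set_overlap
from persephone.exceptions import PersephoneException

try:
    # "utt2" appears in both train and test, so this raises.
    ensure_no_set_overlap(train=["utt1", "utt2"], valid=["utt3"], test=["utt2"])
except PersephoneException as err:
    print(err)  # train and test have overlapping items: {'utt2'}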
persephone-tools/persephone
persephone/corpus.py
get_untranscribed_prefixes_from_file
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]: """ The file "untranscribed_prefixes.txt" will specify prefixes which do not have an associated transcription file if placed in the target directory. This will fetch those prefixes from that file and will return an empty list if that file does not exist. See find_untranscribed_wavs function for finding untranscribed prefixes in an experiment directory. Returns: A list of all untranscribed prefixes as specified in the file """ untranscribed_prefix_fn = target_directory / "untranscribed_prefixes.txt" if untranscribed_prefix_fn.exists(): with untranscribed_prefix_fn.open() as f: prefixes = f.readlines() return [prefix.strip() for prefix in prefixes] else: #logger.warning("Attempting to get untranscribed prefixes but the file ({})" # " that should specify these does not exist".format(untranscribed_prefix_fn)) pass return []
python
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]: """ The file "untranscribed_prefixes.txt" will specify prefixes which do not have an associated transcription file if placed in the target directory. This will fetch those prefixes from that file and will return an empty list if that file does not exist. See find_untranscribed_wavs function for finding untranscribed prefixes in an experiment directory. Returns: A list of all untranscribed prefixes as specified in the file """ untranscribed_prefix_fn = target_directory / "untranscribed_prefixes.txt" if untranscribed_prefix_fn.exists(): with untranscribed_prefix_fn.open() as f: prefixes = f.readlines() return [prefix.strip() for prefix in prefixes] else: #logger.warning("Attempting to get untranscribed prefixes but the file ({})" # " that should specify these does not exist".format(untranscribed_prefix_fn)) pass return []
[ "def", "get_untranscribed_prefixes_from_file", "(", "target_directory", ":", "Path", ")", "->", "List", "[", "str", "]", ":", "untranscribed_prefix_fn", "=", "target_directory", "/", "\"untranscribed_prefixes.txt\"", "if", "untranscribed_prefix_fn", ".", "exists", "(", ")", ":", "with", "untranscribed_prefix_fn", ".", "open", "(", ")", "as", "f", ":", "prefixes", "=", "f", ".", "readlines", "(", ")", "return", "[", "prefix", ".", "strip", "(", ")", "for", "prefix", "in", "prefixes", "]", "else", ":", "#logger.warning(\"Attempting to get untranscribed prefixes but the file ({})\"", "# \" that should specify these does not exist\".format(untranscribed_prefix_fn))", "pass", "return", "[", "]" ]
The file "untranscribed_prefixes.txt" will specify prefixes which do not have an associated transcription file if placed in the target directory. This will fetch those prefixes from that file and will return an empty list if that file does not exist. See find_untranscribed_wavs function for finding untranscribed prefixes in an experiment directory. Returns: A list of all untranscribed prefixes as specified in the file
[ "The", "file", "untranscribed_prefixes", ".", "txt", "will", "specify", "prefixes", "which", "do", "not", "have", "an", "associated", "transcription", "file", "if", "placed", "in", "the", "target", "directory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L69-L94
train
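A minimal round-trip sketch for the record above, writing a hypothetical prefixes file and reading it back (the import path is an assumption):

import tempfile
from pathlib import Path

from persephone.corpus import get_untranscribed_prefixes_from_file

with tempfile.TemporaryDirectory() as d:
    target = Path(d)
    (target / "untranscribed_prefixes.txt").write_text("utt1\nutt2\n")
    print(get_untranscribed_prefixes_from_file(target))        # ['utt1', 'utt2']
    print(get_untranscribed_prefixes_from_file(target / "x"))  # [] (no such file)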
persephone-tools/persephone
persephone/corpus.py
determine_labels
def determine_labels(target_dir: Path, label_type: str) -> Set[str]: """ Returns a set of all phonemes found in the corpus. Assumes that WAV files and label files are split into utterances and segregated in a directory which contains a "wav" subdirectory and "label" subdirectory. Arguments: target_dir: A `Path` to the directory where the corpus data is found label_type: The type of label we are creating the label set from. For example "phonemes" would only search for labels for that type. """ logger.info("Finding phonemes of type %s in directory %s", label_type, target_dir) label_dir = target_dir / "label/" if not label_dir.is_dir(): raise FileNotFoundError( "The directory {} does not exist.".format(target_dir)) phonemes = set() # type: Set[str] for fn in os.listdir(str(label_dir)): if fn.endswith(str(label_type)): with (label_dir / fn).open("r", encoding=ENCODING) as f: try: line_phonemes = set(f.readline().split()) except UnicodeDecodeError: logger.error("Unicode decode error on file %s", fn) print("Unicode decode error on file {}".format(fn)) raise phonemes = phonemes.union(line_phonemes) return phonemes
python
def determine_labels(target_dir: Path, label_type: str) -> Set[str]: """ Returns a set of all phonemes found in the corpus. Assumes that WAV files and label files are split into utterances and segregated in a directory which contains a "wav" subdirectory and "label" subdirectory. Arguments: target_dir: A `Path` to the directory where the corpus data is found label_type: The type of label we are creating the label set from. For example "phonemes" would only search for labels for that type. """ logger.info("Finding phonemes of type %s in directory %s", label_type, target_dir) label_dir = target_dir / "label/" if not label_dir.is_dir(): raise FileNotFoundError( "The directory {} does not exist.".format(target_dir)) phonemes = set() # type: Set[str] for fn in os.listdir(str(label_dir)): if fn.endswith(str(label_type)): with (label_dir / fn).open("r", encoding=ENCODING) as f: try: line_phonemes = set(f.readline().split()) except UnicodeDecodeError: logger.error("Unicode decode error on file %s", fn) print("Unicode decode error on file {}".format(fn)) raise phonemes = phonemes.union(line_phonemes) return phonemes
[ "def", "determine_labels", "(", "target_dir", ":", "Path", ",", "label_type", ":", "str", ")", "->", "Set", "[", "str", "]", ":", "logger", ".", "info", "(", "\"Finding phonemes of type %s in directory %s\"", ",", "label_type", ",", "target_dir", ")", "label_dir", "=", "target_dir", "/", "\"label/\"", "if", "not", "label_dir", ".", "is_dir", "(", ")", ":", "raise", "FileNotFoundError", "(", "\"The directory {} does not exist.\"", ".", "format", "(", "target_dir", ")", ")", "phonemes", "=", "set", "(", ")", "# type: Set[str]", "for", "fn", "in", "os", ".", "listdir", "(", "str", "(", "label_dir", ")", ")", ":", "if", "fn", ".", "endswith", "(", "str", "(", "label_type", ")", ")", ":", "with", "(", "label_dir", "/", "fn", ")", ".", "open", "(", "\"r\"", ",", "encoding", "=", "ENCODING", ")", "as", "f", ":", "try", ":", "line_phonemes", "=", "set", "(", "f", ".", "readline", "(", ")", ".", "split", "(", ")", ")", "except", "UnicodeDecodeError", ":", "logger", ".", "error", "(", "\"Unicode decode error on file %s\"", ",", "fn", ")", "print", "(", "\"Unicode decode error on file {}\"", ".", "format", "(", "fn", ")", ")", "raise", "phonemes", "=", "phonemes", ".", "union", "(", "line_phonemes", ")", "return", "phonemes" ]
Returns a set of all phonemes found in the corpus. Assumes that WAV files and label files are split into utterances and segregated in a directory which contains a "wav" subdirectory and "label" subdirectory. Arguments: target_dir: A `Path` to the directory where the corpus data is found label_type: The type of label we are creating the label set from. For example "phonemes" would only search for labels for that type.
[ "Returns", "a", "set", "of", "all", "phonemes", "found", "in", "the", "corpus", ".", "Assumes", "that", "WAV", "files", "and", "label", "files", "are", "split", "into", "utterances", "and", "segregated", "in", "a", "directory", "which", "contains", "a", "wav", "subdirectory", "and", "label", "subdirectory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L617-L645
train
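Usage sketch for the record above; note that `determine_labels` reads only the first line of each label file via `f.readline()`, and that the FileNotFoundError message reports `target_dir` even though it is the absence of `label_dir` that triggers it. Paths and file contents here are hypothetical (import path assumed):

import tempfile
from pathlib import Path

from persephone.corpus import determine_labels

with tempfile.TemporaryDirectory() as d:
    corpus_dir = Path(d)
    (corpus_dir / "label").mkdir()
    (corpus_dir / "label" / "utt1.phonemes").write_text("a b c")
    (corpus_dir / "label" / "utt2.phonemes").write_text("b d")
    print(sorted(determine_labels(corpus_dir, "phonemes")))  # ['a', 'b', 'c', 'd']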
persephone-tools/persephone
persephone/corpus.py
Corpus.from_elan
def from_elan(cls: Type[CorpusT], org_dir: Path, tgt_dir: Path, feat_type: str = "fbank", label_type: str = "phonemes", utterance_filter: Callable[[Utterance], bool] = None, label_segmenter: Optional[LabelSegmenter] = None, speakers: List[str] = None, lazy: bool = True, tier_prefixes: Tuple[str, ...] = ("xv", "rf")) -> CorpusT: """ Construct a `Corpus` from ELAN files. Args: org_dir: A path to the directory containing the unpreprocessed data. tgt_dir: A path to the directory where the preprocessed data will be stored. feat_type: A string describing the input speech features. For example, "fbank" for log Mel filterbank features. label_type: A string describing the transcription labels. For example, "phonemes" or "tones". utterance_filter: A function that returns False if an utterance should not be included in the corpus and True otherwise. This can be used to remove undesirable utterances for training, such as codeswitched utterances. label_segmenter: An object that has an attribute `segment_labels`, which is creates new `Utterance` instances from old ones, by segmenting the tokens in their `text` attribute. Note, `LabelSegmenter` might be better as a function, the only issue is it needs to carry with it a list of labels. This could potentially be a function attribute. speakers: A list of speakers to filter for. If `None`, utterances from all speakers are included. tier_prefixes: A collection of strings that prefix ELAN tiers to filter for. For example, if this is `("xv", "rf")`, then tiers named "xv", "xv@Mark", "rf@Rose" would be extracted if they existed. """ # This currently bails out if label_segmenter is not provided if not label_segmenter: raise ValueError("A label segmenter must be provided via label_segmenter") # In case path is supplied as a string, make it a Path if isinstance(tgt_dir, str): tgt_dir = Path(tgt_dir) # Read utterances from org_dir. utterances = elan.utterances_from_dir(org_dir, tier_prefixes=tier_prefixes) # Filter utterances based on some criteria (such as codeswitching). if utterance_filter: utterances = [utter for utter in utterances if utterance_filter(utter)] utterances = utterance.remove_duplicates(utterances) # Segment the labels in the utterances appropriately if label_segmenter: utterances = [label_segmenter.segment_labels(utter) for utter in utterances] # Remove utterances without transcriptions. utterances = utterance.remove_empty_text(utterances) # Remove utterances with exceptionally short wav_files that are too # short for CTC to work. utterances = utterance.remove_too_short(utterances) tgt_dir.mkdir(parents=True, exist_ok=True) # TODO A lot of these methods aren't ELAN-specific. preprocess.elan was # only used to get the utterances. There could be another Corpus # factory method that takes Utterance objects. the fromElan and # fromPangloss constructors could call this. # Writes the transcriptions to the tgt_dir/label/ dir utterance.write_transcriptions(utterances, (tgt_dir / "label"), label_type, lazy=lazy) # Extracts utterance level WAV information from the input file. wav.extract_wavs(utterances, (tgt_dir / "wav"), lazy=lazy) corpus = cls(feat_type, label_type, tgt_dir, labels=label_segmenter.labels, speakers=speakers) corpus.utterances = utterances return corpus
python
def from_elan(cls: Type[CorpusT], org_dir: Path, tgt_dir: Path, feat_type: str = "fbank", label_type: str = "phonemes", utterance_filter: Callable[[Utterance], bool] = None, label_segmenter: Optional[LabelSegmenter] = None, speakers: List[str] = None, lazy: bool = True, tier_prefixes: Tuple[str, ...] = ("xv", "rf")) -> CorpusT: """ Construct a `Corpus` from ELAN files. Args: org_dir: A path to the directory containing the unpreprocessed data. tgt_dir: A path to the directory where the preprocessed data will be stored. feat_type: A string describing the input speech features. For example, "fbank" for log Mel filterbank features. label_type: A string describing the transcription labels. For example, "phonemes" or "tones". utterance_filter: A function that returns False if an utterance should not be included in the corpus and True otherwise. This can be used to remove undesirable utterances for training, such as codeswitched utterances. label_segmenter: An object that has an attribute `segment_labels`, which is creates new `Utterance` instances from old ones, by segmenting the tokens in their `text` attribute. Note, `LabelSegmenter` might be better as a function, the only issue is it needs to carry with it a list of labels. This could potentially be a function attribute. speakers: A list of speakers to filter for. If `None`, utterances from all speakers are included. tier_prefixes: A collection of strings that prefix ELAN tiers to filter for. For example, if this is `("xv", "rf")`, then tiers named "xv", "xv@Mark", "rf@Rose" would be extracted if they existed. """ # This currently bails out if label_segmenter is not provided if not label_segmenter: raise ValueError("A label segmenter must be provided via label_segmenter") # In case path is supplied as a string, make it a Path if isinstance(tgt_dir, str): tgt_dir = Path(tgt_dir) # Read utterances from org_dir. utterances = elan.utterances_from_dir(org_dir, tier_prefixes=tier_prefixes) # Filter utterances based on some criteria (such as codeswitching). if utterance_filter: utterances = [utter for utter in utterances if utterance_filter(utter)] utterances = utterance.remove_duplicates(utterances) # Segment the labels in the utterances appropriately if label_segmenter: utterances = [label_segmenter.segment_labels(utter) for utter in utterances] # Remove utterances without transcriptions. utterances = utterance.remove_empty_text(utterances) # Remove utterances with exceptionally short wav_files that are too # short for CTC to work. utterances = utterance.remove_too_short(utterances) tgt_dir.mkdir(parents=True, exist_ok=True) # TODO A lot of these methods aren't ELAN-specific. preprocess.elan was # only used to get the utterances. There could be another Corpus # factory method that takes Utterance objects. the fromElan and # fromPangloss constructors could call this. # Writes the transcriptions to the tgt_dir/label/ dir utterance.write_transcriptions(utterances, (tgt_dir / "label"), label_type, lazy=lazy) # Extracts utterance level WAV information from the input file. wav.extract_wavs(utterances, (tgt_dir / "wav"), lazy=lazy) corpus = cls(feat_type, label_type, tgt_dir, labels=label_segmenter.labels, speakers=speakers) corpus.utterances = utterances return corpus
[ "def", "from_elan", "(", "cls", ":", "Type", "[", "CorpusT", "]", ",", "org_dir", ":", "Path", ",", "tgt_dir", ":", "Path", ",", "feat_type", ":", "str", "=", "\"fbank\"", ",", "label_type", ":", "str", "=", "\"phonemes\"", ",", "utterance_filter", ":", "Callable", "[", "[", "Utterance", "]", ",", "bool", "]", "=", "None", ",", "label_segmenter", ":", "Optional", "[", "LabelSegmenter", "]", "=", "None", ",", "speakers", ":", "List", "[", "str", "]", "=", "None", ",", "lazy", ":", "bool", "=", "True", ",", "tier_prefixes", ":", "Tuple", "[", "str", ",", "...", "]", "=", "(", "\"xv\"", ",", "\"rf\"", ")", ")", "->", "CorpusT", ":", "# This currently bails out if label_segmenter is not provided", "if", "not", "label_segmenter", ":", "raise", "ValueError", "(", "\"A label segmenter must be provided via label_segmenter\"", ")", "# In case path is supplied as a string, make it a Path", "if", "isinstance", "(", "tgt_dir", ",", "str", ")", ":", "tgt_dir", "=", "Path", "(", "tgt_dir", ")", "# Read utterances from org_dir.", "utterances", "=", "elan", ".", "utterances_from_dir", "(", "org_dir", ",", "tier_prefixes", "=", "tier_prefixes", ")", "# Filter utterances based on some criteria (such as codeswitching).", "if", "utterance_filter", ":", "utterances", "=", "[", "utter", "for", "utter", "in", "utterances", "if", "utterance_filter", "(", "utter", ")", "]", "utterances", "=", "utterance", ".", "remove_duplicates", "(", "utterances", ")", "# Segment the labels in the utterances appropriately", "if", "label_segmenter", ":", "utterances", "=", "[", "label_segmenter", ".", "segment_labels", "(", "utter", ")", "for", "utter", "in", "utterances", "]", "# Remove utterances without transcriptions.", "utterances", "=", "utterance", ".", "remove_empty_text", "(", "utterances", ")", "# Remove utterances with exceptionally short wav_files that are too", "# short for CTC to work.", "utterances", "=", "utterance", ".", "remove_too_short", "(", "utterances", ")", "tgt_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "# TODO A lot of these methods aren't ELAN-specific. preprocess.elan was", "# only used to get the utterances. There could be another Corpus", "# factory method that takes Utterance objects. the fromElan and", "# fromPangloss constructors could call this.", "# Writes the transcriptions to the tgt_dir/label/ dir", "utterance", ".", "write_transcriptions", "(", "utterances", ",", "(", "tgt_dir", "/", "\"label\"", ")", ",", "label_type", ",", "lazy", "=", "lazy", ")", "# Extracts utterance level WAV information from the input file.", "wav", ".", "extract_wavs", "(", "utterances", ",", "(", "tgt_dir", "/", "\"wav\"", ")", ",", "lazy", "=", "lazy", ")", "corpus", "=", "cls", "(", "feat_type", ",", "label_type", ",", "tgt_dir", ",", "labels", "=", "label_segmenter", ".", "labels", ",", "speakers", "=", "speakers", ")", "corpus", ".", "utterances", "=", "utterances", "return", "corpus" ]
Construct a `Corpus` from ELAN files. Args: org_dir: A path to the directory containing the unpreprocessed data. tgt_dir: A path to the directory where the preprocessed data will be stored. feat_type: A string describing the input speech features. For example, "fbank" for log Mel filterbank features. label_type: A string describing the transcription labels. For example, "phonemes" or "tones". utterance_filter: A function that returns False if an utterance should not be included in the corpus and True otherwise. This can be used to remove undesirable utterances for training, such as codeswitched utterances. label_segmenter: An object that has an attribute `segment_labels`, which creates new `Utterance` instances from old ones by segmenting the tokens in their `text` attribute. Note that `LabelSegmenter` might be better as a function; the only issue is that it needs to carry a list of labels with it. This could potentially be a function attribute. speakers: A list of speakers to filter for. If `None`, utterances from all speakers are included. tier_prefixes: A collection of strings that prefix ELAN tiers to filter for. For example, if this is `("xv", "rf")`, then tiers named "xv", "xv@Mark", "rf@Rose" would be extracted if they existed.
[ "Construct", "a", "Corpus", "from", "ELAN", "files", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L236-L315
train
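A hedged usage sketch for the `from_elan` record above; `my_segmenter`, the filter condition, and the directory paths are all hypothetical, and the `Corpus` import path is an assumption:

from pathlib import Path

from persephone.corpus import Corpus

corpus = Corpus.from_elan(
    org_dir=Path("recordings/elan"),   # hypothetical unpreprocessed data
    tgt_dir=Path("preprocessed"),      # hypothetical output directory
    label_segmenter=my_segmenter,      # a LabelSegmenter defined elsewhere; required, else ValueError
    utterance_filter=lambda utt: "eng" not in utt.text,  # hypothetical codeswitching test
    tier_prefixes=("xv",),
)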
persephone-tools/persephone
persephone/corpus.py
Corpus.set_and_check_directories
def set_and_check_directories(self, tgt_dir: Path) -> None: """ Make sure that the required directories exist in the target directory. set variables accordingly. """ logger.info("Setting up directories for corpus in %s", tgt_dir) # Check directories exist. if not tgt_dir.is_dir(): raise FileNotFoundError( "The directory {} does not exist.".format(tgt_dir)) if not self.wav_dir.is_dir(): raise PersephoneException( "The supplied path requires a 'wav' subdirectory.") self.feat_dir.mkdir(parents=True, exist_ok=True) if not self.label_dir.is_dir(): raise PersephoneException( "The supplied path requires a 'label' subdirectory.")
python
def set_and_check_directories(self, tgt_dir: Path) -> None: """ Make sure that the required directories exist in the target directory. set variables accordingly. """ logger.info("Setting up directories for corpus in %s", tgt_dir) # Check directories exist. if not tgt_dir.is_dir(): raise FileNotFoundError( "The directory {} does not exist.".format(tgt_dir)) if not self.wav_dir.is_dir(): raise PersephoneException( "The supplied path requires a 'wav' subdirectory.") self.feat_dir.mkdir(parents=True, exist_ok=True) if not self.label_dir.is_dir(): raise PersephoneException( "The supplied path requires a 'label' subdirectory.")
[ "def", "set_and_check_directories", "(", "self", ",", "tgt_dir", ":", "Path", ")", "->", "None", ":", "logger", ".", "info", "(", "\"Setting up directories for corpus in %s\"", ",", "tgt_dir", ")", "# Check directories exist.", "if", "not", "tgt_dir", ".", "is_dir", "(", ")", ":", "raise", "FileNotFoundError", "(", "\"The directory {} does not exist.\"", ".", "format", "(", "tgt_dir", ")", ")", "if", "not", "self", ".", "wav_dir", ".", "is_dir", "(", ")", ":", "raise", "PersephoneException", "(", "\"The supplied path requires a 'wav' subdirectory.\"", ")", "self", ".", "feat_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "if", "not", "self", ".", "label_dir", ".", "is_dir", "(", ")", ":", "raise", "PersephoneException", "(", "\"The supplied path requires a 'label' subdirectory.\"", ")" ]
Make sure that the required directories exist in the target directory, and set variables accordingly.
[ "Make", "sure", "that", "the", "required", "directories", "exist", "in", "the", "target", "directory", "and", "set", "variables", "accordingly", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L338-L355
train
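The directory convention the record above enforces, sketched as a plain layout (names taken from the checks themselves):

tgt_dir/
    wav/    # must already exist, else PersephoneException
    label/  # must already exist, else PersephoneException
    feat/   # created on demand with mkdir(parents=True, exist_ok=True)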