sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def get_context_data(self, **kwargs):
    """
    Supplement the normal context data with our fields and labels, the
    pagination parameter strings, pjax/base-template selection and any
    configured refresh interval.
    """
    context = super(SmartView, self).get_context_data(**kwargs)

    # derive our field config and fields up front so templates can use them
    self.field_config = self.derive_field_config()
    self.fields = self.derive_fields()

    # build up our current parameter string, EXCLUSIVE of our page. These
    # are used to build pagination URLs
    url_params = "?"
    order_params = ""
    for key in self.request.GET.keys():
        if key != 'page' and key != 'pjax' and (len(key) == 0 or key[0] != '_'):
            for value in self.request.GET.getlist(key):
                url_params += "%s=%s&" % (key, urlquote(value))
        elif key == '_order':
            order_params = "&".join(["%s=%s" % (key, val) for val in self.request.GET.getlist(key)])

    context['url_params'] = url_params
    context['order_params'] = order_params + "&"
    context['pjax'] = self.pjax

    # set our blocks
    context['blocks'] = dict()

    # stuff it all in our context
    context['fields'] = self.fields
    context['view'] = self
    context['field_config'] = self.field_config
    context['title'] = self.derive_title()

    # and any extra context the user specified
    context.update(self.extra_context)

    # by default, our base is 'base.html', but we might be pjax
    is_pjax = ('pjax' in self.request.GET or 'pjax' in self.request.POST
               or 'HTTP_X_PJAX' in self.request.META)
    context['base_template'] = "smartmin/pjax.html" if is_pjax else "base.html"

    # set our refresh if we have one
    refresh = self.derive_refresh()
    if refresh:
        context['refresh'] = refresh

    return context
We supplement the normal context data by adding our fields and labels.
entailment
def render_to_response(self, context, **response_kwargs):
    """
    Render the response, honoring a `_format=json` request parameter by
    returning the context serialized as JSON instead of HTML.
    """
    # should we actually render in json?
    if self.request.GET.get('_format') == 'json':
        return JsonResponse(self.as_json(context), safe=False)

    # otherwise, return normally
    return super(SmartView, self).render_to_response(context)
Overloaded to deal with _format arguments.
entailment
def derive_fields(self):
    """
    Work out which fields to display: the explicit `fields` attribute when
    set, otherwise every field on our object's model minus any excluded ones.
    """
    if self.fields:
        return list(self.fields)

    # no explicit fields, so take all model fields that aren't excluded
    exclude = self.derive_exclude()
    return [field.name for field in self.object._meta.fields if field.name not in exclude]
Derives our fields. We first default to using our 'fields' variable if available, otherwise we figure it out from our object.
entailment
def get_context_data(self, **kwargs):
    """
    Expose the field used for naming the object and the cancel URL to the
    delete-confirmation template.
    """
    context = super(SmartDeleteView, self).get_context_data(**kwargs)
    context['name_field'] = self.name_field
    context['cancel_url'] = self.get_cancel_url()
    return context
Add in the field to use for the name field
entailment
def derive_title(self):
    """
    Use the explicitly configured title if any, otherwise fall back to the
    title-cased plural name of the model.
    """
    title = super(SmartListView, self).derive_title()
    if title:
        return title
    return force_text(self.model._meta.verbose_name_plural).title()
Derives our title from our list
entailment
def derive_link_fields(self, context):
    """
    Return the set() of field names that should be rendered as links; by
    default only the first field that isn't 'is_active' is linkable.
    """
    if self.link_fields is not None:
        return self.link_fields

    linkable = set()
    if self.fields:
        for name in self.fields:
            if name != 'is_active':
                linkable.add(name)
                break
    return linkable
Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable.
entailment
def lookup_field_orderable(self, field):
    """
    Whether the passed in field can be used for sorting; only 'raw' fields
    that actually exist on the model are sortable.
    """
    try:
        self.model._meta.get_field_by_name(field)
    except Exception:
        # that field doesn't exist, so not sortable
        return False
    return True
Returns whether the passed in field is sortable or not, by default all 'raw' fields, that is fields that are part of the model are sortable.
entailment
def get_context_data(self, **kwargs):
    """
    Add the linkable fields, the current search term and the active
    ordering (field plus direction) to the context.
    """
    context = super(SmartListView, self).get_context_data(**kwargs)

    # our linkable fields
    self.link_fields = self.derive_link_fields(context)
    context['link_fields'] = self.link_fields

    # our search term if any
    if 'search' in self.request.GET:
        context['search'] = self.request.GET['search']

    # our ordering field if any ('-' prefix means descending)
    order = self.derive_ordering()
    if order:
        descending = order[0] == '-'
        context['order'] = order[1:] if descending else order
        context['order_asc'] = not descending

    return context
Add in what fields are linkable
entailment
def derive_queryset(self, **kwargs):
    """
    Build the base queryset, applying any search-term filtering (each
    whitespace-separated term must match at least one search field) and any
    select_related hints.
    """
    # get our parent queryset
    queryset = super(SmartListView, self).get_queryset(**kwargs)

    # apply any filtering
    search_fields = self.derive_search_fields()
    search_query = self.request.GET.get('search')
    if search_fields and search_query:
        term_queries = []
        for term in search_query.split(' '):
            field_queries = [Q(**{field: term}) for field in search_fields]
            term_queries.append(reduce(operator.or_, field_queries))
        queryset = queryset.filter(reduce(operator.and_, term_queries))

    # add any select related
    related = self.derive_select_related()
    if related:
        queryset = queryset.select_related(*related)

    return queryset
Derives our queryset.
entailment
def get_queryset(self, **kwargs):
    """
    Derive our (possibly filtered) queryset, then apply ordering to it.
    """
    return self.order_queryset(self.derive_queryset(**kwargs))
Gets our queryset. This takes care of filtering if there are any fields to filter by.
entailment
def derive_ordering(self):
    """
    The field to order by (a '-' prefix indicates descending sort); returns
    None when the queryset's default order should be used.
    """
    if '_order' in self.request.GET:
        return self.request.GET['_order']
    return self.default_order if self.default_order else None
Returns what field should be used for ordering (using a prepended '-' to indicate descending sort). If the default order of the queryset should be used, returns None
entailment
def order_queryset(self, queryset):
    """
    Return a new queryset with ordering applied (from the `_order` query
    parameter by default), validating request-supplied field names against
    our derived fields.
    """
    order = self.derive_ordering()

    # an ordering that came in on the request must name a real field
    if '_order' in self.request.GET:
        if order.lstrip('-') not in self.derive_fields():
            order = None

    if order:
        # a single string becomes a one-element sequence for order_by
        if isinstance(order, str):
            order = (order,)
        queryset = queryset.order_by(*order)

    return queryset
Orders the passed in queryset, returning a new queryset in response. By default uses the _order query parameter.
entailment
def derive_fields(self):
    """
    The fields to show in the list: the explicit `fields` attribute when
    set, else every field on the list's model except 'id'.
    """
    if self.fields:
        return self.fields
    return [f.name for f in self.object_list.model._meta.fields if f.name != 'id']
Derives our fields.
entailment
def render_to_response(self, context, **response_kwargs):
    """
    Render the list, returning select2-compatible JSON when requested via
    `_format=select2`; otherwise render normally.
    """
    if self.request.GET.get('_format', 'html') == 'select2':
        results = []
        for obj in context['object_list']:
            # let the object provide its own select2 representation if it can
            result = obj.as_select2() if hasattr(obj, 'as_select2') else None
            if not result:
                result = dict(id=obj.pk, text="%s" % obj)
            results.append(result)

        json_data = dict(results=results, err='nil', more=context['page_obj'].has_next())
        return JsonResponse(json_data)

    return super(SmartListView, self).render_to_response(context)
Overloaded to deal with _format arguments.
entailment
def get_form(self):
    """
    Build the form instance for this view: drop excluded and readonly
    fields, limit the form to our derived field list, stuff in a hidden
    'loc' field holding the referer (used as the post-submit return
    location), and give subclasses a hook to customize each field.
    """
    self.form = super(SmartFormMixin, self).get_form()
    fields = list(self.derive_fields())

    # apply our field filtering on our form class
    exclude = self.derive_exclude()
    exclude += self.derive_readonly()

    # remove any excluded fields
    for name in exclude:
        if name in self.form.fields:
            del self.form.fields[name]

    if fields is not None:
        # filter the form down to just our derived fields
        extraneous = [name for name in self.form.fields.keys() if name not in fields]
        for name in extraneous:
            del self.form.fields[name]

    # stuff in our referer as the default location for where to return
    location = forms.CharField(widget=forms.widgets.HiddenInput(), required=False)
    if 'HTTP_REFERER' in self.request.META:
        location.initial = self.request.META['HTTP_REFERER']

    # add the location to our form fields
    self.form.fields['loc'] = location
    if fields:
        fields.append('loc')

    # provides a hook to programmatically customize fields before rendering
    for (name, field) in self.form.fields.items():
        self.form.fields[name] = self.customize_form_field(name, field)

    return self.form
Returns an instance of the form to be used in this view.
entailment
def customize_form_field(self, name, field):
    """
    Hook for per-field customization: swaps smartmin's DatePicker and image
    thumbnail widgets in for the stock Django date and image widgets.
    """
    if isinstance(field, forms.fields.DateField):
        if isinstance(field.widget, forms.widgets.DateInput):
            field.widget = widgets.DatePickerWidget()
            field.input_formats = [field.widget.input_format[1]] + list(field.input_formats)

    if isinstance(field, forms.fields.ImageField):
        if isinstance(field.widget, forms.widgets.ClearableFileInput):
            field.widget = widgets.ImageThumbnailWidget()

    return field
Allows views to customize their form fields. By default, Smartmin replaces the plain textbox date input with its own DatePicker implementation.
entailment
def lookup_field_label(self, context, field, default=None):
    """
    Find the label for the passed in field, preferring any label set on our
    form before letting the parent derive one from the field name.
    """
    # NOTE(review): any caller-supplied default is deliberately discarded
    # here in favor of the form's label — confirm intended
    default = next((f.label for f in self.form if f.name == field), None)

    return super(SmartFormMixin, self).lookup_field_label(context, field, default=default)
Figures out what the field label should be for the passed in field name. We overload this so as to use our form to see if there is label set there. If so then we'll pass that as the default instead of having our parent derive the field from the name.
entailment
def lookup_field_help(self, field, default=None):
    """
    Look up the help text for the passed in field, preferring any help text
    set explicitly on our form before deferring to the parent.
    """
    # NOTE(review): any caller-supplied default is deliberately discarded
    # here in favor of the form's help text — confirm intended
    default = next((f.help_text for f in self.form if f.name == field), None)

    return super(SmartFormMixin, self).lookup_field_help(field, default=default)
Looks up the help text for the passed in field. This is overloaded so that we can check whether our form has help text set explicitly. If so, we will pass this as the default to our parent function.
entailment
def derive_readonly(self):
    """
    The names of the readonly fields: the `readonly` attribute plus every
    field_config entry with a truthy 'readonly' flag.
    """
    readonly = list(self.readonly)
    for key, config in self.field_config.items():
        if config.get('readonly'):
            readonly.append(key)
    return readonly
Figures out what fields should be readonly. We iterate our field_config to find all that have a readonly of true
entailment
def derive_fields(self):
    """
    The fields to display: the explicit `fields` attribute (or the form's
    fields when unset), plus readonly fields, minus excluded ones.
    """
    if self.fields is not None:
        fields = list(self.fields)
    else:
        fields = [form_field.name for form_field in self.form]

    # readonly fields aren't on the form but still need to be displayed
    readonly = self.derive_readonly()
    if readonly:
        fields += readonly

    # remove any excluded fields
    for excluded in self.derive_exclude():
        if excluded in fields:
            fields.remove(excluded)

    return fields
Derives our fields.
entailment
def get_form_class(self):
    """
    The form class for this view: the explicit `form_class` when set,
    otherwise a ModelForm built from the most specific model available.
    """
    if self.form_class:
        return self.form_class

    if self.model is not None:
        # If a model has been explicitly provided, use it
        model = self.model
    elif getattr(self, 'object', None) is not None:
        # If this view is operating on a single object, use its class
        model = self.object.__class__
    else:
        # Try to get a queryset and extract the model class from that
        model = self.get_queryset().model

    # run time parameters when building our form
    factory_kwargs = self.get_factory_kwargs()
    return model_forms.modelform_factory(model, **factory_kwargs)
Returns the form class to use in this view
entailment
def get_factory_kwargs(self):
    """
    Extra parameters for the modelform factory: 'fields' (our fields minus
    any excluded or readonly ones) and 'exclude'.
    """
    params = dict()

    exclude = self.derive_exclude()
    exclude += self.derive_readonly()

    if self.fields:
        params['fields'] = [f for f in self.fields if f not in exclude]

    if exclude:
        params['exclude'] = exclude

    return params
Lets us specify any extra parameters we might want to call for our form factory. These can include: 'form', 'fields', 'exclude' or 'formfield_callback'
entailment
def get_success_url(self):
    """
    Where to redirect after a successful submit: the configured
    success_url, else the referer that was stuffed into the form's 'loc'
    field when it was created.
    """
    if self.success_url:
        # a smart url containing '@' references the current object
        obj = self.object if self.success_url.find('@') > 0 else None
        return smart_url(self.success_url, obj)

    if 'loc' in self.form.cleaned_data:
        return self.form.cleaned_data['loc']

    raise ImproperlyConfigured("No redirect location found, override get_success_url to not use redirect urls")
By default we use the referer that was stuffed in our form when it was created
entailment
def get_form_kwargs(self):
    """
    Supplement the standard form kwargs with our derived initial data.
    """
    kwargs = super(SmartFormMixin, self).get_form_kwargs()
    kwargs['initial'] = self.derive_initial()
    return kwargs
We override this, using only those fields specified if they are specified. Otherwise we include all fields in a standard ModelForm.
entailment
def derive_title(self):
    """
    The configured title when set, otherwise "Create <Model>" derived from
    the model's verbose name.
    """
    if self.title:
        return self.title
    return _("Create %s") % force_text(self.model._meta.verbose_name).title()
Derives our title from our object
entailment
def permission_for_action(self, action):
    """
    The permission string for the passed in action, e.g. "app.model_create".
    """
    parts = (self.app_name.lower(), self.model_name.lower(), action)
    return "%s.%s_%s" % parts
Returns the permission to use for the passed in action
entailment
def template_for_action(self, action):
    """
    The template path for the passed in action, e.g. "module/model_read.html".
    """
    parts = (self.module_name.lower(), self.model_name.lower(), action)
    return "%s/%s_%s.html" % parts
Returns the template to use for the passed in action
entailment
def url_name_for_action(self, action):
    """
    The reverse URL name for the passed in action, e.g. "module.model_list".
    """
    parts = (self.module_name.lower(), self.model_name.lower(), action)
    return "%s.%s_%s" % parts
Returns the reverse name for this action
entailment
def view_for_action(self, action):
    """
    Return the view class to use for the passed in action: a custom class
    attribute on this CRUDL when one exists (configured in place), else a
    dynamically built subclass of the matching Smart*View.
    """
    # this turns replace_foo into ReplaceFoo and read into Read
    class_name = "".join([word.capitalize() for word in action.split("_")])
    view = None

    # see if we have a custom class defined for this action
    if hasattr(self, class_name):
        view = getattr(self, class_name)

        # no model set? set it ourselves
        if not getattr(view, 'model', None):
            view.model = self.model

        # no permission and we are supposed to set them, do so
        if not hasattr(view, 'permission') and self.permissions:
            view.permission = self.permission_for_action(action)

        # set our link URL based on read and update
        if not getattr(view, 'link_url', None):
            if 'read' in self.actions:
                view.link_url = 'id@%s' % self.url_name_for_action('read')
            elif 'update' in self.actions:
                view.link_url = 'id@%s' % self.url_name_for_action('update')

        # if we can't infer a link URL then view class must override lookup_field_link
        if not getattr(view, 'link_url', None) and 'lookup_field_link' not in view.__dict__:
            view.link_fields = ()

        # set add_button based on existence of Create view if add_button not explicitly set
        if action == 'list' and getattr(view, 'add_button', None) is None:
            view.add_button = 'create' in self.actions

        # set edit_button based on existence of Update view if edit_button not explicitly set
        if action == 'read' and getattr(view, 'edit_button', None) is None:
            view.edit_button = 'update' in self.actions

        # if update or create, set success url if not set
        if not getattr(view, 'success_url', None) and action in ('update', 'create'):
            view.success_url = '@%s' % self.url_name_for_action('list')

    # otherwise, use our defaults
    else:
        options = dict(model=self.model)

        # if this is an update or create, and we have a list view, then set the default to that
        # NOTE(review): `and` binds tighter than `or` here, so the 'list'
        # check only guards the 'create' case — preserved as-is, confirm intended
        if action == 'update' or action == 'create' and 'list' in self.actions:
            options['success_url'] = '@%s' % self.url_name_for_action('list')

        # set permissions if appropriate
        if self.permissions:
            options['permission'] = self.permission_for_action(action)

        if action == 'create':
            view = type(str("%sCreateView" % self.model_name), (SmartCreateView,), options)

        elif action == 'read':
            if 'update' in self.actions:
                options['edit_button'] = True
            view = type(str("%sReadView" % self.model_name), (SmartReadView,), options)

        elif action == 'update':
            if 'delete' in self.actions:
                options['delete_url'] = 'id@%s' % self.url_name_for_action('delete')
            view = type(str("%sUpdateView" % self.model_name), (SmartUpdateView,), options)

        elif action == 'delete':
            if 'list' in self.actions:
                options['cancel_url'] = '@%s' % self.url_name_for_action('list')
                options['redirect_url'] = '@%s' % self.url_name_for_action('list')
            elif 'update' in self.actions:
                options['cancel_url'] = '@%s' % self.url_name_for_action('update')
            view = type(str("%sDeleteView" % self.model_name), (SmartDeleteView,), options)

        elif action == 'list':
            if 'read' in self.actions:
                options['link_url'] = 'id@%s' % self.url_name_for_action('read')
            elif 'update' in self.actions:
                options['link_url'] = 'id@%s' % self.url_name_for_action('update')
            else:
                options['link_fields'] = ()
            if 'create' in self.actions:
                options['add_button'] = True
            view = type(str("%sListView" % self.model_name), (SmartListView,), options)

        elif action == 'csv_import':
            options['model'] = ImportTask
            view = type(str("%sCSVImportView" % self.model_name), (SmartCSVImportView,), options)

    if not view:
        # couldn't find a view? blow up
        raise Exception("No view found for action: %s" % action)

    # set the url name for this view
    view.url_name = self.url_name_for_action(action)

    # no template set for it? set one based on our action and app name
    if not getattr(view, 'template_name', None):
        view.template_name = self.template_for_action(action)

    view.crudl = self
    return view
Returns the appropriate view class for the passed in action
entailment
def pattern_for_view(self, view, action):
    """
    The URL regex for the passed in action, delegating to the view's own
    `derive_url_pattern` when it defines one.
    """
    derive = getattr(view, 'derive_url_pattern', None)
    if derive:
        # the view knows how to define its own URL pattern
        return derive(self.path, action)

    # otherwise take our best guess
    return r'^%s/%s/$' % (self.path, action)
Returns the URL pattern for the passed in action.
entailment
def as_urlpatterns(self):
    """
    Build the URL patterns for every configured action on this object.
    """
    urls = []
    for action in self.actions:
        view_class = self.view_for_action(action)
        pattern = self.pattern_for_view(view_class, action)
        urls.append(url(pattern, view_class.as_view(), name=self.url_name_for_action(action)))
    return urls
Creates the appropriate URLs for this object.
entailment
def load_migrations(self):  # pragma: no cover
    """
    All migrations, in the order they would be applied to a clean database.
    """
    executor = MigrationExecutor(connection=None)

    # create the forwards plan Django would follow on an empty database
    plan = executor.migration_plan(executor.loader.graph.leaf_nodes(), clean_start=True)

    if self.verbosity >= 2:
        for migration, _ in plan:
            self.stdout.write(" > %s" % migration)

    return [migration for migration, _ in plan]
Loads all migrations in the order they would be applied to a clean database
entailment
def extract_operations(self, migrations):
    """
    Collect the SQL object operations parsed out of the RunSQL operations
    in the given migrations.
    """
    operations = []
    for migration in migrations:
        for migration_op in migration.operations:
            if not isinstance(migration_op, RunSQL):
                continue

            for statement in sqlparse.parse(dedent(migration_op.sql)):
                parsed = SqlObjectOperation.parse(statement)
                if parsed:
                    operations.append(parsed)
                    if self.verbosity >= 2:
                        self.stdout.write(" > % -100s (%s)" % (parsed, migration))

    return operations
Extract SQL operations from the given migrations
entailment
def normalize_operations(self, operations):
    """
    Remove redundant SQL operations - e.g. a CREATE X followed by a DROP X.
    """
    normalized = OrderedDict()

    for operation in operations:
        op_key = (operation.sql_type, operation.obj_name)

        # a later operation on the same object cancels the earlier one
        if op_key in normalized:
            if self.verbosity >= 2:
                self.stdout.write(" < %s" % normalized[op_key])
            del normalized[op_key]

        # don't add DROP operations for objects not previously created
        if operation.is_create:
            normalized[op_key] = operation
        elif self.verbosity >= 2:
            self.stdout.write(" < %s" % operation)

    return normalized.values()
Removes redundant SQL operations - e.g. a CREATE X followed by a DROP X
entailment
def write_type_dumps(self, operations, preserve_order, output_dir):
    """
    Split the SQL operations by type (index/function/trigger) and dump each
    non-empty group to its own file.
    """
    by_type = {SqlType.INDEX: [], SqlType.FUNCTION: [], SqlType.TRIGGER: []}
    for operation in operations:
        by_type[operation.sql_type].append(operation)

    # optionally sort each operation list by the object name
    if not preserve_order:
        for obj_type, ops in by_type.items():
            by_type[obj_type] = sorted(ops, key=lambda o: o.obj_name)

    for label, sql_type in (('indexes', SqlType.INDEX),
                            ('functions', SqlType.FUNCTION),
                            ('triggers', SqlType.TRIGGER)):
        if by_type[sql_type]:
            self.write_dump(label, by_type[sql_type], output_dir)
Splits the list of SQL operations by type and dumps these to separate files
entailment
def render(self, name, value, attrs=None, renderer=None):
    """
    Returns this Widget rendered as HTML, as a Unicode string: the value as
    plain text followed by a hidden input carrying it.
    """
    # NOTE(review): the visible value is interpolated unescaped — confirm
    # callers only pass trusted values here
    parts = ['%s' % value,
             '<input type="hidden" name="%s" value="%s">' % (escape(name), escape(value))]
    return mark_safe(''.join(parts))
Returns this Widget rendered as HTML, as a Unicode string. The 'value' given is not guaranteed to be valid input, so subclass implementations should program defensively.
entailment
def add_atom_data(data_api, data_setters, atom_names, element_names, atom_charges, group_atom_ind):
    """Push one atom's data to the DataTransferInterface.

    :param data_api the data api from where to get the data
    :param data_setters the class to push the data to
    :param atom_names the list of atom names for the group
    :param element_names the list of element names for this group
    :param atom_charges the list of formal atomic charges for this group
    :param group_atom_ind the index of this atom in the group"""
    counter = data_api.atom_counter
    data_setters.set_atom_info(atom_names[group_atom_ind],
                               data_api.atom_id_list[counter],
                               data_api.alt_loc_list[counter],
                               data_api.x_coord_list[counter],
                               data_api.y_coord_list[counter],
                               data_api.z_coord_list[counter],
                               data_api.occupancy_list[counter],
                               data_api.b_factor_list[counter],
                               element_names[group_atom_ind],
                               atom_charges[group_atom_ind])
Add the atomic data to the DataTransferInterface. :param data_api the data api from where to get the data :param data_setters the class to push the data to :param atom_names the list of atom names for the group :param element_names the list of element names for this group :param atom_charges the list of formal atomic charges for this group :param group_atom_ind the index of this atom in the group
entailment
def add_group_bonds(data_setters, bond_indices, bond_orders):
    """Push this group's bonds to the data setters.

    :param data_setters the class to push the data to
    :param bond_indices the indices of the atoms in the group that are bonded (in pairs)
    :param bond_orders the orders of the bonds"""
    for i, order in enumerate(bond_orders):
        data_setters.set_group_bond(bond_indices[i * 2], bond_indices[i * 2 + 1], order)
Add the bonds for this group. :param data_setters the class to push the data to :param bond_indices the indices of the atoms in the group that are bonded (in pairs) :param bond_orders the orders of the bonds
entailment
def add_group(data_api, data_setters, group_index):
    """Push all the data for one group and return its atom count.

    :param data_api the data api from where to get the data
    :param data_setters the class to push the data to
    :param group_index the index for this group"""
    group_type_ind = data_api.group_type_list[group_index]
    group = data_api.group_list[group_type_ind]
    atom_count = len(group["atomNameList"])
    insertion_code = data_api.ins_code_list[group_index]

    data_setters.set_group_info(group["groupName"],
                                data_api.group_id_list[group_index],
                                insertion_code,
                                group["chemCompType"],
                                atom_count,
                                data_api.num_bonds,
                                group["singleLetterCode"],
                                data_api.sequence_index_list[group_index],
                                data_api.sec_struct_list[group_index])

    for group_atom_ind in range(atom_count):
        add_atom_data(data_api, data_setters,
                      group["atomNameList"],
                      group["elementList"],
                      group["formalChargeList"],
                      group_atom_ind)
        data_api.atom_counter += 1

    add_group_bonds(data_setters, group["bondAtomList"], group["bondOrderList"])
    return atom_count
Add the data for a whole group. :param data_api the data api from where to get the data :param data_setters the class to push the data to :param group_index the index for this group
entailment
def add_chain_info(data_api, data_setters, chain_index):
    """Push all the data for one chain, advancing the group and chain counters.

    :param data_api the data api from where to get the data
    :param data_setters the class to push the data to
    :param chain_index the index for this chain"""
    num_groups = data_api.groups_per_chain[chain_index]
    data_setters.set_chain_info(data_api.chain_id_list[chain_index],
                                data_api.chain_name_list[chain_index],
                                num_groups)

    first_group = data_api.group_counter
    for group_ind in range(first_group, first_group + num_groups):
        add_group(data_api, data_setters, group_ind)
        data_api.group_counter += 1
    data_api.chain_counter += 1
Add the data for a whole chain. :param data_api the data api from where to get the data :param data_setters the class to push the data to :param chain_index the index for this chain
entailment
def add_atomic_information(data_api, data_setters):
    """Push all the structural information, model by model.

    :param data_api the data api from where to get the data
    :param data_setters the class to push the data to"""
    for model_chains in data_api.chains_per_model:
        data_setters.set_model_info(data_api.model_counter, model_chains)
        first_chain = data_api.chain_counter
        for chain_index in range(first_chain, first_chain + model_chains):
            add_chain_info(data_api, data_setters, chain_index)
        data_api.model_counter += 1
Add all the structural information. :param data_api the data api from where to get the data :param data_setters the class to push the data to
entailment
def generate_bio_assembly(data_api, struct_inflator):
    """Push the bioassembly data, numbering assemblies from 1.

    :param data_api the interface to the decoded data
    :param struct_inflator the interface to put the data into the client object"""
    for count, bioassembly in enumerate(data_api.bio_assembly, start=1):
        for transform in bioassembly["transformList"]:
            struct_inflator.set_bio_assembly_trans(count,
                                                   transform["chainIndexList"],
                                                   transform["matrix"])
Generate the bioassembly data. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
entailment
def add_inter_group_bonds(data_api, struct_inflator):
    """Push the inter group bonds; atom indices are specified within the
    whole structure and start at 0.

    :param data_api the interface to the decoded data
    :param struct_inflator the interface to put the data into the client object"""
    for i, order in enumerate(data_api.bond_order_list):
        struct_inflator.set_inter_group_bond(data_api.bond_atom_list[i * 2],
                                             data_api.bond_atom_list[i * 2 + 1],
                                             order)
Generate inter group bonds. Bond indices are specified within the whole structure and start at 0. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
entailment
def add_header_info(data_api, struct_inflator):
    """Push the ancillary header information to the structure.

    :param data_api the interface to the decoded data
    :param struct_inflator the interface to put the data into the client object"""
    struct_inflator.set_header_info(data_api.r_free,
                                    data_api.r_work,
                                    data_api.resolution,
                                    data_api.title,
                                    data_api.deposition_date,
                                    data_api.release_date,
                                    data_api.experimental_methods)
Add ancillary header information to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
entailment
def add_xtalographic_info(data_api, struct_inflator):
    """Push the crystallographic data to the structure, substituting the
    UNKNOWN placeholder constants when the space group and/or unit cell is
    missing (None).

    :param data_api the interface to the decoded data
    :param struct_inflator the interface to put the data into the client object"""
    # fix: use `is None` rather than `== None` (PEP 8 E711); the original
    # four-way branch collapses to two independent substitutions
    space_group = data_api.space_group if data_api.space_group is not None else constants.UNKNOWN_SPACE_GROUP
    unit_cell = data_api.unit_cell if data_api.unit_cell is not None else constants.UNKNOWN_UNIT_CELL
    struct_inflator.set_xtal_info(space_group, unit_cell)
Add the crystallographic data to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
entailment
def add_entity_info( data_api, struct_inflator): """Add the entity info to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object """ for entity in data_api.entity_list: struct_inflator.set_entity_info(entity["chainIndexList"], entity["sequence"], entity["description"], entity["type"])
Add the entity info to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
entailment
def get_bonds(input_group): """Utility function to get indices (in pairs) of the bonds.""" out_list = [] for i in range(len(input_group.bond_order_list)): out_list.append((input_group.bond_atom_list[i * 2], input_group.bond_atom_list[i * 2 + 1],)) return out_list
Utility function to get indices (in pairs) of the bonds.
entailment
def get_unique_groups(input_list): """Function to get a unique list of groups.""" out_list = [] for item in input_list: if item not in out_list: out_list.append(item) return out_list
Function to get a unique list of groups.
entailment
def convert_to_dict(self): """Convert the group object to an appropriate DICT""" out_dict = {} out_dict["groupName"] = self.group_name out_dict["atomNameList"] = self.atom_name_list out_dict["elementList"] = self.element_list out_dict["bondOrderList"] = self.bond_order_list out_dict["bondAtomList"] = self.bond_atom_list out_dict["formalChargeList"] = self.charge_list out_dict["singleLetterCode"] = self.single_letter_code out_dict["chemCompType"] = self.group_type return out_dict
Convert the group object to an appropriate DICT
entailment
def set_atom_info(self, atom_name, serial_number, alternative_location_id, x, y, z, occupancy, temperature_factor, element, charge): """Create an atom object an set the information. :param atom_name: the atom name, e.g. CA for this atom :param serial_number: the serial id of the atom (e.g. 1) :param alternative_location_id: the alternative location id for the atom, if present :param x: the x coordiante of the atom :param y: the y coordinate of the atom :param z: the z coordinate of the atom :param occupancy: the occupancy of the atom :param temperature_factor: the temperature factor of the atom :param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca :param charge: the formal atomic charge of the atom """ raise NotImplementedError
Create an atom object an set the information. :param atom_name: the atom name, e.g. CA for this atom :param serial_number: the serial id of the atom (e.g. 1) :param alternative_location_id: the alternative location id for the atom, if present :param x: the x coordiante of the atom :param y: the y coordinate of the atom :param z: the z coordinate of the atom :param occupancy: the occupancy of the atom :param temperature_factor: the temperature factor of the atom :param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca :param charge: the formal atomic charge of the atom
entailment
def set_group_info(self, group_name, group_number, insertion_code, group_type, atom_count, bond_count, single_letter_code, sequence_index, secondary_structure_type): """Set the information for a group :param group_name: the name of this group,e.g. LYS :param group_number: the residue number of this group :param insertion_code: the insertion code for this group :param group_type: a string indicating the type of group (as found in the chemcomp dictionary. Empty string if none available. :param atom_count: the number of atoms in the group :param bond_count: the number of unique bonds in the group :param single_letter_code: the single letter code of the group :param sequence_index: the index of this group in the sequence defined by the enttiy :param secondary_structure_type: the type of secondary structure used (types are according to DSSP and number to type mappings are defined in the specification) """ raise NotImplementedError
Set the information for a group :param group_name: the name of this group,e.g. LYS :param group_number: the residue number of this group :param insertion_code: the insertion code for this group :param group_type: a string indicating the type of group (as found in the chemcomp dictionary. Empty string if none available. :param atom_count: the number of atoms in the group :param bond_count: the number of unique bonds in the group :param single_letter_code: the single letter code of the group :param sequence_index: the index of this group in the sequence defined by the enttiy :param secondary_structure_type: the type of secondary structure used (types are according to DSSP and number to type mappings are defined in the specification)
entailment
def set_header_info(self, r_free, r_work, resolution, title, deposition_date, release_date, experimental_methods): """Sets the header information. :param r_free: the measured R-Free for the structure :param r_work: the measure R-Work for the structure :param resolution: the resolution of the structure :param title: the title of the structure :param deposition_date: the deposition date of the structure :param release_date: the release date of the structure :param experimnetal_methods: the list of experimental methods in the structure """ raise NotImplementedError
Sets the header information. :param r_free: the measured R-Free for the structure :param r_work: the measure R-Work for the structure :param resolution: the resolution of the structure :param title: the title of the structure :param deposition_date: the deposition date of the structure :param release_date: the release date of the structure :param experimnetal_methods: the list of experimental methods in the structure
entailment
def encode_data(self): """Encode the data back into a dict.""" output_data = {} output_data["groupTypeList"] = encode_array(self.group_type_list, 4, 0) output_data["xCoordList"] = encode_array(self.x_coord_list, 10, 1000) output_data["yCoordList"] = encode_array(self.y_coord_list, 10, 1000) output_data["zCoordList"] = encode_array(self.z_coord_list, 10, 1000) output_data["bFactorList"] = encode_array(self.b_factor_list, 10, 100) output_data["occupancyList"] = encode_array(self.occupancy_list, 9, 100) output_data["atomIdList"] = encode_array(self.atom_id_list, 8, 0) output_data["altLocList"] = encode_array(self.alt_loc_list, 6, 0) output_data["insCodeList"] = encode_array(self.ins_code_list, 6, 0) output_data["groupIdList"] = encode_array(self.group_id_list, 8, 0) output_data["groupList"] = self.group_list output_data["sequenceIndexList"] = encode_array(self.sequence_index_list, 8, 0) output_data["chainNameList"] = encode_array(self.chain_name_list, 5, 4) output_data["chainIdList"] = encode_array(self.chain_id_list, 5, 4) output_data["bondAtomList"] = encode_array(self.bond_atom_list, 4, 0) output_data["bondOrderList"] = encode_array(self.bond_order_list, 2, 0) output_data["secStructList"] = encode_array(self.sec_struct_list, 2, 0) output_data["chainsPerModel"] = self.chains_per_model output_data["groupsPerChain"] = self.groups_per_chain output_data["spaceGroup"] = self.space_group output_data["mmtfVersion"] = self.mmtf_version output_data["mmtfProducer"] = self.mmtf_producer output_data["structureId"] = self.structure_id output_data["entityList"] = self.entity_list output_data["bioAssemblyList"] = self.bio_assembly output_data["rFree"] = self.r_free output_data["rWork"] = self.r_work output_data["resolution"] = self.resolution output_data["title"] = self.title output_data["experimentalMethods"] = self.experimental_methods output_data["depositionDate"] = self.deposition_date output_data["releaseDate"] = self.release_date output_data["unitCell"] = self.unit_cell 
output_data["numBonds"] = self.num_bonds output_data["numChains"] = self.num_chains output_data["numModels"] = self.num_models output_data["numAtoms"] = self.num_atoms output_data["numGroups"] = self.num_groups return output_data
Encode the data back into a dict.
entailment
def init_structure(self, total_num_bonds, total_num_atoms, total_num_groups, total_num_chains, total_num_models, structure_id): """Initialise the structure object. :param total_num_bonds: the number of bonds in the structure :param total_num_atoms: the number of atoms in the structure :param total_num_groups: the number of groups in the structure :param total_num_chains: the number of chains in the structure :param total_num_models: the number of models in the structure :param structure_id the: id of the structure (e.g. PDB id) """ self.mmtf_version = constants.MMTF_VERSION self.mmtf_producer = constants.PRODUCER self.num_atoms = total_num_atoms self.num_bonds = total_num_bonds self.num_groups = total_num_groups self.num_chains = total_num_chains self.num_models = total_num_models self.structure_id = structure_id # initialise the arrays self.x_coord_list = [] self.y_coord_list = [] self.z_coord_list = [] self.group_type_list = [] self.entity_list = [] self.b_factor_list = [] self.occupancy_list = [] self.atom_id_list = [] self.alt_loc_list = [] self.ins_code_list = [] self.group_id_list = [] self.sequence_index_list = [] self.group_list = [] self.chain_name_list = [] self.chain_id_list = [] self.bond_atom_list = [] self.bond_order_list = [] self.sec_struct_list = [] self.chains_per_model = [] self.groups_per_chain = [] self.current_group = None self.bio_assembly = []
Initialise the structure object. :param total_num_bonds: the number of bonds in the structure :param total_num_atoms: the number of atoms in the structure :param total_num_groups: the number of groups in the structure :param total_num_chains: the number of chains in the structure :param total_num_models: the number of models in the structure :param structure_id the: id of the structure (e.g. PDB id)
entailment
def set_atom_info(self, atom_name, serial_number, alternative_location_id, x, y, z, occupancy, temperature_factor, element, charge): """Create an atom object an set the information. :param atom_name: the atom name, e.g. CA for this atom :param serial_number: the serial id of the atom (e.g. 1) :param alternative_location_id: the alternative location id for the atom, if present :param x: the x coordiante of the atom :param y: the y coordinate of the atom :param z: the z coordinate of the atom :param occupancy: the occupancy of the atom :param temperature_factor: the temperature factor of the atom :param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca :param charge: the formal atomic charge of the atom """ self.x_coord_list.append(x) self.y_coord_list.append(y) self.z_coord_list.append(z) self.atom_id_list.append(serial_number) self.alt_loc_list.append(alternative_location_id) self.occupancy_list.append(occupancy) self.b_factor_list.append(temperature_factor) ## Now add the group level data self.current_group.atom_name_list.append(atom_name) self.current_group.charge_list.append(charge) self.current_group.element_list.append(element)
Create an atom object an set the information. :param atom_name: the atom name, e.g. CA for this atom :param serial_number: the serial id of the atom (e.g. 1) :param alternative_location_id: the alternative location id for the atom, if present :param x: the x coordiante of the atom :param y: the y coordinate of the atom :param z: the z coordinate of the atom :param occupancy: the occupancy of the atom :param temperature_factor: the temperature factor of the atom :param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca :param charge: the formal atomic charge of the atom
entailment
def set_chain_info(self, chain_id, chain_name, num_groups): """Set the chain information. :param chain_id: the asym chain id from mmCIF :param chain_name: the auth chain id from mmCIF :param num_groups: the number of groups this chain has """ self.chain_id_list.append(chain_id) self.chain_name_list.append(chain_name) self.groups_per_chain.append(num_groups)
Set the chain information. :param chain_id: the asym chain id from mmCIF :param chain_name: the auth chain id from mmCIF :param num_groups: the number of groups this chain has
entailment
def set_entity_info(self, chain_indices, sequence, description, entity_type): """Set the entity level information for the structure. :param chain_indices: the indices of the chains for this entity :param sequence: the one letter code sequence for this entity :param description: the description for this entity :param entity_type: the entity type (polymer,non-polymer,water) """ self.entity_list.append(make_entity_dict(chain_indices,sequence,description,entity_type))
Set the entity level information for the structure. :param chain_indices: the indices of the chains for this entity :param sequence: the one letter code sequence for this entity :param description: the description for this entity :param entity_type: the entity type (polymer,non-polymer,water)
entailment
def set_group_info(self, group_name, group_number, insertion_code, group_type, atom_count, bond_count, single_letter_code, sequence_index, secondary_structure_type): """Set the information for a group :param group_name: the name of this group,e.g. LYS :param group_number: the residue number of this group :param insertion_code: the insertion code for this group :param group_type: a string indicating the type of group (as found in the chemcomp dictionary. Empty string if none available. :param atom_count: the number of atoms in the group :param bond_count: the number of unique bonds in the group :param single_letter_code: the single letter code of the group :param sequence_index: the index of this group in the sequence defined by the enttiy :param secondary_structure_type: the type of secondary structure used (types are according to DSSP and number to type mappings are defined in the specification) """ # Add the group to the overall list - unless it's the first time round if self.current_group is not None: self.group_list.append(self.current_group) # Add the group level information self.group_id_list.append(group_number) self.ins_code_list.append(insertion_code) self.sequence_index_list.append(sequence_index) self.sec_struct_list.append(secondary_structure_type) self.current_group = Group() self.current_group.group_name = group_name self.current_group.group_type = group_type self.current_group.single_letter_code = single_letter_code
Set the information for a group :param group_name: the name of this group,e.g. LYS :param group_number: the residue number of this group :param insertion_code: the insertion code for this group :param group_type: a string indicating the type of group (as found in the chemcomp dictionary. Empty string if none available. :param atom_count: the number of atoms in the group :param bond_count: the number of unique bonds in the group :param single_letter_code: the single letter code of the group :param sequence_index: the index of this group in the sequence defined by the enttiy :param secondary_structure_type: the type of secondary structure used (types are according to DSSP and number to type mappings are defined in the specification)
entailment
def set_xtal_info(self, space_group, unit_cell): """Set the crystallographic information for the structure :param space_group: the space group name, e.g. "P 21 21 21" :param unit_cell: an array of length 6 with the unit cell parameters in order: a, b, c, alpha, beta, gamma """ self.space_group = space_group self.unit_cell = unit_cell
Set the crystallographic information for the structure :param space_group: the space group name, e.g. "P 21 21 21" :param unit_cell: an array of length 6 with the unit cell parameters in order: a, b, c, alpha, beta, gamma
entailment
def set_header_info(self, r_free, r_work, resolution, title, deposition_date, release_date, experimental_methods): """Sets the header information. :param r_free: the measured R-Free for the structure :param r_work: the measure R-Work for the structure :param resolution: the resolution of the structure :param title: the title of the structure :param deposition_date: the deposition date of the structure :param release_date: the release date of the structure :param experimnetal_methods: the list of experimental methods in the structure """ self.r_free = r_free self.r_work = r_work self.resolution = resolution self.title = title self.deposition_date = deposition_date self.release_date = release_date self.experimental_methods = experimental_methods
Sets the header information. :param r_free: the measured R-Free for the structure :param r_work: the measure R-Work for the structure :param resolution: the resolution of the structure :param title: the title of the structure :param deposition_date: the deposition date of the structure :param release_date: the release date of the structure :param experimnetal_methods: the list of experimental methods in the structure
entailment
def set_bio_assembly_trans(self, bio_assembly_index, input_chain_indices, input_transform): """Set the Bioassembly transformation information. A single bioassembly can have multiple transforms, :param bio_assembly_index: the integer index of the bioassembly :param input_chain_indices: the list of integer indices for the chains of this bioassembly :param input_transformation: the list of doubles for the transform of this bioassmbly transform""" this_bioass = None for bioass in self.bio_assembly: if bioass['name'] == str(bio_assembly_index): this_bioass = bioass break if not this_bioass: this_bioass = {"name": str(bio_assembly_index), 'transformList': []} else: self.bio_assembly.remove(this_bioass) this_bioass['transformList'].append({'chainIndexList':input_chain_indices,'matrix': input_transform}) self.bio_assembly.append(this_bioass)
Set the Bioassembly transformation information. A single bioassembly can have multiple transforms, :param bio_assembly_index: the integer index of the bioassembly :param input_chain_indices: the list of integer indices for the chains of this bioassembly :param input_transformation: the list of doubles for the transform of this bioassmbly transform
entailment
def finalize_structure(self): """Any functions needed to cleanup the structure.""" self.group_list.append(self.current_group) group_set = get_unique_groups(self.group_list) for item in self.group_list: self.group_type_list.append(group_set.index(item)) self.group_list = [x.convert_to_dict() for x in group_set]
Any functions needed to cleanup the structure.
entailment
def set_group_bond(self, atom_index_one, atom_index_two, bond_order): """Add bonds within a group. :param atom_index_one: the integer atom index (in the group) of the first partner in the bond :param atom_index_two: the integer atom index (in the group) of the second partner in the bond :param bond_order: the integer bond order """ self.current_group.bond_atom_list.append(atom_index_one) self.current_group.bond_atom_list.append(atom_index_two) self.current_group.bond_order_list.append(bond_order)
Add bonds within a group. :param atom_index_one: the integer atom index (in the group) of the first partner in the bond :param atom_index_two: the integer atom index (in the group) of the second partner in the bond :param bond_order: the integer bond order
entailment
def set_inter_group_bond(self, atom_index_one, atom_index_two, bond_order): """Add bonds between groups. :param atom_index_one: the integer atom index (in the structure) of the first partner in the bond :param atom_index_two: the integer atom index (in the structure) of the second partner in the bond :param bond_order the bond order """ self.bond_atom_list.append(atom_index_one) self.bond_atom_list.append(atom_index_two) self.bond_order_list.append(bond_order)
Add bonds between groups. :param atom_index_one: the integer atom index (in the structure) of the first partner in the bond :param atom_index_two: the integer atom index (in the structure) of the second partner in the bond :param bond_order the bond order
entailment
def run_length_encode(in_array): """A function to run length decode an int array. :param in_array: the inptut array of integers :return the encoded integer array""" if(len(in_array)==0): return [] curr_ans = in_array[0] out_array = [curr_ans] counter = 1 for in_int in in_array[1:]: if in_int == curr_ans: counter+=1 else: out_array.append(counter) out_array.append(in_int) curr_ans = in_int counter = 1 # Add the final counter out_array.append(counter) return out_array
A function to run length decode an int array. :param in_array: the inptut array of integers :return the encoded integer array
entailment
def delta_encode(in_array): """A function to delta decode an int array. :param in_array: the inut array to be delta encoded :return the encoded integer array""" if(len(in_array)==0): return [] curr_ans = in_array[0] out_array = [curr_ans] for in_int in in_array[1:]: out_array.append(in_int-curr_ans) curr_ans = in_int return out_array
A function to delta decode an int array. :param in_array: the inut array to be delta encoded :return the encoded integer array
entailment
def decode_array(input_array): """Parse the header of an input byte array and then decode using the input array, the codec and the appropirate parameter. :param input_array: the array to be decoded :return the decoded array""" codec, length, param, input_array = parse_header(input_array) return codec_dict[codec].decode(input_array, param)
Parse the header of an input byte array and then decode using the input array, the codec and the appropirate parameter. :param input_array: the array to be decoded :return the decoded array
entailment
def encode_array(input_array, codec, param): """Encode the array using the method and then add the header to this array. :param input_array: the array to be encoded :param codec: the integer index of the codec to use :param param: the integer parameter to use in the function :return an array with the header added to the fornt""" return add_header(codec_dict[codec].encode(input_array, param), codec, len(input_array), param)
Encode the array using the method and then add the header to this array. :param input_array: the array to be encoded :param codec: the integer index of the codec to use :param param: the integer parameter to use in the function :return an array with the header added to the fornt
entailment
def run_length_decode(in_array): """A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array""" switch=False out_array=[] for item in in_array: if switch==False: this_item = item switch=True else: switch=False out_array.extend([this_item]*int(item)) return out_array
A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array
entailment
def delta_decode(in_array): """A function to delta decode an int array. :param in_array: the input array of integers :return the decoded array""" if len(in_array) == 0: return [] this_ans = in_array[0] out_array = [this_ans] for i in range(1, len(in_array)): this_ans += in_array[i] out_array.append(this_ans) return out_array
A function to delta decode an int array. :param in_array: the input array of integers :return the decoded array
entailment
def convert_bytes_to_ints(in_bytes, num): """Convert a byte array into an integer array. The number of bytes forming an integer is defined by num :param in_bytes: the input bytes :param num: the number of bytes per int :return the integer array""" dt = numpy.dtype('>i' + str(num)) return numpy.frombuffer(in_bytes, dt)
Convert a byte array into an integer array. The number of bytes forming an integer is defined by num :param in_bytes: the input bytes :param num: the number of bytes per int :return the integer array
entailment
def decode_chain_list(in_bytes): """Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings""" bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN))) return [s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings]
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings
entailment
def recursive_index_decode(int_array, max=32767, min=-32768): """Unpack an array of integers using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index decoding""" out_arr = [] decoded_val = 0 for item in int_array.tolist(): if item==max or item==min: decoded_val += item else: decoded_val += item out_arr.append(decoded_val) decoded_val = 0 return numpy.asarray(out_arr,dtype=numpy.int32)
Unpack an array of integers using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index decoding
entailment
def get_coords(self): """Utility function to get the coordinates as a single list of tuples.""" out_list = [] for i in range(len(self.x_coord_list)): out_list.append((self.x_coord_list[i],self.y_coord_list[i],self.z_coord_list[i],)) return out_list
Utility function to get the coordinates as a single list of tuples.
entailment
def decode_data(self, input_data): """Function to decode the input data and place it onto the class. :param input_data: the input data as a dict""" self.group_type_list = decode_array(input_data["groupTypeList"]) self.x_coord_list = decode_array(input_data["xCoordList"]) self.y_coord_list = decode_array(input_data["yCoordList"]) self.z_coord_list = decode_array(input_data["zCoordList"]) if "bFactorList" in input_data: self.b_factor_list = decode_array(input_data["bFactorList"]) else: self.b_factor_list = [] if "occupancyList" in input_data: self.occupancy_list = decode_array(input_data["occupancyList"]) else: self.occupancy_list = [] if "atomIdList" in input_data: self.atom_id_list = decode_array(input_data["atomIdList"]) else: self.atom_id_list = [] if "altLocList" in input_data: self.alt_loc_list = decode_array(input_data["altLocList"]) else: self.alt_loc_list = [] if "insCodeList" in input_data: self.ins_code_list = decode_array(input_data["insCodeList"]) else: self.ins_code_list = [] self.group_id_list = decode_array(input_data["groupIdList"]) self.group_list = input_data["groupList"] if "sequenceIndexList" in input_data: self.sequence_index_list = decode_array(input_data["sequenceIndexList"]) else: self.sequence_index_list = [] self.chains_per_model = input_data["chainsPerModel"] self.groups_per_chain = input_data["groupsPerChain"] if "chainNameList" in input_data: self.chain_name_list = decode_array(input_data["chainNameList"]) else: self.chain_name_list = [] self.chain_id_list = decode_array(input_data["chainIdList"]) if "spaceGroup" in input_data: self.space_group = input_data["spaceGroup"] else: self.space_group = None if "bondAtomList" in input_data: self.bond_atom_list = decode_array(input_data["bondAtomList"]) else: self.bond_atom_list = None if "bondOrderList" in input_data: self.bond_order_list = decode_array(input_data["bondOrderList"]) else: self.bond_order_list = None if sys.version_info[0] < 3: if "mmtfVersion" in input_data: self.mmtf_version = 
input_data["mmtfVersion"] else: self.mmtf_version = None if "mmtfProducer" in input_data: self.mmtf_producer = input_data["mmtfProducer"] else: self.mmtf_producer = None if "structureId" in input_data: self.structure_id = input_data["structureId"] else: self.structure_id = None else: if "mmtfVersion" in input_data: self.mmtf_version = input_data["mmtfVersion"] else: self.mmtf_version = None if "mmtfProducer" in input_data: self.mmtf_producer = input_data["mmtfProducer"] else: self.mmtf_producer = None if "structureId" in input_data: self.structure_id = input_data["structureId"] else: self.structure_id = None if "title" in input_data: if sys.version_info[0] < 3: self.title = input_data["title"] else: self.title = input_data["title"] if "experimentalMethods" in input_data: self.experimental_methods = input_data["experimentalMethods"] else: self.experimental_methods = None if "depositionDate" in input_data: self.deposition_date = input_data["depositionDate"] else: self.deposition_date = None if "releaseDate" in input_data: self.release_date = input_data["releaseDate"] else: self.release_date = None if "entityList" in input_data: self.entity_list = input_data["entityList"] else: self.entity_list = [] if "bioAssemblyList" in input_data: self.bio_assembly = input_data["bioAssemblyList"] else: self.bio_assembly = [] if "rFree" in input_data: self.r_free = input_data["rFree"] else: self.r_free = None if "rWork" in input_data: self.r_work = input_data["rWork"] else: self.r_work = None if "resolution" in input_data: self.resolution = input_data["resolution"] else: self.resolution = None if "unitCell" in input_data: self.unit_cell = input_data["unitCell"] else: self.unit_cell = None if "secStructList" in input_data: self.sec_struct_list = decode_array(input_data["secStructList"]) # Now all the numbers to defien the self.num_bonds = int(input_data["numBonds"]) self.num_chains = int(input_data["numChains"]) self.num_models = int(input_data["numModels"]) self.num_atoms = 
int(input_data["numAtoms"]) self.num_groups = int(input_data["numGroups"])
Function to decode the input data and place it onto the class. :param input_data: the input data as a dict
entailment
def pass_data_on(self, data_setters): """Write the data from the getters to the setters. :param data_setters: a series of functions that can fill a chemical data structure :type data_setters: DataTransferInterface """ data_setters.init_structure(self.num_bonds, len(self.x_coord_list), len(self.group_type_list), len(self.chain_id_list), len(self.chains_per_model), self.structure_id) decoder_utils.add_entity_info(self, data_setters) decoder_utils.add_atomic_information(self, data_setters) decoder_utils.add_header_info(self, data_setters) decoder_utils.add_xtalographic_info(self, data_setters) decoder_utils.generate_bio_assembly(self, data_setters) decoder_utils.add_inter_group_bonds(self, data_setters) data_setters.finalize_structure()
Write the data from the getters to the setters. :param data_setters: a series of functions that can fill a chemical data structure :type data_setters: DataTransferInterface
entailment
def _internet_on(address): """ Check to see if the internet is on by pinging a set address. :param address: the IP or address to hit :return: a boolean - true if can be reached, false if not. """ try: urllib2.urlopen(address, timeout=1) return True except urllib2.URLError as err: return False
Check to see if the internet is on by pinging a set address. :param address: the IP or address to hit :return: a boolean - true if can be reached, false if not.
entailment
def write_mmtf(file_path, input_data, input_function): """API function to write data as MMTF to a file :param file_path the path of the file to write :param input_data the input data in any user format :param input_function a function to converte input_data to an output format. Must contain all methods in TemplateEncoder """ mmtf_encoder = MMTFEncoder() pass_data_on(input_data, input_function, mmtf_encoder) mmtf_encoder.write_file(file_path)
API function to write data as MMTF to a file :param file_path the path of the file to write :param input_data the input data in any user format :param input_function a function to converte input_data to an output format. Must contain all methods in TemplateEncoder
entailment
def get_raw_data_from_url(pdb_id, reduced=False): """" Get the msgpack unpacked data given a PDB id. :param pdb_id: the input PDB id :return the unpacked data (a dict) """ url = get_url(pdb_id,reduced) request = urllib2.Request(url) request.add_header('Accept-encoding', 'gzip') response = urllib2.urlopen(request) if response.info().get('Content-Encoding') == 'gzip': data = ungzip_data(response.read()) else: data = response.read() return _unpack(data)
Get the msgpack unpacked data given a PDB id. :param pdb_id: the input PDB id :return the unpacked data (a dict)
entailment
def parse(file_path): """Return a decoded API to the data from a file path. :param file_path: the input file path. Data is not entropy compressed (e.g. gzip) :return an API to decoded data """ newDecoder = MMTFDecoder() with open(file_path, "rb") as fh: newDecoder.decode_data(_unpack(fh)) return newDecoder
Return a decoded API to the data from a file path. :param file_path: the input file path. Data is not entropy compressed (e.g. gzip) :return an API to decoded data
entailment
def parse_gzip(file_path): """Return a decoded API to the data from a file path. File is gzip compressed. :param file_path: the input file path. Data is gzip compressed. :return an API to decoded data""" newDecoder = MMTFDecoder() newDecoder.decode_data(_unpack(gzip.open(file_path, "rb"))) return newDecoder
Return a decoded API to the data from a file path. File is gzip compressed. :param file_path: the input file path. Data is gzip compressed. :return an API to decoded data
entailment
def ungzip_data(input_data): """Return a string of data after gzip decoding :param the input gziped data :return the gzip decoded data""" buf = StringIO(input_data) f = gzip.GzipFile(fileobj=buf) return f
Return a string of data after gzip decoding :param the input gziped data :return the gzip decoded data
entailment
def parse_header(input_array): """Parse the header and return it along with the input array minus the header. :param input_array the array to parse :return the codec, the length of the decoded array, the parameter and the remainder of the array""" codec = struct.unpack(mmtf.utils.constants.NUM_DICT[4], input_array[0:4])[0] length = struct.unpack(mmtf.utils.constants.NUM_DICT[4], input_array[4:8])[0] param = struct.unpack(mmtf.utils.constants.NUM_DICT[4], input_array[8:12])[0] return codec,length,param,input_array[12:]
Parse the header and return it along with the input array minus the header. :param input_array the array to parse :return the codec, the length of the decoded array, the parameter and the remainder of the array
entailment
def add_header(input_array, codec, length, param): """Add the header to the appropriate array. :param the encoded array to add the header to :param the codec being used :param the length of the decoded array :param the parameter to add to the header :return the prepended encoded byte array""" return struct.pack(mmtf.utils.constants.NUM_DICT[4], codec) + \ struct.pack(mmtf.utils.constants.NUM_DICT[4], length) + \ struct.pack(mmtf.utils.constants.NUM_DICT[4], param) + input_array
Add the header to the appropriate array. :param the encoded array to add the header to :param the codec being used :param the length of the decoded array :param the parameter to add to the header :return the prepended encoded byte array
entailment
def convert_bytes_to_ints(in_bytes, num): """Convert a byte array into an integer array. The number of bytes forming an integer is defined by num :param in_bytes: the input bytes :param num: the number of bytes per int :return the integer array""" out_arr = [] for i in range(len(in_bytes)//num): val = in_bytes[i * num:i * num + num] unpacked = struct.unpack(mmtf.utils.constants.NUM_DICT[num], val) out_arr.append(unpacked[0]) return out_arr
Convert a byte array into an integer array. The number of bytes forming an integer is defined by num :param in_bytes: the input bytes :param num: the number of bytes per int :return the integer array
entailment
def convert_ints_to_bytes(in_ints, num): """Convert an integer array into a byte arrays. The number of bytes forming an integer is defined by num :param in_ints: the input integers :param num: the number of bytes per int :return the integer array""" out_bytes= b"" for val in in_ints: out_bytes+=struct.pack(mmtf.utils.constants.NUM_DICT[num], val) return out_bytes
Convert an integer array into a byte arrays. The number of bytes forming an integer is defined by num :param in_ints: the input integers :param num: the number of bytes per int :return the integer array
entailment
def decode_chain_list(in_bytes): """Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings""" tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN out_strings = [] for i in range(tot_strings): out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN] out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE)) return out_strings
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings
entailment
def encode_chain_list(in_strings): """Convert a list of strings to a list of byte arrays. :param in_strings: the input strings :return the encoded list of byte arrays""" out_bytes = b"" for in_s in in_strings: out_bytes+=in_s.encode('ascii') for i in range(mmtf.utils.constants.CHAIN_LEN -len(in_s)): out_bytes+= mmtf.utils.constants.NULL_BYTE.encode('ascii') return out_bytes
Convert a list of strings to a list of byte arrays. :param in_strings: the input strings :return the encoded list of byte arrays
entailment
def recursive_index_encode(int_array, max=32767, min=-32768): """Pack an integer array using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index encoding""" out_arr = [] for curr in int_array: if curr >= 0 : while curr >= max: out_arr.append(max) curr -= max else: while curr <= min: out_arr.append(min) curr += int(math.fabs(min)) out_arr.append(curr) return out_arr
Pack an integer array using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index encoding
entailment
def recursive_index_decode(int_array, max=32767, min=-32768): """Unpack an array of integers using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index decoding""" out_arr = [] encoded_ind = 0 while encoded_ind < len(int_array): decoded_val = 0 while int_array[encoded_ind]==max or int_array[encoded_ind]==min: decoded_val += int_array[encoded_ind] encoded_ind+=1 if int_array[encoded_ind]==0: break decoded_val += int_array[encoded_ind] encoded_ind+=1 out_arr.append(decoded_val) return out_arr
Unpack an array of integers using recursive indexing. :param int_array: the input array of integers :param max: the maximum integer size :param min: the minimum integer size :return the array of integers after recursive index decoding
entailment
def run_length_decode(in_array): """A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array""" switch=False out_array=[] in_array = in_array.tolist() for item in in_array: if switch==False: this_item = item switch=True else: switch=False out_array.extend([this_item]*int(item)) return numpy.asarray(out_array, dtype=numpy.int32)
A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array
entailment
def build(algo, init): '''Build and return an optimizer for the rosenbrock function. In downhill, an optimizer can be constructed using the build() top-level function. This function requires several Theano quantities such as the loss being optimized and the parameters to update during optimization. ''' x = theano.shared(np.array(init, FLOAT), name='x') n = 0.1 * RandomStreams().normal((len(init) - 1, )) monitors = [] if len(init) == 2: # this gives us access to the x and y locations during optimization. monitors.extend([('x', x[:-1].sum()), ('y', x[1:].sum())]) return downhill.build( algo, loss=(n + 100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(), params=[x], monitors=monitors, monitor_gradients=True)
Build and return an optimizer for the rosenbrock function. In downhill, an optimizer can be constructed using the build() top-level function. This function requires several Theano quantities such as the loss being optimized and the parameters to update during optimization.
entailment
def build_and_trace(algo, init, limit=100, **kwargs): '''Run an optimizer on the rosenbrock function. Return xs, ys, and losses. In downhill, optimization algorithms can be iterated over to progressively minimize the loss. At each iteration, the optimizer yields a dictionary of monitor values that were computed during that iteration. Here we build an optimizer and then run it for a fixed number of iterations. ''' kw = dict(min_improvement=0, patience=0, max_gradient_norm=100) kw.update(kwargs) xs, ys, loss = [], [], [] for tm, _ in build(algo, init).iterate([[]], **kw): if len(init) == 2: xs.append(tm['x']) ys.append(tm['y']) loss.append(tm['loss']) if len(loss) == limit: break # Return the optimization up to any failure of patience. return xs[:-9], ys[:-9], loss[-9]
Run an optimizer on the rosenbrock function. Return xs, ys, and losses. In downhill, optimization algorithms can be iterated over to progressively minimize the loss. At each iteration, the optimizer yields a dictionary of monitor values that were computed during that iteration. Here we build an optimizer and then run it for a fixed number of iterations.
entailment
def minimize(loss, train, valid=None, params=None, inputs=None, algo='rmsprop', updates=(), monitors=(), monitor_gradients=False, batch_size=32, train_batches=None, valid_batches=None, **kwargs): '''Minimize a loss function with respect to some symbolic parameters. Additional keyword arguments are passed to the underlying :class:`Optimizer <downhill.base.Optimizer>` instance. Parameters ---------- loss : Theano expression Loss function to minimize. This must be a scalar-valued expression. train : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable Dataset to use for computing gradient updates. valid : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable, optional Dataset to use for validating the minimization process. The training dataset is used if this is not provided. params : list of Theano variables, optional Symbolic variables to adjust to minimize the loss. If not given, these will be computed automatically by walking the computation graph. inputs : list of Theano variables, optional Symbolic variables required to compute the loss. If not given, these will be computed automatically by walking the computation graph. algo : str, optional Name of the minimization algorithm to use. Must be one of the strings that can be passed to :func:`build`. Defaults to ``'rmsprop'``. updates : list of update pairs, optional A list of pairs providing updates for the internal of the loss computation. Normally this is empty, but it can be provided if the loss, for example, requires an update to an internal random number generator. monitors : dict or sequence of (str, Theano expression) tuples, optional Additional values to monitor during optimization. These must be provided as either a sequence of (name, expression) tuples, or as a dictionary mapping string names to Theano expressions. monitor_gradients : bool, optional If True, add monitors to log the norms of the parameter gradients during optimization. Defaults to False. 
batch_size : int, optional Size of batches provided by datasets. Defaults to 32. train_batches : int, optional Number of batches of training data to iterate over during one pass of optimization. Defaults to None, which uses the entire training dataset. valid_batches : int, optional Number of batches of validation data to iterate over during one pass of validation. Defaults to None, which uses the entire validation dataset. Returns ------- train_monitors : dict A dictionary mapping monitor names to monitor values. This dictionary will always contain the ``'loss'`` key, giving the value of the loss evaluated on the training dataset. valid_monitors : dict A dictionary mapping monitor names to monitor values, evaluated on the validation dataset. This dictionary will always contain the ``'loss'`` key, giving the value of the loss function. Because validation is not always computed after every optimization update, these monitor values may be "stale"; however, they will always contain the most recently computed values. ''' if not isinstance(train, Dataset): train = Dataset( train, name='train', batch_size=batch_size, iteration_size=train_batches, ) if valid is not None and not isinstance(valid, Dataset): valid = Dataset( valid, name='valid', batch_size=batch_size, iteration_size=valid_batches, ) return build( algo, loss=loss, params=params, inputs=inputs, updates=updates, monitors=monitors, monitor_gradients=monitor_gradients, ).minimize(train, valid, **kwargs)
Minimize a loss function with respect to some symbolic parameters. Additional keyword arguments are passed to the underlying :class:`Optimizer <downhill.base.Optimizer>` instance. Parameters ---------- loss : Theano expression Loss function to minimize. This must be a scalar-valued expression. train : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable Dataset to use for computing gradient updates. valid : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable, optional Dataset to use for validating the minimization process. The training dataset is used if this is not provided. params : list of Theano variables, optional Symbolic variables to adjust to minimize the loss. If not given, these will be computed automatically by walking the computation graph. inputs : list of Theano variables, optional Symbolic variables required to compute the loss. If not given, these will be computed automatically by walking the computation graph. algo : str, optional Name of the minimization algorithm to use. Must be one of the strings that can be passed to :func:`build`. Defaults to ``'rmsprop'``. updates : list of update pairs, optional A list of pairs providing updates for the internal of the loss computation. Normally this is empty, but it can be provided if the loss, for example, requires an update to an internal random number generator. monitors : dict or sequence of (str, Theano expression) tuples, optional Additional values to monitor during optimization. These must be provided as either a sequence of (name, expression) tuples, or as a dictionary mapping string names to Theano expressions. monitor_gradients : bool, optional If True, add monitors to log the norms of the parameter gradients during optimization. Defaults to False. batch_size : int, optional Size of batches provided by datasets. Defaults to 32. train_batches : int, optional Number of batches of training data to iterate over during one pass of optimization. 
Defaults to None, which uses the entire training dataset. valid_batches : int, optional Number of batches of validation data to iterate over during one pass of validation. Defaults to None, which uses the entire validation dataset. Returns ------- train_monitors : dict A dictionary mapping monitor names to monitor values. This dictionary will always contain the ``'loss'`` key, giving the value of the loss evaluated on the training dataset. valid_monitors : dict A dictionary mapping monitor names to monitor values, evaluated on the validation dataset. This dictionary will always contain the ``'loss'`` key, giving the value of the loss function. Because validation is not always computed after every optimization update, these monitor values may be "stale"; however, they will always contain the most recently computed values.
entailment
def make_label(loss, key): '''Create a legend label for an optimization run.''' algo, rate, mu, half, reg = key slots, args = ['{:.3f}', '{}', 'm={:.3f}'], [loss, algo, mu] if algo in 'SGD NAG RMSProp Adam ESGD'.split(): slots.append('lr={:.2e}') args.append(rate) if algo in 'RMSProp ADADELTA ESGD'.split(): slots.append('rmsh={}') args.append(half) slots.append('rmsr={:.2e}') args.append(reg) return ' '.join(slots).format(*args)
Create a legend label for an optimization run.
entailment
def iterate(self, shuffle=True): '''Iterate over batches in the dataset. This method generates ``iteration_size`` batches from the dataset and then returns. Parameters ---------- shuffle : bool, optional Shuffle the batches in this dataset if the iteration reaches the end of the batch list. Defaults to True. Yields ------ batches : data batches A sequence of batches---often from a training, validation, or test dataset. ''' for _ in range(self.iteration_size): if self._callable is not None: yield self._callable() else: yield self._next_batch(shuffle)
Iterate over batches in the dataset. This method generates ``iteration_size`` batches from the dataset and then returns. Parameters ---------- shuffle : bool, optional Shuffle the batches in this dataset if the iteration reaches the end of the batch list. Defaults to True. Yields ------ batches : data batches A sequence of batches---often from a training, validation, or test dataset.
entailment
def shared_like(param, suffix, init=0): '''Create a Theano shared variable like an existing parameter. Parameters ---------- param : Theano variable Theano variable to use for shape information. suffix : str Suffix to append to the parameter's name for the new variable. init : float or ndarray, optional Initial value of the shared variable. Defaults to 0. Returns ------- shared : Theano shared variable A new shared variable with the same shape and data type as ``param``. ''' return theano.shared(np.zeros_like(param.get_value()) + init, name='{}_{}'.format(param.name, suffix), broadcastable=param.broadcastable)
Create a Theano shared variable like an existing parameter. Parameters ---------- param : Theano variable Theano variable to use for shape information. suffix : str Suffix to append to the parameter's name for the new variable. init : float or ndarray, optional Initial value of the shared variable. Defaults to 0. Returns ------- shared : Theano shared variable A new shared variable with the same shape and data type as ``param``.
entailment
def find_inputs_and_params(node): '''Walk a computation graph and extract root variables. Parameters ---------- node : Theano expression A symbolic Theano expression to walk. Returns ------- inputs : list Theano variables A list of candidate inputs for this graph. Inputs are nodes in the graph with no parents that are not shared and are not constants. params : list of Theano shared variables A list of candidate parameters for this graph. Parameters are nodes in the graph that are shared variables. ''' queue, seen, inputs, params = [node], set(), set(), set() while queue: node = queue.pop() seen.add(node) queue.extend(p for p in node.get_parents() if p not in seen) if not node.get_parents(): if isinstance(node, theano.compile.SharedVariable): params.add(node) elif not isinstance(node, TT.Constant): inputs.add(node) return list(inputs), list(params)
Walk a computation graph and extract root variables. Parameters ---------- node : Theano expression A symbolic Theano expression to walk. Returns ------- inputs : list Theano variables A list of candidate inputs for this graph. Inputs are nodes in the graph with no parents that are not shared and are not constants. params : list of Theano shared variables A list of candidate parameters for this graph. Parameters are nodes in the graph that are shared variables.
entailment
def log(msg, *args, **kwargs): '''Log a message to the console. Parameters ---------- msg : str A string to display on the console. This can contain {}-style formatting commands; the remaining positional and keyword arguments will be used to fill them in. ''' now = datetime.datetime.now() module = 'downhill' if _detailed_callsite: caller = inspect.stack()[1] parts = caller.filename.replace('.py', '').split('/') module = '{}:{}'.format( '.'.join(parts[parts.index('downhill')+1:]), caller.lineno) click.echo(' '.join(( click.style(now.strftime('%Y%m%d'), fg='blue'), click.style(now.strftime('%H%M%S'), fg='cyan'), click.style(module, fg='magenta'), msg.format(*args, **kwargs), )))
Log a message to the console. Parameters ---------- msg : str A string to display on the console. This can contain {}-style formatting commands; the remaining positional and keyword arguments will be used to fill them in.
entailment
def log_param(name, value): '''Log a parameter value to the console. Parameters ---------- name : str Name of the parameter being logged. value : any Value of the parameter being logged. ''' log('setting {} = {}', click.style(str(name)), click.style(str(value), fg='yellow'))
Log a parameter value to the console. Parameters ---------- name : str Name of the parameter being logged. value : any Value of the parameter being logged.
entailment