Dataset columns:
  repository_name            stringlengths 5-67
  func_path_in_repository    stringlengths 4-234
  func_name                  stringlengths 0-314
  whole_func_string          stringlengths 52-3.87M
  language                   stringclasses, 6 values
  func_code_string           stringlengths 52-3.87M
  func_documentation_string  stringlengths 1-47.2k
  func_code_url              stringlengths 85-339
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
read_yaml_config
def read_yaml_config(filename, check=True, osreplace=True, exit=True):
    """
    reads in a yaml file from the specified filename. If check is set to true
    the code will fail if the file does not exist. However if it is set to
    false and the file does not exist, None is returned.
    :param exit: if true is exist with sys exit
    :param osreplace: if true replaces environment variables from the OS
    :param filename: the file name
    :param check: if True fails if the file does not exist,
                  if False and the file does not exist return will be None
    """
    location = filename
    if location is not None:
        location = path_expand(location)

    if not os.path.exists(location) and not check:
        return None

    if check and os.path.exists(location):
        # test for tab in yaml file
        if check_file_for_tabs(location):
            log.error("The file {0} contains tabs. yaml "
                      "Files are not allowed to contain tabs".format(location))
            sys.exit()
        result = None
        try:
            if osreplace:
                result = open(location, 'r').read()
                t = Template(result)
                result = t.substitute(os.environ)

                # data = yaml.safe_load(result)
                data = ordered_load(result, yaml.SafeLoader)
            else:
                f = open(location, "r")
                # data = yaml.safe_load(f)
                # load from the open file handle (result is still None on this branch)
                data = ordered_load(f, yaml.SafeLoader)
                f.close()
            return data
        except Exception as e:
            log.error(
                "The file {0} fails with a yaml read error".format(filename))
            Error.traceback(e)
            sys.exit()
    else:
        log.error("The file {0} does not exist.".format(filename))
        if exit:
            sys.exit()
    return None
python
(func_code_string: identical to the whole_func_string above)
reads in a yaml file from the specified filename. If check is set to true the code will fail if the file does not exist. However if it is set to false and the file does not exist, None is returned. :param exit: if true is exist with sys exit :param osreplace: if true replaces environment variables from the OS :param filename: the file name :param check: if True fails if the file does not exist, if False and the file does not exist return will be None
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L129-L185
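A minimal usage sketch for the function above, with the default check=True. The file name is hypothetical and the import path is inferred from the file path shown (cloudmesh/common/BaseConfigDict.py):

from cloudmesh.common.BaseConfigDict import read_yaml_config

# hypothetical configuration file; with check=True the call exits if it is missing
config = read_yaml_config("cloudmesh.yaml")
print(list(config.keys()))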
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
custom_print
def custom_print(data_structure, indent):
    """
    prints a given data structure such as a dict or ordered dict at a given
    indentation level
    :param data_structure:
    :param indent:
    :return:
    """
    for key, value in data_structure.items():
        print("\n%s%s:" % (' ' * attribute_indent * indent, str(key)), end=' ')
        if isinstance(value, OrderedDict):
            custom_print(value, indent + 1)
        elif isinstance(value, dict):
            custom_print(value, indent + 1)
        else:
            print("%s" % (str(value)), end=' ')
python
(func_code_string: identical to the whole_func_string above)
prints a given data structure such as a dict or ordered dict at a given indentation level :param data_structure: :param indent: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L209-L223
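A usage sketch for custom_print. It assumes the module-level attribute_indent that the function relies on is defined in the same module, and that the function is importable from the path shown above:

from collections import OrderedDict
from cloudmesh.common.BaseConfigDict import custom_print

data = OrderedDict([("cloudmesh",
                     OrderedDict([("profile", OrderedDict([("user", "gregor")])),
                                  ("active", True)]))])
custom_print(data, 0)   # prints nested keys, indented attribute_indent spaces per level
print()                 # custom_print ends without a trailing newline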
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
OrderedJsonEncoder.encode
def encode(self, o, depth=0):
    """
    encode the json object at given depth
    :param o: the object
    :param depth: the depth
    :return: the json encoding
    """
    if isinstance(o, OrderedDict):
        return "{" + ",\n ".join([self.encode(k) + ":" + self.encode(v, depth + 1)
                                  for (k, v) in o.items()]) + "}\n"
    else:
        return simplejson.JSONEncoder.encode(self, o)
python
(func_code_string: identical to the whole_func_string above)
encode the json object at given depth :param o: the object :param depth: the depth :return: the json encoding
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L194-L206
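A sketch of how the encoder above might be used. It assumes OrderedJsonEncoder is importable from the same module and can be constructed without arguments:

from collections import OrderedDict
from cloudmesh.common.BaseConfigDict import OrderedJsonEncoder

d = OrderedDict([("name", "vm1"),
                 ("flavor", OrderedDict([("ram", 2048), ("vcpus", 2)]))])
print(OrderedJsonEncoder().encode(d))   # keys are emitted in insertion order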
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict._update_meta
def _update_meta(self):
    """
    internal function to define the metadata regarding filename, location,
    and prefix.
    """
    for v in ["filename", "location", "prefix"]:
        if "meta" not in self:
            self["meta"] = {}
        self["meta"][v] = self[v]
        del self[v]
python
(func_code_string: identical to the whole_func_string above)
internal function to define the metadata regarding filename, location, and prefix.
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L264-L273
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.load
def load(self, filename):
    """
    Loads the yaml file with the given filename.
    :param filename: the name of the yaml file
    """
    self._set_filename(filename)
    if os.path.isfile(self['location']):
        # d = OrderedDict(read_yaml_config(self['location'], check=True))
        d = read_yaml_config(self['location'], check=True)
        with open(self['location']) as myfile:
            document = myfile.read()
        x = yaml.load(document, Loader=yaml.FullLoader)
        try:
            self.update(d)
        except:
            print("ERROR: can not find", self["location"])
            sys.exit()
    else:
        print(
            "Error while reading and updating the configuration file {:}".format(
                filename))
python
(func_code_string: identical to the whole_func_string above)
Loads the yaml file with the given filename. :param filename: the name of the yaml file
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L284-L307
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.error_keys_not_found
def error_keys_not_found(self, keys):
    """
    Check if the requested keys are found in the dict.
    :param keys: keys to be looked for
    """
    try:
        log.error("Filename: {0}".format(self['meta']['location']))
    except:
        log.error("Filename: {0}".format(self['location']))
    log.error("Key '{0}' does not exist".format('.'.join(keys)))
    indent = ""
    last_index = len(keys) - 1
    for i, k in enumerate(keys):
        if i == last_index:
            log.error(indent + k + ": <- this value is missing")
        else:
            log.error(indent + k + ":")
        indent += " "
python
(func_code_string: identical to the whole_func_string above)
Check if the requested keys are found in the dict. :param keys: keys to be looked for
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L361-L379
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.yaml
def yaml(self):
    """
    returns the yaml output of the dict.
    """
    return ordered_dump(OrderedDict(self),
                        Dumper=yaml.SafeDumper,
                        default_flow_style=False)
python
(func_code_string: identical to the whole_func_string above)
returns the yaml output of the dict.
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L393-L399
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.get
def get(self, *keys):
    """
    returns the dict of the information as read from the yaml file. To
    access the file safely, you can use the keys in the order of the access.
    Example: get("provisioner","policy") will return the value of
    config["provisioner"]["policy"] from the yaml file if it does not exists
    an error will be printing that the value does not exists. Alternatively
    you can use the . notation e.g. get("provisioner.policy")
    """
    if keys is None:
        return self
    if "." in keys[0]:
        keys = keys[0].split('.')
    element = self
    for v in keys:
        try:
            element = element[v]
        except KeyError:
            self.error_keys_not_found(keys)
            # sys.exit()
    return element
python
(func_code_string: identical to the whole_func_string above)
returns the dict of the information as read from the yaml file. To access the file safely, you can use the keys in the order of the access. Example: get("provisioner","policy") will return the value of config["provisioner"]["policy"] from the yaml file if it does not exists an error will be printing that the value does not exists. Alternatively you can use the . notation e.g. get("provisioner.policy")
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L425-L447
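A sketch of the two access styles described in the docstring. Only the get calls mirror the documented example; the way the BaseConfigDict instance is constructed (keyword argument, file name) is an assumption:

from cloudmesh.common.BaseConfigDict import BaseConfigDict

config = BaseConfigDict(filename="cloudmesh.yaml")   # assumed constructor signature
policy = config.get("provisioner", "policy")         # key-by-key access
same = config.get("provisioner.policy")              # dot notation
assert policy == same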
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.set
def set(self, value, *keys):
    """
    Sets the dict of the information as read from the yaml file. To access
    the file safely, you can use the keys in the order of the access.
    Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
    will set the value of config["provisioner"]["policy"] in the yaml file
    if it does not exists an error will be printing that the value does not
    exists. Alternatively you can use the . notation e.g.
    set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
    """
    element = self
    if keys is None:
        return self
    if '.' in keys[0]:
        keys = keys[0].split(".")
    nested_str = ''.join(["['{0}']".format(x) for x in keys])
    # Safely evaluate an expression to see if it is one of the Python
    # literal structures: strings, numbers, tuples, lists, dicts, booleans,
    # and None. Quoted string will be used if it is none of these types.
    try:
        ast.literal_eval(str(value))
        converted = str(value)
    except ValueError:
        converted = "'" + str(value) + "'"
    exec("self" + nested_str + "=" + converted)
    return element
python
(func_code_string: identical to the whole_func_string above)
Sets the dict of the information as read from the yaml file. To access the file safely, you can use the keys in the order of the access. Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy") will set the value of config["provisioner"]["policy"] in the yaml file if it does not exists an error will be printing that the value does not exists. Alternatively you can use the . notation e.g. set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L449-L477
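A companion sketch for set() with a simple string value (non-literal values are quoted before the exec, as the code above shows). The constructor call is again an assumption, and the example presumes the loaded configuration already contains a provisioner section:

from cloudmesh.common.BaseConfigDict import BaseConfigDict

config = BaseConfigDict(filename="cloudmesh.yaml")   # assumed constructor signature
config.set("nova", "provisioner.policy")             # "nova" is not a Python literal, so it gets quoted
print(config.get("provisioner.policy"))              # -> nova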
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
BaseConfigDict.attribute
def attribute(self, keys):
    """
    TODO: document this method
    :param keys:
    """
    if self['meta']['prefix'] is None:
        k = keys
    else:
        k = self['meta']['prefix'] + "." + keys
    return self.get(k)
python
(func_code_string: identical to the whole_func_string above)
TODO: document this method :param keys:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L488-L498
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.flatwrite
def flatwrite(cls, table,
              order=None,
              header=None,
              output="table",
              sort_keys=True,
              show_none="",
              sep="."
              ):
    """
    writes the information given in the table
    :param table: the table of values
    :param order: the order of the columns
    :param header: the header for the columns
    :param output: the format (default is table, values are raw, csv, json, yaml, dict
    :param sort_keys: if true the table is sorted
    :param show_none: passed along to the list or dict printer
    :param sep: uses sep as the separator for csv printer
    :return:
    """
    flat = flatten(table, sep=sep)

    return Printer.write(flat,
                         sort_keys=sort_keys,
                         order=order,
                         header=header,
                         output=output)
python
(func_code_string: identical to the whole_func_string above)
writes the information given in the table :param table: the table of values :param order: the order of the columns :param header: the header for the columns :param output: the format (default is table, values are raw, csv, json, yaml, dict :param sort_keys: if true the table is sorted :param show_none: passed along to the list or dict printer :param sep: uses sep as the separator for csv printer :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L24-L50
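A sketch of how flatwrite might be called on nested records. That flatten() accepts this exact shape is an assumption based on the docstring; the import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

vms = [{"name": "vm1", "os": {"image": "ubuntu", "version": "20.04"}},
       {"name": "vm2", "os": {"image": "centos", "version": "8"}}]
# nested keys are flattened with sep=".", so they can be selected as columns
print(Printer.flatwrite(vms, order=["name", "os.image"],
                        header=["Name", "Image"], output="table"))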
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.write
def write(cls, table,
          order=None,
          header=None,
          output="table",
          sort_keys=True,
          show_none=""
          ):
    """
    writes the information given in the table
    :param table: the table of values
    :param order: the order of the columns
    :param header: the header for the columns
    :param output: the format (default is table, values are raw, csv, json, yaml, dict
    :param sort_keys: if true the table is sorted
    :param show_none: passed along to the list or dict printer
    :return:
    """
    if output == "raw":
        return table
    elif table is None:
        return None
    elif type(table) in [dict, dotdict]:
        return cls.dict(table,
                        order=order,
                        header=header,
                        output=output,
                        sort_keys=sort_keys,
                        show_none=show_none)
    elif type(table) == list:
        return cls.list(table,
                        order=order,
                        header=header,
                        output=output,
                        sort_keys=sort_keys,
                        show_none=show_none)
    else:
        Console.error("unkown type {0}".format(type(table)))
python
(func_code_string: identical to the whole_func_string above)
writes the information given in the table :param table: the table of values :param order: the order of the columns :param header: the header for the columns :param output: the format (default is table, values are raw, csv, json, yaml, dict :param sort_keys: if true the table is sorted :param show_none: passed along to the list or dict printer :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L53-L88
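A small sketch of the dispatch above: a dict goes through cls.dict, a list through cls.list, and output selects the format. The import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

servers = {"vm1": {"status": "ACTIVE", "image": "ubuntu"},
           "vm2": {"status": "BUILD", "image": "centos"}}
print(Printer.write(servers, order=["status", "image"], output="table"))
print(Printer.write(servers, output="json"))
print(Printer.write([{"name": "vm3"}, {"name": "vm4"}], output="table"))  # lists are keyed by index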
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.list
def list(cls, l,
         order=None,
         header=None,
         output="table",
         sort_keys=True,
         show_none=""
         ):
    """
    :param l: l is a list not a dict
    :param order:
    :param header:
    :param output:
    :param sort_keys:
    :param show_none:
    :return:
    """
    d = {}
    count = 0
    for entry in l:
        name = str(count)
        d[name] = entry
        count += 1
    return cls.dict(d,
                    order=order,
                    header=header,
                    sort_keys=sort_keys,
                    output=output,
                    show_none=show_none)
python
(func_code_string: identical to the whole_func_string above)
:param l: l is a list not a dict :param order: :param header: :param output: :param sort_keys: :param show_none: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L91-L119
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.dict
def dict(cls, d, order=None, header=None, output="table",
         sort_keys=True,
         show_none=""):
    """
    TODO
    :param d: A a dict with dicts of the same type.
    :type d: dict
    :param order:The order in which the columns are printed.
                 The order is specified by the key names of the dict.
    :type order:
    :param header: The Header of each of the columns
    :type header: list or tuple of field names
    :param output: type of output (table, csv, json, yaml or dict)
    :type output: string
    :param sort_keys:
    :type sort_keys: bool
    :param show_none: prints None if True for None values otherwise ""
    :type show_none: bool
    :return:
    """
    if output == "table":
        if d == {}:
            return None
        else:
            return cls.dict_table(d, order=order, header=header, sort_keys=sort_keys)
    elif output == "csv":
        return cls.csv(d, order=order, header=header, sort_keys=sort_keys)
    elif output == "json":
        return json.dumps(d, sort_keys=sort_keys, indent=4)
    elif output == "yaml":
        return yaml.dump(convert_from_unicode(d), default_flow_style=False)
    elif output == "dict":
        return d
    else:
        return "UNKOWN FORMAT. Please use table, csv, json, yaml, dict."
python
(func_code_string: identical to the whole_func_string above)
TODO :param d: A a dict with dicts of the same type. :type d: dict :param order:The order in which the columns are printed. The order is specified by the key names of the dict. :type order: :param header: The Header of each of the columns :type header: list or tuple of field names :param output: type of output (table, csv, json, yaml or dict) :type output: string :param sort_keys: :type sort_keys: bool :param show_none: prints None if True for None values otherwise "" :type show_none: bool :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L122-L166
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.csv
def csv(cls, d, order=None, header=None, sort_keys=True):
    """
    prints a table in csv format
    :param d: A a dict with dicts of the same type.
    :type d: dict
    :param order:The order in which the columns are printed.
                 The order is specified by the key names of the dict.
    :type order:
    :param header: The Header of each of the columns
    :type header: list or tuple of field names
    :param sort_keys: TODO: not yet implemented
    :type sort_keys: bool
    :return: a string representing the table in csv format
    """
    first_element = list(d)[0]

    def _keys():
        return list(d[first_element])

    # noinspection PyBroadException
    def _get(element, key):
        try:
            tmp = str(d[element][key])
        except:
            tmp = ' '
        return tmp

    if d is None or d == {}:
        return None

    if order is None:
        order = _keys()

    if header is None and order is not None:
        header = order
    elif header is None:
        header = _keys()

    table = ""
    content = []
    for attribute in order:
        content.append(attribute)
    table = table + ",".join([str(e) for e in content]) + "\n"
    for job in d:
        content = []
        for attribute in order:
            try:
                content.append(d[job][attribute])
            except:
                content.append("None")
        table = table + ",".join([str(e) for e in content]) + "\n"
    return table
python
(func_code_string: identical to the whole_func_string above)
prints a table in csv format :param d: A a dict with dicts of the same type. :type d: dict :param order:The order in which the columns are printed. The order is specified by the key names of the dict. :type order: :param header: The Header of each of the columns :type header: list or tuple of field names :param sort_keys: TODO: not yet implemented :type sort_keys: bool :return: a string representing the table in csv format
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L169-L226
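A sketch of the csv output. As the code above shows, the header row comes from order (or from the keys of the first entry) and missing attributes are rendered as None; the import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

jobs = {"job1": {"state": "done", "host": "node1"},
        "job2": {"state": "queued"}}
print(Printer.csv(jobs, order=["state", "host"]))
# state,host
# done,node1
# queued,None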
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.dict_table
def dict_table(cls, d, order=None, header=None, sort_keys=True,
               show_none="", max_width=40):
    """prints a pretty table from an dict of dicts
    :param d: A a dict with dicts of the same type.
              Each key will be a column
    :param order: The order in which the columns are printed.
                  The order is specified by the key names of the dict.
    :param header: The Header of each of the columns
    :type header: A list of string
    :param sort_keys: Key(s) of the dict to be used for sorting.
                      This specify the column(s) in the table for sorting.
    :type sort_keys: string or a tuple of string (for sorting with multiple columns)
    :param show_none: prints None if True for None values otherwise ""
    :type show_none: bool
    :param max_width: maximum width for a cell
    :type max_width: int
    """

    def _keys():
        all_keys = []
        for e in d:
            keys = d[e].keys()
            all_keys.extend(keys)
        return list(set(all_keys))

    # noinspection PyBroadException
    def _get(item, key):
        try:
            tmp = str(d[item][key])
            if tmp == "None":
                tmp = show_none
        except:
            tmp = ' '
        return tmp

    if d is None or d == {}:
        return None

    if order is None:
        order = _keys()

    if header is None and order is not None:
        header = order
    elif header is None:
        header = _keys()

    x = PrettyTable(header)
    x.max_width = max_width

    if sort_keys:
        if type(sort_keys) is str:
            sorted_list = sorted(d, key=lambda x: d[x][sort_keys])
        elif type(sort_keys) == tuple:
            sorted_list = sorted(d, key=lambda x: tuple(
                [d[x][sort_key] for sort_key in sort_keys]))
        else:
            sorted_list = d
    else:
        sorted_list = d

    for element in sorted_list:
        values = []
        for key in order:
            values.append(_get(element, key))
        x.add_row(values)
    x.align = "l"
    return x
python
(func_code_string: identical to the whole_func_string above)
prints a pretty table from an dict of dicts :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param show_none: prints None if True for None values otherwise "" :type show_none: bool :param max_width: maximum width for a cell :type max_width: int
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L229-L300
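A usage sketch for dict_table. Here sort_keys is given as a column name and show_none replaces cells whose value prints as "None", both as described in the docstring; the import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

vms = {"a": {"name": "a", "status": "ACTIVE", "ip": None},
       "b": {"name": "b", "status": "BUILD", "ip": "10.0.0.2"}}
print(Printer.dict_table(vms, order=["name", "status", "ip"],
                         sort_keys="name", show_none="-"))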
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.attribute
def attribute(cls, d, header=None, order=None, sort_keys=True,
              output="table"):
    """prints a attribute/key value table
    :param d: A a dict with dicts of the same type.
              Each key will be a column
    :param order: The order in which the columns are printed.
                  The order is specified by the key names of the dict.
    :param header: The Header of each of the columns
    :type header: A list of string
    :param sort_keys: Key(s) of the dict to be used for sorting.
                      This specify the column(s) in the table for sorting.
    :type sort_keys: string or a tuple of string (for sorting with multiple columns)
    :param output: the output format table, csv, dict, json
    """
    if header is None:
        header = ["Attribute", "Value"]
    if output == "table":
        x = PrettyTable(header)
        if order is not None:
            sorted_list = order
        else:
            sorted_list = list(d)
        if sort_keys:
            sorted_list = sorted(d)

        for key in sorted_list:
            if type(d[key]) == dict:
                values = d[key]
                x.add_row([key, "+"])
                for e in values:
                    x.add_row([" -", "{}: {}".format(e, values[e])])
            elif type(d[key]) == list:
                values = list(d[key])
                x.add_row([key, "+"])
                for e in values:
                    x.add_row([" -", e])
            else:
                x.add_row([key, d[key] or ""])
        x.align = "l"
        return x
    else:
        return cls.dict({output: d}, output=output)
python
(func_code_string: identical to the whole_func_string above)
prints a attribute/key value table :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param output: the output format table, csv, dict, json
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L303-L350
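A sketch for the attribute/value table above; dict and list values are expanded into "+" rows with indented "-" continuation rows, as the code shows. The import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

info = {"name": "vm1",
        "flavor": {"ram": 2048, "vcpus": 2},
        "tags": ["web", "production"]}
print(Printer.attribute(info))
print(Printer.attribute(info, output="json"))   # falls through to cls.dict({output: d}, ...)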
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.print_list
def print_list(cls, l, output='table'):
    """
    prints a list
    :param l: the list
    :param output: the output, default is a table
    :return:
    """

    def dict_from_list(l):
        """
        returns a dict from a list for printing
        :param l: the list
        :return:
        """
        d = dict([(idx, item) for idx, item in enumerate(l)])
        return d

    if output == 'table':
        x = PrettyTable(["Index", "Host"])
        for (idx, item) in enumerate(l):
            x.add_row([idx, item])
        x.align = "l"
        x.align["Index"] = "r"
        return x
    elif output == 'csv':
        return ",".join(l)
    elif output == 'dict':
        d = dict_from_list(l)
        return d
    elif output == 'json':
        d = dict_from_list(l)
        result = json.dumps(d, indent=4)
        return result
    elif output == 'yaml':
        d = dict_from_list(l)
        result = yaml.dump(d, default_flow_style=False)
        return result
    elif output == 'txt':
        return "\n".join(l)
python
(func_code_string: identical to the whole_func_string above)
prints a list :param l: the list :param output: the output, default is a table :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L353-L391
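A sketch for print_list. The table output hard-codes the column names Index and Host, which suggests it was written for host lists; the import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

hosts = ["host-a", "host-b", "host-c"]
print(Printer.print_list(hosts))                  # PrettyTable with Index/Host columns
print(Printer.print_list(hosts, output="csv"))    # host-a,host-b,host-c
print(Printer.print_list(hosts, output="yaml"))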
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
Printer.row_table
def row_table(cls, d, order=None, labels=None):
    """prints a pretty table from data in the dict.
    :param d: A dict to be printed
    :param order: The order in which the columns are printed.
                  The order is specified by the key names of the dict.
    :param labels: The array of labels for the column
    """
    # header
    header = list(d)
    x = PrettyTable(labels)
    if order is None:
        order = header
    for key in order:
        value = d[key]
        if type(value) == list:
            x.add_row([key, value[0]])
            for element in value[1:]:
                x.add_row(["", element])
        elif type(value) == dict:
            value_keys = list(value)
            first_key = value_keys[0]
            rest_keys = value_keys[1:]
            x.add_row(
                [key, "{0} : {1}".format(first_key, value[first_key])])
            for element in rest_keys:
                x.add_row(["", "{0} : {1}".format(element, value[element])])
        else:
            x.add_row([key, value])
    x.align = "l"
    return x
python
(func_code_string: identical to the whole_func_string above)
prints a pretty table from data in the dict. :param d: A dict to be printed :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param labels: The array of labels for the column
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L394-L424
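A sketch for row_table; labels supplies the two column headings, and list or dict values are spread over continuation rows as the code above shows. The import path is inferred from the file path above:

from cloudmesh.common.Printer import Printer

record = {"name": "vm1",
          "addresses": ["10.0.0.2", "192.168.1.5"],
          "flavor": {"ram": 2048, "vcpus": 2}}
print(Printer.row_table(record, labels=["Attribute", "Value"]))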
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/authorized_keys.py
get_fingerprint_from_public_key
def get_fingerprint_from_public_key(pubkey):
    """Generate the fingerprint of a public key
    :param str pubkey: the value of the public key
    :returns: fingerprint
    :rtype: str
    """
    # TODO: why is there a tmpdir?
    with tempdir() as workdir:
        key = os.path.join(workdir, 'key.pub')
        with open(key, 'w') as fd:
            fd.write(pubkey)

        cmd = [
            'ssh-keygen',
            '-l',
            '-f', key,
        ]

        p = Subprocess(cmd)
        output = p.stdout.strip()
        bits, fingerprint, _ = output.split(' ', 2)
        return fingerprint
python
(func_code_string: identical to the whole_func_string above)
Generate the fingerprint of a public key :param str pubkey: the value of the public key :returns: fingerprint :rtype: str
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/authorized_keys.py#L17-L40
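A sketch of calling the helper above. It shells out to ssh-keygen, so that binary has to be on PATH; the key file path is just an example, and the import path is inferred from the file path above:

import os.path
from cloudmesh.common.ssh.authorized_keys import get_fingerprint_from_public_key

with open(os.path.expanduser("~/.ssh/id_rsa.pub")) as f:
    pubkey = f.read().strip()
print(get_fingerprint_from_public_key(pubkey))   # format depends on the ssh-keygen version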
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/authorized_keys.py
AuthorizedKeys.load
def load(cls, path):
    """
    load the keys from a path
    :param path: the filename (path) in which we find the keys
    :return:
    """
    auth = cls()
    with open(path) as fd:
        for pubkey in itertools.imap(str.strip, fd):
            # skip empty lines
            if not pubkey:
                continue
            auth.add(pubkey)
    return auth
python
(func_code_string: identical to the whole_func_string above)
load the keys from a path :param path: the filename (path) in which we find the keys :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/authorized_keys.py#L52-L66
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/authorized_keys.py
AuthorizedKeys.add
def add(self, pubkey):
    """
    add a public key.
    :param pubkey: the filename to the public key
    :return:
    """
    f = get_fingerprint_from_public_key(pubkey)
    if f not in self._keys:
        self._order[len(self._keys)] = f
        self._keys[f] = pubkey
python
(func_code_string: identical to the whole_func_string above)
add a public key. :param pubkey: the filename to the public key :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/authorized_keys.py#L68-L77
ella/ella
ella/photos/newman_admin.py
PhotoAdmin.thumb
def thumb(self, obj):
    """
    Generates html and thumbnails for admin site.
    """
    format, created = Format.objects.get_or_create(name='newman_thumb',
        defaults={
            'max_width': 100,
            'max_height': 100,
            'flexible_height': False,
            'stretch': False,
            'nocrop': True,
        })
    if created:
        format.sites = Site.objects.all()

    info = obj.get_formated_photo(format)
    return '<a href="%(href)s"><img src="%(src)s"></a>' % {
        'href': '%s/' % obj.pk,
        'src': info['url']
    }
python
(func_code_string: identical to the whole_func_string above)
Generates html and thumbnails for admin site.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/newman_admin.py#L125-L146
ssalentin/plip
plip/plipcmd.py
process_pdb
def process_pdb(pdbfile, outpath, as_string=False, outputprefix='report'):
    """Analysis of a single PDB file. Can generate textual reports XML, PyMOL session files
    and images as output."""
    if not as_string:
        startmessage = '\nStarting analysis of %s\n' % pdbfile.split('/')[-1]
    else:
        startmessage = "Starting analysis from stdin.\n"
    write_message(startmessage)
    write_message('=' * len(startmessage) + '\n')
    mol = PDBComplex()
    mol.output_path = outpath
    mol.load_pdb(pdbfile, as_string=as_string)
    # #@todo Offers possibility for filter function from command line (by ligand chain, position, hetid)
    for ligand in mol.ligands:
        mol.characterize_complex(ligand)

    create_folder_if_not_exists(outpath)

    # Generate the report files
    streport = StructureReport(mol, outputprefix=outputprefix)

    config.MAXTHREADS = min(config.MAXTHREADS, len(mol.interaction_sets))

    ######################################
    # PyMOL Visualization (parallelized) #
    ######################################

    if config.PYMOL or config.PICS:
        try:
            from plip.modules.visualize import visualize_in_pymol
        except ImportError:
            from modules.visualize import visualize_in_pymol
        complexes = [VisualizerData(mol, site) for site in sorted(mol.interaction_sets)
                     if not len(mol.interaction_sets[site].interacting_res) == 0]
        if config.MAXTHREADS > 1:
            write_message('\nGenerating visualizations in parallel on %i cores ...' % config.MAXTHREADS)
            parfn = parallel_fn(visualize_in_pymol)
            parfn(complexes, processes=config.MAXTHREADS)
        else:
            [visualize_in_pymol(plcomplex) for plcomplex in complexes]

    if config.XML:  # Generate report in xml format
        streport.write_xml(as_string=config.STDOUT)

    if config.TXT:  # Generate report in txt (rst) format
        streport.write_txt(as_string=config.STDOUT)
python
(func_code_string: identical to the whole_func_string above)
Analysis of a single PDB file. Can generate textual reports XML, PyMOL session files and images as output.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L53-L97
ssalentin/plip
plip/plipcmd.py
download_structure
def download_structure(inputpdbid):
    """Given a PDB ID, downloads the corresponding PDB structure.
    Checks for validity of ID and handles error while downloading.
    Returns the path of the downloaded file."""
    try:
        if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':
            sysexit(3, 'Invalid PDB ID (Wrong format)\n')
        pdbfile, pdbid = fetch_pdb(inputpdbid.lower())
        pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))
        create_folder_if_not_exists(config.BASEPATH)
        with open(pdbpath, 'w') as g:
            g.write(pdbfile)
        write_message('file downloaded as %s\n\n' % pdbpath)
        return pdbpath, pdbid
    except ValueError:  # Invalid PDB ID, cannot fetch from RCBS server
        sysexit(3, 'Invalid PDB ID (Entry does not exist)\n')
python
(func_code_string: identical to the whole_func_string above)
Given a PDB ID, downloads the corresponding PDB structure. Checks for validity of ID and handles error while downloading. Returns the path of the downloaded file.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L100-L116
ssalentin/plip
plip/plipcmd.py
remove_duplicates
def remove_duplicates(slist):
    """Checks input lists for duplicates and returns a list with unique entries"""
    unique = list(set(slist))
    difference = len(slist) - len(unique)
    if difference == 1:
        write_message("Removed one duplicate entry from input list.\n")
    if difference > 1:
        write_message("Removed %i duplicate entries from input list.\n" % difference)
    return unique
python
(func_code_string: identical to the whole_func_string above)
Checks input lists for duplicates and returns a list with unique entries
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L119-L128
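A short sketch of remove_duplicates; going through set() does not preserve the input order. The import assumes plip is installed and that plip.plipcmd can be imported together with its own dependencies:

from plip.plipcmd import remove_duplicates

pdbids = ["1vsn", "3pxr", "1vsn", "2reg", "3pxr"]
unique = remove_duplicates(pdbids)   # reports "Removed 2 duplicate entries ..." via write_message
print(sorted(unique))                # ['1vsn', '2reg', '3pxr']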
ssalentin/plip
plip/plipcmd.py
main
def main(inputstructs, inputpdbids):
    """Main function. Calls functions for processing, report generation and visualization."""
    pdbid, pdbpath = None, None
    # #@todo For multiprocessing, implement better stacktracing for errors
    # Print title and version
    title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
    write_message('\n' + '*' * len(title) + '\n')
    write_message(title)
    write_message('\n' + '*' * len(title) + '\n\n')
    outputprefix = config.OUTPUTFILENAME

    if inputstructs is not None:  # Process PDB file(s)
        num_structures = len(inputstructs)
        inputstructs = remove_duplicates(inputstructs)
        read_from_stdin = False
        for inputstruct in inputstructs:
            if inputstruct == '-':
                inputstruct = sys.stdin.read()
                read_from_stdin = True
                if config.RAWSTRING:
                    if sys.version_info < (3,):
                        inputstruct = bytes(inputstruct).decode('unicode_escape')
                    else:
                        inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
            else:
                if os.path.getsize(inputstruct) == 0:
                    sysexit(2, 'Empty PDB file\n')  # Exit if input file is empty
                if num_structures > 1:
                    basename = inputstruct.split('.')[-2].split('/')[-1]
                    config.OUTPATH = '/'.join([config.BASEPATH, basename])
                    outputprefix = 'report'
            process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
    else:  # Try to fetch the current PDB structure(s) directly from the RCBS server
        num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
        for inputpdbid in inputpdbids:
            pdbpath, pdbid = download_structure(inputpdbid)
            if num_pdbids > 1:
                config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
                outputprefix = 'report'
            process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)

    if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
        if config.BASEPATH in ['.', './']:
            write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
        else:
            write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
python
(func_code_string: identical to the whole_func_string above)
Main function. Calls functions for processing, report generation and visualization.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L131-L177
ssalentin/plip
plip/plipcmd.py
main_init
def main_init():
    """Parse command line arguments and start main script for analysis."""
    parser = ArgumentParser(prog="PLIP", description=descript)
    pdbstructure = parser.add_mutually_exclusive_group(required=True)  # Needs either PDB ID or file
    # '-' as file name reads from stdin
    pdbstructure.add_argument("-f", "--file", dest="input", nargs="+", help="Set input file, '-' reads from stdin")
    pdbstructure.add_argument("-i", "--input", dest="pdbid", nargs="+")
    outputgroup = parser.add_mutually_exclusive_group(required=False)  # Needs either outpath or stdout
    outputgroup.add_argument("-o", "--out", dest="outpath", default="./")
    outputgroup.add_argument("-O", "--stdout", dest="stdout", action="store_true", default=False,
                             help="Write to stdout instead of file")
    parser.add_argument("--rawstring", dest="use_raw_string", default=False, action="store_true",
                        help="Use Python raw strings for stdout and stdin")
    parser.add_argument("-v", "--verbose", dest="verbose", default=False, help="Set verbose mode", action="store_true")
    parser.add_argument("-p", "--pics", dest="pics", default=False, help="Additional pictures", action="store_true")
    parser.add_argument("-x", "--xml", dest="xml", default=False, help="Generate report file in XML format",
                        action="store_true")
    parser.add_argument("-t", "--txt", dest="txt", default=False, help="Generate report file in TXT (RST) format",
                        action="store_true")
    parser.add_argument("-y", "--pymol", dest="pymol", default=False, help="Additional PyMOL session files",
                        action="store_true")
    parser.add_argument("--maxthreads", dest="maxthreads", default=multiprocessing.cpu_count(),
                        help="Set maximum number of main threads (number of binding sites processed simultaneously)."
                             "If not set, PLIP uses all available CPUs if possible.", type=int)
    parser.add_argument("--breakcomposite", dest="breakcomposite", default=False,
                        help="Don't combine ligand fragments with covalent bonds but treat them as single ligands for the analysis.",
                        action="store_true")
    parser.add_argument("--altlocation", dest="altlocation", default=False,
                        help="Also consider alternate locations for atoms (e.g. alternate conformations).",
                        action="store_true")
    parser.add_argument("--debug", dest="debug", default=False, help="Turn on DEBUG mode with extended log.",
                        action="store_true")
    parser.add_argument("--nofix", dest="nofix", default=False, help="Turns off fixing of PDB files.",
                        action="store_true")
    parser.add_argument("--nofixfile", dest="nofixfile", default=False,
                        help="Turns off writing files for fixed PDB files.", action="store_true")
    parser.add_argument("--nopdbcanmap", dest="nopdbcanmap", default=False,
                        help="Turns off calculation of mapping between canonical and PDB atom order for ligands.",
                        action="store_true")
    parser.add_argument("--dnareceptor", dest="dnareceptor", default=False,
                        help="Uses the DNA instead of the protein as a receptor for interactions.",
                        action="store_true")
    parser.add_argument("--name", dest="outputfilename", default="report",
                        help="Set a filename for the report TXT and XML files. "
                             "Will only work when processing single structures.")
    ligandtype = parser.add_mutually_exclusive_group()  # Either peptide/inter or intra mode
    ligandtype.add_argument("--peptides", "--inter", dest="peptides", default=[],
                            help="Allows to define one or multiple chains as peptide ligands or to detect inter-chain contacts",
                            nargs="+")
    ligandtype.add_argument("--intra", dest="intra", help="Allows to define one chain to analyze intra-chain contacts.")
    parser.add_argument("--keepmod", dest="keepmod", default=False, help="Keep modified residues as ligands",
                        action="store_true")
    # Optional threshold arguments, not shown in help
    thr = namedtuple('threshold', 'name type')
    thresholds = [thr(name='aromatic_planarity', type='angle'),
                  thr(name='hydroph_dist_max', type='distance'), thr(name='hbond_dist_max', type='distance'),
                  thr(name='hbond_don_angle_min', type='angle'), thr(name='pistack_dist_max', type='distance'),
                  thr(name='pistack_ang_dev', type='other'), thr(name='pistack_offset_max', type='distance'),
                  thr(name='pication_dist_max', type='distance'), thr(name='saltbridge_dist_max', type='distance'),
                  thr(name='halogen_dist_max', type='distance'), thr(name='halogen_acc_angle', type='angle'),
                  thr(name='halogen_don_angle', type='angle'), thr(name='halogen_angle_dev', type='other'),
                  thr(name='water_bridge_mindist', type='distance'), thr(name='water_bridge_maxdist', type='distance'),
                  thr(name='water_bridge_omega_min', type='angle'), thr(name='water_bridge_omega_max', type='angle'),
                  thr(name='water_bridge_theta_min', type='angle')]
    for t in thresholds:
        parser.add_argument('--%s' % t.name, dest=t.name, type=lambda val: threshold_limiter(parser, val),
                            help=argparse.SUPPRESS)

    arguments = parser.parse_args()
    config.VERBOSE = True if (arguments.verbose or arguments.debug) else False
    config.DEBUG = True if arguments.debug else False
    config.MAXTHREADS = arguments.maxthreads
    config.XML = arguments.xml
    config.TXT = arguments.txt
    config.PICS = arguments.pics
    config.PYMOL = arguments.pymol
    config.STDOUT = arguments.stdout
    config.RAWSTRING = arguments.use_raw_string
    config.OUTPATH = arguments.outpath
    config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/'])
                                     if not config.OUTPATH.endswith('/') else config.OUTPATH)
    config.BASEPATH = config.OUTPATH  # Used for batch processing
    config.BREAKCOMPOSITE = arguments.breakcomposite
    config.ALTLOC = arguments.altlocation
    config.PEPTIDES = arguments.peptides
    config.INTRA = arguments.intra
    config.NOFIX = arguments.nofix
    config.NOFIXFILE = arguments.nofixfile
    config.NOPDBCANMAP = arguments.nopdbcanmap
    config.KEEPMOD = arguments.keepmod
    config.DNARECEPTOR = arguments.dnareceptor
    config.OUTPUTFILENAME = arguments.outputfilename
    # Make sure we have pymol with --pics and --pymol
    if config.PICS or config.PYMOL:
        try:
            import pymol
        except ImportError:
            write_message("PyMOL is required for --pics and --pymol.\n", mtype='error')
            raise
    # Assign values to global thresholds
    for t in thresholds:
        tvalue = getattr(arguments, t.name)
        if tvalue is not None:
            if t.type == 'angle' and not 0 < tvalue < 180:  # Check value for angle thresholds
                parser.error("Threshold for angles need to have values within 0 and 180.")
            if t.type == 'distance':
                if tvalue > 10:  # Check value for angle thresholds
                    parser.error("Threshold for distances must not be larger than 10 Angstrom.")
                elif tvalue > config.BS_DIST + 1:  # Dynamically adapt the search space for binding site residues
                    config.BS_DIST = tvalue + 1
            setattr(config, t.name.upper(), tvalue)
    # Check additional conditions for interdependent thresholds
    if not config.HALOGEN_ACC_ANGLE > config.HALOGEN_ANGLE_DEV:
        parser.error("The halogen acceptor angle has to be larger than the halogen angle deviation.")
    if not config.HALOGEN_DON_ANGLE > config.HALOGEN_ANGLE_DEV:
        parser.error("The halogen donor angle has to be larger than the halogen angle deviation.")
    if not config.WATER_BRIDGE_MINDIST < config.WATER_BRIDGE_MAXDIST:
        parser.error("The water bridge minimum distance has to be smaller than the water bridge maximum distance.")
    if not config.WATER_BRIDGE_OMEGA_MIN < config.WATER_BRIDGE_OMEGA_MAX:
        parser.error("The water bridge omega minimum angle has to be smaller than the water bridge omega maximum angle")
    expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
    main(expanded_path, arguments.pdbid)
python
def main_init():
    """Parse command line arguments and start main script for analysis."""
    parser = ArgumentParser(prog="PLIP", description=descript)
    pdbstructure = parser.add_mutually_exclusive_group(required=True)  # Needs either PDB ID or file
    # '-' as file name reads from stdin
    pdbstructure.add_argument("-f", "--file", dest="input", nargs="+", help="Set input file, '-' reads from stdin")
    pdbstructure.add_argument("-i", "--input", dest="pdbid", nargs="+")
    outputgroup = parser.add_mutually_exclusive_group(required=False)  # Needs either outpath or stdout
    outputgroup.add_argument("-o", "--out", dest="outpath", default="./")
    outputgroup.add_argument("-O", "--stdout", dest="stdout", action="store_true", default=False, help="Write to stdout instead of file")
    parser.add_argument("--rawstring", dest="use_raw_string", default=False, action="store_true", help="Use Python raw strings for stdout and stdin")
    parser.add_argument("-v", "--verbose", dest="verbose", default=False, help="Set verbose mode", action="store_true")
    parser.add_argument("-p", "--pics", dest="pics", default=False, help="Additional pictures", action="store_true")
    parser.add_argument("-x", "--xml", dest="xml", default=False, help="Generate report file in XML format", action="store_true")
    parser.add_argument("-t", "--txt", dest="txt", default=False, help="Generate report file in TXT (RST) format", action="store_true")
    parser.add_argument("-y", "--pymol", dest="pymol", default=False, help="Additional PyMOL session files", action="store_true")
    parser.add_argument("--maxthreads", dest="maxthreads", default=multiprocessing.cpu_count(), help="Set maximum number of main threads (number of binding sites processed simultaneously)." "If not set, PLIP uses all available CPUs if possible.", type=int)
    parser.add_argument("--breakcomposite", dest="breakcomposite", default=False, help="Don't combine ligand fragments with covalent bonds but treat them as single ligands for the analysis.", action="store_true")
    parser.add_argument("--altlocation", dest="altlocation", default=False, help="Also consider alternate locations for atoms (e.g. alternate conformations).", action="store_true")
    parser.add_argument("--debug", dest="debug", default=False, help="Turn on DEBUG mode with extended log.", action="store_true")
    parser.add_argument("--nofix", dest="nofix", default=False, help="Turns off fixing of PDB files.", action="store_true")
    parser.add_argument("--nofixfile", dest="nofixfile", default=False, help="Turns off writing files for fixed PDB files.", action="store_true")
    parser.add_argument("--nopdbcanmap", dest="nopdbcanmap", default=False, help="Turns off calculation of mapping between canonical and PDB atom order for ligands.", action="store_true")
    parser.add_argument("--dnareceptor", dest="dnareceptor", default=False, help="Uses the DNA instead of the protein as a receptor for interactions.", action="store_true")
    parser.add_argument("--name", dest="outputfilename", default="report", help="Set a filename for the report TXT and XML files. Will only work when processing single structures.")
    ligandtype = parser.add_mutually_exclusive_group()  # Either peptide/inter or intra mode
    ligandtype.add_argument("--peptides", "--inter", dest="peptides", default=[], help="Allows to define one or multiple chains as peptide ligands or to detect inter-chain contacts", nargs="+")
    ligandtype.add_argument("--intra", dest="intra", help="Allows to define one chain to analyze intra-chain contacts.")
    parser.add_argument("--keepmod", dest="keepmod", default=False, help="Keep modified residues as ligands", action="store_true")
    # Optional threshold arguments, not shown in help
    thr = namedtuple('threshold', 'name type')
    thresholds = [thr(name='aromatic_planarity', type='angle'),
                  thr(name='hydroph_dist_max', type='distance'),
                  thr(name='hbond_dist_max', type='distance'),
                  thr(name='hbond_don_angle_min', type='angle'),
                  thr(name='pistack_dist_max', type='distance'),
                  thr(name='pistack_ang_dev', type='other'),
                  thr(name='pistack_offset_max', type='distance'),
                  thr(name='pication_dist_max', type='distance'),
                  thr(name='saltbridge_dist_max', type='distance'),
                  thr(name='halogen_dist_max', type='distance'),
                  thr(name='halogen_acc_angle', type='angle'),
                  thr(name='halogen_don_angle', type='angle'),
                  thr(name='halogen_angle_dev', type='other'),
                  thr(name='water_bridge_mindist', type='distance'),
                  thr(name='water_bridge_maxdist', type='distance'),
                  thr(name='water_bridge_omega_min', type='angle'),
                  thr(name='water_bridge_omega_max', type='angle'),
                  thr(name='water_bridge_theta_min', type='angle')]
    for t in thresholds:
        parser.add_argument('--%s' % t.name, dest=t.name, type=lambda val: threshold_limiter(parser, val), help=argparse.SUPPRESS)
    arguments = parser.parse_args()
    config.VERBOSE = True if (arguments.verbose or arguments.debug) else False
    config.DEBUG = True if arguments.debug else False
    config.MAXTHREADS = arguments.maxthreads
    config.XML = arguments.xml
    config.TXT = arguments.txt
    config.PICS = arguments.pics
    config.PYMOL = arguments.pymol
    config.STDOUT = arguments.stdout
    config.RAWSTRING = arguments.use_raw_string
    config.OUTPATH = arguments.outpath
    config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/']) if not config.OUTPATH.endswith('/') else config.OUTPATH)
    config.BASEPATH = config.OUTPATH  # Used for batch processing
    config.BREAKCOMPOSITE = arguments.breakcomposite
    config.ALTLOC = arguments.altlocation
    config.PEPTIDES = arguments.peptides
    config.INTRA = arguments.intra
    config.NOFIX = arguments.nofix
    config.NOFIXFILE = arguments.nofixfile
    config.NOPDBCANMAP = arguments.nopdbcanmap
    config.KEEPMOD = arguments.keepmod
    config.DNARECEPTOR = arguments.dnareceptor
    config.OUTPUTFILENAME = arguments.outputfilename
    # Make sure we have pymol with --pics and --pymol
    if config.PICS or config.PYMOL:
        try:
            import pymol
        except ImportError:
            write_message("PyMOL is required for --pics and --pymol.\n", mtype='error')
            raise
    # Assign values to global thresholds
    for t in thresholds:
        tvalue = getattr(arguments, t.name)
        if tvalue is not None:
            if t.type == 'angle' and not 0 < tvalue < 180:  # Check value for angle thresholds
                parser.error("Threshold for angles need to have values within 0 and 180.")
            if t.type == 'distance':
                if tvalue > 10:  # Check value for angle thresholds
                    parser.error("Threshold for distances must not be larger than 10 Angstrom.")
                elif tvalue > config.BS_DIST + 1:  # Dynamically adapt the search space for binding site residues
                    config.BS_DIST = tvalue + 1
            setattr(config, t.name.upper(), tvalue)
    # Check additional conditions for interdependent thresholds
    if not config.HALOGEN_ACC_ANGLE > config.HALOGEN_ANGLE_DEV:
        parser.error("The halogen acceptor angle has to be larger than the halogen angle deviation.")
    if not config.HALOGEN_DON_ANGLE > config.HALOGEN_ANGLE_DEV:
        parser.error("The halogen donor angle has to be larger than the halogen angle deviation.")
    if not config.WATER_BRIDGE_MINDIST < config.WATER_BRIDGE_MAXDIST:
        parser.error("The water bridge minimum distance has to be smaller than the water bridge maximum distance.")
    if not config.WATER_BRIDGE_OMEGA_MIN < config.WATER_BRIDGE_OMEGA_MAX:
        parser.error("The water bridge omega minimum angle has to be smaller than the water bridge omega maximum angle")
    expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
    main(expanded_path, arguments.pdbid)
Parse command line arguments and start main script for analysis.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L184-L307
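A minimal standalone sketch (not PLIP's actual API) of the threshold checks enforced in main_init above, using a hypothetical validate_threshold() helper; the default binding-site radius of 7.5 Angstrom is an assumption, not read from config.

def validate_threshold(ttype, value, bs_dist=7.5):
    """Return a possibly enlarged binding-site radius, or raise on an invalid threshold."""
    if ttype == 'angle' and not 0 < value < 180:
        raise ValueError("Threshold for angles need to have values within 0 and 180.")
    if ttype == 'distance':
        if value > 10:
            raise ValueError("Threshold for distances must not be larger than 10 Angstrom.")
        if value > bs_dist + 1:
            bs_dist = value + 1  # enlarge the search space for binding site residues
    return bs_dist

print(validate_threshold('distance', 4.1))  # 7.5 -- within the default search radius
print(validate_threshold('distance', 9.0))  # 10.0 -- radius adapted to threshold + 1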
ella/ella
ella/photos/admin.py
FormatedPhotoForm.clean
def clean(self): """ Validation function that checks the dimensions of the crop whether it fits into the original and the format. """ data = self.cleaned_data photo = data['photo'] if ( (data['crop_left'] > photo.width) or (data['crop_top'] > photo.height) or ((data['crop_left'] + data['crop_width']) > photo.width) or ((data['crop_top'] + data['crop_height']) > photo.height) ): # raise forms.ValidationError, ugettext("The specified crop coordinates do not fit into the source photo.") raise ValidationError(ugettext("The specified crop coordinates do not fit into the source photo.")) return data
python
def clean(self): """ Validation function that checks the dimensions of the crop whether it fits into the original and the format. """ data = self.cleaned_data photo = data['photo'] if ( (data['crop_left'] > photo.width) or (data['crop_top'] > photo.height) or ((data['crop_left'] + data['crop_width']) > photo.width) or ((data['crop_top'] + data['crop_height']) > photo.height) ): # raise forms.ValidationError, ugettext("The specified crop coordinates do not fit into the source photo.") raise ValidationError(ugettext("The specified crop coordinates do not fit into the source photo.")) return data
Validation function that checks whether the crop dimensions fit into the original photo and the format.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/admin.py#L16-L30
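A small standalone illustration of the bounds check above, with hypothetical photo and crop values; the crop is rejected as soon as its origin or its far edge leaves the source photo.

photo_width, photo_height = 800, 600
crop_left, crop_top, crop_width, crop_height = 700, 100, 200, 200

out_of_bounds = (crop_left > photo_width or crop_top > photo_height
                 or crop_left + crop_width > photo_width
                 or crop_top + crop_height > photo_height)
print(out_of_bounds)  # True: 700 + 200 = 900 exceeds the photo width of 800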
ella/ella
ella/photos/admin.py
FormatForm.clean
def clean(self): """ Check format name uniqueness for sites :return: cleaned_data """ data = self.cleaned_data formats = Format.objects.filter(name=data['name']) if self.instance: formats = formats.exclude(pk=self.instance.pk) exists_sites = [] for f in formats: for s in f.sites.all(): if s in data['sites']: exists_sites.append(s.__unicode__()) if len(exists_sites): raise ValidationError(ugettext("Format with this name exists for site(s): %s" % ", ".join(exists_sites))) return data
python
def clean(self): """ Check format name uniqueness for sites :return: cleaned_data """ data = self.cleaned_data formats = Format.objects.filter(name=data['name']) if self.instance: formats = formats.exclude(pk=self.instance.pk) exists_sites = [] for f in formats: for s in f.sites.all(): if s in data['sites']: exists_sites.append(s.__unicode__()) if len(exists_sites): raise ValidationError(ugettext("Format with this name exists for site(s): %s" % ", ".join(exists_sites))) return data
Check format name uniqueness for sites :return: cleaned_data
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/admin.py#L37-L58
ella/ella
ella/photos/admin.py
PhotoOptions.format_photo_json
def format_photo_json(self, request, photo, format): "Used in admin image 'crop tool'." try: photo = get_cached_object(Photo, pk=photo) format = get_cached_object(Format, pk=format) content = { 'error': False, 'image':settings.MEDIA_URL + photo.image, 'width':photo.width, 'height': photo.height, 'format_width':format.max_width, 'format_height':format.max_height } except (Photo.DoesNotExist, Format.DoesNotExist): content = {'error':True} return HttpResponse(simplejson.dumps(content))
python
def format_photo_json(self, request, photo, format): "Used in admin image 'crop tool'." try: photo = get_cached_object(Photo, pk=photo) format = get_cached_object(Format, pk=format) content = { 'error': False, 'image':settings.MEDIA_URL + photo.image, 'width':photo.width, 'height': photo.height, 'format_width':format.max_width, 'format_height':format.max_height } except (Photo.DoesNotExist, Format.DoesNotExist): content = {'error':True} return HttpResponse(simplejson.dumps(content))
Used in admin image 'crop tool'.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/admin.py#L114-L129
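A sketch of the JSON payload shape produced above, with hypothetical in-memory stand-ins for the Photo and Format objects and the standard-library json module in place of simplejson.

import json

MEDIA_URL = '/media/'  # assumed stand-in for settings.MEDIA_URL
photo = {'image': 'photos/2023/pic.jpg', 'width': 1024, 'height': 768}
fmt = {'max_width': 300, 'max_height': 200}

content = {
    'error': False,
    'image': MEDIA_URL + photo['image'],
    'width': photo['width'],
    'height': photo['height'],
    'format_width': fmt['max_width'],
    'format_height': fmt['max_height'],
}
print(json.dumps(content))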
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.set_initial_representations
def set_initial_representations(self): """Set the initial representations""" self.update_model_dict() self.rc("background solid white") self.rc("setattr g display 0") # Hide all pseudobonds self.rc("~display #%i & :/isHet & ~:%s" % (self.model_dict[self.plipname], self.hetid))
python
def set_initial_representations(self): """Set the initial representations""" self.update_model_dict() self.rc("background solid white") self.rc("setattr g display 0") # Hide all pseudobonds self.rc("~display #%i & :/isHet & ~:%s" % (self.model_dict[self.plipname], self.hetid))
Set the initial representations
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L36-L41
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.update_model_dict
def update_model_dict(self): """Updates the model dictionary""" dct = {} models = self.chimera.openModels for md in models.list(): dct[md.name] = md.id self.model_dict = dct
python
def update_model_dict(self): """Updates the model dictionary""" dct = {} models = self.chimera.openModels for md in models.list(): dct[md.name] = md.id self.model_dict = dct
Updates the model dictionary
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L43-L49
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.atom_by_serialnumber
def atom_by_serialnumber(self): """Provides a dictionary mapping serial numbers to their atom objects.""" atm_by_snum = {} for atom in self.model.atoms: atm_by_snum[atom.serialNumber] = atom return atm_by_snum
python
def atom_by_serialnumber(self): """Provides a dictionary mapping serial numbers to their atom objects.""" atm_by_snum = {} for atom in self.model.atoms: atm_by_snum[atom.serialNumber] = atom return atm_by_snum
Provides a dictionary mapping serial numbers to their atom objects.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L51-L56
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_hydrophobic
def show_hydrophobic(self): """Visualizes hydrophobic contacts.""" grp = self.getPseudoBondGroup("Hydrophobic Interactions-%i" % self.tid, associateWith=[self.model]) grp.lineType = self.chimera.Dash grp.lineWidth = 3 grp.color = self.colorbyname('gray') for i in self.plcomplex.hydrophobic_contacts.pairs_ids: self.bs_res_ids.append(i[0])
python
def show_hydrophobic(self): """Visualizes hydrophobic contacts.""" grp = self.getPseudoBondGroup("Hydrophobic Interactions-%i" % self.tid, associateWith=[self.model]) grp.lineType = self.chimera.Dash grp.lineWidth = 3 grp.color = self.colorbyname('gray') for i in self.plcomplex.hydrophobic_contacts.pairs_ids: self.bs_res_ids.append(i[0])
Visualizes hydrophobic contacts.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L58-L65
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_hbonds
def show_hbonds(self): """Visualizes hydrogen bonds.""" grp = self.getPseudoBondGroup("Hydrogen Bonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.hbonds.ldon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[0]) for i in self.plcomplex.hbonds.pdon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[1])
python
def show_hbonds(self): """Visualizes hydrogen bonds.""" grp = self.getPseudoBondGroup("Hydrogen Bonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.hbonds.ldon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[0]) for i in self.plcomplex.hbonds.pdon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[1])
Visualizes hydrogen bonds.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L67-L78
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_halogen
def show_halogen(self): """Visualizes halogen bonds.""" grp = self.getPseudoBondGroup("HalogenBonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.halogen_bonds: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('turquoise') self.bs_res_ids.append(i.acc_id)
python
def show_halogen(self): """Visualizes halogen bonds.""" grp = self.getPseudoBondGroup("HalogenBonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.halogen_bonds: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('turquoise') self.bs_res_ids.append(i.acc_id)
Visualizes halogen bonds.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L80-L88
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_stacking
def show_stacking(self): """Visualizes pi-stacking interactions.""" grp = self.getPseudoBondGroup("pi-Stacking-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, stack in enumerate(self.plcomplex.pistacking): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") centroid_prot = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.proteinring_center centroid_prot.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_prot) centroid_lig = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.ligandring_center centroid_lig.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_lig) b = grp.newPseudoBond(centroid_lig, centroid_prot) b.color = self.colorbyname('forest green') self.bs_res_ids += stack.proteinring_atoms
python
def show_stacking(self): """Visualizes pi-stacking interactions.""" grp = self.getPseudoBondGroup("pi-Stacking-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, stack in enumerate(self.plcomplex.pistacking): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") centroid_prot = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.proteinring_center centroid_prot.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_prot) centroid_lig = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.ligandring_center centroid_lig.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_lig) b = grp.newPseudoBond(centroid_lig, centroid_prot) b.color = self.colorbyname('forest green') self.bs_res_ids += stack.proteinring_atoms
Visualizes pi-stacking interactions.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L90-L112
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_cationpi
def show_cationpi(self): """Visualizes cation-pi interactions""" grp = self.getPseudoBondGroup("Cation-Pi-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, cat in enumerate(self.plcomplex.pication): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = cat.charge_center chargecenter.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter) centroid = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = cat.ring_center centroid.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid) b = grp.newPseudoBond(centroid, chargecenter) b.color = self.colorbyname('orange') if cat.protcharged: self.bs_res_ids += cat.charge_atoms else: self.bs_res_ids += cat.ring_atoms
python
def show_cationpi(self): """Visualizes cation-pi interactions""" grp = self.getPseudoBondGroup("Cation-Pi-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, cat in enumerate(self.plcomplex.pication): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = cat.charge_center chargecenter.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter) centroid = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = cat.ring_center centroid.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid) b = grp.newPseudoBond(centroid, chargecenter) b.color = self.colorbyname('orange') if cat.protcharged: self.bs_res_ids += cat.charge_atoms else: self.bs_res_ids += cat.ring_atoms
Visualizes cation-pi interactions
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L114-L139
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_sbridges
def show_sbridges(self): """Visualizes salt bridges.""" # Salt Bridges grp = self.getPseudoBondGroup("Salt Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, sbridge in enumerate(self.plcomplex.saltbridges): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter1 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.positive_center chargecenter1.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter1) chargecenter2 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.negative_center chargecenter2.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter2) b = grp.newPseudoBond(chargecenter1, chargecenter2) b.color = self.colorbyname('yellow') if sbridge.protispos: self.bs_res_ids += sbridge.positive_atoms else: self.bs_res_ids += sbridge.negative_atoms
python
def show_sbridges(self): """Visualizes salt bridges.""" # Salt Bridges grp = self.getPseudoBondGroup("Salt Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, sbridge in enumerate(self.plcomplex.saltbridges): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter1 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.positive_center chargecenter1.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter1) chargecenter2 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.negative_center chargecenter2.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter2) b = grp.newPseudoBond(chargecenter1, chargecenter2) b.color = self.colorbyname('yellow') if sbridge.protispos: self.bs_res_ids += sbridge.positive_atoms else: self.bs_res_ids += sbridge.negative_atoms
Visualizes salt bridges.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L141-L167
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_wbridges
def show_wbridges(self): """Visualizes water bridges""" grp = self.getPseudoBondGroup("Water Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, wbridge in enumerate(self.plcomplex.waterbridges): c = grp.newPseudoBond(self.atoms[wbridge.water_id], self.atoms[wbridge.acc_id]) c.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) b = grp.newPseudoBond(self.atoms[wbridge.don_id], self.atoms[wbridge.water_id]) b.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) if wbridge.protisdon: self.bs_res_ids.append(wbridge.don_id) else: self.bs_res_ids.append(wbridge.acc_id)
python
def show_wbridges(self): """Visualizes water bridges""" grp = self.getPseudoBondGroup("Water Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, wbridge in enumerate(self.plcomplex.waterbridges): c = grp.newPseudoBond(self.atoms[wbridge.water_id], self.atoms[wbridge.acc_id]) c.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) b = grp.newPseudoBond(self.atoms[wbridge.don_id], self.atoms[wbridge.water_id]) b.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) if wbridge.protisdon: self.bs_res_ids.append(wbridge.don_id) else: self.bs_res_ids.append(wbridge.acc_id)
Visualizes water bridges
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L169-L183
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.show_metal
def show_metal(self): """Visualizes metal coordination.""" grp = self.getPseudoBondGroup("Metal Coordination-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, metal in enumerate(self.plcomplex.metal_complexes): c = grp.newPseudoBond(self.atoms[metal.metal_id], self.atoms[metal.target_id]) c.color = self.colorbyname('magenta') if metal.location == 'water': self.water_ids.append(metal.target_id) if metal.location.startswith('protein'): self.bs_res_ids.append(metal.target_id)
python
def show_metal(self): """Visualizes metal coordination.""" grp = self.getPseudoBondGroup("Metal Coordination-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, metal in enumerate(self.plcomplex.metal_complexes): c = grp.newPseudoBond(self.atoms[metal.metal_id], self.atoms[metal.target_id]) c.color = self.colorbyname('magenta') if metal.location == 'water': self.water_ids.append(metal.target_id) if metal.location.startswith('protein'): self.bs_res_ids.append(metal.target_id)
Visualizes metal coordination.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L185-L197
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.cleanup
def cleanup(self): """Clean up the visualization.""" if not len(self.water_ids) == 0: # Hide all non-interacting water molecules water_selection = [] for wid in self.water_ids: water_selection.append('serialNumber=%i' % wid) self.rc("~display :HOH") self.rc("display :@/%s" % " or ".join(water_selection)) # Show all interacting binding site residues self.rc("~display #%i & ~:/isHet" % self.model_dict[self.plipname]) self.rc("display :%s" % ",".join([str(self.atoms[bsid].residue.id) for bsid in self.bs_res_ids])) self.rc("color lightblue :HOH")
python
def cleanup(self): """Clean up the visualization.""" if not len(self.water_ids) == 0: # Hide all non-interacting water molecules water_selection = [] for wid in self.water_ids: water_selection.append('serialNumber=%i' % wid) self.rc("~display :HOH") self.rc("display :@/%s" % " or ".join(water_selection)) # Show all interacting binding site residues self.rc("~display #%i & ~:/isHet" % self.model_dict[self.plipname]) self.rc("display :%s" % ",".join([str(self.atoms[bsid].residue.id) for bsid in self.bs_res_ids])) self.rc("color lightblue :HOH")
Clean up the visualization.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L199-L213
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.zoom_to_ligand
def zoom_to_ligand(self): """Centers the view on the ligand and its binding site residues.""" self.rc("center #%i & :%s" % (self.model_dict[self.plipname], self.hetid))
python
def zoom_to_ligand(self): """Centers the view on the ligand and its binding site residues.""" self.rc("center #%i & :%s" % (self.model_dict[self.plipname], self.hetid))
Centers the view on the ligand and its binding site residues.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L215-L217
ssalentin/plip
plip/modules/chimeraplip.py
ChimeraVisualizer.refinements
def refinements(self): """Details for the visualization.""" self.rc("setattr a color gray @CENTROID") self.rc("setattr a radius 0.3 @CENTROID") self.rc("represent sphere @CENTROID") self.rc("setattr a color orange @CHARGE") self.rc("setattr a radius 0.4 @CHARGE") self.rc("represent sphere @CHARGE") self.rc("display :pseudoatoms")
python
def refinements(self): """Details for the visualization.""" self.rc("setattr a color gray @CENTROID") self.rc("setattr a radius 0.3 @CENTROID") self.rc("represent sphere @CENTROID") self.rc("setattr a color orange @CHARGE") self.rc("setattr a radius 0.4 @CHARGE") self.rc("represent sphere @CHARGE") self.rc("display :pseudoatoms")
Details for the visualization.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L219-L227
ella/ella
ella/core/migrations/0002_remove_shit_data.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." if not db.dry_run: for pl in orm['core.Placement'].objects.all(): pl.listing_set.update(publishable=pl.publishable) publishable = pl.publishable publishable.publish_from = pl.publish_from publishable.static = pl.static publishable.publish_to = pl.publish_to publishable.save(force_update=True)
python
def forwards(self, orm): "Write your forwards methods here." if not db.dry_run: for pl in orm['core.Placement'].objects.all(): pl.listing_set.update(publishable=pl.publishable) publishable = pl.publishable publishable.publish_from = pl.publish_from publishable.static = pl.static publishable.publish_to = pl.publish_to publishable.save(force_update=True)
Write your forwards methods here.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/migrations/0002_remove_shit_data.py#L11-L20
ella/ella
ella/photos/formatter.py
Formatter.format
def format(self): """ Crop and resize the supplied image. Return the image and the crop_box used. If the input format is JPEG and in EXIF there is information about rotation, use it and rotate resulting image. """ if hasattr(self.image, '_getexif'): self.rotate_exif() crop_box = self.crop_to_ratio() self.resize() return self.image, crop_box
python
def format(self): """ Crop and resize the supplied image. Return the image and the crop_box used. If the input format is JPEG and in EXIF there is information about rotation, use it and rotate resulting image. """ if hasattr(self.image, '_getexif'): self.rotate_exif() crop_box = self.crop_to_ratio() self.resize() return self.image, crop_box
Crop and resize the supplied image. Return the image and the crop_box used. If the input format is JPEG and the EXIF data contains rotation information, use it and rotate the resulting image.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L22-L31
ella/ella
ella/photos/formatter.py
Formatter.set_format
def set_format(self): """ Check if the format has a flexible height, if so check if the ratio of the flexible format is closer to the actual ratio of the image. If so use that instead of the default values (f.max_width, f.max_height). """ f = self.fmt if f.flexible_height and f.flexible_max_height: flexw, flexh = self.fw, f.flexible_max_height flex_ratio = float(flexw) / flexh if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio): self.fh = flexh self.format_ratio = flex_ratio
python
def set_format(self): """ Check if the format has a flexible height, if so check if the ratio of the flexible format is closer to the actual ratio of the image. If so use that instead of the default values (f.max_width, f.max_height). """ f = self.fmt if f.flexible_height and f.flexible_max_height: flexw, flexh = self.fw, f.flexible_max_height flex_ratio = float(flexw) / flexh if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio): self.fh = flexh self.format_ratio = flex_ratio
Check if the format has a flexible height; if so, check whether the ratio of the flexible format is closer to the actual ratio of the image. If it is, use that instead of the default values (f.max_width, f.max_height).
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L33-L47
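A worked example of the flexible-height decision above, with hypothetical dimensions; the flexible ratio replaces the default one only when it lies closer to the image ratio.

fw, fh, flexible_max_height = 300, 200, 300   # assumed format: 300x200, flexible up to 300 high
iw, ih = 900, 850                             # assumed image size

image_ratio = float(iw) / ih                  # ~1.06
format_ratio = float(fw) / fh                 # 1.5
flex_ratio = float(fw) / flexible_max_height  # 1.0

if abs(flex_ratio - image_ratio) < abs(format_ratio - image_ratio):
    fh, format_ratio = flexible_max_height, flex_ratio
print(fh, format_ratio)  # 300 1.0 -- the flexible height wins for this nearly square image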
ella/ella
ella/photos/formatter.py
Formatter.get_crop_box
def get_crop_box(self): """ Get coordinates of the rectangle defining the new image boundaries. It takes into acount any specific wishes from the model (explicitely passed in crop_box), the desired format and it's options (flexible_height, nocrop) and mainly it's ratio. After dimensions of the format were specified (see set_format), crop the image to the same ratio. """ # check if the flexible height option is active and applies self.set_format() if self.fmt.nocrop: # cropping not allowed return if self.crop_box: # crop coordinates passed in explicitely return self.crop_box iw, ih = self.image.size if iw <= self.fw and ih <= self.fh: # image fits in the target format, no need to crop return if self.image_ratio < self.format_ratio: # image taller than format diff = ih - (iw * self.fh / self.fw) return (0, diff // 2 , iw, ih - diff // 2) elif self.image_ratio > self.format_ratio: # image wider than format diff = iw - (ih * self.fw / self.fh) return (diff // 2, 0, iw - diff // 2, ih) else: # same ratio as format return
python
def get_crop_box(self): """ Get coordinates of the rectangle defining the new image boundaries. It takes into acount any specific wishes from the model (explicitely passed in crop_box), the desired format and it's options (flexible_height, nocrop) and mainly it's ratio. After dimensions of the format were specified (see set_format), crop the image to the same ratio. """ # check if the flexible height option is active and applies self.set_format() if self.fmt.nocrop: # cropping not allowed return if self.crop_box: # crop coordinates passed in explicitely return self.crop_box iw, ih = self.image.size if iw <= self.fw and ih <= self.fh: # image fits in the target format, no need to crop return if self.image_ratio < self.format_ratio: # image taller than format diff = ih - (iw * self.fh / self.fw) return (0, diff // 2 , iw, ih - diff // 2) elif self.image_ratio > self.format_ratio: # image wider than format diff = iw - (ih * self.fw / self.fh) return (diff // 2, 0, iw - diff // 2, ih) else: # same ratio as format return
Get coordinates of the rectangle defining the new image boundaries. It takes into account any specific wishes from the model (explicitly passed in crop_box), the desired format and its options (flexible_height, nocrop) and mainly its ratio. After the dimensions of the format were specified (see set_format), crop the image to the same ratio.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L49-L89
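A worked example of the ratio-based crop above for an image that is wider than the target format, with hypothetical sizes; integer division is written out explicitly here.

iw, ih = 1200, 600   # image size
fw, fh = 400, 300    # format size

image_ratio = float(iw) / ih     # 2.0
format_ratio = float(fw) / fh    # ~1.33
if image_ratio > format_ratio:   # image wider than format: trim left and right
    diff = iw - (ih * fw // fh)  # 1200 - 800 = 400
    crop_box = (diff // 2, 0, iw - diff // 2, ih)
print(crop_box)                  # (200, 0, 1000, 600) -- an 800x600 window with the 4:3 format ratio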
ella/ella
ella/photos/formatter.py
Formatter.center_important_part
def center_important_part(self, crop_box): """ If important_box was specified, make sure it lies inside the crop box. """ if not self.important_box: return crop_box # shortcuts ib = self.important_box cl, ct, cr, cb = crop_box iw, ih = self.image.size # compute the move of crop center onto important center move_horiz = (ib[0] + ib[2]) // 2 - (cl + cr) // 2 move_verti = (ib[1] + ib[3]) // 2 - (ct + cb) // 2 # make sure we don't get out of the image # ... horizontaly if move_horiz > 0: move_horiz = min(iw - cr, move_horiz) else: move_horiz = max(-cl, move_horiz) # .. and verticaly if move_verti > 0: move_verti = min(ih - cb, move_verti) else: move_verti = max(-ct, move_verti) # move the crop_box return (cl + move_horiz, ct + move_verti, cr + move_horiz, cb + move_verti)
python
def center_important_part(self, crop_box): """ If important_box was specified, make sure it lies inside the crop box. """ if not self.important_box: return crop_box # shortcuts ib = self.important_box cl, ct, cr, cb = crop_box iw, ih = self.image.size # compute the move of crop center onto important center move_horiz = (ib[0] + ib[2]) // 2 - (cl + cr) // 2 move_verti = (ib[1] + ib[3]) // 2 - (ct + cb) // 2 # make sure we don't get out of the image # ... horizontaly if move_horiz > 0: move_horiz = min(iw - cr, move_horiz) else: move_horiz = max(-cl, move_horiz) # .. and verticaly if move_verti > 0: move_verti = min(ih - cb, move_verti) else: move_verti = max(-ct, move_verti) # move the crop_box return (cl + move_horiz, ct + move_verti, cr + move_horiz, cb + move_verti)
If important_box was specified, make sure it lies inside the crop box.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L91-L121
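A worked example of the re-centering above, with hypothetical coordinates; the crop is moved toward the important box, but the move is clamped so the crop stays inside the image.

iw, ih = 1000, 600
cl, ct, cr, cb = 200, 0, 800, 600  # crop box from the ratio step
ib = (850, 200, 950, 400)          # important box near the right edge

move_horiz = (ib[0] + ib[2]) // 2 - (cl + cr) // 2  # 900 - 500 = 400
move_verti = (ib[1] + ib[3]) // 2 - (ct + cb) // 2  # 300 - 300 = 0
move_horiz = min(iw - cr, move_horiz) if move_horiz > 0 else max(-cl, move_horiz)
move_verti = min(ih - cb, move_verti) if move_verti > 0 else max(-ct, move_verti)

print((cl + move_horiz, ct + move_verti, cr + move_horiz, cb + move_verti))
# (400, 0, 1000, 600) -- the 400 px move is clamped to 200 px at the image border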
ella/ella
ella/photos/formatter.py
Formatter.crop_to_ratio
def crop_to_ratio(self): " Get crop coordinates and perform the crop if we get any. " crop_box = self.get_crop_box() if not crop_box: return crop_box = self.center_important_part(crop_box) iw, ih = self.image.size # see if we want to crop something from outside of the image out_of_photo = min(crop_box[0], crop_box[1]) < 0 or crop_box[2] > iw or crop_box[3] > ih # check whether there's transparent information in the image transparent = self.image.mode in ('RGBA', 'LA') if photos_settings.DEFAULT_BG_COLOR != 'black' and out_of_photo and not transparent: # if we do, just crop the image to the portion that will be visible updated_crop_box = ( max(0, crop_box[0]), max(0, crop_box[1]), min(iw, crop_box[2]), min(ih, crop_box[3]), ) cropped = self.image.crop(updated_crop_box) # create new image of the proper size and color self.image = Image.new('RGB', (crop_box[2] - crop_box[0], crop_box[3] - crop_box[1]), photos_settings.DEFAULT_BG_COLOR) # and paste the cropped part into it's proper position self.image.paste(cropped, (abs(min(crop_box[0], 0)), abs(min(crop_box[1], 0)))) else: # crop normally if not the case self.image = self.image.crop(crop_box) return crop_box
python
def crop_to_ratio(self): " Get crop coordinates and perform the crop if we get any. " crop_box = self.get_crop_box() if not crop_box: return crop_box = self.center_important_part(crop_box) iw, ih = self.image.size # see if we want to crop something from outside of the image out_of_photo = min(crop_box[0], crop_box[1]) < 0 or crop_box[2] > iw or crop_box[3] > ih # check whether there's transparent information in the image transparent = self.image.mode in ('RGBA', 'LA') if photos_settings.DEFAULT_BG_COLOR != 'black' and out_of_photo and not transparent: # if we do, just crop the image to the portion that will be visible updated_crop_box = ( max(0, crop_box[0]), max(0, crop_box[1]), min(iw, crop_box[2]), min(ih, crop_box[3]), ) cropped = self.image.crop(updated_crop_box) # create new image of the proper size and color self.image = Image.new('RGB', (crop_box[2] - crop_box[0], crop_box[3] - crop_box[1]), photos_settings.DEFAULT_BG_COLOR) # and paste the cropped part into it's proper position self.image.paste(cropped, (abs(min(crop_box[0], 0)), abs(min(crop_box[1], 0)))) else: # crop normally if not the case self.image = self.image.crop(crop_box) return crop_box
Get crop coordinates and perform the crop if we get any.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L124-L153
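A minimal Pillow sketch of the padding branch above, assuming a white background in place of photos_settings.DEFAULT_BG_COLOR and a synthetic gray source image.

from PIL import Image

image = Image.new('RGB', (400, 300), 'gray')  # stand-in source photo
crop_box = (-50, 0, 350, 300)                 # 50 px of the crop lie left of the photo

visible = image.crop((max(0, crop_box[0]), max(0, crop_box[1]),
                      min(image.width, crop_box[2]), min(image.height, crop_box[3])))
canvas = Image.new('RGB', (crop_box[2] - crop_box[0], crop_box[3] - crop_box[1]), 'white')
canvas.paste(visible, (abs(min(crop_box[0], 0)), abs(min(crop_box[1], 0))))
print(canvas.size)  # (400, 300) -- the leftmost 50 px are filled with the background color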
ella/ella
ella/photos/formatter.py
Formatter.get_resized_size
def get_resized_size(self): """ Get target size for the stretched or shirnked image to fit within the target dimensions. Do not stretch images if not format.stretch. Note that this method is designed to operate on already cropped image. """ f = self.fmt iw, ih = self.image.size if not f.stretch and iw <= self.fw and ih <= self.fh: return if self.image_ratio == self.format_ratio: # same ratio, just resize return (self.fw, self.fh) elif self.image_ratio < self.format_ratio: # image taller than format return (self.fh * iw / ih, self.fh) else: # self.image_ratio > self.format_ratio # image wider than format return (self.fw, self.fw * ih / iw)
python
def get_resized_size(self): """ Get target size for the stretched or shirnked image to fit within the target dimensions. Do not stretch images if not format.stretch. Note that this method is designed to operate on already cropped image. """ f = self.fmt iw, ih = self.image.size if not f.stretch and iw <= self.fw and ih <= self.fh: return if self.image_ratio == self.format_ratio: # same ratio, just resize return (self.fw, self.fh) elif self.image_ratio < self.format_ratio: # image taller than format return (self.fh * iw / ih, self.fh) else: # self.image_ratio > self.format_ratio # image wider than format return (self.fw, self.fw * ih / iw)
Get target size for the stretched or shrunk image to fit within the target dimensions. Do not stretch images if not format.stretch. Note that this method is designed to operate on an already cropped image.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L155-L178
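A worked example of the resize target above for an already cropped image that is taller than the format, with hypothetical sizes.

iw, ih = 600, 1200  # already cropped image
fw, fh = 300, 400   # format

image_ratio, format_ratio = float(iw) / ih, float(fw) / fh  # 0.5 vs 0.75
if image_ratio < format_ratio:  # image taller than format: fix the height, scale the width
    resized = (fh * iw // ih, fh)
print(resized)  # (200, 400) -- the 1:2 ratio of the image is preserved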
ella/ella
ella/photos/formatter.py
Formatter.resize
def resize(self): """ Get target size for a cropped image and do the resizing if we got anything usable. """ resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
python
def resize(self): """ Get target size for a cropped image and do the resizing if we got anything usable. """ resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
Get target size for a cropped image and do the resizing if we got anything usable.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L180-L189
ella/ella
ella/photos/formatter.py
Formatter.rotate_exif
def rotate_exif(self): """ Rotate image via exif information. Only 90, 180 and 270 rotations are supported. """ exif = self.image._getexif() or {} rotation = exif.get(TAGS['Orientation'], 1) rotations = { 6: -90, 3: -180, 8: -270, } if rotation not in rotations: return self.image = self.image.rotate(rotations[rotation])
python
def rotate_exif(self): """ Rotate image via exif information. Only 90, 180 and 270 rotations are supported. """ exif = self.image._getexif() or {} rotation = exif.get(TAGS['Orientation'], 1) rotations = { 6: -90, 3: -180, 8: -270, } if rotation not in rotations: return self.image = self.image.rotate(rotations[rotation])
Rotate image via exif information. Only 90, 180 and 270 rotations are supported.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L191-L207
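A hedged Pillow sketch of the same EXIF-driven rotation (not ella's code): PIL.ExifTags.TAGS maps tag ids to names, so the sketch builds the reverse lookup explicitly and reads the orientation via Image.getexif().

from PIL import Image
from PIL.ExifTags import TAGS

ORIENTATION_TAG = next(tag for tag, name in TAGS.items() if name == 'Orientation')  # 274
ROTATIONS = {6: -90, 3: -180, 8: -270}  # same orientation-to-angle mapping as above

def autorotate(image):
    orientation = image.getexif().get(ORIENTATION_TAG, 1)
    if orientation in ROTATIONS:
        image = image.rotate(ROTATIONS[orientation])
    return image

print(autorotate(Image.new('RGB', (40, 20))).size)  # (40, 20) -- no EXIF data, left untouched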
ssalentin/plip
plip/modules/visualize.py
select_by_ids
def select_by_ids(selname, idlist, selection_exists=False, chunksize=20, restrict=None): """Selection with a large number of ids concatenated into a selection list can cause buffer overflow in PyMOL. This function takes a selection name and and list of IDs (list of integers) as input and makes a careful step-by-step selection (packages of 20 by default)""" idlist = list(set(idlist)) # Remove duplicates if not selection_exists: cmd.select(selname, 'None') # Empty selection first idchunks = [idlist[i:i+chunksize] for i in range(0, len(idlist), chunksize)] for idchunk in idchunks: cmd.select(selname, '%s or (id %s)' % (selname, '+'.join(map(str, idchunk)))) if restrict is not None: cmd.select(selname, '%s and %s' % (selname, restrict))
python
def select_by_ids(selname, idlist, selection_exists=False, chunksize=20, restrict=None): """Selection with a large number of ids concatenated into a selection list can cause buffer overflow in PyMOL. This function takes a selection name and and list of IDs (list of integers) as input and makes a careful step-by-step selection (packages of 20 by default)""" idlist = list(set(idlist)) # Remove duplicates if not selection_exists: cmd.select(selname, 'None') # Empty selection first idchunks = [idlist[i:i+chunksize] for i in range(0, len(idlist), chunksize)] for idchunk in idchunks: cmd.select(selname, '%s or (id %s)' % (selname, '+'.join(map(str, idchunk)))) if restrict is not None: cmd.select(selname, '%s and %s' % (selname, restrict))
Selection with a large number of ids concatenated into a selection list can cause buffer overflow in PyMOL. This function takes a selection name and a list of IDs (list of integers) as input and makes a careful step-by-step selection (packages of 20 by default).
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/visualize.py#L18-L30
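A standalone illustration (no PyMOL required) of the chunking idea above; 'binding_site' is a hypothetical selection name and the PyMOL commands are only printed, not executed.

idlist = list(range(1, 48)) + [5, 7]  # 47 ids plus two duplicates
chunksize = 20

ids = list(set(idlist))               # remove duplicates, as in the function above
idchunks = [ids[i:i + chunksize] for i in range(0, len(ids), chunksize)]
for idchunk in idchunks:
    print('select binding_site, binding_site or (id %s)' % '+'.join(map(str, idchunk)))
# Three short commands are printed instead of one oversized selection string.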
ssalentin/plip
plip/modules/visualize.py
visualize_in_pymol
def visualize_in_pymol(plcomplex): """Visualizes the protein-ligand pliprofiler at one site in PyMOL.""" vis = PyMOLVisualizer(plcomplex) ##################### # Set everything up # ##################### pdbid = plcomplex.pdbid lig_members = plcomplex.lig_members chain = plcomplex.chain if config.PEPTIDES != []: vis.ligname = 'PeptideChain%s' % plcomplex.chain if config.INTRA is not None: vis.ligname = 'Intra%s' % plcomplex.chain ligname = vis.ligname hetid = plcomplex.hetid metal_ids = plcomplex.metal_ids metal_ids_str = '+'.join([str(i) for i in metal_ids]) ######################## # Basic visualizations # ######################## start_pymol(run=True, options='-pcq', quiet=not config.DEBUG) vis.set_initial_representations() cmd.load(plcomplex.sourcefile) current_name = cmd.get_object_list(selection='(all)')[0] write_message('Setting current_name to "%s" and pdbid to "%s\n"' % (current_name, pdbid), mtype='debug') cmd.set_name(current_name, pdbid) cmd.hide('everything', 'all') if config.PEPTIDES != []: cmd.select(ligname, 'chain %s and not resn HOH' % plcomplex.chain) else: cmd.select(ligname, 'resn %s and chain %s and resi %s*' % (hetid, chain, plcomplex.position)) write_message("Selecting ligand for PDBID %s and ligand name %s with: " % (pdbid, ligname), mtype='debug') write_message('resn %s and chain %s and resi %s*' % (hetid, chain, plcomplex.position), mtype='debug') # Visualize and color metal ions if there are any if not len(metal_ids) == 0: vis.select_by_ids(ligname, metal_ids, selection_exists=True) cmd.show('spheres', 'id %s and %s' % (metal_ids_str, pdbid)) # Additionally, select all members of composite ligands if len(lig_members) > 1: for member in lig_members: resid, chain, resnr = member[0], member[1], str(member[2]) cmd.select(ligname, '%s or (resn %s and chain %s and resi %s)' % (ligname, resid, chain, resnr)) cmd.show('sticks', ligname) cmd.color('myblue') cmd.color('myorange', ligname) cmd.util.cnc('all') if not len(metal_ids) == 0: cmd.color('hotpink', 'id %s' % metal_ids_str) cmd.hide('sticks', 'id %s' % metal_ids_str) cmd.set('sphere_scale', 0.3, ligname) cmd.deselect() vis.make_initial_selections() vis.show_hydrophobic() # Hydrophobic Contacts vis.show_hbonds() # Hydrogen Bonds vis.show_halogen() # Halogen Bonds vis.show_stacking() # pi-Stacking Interactions vis.show_cationpi() # pi-Cation Interactions vis.show_sbridges() # Salt Bridges vis.show_wbridges() # Water Bridges vis.show_metal() # Metal Coordination vis.refinements() vis.zoom_to_ligand() vis.selections_cleanup() vis.selections_group() vis.additional_cleanup() if config.DNARECEPTOR: # Rename Cartoon selection to Line selection and change repr. cmd.set_name('%sCartoon' % plcomplex.pdbid, '%sLines' % plcomplex.pdbid) cmd.hide('cartoon', '%sLines' % plcomplex.pdbid) cmd.show('lines', '%sLines' % plcomplex.pdbid) if config.PEPTIDES != []: filename = "%s_PeptideChain%s" % (pdbid.upper(), plcomplex.chain) if config.PYMOL: vis.save_session(config.OUTPATH, override=filename) elif config.INTRA is not None: filename = "%s_IntraChain%s" % (pdbid.upper(), plcomplex.chain) if config.PYMOL: vis.save_session(config.OUTPATH, override=filename) else: filename = '%s_%s' % (pdbid.upper(), "_".join([hetid, plcomplex.chain, plcomplex.position])) if config.PYMOL: vis.save_session(config.OUTPATH) if config.PICS: vis.save_picture(config.OUTPATH, filename)
python
def visualize_in_pymol(plcomplex): """Visualizes the protein-ligand pliprofiler at one site in PyMOL.""" vis = PyMOLVisualizer(plcomplex) ##################### # Set everything up # ##################### pdbid = plcomplex.pdbid lig_members = plcomplex.lig_members chain = plcomplex.chain if config.PEPTIDES != []: vis.ligname = 'PeptideChain%s' % plcomplex.chain if config.INTRA is not None: vis.ligname = 'Intra%s' % plcomplex.chain ligname = vis.ligname hetid = plcomplex.hetid metal_ids = plcomplex.metal_ids metal_ids_str = '+'.join([str(i) for i in metal_ids]) ######################## # Basic visualizations # ######################## start_pymol(run=True, options='-pcq', quiet=not config.DEBUG) vis.set_initial_representations() cmd.load(plcomplex.sourcefile) current_name = cmd.get_object_list(selection='(all)')[0] write_message('Setting current_name to "%s" and pdbid to "%s\n"' % (current_name, pdbid), mtype='debug') cmd.set_name(current_name, pdbid) cmd.hide('everything', 'all') if config.PEPTIDES != []: cmd.select(ligname, 'chain %s and not resn HOH' % plcomplex.chain) else: cmd.select(ligname, 'resn %s and chain %s and resi %s*' % (hetid, chain, plcomplex.position)) write_message("Selecting ligand for PDBID %s and ligand name %s with: " % (pdbid, ligname), mtype='debug') write_message('resn %s and chain %s and resi %s*' % (hetid, chain, plcomplex.position), mtype='debug') # Visualize and color metal ions if there are any if not len(metal_ids) == 0: vis.select_by_ids(ligname, metal_ids, selection_exists=True) cmd.show('spheres', 'id %s and %s' % (metal_ids_str, pdbid)) # Additionally, select all members of composite ligands if len(lig_members) > 1: for member in lig_members: resid, chain, resnr = member[0], member[1], str(member[2]) cmd.select(ligname, '%s or (resn %s and chain %s and resi %s)' % (ligname, resid, chain, resnr)) cmd.show('sticks', ligname) cmd.color('myblue') cmd.color('myorange', ligname) cmd.util.cnc('all') if not len(metal_ids) == 0: cmd.color('hotpink', 'id %s' % metal_ids_str) cmd.hide('sticks', 'id %s' % metal_ids_str) cmd.set('sphere_scale', 0.3, ligname) cmd.deselect() vis.make_initial_selections() vis.show_hydrophobic() # Hydrophobic Contacts vis.show_hbonds() # Hydrogen Bonds vis.show_halogen() # Halogen Bonds vis.show_stacking() # pi-Stacking Interactions vis.show_cationpi() # pi-Cation Interactions vis.show_sbridges() # Salt Bridges vis.show_wbridges() # Water Bridges vis.show_metal() # Metal Coordination vis.refinements() vis.zoom_to_ligand() vis.selections_cleanup() vis.selections_group() vis.additional_cleanup() if config.DNARECEPTOR: # Rename Cartoon selection to Line selection and change repr. cmd.set_name('%sCartoon' % plcomplex.pdbid, '%sLines' % plcomplex.pdbid) cmd.hide('cartoon', '%sLines' % plcomplex.pdbid) cmd.show('lines', '%sLines' % plcomplex.pdbid) if config.PEPTIDES != []: filename = "%s_PeptideChain%s" % (pdbid.upper(), plcomplex.chain) if config.PYMOL: vis.save_session(config.OUTPATH, override=filename) elif config.INTRA is not None: filename = "%s_IntraChain%s" % (pdbid.upper(), plcomplex.chain) if config.PYMOL: vis.save_session(config.OUTPATH, override=filename) else: filename = '%s_%s' % (pdbid.upper(), "_".join([hetid, plcomplex.chain, plcomplex.position])) if config.PYMOL: vis.save_session(config.OUTPATH) if config.PICS: vis.save_picture(config.OUTPATH, filename)
Visualizes the protein-ligand pliprofiler at one site in PyMOL.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/visualize.py#L33-L134
ella/ella
ella/core/views.py
get_content_type
def get_content_type(ct_name): """ A helper function that returns ContentType object based on its slugified verbose_name_plural. Results of this function is cached to improve performance. :Parameters: - `ct_name`: Slugified verbose_name_plural of the target model. :Exceptions: - `Http404`: if no matching ContentType is found """ try: ct = CONTENT_TYPE_MAPPING[ct_name] except KeyError: for model in models.get_models(): if ct_name == slugify(model._meta.verbose_name_plural): ct = ContentType.objects.get_for_model(model) CONTENT_TYPE_MAPPING[ct_name] = ct break else: raise Http404 return ct
python
def get_content_type(ct_name): """ A helper function that returns ContentType object based on its slugified verbose_name_plural. Results of this function is cached to improve performance. :Parameters: - `ct_name`: Slugified verbose_name_plural of the target model. :Exceptions: - `Http404`: if no matching ContentType is found """ try: ct = CONTENT_TYPE_MAPPING[ct_name] except KeyError: for model in models.get_models(): if ct_name == slugify(model._meta.verbose_name_plural): ct = ContentType.objects.get_for_model(model) CONTENT_TYPE_MAPPING[ct_name] = ct break else: raise Http404 return ct
A helper function that returns a ContentType object based on its slugified verbose_name_plural. Results of this function are cached to improve performance. :Parameters: - `ct_name`: Slugified verbose_name_plural of the target model. :Exceptions: - `Http404`: if no matching ContentType is found
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L423-L445
ella/ella
ella/core/views.py
get_templates
def get_templates(name, slug=None, category=None, app_label=None, model_label=None): """ Returns templates in following format and order: * ``'page/category/%s/content_type/%s.%s/%s/%s' % (<CATEGORY_PART>, app_label, model_label, slug, name)`` * ``'page/category/%s/content_type/%s.%s/%s' % (<CATEGORY_PART>, app_label, model_label, name)`` * ``'page/category/%s/%s' % (<CATEGORY_PART>, name)`` * ``'page/content_type/%s.%s/%s' % (app_label, model_label, name)`` * ``'page/%s' % name`` Where ``<CATEGORY_PART>`` is derived from ``path`` attribute by these rules: * When **no** parent exists (this is therfore root category) ``<CATEGORY_PART> = path`` * When exactly **one** parent exists: ``<CATEGORY_PART> = path`` * When multiple parent exist (category nestedN is deep in the tree):: <CATEGORY_PART> = ( 'nested1/nested2/../nestedN/', 'nested1/nested2/../nestedN-1/', ... 'nested1' ) Examples. Three categories exist having slugs **ROOT**, **NESTED1**, **NESTED2** where **NESTED2**'s parent is **NESTED1**.:: ROOT \ NESTED1 \ NESTED2 * For **ROOT**, ``<CATEGORY_PART>`` is only one - "ROOT". * For **NESTED1**, ``<CATEGORY_PART>`` is only one - "NESTED1". * For **NESTED2**, ``<CATEGORY_PART>`` has two elements: "NESTED1/NESTED2" and "NESTED1". """ def category_templates(category, incomplete_template, params): paths = [] parts = category.path.split('/') for i in reversed(range(1, len(parts) + 1)): params.update({'pth': '/'.join(parts[:i])}) paths.append(incomplete_template % params) return paths FULL = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(slug)s/%(name)s' FULL_NO_SLUG = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(name)s' BY_CATEGORY = 'page/category/%(pth)s/%(name)s' BY_CONTENT_TYPE = 'page/content_type/%(app_label)s.%(model_label)s/%(name)s' templates = [] params = {'name': name} if app_label and model_label: params.update({'app_label': app_label, 'model_label': model_label}) if slug: params.update({'slug': slug}) if category: if app_label and model_label: if slug: templates += category_templates(category, FULL, params) templates += category_templates(category, FULL_NO_SLUG, params) templates += category_templates(category, BY_CATEGORY, params) if app_label and model_label: templates.append(BY_CONTENT_TYPE % params) templates.append('page/%(name)s' % params) return templates
python
def get_templates(name, slug=None, category=None, app_label=None, model_label=None): """ Returns templates in following format and order: * ``'page/category/%s/content_type/%s.%s/%s/%s' % (<CATEGORY_PART>, app_label, model_label, slug, name)`` * ``'page/category/%s/content_type/%s.%s/%s' % (<CATEGORY_PART>, app_label, model_label, name)`` * ``'page/category/%s/%s' % (<CATEGORY_PART>, name)`` * ``'page/content_type/%s.%s/%s' % (app_label, model_label, name)`` * ``'page/%s' % name`` Where ``<CATEGORY_PART>`` is derived from ``path`` attribute by these rules: * When **no** parent exists (this is therfore root category) ``<CATEGORY_PART> = path`` * When exactly **one** parent exists: ``<CATEGORY_PART> = path`` * When multiple parent exist (category nestedN is deep in the tree):: <CATEGORY_PART> = ( 'nested1/nested2/../nestedN/', 'nested1/nested2/../nestedN-1/', ... 'nested1' ) Examples. Three categories exist having slugs **ROOT**, **NESTED1**, **NESTED2** where **NESTED2**'s parent is **NESTED1**.:: ROOT \ NESTED1 \ NESTED2 * For **ROOT**, ``<CATEGORY_PART>`` is only one - "ROOT". * For **NESTED1**, ``<CATEGORY_PART>`` is only one - "NESTED1". * For **NESTED2**, ``<CATEGORY_PART>`` has two elements: "NESTED1/NESTED2" and "NESTED1". """ def category_templates(category, incomplete_template, params): paths = [] parts = category.path.split('/') for i in reversed(range(1, len(parts) + 1)): params.update({'pth': '/'.join(parts[:i])}) paths.append(incomplete_template % params) return paths FULL = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(slug)s/%(name)s' FULL_NO_SLUG = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(name)s' BY_CATEGORY = 'page/category/%(pth)s/%(name)s' BY_CONTENT_TYPE = 'page/content_type/%(app_label)s.%(model_label)s/%(name)s' templates = [] params = {'name': name} if app_label and model_label: params.update({'app_label': app_label, 'model_label': model_label}) if slug: params.update({'slug': slug}) if category: if app_label and model_label: if slug: templates += category_templates(category, FULL, params) templates += category_templates(category, FULL_NO_SLUG, params) templates += category_templates(category, BY_CATEGORY, params) if app_label and model_label: templates.append(BY_CONTENT_TYPE % params) templates.append('page/%(name)s' % params) return templates
Returns templates in the following format and order:

* ``'page/category/%s/content_type/%s.%s/%s/%s' % (<CATEGORY_PART>, app_label, model_label, slug, name)``
* ``'page/category/%s/content_type/%s.%s/%s' % (<CATEGORY_PART>, app_label, model_label, name)``
* ``'page/category/%s/%s' % (<CATEGORY_PART>, name)``
* ``'page/content_type/%s.%s/%s' % (app_label, model_label, name)``
* ``'page/%s' % name``

Where ``<CATEGORY_PART>`` is derived from the ``path`` attribute by these rules:

* When **no** parent exists (this is therefore the root category): ``<CATEGORY_PART> = path``
* When exactly **one** parent exists: ``<CATEGORY_PART> = path``
* When multiple parents exist (category nestedN is deep in the tree)::

    <CATEGORY_PART> = (
        'nested1/nested2/../nestedN/',
        'nested1/nested2/../nestedN-1/',
        ...
        'nested1'
    )

Examples. Three categories exist having slugs **ROOT**, **NESTED1**, **NESTED2** where **NESTED2**'s parent is **NESTED1**.::

    ROOT
        \ NESTED1
            \ NESTED2

* For **ROOT**, ``<CATEGORY_PART>`` is only one - "ROOT".
* For **NESTED1**, ``<CATEGORY_PART>`` is only one - "NESTED1".
* For **NESTED2**, ``<CATEGORY_PART>`` has two elements: "NESTED1/NESTED2" and "NESTED1".
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L449-L518
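A minimal usage sketch for the get_templates helper above. The CategoryStub class and the 'articles.article' labels are made up for illustration; the real ella Category is a Django model whose path attribute holds the tree path such as 'nested1/nested2'. The resulting order of candidates follows directly from the function body.

class CategoryStub(object):
    # Illustrative stand-in for ella.core.models.Category; only the `path`
    # attribute is needed by get_templates().
    def __init__(self, path):
        self.path = path

candidates = get_templates(
    'object.html',
    slug='my-article',
    category=CategoryStub('nested1/nested2'),
    app_label='articles',
    model_label='article',
)
# candidates, most specific first:
#   page/category/nested1/nested2/content_type/articles.article/my-article/object.html
#   page/category/nested1/content_type/articles.article/my-article/object.html
#   page/category/nested1/nested2/content_type/articles.article/object.html
#   page/category/nested1/content_type/articles.article/object.html
#   page/category/nested1/nested2/object.html
#   page/category/nested1/object.html
#   page/content_type/articles.article/object.html
#   page/object.html

Such a list is typically handed to Django's template loader, which picks the first template that actually exists, so the most specific override wins.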
ella/ella
ella/core/views.py
get_templates_from_publishable
def get_templates_from_publishable(name, publishable): """ Returns the same template list as `get_templates` but gets values from `Publishable` instance. """ slug = publishable.slug category = publishable.category app_label = publishable.content_type.app_label model_label = publishable.content_type.model return get_templates(name, slug, category, app_label, model_label)
python
def get_templates_from_publishable(name, publishable): """ Returns the same template list as `get_templates` but gets values from `Publishable` instance. """ slug = publishable.slug category = publishable.category app_label = publishable.content_type.app_label model_label = publishable.content_type.model return get_templates(name, slug, category, app_label, model_label)
Returns the same template list as `get_templates` but gets values from `Publishable` instance.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L521-L529
ella/ella
ella/core/views.py
export
def export(request, count, name='', content_type=None): """ Export banners. :Parameters: - `count`: number of objects to pass into the template - `name`: name of the template ( page/export/banner.html is default ) - `models`: list of Model classes to include """ t_list = [] if name: t_list.append('page/export/%s.html' % name) t_list.append('page/export/banner.html') try: cat = Category.objects.get_by_tree_path('') except Category.DoesNotExist: raise Http404() listing = Listing.objects.get_listing(count=count, category=cat) return render( request, t_list, { 'category' : cat, 'listing' : listing }, content_type=content_type )
python
def export(request, count, name='', content_type=None): """ Export banners. :Parameters: - `count`: number of objects to pass into the template - `name`: name of the template ( page/export/banner.html is default ) - `models`: list of Model classes to include """ t_list = [] if name: t_list.append('page/export/%s.html' % name) t_list.append('page/export/banner.html') try: cat = Category.objects.get_by_tree_path('') except Category.DoesNotExist: raise Http404() listing = Listing.objects.get_listing(count=count, category=cat) return render( request, t_list, { 'category' : cat, 'listing' : listing }, content_type=content_type )
Export banners.

:Parameters:
    - `count`: number of objects to pass into the template
    - `name`: name of the template (page/export/banner.html is default)
    - `models`: list of Model classes to include
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L538-L562
ella/ella
ella/core/views.py
EllaCoreView.get_templates
def get_templates(self, context, template_name=None): " Extract parameters for `get_templates` from the context. " if not template_name: template_name = self.template_name kw = {} if 'object' in context: o = context['object'] kw['slug'] = o.slug if context.get('content_type', False): ct = context['content_type'] kw['app_label'] = ct.app_label kw['model_label'] = ct.model return get_templates(template_name, category=context['category'], **kw)
python
def get_templates(self, context, template_name=None): " Extract parameters for `get_templates` from the context. " if not template_name: template_name = self.template_name kw = {} if 'object' in context: o = context['object'] kw['slug'] = o.slug if context.get('content_type', False): ct = context['content_type'] kw['app_label'] = ct.app_label kw['model_label'] = ct.model return get_templates(template_name, category=context['category'], **kw)
Extract parameters for `get_templates` from the context.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L82-L97
ella/ella
ella/core/views.py
ListContentType._archive_entry_year
def _archive_entry_year(self, category): " Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category " year = getattr(settings, 'ARCHIVE_ENTRY_YEAR', None) if not year: n = now() try: year = Listing.objects.filter( category__site__id=settings.SITE_ID, category__tree_path__startswith=category.tree_path, publish_from__lte=n ).values('publish_from')[0]['publish_from'].year except: year = n.year return year
python
def _archive_entry_year(self, category): " Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category " year = getattr(settings, 'ARCHIVE_ENTRY_YEAR', None) if not year: n = now() try: year = Listing.objects.filter( category__site__id=settings.SITE_ID, category__tree_path__startswith=category.tree_path, publish_from__lte=n ).values('publish_from')[0]['publish_from'].year except: year = n.year return year
Return ARCHIVE_ENTRY_YEAR from settings (if it exists) or the year of the newest object in the category
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L321-L334
ella/ella
ella/photos/models.py
Photo.save
def save(self, **kwargs): """Overrides models.Model.save. - Generates slug. - Saves image file. """ if not self.width or not self.height: self.width, self.height = self.image.width, self.image.height # prefill the slug with the ID, it requires double save if not self.id: img = self.image # store dummy values first... w, h = self.width, self.height self.image = '' self.width, self.height = w, h self.slug = '' super(Photo, self).save(force_insert=True) # ... so that we can generate the slug self.slug = str(self.id) + '-' + slugify(self.title) # truncate slug in order to fit in an ImageField and/or paths in Redirects self.slug = self.slug[:64] # .. tha will be used in the image's upload_to function self.image = img # and the image will be saved properly super(Photo, self).save(force_update=True) else: try: old = Photo.objects.get(pk=self.pk) force_update = True # delete formatedphotos if new image was uploaded if old.image != self.image: for f_photo in self.formatedphoto_set.all(): f_photo.delete() except Photo.DoesNotExist: # somebody is just trying to create new model with given PK force_update = False super(Photo, self).save(force_update=force_update)
python
def save(self, **kwargs): """Overrides models.Model.save. - Generates slug. - Saves image file. """ if not self.width or not self.height: self.width, self.height = self.image.width, self.image.height # prefill the slug with the ID, it requires double save if not self.id: img = self.image # store dummy values first... w, h = self.width, self.height self.image = '' self.width, self.height = w, h self.slug = '' super(Photo, self).save(force_insert=True) # ... so that we can generate the slug self.slug = str(self.id) + '-' + slugify(self.title) # truncate slug in order to fit in an ImageField and/or paths in Redirects self.slug = self.slug[:64] # .. tha will be used in the image's upload_to function self.image = img # and the image will be saved properly super(Photo, self).save(force_update=True) else: try: old = Photo.objects.get(pk=self.pk) force_update = True # delete formatedphotos if new image was uploaded if old.image != self.image: for f_photo in self.formatedphoto_set.all(): f_photo.delete() except Photo.DoesNotExist: # somebody is just trying to create new model with given PK force_update = False super(Photo, self).save(force_update=force_update)
Overrides models.Model.save. - Generates slug. - Saves image file.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L110-L152
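A minimal sketch of the slug that the double-save in Photo.save above ends up storing: the newly assigned primary key is prefixed to a slugified title and the result is truncated to 64 characters. The simple_slugify helper and all values are illustrative only; the real code uses Django's slugify.

import re

def simple_slugify(value):
    # Crude stand-in for Django's slugify, good enough for illustration.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)

photo_id, title = 42, 'Sunset over the Vltava (test shot)'   # made-up values
slug = (str(photo_id) + '-' + simple_slugify(title))[:64]    # id prefix, then 64-char cap
print(slug)  # 42-sunset-over-the-vltava-test-shot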
ella/ella
ella/photos/models.py
Format.get_blank_img
def get_blank_img(self): """ Return fake ``FormatedPhoto`` object to be used in templates when an error occurs in image generation. """ if photos_settings.DEBUG: return self.get_placeholder_img() out = { 'blank': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name), } return out
python
def get_blank_img(self): """ Return fake ``FormatedPhoto`` object to be used in templates when an error occurs in image generation. """ if photos_settings.DEBUG: return self.get_placeholder_img() out = { 'blank': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name), } return out
Return fake ``FormatedPhoto`` object to be used in templates when an error occurs in image generation.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L216-L230
ella/ella
ella/photos/models.py
Format.get_placeholder_img
def get_placeholder_img(self): """ Returns fake ``FormatedPhoto`` object grabbed from image placeholder generator service for the purpose of debugging when images are not available but we still want to see something. """ pars = { 'width': self.max_width, 'height': self.max_height } out = { 'placeholder': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars } return out
python
def get_placeholder_img(self): """ Returns fake ``FormatedPhoto`` object grabbed from image placeholder generator service for the purpose of debugging when images are not available but we still want to see something. """ pars = { 'width': self.max_width, 'height': self.max_height } out = { 'placeholder': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars } return out
Returns fake ``FormatedPhoto`` object grabbed from image placeholder generator service for the purpose of debugging when images are not available but we still want to see something.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L232-L248
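A minimal sketch of how get_placeholder_img above builds the placeholder URL by interpolating the format's dimensions. The template string below is hypothetical; the real value comes from photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE.

# Hypothetical provider template; the real setting may differ.
DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE = 'http://placehold.it/%(width)sx%(height)s'

pars = {'width': 200, 'height': 100}
print(DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars)  # http://placehold.it/200x100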
ella/ella
ella/photos/models.py
Format.save
def save(self, **kwargs): """Overrides models.Model.save. - Delete formatted photos if format save and not now created (because of possible changes) """ if self.id: for f_photo in self.formatedphoto_set.all(): f_photo.delete() super(Format, self).save(**kwargs)
python
def save(self, **kwargs): """Overrides models.Model.save. - Delete formatted photos if format save and not now created (because of possible changes) """ if self.id: for f_photo in self.formatedphoto_set.all(): f_photo.delete() super(Format, self).save(**kwargs)
Overrides models.Model.save. - Deletes formatted photos when an existing format is saved (not newly created), because its parameters may have changed.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L254-L265
ella/ella
ella/photos/models.py
FormatedPhoto.generate
def generate(self, save=True): """ Generates photo file in current format. If ``save`` is ``True``, file is saved too. """ stretched_photo, crop_box = self._generate_img() # set crop_box to (0,0,0,0) if photo not cropped if not crop_box: crop_box = 0, 0, 0, 0 self.crop_left, self.crop_top, right, bottom = crop_box self.crop_width = right - self.crop_left self.crop_height = bottom - self.crop_top self.width, self.height = stretched_photo.size f = StringIO() imgf = (self.photo._get_image().format or Image.EXTENSION[path.splitext(self.photo.image.name)[1]]) stretched_photo.save(f, format=imgf, quality=self.format.resample_quality) f.seek(0) self.image.save(self.file(), ContentFile(f.read()), save)
python
def generate(self, save=True): """ Generates photo file in current format. If ``save`` is ``True``, file is saved too. """ stretched_photo, crop_box = self._generate_img() # set crop_box to (0,0,0,0) if photo not cropped if not crop_box: crop_box = 0, 0, 0, 0 self.crop_left, self.crop_top, right, bottom = crop_box self.crop_width = right - self.crop_left self.crop_height = bottom - self.crop_top self.width, self.height = stretched_photo.size f = StringIO() imgf = (self.photo._get_image().format or Image.EXTENSION[path.splitext(self.photo.image.name)[1]]) stretched_photo.save(f, format=imgf, quality=self.format.resample_quality) f.seek(0) self.image.save(self.file(), ContentFile(f.read()), save)
Generates photo file in current format. If ``save`` is ``True``, file is saved too.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L375-L400
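A minimal sketch of the crop-box bookkeeping done in FormatedPhoto.generate above: the (left, top, right, bottom) tuple returned by the internal image routine is converted into an origin plus width/height. The numbers are made up for illustration.

crop_box = (10, 20, 110, 95)            # left, top, right, bottom (illustrative)
crop_left, crop_top, right, bottom = crop_box
crop_width = right - crop_left          # 100
crop_height = bottom - crop_top         # 75
print(crop_left, crop_top, crop_width, crop_height)  # 10 20 100 75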
ella/ella
ella/photos/models.py
FormatedPhoto.save
def save(self, **kwargs): """Overrides models.Model.save - Removes old file from the FS - Generates new file. """ self.remove_file() if not self.image: self.generate(save=False) else: self.image.name = self.file() super(FormatedPhoto, self).save(**kwargs)
python
def save(self, **kwargs): """Overrides models.Model.save - Removes old file from the FS - Generates new file. """ self.remove_file() if not self.image: self.generate(save=False) else: self.image.name = self.file() super(FormatedPhoto, self).save(**kwargs)
Overrides models.Model.save - Removes old file from the FS - Generates new file.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L402-L413
ella/ella
ella/photos/models.py
FormatedPhoto.file
def file(self): """ Method returns formated photo path - derived from format.id and source Photo filename """ if photos_settings.FORMATED_PHOTO_FILENAME is not None: return photos_settings.FORMATED_PHOTO_FILENAME(self) source_file = path.split(self.photo.image.name) return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])
python
def file(self): """ Method returns formated photo path - derived from format.id and source Photo filename """ if photos_settings.FORMATED_PHOTO_FILENAME is not None: return photos_settings.FORMATED_PHOTO_FILENAME(self) source_file = path.split(self.photo.image.name) return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])
Method returns formated photo path - derived from format.id and source Photo filename
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L427-L432
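A minimal sketch of the default path derivation used by FormatedPhoto.file above, assuming FORMATED_PHOTO_FILENAME is left unset; the photo path and the format id are made up.

from os import path

source_file = path.split('photos/2024/05/example.jpg')   # hypothetical stored image name
formatted_path = path.join(source_file[0], str(12) + '-' + source_file[1])  # format.id == 12
print(formatted_path)  # photos/2024/05/12-example.jpg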
ella/ella
ella/positions/templatetags/positions.py
_get_category_from_pars_var
def _get_category_from_pars_var(template_var, context): ''' get category from template variable or from tree_path ''' cat = template_var.resolve(context) if isinstance(cat, basestring): cat = Category.objects.get_by_tree_path(cat) return cat
python
def _get_category_from_pars_var(template_var, context): ''' get category from template variable or from tree_path ''' cat = template_var.resolve(context) if isinstance(cat, basestring): cat = Category.objects.get_by_tree_path(cat) return cat
get category from template variable or from tree_path
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/templatetags/positions.py#L11-L18
ella/ella
ella/positions/templatetags/positions.py
position
def position(parser, token): """ Render a given position for category. If some position is not defined for first category, position from its parent category is used unless nofallback is specified. Syntax:: {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %} {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %} Example usage:: {% position top_left for category %}{% endposition %} """ bits = token.split_contents() nodelist = parser.parse(('end' + bits[0],)) parser.delete_first_token() return _parse_position_tag(bits, nodelist)
python
def position(parser, token): """ Render a given position for category. If some position is not defined for first category, position from its parent category is used unless nofallback is specified. Syntax:: {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %} {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %} Example usage:: {% position top_left for category %}{% endposition %} """ bits = token.split_contents() nodelist = parser.parse(('end' + bits[0],)) parser.delete_first_token() return _parse_position_tag(bits, nodelist)
Render a given position for a category. If the position is not defined for the given category, the position from its parent category is used, unless nofallback is specified.

Syntax::

    {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %}
    {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %}

Example usage::

    {% position top_left for category %}{% endposition %}
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/templatetags/positions.py#L22-L40
ella/ella
ella/positions/templatetags/positions.py
ifposition
def ifposition(parser, token): """ Syntax:: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %} {% else %} {% endifposition %} """ bits = list(token.split_contents()) end_tag = 'end' + bits[0] nofallback = False if bits[-1] == 'nofallback': nofallback = True bits.pop() if len(bits) >= 4 and bits[-2] == 'for': category = template.Variable(bits.pop()) pos_names = bits[1:-1] else: raise TemplateSyntaxError('Invalid syntax: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}') nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = template.NodeList() return IfPositionNode(category, pos_names, nofallback, nodelist_true, nodelist_false)
python
def ifposition(parser, token): """ Syntax:: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %} {% else %} {% endifposition %} """ bits = list(token.split_contents()) end_tag = 'end' + bits[0] nofallback = False if bits[-1] == 'nofallback': nofallback = True bits.pop() if len(bits) >= 4 and bits[-2] == 'for': category = template.Variable(bits.pop()) pos_names = bits[1:-1] else: raise TemplateSyntaxError('Invalid syntax: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}') nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = template.NodeList() return IfPositionNode(category, pos_names, nofallback, nodelist_true, nodelist_false)
Syntax::

    {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}
    {% else %}
    {% endifposition %}
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/templatetags/positions.py#L77-L109
ssalentin/plip
plip/modules/detection.py
filter_contacts
def filter_contacts(pairings): """Filter interactions by two criteria: 1. No interactions between the same residue (important for intra mode). 2. No duplicate interactions (A with B and B with A, also important for intra mode).""" if not config.INTRA: return pairings filtered1_pairings = [p for p in pairings if (p.resnr, p.reschain) != (p.resnr_l, p.reschain_l)] already_considered = [] filtered2_pairings = [] for contact in filtered1_pairings: try: dist = 'D{}'.format(round(contact.distance, 2)) except AttributeError: try: dist = 'D{}'.format(round(contact.distance_ah, 2)) except AttributeError: dist = 'D{}'.format(round(contact.distance_aw, 2)) res1, res2 = ''.join([str(contact.resnr), contact.reschain]), ''.join( [str(contact.resnr_l), contact.reschain_l]) data = {res1, res2, dist} if data not in already_considered: filtered2_pairings.append(contact) already_considered.append(data) return filtered2_pairings
python
def filter_contacts(pairings): """Filter interactions by two criteria: 1. No interactions between the same residue (important for intra mode). 2. No duplicate interactions (A with B and B with A, also important for intra mode).""" if not config.INTRA: return pairings filtered1_pairings = [p for p in pairings if (p.resnr, p.reschain) != (p.resnr_l, p.reschain_l)] already_considered = [] filtered2_pairings = [] for contact in filtered1_pairings: try: dist = 'D{}'.format(round(contact.distance, 2)) except AttributeError: try: dist = 'D{}'.format(round(contact.distance_ah, 2)) except AttributeError: dist = 'D{}'.format(round(contact.distance_aw, 2)) res1, res2 = ''.join([str(contact.resnr), contact.reschain]), ''.join( [str(contact.resnr_l), contact.reschain_l]) data = {res1, res2, dist} if data not in already_considered: filtered2_pairings.append(contact) already_considered.append(data) return filtered2_pairings
Filter interactions by two criteria: 1. No interactions between the same residue (important for intra mode). 2. No duplicate interactions (A with B and B with A, also important for intra mode).
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L22-L45
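A minimal sketch of the symmetric de-duplication idea used by filter_contacts above: a contact A-B and its mirror B-A produce the same unordered key {res1, res2, distance}, so only the first occurrence survives. The Contact tuple and its values are purely illustrative, not PLIP data structures.

from collections import namedtuple

Contact = namedtuple('Contact', 'resnr reschain resnr_l reschain_l distance')

seen, unique = [], []
for c in [Contact(17, 'A', 301, 'A', 3.52), Contact(301, 'A', 17, 'A', 3.52)]:
    key = {'%d%s' % (c.resnr, c.reschain),
           '%d%s' % (c.resnr_l, c.reschain_l),
           'D%s' % round(c.distance, 2)}
    if key not in seen:        # set comparison is order-free, so A-B == B-A
        seen.append(key)
        unique.append(c)
print(len(unique))  # 1 -- the mirrored duplicate is dropped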
ssalentin/plip
plip/modules/detection.py
hydrophobic_interactions
def hydrophobic_interactions(atom_set_a, atom_set_b): """Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX """ data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx ' 'distance restype resnr reschain restype_l, resnr_l, reschain_l') pairings = [] for a, b in itertools.product(atom_set_a, atom_set_b): if a.orig_idx == b.orig_idx: continue e = euclidean3d(a.atom.coords, b.atom.coords) if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX: continue restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom) restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom) contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx, distance=e, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
python
def hydrophobic_interactions(atom_set_a, atom_set_b): """Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX """ data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx ' 'distance restype resnr reschain restype_l, resnr_l, reschain_l') pairings = [] for a, b in itertools.product(atom_set_a, atom_set_b): if a.orig_idx == b.orig_idx: continue e = euclidean3d(a.atom.coords, b.atom.coords) if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX: continue restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom) restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom) contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx, distance=e, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
Detection of hydrophobic interactions between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L52-L72
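A minimal sketch of the distance criterion applied above, with a local euclidean3d stand-in. MIN_DIST and HYDROPH_DIST_MAX are placeholders for the values in PLIP's config module (roughly 0.5 and 4.0 Angstrom by default; check config.py for the real numbers).

import math

MIN_DIST, HYDROPH_DIST_MAX = 0.5, 4.0   # placeholder thresholds

def euclidean3d(a, b):
    # Same role as plip's euclidean3d helper: plain 3D distance.
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

carbon_bs, carbon_lig = (1.0, 0.0, 0.0), (3.5, 1.0, 0.5)   # made-up coordinates
e = euclidean3d(carbon_bs, carbon_lig)
print(MIN_DIST < e < HYDROPH_DIST_MAX)  # True -> counted as a hydrophobic contact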
ssalentin/plip
plip/modules/detection.py
hbonds
def hbonds(acceptors, donor_pairs, protisdon, typ): """Detection of hydrogen bonds between sets of acceptors and donor pairs. Definition: All pairs of hydrogen bond acceptor and donors with donor hydrogens and acceptor showing a distance within HBOND DIST MIN and HBOND DIST MAX and donor angles above HBOND_DON_ANGLE_MIN """ data = namedtuple('hbond', 'a a_orig_idx d d_orig_idx h distance_ah distance_ad angle type protisdon resnr ' 'restype reschain resnr_l restype_l reschain_l sidechain atype dtype') pairings = [] for acc, don in itertools.product(acceptors, donor_pairs): if not typ == 'strong': continue # Regular (strong) hydrogen bonds dist_ah = euclidean3d(acc.a.coords, don.h.coords) dist_ad = euclidean3d(acc.a.coords, don.d.coords) if not config.MIN_DIST < dist_ad < config.HBOND_DIST_MAX: continue vec1, vec2 = vector(don.h.coords, don.d.coords), vector(don.h.coords, acc.a.coords) v = vecangle(vec1, vec2) if not v > config.HBOND_DON_ANGLE_MIN: continue protatom = don.d.OBAtom if protisdon else acc.a.OBAtom ligatom = don.d.OBAtom if not protisdon else acc.a.OBAtom is_sidechain_hbond = protatom.GetResidue().GetAtomProperty(protatom, 8) # Check if sidechain atom resnr = whichresnumber(don.d) if protisdon else whichresnumber(acc.a) resnr_l = whichresnumber(acc.a_orig_atom) if protisdon else whichresnumber(don.d_orig_atom) restype = whichrestype(don.d) if protisdon else whichrestype(acc.a) restype_l = whichrestype(acc.a_orig_atom) if protisdon else whichrestype(don.d_orig_atom) reschain = whichchain(don.d) if protisdon else whichchain(acc.a) rechain_l = whichchain(acc.a_orig_atom) if protisdon else whichchain(don.d_orig_atom) # Next line prevents H-Bonds within amino acids in intermolecular interactions if config.INTRA is not None and whichresnumber(don.d) == whichresnumber(acc.a): continue # Next line prevents backbone-backbone H-Bonds if config.INTRA is not None and protatom.GetResidue().GetAtomProperty(protatom, 8) and ligatom.GetResidue().GetAtomProperty(ligatom, 8): continue contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, d=don.d, d_orig_idx=don.d_orig_idx, h=don.h, distance_ah=dist_ah, distance_ad=dist_ad, angle=v, type=typ, protisdon=protisdon, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=rechain_l, sidechain=is_sidechain_hbond, atype=acc.a.type, dtype=don.d.type) pairings.append(contact) return filter_contacts(pairings)
python
def hbonds(acceptors, donor_pairs, protisdon, typ): """Detection of hydrogen bonds between sets of acceptors and donor pairs. Definition: All pairs of hydrogen bond acceptor and donors with donor hydrogens and acceptor showing a distance within HBOND DIST MIN and HBOND DIST MAX and donor angles above HBOND_DON_ANGLE_MIN """ data = namedtuple('hbond', 'a a_orig_idx d d_orig_idx h distance_ah distance_ad angle type protisdon resnr ' 'restype reschain resnr_l restype_l reschain_l sidechain atype dtype') pairings = [] for acc, don in itertools.product(acceptors, donor_pairs): if not typ == 'strong': continue # Regular (strong) hydrogen bonds dist_ah = euclidean3d(acc.a.coords, don.h.coords) dist_ad = euclidean3d(acc.a.coords, don.d.coords) if not config.MIN_DIST < dist_ad < config.HBOND_DIST_MAX: continue vec1, vec2 = vector(don.h.coords, don.d.coords), vector(don.h.coords, acc.a.coords) v = vecangle(vec1, vec2) if not v > config.HBOND_DON_ANGLE_MIN: continue protatom = don.d.OBAtom if protisdon else acc.a.OBAtom ligatom = don.d.OBAtom if not protisdon else acc.a.OBAtom is_sidechain_hbond = protatom.GetResidue().GetAtomProperty(protatom, 8) # Check if sidechain atom resnr = whichresnumber(don.d) if protisdon else whichresnumber(acc.a) resnr_l = whichresnumber(acc.a_orig_atom) if protisdon else whichresnumber(don.d_orig_atom) restype = whichrestype(don.d) if protisdon else whichrestype(acc.a) restype_l = whichrestype(acc.a_orig_atom) if protisdon else whichrestype(don.d_orig_atom) reschain = whichchain(don.d) if protisdon else whichchain(acc.a) rechain_l = whichchain(acc.a_orig_atom) if protisdon else whichchain(don.d_orig_atom) # Next line prevents H-Bonds within amino acids in intermolecular interactions if config.INTRA is not None and whichresnumber(don.d) == whichresnumber(acc.a): continue # Next line prevents backbone-backbone H-Bonds if config.INTRA is not None and protatom.GetResidue().GetAtomProperty(protatom, 8) and ligatom.GetResidue().GetAtomProperty(ligatom, 8): continue contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, d=don.d, d_orig_idx=don.d_orig_idx, h=don.h, distance_ah=dist_ah, distance_ad=dist_ad, angle=v, type=typ, protisdon=protisdon, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=rechain_l, sidechain=is_sidechain_hbond, atype=acc.a.type, dtype=don.d.type) pairings.append(contact) return filter_contacts(pairings)
Detection of hydrogen bonds between sets of acceptors and donor pairs. Definition: All pairs of hydrogen bond acceptor and donors with donor hydrogens and acceptor showing a distance within HBOND DIST MIN and HBOND DIST MAX and donor angles above HBOND_DON_ANGLE_MIN
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L75-L117
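A minimal sketch of the two geometric checks that decide whether an acceptor/donor pair above counts as a strong hydrogen bond: the donor-acceptor distance window and the donor angle measured at the hydrogen. The threshold values are placeholders in the spirit of PLIP's config module (roughly 0.5-4.1 Angstrom and 100 degrees by default).

import math

MIN_DIST, HBOND_DIST_MAX, HBOND_DON_ANGLE_MIN = 0.5, 4.1, 100.0   # placeholders

def vecangle(v1, v2):
    # Angle between two vectors in degrees, as plip's vecangle does.
    dot = sum(a * b for a, b in zip(v1, v2))
    n1 = math.sqrt(sum(a * a for a in v1))
    n2 = math.sqrt(sum(a * a for a in v2))
    return math.degrees(math.acos(dot / (n1 * n2)))

dist_ad = 2.9                                        # donor...acceptor distance, made up
angle = vecangle((0.0, 1.0, 0.0), (0.1, -1.0, 0.0))  # D-H...A angle seen from the hydrogen
print(MIN_DIST < dist_ad < HBOND_DIST_MAX and angle > HBOND_DON_ANGLE_MIN)  # True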
ssalentin/plip
plip/modules/detection.py
pistacking
def pistacking(rings_bs, rings_lig): """Return all pi-stackings between the given aromatic ring systems in receptor and ligand.""" data = namedtuple( 'pistack', 'proteinring ligandring distance angle offset type restype resnr reschain restype_l resnr_l reschain_l') pairings = [] for r, l in itertools.product(rings_bs, rings_lig): # DISTANCE AND RING ANGLE CALCULATION d = euclidean3d(r.center, l.center) b = vecangle(r.normal, l.normal) a = min(b, 180 - b if not 180 - b < 0 else b) # Smallest of two angles, depending on direction of normal # RING CENTER OFFSET CALCULATION (project each ring center into the other ring) proj1 = projection(l.normal, l.center, r.center) proj2 = projection(r.normal, r.center, l.center) offset = min(euclidean3d(proj1, l.center), euclidean3d(proj2, r.center)) # RECEPTOR DATA resnr, restype, reschain = whichresnumber(r.atoms[0]), whichrestype(r.atoms[0]), whichchain(r.atoms[0]) resnr_l, restype_l, reschain_l = whichresnumber(l.orig_atoms[0]), whichrestype( l.orig_atoms[0]), whichchain(l.orig_atoms[0]) # SELECTION BY DISTANCE, ANGLE AND OFFSET passed = False if not config.MIN_DIST < d < config.PISTACK_DIST_MAX: continue if 0 < a < config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'P' passed = True if 90 - config.PISTACK_ANG_DEV < a < 90 + config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'T' passed = True if passed: contact = data(proteinring=r, ligandring=l, distance=d, angle=a, offset=offset, type=ptype, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
python
def pistacking(rings_bs, rings_lig): """Return all pi-stackings between the given aromatic ring systems in receptor and ligand.""" data = namedtuple( 'pistack', 'proteinring ligandring distance angle offset type restype resnr reschain restype_l resnr_l reschain_l') pairings = [] for r, l in itertools.product(rings_bs, rings_lig): # DISTANCE AND RING ANGLE CALCULATION d = euclidean3d(r.center, l.center) b = vecangle(r.normal, l.normal) a = min(b, 180 - b if not 180 - b < 0 else b) # Smallest of two angles, depending on direction of normal # RING CENTER OFFSET CALCULATION (project each ring center into the other ring) proj1 = projection(l.normal, l.center, r.center) proj2 = projection(r.normal, r.center, l.center) offset = min(euclidean3d(proj1, l.center), euclidean3d(proj2, r.center)) # RECEPTOR DATA resnr, restype, reschain = whichresnumber(r.atoms[0]), whichrestype(r.atoms[0]), whichchain(r.atoms[0]) resnr_l, restype_l, reschain_l = whichresnumber(l.orig_atoms[0]), whichrestype( l.orig_atoms[0]), whichchain(l.orig_atoms[0]) # SELECTION BY DISTANCE, ANGLE AND OFFSET passed = False if not config.MIN_DIST < d < config.PISTACK_DIST_MAX: continue if 0 < a < config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'P' passed = True if 90 - config.PISTACK_ANG_DEV < a < 90 + config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'T' passed = True if passed: contact = data(proteinring=r, ligandring=l, distance=d, angle=a, offset=offset, type=ptype, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
Return all pi-stackings between the given aromatic ring systems in receptor and ligand.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L120-L156
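A minimal sketch of how the stacking type above is assigned from the inter-plane angle and the centre offset. The names mirror config.PISTACK_ANG_DEV and config.PISTACK_OFFSET_MAX, but the numbers here are placeholders (roughly 30 degrees and 2.0 Angstrom by default).

PISTACK_ANG_DEV, PISTACK_OFFSET_MAX = 30.0, 2.0   # placeholder thresholds

def stack_type(a, offset):
    # a: smallest angle between the two ring planes; offset: centre displacement.
    if offset >= PISTACK_OFFSET_MAX:
        return None
    if 0 < a < PISTACK_ANG_DEV:
        return 'P'   # parallel stacking
    if 90 - PISTACK_ANG_DEV < a < 90 + PISTACK_ANG_DEV:
        return 'T'   # T-shaped stacking
    return None

print(stack_type(12.0, 1.1))  # 'P'
print(stack_type(85.0, 1.4))  # 'T'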
ssalentin/plip
plip/modules/detection.py
pication
def pication(rings, pos_charged, protcharged): """Return all pi-Cation interaction between aromatic rings and positively charged groups. For tertiary and quaternary amines, check also the angle between the ring and the nitrogen. """ data = namedtuple( 'pication', 'ring charge distance offset type restype resnr reschain restype_l resnr_l reschain_l protcharged') pairings = [] if len(rings) == 0 or len(pos_charged) == 0: return pairings for ring in rings: c = ring.center for p in pos_charged: d = euclidean3d(c, p.center) # Project the center of charge into the ring and measure distance to ring center proj = projection(ring.normal, ring.center, p.center) offset = euclidean3d(proj, ring.center) if not config.MIN_DIST < d < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX: continue if type(p).__name__ == 'lcharge' and p.fgroup == 'tertamine': # Special case here if the ligand has a tertiary amine, check an additional angle # Otherwise, we might have have a pi-cation interaction 'through' the ligand n_atoms = [a_neighbor for a_neighbor in OBAtomAtomIter(p.atoms[0].OBAtom)] n_atoms_coords = [(a.x(), a.y(), a.z()) for a in n_atoms] amine_normal = np.cross(vector(n_atoms_coords[0], n_atoms_coords[1]), vector(n_atoms_coords[2], n_atoms_coords[0])) b = vecangle(ring.normal, amine_normal) # Smallest of two angles, depending on direction of normal a = min(b, 180 - b if not 180 - b < 0 else b) if not a > 30.0: resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0]) reschain = whichchain(ring.atoms[0]) resnr_l, restype_l = whichresnumber(p.orig_atoms[0]), whichrestype(p.orig_atoms[0]) reschain_l = whichchain(p.orig_atoms[0]) contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protcharged=protcharged) pairings.append(contact) break resnr = whichresnumber(p.atoms[0]) if protcharged else whichresnumber(ring.atoms[0]) resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(p.orig_atoms[0]) restype = whichrestype(p.atoms[0]) if protcharged else whichrestype(ring.atoms[0]) restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(p.orig_atoms[0]) reschain = whichchain(p.atoms[0]) if protcharged else whichchain(ring.atoms[0]) reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(p.orig_atoms[0]) contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protcharged=protcharged) pairings.append(contact) return filter_contacts(pairings)
python
def pication(rings, pos_charged, protcharged): """Return all pi-Cation interaction between aromatic rings and positively charged groups. For tertiary and quaternary amines, check also the angle between the ring and the nitrogen. """ data = namedtuple( 'pication', 'ring charge distance offset type restype resnr reschain restype_l resnr_l reschain_l protcharged') pairings = [] if len(rings) == 0 or len(pos_charged) == 0: return pairings for ring in rings: c = ring.center for p in pos_charged: d = euclidean3d(c, p.center) # Project the center of charge into the ring and measure distance to ring center proj = projection(ring.normal, ring.center, p.center) offset = euclidean3d(proj, ring.center) if not config.MIN_DIST < d < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX: continue if type(p).__name__ == 'lcharge' and p.fgroup == 'tertamine': # Special case here if the ligand has a tertiary amine, check an additional angle # Otherwise, we might have have a pi-cation interaction 'through' the ligand n_atoms = [a_neighbor for a_neighbor in OBAtomAtomIter(p.atoms[0].OBAtom)] n_atoms_coords = [(a.x(), a.y(), a.z()) for a in n_atoms] amine_normal = np.cross(vector(n_atoms_coords[0], n_atoms_coords[1]), vector(n_atoms_coords[2], n_atoms_coords[0])) b = vecangle(ring.normal, amine_normal) # Smallest of two angles, depending on direction of normal a = min(b, 180 - b if not 180 - b < 0 else b) if not a > 30.0: resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0]) reschain = whichchain(ring.atoms[0]) resnr_l, restype_l = whichresnumber(p.orig_atoms[0]), whichrestype(p.orig_atoms[0]) reschain_l = whichchain(p.orig_atoms[0]) contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protcharged=protcharged) pairings.append(contact) break resnr = whichresnumber(p.atoms[0]) if protcharged else whichresnumber(ring.atoms[0]) resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(p.orig_atoms[0]) restype = whichrestype(p.atoms[0]) if protcharged else whichrestype(ring.atoms[0]) restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(p.orig_atoms[0]) reschain = whichchain(p.atoms[0]) if protcharged else whichchain(ring.atoms[0]) reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(p.orig_atoms[0]) contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protcharged=protcharged) pairings.append(contact) return filter_contacts(pairings)
Return all pi-cation interactions between aromatic rings and positively charged groups. For tertiary and quaternary amines, also check the angle between the ring and the nitrogen.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L159-L208
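A minimal numpy sketch of the offset used above: the centre of charge is projected onto the ring plane and its distance to the ring centre is measured. The projection helper only mimics what PLIP's own projection() does; the coordinates are made up.

import numpy as np

def projection(normal, plane_point, point):
    # Project `point` onto the plane defined by `plane_point` and `normal`.
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    p = np.asarray(point, dtype=float)
    d = np.dot(p - np.asarray(plane_point, dtype=float), n)
    return p - d * n

ring_center, ring_normal = (0.0, 0.0, 0.0), (0.0, 0.0, 1.0)
charge_center = (1.2, 0.5, 3.0)
proj = projection(ring_normal, ring_center, charge_center)
offset = np.linalg.norm(proj - np.asarray(ring_center))
print(round(float(offset), 2))  # 1.3 -- in-plane displacement, compared against PISTACK_OFFSET_MAX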
ssalentin/plip
plip/modules/detection.py
saltbridge
def saltbridge(poscenter, negcenter, protispos): """Detect all salt bridges (pliprofiler between centers of positive and negative charge)""" data = namedtuple( 'saltbridge', 'positive negative distance protispos resnr restype reschain resnr_l restype_l reschain_l') pairings = [] for pc, nc in itertools.product(poscenter, negcenter): if not config.MIN_DIST < euclidean3d(pc.center, nc.center) < config.SALTBRIDGE_DIST_MAX: continue resnr = pc.resnr if protispos else nc.resnr resnr_l = whichresnumber(nc.orig_atoms[0]) if protispos else whichresnumber(pc.orig_atoms[0]) restype = pc.restype if protispos else nc.restype restype_l = whichrestype(nc.orig_atoms[0]) if protispos else whichrestype(pc.orig_atoms[0]) reschain = pc.reschain if protispos else nc.reschain reschain_l = whichchain(nc.orig_atoms[0]) if protispos else whichchain(pc.orig_atoms[0]) contact = data(positive=pc, negative=nc, distance=euclidean3d(pc.center, nc.center), protispos=protispos, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
python
def saltbridge(poscenter, negcenter, protispos): """Detect all salt bridges (pliprofiler between centers of positive and negative charge)""" data = namedtuple( 'saltbridge', 'positive negative distance protispos resnr restype reschain resnr_l restype_l reschain_l') pairings = [] for pc, nc in itertools.product(poscenter, negcenter): if not config.MIN_DIST < euclidean3d(pc.center, nc.center) < config.SALTBRIDGE_DIST_MAX: continue resnr = pc.resnr if protispos else nc.resnr resnr_l = whichresnumber(nc.orig_atoms[0]) if protispos else whichresnumber(pc.orig_atoms[0]) restype = pc.restype if protispos else nc.restype restype_l = whichrestype(nc.orig_atoms[0]) if protispos else whichrestype(pc.orig_atoms[0]) reschain = pc.reschain if protispos else nc.reschain reschain_l = whichchain(nc.orig_atoms[0]) if protispos else whichchain(pc.orig_atoms[0]) contact = data(positive=pc, negative=nc, distance=euclidean3d(pc.center, nc.center), protispos=protispos, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
Detect all salt bridges (interactions between centers of positive and negative charge)
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L211-L229
ssalentin/plip
plip/modules/detection.py
halogen
def halogen(acceptor, donor): """Detect all halogen bonds of the type Y-O...X-C""" data = namedtuple('halogenbond', 'acc acc_orig_idx don don_orig_idx distance don_angle acc_angle restype ' 'resnr reschain restype_l resnr_l reschain_l donortype acctype sidechain') pairings = [] for acc, don in itertools.product(acceptor, donor): dist = euclidean3d(acc.o.coords, don.x.coords) if not config.MIN_DIST < dist < config.HALOGEN_DIST_MAX: continue vec1, vec2 = vector(acc.o.coords, acc.y.coords), vector(acc.o.coords, don.x.coords) vec3, vec4 = vector(don.x.coords, acc.o.coords), vector(don.x.coords, don.c.coords) acc_angle, don_angle = vecangle(vec1, vec2), vecangle(vec3, vec4) is_sidechain_hal = acc.o.OBAtom.GetResidue().GetAtomProperty(acc.o.OBAtom, 8) # Check if sidechain atom if not config.HALOGEN_ACC_ANGLE - config.HALOGEN_ANGLE_DEV < acc_angle \ < config.HALOGEN_ACC_ANGLE + config.HALOGEN_ANGLE_DEV: continue if not config.HALOGEN_DON_ANGLE - config.HALOGEN_ANGLE_DEV < don_angle \ < config.HALOGEN_DON_ANGLE + config.HALOGEN_ANGLE_DEV: continue restype, reschain, resnr = whichrestype(acc.o), whichchain(acc.o), whichresnumber(acc.o) restype_l, reschain_l, resnr_l = whichrestype(don.orig_x), whichchain(don.orig_x), whichresnumber(don.orig_x) contact = data(acc=acc, acc_orig_idx=acc.o_orig_idx, don=don, don_orig_idx=don.x_orig_idx, distance=dist, don_angle=don_angle, acc_angle=acc_angle, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l, donortype=don.x.OBAtom.GetType(), acctype=acc.o.type, sidechain=is_sidechain_hal) pairings.append(contact) return filter_contacts(pairings)
python
def halogen(acceptor, donor): """Detect all halogen bonds of the type Y-O...X-C""" data = namedtuple('halogenbond', 'acc acc_orig_idx don don_orig_idx distance don_angle acc_angle restype ' 'resnr reschain restype_l resnr_l reschain_l donortype acctype sidechain') pairings = [] for acc, don in itertools.product(acceptor, donor): dist = euclidean3d(acc.o.coords, don.x.coords) if not config.MIN_DIST < dist < config.HALOGEN_DIST_MAX: continue vec1, vec2 = vector(acc.o.coords, acc.y.coords), vector(acc.o.coords, don.x.coords) vec3, vec4 = vector(don.x.coords, acc.o.coords), vector(don.x.coords, don.c.coords) acc_angle, don_angle = vecangle(vec1, vec2), vecangle(vec3, vec4) is_sidechain_hal = acc.o.OBAtom.GetResidue().GetAtomProperty(acc.o.OBAtom, 8) # Check if sidechain atom if not config.HALOGEN_ACC_ANGLE - config.HALOGEN_ANGLE_DEV < acc_angle \ < config.HALOGEN_ACC_ANGLE + config.HALOGEN_ANGLE_DEV: continue if not config.HALOGEN_DON_ANGLE - config.HALOGEN_ANGLE_DEV < don_angle \ < config.HALOGEN_DON_ANGLE + config.HALOGEN_ANGLE_DEV: continue restype, reschain, resnr = whichrestype(acc.o), whichchain(acc.o), whichresnumber(acc.o) restype_l, reschain_l, resnr_l = whichrestype(don.orig_x), whichchain(don.orig_x), whichresnumber(don.orig_x) contact = data(acc=acc, acc_orig_idx=acc.o_orig_idx, don=don, don_orig_idx=don.x_orig_idx, distance=dist, don_angle=don_angle, acc_angle=acc_angle, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l, donortype=don.x.OBAtom.GetType(), acctype=acc.o.type, sidechain=is_sidechain_hal) pairings.append(contact) return filter_contacts(pairings)
Detect all halogen bonds of the type Y-O...X-C
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L232-L260
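A minimal sketch of the angular selection for Y-O...X-C halogen bonds above: both the acceptor angle (Y-O...X) and the donor angle (O...X-C) must fall within a window around their ideal values. The names mirror PLIP's config module; the numbers are placeholders (roughly 120 and 165 degrees with a 30-degree deviation by default).

HALOGEN_ACC_ANGLE, HALOGEN_DON_ANGLE, HALOGEN_ANGLE_DEV = 120.0, 165.0, 30.0   # placeholders

def halogen_angles_ok(acc_angle, don_angle):
    acc_ok = HALOGEN_ACC_ANGLE - HALOGEN_ANGLE_DEV < acc_angle < HALOGEN_ACC_ANGLE + HALOGEN_ANGLE_DEV
    don_ok = HALOGEN_DON_ANGLE - HALOGEN_ANGLE_DEV < don_angle < HALOGEN_DON_ANGLE + HALOGEN_ANGLE_DEV
    return acc_ok and don_ok

print(halogen_angles_ok(118.0, 170.0))  # True
print(halogen_angles_ok(85.0, 170.0))   # False -- acceptor angle outside the window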
ssalentin/plip
plip/modules/detection.py
water_bridges
def water_bridges(bs_hba, lig_hba, bs_hbd, lig_hbd, water): """Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridged of first degree.""" data = namedtuple('waterbridge', 'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw ' 'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon') pairings = [] # First find all acceptor-water pairs with distance within d # and all donor-water pairs with distance within d and angle greater theta lig_aw, prot_aw, lig_dw, prot_hw = [], [], [], [] for w in water: for acc1 in lig_hba: dist = euclidean3d(acc1.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: lig_aw.append((acc1, w, dist)) for acc2 in bs_hba: dist = euclidean3d(acc2.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: prot_aw.append((acc2, w, dist)) for don1 in lig_hbd: dist = euclidean3d(don1.d.coords, w.oxy.coords) d_angle = vecangle(vector(don1.h.coords, don1.d.coords), vector(don1.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: lig_dw.append((don1, w, dist, d_angle)) for don2 in bs_hbd: dist = euclidean3d(don2.d.coords, w.oxy.coords) d_angle = vecangle(vector(don2.h.coords, don2.d.coords), vector(don2.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: prot_hw.append((don2, w, dist, d_angle)) for l, p in itertools.product(lig_aw, prot_hw): acc, wl, distance_aw = l don, wd, distance_dw, d_angle = p if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(don.d), whichchain(don.d), whichrestype(don.d) resnr_l, reschain_l, restype_l = whichresnumber(acc.a_orig_atom), whichchain( acc.a_orig_atom), whichrestype(acc.a_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protisdon=True) pairings.append(contact) for p, l in itertools.product(prot_aw, lig_dw): acc, wl, distance_aw = p don, wd, distance_dw, d_angle = l if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(acc.a), whichchain(acc.a), whichrestype(acc.a) resnr_l, reschain_l, restype_l = whichresnumber(don.d_orig_atom), whichchain( don.d_orig_atom), whichrestype(don.d_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, 
resnr_l=resnr_l, protisdon=False) pairings.append(contact) return filter_contacts(pairings)
python
def water_bridges(bs_hba, lig_hba, bs_hbd, lig_hbd, water): """Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridged of first degree.""" data = namedtuple('waterbridge', 'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw ' 'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon') pairings = [] # First find all acceptor-water pairs with distance within d # and all donor-water pairs with distance within d and angle greater theta lig_aw, prot_aw, lig_dw, prot_hw = [], [], [], [] for w in water: for acc1 in lig_hba: dist = euclidean3d(acc1.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: lig_aw.append((acc1, w, dist)) for acc2 in bs_hba: dist = euclidean3d(acc2.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: prot_aw.append((acc2, w, dist)) for don1 in lig_hbd: dist = euclidean3d(don1.d.coords, w.oxy.coords) d_angle = vecangle(vector(don1.h.coords, don1.d.coords), vector(don1.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: lig_dw.append((don1, w, dist, d_angle)) for don2 in bs_hbd: dist = euclidean3d(don2.d.coords, w.oxy.coords) d_angle = vecangle(vector(don2.h.coords, don2.d.coords), vector(don2.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: prot_hw.append((don2, w, dist, d_angle)) for l, p in itertools.product(lig_aw, prot_hw): acc, wl, distance_aw = l don, wd, distance_dw, d_angle = p if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(don.d), whichchain(don.d), whichrestype(don.d) resnr_l, reschain_l, restype_l = whichresnumber(acc.a_orig_atom), whichchain( acc.a_orig_atom), whichrestype(acc.a_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protisdon=True) pairings.append(contact) for p, l in itertools.product(prot_aw, lig_dw): acc, wl, distance_aw = p don, wd, distance_dw, d_angle = l if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(acc.a), whichchain(acc.a), whichrestype(acc.a) resnr_l, reschain_l, restype_l = whichresnumber(don.d_orig_atom), whichchain( don.d_orig_atom), whichrestype(don.d_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, 
resnr_l=resnr_l, protisdon=False) pairings.append(contact) return filter_contacts(pairings)
Find water-bridged hydrogen bonds between ligand and protein. For now, only bridges of first degree are considered.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L263-L330
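A minimal sketch of the two angular windows a candidate water bridge above has to pass: the donor angle theta (donor-H towards the water oxygen) and the water angle omega (acceptor...water...donor hydrogen). The names mirror PLIP's config module; the values are placeholders (roughly theta above 100 degrees and omega between 75 and 140 degrees by default).

WATER_BRIDGE_THETA_MIN = 100.0                      # placeholder
WATER_BRIDGE_OMEGA_MIN, WATER_BRIDGE_OMEGA_MAX = 75.0, 140.0   # placeholders

def is_water_bridge(d_angle, w_angle):
    return d_angle > WATER_BRIDGE_THETA_MIN and \
        WATER_BRIDGE_OMEGA_MIN < w_angle < WATER_BRIDGE_OMEGA_MAX

print(is_water_bridge(155.0, 110.0))  # True
print(is_water_bridge(155.0, 160.0))  # False -- water angle outside the omega window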
ssalentin/plip
plip/modules/detection.py
metal_complexation
def metal_complexation(metals, metal_binding_lig, metal_binding_bs):
    """Find all metal complexes between metals and appropriate groups in both protein and ligand, as well as water"""
    data = namedtuple('metal_complex', 'metal metal_orig_idx metal_type target target_orig_idx target_type '
                                       'coordination_num distance resnr restype '
                                       'reschain restype_l reschain_l resnr_l location rms, geometry num_partners complexnum')
    pairings_dict = {}
    pairings = []
    # #@todo Refactor
    metal_to_id = {}
    metal_to_orig_atom = {}
    for metal, target in itertools.product(metals, metal_binding_lig + metal_binding_bs):
        distance = euclidean3d(metal.m.coords, target.atom.coords)
        if not distance < config.METAL_DIST_MAX:
            continue
        if metal.m not in pairings_dict:
            pairings_dict[metal.m] = [(target, distance), ]
            metal_to_id[metal.m] = metal.m_orig_idx
            metal_to_orig_atom[metal.m] = metal.orig_m
        else:
            pairings_dict[metal.m].append((target, distance))
    for cnum, metal in enumerate(pairings_dict):
        rms = 0.0
        excluded = []
        # cnum +1 being the complex number
        contact_pairs = pairings_dict[metal]
        num_targets = len(contact_pairs)
        vectors_dict = defaultdict(list)
        for contact_pair in contact_pairs:
            target, distance = contact_pair
            vectors_dict[target.atom.idx].append(vector(metal.coords, target.atom.coords))

        # Listing of coordination numbers and their geometries
        configs = {2: ['linear', ],
                   3: ['trigonal.planar', 'trigonal.pyramidal'],
                   4: ['tetrahedral', 'square.planar'],
                   5: ['trigonal.bipyramidal', 'square.pyramidal'],
                   6: ['octahedral', ]}

        # Angle signatures for each geometry (as seen from each target atom)
        ideal_angles = {'linear': [[180.0]] * 2,
                        'trigonal.planar': [[120.0, 120.0]] * 3,
                        'trigonal.pyramidal': [[109.5, 109.5]] * 3,
                        'tetrahedral': [[109.5, 109.5, 109.5, 109.5]] * 4,
                        'square.planar': [[90.0, 90.0, 90.0, 90.0]] * 4,
                        'trigonal.bipyramidal': [[120.0, 120.0, 90.0, 90.0]] * 3 + [[90.0, 90.0, 90.0, 180.0]] * 2,
                        'square.pyramidal': [[90.0, 90.0, 90.0, 180.0]] * 4 + [[90.0, 90.0, 90.0, 90.0]],
                        'octahedral': [[90.0, 90.0, 90.0, 90.0, 180.0]] * 6}
        angles_dict = {}
        for target in vectors_dict:
            cur_vector = vectors_dict[target]
            other_vectors = []
            for t in vectors_dict:
                if not t == target:
                    [other_vectors.append(x) for x in vectors_dict[t]]
            angles = [vecangle(pair[0], pair[1]) for pair in itertools.product(cur_vector, other_vectors)]
            angles_dict[target] = angles

        all_total = []  # Record fit information for each geometry tested
        gdata = namedtuple('gdata', 'geometry rms coordination excluded diff_targets')  # Geometry Data
        # Can't specify geometry with only one target
        if num_targets == 1:
            final_geom = 'NA'
            final_coo = 1
            excluded = []
            rms = 0.0
        else:
            for coo in sorted(configs, reverse=True):  # Start with highest coordination number
                geometries = configs[coo]
                for geometry in geometries:
                    signature = ideal_angles[geometry]  # Set of ideal angles for geometry, from each perspective
                    geometry_total = 0
                    geometry_scores = []  # All scores for one geometry (from all subsignatures)
                    used_up_targets = []  # Use each target just once for a subsignature
                    not_used = []
                    coo_diff = num_targets - coo  # How many more observed targets are there?
                    # Find best match for each subsignature
                    for subsignature in signature:  # Ideal angles from one perspective
                        best_target = None  # There's one best-matching target for each subsignature
                        best_target_score = 999
                        for k, target in enumerate(angles_dict):
                            if target not in used_up_targets:
                                observed_angles = angles_dict[target]  # Observed angles from perspective of one target
                                single_target_scores = []
                                used_up_observed_angles = []
                                for i, ideal_angle in enumerate(subsignature):
                                    # For each angle in the signature, find the best-matching observed angle
                                    best_match = None
                                    best_match_diff = 999
                                    for j, observed_angle in enumerate(observed_angles):
                                        if j not in used_up_observed_angles:
                                            diff = abs(ideal_angle - observed_angle)
                                            if diff < best_match_diff:
                                                best_match_diff = diff
                                                best_match = j
                                    if best_match is not None:
                                        used_up_observed_angles.append(best_match)
                                        single_target_scores.append(best_match_diff)
                                # Calculate RMS for target angles
                                target_total = sum([x ** 2 for x in single_target_scores]) ** 0.5  # Tot. score targ/sig
                                if target_total < best_target_score:
                                    best_target_score = target_total
                                    best_target = target
                        used_up_targets.append(best_target)
                        geometry_scores.append(best_target_score)
                        # Total score is mean of RMS values
                        geometry_total = np.mean(geometry_scores)
                    # Record the targets not used for excluding them when deciding for a final geometry
                    [not_used.append(target) for target in angles_dict if target not in used_up_targets]
                    all_total.append(gdata(geometry=geometry, rms=geometry_total, coordination=coo,
                                           excluded=not_used, diff_targets=coo_diff))
        # Make a decision here. Starting with the geometry with lowest difference in ideal and observed partners ...
        # Check if the difference between the RMS to the next best solution is not larger than 0.5
        if not num_targets == 1:  # Can't decide for any geometry in that case
            all_total = sorted(all_total, key=lambda x: abs(x.diff_targets))
            for i, total in enumerate(all_total):
                next_total = all_total[i + 1]
                this_rms, next_rms = total.rms, next_total.rms
                diff_to_next = next_rms - this_rms
                if diff_to_next > 0.5:
                    final_geom, final_coo, rms, excluded = total.geometry, total.coordination, total.rms, total.excluded
                    break
                elif next_total.rms < 3.5:
                    final_geom, final_coo, = next_total.geometry, next_total.coordination
                    rms, excluded = next_total.rms, next_total.excluded
                    break
                elif i == len(all_total) - 2:
                    final_geom, final_coo, rms, excluded = "NA", "NA", float('nan'), []
                    break
        # Record all contact pairings, excluding those with targets superfluous for chosen geometry
        only_water = set([x[0].location for x in contact_pairs]) == {'water'}
        if not only_water:  # No complex if just with water as targets
            write_message("Metal ion %s complexed with %s geometry (coo. number %r/ %i observed).\n"
                          % (metal.type, final_geom, final_coo, num_targets), indent=True)
            for contact_pair in contact_pairs:
                target, distance = contact_pair
                if target.atom.idx not in excluded:
                    metal_orig_atom = metal_to_orig_atom[metal]
                    restype_l, reschain_l, resnr_l = whichrestype(metal_orig_atom), whichchain(
                        metal_orig_atom), whichresnumber(metal_orig_atom)
                    contact = data(metal=metal, metal_orig_idx=metal_to_id[metal], metal_type=metal.type,
                                   target=target, target_orig_idx=target.atom_orig_idx, target_type=target.type,
                                   coordination_num=final_coo, distance=distance, resnr=target.resnr,
                                   restype=target.restype, reschain=target.reschain, location=target.location,
                                   rms=rms, geometry=final_geom, num_partners=num_targets, complexnum=cnum + 1,
                                   resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l)
                    pairings.append(contact)
    return filter_contacts(pairings)
python
def metal_complexation(metals, metal_binding_lig, metal_binding_bs):
    """Find all metal complexes between metals and appropriate groups in both protein and ligand, as well as water"""
    data = namedtuple('metal_complex', 'metal metal_orig_idx metal_type target target_orig_idx target_type '
                                       'coordination_num distance resnr restype '
                                       'reschain restype_l reschain_l resnr_l location rms, geometry num_partners complexnum')
    pairings_dict = {}
    pairings = []
    # #@todo Refactor
    metal_to_id = {}
    metal_to_orig_atom = {}
    for metal, target in itertools.product(metals, metal_binding_lig + metal_binding_bs):
        distance = euclidean3d(metal.m.coords, target.atom.coords)
        if not distance < config.METAL_DIST_MAX:
            continue
        if metal.m not in pairings_dict:
            pairings_dict[metal.m] = [(target, distance), ]
            metal_to_id[metal.m] = metal.m_orig_idx
            metal_to_orig_atom[metal.m] = metal.orig_m
        else:
            pairings_dict[metal.m].append((target, distance))
    for cnum, metal in enumerate(pairings_dict):
        rms = 0.0
        excluded = []
        # cnum +1 being the complex number
        contact_pairs = pairings_dict[metal]
        num_targets = len(contact_pairs)
        vectors_dict = defaultdict(list)
        for contact_pair in contact_pairs:
            target, distance = contact_pair
            vectors_dict[target.atom.idx].append(vector(metal.coords, target.atom.coords))

        # Listing of coordination numbers and their geometries
        configs = {2: ['linear', ],
                   3: ['trigonal.planar', 'trigonal.pyramidal'],
                   4: ['tetrahedral', 'square.planar'],
                   5: ['trigonal.bipyramidal', 'square.pyramidal'],
                   6: ['octahedral', ]}

        # Angle signatures for each geometry (as seen from each target atom)
        ideal_angles = {'linear': [[180.0]] * 2,
                        'trigonal.planar': [[120.0, 120.0]] * 3,
                        'trigonal.pyramidal': [[109.5, 109.5]] * 3,
                        'tetrahedral': [[109.5, 109.5, 109.5, 109.5]] * 4,
                        'square.planar': [[90.0, 90.0, 90.0, 90.0]] * 4,
                        'trigonal.bipyramidal': [[120.0, 120.0, 90.0, 90.0]] * 3 + [[90.0, 90.0, 90.0, 180.0]] * 2,
                        'square.pyramidal': [[90.0, 90.0, 90.0, 180.0]] * 4 + [[90.0, 90.0, 90.0, 90.0]],
                        'octahedral': [[90.0, 90.0, 90.0, 90.0, 180.0]] * 6}
        angles_dict = {}
        for target in vectors_dict:
            cur_vector = vectors_dict[target]
            other_vectors = []
            for t in vectors_dict:
                if not t == target:
                    [other_vectors.append(x) for x in vectors_dict[t]]
            angles = [vecangle(pair[0], pair[1]) for pair in itertools.product(cur_vector, other_vectors)]
            angles_dict[target] = angles

        all_total = []  # Record fit information for each geometry tested
        gdata = namedtuple('gdata', 'geometry rms coordination excluded diff_targets')  # Geometry Data
        # Can't specify geometry with only one target
        if num_targets == 1:
            final_geom = 'NA'
            final_coo = 1
            excluded = []
            rms = 0.0
        else:
            for coo in sorted(configs, reverse=True):  # Start with highest coordination number
                geometries = configs[coo]
                for geometry in geometries:
                    signature = ideal_angles[geometry]  # Set of ideal angles for geometry, from each perspective
                    geometry_total = 0
                    geometry_scores = []  # All scores for one geometry (from all subsignatures)
                    used_up_targets = []  # Use each target just once for a subsignature
                    not_used = []
                    coo_diff = num_targets - coo  # How many more observed targets are there?
                    # Find best match for each subsignature
                    for subsignature in signature:  # Ideal angles from one perspective
                        best_target = None  # There's one best-matching target for each subsignature
                        best_target_score = 999
                        for k, target in enumerate(angles_dict):
                            if target not in used_up_targets:
                                observed_angles = angles_dict[target]  # Observed angles from perspective of one target
                                single_target_scores = []
                                used_up_observed_angles = []
                                for i, ideal_angle in enumerate(subsignature):
                                    # For each angle in the signature, find the best-matching observed angle
                                    best_match = None
                                    best_match_diff = 999
                                    for j, observed_angle in enumerate(observed_angles):
                                        if j not in used_up_observed_angles:
                                            diff = abs(ideal_angle - observed_angle)
                                            if diff < best_match_diff:
                                                best_match_diff = diff
                                                best_match = j
                                    if best_match is not None:
                                        used_up_observed_angles.append(best_match)
                                        single_target_scores.append(best_match_diff)
                                # Calculate RMS for target angles
                                target_total = sum([x ** 2 for x in single_target_scores]) ** 0.5  # Tot. score targ/sig
                                if target_total < best_target_score:
                                    best_target_score = target_total
                                    best_target = target
                        used_up_targets.append(best_target)
                        geometry_scores.append(best_target_score)
                        # Total score is mean of RMS values
                        geometry_total = np.mean(geometry_scores)
                    # Record the targets not used for excluding them when deciding for a final geometry
                    [not_used.append(target) for target in angles_dict if target not in used_up_targets]
                    all_total.append(gdata(geometry=geometry, rms=geometry_total, coordination=coo,
                                           excluded=not_used, diff_targets=coo_diff))
        # Make a decision here. Starting with the geometry with lowest difference in ideal and observed partners ...
        # Check if the difference between the RMS to the next best solution is not larger than 0.5
        if not num_targets == 1:  # Can't decide for any geometry in that case
            all_total = sorted(all_total, key=lambda x: abs(x.diff_targets))
            for i, total in enumerate(all_total):
                next_total = all_total[i + 1]
                this_rms, next_rms = total.rms, next_total.rms
                diff_to_next = next_rms - this_rms
                if diff_to_next > 0.5:
                    final_geom, final_coo, rms, excluded = total.geometry, total.coordination, total.rms, total.excluded
                    break
                elif next_total.rms < 3.5:
                    final_geom, final_coo, = next_total.geometry, next_total.coordination
                    rms, excluded = next_total.rms, next_total.excluded
                    break
                elif i == len(all_total) - 2:
                    final_geom, final_coo, rms, excluded = "NA", "NA", float('nan'), []
                    break
        # Record all contact pairings, excluding those with targets superfluous for chosen geometry
        only_water = set([x[0].location for x in contact_pairs]) == {'water'}
        if not only_water:  # No complex if just with water as targets
            write_message("Metal ion %s complexed with %s geometry (coo. number %r/ %i observed).\n"
                          % (metal.type, final_geom, final_coo, num_targets), indent=True)
            for contact_pair in contact_pairs:
                target, distance = contact_pair
                if target.atom.idx not in excluded:
                    metal_orig_atom = metal_to_orig_atom[metal]
                    restype_l, reschain_l, resnr_l = whichrestype(metal_orig_atom), whichchain(
                        metal_orig_atom), whichresnumber(metal_orig_atom)
                    contact = data(metal=metal, metal_orig_idx=metal_to_id[metal], metal_type=metal.type,
                                   target=target, target_orig_idx=target.atom_orig_idx, target_type=target.type,
                                   coordination_num=final_coo, distance=distance, resnr=target.resnr,
                                   restype=target.restype, reschain=target.reschain, location=target.location,
                                   rms=rms, geometry=final_geom, num_partners=num_targets, complexnum=cnum + 1,
                                   resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l)
                    pairings.append(contact)
    return filter_contacts(pairings)
Find all metal complexes between metals and appropriate groups in both protein and ligand, as well as water
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L333-L485
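Note on the geometry assignment above: each candidate geometry is scored by greedily matching observed metal-target angles against an ideal angle signature and taking the root-sum-square of the deviations. The following is a minimal standalone sketch of just that scoring step; the helper name angle_rms is invented for illustration and is not part of the PLIP API.

# Hypothetical illustration of the angle-matching score used above (not PLIP code).
def angle_rms(ideal_angles, observed_angles):
    """Greedily pair each ideal angle with the closest unused observed angle
    and return the root-sum-square of the differences (lower = better fit)."""
    used = set()
    diffs = []
    for ideal in ideal_angles:
        best_j, best_diff = None, float('inf')
        for j, observed in enumerate(observed_angles):
            if j not in used and abs(ideal - observed) < best_diff:
                best_j, best_diff = j, abs(ideal - observed)
        if best_j is not None:
            used.add(best_j)
            diffs.append(best_diff)
    return sum(d ** 2 for d in diffs) ** 0.5

# A perfectly tetrahedral set of observed angles scores 0.0:
print(angle_rms([109.5, 109.5, 109.5], [109.5, 109.5, 109.5]))  # 0.0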
ssalentin/plip
plip/modules/plipxml.py
XMLStorage.getdata
def getdata(self, tree, location, force_string=False):
    """Gets XML data from a specific element and handles types."""
    found = tree.xpath('%s/text()' % location)
    if not found:
        return None
    else:
        data = found[0]
    if force_string:
        return data
    if data == 'True':
        return True
    elif data == 'False':
        return False
    else:
        try:
            return int(data)
        except ValueError:
            try:
                return float(data)
            except ValueError:
                # It's a string
                return data
python
def getdata(self, tree, location, force_string=False):
    """Gets XML data from a specific element and handles types."""
    found = tree.xpath('%s/text()' % location)
    if not found:
        return None
    else:
        data = found[0]
    if force_string:
        return data
    if data == 'True':
        return True
    elif data == 'False':
        return False
    else:
        try:
            return int(data)
        except ValueError:
            try:
                return float(data)
            except ValueError:
                # It's a string
                return data
Gets XML data from a specific element and handles types.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/plipxml.py#L18-L39
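The type handling in getdata follows a simple coercion chain: the boolean literals first, then int, then float, then plain string. A small self-contained equivalent, shown only to illustrate the fallback order (the function name coerce is invented, not PLIP API):

# Stand-alone illustration of the coercion order used by getdata above.
def coerce(text):
    if text == 'True':
        return True
    if text == 'False':
        return False
    try:
        return int(text)
    except ValueError:
        try:
            return float(text)
        except ValueError:
            return text  # plain string

print(coerce('False'), coerce('42'), coerce('3.5'), coerce('LIG'))  # False 42 3.5 LIG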
ssalentin/plip
plip/modules/plipxml.py
XMLStorage.getcoordinates
def getcoordinates(self, tree, location):
    """Gets coordinates from a specific element in PLIP XML"""
    return tuple(float(x) for x in tree.xpath('.//%s/*/text()' % location))
python
def getcoordinates(self, tree, location):
    """Gets coordinates from a specific element in PLIP XML"""
    return tuple(float(x) for x in tree.xpath('.//%s/*/text()' % location))
Gets coordinates from a specific element in PLIP XML
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/plipxml.py#L41-L43
ssalentin/plip
plip/modules/plipxml.py
BSite.get_atom_mapping
def get_atom_mapping(self):
    """Parses the ligand atom mapping."""
    # Atom mappings
    smiles_to_pdb_mapping = self.bindingsite.xpath('mappings/smiles_to_pdb/text()')
    if smiles_to_pdb_mapping == []:
        self.mappings = {'smiles_to_pdb': None, 'pdb_to_smiles': None}
    else:
        smiles_to_pdb_mapping = {int(y[0]): int(y[1]) for y in
                                 [x.split(':') for x in smiles_to_pdb_mapping[0].split(',')]}
        self.mappings = {'smiles_to_pdb': smiles_to_pdb_mapping}
        self.mappings['pdb_to_smiles'] = {v: k for k, v in self.mappings['smiles_to_pdb'].items()}
python
def get_atom_mapping(self):
    """Parses the ligand atom mapping."""
    # Atom mappings
    smiles_to_pdb_mapping = self.bindingsite.xpath('mappings/smiles_to_pdb/text()')
    if smiles_to_pdb_mapping == []:
        self.mappings = {'smiles_to_pdb': None, 'pdb_to_smiles': None}
    else:
        smiles_to_pdb_mapping = {int(y[0]): int(y[1]) for y in
                                 [x.split(':') for x in smiles_to_pdb_mapping[0].split(',')]}
        self.mappings = {'smiles_to_pdb': smiles_to_pdb_mapping}
        self.mappings['pdb_to_smiles'] = {v: k for k, v in self.mappings['smiles_to_pdb'].items()}
Parses the ligand atom mapping.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/plipxml.py#L249-L259
ssalentin/plip
plip/modules/plipxml.py
BSite.get_counts
def get_counts(self):
    """counts the interaction types and backbone hydrogen bonding in a binding site"""
    hbondsback = len([hb for hb in self.hbonds if not hb.sidechain])
    counts = {'hydrophobics': len(self.hydrophobics), 'hbonds': len(self.hbonds),
              'wbridges': len(self.wbridges), 'sbridges': len(self.sbridges),
              'pistacks': len(self.pi_stacks), 'pications': len(self.pi_cations),
              'halogens': len(self.halogens), 'metal': len(self.metal_complexes),
              'hbond_back': hbondsback, 'hbond_nonback': (len(self.hbonds) - hbondsback)}
    counts['total'] = counts['hydrophobics'] + counts['hbonds'] + counts['wbridges'] + \
        counts['sbridges'] + counts['pistacks'] + counts['pications'] + counts['halogens'] + counts['metal']
    return counts
python
def get_counts(self):
    """counts the interaction types and backbone hydrogen bonding in a binding site"""
    hbondsback = len([hb for hb in self.hbonds if not hb.sidechain])
    counts = {'hydrophobics': len(self.hydrophobics), 'hbonds': len(self.hbonds),
              'wbridges': len(self.wbridges), 'sbridges': len(self.sbridges),
              'pistacks': len(self.pi_stacks), 'pications': len(self.pi_cations),
              'halogens': len(self.halogens), 'metal': len(self.metal_complexes),
              'hbond_back': hbondsback, 'hbond_nonback': (len(self.hbonds) - hbondsback)}
    counts['total'] = counts['hydrophobics'] + counts['hbonds'] + counts['wbridges'] + \
        counts['sbridges'] + counts['pistacks'] + counts['pications'] + counts['halogens'] + counts['metal']
    return counts
counts the interaction types and backbone hydrogen bonding in a binding site
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/plipxml.py#L261-L271
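get_counts simply tallies list lengths per interaction type and sums them into 'total'; note that 'hbond_back' and 'hbond_nonback' are a breakdown of 'hbonds' and are deliberately left out of the sum. A small illustration with invented counts:

# Hypothetical example of how the totals add up (values invented for illustration).
counts = {'hydrophobics': 3, 'hbonds': 2, 'wbridges': 1, 'sbridges': 0,
          'pistacks': 1, 'pications': 0, 'halogens': 0, 'metal': 1,
          'hbond_back': 1, 'hbond_nonback': 1}
total = (counts['hydrophobics'] + counts['hbonds'] + counts['wbridges'] + counts['sbridges']
         + counts['pistacks'] + counts['pications'] + counts['halogens'] + counts['metal'])
print(total)  # 8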
ssalentin/plip
plip/modules/plipxml.py
PLIPXMLREST.load_data
def load_data(self, pdbid):
    """Loads and parses an XML resource and saves it as a tree if successful"""
    f = urlopen("http://projects.biotec.tu-dresden.de/plip-rest/pdb/%s?format=xml" % pdbid.lower())
    self.doc = etree.parse(f)
python
def load_data(self, pdbid):
    """Loads and parses an XML resource and saves it as a tree if successful"""
    f = urlopen("http://projects.biotec.tu-dresden.de/plip-rest/pdb/%s?format=xml" % pdbid.lower())
    self.doc = etree.parse(f)
Loads and parses an XML resource and saves it as a tree if successful
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/plipxml.py#L303-L306
ella/ella
ella/articles/migrations/0005_move_updated_to_publishable.py
Migration.forwards
def forwards(self, orm):
    "Write your forwards methods here."
    for a in orm.Article.objects.all():
        if a.updated:
            a.last_updated = a.updated
            a.save(force_update=True)
python
def forwards(self, orm):
    "Write your forwards methods here."
    for a in orm.Article.objects.all():
        if a.updated:
            a.last_updated = a.updated
            a.save(force_update=True)
Write your forwards methods here.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/articles/migrations/0005_move_updated_to_publishable.py#L16-L21
ssalentin/plip
plip/modules/webservices.py
check_pdb_status
def check_pdb_status(pdbid):
    """Returns the status and up-to-date entry in the PDB for a given PDB ID"""
    url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
    xmlf = urlopen(url)
    xml = et.parse(xmlf)
    xmlf.close()
    status = None
    current_pdbid = pdbid
    for df in xml.xpath('//record'):
        status = df.attrib['status']  # Status of an entry can be either 'UNKNOWN', 'OBSOLETE', or 'CURRENT'
        if status == 'OBSOLETE':
            current_pdbid = df.attrib['replacedBy']  # Contains the up-to-date PDB ID for obsolete entries
    return [status, current_pdbid.lower()]
python
def check_pdb_status(pdbid):
    """Returns the status and up-to-date entry in the PDB for a given PDB ID"""
    url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
    xmlf = urlopen(url)
    xml = et.parse(xmlf)
    xmlf.close()
    status = None
    current_pdbid = pdbid
    for df in xml.xpath('//record'):
        status = df.attrib['status']  # Status of an entry can be either 'UNKNOWN', 'OBSOLETE', or 'CURRENT'
        if status == 'OBSOLETE':
            current_pdbid = df.attrib['replacedBy']  # Contains the up-to-date PDB ID for obsolete entries
    return [status, current_pdbid.lower()]
Returns the status and up-to-date entry in the PDB for a given PDB ID
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/webservices.py#L22-L34
ssalentin/plip
plip/modules/webservices.py
fetch_pdb
def fetch_pdb(pdbid):
    """Get the newest entry from the RCSB server for the given PDB ID. Exits with '1' if PDB ID is invalid."""
    pdbid = pdbid.lower()
    write_message('\nChecking status of PDB ID %s ... ' % pdbid)
    state, current_entry = check_pdb_status(pdbid)  # Get state and current PDB ID
    if state == 'OBSOLETE':
        write_message('entry is obsolete, getting %s instead.\n' % current_entry)
    elif state == 'CURRENT':
        write_message('entry is up to date.\n')
    elif state == 'UNKNOWN':
        sysexit(3, 'Invalid PDB ID (Entry does not exist on PDB server)\n')
    write_message('Downloading file from PDB ... ')
    pdburl = 'http://www.rcsb.org/pdb/files/%s.pdb' % current_entry  # Get URL for current entry
    try:
        pdbfile = urlopen(pdburl).read().decode()
        # If no PDB file is available, a text is now shown with "We're sorry, but ..."
        # Could previously be distinguished by an HTTP error
        if 'sorry' in pdbfile:
            sysexit(5, "No file in PDB format available from wwPDB for the given PDB ID.\n")
    except HTTPError:
        sysexit(5, "No file in PDB format available from wwPDB for the given PDB ID.\n")
    return [pdbfile, current_entry]
python
def fetch_pdb(pdbid):
    """Get the newest entry from the RCSB server for the given PDB ID. Exits with '1' if PDB ID is invalid."""
    pdbid = pdbid.lower()
    write_message('\nChecking status of PDB ID %s ... ' % pdbid)
    state, current_entry = check_pdb_status(pdbid)  # Get state and current PDB ID
    if state == 'OBSOLETE':
        write_message('entry is obsolete, getting %s instead.\n' % current_entry)
    elif state == 'CURRENT':
        write_message('entry is up to date.\n')
    elif state == 'UNKNOWN':
        sysexit(3, 'Invalid PDB ID (Entry does not exist on PDB server)\n')
    write_message('Downloading file from PDB ... ')
    pdburl = 'http://www.rcsb.org/pdb/files/%s.pdb' % current_entry  # Get URL for current entry
    try:
        pdbfile = urlopen(pdburl).read().decode()
        # If no PDB file is available, a text is now shown with "We're sorry, but ..."
        # Could previously be distinguished by an HTTP error
        if 'sorry' in pdbfile:
            sysexit(5, "No file in PDB format available from wwPDB for the given PDB ID.\n")
    except HTTPError:
        sysexit(5, "No file in PDB format available from wwPDB for the given PDB ID.\n")
    return [pdbfile, current_entry]
Get the newest entry from the RCSB server for the given PDB ID. Exits with '1' if PDB ID is invalid.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/webservices.py#L37-L59
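Used together, check_pdb_status and fetch_pdb resolve an ID to its current entry and download the structure. A hedged usage sketch (the PDB ID is arbitrary, and the legacy RCSB REST endpoints queried above have since been retired, so this may fail against current servers):

# Hypothetical usage of the two helpers above; may not work anymore because the
# legacy RCSB services (idStatus and /pdb/files/) have been deprecated.
pdbfile, current_entry = fetch_pdb('1vsn')
with open('%s.pdb' % current_entry, 'w') as f:
    f.write(pdbfile)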
ella/ella
ella/positions/models.py
PositionBox
def PositionBox(position, *args, **kwargs):
    " Delegate the boxing. "
    obj = position.target
    return getattr(position.target, 'box_class', Box)(obj, *args, **kwargs)
python
def PositionBox(position, *args, **kwargs):
    " Delegate the boxing. "
    obj = position.target
    return getattr(position.target, 'box_class', Box)(obj, *args, **kwargs)
Delegate the boxing.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/models.py#L57-L60
ella/ella
ella/positions/models.py
PositionManager.get_active_position
def get_active_position(self, category, name, nofallback=False):
    """
    Get active position for given position name.

    params:
        category - Category model to look for
        name - name of the position
        nofallback - if True then do not fall back to parent
            category if active position is not found for category
    """
    now = timezone.now()
    lookup = (Q(active_from__isnull=True) | Q(active_from__lte=now)) & \
             (Q(active_till__isnull=True) | Q(active_till__gt=now))
    while True:
        try:
            return self.get(lookup, category=category, name=name, disabled=False)
        except Position.DoesNotExist:
            # if nofallback was specified, do not look into parent categories
            if nofallback:
                return False

            # traverse the category tree to the top otherwise
            category = category.tree_parent

            # we reached the top and still haven't found the position - return
            if category is None:
                return False
python
def get_active_position(self, category, name, nofallback=False):
    """
    Get active position for given position name.

    params:
        category - Category model to look for
        name - name of the position
        nofallback - if True then do not fall back to parent
            category if active position is not found for category
    """
    now = timezone.now()
    lookup = (Q(active_from__isnull=True) | Q(active_from__lte=now)) & \
             (Q(active_till__isnull=True) | Q(active_till__gt=now))
    while True:
        try:
            return self.get(lookup, category=category, name=name, disabled=False)
        except Position.DoesNotExist:
            # if nofallback was specified, do not look into parent categories
            if nofallback:
                return False

            # traverse the category tree to the top otherwise
            category = category.tree_parent

            # we reached the top and still haven't found the position - return
            if category is None:
                return False
Get active position for given position name. params: category - Category model to look for name - name of the position nofallback - if True then do not fall back to parent category if active position is not found for category
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/models.py#L26-L54
ella/ella
ella/positions/models.py
Position.render
def render(self, context, nodelist, box_type):
    " Render the position. "
    if not self.target:
        if self.target_ct:
            # broken Generic FK:
            log.warning('Broken target for position with pk %r', self.pk)
            return ''
        try:
            return Template(self.text, name="position-%s" % self.name).render(context)
        except TemplateSyntaxError:
            log.error('Broken definition for position with pk %r', self.pk)
            return ''

    if self.box_type:
        box_type = self.box_type

    if self.text:
        nodelist = Template('%s\n%s' % (nodelist.render({}), self.text),
                            name="position-%s" % self.name).nodelist

    b = self.box_class(self, box_type, nodelist)
    return b.render(context)
python
def render(self, context, nodelist, box_type):
    " Render the position. "
    if not self.target:
        if self.target_ct:
            # broken Generic FK:
            log.warning('Broken target for position with pk %r', self.pk)
            return ''
        try:
            return Template(self.text, name="position-%s" % self.name).render(context)
        except TemplateSyntaxError:
            log.error('Broken definition for position with pk %r', self.pk)
            return ''

    if self.box_type:
        box_type = self.box_type

    if self.text:
        nodelist = Template('%s\n%s' % (nodelist.render({}), self.text),
                            name="position-%s" % self.name).nodelist

    b = self.box_class(self, box_type, nodelist)
    return b.render(context)
Render the position.
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/positions/models.py#L119-L139
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.set_initial_representations
def set_initial_representations(self):
    """General settings for PyMOL"""
    self.standard_settings()
    cmd.set('dash_gap', 0)  # Show lines instead of dashes for the pliprofiler
    cmd.set('ray_shadow', 0)  # Turn off ray shadows for clearer ray-traced images
    cmd.set('cartoon_color', 'mylightblue')

    # Set clipping planes for full view
    cmd.clip('far', -1000)
    cmd.clip('near', 1000)
python
def set_initial_representations(self):
    """General settings for PyMOL"""
    self.standard_settings()
    cmd.set('dash_gap', 0)  # Show lines instead of dashes for the pliprofiler
    cmd.set('ray_shadow', 0)  # Turn off ray shadows for clearer ray-traced images
    cmd.set('cartoon_color', 'mylightblue')

    # Set clipping planes for full view
    cmd.clip('far', -1000)
    cmd.clip('near', 1000)
General settings for PyMOL
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L24-L33
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.standard_settings
def standard_settings(self):
    """Sets up standard settings for a nice visualization."""
    cmd.set('bg_rgb', [1.0, 1.0, 1.0])  # White background
    cmd.set('depth_cue', 0)  # Turn off depth cueing (no fog)
    cmd.set('cartoon_side_chain_helper', 1)  # Improve combined visualization of sticks and cartoon
    cmd.set('cartoon_fancy_helices', 1)  # Nicer visualization of helices (using tapered ends)
    cmd.set('transparency_mode', 1)  # Turn on multilayer transparency
    cmd.set('dash_radius', 0.05)
    self.set_custom_colorset()
python
def standard_settings(self):
    """Sets up standard settings for a nice visualization."""
    cmd.set('bg_rgb', [1.0, 1.0, 1.0])  # White background
    cmd.set('depth_cue', 0)  # Turn off depth cueing (no fog)
    cmd.set('cartoon_side_chain_helper', 1)  # Improve combined visualization of sticks and cartoon
    cmd.set('cartoon_fancy_helices', 1)  # Nicer visualization of helices (using tapered ends)
    cmd.set('transparency_mode', 1)  # Turn on multilayer transparency
    cmd.set('dash_radius', 0.05)
    self.set_custom_colorset()
Sets up standard settings for a nice visualization.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L46-L54
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.set_custom_colorset
def set_custom_colorset(self):
    """Defines a colorset with matching colors. Provided by Joachim."""
    cmd.set_color('myorange', '[253, 174, 97]')
    cmd.set_color('mygreen', '[171, 221, 164]')
    cmd.set_color('myred', '[215, 25, 28]')
    cmd.set_color('myblue', '[43, 131, 186]')
    cmd.set_color('mylightblue', '[158, 202, 225]')
    cmd.set_color('mylightgreen', '[229, 245, 224]')
python
def set_custom_colorset(self):
    """Defines a colorset with matching colors. Provided by Joachim."""
    cmd.set_color('myorange', '[253, 174, 97]')
    cmd.set_color('mygreen', '[171, 221, 164]')
    cmd.set_color('myred', '[215, 25, 28]')
    cmd.set_color('myblue', '[43, 131, 186]')
    cmd.set_color('mylightblue', '[158, 202, 225]')
    cmd.set_color('mylightgreen', '[229, 245, 224]')
Defines a colorset with matching colors. Provided by Joachim.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L56-L63
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.show_hydrophobic
def show_hydrophobic(self):
    """Visualizes hydrophobic contacts."""
    hydroph = self.plcomplex.hydrophobic_contacts
    if not len(hydroph.bs_ids) == 0:
        self.select_by_ids('Hydrophobic-P', hydroph.bs_ids, restrict=self.protname)
        self.select_by_ids('Hydrophobic-L', hydroph.lig_ids, restrict=self.ligname)

        for i in hydroph.pairs_ids:
            cmd.select('tmp_bs', 'id %i & %s' % (i[0], self.protname))
            cmd.select('tmp_lig', 'id %i & %s' % (i[1], self.ligname))
            cmd.distance('Hydrophobic', 'tmp_bs', 'tmp_lig')

        if self.object_exists('Hydrophobic'):
            cmd.set('dash_gap', 0.5, 'Hydrophobic')
            cmd.set('dash_color', 'grey50', 'Hydrophobic')
    else:
        cmd.select('Hydrophobic-P', 'None')
python
def show_hydrophobic(self):
    """Visualizes hydrophobic contacts."""
    hydroph = self.plcomplex.hydrophobic_contacts
    if not len(hydroph.bs_ids) == 0:
        self.select_by_ids('Hydrophobic-P', hydroph.bs_ids, restrict=self.protname)
        self.select_by_ids('Hydrophobic-L', hydroph.lig_ids, restrict=self.ligname)

        for i in hydroph.pairs_ids:
            cmd.select('tmp_bs', 'id %i & %s' % (i[0], self.protname))
            cmd.select('tmp_lig', 'id %i & %s' % (i[1], self.ligname))
            cmd.distance('Hydrophobic', 'tmp_bs', 'tmp_lig')

        if self.object_exists('Hydrophobic'):
            cmd.set('dash_gap', 0.5, 'Hydrophobic')
            cmd.set('dash_color', 'grey50', 'Hydrophobic')
    else:
        cmd.select('Hydrophobic-P', 'None')
Visualizes hydrophobic contacts.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L83-L98
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.show_hbonds
def show_hbonds(self):
    """Visualizes hydrogen bonds."""
    hbonds = self.plcomplex.hbonds
    for group in [['HBondDonor-P', hbonds.prot_don_id],
                  ['HBondAccept-P', hbonds.prot_acc_id]]:
        if not len(group[1]) == 0:
            self.select_by_ids(group[0], group[1], restrict=self.protname)
    for group in [['HBondDonor-L', hbonds.lig_don_id],
                  ['HBondAccept-L', hbonds.lig_acc_id]]:
        if not len(group[1]) == 0:
            self.select_by_ids(group[0], group[1], restrict=self.ligname)
    for i in hbonds.ldon_id:
        cmd.select('tmp_bs', 'id %i & %s' % (i[0], self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (i[1], self.ligname))
        cmd.distance('HBonds', 'tmp_bs', 'tmp_lig')
    for i in hbonds.pdon_id:
        cmd.select('tmp_bs', 'id %i & %s' % (i[1], self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (i[0], self.ligname))
        cmd.distance('HBonds', 'tmp_bs', 'tmp_lig')
    if self.object_exists('HBonds'):
        cmd.set('dash_color', 'blue', 'HBonds')
python
def show_hbonds(self):
    """Visualizes hydrogen bonds."""
    hbonds = self.plcomplex.hbonds
    for group in [['HBondDonor-P', hbonds.prot_don_id],
                  ['HBondAccept-P', hbonds.prot_acc_id]]:
        if not len(group[1]) == 0:
            self.select_by_ids(group[0], group[1], restrict=self.protname)
    for group in [['HBondDonor-L', hbonds.lig_don_id],
                  ['HBondAccept-L', hbonds.lig_acc_id]]:
        if not len(group[1]) == 0:
            self.select_by_ids(group[0], group[1], restrict=self.ligname)
    for i in hbonds.ldon_id:
        cmd.select('tmp_bs', 'id %i & %s' % (i[0], self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (i[1], self.ligname))
        cmd.distance('HBonds', 'tmp_bs', 'tmp_lig')
    for i in hbonds.pdon_id:
        cmd.select('tmp_bs', 'id %i & %s' % (i[1], self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (i[0], self.ligname))
        cmd.distance('HBonds', 'tmp_bs', 'tmp_lig')
    if self.object_exists('HBonds'):
        cmd.set('dash_color', 'blue', 'HBonds')
Visualizes hydrogen bonds.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L100-L120
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.show_halogen
def show_halogen(self):
    """Visualize halogen bonds."""
    halogen = self.plcomplex.halogen_bonds
    all_don_x, all_acc_o = [], []
    for h in halogen:
        all_don_x.append(h.don_id)
        all_acc_o.append(h.acc_id)

        cmd.select('tmp_bs', 'id %i & %s' % (h.acc_id, self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (h.don_id, self.ligname))
        cmd.distance('HalogenBonds', 'tmp_bs', 'tmp_lig')
    if not len(all_acc_o) == 0:
        self.select_by_ids('HalogenAccept', all_acc_o, restrict=self.protname)
        self.select_by_ids('HalogenDonor', all_don_x, restrict=self.ligname)
    if self.object_exists('HalogenBonds'):
        cmd.set('dash_color', 'greencyan', 'HalogenBonds')
python
def show_halogen(self):
    """Visualize halogen bonds."""
    halogen = self.plcomplex.halogen_bonds
    all_don_x, all_acc_o = [], []
    for h in halogen:
        all_don_x.append(h.don_id)
        all_acc_o.append(h.acc_id)

        cmd.select('tmp_bs', 'id %i & %s' % (h.acc_id, self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (h.don_id, self.ligname))
        cmd.distance('HalogenBonds', 'tmp_bs', 'tmp_lig')
    if not len(all_acc_o) == 0:
        self.select_by_ids('HalogenAccept', all_acc_o, restrict=self.protname)
        self.select_by_ids('HalogenDonor', all_don_x, restrict=self.ligname)
    if self.object_exists('HalogenBonds'):
        cmd.set('dash_color', 'greencyan', 'HalogenBonds')
Visualize halogen bonds.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L122-L137
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.show_stacking
def show_stacking(self):
    """Visualize pi-stacking interactions."""
    stacks = self.plcomplex.pistacking
    for i, stack in enumerate(stacks):
        pires_ids = '+'.join(map(str, stack.proteinring_atoms))
        pilig_ids = '+'.join(map(str, stack.ligandring_atoms))
        cmd.select('StackRings-P', 'StackRings-P or (id %s & %s)' % (pires_ids, self.protname))
        cmd.select('StackRings-L', 'StackRings-L or (id %s & %s)' % (pilig_ids, self.ligname))
        cmd.select('StackRings-P', 'byres StackRings-P')
        cmd.show('sticks', 'StackRings-P')

        cmd.pseudoatom('ps-pistack-1-%i' % i, pos=stack.proteinring_center)
        cmd.pseudoatom('ps-pistack-2-%i' % i, pos=stack.ligandring_center)
        cmd.pseudoatom('Centroids-P', pos=stack.proteinring_center)
        cmd.pseudoatom('Centroids-L', pos=stack.ligandring_center)

        if stack.type == 'P':
            cmd.distance('PiStackingP', 'ps-pistack-1-%i' % i, 'ps-pistack-2-%i' % i)
        if stack.type == 'T':
            cmd.distance('PiStackingT', 'ps-pistack-1-%i' % i, 'ps-pistack-2-%i' % i)
    if self.object_exists('PiStackingP'):
        cmd.set('dash_color', 'green', 'PiStackingP')
        cmd.set('dash_gap', 0.3, 'PiStackingP')
        cmd.set('dash_length', 0.6, 'PiStackingP')
    if self.object_exists('PiStackingT'):
        cmd.set('dash_color', 'smudge', 'PiStackingT')
        cmd.set('dash_gap', 0.3, 'PiStackingT')
        cmd.set('dash_length', 0.6, 'PiStackingT')
python
def show_stacking(self):
    """Visualize pi-stacking interactions."""
    stacks = self.plcomplex.pistacking
    for i, stack in enumerate(stacks):
        pires_ids = '+'.join(map(str, stack.proteinring_atoms))
        pilig_ids = '+'.join(map(str, stack.ligandring_atoms))
        cmd.select('StackRings-P', 'StackRings-P or (id %s & %s)' % (pires_ids, self.protname))
        cmd.select('StackRings-L', 'StackRings-L or (id %s & %s)' % (pilig_ids, self.ligname))
        cmd.select('StackRings-P', 'byres StackRings-P')
        cmd.show('sticks', 'StackRings-P')

        cmd.pseudoatom('ps-pistack-1-%i' % i, pos=stack.proteinring_center)
        cmd.pseudoatom('ps-pistack-2-%i' % i, pos=stack.ligandring_center)
        cmd.pseudoatom('Centroids-P', pos=stack.proteinring_center)
        cmd.pseudoatom('Centroids-L', pos=stack.ligandring_center)

        if stack.type == 'P':
            cmd.distance('PiStackingP', 'ps-pistack-1-%i' % i, 'ps-pistack-2-%i' % i)
        if stack.type == 'T':
            cmd.distance('PiStackingT', 'ps-pistack-1-%i' % i, 'ps-pistack-2-%i' % i)
    if self.object_exists('PiStackingP'):
        cmd.set('dash_color', 'green', 'PiStackingP')
        cmd.set('dash_gap', 0.3, 'PiStackingP')
        cmd.set('dash_length', 0.6, 'PiStackingP')
    if self.object_exists('PiStackingT'):
        cmd.set('dash_color', 'smudge', 'PiStackingT')
        cmd.set('dash_gap', 0.3, 'PiStackingT')
        cmd.set('dash_length', 0.6, 'PiStackingT')
Visualize pi-stacking interactions.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L139-L166
ssalentin/plip
plip/modules/pymolplip.py
PyMOLVisualizer.show_cationpi
def show_cationpi(self):
    """Visualize cation-pi interactions."""
    for i, p in enumerate(self.plcomplex.pication):
        cmd.pseudoatom('ps-picat-1-%i' % i, pos=p.ring_center)
        cmd.pseudoatom('ps-picat-2-%i' % i, pos=p.charge_center)
        if p.protcharged:
            cmd.pseudoatom('Chargecenter-P', pos=p.charge_center)
            cmd.pseudoatom('Centroids-L', pos=p.ring_center)
            pilig_ids = '+'.join(map(str, p.ring_atoms))
            cmd.select('PiCatRing-L', 'PiCatRing-L or (id %s & %s)' % (pilig_ids, self.ligname))
            for a in p.charge_atoms:
                cmd.select('PosCharge-P', 'PosCharge-P or (id %i & %s)' % (a, self.protname))
        else:
            cmd.pseudoatom('Chargecenter-L', pos=p.charge_center)
            cmd.pseudoatom('Centroids-P', pos=p.ring_center)
            pires_ids = '+'.join(map(str, p.ring_atoms))
            cmd.select('PiCatRing-P', 'PiCatRing-P or (id %s & %s)' % (pires_ids, self.protname))
            for a in p.charge_atoms:
                cmd.select('PosCharge-L', 'PosCharge-L or (id %i & %s)' % (a, self.ligname))
        cmd.distance('PiCation', 'ps-picat-1-%i' % i, 'ps-picat-2-%i' % i)
    if self.object_exists('PiCation'):
        cmd.set('dash_color', 'orange', 'PiCation')
        cmd.set('dash_gap', 0.3, 'PiCation')
        cmd.set('dash_length', 0.6, 'PiCation')
python
def show_cationpi(self):
    """Visualize cation-pi interactions."""
    for i, p in enumerate(self.plcomplex.pication):
        cmd.pseudoatom('ps-picat-1-%i' % i, pos=p.ring_center)
        cmd.pseudoatom('ps-picat-2-%i' % i, pos=p.charge_center)
        if p.protcharged:
            cmd.pseudoatom('Chargecenter-P', pos=p.charge_center)
            cmd.pseudoatom('Centroids-L', pos=p.ring_center)
            pilig_ids = '+'.join(map(str, p.ring_atoms))
            cmd.select('PiCatRing-L', 'PiCatRing-L or (id %s & %s)' % (pilig_ids, self.ligname))
            for a in p.charge_atoms:
                cmd.select('PosCharge-P', 'PosCharge-P or (id %i & %s)' % (a, self.protname))
        else:
            cmd.pseudoatom('Chargecenter-L', pos=p.charge_center)
            cmd.pseudoatom('Centroids-P', pos=p.ring_center)
            pires_ids = '+'.join(map(str, p.ring_atoms))
            cmd.select('PiCatRing-P', 'PiCatRing-P or (id %s & %s)' % (pires_ids, self.protname))
            for a in p.charge_atoms:
                cmd.select('PosCharge-L', 'PosCharge-L or (id %i & %s)' % (a, self.ligname))
        cmd.distance('PiCation', 'ps-picat-1-%i' % i, 'ps-picat-2-%i' % i)
    if self.object_exists('PiCation'):
        cmd.set('dash_color', 'orange', 'PiCation')
        cmd.set('dash_gap', 0.3, 'PiCation')
        cmd.set('dash_length', 0.6, 'PiCation')
Visualize cation-pi interactions.
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L168-L191