Code | Summary
---|---|
Please provide a description of the function:def add_result(self, scan_id, result_type, host='', name='', value='',
port='', test_id='', severity='', qod=''):
assert scan_id
assert len(name) or len(value)
result = dict()
result['type'] = result_type
result['name'] = name
result['severity'] = severity
result['test_id'] = test_id
result['value'] = value
result['host'] = host
result['port'] = port
result['qod'] = qod
results = self.scans_table[scan_id]['results']
results.append(result)
# Set scan_info's results to propagate results to parent process.
self.scans_table[scan_id]['results'] = results | [
" Add a result to a scan in the table. "
]
|
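The append-then-reassign pattern above matters because the scans table is built from a multiprocessing.Manager dict (see create_scan below), whose nested values only propagate when the key is reassigned. A minimal standalone sketch of that pattern, using a plain dict stand-in and illustrative values that are not from the source:

    scans_table = {'scan-1': {'results': []}}
    result = {'type': 1, 'name': 'ping', 'host': '192.0.2.1', 'value': 'alive'}
    results = scans_table['scan-1']['results']
    results.append(result)
    scans_table['scan-1']['results'] = results  # reassign so a Manager proxy would notice the change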
Please provide a description of the function:def set_progress(self, scan_id, progress):
if progress > 0 and progress <= 100:
self.scans_table[scan_id]['progress'] = progress
if progress == 100:
self.scans_table[scan_id]['end_time'] = int(time.time()) | [
" Sets scan_id scan's progress. "
]
|
Please provide a description of the function:def set_target_progress(self, scan_id, target, host, progress):
if progress > 0 and progress <= 100:
targets = self.scans_table[scan_id]['target_progress']
targets[target][host] = progress
# Set scan_info's target_progress to propagate progresses
# to parent process.
self.scans_table[scan_id]['target_progress'] = targets | [
" Sets scan_id scan's progress. "
]
|
Please provide a description of the function:def set_host_finished(self, scan_id, target, host):
finished_hosts = self.scans_table[scan_id]['finished_hosts']
finished_hosts[target].extend(host)
self.scans_table[scan_id]['finished_hosts'] = finished_hosts | [
" Add the host in a list of finished hosts "
]
|
Please provide a description of the function:def get_hosts_unfinished(self, scan_id):
unfinished_hosts = list()
for target in self.scans_table[scan_id]['finished_hosts']:
unfinished_hosts.extend(target_str_to_list(target))
for target in self.scans_table[scan_id]['finished_hosts']:
for host in self.scans_table[scan_id]['finished_hosts'][target]:
unfinished_hosts.remove(host)
return unfinished_hosts | [
" Get a list of finished hosts."
]
|
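The same set-difference logic, sketched on plain data; here `target_str_to_list` is assumed (as in the code above) to expand a target string into individual hosts, and the values are illustrative only:

    finished_hosts = {'192.0.2.0/30': ['192.0.2.1']}
    all_hosts = ['192.0.2.1', '192.0.2.2']  # what target_str_to_list('192.0.2.0/30') might yield
    unfinished = [h for h in all_hosts if h not in finished_hosts['192.0.2.0/30']]
    # -> ['192.0.2.2']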
Please provide a description of the function:def results_iterator(self, scan_id, pop_res):
if pop_res:
result_aux = self.scans_table[scan_id]['results']
self.scans_table[scan_id]['results'] = list()
return iter(result_aux)
return iter(self.scans_table[scan_id]['results']) | [
" Returns an iterator over scan_id scan's results. If pop_res is True,\n it removed the fetched results from the list.\n "
]
|
Please provide a description of the function:def del_results_for_stopped_hosts(self, scan_id):
unfinished_hosts = self.get_hosts_unfinished(scan_id)
for result in self.results_iterator(scan_id, False):
if result['host'] in unfinished_hosts:
self.remove_single_result(scan_id, result) | [
" Remove results from the result table for those host\n "
]
|
Please provide a description of the function:def resume_scan(self, scan_id, options):
self.scans_table[scan_id]['status'] = ScanStatus.INIT
if options:
self.scans_table[scan_id]['options'] = options
self.del_results_for_stopped_hosts(scan_id)
return scan_id | [
" Reset the scan status in the scan_table to INIT.\n Also, overwrite the options, because a resume task cmd\n can add some new option. E.g. exclude hosts list.\n Parameters:\n scan_id (uuid): Scan ID to identify the scan process to be resumed.\n options (dict): Options for the scan to be resumed. This options\n are not added to the already existent ones.\n The old ones are removed\n\n Return:\n Scan ID which identifies the current scan.\n "
]
|
Please provide a description of the function:def create_scan(self, scan_id='', targets='', options=None, vts=''):
if self.data_manager is None:
self.data_manager = multiprocessing.Manager()
# Check if it is possible to resume the task. To avoid resuming, the
# scan must be deleted from the scans_table.
if scan_id and self.id_exists(scan_id) and (
self.get_status(scan_id) == ScanStatus.STOPPED):
return self.resume_scan(scan_id, options)
if not options:
options = dict()
scan_info = self.data_manager.dict()
scan_info['results'] = list()
scan_info['finished_hosts'] = dict(
[[target, []] for target, _, _ in targets])
scan_info['progress'] = 0
scan_info['target_progress'] = dict(
[[target, {}] for target, _, _ in targets])
scan_info['targets'] = targets
scan_info['vts'] = vts
scan_info['options'] = options
scan_info['start_time'] = int(time.time())
scan_info['end_time'] = "0"
scan_info['status'] = ScanStatus.INIT
if scan_id is None or scan_id == '':
scan_id = str(uuid.uuid4())
scan_info['scan_id'] = scan_id
self.scans_table[scan_id] = scan_info
return scan_id | [
" Creates a new scan with provided scan information. "
]
|
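A small sketch of how the per-target bookkeeping dicts above are seeded from the `targets` list of `(target, ports, credentials)` tuples; the target values are illustrative only:

    targets = [('192.0.2.0/30', 'T:80,443', {}), ('198.51.100.5', 'T:22', {})]
    finished_hosts = dict([[target, []] for target, _, _ in targets])
    target_progress = dict([[target, {}] for target, _, _ in targets])
    # finished_hosts  -> {'192.0.2.0/30': [], '198.51.100.5': []}
    # target_progress -> {'192.0.2.0/30': {}, '198.51.100.5': {}}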
Please provide a description of the function:def set_option(self, scan_id, name, value):
self.scans_table[scan_id]['options'][name] = value | [
" Set a scan_id scan's name option to value. "
]
|
Please provide a description of the function:def get_target_progress(self, scan_id, target):
total_hosts = len(target_str_to_list(target))
host_progresses = self.scans_table[scan_id]['target_progress'].get(target)
try:
t_prog = sum(host_progresses.values()) / total_hosts
except ZeroDivisionError:
LOGGER.error("Zero division error in ", get_target_progress.__name__)
raise
return t_prog | [
" Get a target's current progress value.\n The value is calculated with the progress of each single host\n in the target."
]
|
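The progress of a target is the mean of its per-host progresses over the total number of hosts in the target, so hosts that have not reported yet count as zero. A worked sketch with made-up numbers:

    host_progresses = {'192.0.2.1': 100, '192.0.2.2': 40}  # two of four hosts have reported
    total_hosts = 4
    t_prog = sum(host_progresses.values()) / total_hosts    # -> 35.0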
Please provide a description of the function:def get_target_list(self, scan_id):
target_list = []
for target, _, _ in self.scans_table[scan_id]['targets']:
target_list.append(target)
return target_list | [
" Get a scan's target list. "
]
|
Please provide a description of the function:def get_ports(self, scan_id, target):
if target:
for item in self.scans_table[scan_id]['targets']:
if target == item[0]:
return item[1]
return self.scans_table[scan_id]['targets'][0][1] | [
" Get a scan's ports list. If a target is specified\n it will return the corresponding port for it. If not,\n it returns the port item of the first nested list in\n the target's list.\n "
]
|
Please provide a description of the function:def get_credentials(self, scan_id, target):
if target:
for item in self.scans_table[scan_id]['targets']:
if target == item[0]:
return item[2] | [
" Get a scan's credential list. It return dictionary with\n the corresponding credential for a given target.\n "
]
|
Please provide a description of the function:def delete_scan(self, scan_id):
if self.get_status(scan_id) == ScanStatus.RUNNING:
return False
self.scans_table.pop(scan_id)
if len(self.scans_table) == 0:
del self.data_manager
self.data_manager = None
return True | [
" Delete a scan if fully finished. "
]
|
Please provide a description of the function:def get_str(cls, result_type):
if result_type == cls.ALARM:
return "Alarm"
elif result_type == cls.LOG:
return "Log Message"
elif result_type == cls.ERROR:
return "Error Message"
elif result_type == cls.HOST_DETAIL:
return "Host Detail"
else:
assert False, "Erroneous result type {0}.".format(result_type) | [
" Return string name of a result type. "
]
|
Please provide a description of the function:def get_type(cls, result_name):
if result_name == "Alarm":
return cls.ALARM
elif result_name == "Log Message":
return cls.LOG
elif result_name == "Error Message":
return cls.ERROR
elif result_name == "Host Detail":
return cls.HOST_DETAIL
else:
assert False, "Erroneous result name {0}.".format(result_name) | [
" Return string name of a result type. "
]
|
Please provide a description of the function:def is_float(obj):
is_f = isinstance(obj, float)
if not is_f:
try:
float(obj)
is_f = True
except (ValueError, TypeError):
is_f = False
return is_f and not is_bool(obj) | [
"\n Valid types are:\n - objects of float type\n - Strings that can be converted to float. For example '1e-06'\n "
]
|
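A standalone restatement of the rule above, useful as a self-check; this is an illustrative helper, not the module's own function:

    def _is_float_like(obj):
        # floats, and anything float() can parse, except booleans
        if isinstance(obj, bool):
            return False
        try:
            float(obj)
            return True
        except (ValueError, TypeError):
            return False

    assert _is_float_like(1e-06) and _is_float_like('1e-06') and not _is_float_like(True)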
Please provide a description of the function:def is_timestamp(obj):
return isinstance(obj, datetime.datetime) or is_string(obj) or is_int(obj) or is_float(obj) | [
"\n Yaml either have automatically converted it to a datetime object\n or it is a string that will be validated later.\n "
]
|
Please provide a description of the function:def init_logging(log_level):
log_level = log_level_to_string_map[min(log_level, 5)]
msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s"
logging_conf = {
"version": 1,
"root": {
"level": log_level,
"handlers": ["console"]
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": log_level,
"formatter": "simple",
"stream": "ext://sys.stdout"
}
},
"formatters": {
"simple": {
"format": " {0}".format(msg)
}
}
}
logging.config.dictConfig(logging_conf) | [
"\n Init logging settings with default set to INFO\n "
]
|
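The dictConfig layout used above, reduced to a minimal runnable sketch with a hard-coded level; the real function first maps the numeric CLI verbosity to a level name:

    import logging
    import logging.config

    logging.config.dictConfig({
        "version": 1,
        "formatters": {"simple": {"format": "%(levelname)s - %(message)s"}},
        "handlers": {"console": {"class": "logging.StreamHandler", "level": "INFO",
                                 "formatter": "simple", "stream": "ext://sys.stdout"}},
        "root": {"level": "INFO", "handlers": ["console"]},
    })
    logging.getLogger(__name__).info("logging configured")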
Please provide a description of the function:def keywords(self):
defined_keywords = [
('allowempty_map', 'allowempty_map'),
('assertion', 'assertion'),
('default', 'default'),
('class', 'class'),
('desc', 'desc'),
('enum', 'enum'),
('example', 'example'),
('extensions', 'extensions'),
('format', 'format'),
('func', 'func'),
('ident', 'ident'),
('include_name', 'include'),
('length', 'length'),
('map_regex_rule', 'map_regex_rule'),
('mapping', 'mapping'),
('matching', 'matching'),
('matching_rule', 'matching_rule'),
('name', 'name'),
('nullable', 'nullable'),
('parent', 'parent'),
('pattern', 'pattern'),
('pattern_regexp', 'pattern_regexp'),
('range', 'range'),
('regex_mappings', 'regex_mappings'),
('required', 'required'),
('schema', 'schema'),
('schema_str', 'schema_str'),
('sequence', 'sequence'),
('type', 'type'),
('type_class', 'type_class'),
('unique', 'unique'),
('version', 'version'),
]
found_keywords = []
for var_name, keyword_name in defined_keywords:
if getattr(self, var_name, None):
found_keywords.append(keyword_name)
return found_keywords | [
"\n Returns a list of all keywords that this rule object has defined.\n A keyword is considered defined if the value it returns != None.\n "
]
|
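The "defined means truthy" convention used by keywords() can be seen with a toy stand-in object (not the real Rule class):

    class _ToyRule:
        default = 'x'
        required = True
        pattern = None   # present but None, so it does not count as defined

    found = [kw for kw in ('default', 'required', 'pattern')
             if getattr(_ToyRule, kw, None)]
    # -> ['default', 'required']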
Please provide a description of the function:def check_type_keywords(self, schema, rule, path):
if not self.strict_rule_validation:
return
global_keywords = ['type', 'desc', 'example', 'extensions', 'name', 'nullable', 'version', 'func', 'include']
all_allowed_keywords = {
'str': global_keywords + ['default', 'pattern', 'range', 'enum', 'required', 'unique', 'req'],
'int': global_keywords + ['default', 'range', 'enum', 'required', 'unique'],
'float': global_keywords + ['default', 'enum', 'range', 'required'],
'number': global_keywords + ['default', 'enum'],
'bool': global_keywords + ['default', 'enum'],
'map': global_keywords + ['allowempty_map', 'mapping', 'map', 'allowempty', 'required', 'matching-rule', 'range', 'class'],
'seq': global_keywords + ['sequence', 'seq', 'required', 'range', 'matching'],
'sequence': global_keywords + ['sequence', 'seq', 'required'],
'mapping': global_keywords + ['mapping', 'seq', 'required'],
'timestamp': global_keywords + ['default', 'enum'],
'date': global_keywords + ['default', 'enum'],
'symbol': global_keywords + ['default', 'enum'],
'scalar': global_keywords + ['default', 'enum'],
'text': global_keywords + ['default', 'enum', 'pattern'],
'any': global_keywords + ['default', 'enum'],
'enum': global_keywords + ['default', 'enum'],
'none': global_keywords + ['default', 'enum', 'required'],
}
rule_type = schema.get('type')
if not rule_type:
# Special cases for the "shortcut methods"
if 'sequence' in schema or 'seq' in schema:
rule_type = 'sequence'
elif 'mapping' in schema or 'map' in schema:
rule_type = 'mapping'
allowed_keywords = all_allowed_keywords.get(rule_type)
if not allowed_keywords and 'sequence' not in schema and 'mapping' not in schema and 'seq' not in schema and 'map' not in schema:
raise RuleError('No allowed keywords found for type: {0}'.format(rule_type))
for k, v in schema.items():
if k not in allowed_keywords:
raise RuleError('Keyword "{0}" is not supported for type: "{1}" '.format(k, rule_type)) | [
"\n All supported keywords:\n - allowempty_map\n - assertion\n - class\n - date\n - default\n - desc\n - enum\n - example\n - extensions\n - func\n - ident\n - include_name\n - map_regex_rule\n - mapping\n - matching\n - matching_rule\n - name\n - nullable\n - pattern\n - pattern_regexp\n - range\n - regex_mappings\n - required\n - schema\n - sequence\n - type\n - type_class\n - unique\n - version\n "
]
|
Please provide a description of the function:def _load_extensions(self):
log.debug(u"loading all extensions : %s", self.extensions)
self.loaded_extensions = []
for f in self.extensions:
if not os.path.isabs(f):
f = os.path.abspath(f)
if not os.path.exists(f):
raise CoreError(u"Extension file: {0} not found on disk".format(f))
self.loaded_extensions.append(imp.load_source("", f))
log.debug(self.loaded_extensions)
log.debug([dir(m) for m in self.loaded_extensions]) | [
"\n Load all extension files into the namespace pykwalify.ext\n "
]
|
Please provide a description of the function:def _handle_func(self, value, rule, path, done=None):
func = rule.func
# func keyword is not defined so nothing to do
if not func:
return
found_method = False
for extension in self.loaded_extensions:
method = getattr(extension, func, None)
if method:
found_method = True
# No exception should be caught here. If one is raised it should bubble up all the way.
ret = method(value, rule, path)
if ret is not True and ret is not None:
msg = '%s. Path: {path}' % unicode(ret)
self.errors.append(SchemaError.SchemaErrorEntry(
msg=msg,
path=path,
value=None))
# If False or None or some other object that is interpreted as False
if not ret:
raise CoreError(u"Error when running extension function : {0}".format(func))
# Only run the first matched function. Since loading order is determined
# it should be easy to determine which file is used before others
break
if not found_method:
raise CoreError(u"Did not find method '{0}' in any loaded extension file".format(func)) | [
"\n Helper function that should check if func is specified for this rule and\n then handle it for all cases in a generic way.\n "
]
|
Please provide a description of the function:def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix):
if not isinstance(value, int) and not isinstance(value, float):
raise CoreError("Value must be a integer type")
log.debug(
u"Validate range : %s : %s : %s : %s : %s : %s",
max_,
min_,
max_ex,
min_ex,
value,
path,
)
if max_ is not None and max_ < value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
max_=max_))
if min_ is not None and min_ > value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
min_=min_))
if max_ex is not None and max_ex <= value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
max_ex=max_ex))
if min_ex is not None and min_ex >= value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
min_ex=min_ex)) | [
"\n Validate that value is within range values.\n "
]
|
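The four range checks above (inclusive max_/min_ and exclusive max_ex/min_ex) boil down to the predicate below; this is a standalone restatement for illustration, not the library's API:

    def _in_range(value, max_=None, min_=None, max_ex=None, min_ex=None):
        if max_ is not None and value > max_:
            return False
        if min_ is not None and value < min_:
            return False
        if max_ex is not None and value >= max_ex:
            return False
        if min_ex is not None and value <= min_ex:
            return False
        return True

    assert _in_range(5, min_=1, max_=10)
    assert not _in_range(10, max_ex=10)   # exclusive bound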
Please provide a description of the function:def parse_cli():
#
# 1. parse cli arguments
#
__docopt__ =
# Import pykwalify package
import pykwalify
args = docopt(__docopt__, version=pykwalify.__version__)
pykwalify.init_logging(1 if args["--quiet"] else args["--verbose"])
log = logging.getLogger(__name__)
#
# 2. validate arguments only, dont go into other code/logic
#
log.debug("Setting verbose level: %s", args["--verbose"])
log.debug("Arguments from CLI: %s", args)
return args | [
"\n The outline of this function needs to be like this:\n\n 1. parse arguments\n 2. validate arguments only, dont go into other logic/code\n 3. run application logic\n ",
"\nusage: pykwalify -d FILE -s FILE ... [-e FILE ...]\n [--strict-rule-validation] [--fix-ruby-style-regex] [--allow-assertions] [--encoding ENCODING]\n [-v ...] [-q]\n\noptional arguments:\n -d FILE, --data-file FILE the file to be tested\n -e FILE, --extension FILE file containing python extension\n -s FILE, --schema-file FILE schema definition file\n --fix-ruby-style-regex This flag fixes some of the quirks of ruby style regex\n that is not compatible with python style regex\n --strict-rule-validation enables strict validation of all keywords for all\n Rule objects to find unsupported keyword usage\n --allow-assertions By default assertions is disabled due to security risk.\n Error will be raised if assertion is used in schema\n but this flag is not used. This option enables assert keyword.\n --encoding ENCODING Specify encoding to open data and schema files with.\n -h, --help show this help message and exit\n -q, --quiet suppress terminal output\n -v, --verbose verbose terminal output (multiple -v increases verbosity)\n --version display the version number and exit\n"
]
|
Please provide a description of the function:def run(cli_args):
from .core import Core
c = Core(
source_file=cli_args["--data-file"],
schema_files=cli_args["--schema-file"],
extensions=cli_args['--extension'],
strict_rule_validation=cli_args['--strict-rule-validation'],
fix_ruby_style_regex=cli_args['--fix-ruby-style-regex'],
allow_assertions=cli_args['--allow-assertions'],
file_encoding=cli_args['--encoding'],
)
c.validate()
return c | [
"\n Split the functionality into 2 methods.\n\n One for parsing the cli and one that runs the application.\n "
]
|
Please provide a description of the function:def pbdesign(n):
assert n>0, 'Number of factors must be a positive integer'
keep = int(n)
n = 4*(int(n/4) + 1) # calculate the correct number of rows (multiple of 4)
f, e = np.frexp([n, n/12., n/20.])
k = [idx for idx, val in enumerate(np.logical_and(f==0.5, e>0)) if val]
assert isinstance(n, int) and k!=[], 'Invalid inputs. n must be a multiple of 4.'
k = k[0]
e = e[k] - 1
if k==0: # N = 1*2**e
H = np.ones((1, 1))
elif k==1: # N = 12*2**e
H = np.vstack((np.ones((1, 12)), np.hstack((np.ones((11, 1)),
toeplitz([-1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1],
[-1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1])))))
elif k==2: # N = 20*2**e
H = np.vstack((np.ones((1, 20)), np.hstack((np.ones((19, 1)),
hankel(
[-1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1],
[1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1])
))))
# Kronecker product construction
for i in range(e):
H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
# Reduce the size of the matrix as needed
H = H[:, 1:(keep + 1)]
return np.flipud(H) | [
"\r\n Generate a Plackett-Burman design\r\n \r\n Parameter\r\n ---------\r\n n : int\r\n The number of factors to create a matrix for.\r\n \r\n Returns\r\n -------\r\n H : 2d-array\r\n An orthogonal design matrix with n columns, one for each factor, and\r\n the number of rows being the next multiple of 4 higher than n (e.g.,\r\n for 1-3 factors there are 4 rows, for 4-7 factors there are 8 rows,\r\n etc.)\r\n \r\n Example\r\n -------\r\n \r\n A 3-factor design::\r\n \r\n >>> pbdesign(3)\r\n array([[-1., -1., 1.],\r\n [ 1., -1., -1.],\r\n [-1., 1., -1.],\r\n [ 1., 1., 1.]])\r\n \r\n A 5-factor design::\r\n \r\n >>> pbdesign(5)\r\n array([[-1., -1., 1., -1., 1.],\r\n [ 1., -1., -1., -1., -1.],\r\n [-1., 1., -1., -1., 1.],\r\n [ 1., 1., 1., -1., -1.],\r\n [-1., -1., 1., 1., -1.],\r\n [ 1., -1., -1., 1., 1.],\r\n [-1., 1., -1., 1., -1.],\r\n [ 1., 1., 1., 1., 1.]])\r\n \r\n "
]
|
Please provide a description of the function:def star(n, alpha='faced', center=(1, 1)):
# Star points at the center of each face of the factorial
if alpha=='faced':
a = 1
elif alpha=='orthogonal':
nc = 2**n # factorial points
nco = center[0] # center points to factorial
na = 2*n # axial points
nao = center[1] # center points to axial design
# value of alpha in orthogonal design
a = (n*(1 + nao/float(na))/(1 + nco/float(nc)))**0.5
elif alpha=='rotatable':
nc = 2**n # number of factorial points
a = nc**(0.25) # value of alpha in rotatable design
else:
raise ValueError('Invalid value for "alpha": {:}'.format(alpha))
# Create the actual matrix now.
H = np.zeros((2*n, n))
for i in range(n):
H[2*i:2*i+2, i] = [-1, 1]
H *= a
return H, a | [
"\r\n Create the star points of various design matrices\r\n \r\n Parameters\r\n ----------\r\n n : int\r\n The number of variables in the design\r\n \r\n Optional\r\n --------\r\n alpha : str\r\n Available values are 'faced' (default), 'orthogonal', or 'rotatable'\r\n center : array\r\n A 1-by-2 array of integers indicating the number of center points\r\n assigned in each block of the response surface design. Default is \r\n (1, 1).\r\n \r\n Returns\r\n -------\r\n H : 2d-array\r\n The star-point portion of the design matrix (i.e. at +/- alpha)\r\n a : scalar\r\n The alpha value to scale the star points with.\r\n \r\n Example\r\n -------\r\n ::\r\n \r\n >>> star(3)\r\n array([[-1., 0., 0.],\r\n [ 1., 0., 0.],\r\n [ 0., -1., 0.],\r\n [ 0., 1., 0.],\r\n [ 0., 0., -1.],\r\n [ 0., 0., 1.]])\r\n \r\n "
]
|
Please provide a description of the function:def fold(H, columns=None):
H = np.array(H)
assert len(H.shape)==2, 'Input design matrix must be 2d.'
if columns is None:
columns = range(H.shape[1])
Hf = H.copy()
for col in columns:
vals = np.unique(H[:, col])
assert len(vals)==2, 'Input design matrix must be 2-level factors only.'
for i in range(H.shape[0]):
Hf[i, col] = vals[0] if H[i, col]==vals[1] else vals[1]
Hf = np.vstack((H, Hf))
return Hf | [
"\r\n Fold a design to reduce confounding effects.\r\n \r\n Parameters\r\n ----------\r\n H : 2d-array\r\n The design matrix to be folded.\r\n columns : array\r\n Indices of of columns to fold (Default: None). If ``columns=None`` is\r\n used, then all columns will be folded.\r\n \r\n Returns\r\n -------\r\n Hf : 2d-array\r\n The folded design matrix.\r\n \r\n Examples\r\n --------\r\n ::\r\n \r\n "
]
|
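For the usual +/-1 coded factors, folding every column is the same as stacking the negated design below the original. A quick numpy check, illustrative and not part of the library:

    import numpy as np

    H = np.array([[-1, -1], [ 1, -1], [-1,  1], [ 1,  1]])
    Hf = np.vstack((H, -H))   # equivalent to fold(H) when all columns are folded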
Please provide a description of the function:def build_regression_matrix(H, model, build=None):
ListOfTokens = model.split(' ')
if H.shape[1]==1:
size_index = len(str(H.shape[0]))
else:
size_index = len(str(H.shape[1]))
if build is None:
build = [True]*len(ListOfTokens)
# Test if the vector has the wrong orientation (row instead of column)
if H.shape[0]==1:
H = H.T
# Collect the list of monomials
Monom_Index = []
for i in range(len(ListOfTokens)):
if build[i]:
Monom_Index += [grep(ListOfTokens, 'x' + str(0)*(size_index - \
len(str(i))) + str(i))]
Monom_Index = -np.sort(-Monom_Index)
Monom_Index = np.unique(Monom_Index)
if H.shape[1]==1:
nb_var = H.shape[0] # vector "mode": the number of vars is equal to the number of rows of H
VectorMode = True
for i in range(nb_var):
for j in range(len(ListOfTokens)):
ListOfTokens[j] = ListOfTokens[j].replace(
'x' + str(0)*(size_index - len(str(i))) + str(i),
'H(' + str(i) + ')')
else:
nb_var = H.shape[1] # matrix "mode": the number of vars is equal to the number of columns of H
VectorMode = False
for i in range(nb_var):
for j in range(len(ListOfTokens)):
ListOfTokens[j] = ListOfTokens[j].replace(
'x' + str(0)*(size_index - len(str(i))) + str(i),
'H[i,' + str(i) + ']')
# Now build the regression matrix
if VectorMode:
R = np.zeros((len(ListOfTokens), 1))
for j in range(len(ListOfTokens)):
R[j, 0] = eval(ListOfTokens[j])
else:
R = np.zeros((H.shape[0], len(ListOfTokens)))
for i in range(H.shape[0]):
for j in range(len(ListOfTokens)):
R[i, j] = eval(ListOfTokens[j])
return R | [
"\r\n Build a regression matrix using a DOE matrix and a list of monomials.\r\n \r\n Parameters\r\n ----------\r\n H : 2d-array\r\n model : str\r\n build : bool-array\r\n \r\n Returns\r\n -------\r\n R : 2d-array\r\n \r\n "
]
|
Please provide a description of the function:def to_bedtool(iterator):
def gen():
for i in iterator:
yield helpers.asinterval(i)
return pybedtools.BedTool(gen()) | [
"\n Convert any iterator into a pybedtools.BedTool object.\n\n Note that the supplied iterator is not consumed by this function. To save\n to a temp file or to a known location, use the `.saveas()` method of the\n returned BedTool object.\n "
]
|
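A hedged usage sketch for to_bedtool; it assumes gffutils and pybedtools are installed and that to_bedtool is importable alongside the other helpers shown here:

    import gffutils

    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gtf'),
                            ':memory:', keep_order=True)
    exons = to_bedtool(db.features_of_type('exon')).saveas()   # persist to a temp file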
Please provide a description of the function:def tsses(db, merge_overlapping=False, attrs=None, attrs_sep=":",
merge_kwargs=None, as_bed6=False, bedtools_227_or_later=True):
_override = os.environ.get('GFFUTILS_USES_BEDTOOLS_227_OR_LATER', None)
if _override is not None:
if _override == 'true':
bedtools_227_or_later = True
elif _override == 'false':
bedtools_227_or_later = False
else:
raise ValueError(
"Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER "
"environment variable: {0}".format(_override))
if bedtools_227_or_later:
_merge_kwargs = dict(o='distinct', s=True, c='4,5,6')
else:
_merge_kwargs = dict(o='distinct', s=True, c='4')
if merge_kwargs is not None:
_merge_kwargs.update(merge_kwargs)
def gen():
for gene in db.features_of_type('gene'):
for transcript in db.children(gene, level=1):
if transcript.strand == '-':
transcript.start = transcript.stop
else:
transcript.stop = transcript.start
transcript.featuretype = transcript.featuretype + '_TSS'
yield helpers.asinterval(transcript)
# GFF/GTF format
x = pybedtools.BedTool(gen()).sort()
# Figure out default attrs to use, depending on the original format.
if attrs is None:
if db.dialect['fmt'] == 'gtf':
attrs = 'gene_id'
else:
attrs = 'ID'
if merge_overlapping or as_bed6:
if isinstance(attrs, six.string_types):
attrs = [attrs]
def to_bed(f):
name = attrs_sep.join([f.attrs[i] for i in attrs])
return pybedtools.Interval(
f.chrom,
f.start,
f.stop,
name,
str(f.score),
f.strand)
x = x.each(to_bed).saveas()
if merge_overlapping:
if bedtools_227_or_later:
x = x.merge(**_merge_kwargs)
else:
def fix_merge(f):
f = featurefuncs.extend_fields(f, 6)
return pybedtools.Interval(
f.chrom,
f.start,
f.stop,
f[4],
'.',
f[3])
x = x.merge(**_merge_kwargs).saveas().each(fix_merge).saveas()
return x | [
"\n Create 1-bp transcription start sites for all transcripts in the database\n and return as a sorted pybedtools.BedTool object pointing to a temporary\n file.\n\n To save the file to a known location, use the `.moveto()` method on the\n resulting `pybedtools.BedTool` object.\n\n To extend regions upstream/downstream, see the `.slop()` method on the\n resulting `pybedtools.BedTool object`.\n\n Requires pybedtools.\n\n Parameters\n ----------\n db : gffutils.FeatureDB\n The database to use\n\n as_bed6 : bool\n If True, output file is in BED6 format; otherwise it remains in the\n GFF/GTF format and dialect of the file used to create the database.\n\n Note that the merge options below necessarily force `as_bed6=True`.\n\n merge_overlapping : bool\n If True, output will be in BED format. Overlapping TSSes will be merged\n into a single feature, and their names will be collapsed using\n `merge_sep` and placed in the new name field.\n\n merge_kwargs : dict\n If `merge_overlapping=True`, these keyword arguments are passed to\n pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`.\n The merge operates on a BED6 file which will have had the name field\n constructed as specified by other arguments here. See the available\n options for your installed version of BEDTools; the defaults used here\n are `merge_kwargs=dict(o='distinct', c=4, s=True)`.\n\n Any provided `merge_kwargs` are used to *update* the default. It is\n recommended to not override `c=4` and `s=True`, otherwise the\n post-merge fixing may not work correctly. Good candidates for tweaking\n are `d` (merge distance), `o` (operation), `delim` (delimiter to use\n for collapse operations).\n\n attrs : str or list\n Only has an effect when `as_bed6=True` or `merge_overlapping=True`.\n\n Determines what goes in the name field of an output BED file. By\n default, \"gene_id\" for GTF databases and \"ID\" for GFF. If a list of\n attributes is supplied, e.g. [\"gene_id\", \"transcript_id\"], then these\n will be joined by `attr_join_sep` and then placed in the name field.\n\n attrs_sep: str\n If `as_bed6=True` or `merge_overlapping=True`, then use this character\n to separate attributes in the name field of the output BED. If also\n using `merge_overlapping=True`, you'll probably want this to be\n different than `merge_sep` in order to parse things out later.\n\n bedtools_227_or_later : bool\n In version 2.27, BEDTools changed the output for merge. By default,\n this function expects BEDTools version 2.27 or later, but set this to\n False to assume the older behavior.\n\n For testing purposes, the environment variable\n GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either \"true\" or \"false\"\n and is used to override this argument.\n\n Examples\n --------\n\n >>> import gffutils\n >>> db = gffutils.create_db(\n ... gffutils.example_filename('FBgn0031208.gtf'),\n ... \":memory:\",\n ... keep_order=True,\n ... 
verbose=False)\n\n Default settings -- no merging, and report a separate TSS on each line even\n if they overlap (as in the first two):\n\n\n >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE\n chr2L\tgffutils_derived\ttranscript_TSS\t7529\t7529\t.\t+\t.\tgene_id \"FBgn0031208\"; transcript_id \"FBtr0300689\";\n chr2L\tgffutils_derived\ttranscript_TSS\t7529\t7529\t.\t+\t.\tgene_id \"FBgn0031208\"; transcript_id \"FBtr0300690\";\n chr2L\tgffutils_derived\ttranscript_TSS\t11000\t11000\t.\t-\t.\tgene_id \"Fk_gene_1\"; transcript_id \"transcript_Fk_gene_1\";\n chr2L\tgffutils_derived\ttranscript_TSS\t12500\t12500\t.\t-\t.\tgene_id \"Fk_gene_2\"; transcript_id \"transcript_Fk_gene_2\";\n <BLANKLINE>\n\n\n Default merging, showing the first two TSSes merged and reported as\n a single unique TSS for the gene. Note the conversion to BED:\n\n >>> x = tsses(db, merge_overlapping=True)\n >>> print(x) # doctest: +NORMALIZE_WHITESPACE\n chr2L\t7528\t7529\tFBgn0031208\t.\t+\n chr2L\t10999\t11000\tFk_gene_1\t.\t-\n chr2L\t12499\t12500\tFk_gene_2\t.\t-\n <BLANKLINE>\n\n Report both gene ID and transcript ID in the name. In some cases this can\n be easier to parse than the original GTF or GFF file. With no merging\n specified, we must add `as_bed6=True` to see the names in BED format.\n\n >>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True)\n >>> print(x) # doctest: +NORMALIZE_WHITESPACE\n chr2L\t7528\t7529\tFBgn0031208:FBtr0300689\t.\t+\n chr2L\t7528\t7529\tFBgn0031208:FBtr0300690\t.\t+\n chr2L\t10999\t11000\tFk_gene_1:transcript_Fk_gene_1\t.\t-\n chr2L\t12499\t12500\tFk_gene_2:transcript_Fk_gene_2\t.\t-\n <BLANKLINE>\n\n Use a 3kb merge distance so the last 2 features are merged together:\n\n >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000))\n >>> print(x) # doctest: +NORMALIZE_WHITESPACE\n chr2L\t7528\t7529\tFBgn0031208\t.\t+\n chr2L\t10999\t12500\tFk_gene_1,Fk_gene_2\t.\t-\n <BLANKLINE>\n\n\n The set of unique TSSes for each gene, +1kb upstream and 500bp downstream:\n\n >>> x = tsses(db, merge_overlapping=True)\n >>> x = x.slop(l=1000, r=500, s=True, genome='dm3')\n >>> print(x) # doctest: +NORMALIZE_WHITESPACE\n chr2L\t6528\t8029\tFBgn0031208\t.\t+\n chr2L\t10499\t12000\tFk_gene_1\t.\t-\n chr2L\t11999\t13500\tFk_gene_2\t.\t-\n <BLANKLINE>\n\n\n ",
"\n Generator of pybedtools.Intervals representing TSSes.\n ",
"\n Given a pybedtools.Interval, return a new Interval with the name\n set according to the kwargs provided above.\n "
]
|
Please provide a description of the function:def write_gene_recs(self, db, gene_id):
gene_rec = db[gene_id]
# Output gene record
self.write_rec(gene_rec)
# Get each mRNA's lengths
mRNA_lens = {}
c = list(db.children(gene_id, featuretype="mRNA"))
for mRNA in db.children(gene_id, featuretype="mRNA"):
mRNA_lens[mRNA.id] = \
sum(len(exon) for exon in db.children(mRNA,
featuretype="exon"))
# Sort mRNAs by length
sorted_mRNAs = \
sorted(mRNA_lens.items(), key=lambda x: x[1], reverse=True)
for curr_mRNA in sorted_mRNAs:
mRNA_id = curr_mRNA[0]
mRNA_rec = db[mRNA_id]
# Write mRNA record to file
self.write_rec(mRNA_rec)
# Write mRNA's children records to file
self.write_mRNA_children(db, mRNA_id)
# Write non-mRNA children of gene (only level1)
for gene_child in db.children(gene_id, level=1):
if gene_child.featuretype != "mRNA":
self.write_rec(gene_child) | [
"\n NOTE: The goal of this function is to have a canonical ordering when\n outputting a gene and all of its records to a file. The order is\n intended to be:\n\n gene\n # mRNAs sorted by length, with longest mRNA first\n mRNA_1\n # Exons of mRNA, sorted by start position (ascending)\n exon_1\n # Children of exon, sorted by start position\n exon_child_1\n exon_child_2\n exon_2\n ...\n # Non-exonic children here\n ...\n mRNA_2\n ...\n # Non-mRNA children here\n ...\n\n Output records of a gene to a file, given a GFF database\n and a gene_id. Outputs records in canonical order: gene record\n first, then longest mRNA, followed by longest mRNA exons,\n followed by rest, followed by next longest mRNA, and so on.\n\n Includes the gene record itself in the output.\n\n TODO: This probably doesn't handle deep GFF hierarchies.\n "
]
|
Please provide a description of the function:def write_mRNA_children(self, db, mRNA_id):
mRNA_children = db.children(mRNA_id, order_by='start')
nonexonic_children = []
for child_rec in mRNA_children:
if child_rec.featuretype == "exon":
self.write_rec(child_rec)
self.write_exon_children(db, child_rec)
else:
nonexonic_children.append(child_rec)
self.write_recs(nonexonic_children) | [
"\n Write out the children records of the mRNA given by the ID\n (not including the mRNA record itself) in a canonical\n order, where exons are sorted by start position and given\n first.\n "
]
|
Please provide a description of the function:def write_exon_children(self, db, exon_id):
exon_children = db.children(exon_id, order_by='start')
for exon_child in exon_children:
self.write_rec(exon_child) | [
"\n Write out the children records of the exon given by\n the ID (not including the exon record itself).\n "
]
|
Please provide a description of the function:def close(self):
self.out_stream.close()
# If we're asked to write in place, substitute the named
# temporary file for the current file
if self.in_place:
shutil.move(self.temp_file.name, self.out) | [
"\n Close the stream. Assumes stream has 'close' method.\n "
]
|
Please provide a description of the function:def var_regression_matrix(H, x, model, sigma=1):
x = np.atleast_2d(x)
H = np.atleast_2d(H)
if x.shape[0]==1:
x = x.T
    if np.linalg.matrix_rank(H) < (np.dot(H.T, H)).shape[0]:  # rank-deficient design cannot support the model
raise ValueError("model and DOE don't suit together")
x_mod = build_regression_matrix(x, model)
var = sigma**2*np.dot(np.dot(x_mod.T, np.linalg.inv(np.dot(H.T, H))), x_mod)
return var | [
"\r\n Compute the variance of the 'regression error'.\r\n \r\n Parameters\r\n ----------\r\n H : 2d-array\r\n The regression matrix\r\n x : 2d-array\r\n The coordinates to calculate the regression error variance at.\r\n model : str\r\n A string of tokens that define the regression model (e.g. \r\n '1 x1 x2 x1*x2')\r\n sigma : scalar\r\n An estimate of the variance (default: 1).\r\n \r\n Returns\r\n -------\r\n var : scalar\r\n The variance of the regression error, evaluated at ``x``.\r\n \r\n "
]
|
Please provide a description of the function:def to_seqfeature(feature):
if isinstance(feature, six.string_types):
feature = feature_from_line(feature)
qualifiers = {
'source': [feature.source],
'score': [feature.score],
'seqid': [feature.seqid],
'frame': [feature.frame],
}
qualifiers.update(feature.attributes)
return SeqFeature(
# Convert from GFF 1-based to standard Python 0-based indexing used by
# BioPython
FeatureLocation(feature.start - 1, feature.stop),
id=feature.id,
type=feature.featuretype,
strand=_biopython_strand[feature.strand],
qualifiers=qualifiers
) | [
"\n Converts a gffutils.Feature object to a Bio.SeqFeature object.\n\n The GFF fields `source`, `score`, `seqid`, and `frame` are stored as\n qualifiers. GFF `attributes` are also stored as qualifiers.\n\n Parameters\n ----------\n feature : Feature object, or string\n If string, assume it is a GFF or GTF-format line; otherwise just use\n the provided feature directly.\n "
]
|
Please provide a description of the function:def from_seqfeature(s, **kwargs):
source = s.qualifiers.get('source', '.')[0]
score = s.qualifiers.get('score', '.')[0]
seqid = s.qualifiers.get('seqid', '.')[0]
frame = s.qualifiers.get('frame', '.')[0]
strand = _feature_strand[s.strand]
# BioPython parses 1-based GenBank positions into 0-based for use within
# Python. We need to convert back to 1-based GFF format here.
start = s.location.start.position + 1
stop = s.location.end.position
featuretype = s.type
id = s.id
attributes = dict(s.qualifiers)
attributes.pop('source', '.')
attributes.pop('score', '.')
attributes.pop('seqid', '.')
attributes.pop('frame', '.')
return Feature(seqid, source, featuretype, start, stop, score, strand,
frame, attributes, id=id, **kwargs) | [
"\n Converts a Bio.SeqFeature object to a gffutils.Feature object.\n\n The GFF fields `source`, `score`, `seqid`, and `frame` are assumed to be\n stored as qualifiers. Any other qualifiers will be assumed to be GFF\n attributes.\n "
]
|
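A round-trip sketch for the two converters above; it assumes gffutils and Biopython are installed and that feature_from_line (referenced by to_seqfeature itself) is importable from gffutils:

    from gffutils import feature_from_line

    line = 'chr1\t.\tgene\t11\t20\t.\t+\t.\tID=gene1'
    feat = feature_from_line(line)
    sf = to_seqfeature(feat)        # 1-based GFF -> 0-based SeqFeature location
    back = from_seqfeature(sf)      # and back to a 1-based Feature
    assert (back.start, back.stop) == (feat.start, feat.stop)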
Please provide a description of the function:def set_pragmas(self, pragmas):
self.pragmas = pragmas
c = self.conn.cursor()
c.executescript(
';\n'.join(
['PRAGMA %s=%s' % i for i in self.pragmas.items()]
)
)
self.conn.commit() | [
"\n Set pragmas for the current database connection.\n\n Parameters\n ----------\n pragmas : dict\n Dictionary of pragmas; see constants.default_pragmas for a template\n and http://www.sqlite.org/pragma.html for a full list.\n "
]
|
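The executescript pattern used above, shown standalone against an in-memory SQLite database; the pragma values are illustrative:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    pragmas = {'synchronous': 'NORMAL', 'journal_mode': 'MEMORY'}
    conn.cursor().executescript(';\n'.join('PRAGMA %s=%s' % i for i in pragmas.items()))
    conn.commit()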
Please provide a description of the function:def _feature_returner(self, **kwargs):
kwargs.setdefault('dialect', self.dialect)
kwargs.setdefault('keep_order', self.keep_order)
kwargs.setdefault('sort_attribute_values', self.sort_attribute_values)
return Feature(**kwargs) | [
"\n Returns a feature, adding additional database-specific defaults\n "
]
|
Please provide a description of the function:def schema(self):
c = self.conn.cursor()
c.execute(
'''
SELECT sql FROM sqlite_master
''')
results = []
for i, in c:
if i is not None:
results.append(i)
return '\n'.join(results) | [
"\n Returns the database schema as a string.\n "
]
|
Please provide a description of the function:def count_features_of_type(self, featuretype=None):
c = self.conn.cursor()
if featuretype is not None:
c.execute(
'''
SELECT count() FROM features
WHERE featuretype = ?
''', (featuretype,))
else:
c.execute(
'''
SELECT count() FROM features
''')
results = c.fetchone()
if results is not None:
results = results[0]
return results | [
"\n Simple count of features.\n\n Can be faster than \"grep\", and is faster than checking the length of\n results from :meth:`gffutils.FeatureDB.features_of_type`.\n\n Parameters\n ----------\n\n featuretype : string\n\n Feature type (e.g., \"gene\") to count. If None, then count *all*\n features in the database.\n\n Returns\n -------\n The number of features of this type, as an integer\n\n "
]
|
Please provide a description of the function:def features_of_type(self, featuretype, limit=None, strand=None,
order_by=None, reverse=False,
completely_within=False):
query, args = helpers.make_query(
args=[],
limit=limit,
featuretype=featuretype,
order_by=order_by,
reverse=reverse,
strand=strand,
completely_within=completely_within,
)
for i in self._execute(query, args):
yield self._feature_returner(**i) | [
"\n Returns an iterator of :class:`gffutils.Feature` objects.\n\n Parameters\n ----------\n {_method_doc}\n "
]
|
Please provide a description of the function:def iter_by_parent_childs(self, featuretype="gene", level=None,
order_by=None, reverse=False,
completely_within=False):
# Get all the parent records of the requested feature type
parent_recs = self.all_features(featuretype=featuretype)
# Return a generator of these parent records and their
# children
for parent_rec in parent_recs:
unit_records = \
[parent_rec] + list(self.children(parent_rec.id))
yield unit_records | [
"\n For each parent of type `featuretype`, yield a list L of that parent\n and all of its children (`[parent] + list(children)`). The parent will\n always be L[0].\n\n This is useful for \"sanitizing\" a GFF file for downstream tools.\n\n Additional kwargs are passed to :meth:`FeatureDB.children`, and will\n therefore only affect items L[1:] in each yielded list.\n "
]
|
Please provide a description of the function:def featuretypes(self):
c = self.conn.cursor()
c.execute(
'''
SELECT DISTINCT featuretype from features
''')
for i, in c:
yield i | [
"\n Iterate over feature types found in the database.\n\n Returns\n -------\n A generator object that yields featuretypes (as strings)\n "
]
|
Please provide a description of the function:def _relation(self, id, join_on, join_to, level=None, featuretype=None,
order_by=None, reverse=False, completely_within=False,
limit=None):
# The following docstring will be included in the parents() and
# children() docstrings to maintain consistency, since they both
# delegate to this method.
if isinstance(id, Feature):
id = id.id
other = '''
JOIN relations
ON relations.{join_on} = features.id
WHERE relations.{join_to} = ?
'''.format(**locals())
args = [id]
level_clause = ''
if level is not None:
level_clause = 'relations.level = ?'
args.append(level)
query, args = helpers.make_query(
args=args,
other=other,
extra=level_clause,
featuretype=featuretype,
order_by=order_by,
reverse=reverse,
limit=limit,
completely_within=completely_within,
)
# modify _SELECT so that only unique results are returned
query = query.replace("SELECT", "SELECT DISTINCT")
for i in self._execute(query, args):
yield self._feature_returner(**i) | [
"\n Parameters\n ----------\n\n id : string or a Feature object\n\n level : None or int\n\n If `level=None` (default), then return all children regardless\n of level. If `level` is an integer, then constrain to just that\n level.\n {_method_doc}\n\n Returns\n -------\n A generator object that yields :class:`Feature` objects.\n "
]
|
Please provide a description of the function:def parents(self, id, level=None, featuretype=None, order_by=None,
reverse=False, completely_within=False, limit=None):
return self._relation(
id, join_on='parent', join_to='child', level=level,
featuretype=featuretype, order_by=order_by, reverse=reverse,
limit=limit, completely_within=completely_within) | [
"\n Return parents of feature `id`.\n {_relation_docstring}\n "
]
|
Please provide a description of the function:def execute(self, query):
c = self.conn.cursor()
return c.execute(query) | [
"\n Execute arbitrary queries on the db.\n\n .. seealso::\n\n :class:`FeatureDB.schema` may be helpful when writing your own\n queries.\n\n Parameters\n ----------\n\n query : str\n\n Query to execute -- trailing \";\" optional.\n\n Returns\n -------\n A sqlite3.Cursor object that can be iterated over.\n "
]
|
Please provide a description of the function:def region(self, region=None, seqid=None, start=None, end=None,
strand=None, featuretype=None, completely_within=False):
# Argument handling.
if region is not None:
if (seqid is not None) or (start is not None) or (end is not None):
raise ValueError(
"If region is supplied, do not supply seqid, "
"start, or end as separate kwargs")
if isinstance(region, six.string_types):
toks = region.split(':')
if len(toks) == 1:
seqid = toks[0]
start, end = None, None
else:
seqid, coords = toks[:2]
if len(toks) == 3:
strand = toks[2]
start, end = coords.split('-')
elif isinstance(region, Feature):
seqid = region.seqid
start = region.start
end = region.end
strand = region.strand
# otherwise assume it's a tuple
else:
seqid, start, end = region[:3]
# e.g.,
# completely_within=True..... start >= {start} AND end <= {end}
# completely_within=False.... start < {end} AND end > {start}
if completely_within:
start_op = '>='
end_op = '<='
else:
start_op = '<'
end_op = '>'
end, start = start, end
args = []
position_clause = []
if seqid is not None:
position_clause.append('seqid = ?')
args.append(seqid)
if start is not None:
start = int(start)
position_clause.append('start %s ?' % start_op)
args.append(start)
if end is not None:
end = int(end)
position_clause.append('end %s ?' % end_op)
args.append(end)
position_clause = ' AND '.join(position_clause)
# Only use bins if we have defined boundaries and completely_within is
# True. Otherwise you can't know how far away a feature stretches
# (which means bins are not computable ahead of time)
_bin_clause = ''
if (start is not None) and (end is not None) and completely_within:
if start <= bins.MAX_CHROM_SIZE and end <= bins.MAX_CHROM_SIZE:
_bins = list(bins.bins(start, end, one=False))
# See issue #45
if len(_bins) < 900:
_bin_clause = ' or ' .join(['bin = ?' for _ in _bins])
_bin_clause = 'AND ( %s )' % _bin_clause
args += _bins
query = ' '.join([
constants._SELECT,
'WHERE ',
position_clause,
_bin_clause])
# Add the featuretype clause
if featuretype is not None:
if isinstance(featuretype, six.string_types):
featuretype = [featuretype]
feature_clause = ' or '.join(
['featuretype = ?' for _ in featuretype])
query += ' AND (%s) ' % feature_clause
args.extend(featuretype)
if strand is not None:
strand_clause = ' and strand = ? '
query += strand_clause
args.append(strand)
c = self.conn.cursor()
self._last_query = query
self._last_args = args
self._context = {
'start': start,
'end': end,
'seqid': seqid,
'region': region,
}
c.execute(query, tuple(args))
for i in c:
yield self._feature_returner(**i) | [
"\n Return features within specified genomic coordinates.\n\n Specifying genomic coordinates can be done in a flexible manner\n\n Parameters\n ----------\n region : string, tuple, or Feature instance\n If string, then of the form \"seqid:start-end\". If tuple, then\n (seqid, start, end). If :class:`Feature`, then use the features\n seqid, start, and end values.\n\n This argument is mutually exclusive with start/end/seqid.\n\n *Note*: By design, even if a feature is provided, its strand will\n be ignored. If you want to restrict the output by strand, use the\n separate `strand` kwarg.\n\n strand : + | - | . | None\n If `strand` is provided, then only those features exactly matching\n `strand` will be returned. So `strand='.'` will only return\n unstranded features. Default is `strand=None` which does not\n restrict by strand.\n\n seqid, start, end, strand\n Mutually exclusive with `region`. These kwargs can be used to\n approximate slice notation; see \"Details\" section below.\n\n featuretype : None, string, or iterable\n If not None, then restrict output. If string, then only report\n that feature type. If iterable, then report all featuretypes in\n the iterable.\n\n completely_within : bool\n By default (`completely_within=False`), returns features that\n partially or completely overlap `region`. If\n `completely_within=True`, features that are completely within\n `region` will be returned.\n\n Notes\n -------\n\n The meaning of `seqid`, `start`, and `end` is interpreted as follows:\n\n ====== ====== ===== ======================================\n seqid start end meaning\n ====== ====== ===== ======================================\n str int int equivalent to `region` kwarg\n None int int features from all chroms within coords\n str None int equivalent to [:end] slice notation\n str int None equivalent to [start:] slice notation\n None None None equivalent to FeatureDB.all_features()\n ====== ====== ===== ======================================\n\n If performance is a concern, use `completely_within=True`. This allows\n the query to be optimized by only looking for features that fall in the\n precise genomic bin (same strategy as UCSC Genome Browser and\n BEDTools). Otherwise all features' start/stop coords need to be\n searched to see if they partially overlap the region of interest.\n\n Examples\n --------\n\n - `region(seqid=\"chr1\", start=1000)` returns all features on chr1 that\n start or extend past position 1000\n\n - `region(seqid=\"chr1\", start=1000, completely_within=True)` returns\n all features on chr1 that start past position 1000.\n\n - `region(\"chr1:1-100\", strand=\"+\", completely_within=True)` returns\n only plus-strand features that completely fall within positions 1 to\n 100 on chr1.\n\n Returns\n -------\n A generator object that yields :class:`Feature` objects.\n "
]
|
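A hedged usage sketch for region(); it assumes gffutils is installed and reuses the bundled example file referenced in the tsses docstring above:

    import gffutils

    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gtf'),
                            ':memory:', keep_order=True)
    for f in db.region('chr2L:7000-13000', featuretype='gene'):
        print(f.id, f.start, f.end)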
Please provide a description of the function:def interfeatures(self, features, new_featuretype=None,
merge_attributes=True, dialect=None,
attribute_func=None, update_attributes=None):
for i, f in enumerate(features):
# no inter-feature for the first one
if i == 0:
interfeature_start = f.stop
last_feature = f
continue
interfeature_stop = f.start
if new_featuretype is None:
new_featuretype = 'inter_%s_%s' % (
last_feature.featuretype, f.featuretype)
if last_feature.strand != f.strand:
new_strand = '.'
else:
new_strand = f.strand
if last_feature.chrom != f.chrom:
# We've moved to a new chromosome. For example, if we're
# getting intergenic regions from all genes, they will be on
# different chromosomes. We still assume sorted features, but
# don't complain if they're on different chromosomes -- just
# move on.
last_feature = f
continue
strand = new_strand
chrom = last_feature.chrom
# Shrink
interfeature_start += 1
interfeature_stop -= 1
if merge_attributes:
new_attributes = helpers.merge_attributes(
last_feature.attributes, f.attributes)
else:
new_attributes = {}
if update_attributes:
new_attributes.update(update_attributes)
new_bin = bins.bins(
interfeature_start, interfeature_stop, one=True)
_id = None
fields = dict(
seqid=chrom,
source='gffutils_derived',
featuretype=new_featuretype,
start=interfeature_start,
end=interfeature_stop,
score='.',
strand=strand,
frame='.',
attributes=new_attributes,
bin=new_bin)
if dialect is None:
# Support for @classmethod -- if calling from the class, then
# self.dialect is not defined, so defer to Feature's default
# (which will be constants.dialect, or GFF3).
try:
dialect = self.dialect
except AttributeError:
dialect = None
yield self._feature_returner(**fields)
interfeature_start = f.stop | [
"\n Construct new features representing the space between features.\n\n For example, if `features` is a list of exons, then this method will\n return the introns. If `features` is a list of genes, then this method\n will return the intergenic regions.\n\n Providing N features will return N - 1 new features.\n\n This method purposefully does *not* do any merging or sorting of\n coordinates, so you may want to use :meth:`FeatureDB.merge` first, or\n when selecting features use the `order_by` kwarg, e.g.,\n `db.features_of_type('gene', order_by=('seqid', 'start'))`.\n\n Parameters\n ----------\n features : iterable of :class:`feature.Feature` instances\n Sorted, merged iterable\n\n new_featuretype : string or None\n The new features will all be of this type, or, if None (default)\n then the featuretypes will be constructed from the neighboring\n features, e.g., `inter_exon_exon`.\n\n merge_attributes : bool\n If True, new features' attributes will be a merge of the neighboring\n features' attributes. This is useful if you have provided a list of\n exons; the introns will then retain the transcript and/or gene\n parents as a single item. Otherwise, if False, the attribute will\n be a comma-separated list of values, potentially listing the same\n gene ID twice.\n\n attribute_func : callable or None\n If None, then nothing special is done to the attributes. If\n callable, then the callable accepts two attribute dictionaries and\n returns a single attribute dictionary. If `merge_attributes` is\n True, then `attribute_func` is called before `merge_attributes`.\n This could be useful for manually managing IDs for the new\n features.\n\n update_attributes : dict\n After attributes have been modified and merged, this dictionary can\n be used to replace parts of the attributes dictionary.\n\n Returns\n -------\n A generator that yields :class:`Feature` objects\n "
]
|
Please provide a description of the function:def delete(self, features, make_backup=True, **kwargs):
if make_backup:
if isinstance(self.dbfn, six.string_types):
shutil.copy2(self.dbfn, self.dbfn + '.bak')
c = self.conn.cursor()
query1 =
query2 =
if isinstance(features, FeatureDB):
features = features.all_features()
if isinstance(features, six.string_types):
features = [features]
if isinstance(features, Feature):
features = [features]
for feature in features:
if isinstance(feature, six.string_types):
_id = feature
else:
_id = feature.id
c.execute(query1, (_id,))
c.execute(query2, (_id, _id))
self.conn.commit()
return self | [
"\n Delete features from database.\n\n features : str, iterable, FeatureDB instance\n If FeatureDB, all features will be used. If string, assume it's the\n ID of the feature to remove. Otherwise, assume it's an iterable of\n Feature objects. The classes in gffutils.iterators may be helpful\n in this case.\n\n make_backup : bool\n If True, and the database you're about to update is a file on disk,\n makes a copy of the existing database and saves it with a .bak\n extension.\n\n Returns\n -------\n FeatureDB object, with features deleted.\n ",
"\n DELETE FROM features WHERE id = ?\n ",
"\n DELETE FROM relations WHERE parent = ? OR child = ?\n "
]
|
Please provide a description of the function:def update(self, data, make_backup=True, **kwargs):
from gffutils import create
from gffutils import iterators
if make_backup:
if isinstance(self.dbfn, six.string_types):
shutil.copy2(self.dbfn, self.dbfn + '.bak')
# get iterator-specific kwargs
_iterator_kwargs = {}
for k, v in kwargs.items():
if k in constants._iterator_kwargs:
_iterator_kwargs[k] = v
# Handle all sorts of input
data = iterators.DataIterator(data, **_iterator_kwargs)
if self.dialect['fmt'] == 'gtf':
if 'id_spec' not in kwargs:
kwargs['id_spec'] = {
'gene': 'gene_id', 'transcript': 'transcript_id'}
db = create._GTFDBCreator(
data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs)
elif self.dialect['fmt'] == 'gff3':
if 'id_spec' not in kwargs:
kwargs['id_spec'] = 'ID'
db = create._GFFDBCreator(
data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs)
else:
raise ValueError
db._populate_from_lines(data)
db._update_relations()
db._finalize()
return db | [
"\n Update database with features in `data`.\n\n data : str, iterable, FeatureDB instance\n If FeatureDB, all data will be used. If string, assume it's\n a filename of a GFF or GTF file. Otherwise, assume it's an\n iterable of Feature objects. The classes in gffutils.iterators may\n be helpful in this case.\n\n make_backup : bool\n If True, and the database you're about to update is a file on disk,\n makes a copy of the existing database and saves it with a .bak\n extension.\n\n Notes\n -----\n Other kwargs are used in the same way as in gffutils.create_db; see the\n help for that function for details.\n\n Returns\n -------\n FeatureDB with updated features.\n "
]
|
Please provide a description of the function:def add_relation(self, parent, child, level, parent_func=None,
child_func=None):
if isinstance(parent, six.string_types):
parent = self[parent]
if isinstance(child, six.string_types):
child = self[child]
c = self.conn.cursor()
c.execute('''
INSERT INTO relations (parent, child, level)
VALUES (?, ?, ?)''', (parent.id, child.id, level))
if parent_func is not None:
parent = parent_func(parent, child)
self._update(parent, c)
if child_func is not None:
child = child_func(parent, child)
self._update(child, c)
self.conn.commit()
return self | [
"\n Manually add relations to the database.\n\n Parameters\n ----------\n parent : str or Feature instance\n Parent feature to add.\n\n child : str or Feature instance\n Child feature to add\n\n level : int\n Level of the relation. For example, if parent is a gene and child\n is an mRNA, then you might want level to be 1. But if child is an\n exon, then level would be 2.\n\n parent_func, child_func : callable\n These optional functions control how attributes are updated in the\n database. They both have the signature `func(parent, child)` and\n must return a [possibly modified] Feature instance. For example,\n we could add the child's database id as the \"child\" attribute in\n the parent::\n\n def parent_func(parent, child):\n parent.attributes['child'] = child.id\n\n and add the parent's \"gene_id\" as the child's \"Parent\" attribute::\n\n def child_func(parent, child):\n child.attributes['Parent'] = parent['gene_id']\n\n Returns\n -------\n FeatureDB object with new relations added.\n "
]
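A short sketch of add_relation() using the parent_func/child_func callbacks described above, assuming db is an existing gffutils.FeatureDB; 'gene1' and 'mRNA1' are hypothetical IDs assumed to be present in the database:

    def parent_func(parent, child):
        # record the child's database id on the parent
        parent.attributes['child'] = [child.id]
        return parent

    def child_func(parent, child):
        # record the parent's id on the child
        child.attributes['Parent'] = [parent.id]
        return child

    db.add_relation('gene1', 'mRNA1', level=1,
                    parent_func=parent_func, child_func=child_func)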
|
Please provide a description of the function:def _insert(self, feature, cursor):
try:
cursor.execute(constants._INSERT, feature.astuple())
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT, feature.astuple(self.default_encoding)) | [
"\n Insert a feature into the database.\n "
]
|
Please provide a description of the function:def create_introns(self, exon_featuretype='exon',
grandparent_featuretype='gene', parent_featuretype=None,
new_featuretype='intron', merge_attributes=True):
if (grandparent_featuretype and parent_featuretype) or (
grandparent_featuretype is None and parent_featuretype is None
):
raise ValueError("exactly one of `grandparent_featuretype` or "
"`parent_featuretype` should be provided")
if grandparent_featuretype:
def child_gen():
for gene in self.features_of_type(grandparent_featuretype):
for child in self.children(gene, level=1):
yield child
elif parent_featuretype:
def child_gen():
for child in self.features_of_type(parent_featuretype):
yield child
for child in child_gen():
exons = self.children(child, level=1, featuretype=exon_featuretype,
order_by='start')
for intron in self.interfeatures(
exons, new_featuretype=new_featuretype,
merge_attributes=merge_attributes, dialect=self.dialect
):
yield intron | [
"\n Create introns from existing annotations.\n\n\n Parameters\n ----------\n exon_featuretype : string\n Feature type to use in order to infer introns. Typically `\"exon\"`.\n\n grandparent_featuretype : string\n If `grandparent_featuretype` is not None, then group exons by\n children of this featuretype. If `grandparent_featuretype` is\n \"gene\" (default), then introns will be created for all first-level\n children of genes. This may include mRNA, rRNA, ncRNA, etc. If\n you only want to infer introns from one of these featuretypes\n (e.g., mRNA), then use the `parent_featuretype` kwarg which is\n mutually exclusive with `grandparent_featuretype`.\n\n parent_featuretype : string\n If `parent_featuretype` is not None, then only use this featuretype\n to infer introns. Use this if you only want a subset of\n featuretypes to have introns (e.g., \"mRNA\" only, and not ncRNA or\n rRNA). Mutually exclusive with `grandparent_featuretype`.\n\n new_featuretype : string\n Feature type to use for the inferred introns; default is\n `\"intron\"`.\n\n merge_attributes : bool\n Whether or not to merge attributes from all exons. If False then no\n attributes will be created for the introns.\n\n Returns\n -------\n A generator object that yields :class:`Feature` objects representing\n new introns\n\n Notes\n -----\n The returned generator can be passed directly to the\n :meth:`FeatureDB.update` method to permanently add them to the\n database, e.g., ::\n\n db.update(db.create_introns())\n\n "
]
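A hedged sketch of the pattern suggested in the Notes above (infer introns, then write them back to the database); 'annotation.gff3' is a hypothetical input file:

    import gffutils

    db = gffutils.create_db('annotation.gff3', ':memory:')

    # Infer introns between exons of each gene's first-level children
    introns = list(db.create_introns())
    print(len(introns), 'introns inferred')

    # Persist them; no backup is possible for an in-memory database
    db.update(introns, make_backup=False)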
|
Please provide a description of the function:def merge(self, features, ignore_strand=False):
# Consume iterator up front...
features = list(features)
if len(features) == 0:
return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on Python 3.7+; returning ends the generator the same way
# Either set all strands to '+' or check for strand-consistency.
if ignore_strand:
strand = '.'
else:
strands = [i.strand for i in features]
if len(set(strands)) > 1:
raise ValueError('Specify ignore_strand=True to force merging '
'of multiple strands')
strand = strands[0]
# Sanity check to make sure all features are from the same chromosome.
chroms = [i.chrom for i in features]
if len(set(chroms)) > 1:
raise NotImplementedError('Merging multiple chromosomes not '
'implemented')
chrom = chroms[0]
# To start, we create a merged feature of just the first feature.
current_merged_start = features[0].start
current_merged_stop = features[0].stop
# We don't need to check the first one, so start at feature #2.
for feature in features[1:]:
# Does this feature start within the currently merged feature?...
if feature.start <= current_merged_stop + 1:
# ...It starts within, so leave current_merged_start where it
# is. Does it extend any farther?
if feature.stop >= current_merged_stop:
# Extends further, so set a new stop position
current_merged_stop = feature.stop
else:
# If feature.stop < current_merged_stop, it's completely
# within the previous feature. Nothing more to do.
continue
else:
# The start position is outside the merged feature, so we're
# done with the current merged feature. Prepare for output...
merged_feature = dict(
seqid=feature.chrom,
source='.',
featuretype=feature.featuretype,
start=current_merged_start,
end=current_merged_stop,
score='.',
strand=strand,
frame='.',
attributes='')
yield self._feature_returner(**merged_feature)
# and we start a new one, initializing with this feature's
# start and stop.
current_merged_start = feature.start
current_merged_stop = feature.stop
# need to yield the last one.
if len(features) == 1:
feature = features[0]
merged_feature = dict(
seqid=feature.chrom,
source='.',
featuretype=feature.featuretype,
start=current_merged_start,
end=current_merged_stop,
score='.',
strand=strand,
frame='.',
attributes='')
yield self._feature_returner(**merged_feature) | [
"\n Merge overlapping features together.\n\n Parameters\n ----------\n\n features : iterator of Feature instances\n\n ignore_strand : bool\n If True, features on multiple strands will be merged, and the final\n strand will be set to '.'. Otherwise, ValueError will be raised if\n trying to merge features on different strands.\n\n Returns\n -------\n A generator object that yields :class:`Feature` objects representing\n the newly merged features.\n "
]
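A minimal sketch of merge(), assuming db is an existing gffutils.FeatureDB; 'FBgn0031208' is a hypothetical gene ID, and the exons are ordered by start because merge() scans them left to right:

    exons = db.children('FBgn0031208', featuretype='exon', order_by='start')
    for merged in db.merge(exons, ignore_strand=False):
        print(merged.chrom, merged.start, merged.stop, merged.strand)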
|
Please provide a description of the function:def children_bp(self, feature, child_featuretype='exon', merge=False,
ignore_strand=False):
children = self.children(feature, featuretype=child_featuretype,
order_by='start')
if merge:
children = self.merge(children, ignore_strand=ignore_strand)
total = 0
for child in children:
total += len(child)
return total | [
"\n Total bp of all children of a featuretype.\n\n Useful for getting the exonic bp of an mRNA.\n\n Parameters\n ----------\n\n feature : str or Feature instance\n\n child_featuretype : str\n Which featuretype to consider. For example, to get exonic bp of an\n mRNA, use `child_featuretype='exon'`.\n\n merge : bool\n Whether or not to merge child features together before summing\n them.\n\n ignore_strand : bool\n If True, then overlapping features on different strands will be\n merged together; otherwise, merging features with different strands\n will result in a ValueError.\n\n Returns\n -------\n Integer representing the total number of bp.\n "
]
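A small sketch computing the exonic length of every mRNA, assuming db is an existing gffutils.FeatureDB; merge=True avoids double-counting overlapping exons, and the featuretype names are assumptions about the annotation:

    for transcript in db.features_of_type('mRNA'):
        bp = db.children_bp(transcript, child_featuretype='exon', merge=True)
        print(transcript.id, bp)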
|
Please provide a description of the function:def bed12(self, feature, block_featuretype=['exon'],
thick_featuretype=['CDS'], thin_featuretype=None,
name_field='ID', color=None):
if thick_featuretype and thin_featuretype:
raise ValueError("Can only specify one of `thick_featuretype` or "
"`thin_featuretype`")
exons = list(self.children(feature, featuretype=block_featuretype,
order_by='start'))
if len(exons) == 0:
exons = [feature]
feature = self[feature]
first = exons[0].start
last = exons[-1].stop
if first != feature.start:
raise ValueError(
"Start of first exon (%s) does not match start of feature (%s)"
% (first, feature.start))
if last != feature.stop:
raise ValueError(
"End of last exon (%s) does not match end of feature (%s)"
% (last, feature.stop))
if color is None:
color = '0,0,0'
color = color.replace(' ', '').strip()
# Use field names as defined at
# http://genome.ucsc.edu/FAQ/FAQformat.html#format1
chrom = feature.chrom
chromStart = feature.start - 1
chromEnd = feature.stop
orig = constants.always_return_list
constants.always_return_list = True
try:
name = feature[name_field][0]
except KeyError:
name = "."
constants.always_return_list = orig
score = feature.score
if score == '.':
score = '0'
strand = feature.strand
itemRgb = color
blockCount = len(exons)
blockSizes = [len(i) for i in exons]
blockStarts = [i.start - 1 - chromStart for i in exons]
if thick_featuretype:
thick = list(self.children(feature, featuretype=thick_featuretype,
order_by='start'))
if len(thick) == 0:
thickStart = feature.start
thickEnd = feature.stop
else:
thickStart = thick[0].start - 1 # BED 0-based coords
thickEnd = thick[-1].stop
if thin_featuretype:
thin = list(self.children(feature, featuretype=thin_featuretype,
order_by='start'))
if len(thin) == 0:
thickStart = feature.start
thickEnd = feature.stop
else:
thickStart = thin[0].stop
thickEnd = thin[-1].start - 1 # BED 0-based coords
tst = chromStart + blockStarts[-1] + blockSizes[-1]
assert tst == chromEnd, "tst=%s; chromEnd=%s" % (tst, chromEnd)
fields = [
chrom,
chromStart,
chromEnd,
name,
score,
strand,
thickStart,
thickEnd,
itemRgb,
blockCount,
','.join(map(str, blockSizes)),
','.join(map(str, blockStarts))]
return '\t'.join(map(str, fields)) | [
"\n Converts `feature` into a BED12 format.\n\n GFF and GTF files do not necessarily define genes consistently, so this\n method provides flexibility in specifying what to call a \"transcript\".\n\n Parameters\n ----------\n feature : str or Feature instance\n In most cases, this feature should be a transcript rather than\n a gene.\n\n block_featuretype : str or list\n Which featuretype to use as the exons. These are represented as\n blocks in the BED12 format. Typically 'exon'.\n\n Use the `thick_featuretype` and `thin_featuretype` arguments to\n control the display of CDS as thicker blocks and UTRs as thinner\n blocks.\n\n Note that the features for `thick` or `thin` are *not*\n automatically included in the blocks; if you do want them included,\n then those featuretypes should be added to this `block_features`\n list.\n\n If no child features of type `block_featuretype` are found, then\n the full `feature` is returned in BED12 format as if it had\n a single exon.\n\n thick_featuretype : str or list\n Child featuretype(s) to use in order to determine the boundaries of\n the \"thick\" blocks. In BED12 format, these represent coding\n sequences; typically this would be set to \"CDS\". This argument is\n mutually exclusive with `thin_featuretype`.\n\n Specifically, the BED12 thickStart will be the start coord of the\n first `thick` item and the thickEnd will be the stop coord of the\n last `thick` item.\n\n thin_featuretype : str or list\n Child featuretype(s) to use in order to determine the boundaries of\n the \"thin\" blocks. In BED12 format, these represent untranslated\n regions. Typically \"utr\" or ['three_prime_UTR', 'five_prime_UTR'].\n Mutually exclusive with `thick_featuretype`.\n\n Specifically, the BED12 thickStart field will be the stop coord of\n the first `thin` item and the thickEnd field will be the start\n coord of the last `thin` item.\n\n name_field : str\n Which attribute of `feature` to use as the feature's name. If this\n field is not present, a \".\" placeholder will be used instead.\n\n color : None or str\n If None, then use black (0,0,0) as the RGB color; otherwise this\n should be a comma-separated string of R,G,B values each of which\n are integers in the range 0-255.\n "
]
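A hedged sketch writing one BED12 line per mRNA with CDS children as the thick blocks, assuming db is an existing gffutils.FeatureDB; the output path and featuretype names are assumptions:

    with open('transcripts.bed', 'w') as out:   # hypothetical output path
        for tx in db.features_of_type('mRNA'):
            line = db.bed12(tx, block_featuretype='exon',
                            thick_featuretype='CDS', name_field='ID')
            out.write(line + '\n')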
|
Please provide a description of the function:def DataIterator(data, checklines=10, transform=None,
force_dialect_check=False, from_string=False, **kwargs):
_kwargs = dict(data=data, checklines=checklines, transform=transform,
force_dialect_check=force_dialect_check, **kwargs)
if isinstance(data, six.string_types):
if from_string:
return _StringIterator(**_kwargs)
else:
if os.path.exists(data):
return _FileIterator(**_kwargs)
elif is_url(data):
return _UrlIterator(**_kwargs)
elif isinstance(data, FeatureDB):
_kwargs['data'] = data.all_features()
return _FeatureIterator(**_kwargs)
else:
return _FeatureIterator(**_kwargs) | [
"\n Iterate over features, no matter how they are provided.\n\n Parameters\n ----------\n data : str, iterable of Feature objs, FeatureDB\n `data` can be a string (filename, URL, or contents of a file, if\n from_string=True), any arbitrary iterable of features, or a FeatureDB\n (in which case its all_features() method will be called).\n\n checklines : int\n Number of lines to check in order to infer a dialect.\n\n transform : None or callable\n If not None, `transform` should accept a Feature object as its only\n argument and return either a (possibly modified) Feature object or\n a value that evaluates to False. If the return value is False, the\n feature will be skipped.\n\n force_dialect_check : bool\n If True, check the dialect of every feature. Thorough, but can be\n slow.\n\n from_string : bool\n If True, `data` should be interpreted as the contents of a file rather\n than the filename itself.\n\n dialect : None or dict\n Provide the dialect, which will override auto-detected dialects. If\n provided, you should probably also use `force_dialect_check=False` and\n `checklines=0` but this is not enforced.\n "
]
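A minimal sketch of DataIterator() with the two most common inputs; 'annotation.gff3' is a hypothetical filename:

    from gffutils.iterators import DataIterator

    # From a file on disk
    for feature in DataIterator('annotation.gff3'):
        print(feature.featuretype, feature.id)
        break

    # From the literal contents of a file
    text = 'chr1\t.\tgene\t1\t100\t.\t+\t.\tID=gene1\n'
    for feature in DataIterator(text, from_string=True):
        print(feature)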
|
Please provide a description of the function:def bbdesign(n, center=None):
assert n>=3, 'Number of variables must be at least 3'
# First, compute a factorial DOE with 2 parameters
H_fact = ff2n(2)
# Now we populate the real DOE with this DOE
# We made a factorial design on each pair of dimensions
# - So, we created a factorial design with two factors
# - Make two loops
Index = 0
nb_lines = (n*(n-1)/2)*H_fact.shape[0]
H = repeat_center(n, nb_lines)
for i in range(n - 1):
for j in range(i + 1, n):
Index = Index + 1
H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], i] = H_fact[:, 0]
H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], j] = H_fact[:, 1]
if center is None:
if n<=16:
points= [0, 0, 0, 3, 3, 6, 6, 6, 8, 9, 10, 12, 12, 13, 14, 15, 16]
center = points[n]
else:
center = n
H = np.c_[H.T, repeat_center(n, center).T].T
return H | [
"\r\n Create a Box-Behnken design\r\n \r\n Parameters\r\n ----------\r\n n : int\r\n The number of factors in the design\r\n \r\n Optional\r\n --------\r\n center : int\r\n The number of center points to include (default = 1).\r\n \r\n Returns\r\n -------\r\n mat : 2d-array\r\n The design matrix\r\n \r\n Example\r\n -------\r\n ::\r\n \r\n >>> bbdesign(3)\r\n array([[-1., -1., 0.],\r\n [ 1., -1., 0.],\r\n [-1., 1., 0.],\r\n [ 1., 1., 0.],\r\n [-1., 0., -1.],\r\n [ 1., 0., -1.],\r\n [-1., 0., 1.],\r\n [ 1., 0., 1.],\r\n [ 0., -1., -1.],\r\n [ 0., 1., -1.],\r\n [ 0., -1., 1.],\r\n [ 0., 1., 1.],\r\n [ 0., 0., 0.],\r\n [ 0., 0., 0.],\r\n [ 0., 0., 0.]])\r\n \r\n "
]
|
Please provide a description of the function:def inspect(data, look_for=['featuretype', 'chrom', 'attribute_keys',
'feature_count'], limit=None, verbose=True):
results = {}
obj_attrs = []
for i in look_for:
if i not in ['attribute_keys', 'feature_count']:
obj_attrs.append(i)
results[i] = Counter()
attr_keys = 'attribute_keys' in look_for
d = iterators.DataIterator(data)
feature_count = 0
for f in d:
if verbose:
sys.stderr.write('\r%s features inspected' % feature_count)
sys.stderr.flush()
for obj_attr in obj_attrs:
results[obj_attr].update([getattr(f, obj_attr)])
if attr_keys:
results['attribute_keys'].update(f.attributes.keys())
feature_count += 1
if limit and feature_count == limit:
break
new_results = {}
for k, v in results.items():
new_results[k] = dict(v)
new_results['feature_count'] = feature_count
return new_results | [
"\n Inspect a GFF or GTF data source.\n\n This function is useful for figuring out the different featuretypes found\n in a file (for potential removal before creating a FeatureDB).\n\n Returns a dictionary with a key for each item in `look_for` and\n a corresponding value that is a dictionary of how many of each unique item\n were found.\n\n There will always be a `feature_count` key, indicating how many features\n were looked at (if `limit` is provided, then `feature_count` will be the\n same as `limit`).\n\n For example, if `look_for` is ['chrom', 'featuretype'], then the result\n will be a dictionary like::\n\n {\n 'chrom': {\n 'chr1': 500,\n 'chr2': 435,\n 'chr3': 200,\n ...\n ...\n }.\n\n 'featuretype': {\n 'gene': 150,\n 'exon': 324,\n ...\n },\n\n 'feature_count': 5000\n\n }\n\n\n Parameters\n ----------\n data : str, FeatureDB instance, or iterator of Features\n If `data` is a string, assume it's a GFF or GTF filename. If it's\n a FeatureDB instance, then its `all_features()` method will be\n automatically called. Otherwise, assume it's an iterable of Feature\n objects.\n\n look_for : list\n List of things to keep track of. Options are:\n\n - any attribute of a Feature object, such as chrom, source, start,\n stop, strand.\n\n - \"attribute_keys\", which will look at all the individual\n attribute keys of each feature\n\n limit : int\n Number of features to look at. Default is no limit.\n\n verbose : bool\n Report how many features have been processed.\n\n Returns\n -------\n dict\n "
]
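A quick sketch of inspect() used to preview a file before deciding which featuretypes to keep; the import path and the filename are assumptions:

    from gffutils.inspect import inspect   # import path assumed

    report = inspect('annotation.gff3',
                     look_for=['featuretype', 'chrom'],
                     limit=10000, verbose=False)
    print(report['feature_count'])
    print(report['featuretype'])   # e.g. {'gene': 150, 'exon': 324, ...}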
|
Please provide a description of the function:def fullfact(levels):
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j]*level_repeat
rng = lvl*range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H | [
"\r\n Create a general full-factorial design\r\n \r\n Parameters\r\n ----------\r\n levels : array-like\r\n An array of integers that indicate the number of levels of each input\r\n design factor.\r\n \r\n Returns\r\n -------\r\n mat : 2d-array\r\n The design matrix with coded levels 0 to k-1 for a k-level factor\r\n \r\n Example\r\n -------\r\n ::\r\n \r\n >>> fullfact([2, 4, 3])\r\n array([[ 0., 0., 0.],\r\n [ 1., 0., 0.],\r\n [ 0., 1., 0.],\r\n [ 1., 1., 0.],\r\n [ 0., 2., 0.],\r\n [ 1., 2., 0.],\r\n [ 0., 3., 0.],\r\n [ 1., 3., 0.],\r\n [ 0., 0., 1.],\r\n [ 1., 0., 1.],\r\n [ 0., 1., 1.],\r\n [ 1., 1., 1.],\r\n [ 0., 2., 1.],\r\n [ 1., 2., 1.],\r\n [ 0., 3., 1.],\r\n [ 1., 3., 1.],\r\n [ 0., 0., 2.],\r\n [ 1., 0., 2.],\r\n [ 0., 1., 2.],\r\n [ 1., 1., 2.],\r\n [ 0., 2., 2.],\r\n [ 1., 2., 2.],\r\n [ 0., 3., 2.],\r\n [ 1., 3., 2.]])\r\n \r\n "
]
|
Please provide a description of the function:def fracfact(gen):
# Recognize letters and combinations
A = [item for item in re.split(r'\-?\s?\+?', gen) if item] # raw regex string; remove empty strings
C = [len(item) for item in A]
# Indices of single letters (main factors)
I = [i for i, item in enumerate(C) if item==1]
# Indices of letter combinations (we need them to fill out H2 properly).
J = [i for i, item in enumerate(C) if item!=1]
# Check if there are "-" or "+" operators in gen
U = [item for item in gen.split(' ') if item] # remove empty strings
# If R1 is either None or not, the result is not changed, since it is a
# multiplication of 1.
R1 = _grep(U, '+')
R2 = _grep(U, '-')
# Fill in design with two level factorial design
H1 = ff2n(len(I))
H = np.zeros((H1.shape[0], len(C)))
H[:, I] = H1
# Recognize combinations and fill in the rest of matrix H2 with the proper
# products
for k in J:
# For lowercase letters
xx = np.array([ord(c) for c in A[k]]) - 97
# For uppercase letters
if np.any(xx<0):
xx = np.array([ord(c) for c in A[k]]) - 65
H[:, k] = np.prod(H1[:, xx], axis=1)
# Update design if gen includes "-" operator
if R2:
H[:, R2] *= -1
# Return the fractional factorial design
return H | [
"\r\n Create a 2-level fractional-factorial design with a generator string.\r\n \r\n Parameters\r\n ----------\r\n gen : str\r\n A string, consisting of lowercase, uppercase letters or operators \"-\"\r\n and \"+\", indicating the factors of the experiment\r\n \r\n Returns\r\n -------\r\n H : 2d-array\r\n A m-by-n matrix, the fractional factorial design. m is 2^k, where k\r\n is the number of letters in ``gen``, and n is the total number of\r\n entries in ``gen``.\r\n \r\n Notes\r\n -----\r\n In ``gen`` we define the main factors of the experiment and the factors\r\n whose levels are the products of the main factors. For example, if\r\n \r\n gen = \"a b ab\"\r\n \r\n then \"a\" and \"b\" are the main factors, while the 3rd factor is the product\r\n of the first two. If we input uppercase letters in ``gen``, we get the same\r\n result. We can also use the operators \"+\" and \"-\" in ``gen``.\r\n \r\n For example, if\r\n \r\n gen = \"a b -ab\"\r\n \r\n then the 3rd factor is the opposite of the product of \"a\" and \"b\".\r\n \r\n The output matrix includes the two level full factorial design, built by\r\n the main factors of ``gen``, and the products of the main factors. The\r\n columns of ``H`` follow the sequence of ``gen``.\r\n \r\n For example, if\r\n \r\n gen = \"a b ab c\"\r\n \r\n then columns H[:, 0], H[:, 1], and H[:, 3] include the two level full\r\n factorial design and H[:, 2] includes the products of the main factors.\r\n \r\n Examples\r\n --------\r\n ::\r\n \r\n >>> fracfact(\"a b ab\")\r\n array([[-1., -1., 1.],\r\n [ 1., -1., -1.],\r\n [-1., 1., -1.],\r\n [ 1., 1., 1.]])\r\n \r\n >>> fracfact(\"A B AB\")\r\n array([[-1., -1., 1.],\r\n [ 1., -1., -1.],\r\n [-1., 1., -1.],\r\n [ 1., 1., 1.]])\r\n \r\n >>> fracfact(\"a b -ab c +abc\")\r\n array([[-1., -1., -1., -1., -1.],\r\n [ 1., -1., 1., -1., 1.],\r\n [-1., 1., 1., -1., 1.],\r\n [ 1., 1., -1., -1., -1.],\r\n [-1., -1., -1., 1., 1.],\r\n [ 1., -1., 1., 1., -1.],\r\n [-1., 1., 1., 1., -1.],\r\n [ 1., 1., -1., 1., 1.]])\r\n \r\n "
]
|
Please provide a description of the function:def lhs(n, samples=None, criterion=None, iterations=None):
H = None
if samples is None:
samples = n
if criterion is not None:
assert criterion.lower() in ('center', 'c', 'maximin', 'm',
'centermaximin', 'cm', 'correlation',
'corr'), 'Invalid value for "criterion": {}'.format(criterion)
else:
H = _lhsclassic(n, samples)
if criterion is None:
criterion = 'center'
if iterations is None:
iterations = 5
if H is None:
if criterion.lower() in ('center', 'c'):
H = _lhscentered(n, samples)
elif criterion.lower() in ('maximin', 'm'):
H = _lhsmaximin(n, samples, iterations, 'maximin')
elif criterion.lower() in ('centermaximin', 'cm'):
H = _lhsmaximin(n, samples, iterations, 'centermaximin')
elif criterion.lower() in ('correlate', 'correlation', 'corr'):  # accept 'correlation' as documented and asserted above
H = _lhscorrelate(n, samples, iterations)
return H | [
"\r\n Generate a latin-hypercube design\r\n \r\n Parameters\r\n ----------\r\n n : int\r\n The number of factors to generate samples for\r\n \r\n Optional\r\n --------\r\n samples : int\r\n The number of samples to generate for each factor (Default: n)\r\n criterion : str\r\n Allowable values are \"center\" or \"c\", \"maximin\" or \"m\", \r\n \"centermaximin\" or \"cm\", and \"correlation\" or \"corr\". If no value \r\n given, the design is simply randomized.\r\n iterations : int\r\n The number of iterations in the maximin and correlations algorithms\r\n (Default: 5).\r\n \r\n Returns\r\n -------\r\n H : 2d-array\r\n An n-by-samples design matrix that has been normalized so factor values\r\n are uniformly spaced between zero and one.\r\n \r\n Example\r\n -------\r\n A 3-factor design (defaults to 3 samples)::\r\n \r\n >>> lhs(3)\r\n array([[ 0.40069325, 0.08118402, 0.69763298],\r\n [ 0.19524568, 0.41383587, 0.29947106],\r\n [ 0.85341601, 0.75460699, 0.360024 ]])\r\n \r\n A 4-factor design with 6 samples::\r\n \r\n >>> lhs(4, samples=6)\r\n array([[ 0.27226812, 0.02811327, 0.62792445, 0.91988196],\r\n [ 0.76945538, 0.43501682, 0.01107457, 0.09583358],\r\n [ 0.45702981, 0.76073773, 0.90245401, 0.18773015],\r\n [ 0.99342115, 0.85814198, 0.16996665, 0.65069309],\r\n [ 0.63092013, 0.22148567, 0.33616859, 0.36332478],\r\n [ 0.05276917, 0.5819198 , 0.67194243, 0.78703262]])\r\n \r\n A 2-factor design with 5 centered samples::\r\n \r\n >>> lhs(2, samples=5, criterion='center')\r\n array([[ 0.3, 0.5],\r\n [ 0.7, 0.9],\r\n [ 0.1, 0.3],\r\n [ 0.9, 0.1],\r\n [ 0.5, 0.7]])\r\n \r\n A 3-factor design with 4 samples where the minimum distance between\r\n all samples has been maximized::\r\n \r\n >>> lhs(3, samples=4, criterion='maximin')\r\n array([[ 0.02642564, 0.55576963, 0.50261649],\r\n [ 0.51606589, 0.88933259, 0.34040838],\r\n [ 0.98431735, 0.0380364 , 0.01621717],\r\n [ 0.40414671, 0.33339132, 0.84845707]])\r\n \r\n A 4-factor design with 5 samples where the samples are as uncorrelated\r\n as possible (within 10 iterations)::\r\n \r\n >>> lhs(4, samples=5, criterion='correlate', iterations=10)\r\n \r\n "
]
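The design returned by lhs() lives on the unit hypercube; a common follow-up step (not part of the function) is to scale each column onto a real factor range. A minimal sketch, with the import path and the factor ranges as assumptions:

    import numpy as np
    from pyDOE import lhs   # import path assumed

    design = lhs(2, samples=5, criterion='center')

    # Hypothetical ranges: factor 1 in [20, 80], factor 2 in [1, 5]
    lows = np.array([20.0, 1.0])
    highs = np.array([80.0, 5.0])
    scaled = lows + design * (highs - lows)
    print(scaled)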
|
Please provide a description of the function:def _pdist(x):
x = np.atleast_2d(x)
assert len(x.shape)==2, 'Input array must be 2d-dimensional'
m, n = x.shape
if m<2:
return []
d = []
for i in range(m - 1):
for j in range(i + 1, m):
d.append((sum((x[j, :] - x[i, :])**2))**0.5)
return np.array(d) | [
"\r\n Calculate the pair-wise point distances of a matrix\r\n \r\n Parameters\r\n ----------\r\n x : 2d-array\r\n An m-by-n array of scalars, where there are m points in n dimensions.\r\n \r\n Returns\r\n -------\r\n d : array\r\n A 1-by-b array of scalars, where b = m*(m - 1)/2. This array contains\r\n all the pair-wise point distances, arranged in the order (1, 0), \r\n (2, 0), ..., (m-1, 0), (2, 1), ..., (m-1, 1), ..., (m-1, m-2).\r\n \r\n Examples\r\n --------\r\n ::\r\n \r\n >>> x = np.array([[0.1629447, 0.8616334],\r\n ... [0.5811584, 0.3826752],\r\n ... [0.2270954, 0.4442068],\r\n ... [0.7670017, 0.7264718],\r\n ... [0.8253975, 0.1937736]])\r\n >>> _pdist(x)\r\n array([ 0.6358488, 0.4223272, 0.6189940, 0.9406808, 0.3593699,\r\n 0.3908118, 0.3087661, 0.6092392, 0.6486001, 0.5358894])\r\n \r\n "
]
|
Please provide a description of the function:def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None,
featuretypes_to_ignore=None):
logger.info("Cleaning GFF")
chroms_to_ignore = chroms_to_ignore or []
featuretypes_to_ignore = featuretypes_to_ignore or []
with open(cleaned, 'w') as fout:
for i in gffutils.iterators.DataIterator(gff):
if add_chr:
i.chrom = "chr" + i.chrom
if i.chrom in chroms_to_ignore:
continue
if i.featuretype in featuretypes_to_ignore:
continue
fout.write(str(i) + '\n')
return cleaned | [
"\n Cleans a GFF file by removing features on unwanted chromosomes and of\n unwanted featuretypes. Optionally adds \"chr\" to chrom names.\n "
]
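A minimal call sketch for clean_gff(), assuming the function is in scope; all paths and the chromosome/featuretype filters are hypothetical:

    cleaned = clean_gff('raw.gff3', 'cleaned.gff3',
                        add_chr=True,
                        chroms_to_ignore=['chrM'],
                        featuretypes_to_ignore=['region'])
    print('wrote', cleaned)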
|
Please provide a description of the function:def ccdesign(n, center=(4, 4), alpha='orthogonal', face='circumscribed'):
# Check inputs
assert isinstance(n, int) and n>1, '"n" must be an integer greater than 1.'
assert alpha.lower() in ('orthogonal', 'o', 'rotatable',
'r'), 'Invalid value for "alpha": {:}'.format(alpha)
assert face.lower() in ('circumscribed', 'ccc', 'inscribed', 'cci',
'faced', 'ccf'), 'Invalid value for "face": {:}'.format(face)
try:
nc = len(center)
except:
raise TypeError('Invalid value for "center": {:}. Expected a 1-by-2 array.'.format(center))
else:
if nc!=2:
raise ValueError('Invalid number of values for "center" (expected 2, but got {:})'.format(nc))
# Orthogonal Design
if alpha.lower() in ('orthogonal', 'o'):
H2, a = star(n, alpha='orthogonal', center=center)
# Rotatable Design
if alpha.lower() in ('rotatable', 'r'):
H2, a = star(n, alpha='rotatable')
# Inscribed CCD
if face.lower() in ('inscribed', 'cci'):
H1 = ff2n(n)
H1 = H1/a # Scale down the factorial points
H2, a = star(n)
# Faced CCD
if face.lower() in ('faced', 'ccf'):
H2, a = star(n) # Value of alpha is always 1 in Faced CCD
H1 = ff2n(n)
# Circumscribed CCD
if face.lower() in ('circumscribed', 'ccc'):
H1 = ff2n(n)
C1 = repeat_center(n, center[0])
C2 = repeat_center(n, center[1])
H1 = union(H1, C1)
H2 = union(H2, C2)
H = union(H1, H2)
return H | [
"\r\n Central composite design\r\n \r\n Parameters\r\n ----------\r\n n : int\r\n The number of factors in the design.\r\n \r\n Optional\r\n --------\r\n center : int array\r\n A 1-by-2 array of integers, the number of center points in each block\r\n of the design. (Default: (4, 4)).\r\n alpha : str\r\n A string describing the effect of alpha has on the variance. ``alpha``\r\n can take on the following values:\r\n \r\n 1. 'orthogonal' or 'o' (Default)\r\n \r\n 2. 'rotatable' or 'r'\r\n \r\n face : str\r\n The relation between the start points and the corner (factorial) points.\r\n There are three options for this input:\r\n \r\n 1. 'circumscribed' or 'ccc': This is the original form of the central\r\n composite design. The star points are at some distance ``alpha``\r\n from the center, based on the properties desired for the design.\r\n The start points establish new extremes for the low and high\r\n settings for all factors. These designs have circular, spherical,\r\n or hyperspherical symmetry and require 5 levels for each factor.\r\n Augmenting an existing factorial or resolution V fractional \r\n factorial design with star points can produce this design.\r\n \r\n 2. 'inscribed' or 'cci': For those situations in which the limits\r\n specified for factor settings are truly limits, the CCI design\r\n uses the factors settings as the star points and creates a factorial\r\n or fractional factorial design within those limits (in other words,\r\n a CCI design is a scaled down CCC design with each factor level of\r\n the CCC design divided by ``alpha`` to generate the CCI design).\r\n This design also requires 5 levels of each factor.\r\n \r\n 3. 'faced' or 'ccf': In this design, the star points are at the center\r\n of each face of the factorial space, so ``alpha`` = 1. This \r\n variety requires 3 levels of each factor. Augmenting an existing \r\n factorial or resolution V design with appropriate star points can \r\n also produce this design.\r\n \r\n Notes\r\n -----\r\n - Fractional factorial designs are not (yet) available here.\r\n - 'ccc' and 'cci' can be rotatable design, but 'ccf' cannot.\r\n - If ``face`` is specified, while ``alpha`` is not, then the default value\r\n of ``alpha`` is 'orthogonal'.\r\n \r\n Returns\r\n -------\r\n mat : 2d-array\r\n The design matrix with coded levels -1 and 1\r\n \r\n Example\r\n -------\r\n ::\r\n \r\n >>> ccdesign(3)\r\n array([[-1. , -1. , -1. ],\r\n [ 1. , -1. , -1. ],\r\n [-1. , 1. , -1. ],\r\n [ 1. , 1. , -1. ],\r\n [-1. , -1. , 1. ],\r\n [ 1. , -1. , 1. ],\r\n [-1. , 1. , 1. ],\r\n [ 1. , 1. , 1. ],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ],\r\n [-1.82574186, 0. , 0. ],\r\n [ 1.82574186, 0. , 0. ],\r\n [ 0. , -1.82574186, 0. ],\r\n [ 0. , 1.82574186, 0. ],\r\n [ 0. , 0. , -1.82574186],\r\n [ 0. , 0. , 1.82574186],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. ]])\r\n \r\n \r\n "
]
|
Please provide a description of the function:def feature_from_line(line, dialect=None, strict=True, keep_order=False):
if not strict:
lines = line.splitlines(False)
_lines = []
for i in lines:
i = i.strip()
if len(i) > 0:
_lines.append(i)
assert len(_lines) == 1, _lines
line = _lines[0]
if '\t' in line:
fields = line.rstrip('\n\r').split('\t')
else:
fields = line.rstrip('\n\r').split(None, 8)
else:
fields = line.rstrip('\n\r').split('\t')
try:
attr_string = fields[8]
except IndexError:
attr_string = ""
attrs, _dialect = parser._split_keyvals(attr_string, dialect=dialect)
d = dict(list(zip(constants._gffkeys, fields)))
d['attributes'] = attrs
d['extra'] = fields[9:]
d['keep_order'] = keep_order
if dialect is None:
dialect = _dialect
return Feature(dialect=dialect, **d) | [
"\n Given a line from a GFF file, return a Feature object\n\n Parameters\n ----------\n line : string\n\n strict : bool\n If True (default), assume `line` is a single, tab-delimited string that\n has at least 9 fields.\n\n If False, then the input can have a more flexible format, useful for\n creating single ad hoc features or for writing tests. In this case,\n `line` can be a multi-line string (as long as it has a single non-empty\n line), and, as long as there are only 9 fields (standard GFF/GTF), then\n it's OK to use spaces instead of tabs to separate fields in `line`.\n But if >9 fields are to be used, then tabs must be used.\n\n keep_order, dialect\n Passed directly to :class:`Feature`; see docstring for that class for\n description\n\n Returns\n -------\n A new :class:`Feature` object.\n "
]
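A small sketch of feature_from_line() for building an ad hoc Feature, e.g. in a test; the line content is hypothetical, and strict=False permits space-separated fields as described above:

    from gffutils.feature import feature_from_line

    f = feature_from_line(
        'chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1', strict=False)
    print(f.chrom, f.start, f.stop, f.attributes['Name'])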
|
Please provide a description of the function:def calc_bin(self, _bin=None):
if _bin is None:
try:
_bin = bins.bins(self.start, self.end, one=True)
except TypeError:
_bin = None
return _bin | [
"\n Calculate the smallest UCSC genomic bin that will contain this feature.\n "
]
|
Please provide a description of the function:def astuple(self, encoding=None):
if not encoding:
return (
self.id, self.seqid, self.source, self.featuretype, self.start,
self.end, self.score, self.strand, self.frame,
helpers._jsonify(self.attributes),
helpers._jsonify(self.extra), self.calc_bin()
)
return (
self.id.decode(encoding), self.seqid.decode(encoding),
self.source.decode(encoding), self.featuretype.decode(encoding),
self.start, self.end, self.score.decode(encoding),
self.strand.decode(encoding), self.frame.decode(encoding),
helpers._jsonify(self.attributes).decode(encoding),
helpers._jsonify(self.extra).decode(encoding), self.calc_bin()
) | [
"\n Return a tuple suitable for import into a database.\n\n Attributes field and extra field jsonified into strings. The order of\n fields is such that they can be supplied as arguments for the query\n defined in :attr:`gffutils.constants._INSERT`.\n\n If `encoding` is not None, then convert string fields to unicode using\n the provided encoding.\n\n Returns\n -------\n Tuple\n "
]
|
Please provide a description of the function:def sequence(self, fasta, use_strand=True):
if isinstance(fasta, six.string_types):
fasta = Fasta(fasta, as_raw=False)
# recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation
# and is therefore 0-based half-open.
seq = fasta[self.chrom][self.start-1:self.stop]
if use_strand and self.strand == '-':
seq = seq.reverse.complement
return seq.seq | [
"\n Retrieves the sequence of this feature as a string.\n\n Uses the pyfaidx package.\n\n Parameters\n ----------\n\n fasta : str\n If str, then it's a FASTA-format filename; otherwise assume it's\n a pyfaidx.Fasta object.\n\n use_strand : bool\n If True (default), the sequence returned will be\n reverse-complemented for minus-strand features.\n\n Returns\n -------\n string\n "
]
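A hedged sketch of sequence(), assuming db is an existing gffutils.FeatureDB and pyfaidx is installed; 'genome.fa' is a hypothetical FASTA whose sequence names match the annotation's seqids:

    gene = db['gene1']                    # hypothetical feature ID
    seq = gene.sequence('genome.fa')      # reverse-complemented when gene.strand == '-'
    print(len(seq), seq[:50])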
|
Please provide a description of the function:def to_bed12(f, db, child_type='exon', name_field='ID'):
if isinstance(f, six.string_types):
f = db[f]
children = list(db.children(f, featuretype=child_type, order_by='start'))
sizes = [len(i) for i in children]
starts = [i.start - f.start for i in children]
fields = [
f.chrom,
f.start - 1, # GTF -> BED coord system
f.stop,
f.attributes.get(name_field, ['.'])[0],
f.score,
f.strand,
f.start,
f.stop,
'0,0,0',
len(children),
','.join(map(str, sizes)),
','.join(map(str, starts))
]
return '\t'.join(map(str, fields)) + '\n' | [
"\n Given a top-level feature (e.g., transcript), construct a BED12 entry\n Parameters\n ----------\n f : Feature object or string\n This is the top-level feature represented by one BED12 line. For\n a canonical GFF or GTF, this will generally be a transcript.\n db : a FeatureDB object\n This is needed to get the children for the feature\n child_type : str\n Featuretypes that will be represented by the BED12 \"blocks\". Typically\n \"exon\".\n name_field : str\n Attribute to be used in the \"name\" field of the BED12 entry. Usually\n \"ID\" for GFF; \"transcript_id\" for GTF.\n "
]
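A short sketch of to_bed12() for a GTF-style database, assuming db is an existing gffutils.FeatureDB; unlike FeatureDB.bed12(), this helper already appends a newline. The import path, output path, and featuretype are assumptions:

    from gffutils.helpers import to_bed12   # import path assumed

    with open('transcripts.bed', 'w') as out:
        for tx in db.features_of_type('transcript'):
            out.write(to_bed12(tx, db, child_type='exon',
                               name_field='transcript_id'))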
|
Please provide a description of the function:def infer_dialect(attributes):
if isinstance(attributes, six.string_types):
attributes = [attributes]
dialects = [parser._split_keyvals(i)[1] for i in attributes]
return _choose_dialect(dialects) | [
"\n Infer the dialect based on the attributes.\n\n Parameters\n ----------\n attributes : str or iterable\n A single attributes string from a GTF or GFF line, or an iterable of\n such strings.\n\n Returns\n -------\n Dictionary representing the inferred dialect\n "
]
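A small sketch of infer_dialect() on a GTF-style and a GFF3-style attribute string; the import path is an assumption, and the expected outputs follow from the dialect detection in _split_keyvals later in this document:

    from gffutils.helpers import infer_dialect   # import path assumed

    d = infer_dialect('gene_id "FBgn0031208"; transcript_id "FBtr0300689";')
    print(d['fmt'], repr(d['keyval separator']))   # expected: gtf ' '

    d = infer_dialect('ID=FBgn0031208;Name=CG11023')
    print(d['fmt'])                                # expected: gff3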
|
Please provide a description of the function:def _choose_dialect(dialects):
# NOTE: can use helpers.dialect_compare if you need to make this more
# complex....
# For now, this function favors the first dialect, and then appends the
# order of additional fields seen in the attributes of other lines giving
# priority to dialects that come first in the iterable.
if len(dialects) == 0:
return constants.dialect
final_order = []
for dialect in dialects:
for o in dialect['order']:
if o not in final_order:
final_order.append(o)
dialect = dialects[0]
dialect['order'] = final_order
return dialect | [
"\n Given a list of dialects, choose the one to use as the \"canonical\" version.\n\n If `dialects` is an empty list, then use the default GFF3 dialect\n\n Parameters\n ----------\n dialects : iterable\n iterable of dialect dictionaries\n\n Returns\n -------\n dict\n "
]
|
Please provide a description of the function:def make_query(args, other=None, limit=None, strand=None, featuretype=None,
extra=None, order_by=None, reverse=False,
completely_within=False):
_QUERY = ("{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} "
"{LIMIT} {STRAND} {ORDER_BY}")
# Construct a dictionary `d` that will be used later as _QUERY.format(**d).
# Default is just _SELECT, which returns all records in the features table.
# (Recall that constants._SELECT gets the fields in the order needed to
# reconstruct a Feature)
d = dict(_SELECT=constants._SELECT, OTHER="", FEATURETYPE="", LIMIT="",
STRAND="", ORDER_BY="", EXTRA="")
if other:
d['OTHER'] = other
if extra:
d['EXTRA'] = extra
# If `other` and `extra` take args (that is, they have "?" in them), then
# they should have been provided in `args`.
required_args = (d['EXTRA'] + d['OTHER']).count('?')
if len(args) != required_args:
raise ValueError('Not enough args (%s) for subquery' % args)
# Below, if a kwarg is specified, then we create sections of the query --
# appending to args as necessary.
#
# IMPORTANT: the order in which things are processed here is the same as
# the order of the placeholders in _QUERY. That is, we need to build the
# args in parallel with the query to avoid putting the wrong args in the
# wrong place.
if featuretype:
# Handle single or iterables of featuretypes.
#
# e.g., "featuretype = 'exon'"
#
# or, "featuretype IN ('exon', 'CDS')"
if isinstance(featuretype, six.string_types):
d['FEATURETYPE'] = "features.featuretype = ?"
args.append(featuretype)
else:
d['FEATURETYPE'] = (
"features.featuretype IN (%s)"
% (','.join(["?" for _ in featuretype]))
)
args.extend(featuretype)
if limit:
# Restrict to a genomic region. Makes use of the UCSC binning strategy
# for performance.
#
# `limit` is a string or a tuple of (chrom, start, stop)
#
# e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
if isinstance(limit, six.string_types):
seqid, startstop = limit.split(':')
start, end = startstop.split('-')
else:
seqid, start, end = limit
# Identify possible bins
_bins = bins.bins(int(start), int(end), one=False)
# Use different overlap conditions
if completely_within:
d['LIMIT'] = (
"features.seqid = ? AND features.start >= ? "
"AND features.end <= ?"
)
args.extend([seqid, start, end])
else:
d['LIMIT'] = (
"features.seqid = ? AND features.start <= ? "
"AND features.end >= ?"
)
# Note order (end, start)
args.extend([seqid, end, start])
# Add bin clause. See issue #45.
if len(_bins) < 900:
d['LIMIT'] += " AND features.bin IN (%s)" % (','.join(map(str, _bins)))
if strand:
# e.g., "strand = '+'"
d['STRAND'] = "features.strand = ?"
args.append(strand)
# TODO: implement file_order!
valid_order_by = constants._gffkeys_extra + ['file_order', 'length']
_order_by = []
if order_by:
# Default is essentially random order.
#
# e.g. "ORDER BY seqid, start DESC"
if isinstance(order_by, six.string_types):
_order_by.append(order_by)
else:
for k in order_by:
if k not in valid_order_by:
raise ValueError("%s not a valid order-by value in %s"
% (k, valid_order_by))
# There's no length field, so order by end - start
if k == 'length':
k = '(end - start)'
_order_by.append(k)
_order_by = ','.join(_order_by)
if reverse:
direction = 'DESC'
else:
direction = 'ASC'
d['ORDER_BY'] = 'ORDER BY %s %s' % (_order_by, direction)
# Ensure only one "WHERE" is included; the rest get "AND ". This is ugly.
where = False
if "where" in d['OTHER'].lower():
where = True
for i in ['EXTRA', 'FEATURETYPE', 'LIMIT', 'STRAND']:
if d[i]:
if not where:
d[i] = "WHERE " + d[i]
where = True
else:
d[i] = "AND " + d[i]
return _QUERY.format(**d), args | [
"\n Multi-purpose, bare-bones ORM function.\n\n This function composes queries given some commonly-used kwargs that can be\n passed to FeatureDB methods (like .parents(), .children(), .all_features(),\n .features_of_type()). It handles, in one place, things like restricting to\n featuretype, limiting to a genomic range, limiting to one strand, or\n returning results ordered by different criteria.\n\n Additional filtering/subsetting/sorting behavior should be added here.\n\n (Note: this ended up having better performance (and flexibility) than\n sqlalchemy)\n\n This function also provides support for additional JOINs etc (supplied via\n the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the\n `_QUERY` var below for the order in which they are used.\n\n For example, FeatureDB._relation uses `other` to supply the JOIN\n substatement, and that same method also uses `extra` to supply the\n \"relations.level = ?\" substatement (see the source for FeatureDB._relation\n for more details).\n\n `args` contains the arguments that will ultimately be supplied to the\n sqlite3.connection.execute function. It may be further populated below --\n for example, if strand=\"+\", then the query will include a strand clause,\n and the strand will be appended to the args.\n\n `args` can be pre-filled with args that are passed to `other` and `extra`.\n "
]
|
Please provide a description of the function:def _bin_from_dict(d):
try:
start = int(d['start'])
end = int(d['end'])
return bins.bins(start, end, one=True)
# e.g., if "."
except ValueError:
return None | [
"\n Given a dictionary yielded by the parser, return the genomic \"UCSC\" bin\n "
]
|
Please provide a description of the function:def _jsonify(x):
if isinstance(x, dict_class):
return json.dumps(x._d, separators=(',', ':'))
return json.dumps(x, separators=(',', ':')) | [
"Use most compact form of JSON"
]
|
Please provide a description of the function:def _unjsonify(x, isattributes=False):
if isattributes:
obj = json.loads(x)
return dict_class(obj)
return json.loads(x) | [
"Convert JSON string to an ordered defaultdict."
]
|
Please provide a description of the function:def _feature_to_fields(f, jsonify=True):
x = []
for k in constants._keys:
v = getattr(f, k)
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | [
"\n Convert feature to tuple, for faster sqlite3 import\n "
]
|
Please provide a description of the function:def _dict_to_fields(d, jsonify=True):
x = []
for k in constants._keys:
v = d[k]
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | [
"\n Convert dict to tuple, for faster sqlite3 import\n "
]
|
Please provide a description of the function:def merge_attributes(attr1, attr2):
new_d = copy.deepcopy(attr1)
new_d.update(attr2)
# all of attr2's key/value pairs just overwrote attr1's; fix that below
for k, v in new_d.items():
if not isinstance(v, list):
new_d[k] = [v]
for k, v in six.iteritems(attr1):
if k in attr2:
if not isinstance(v, list):
v = [v]
new_d[k].extend(v)
return dict((k, sorted(set(v))) for k, v in new_d.items()) | [
"\n Merges two attribute dictionaries into a single dictionary.\n\n Parameters\n ----------\n `attr1`, `attr2` : dict\n\n Returns\n -------\n dict\n "
]
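A tiny worked example of merge_attributes(), assuming the function is in scope; the attribute dictionaries are hypothetical. Scalar values are normalized to lists, and values for shared keys are combined, de-duplicated, and sorted:

    a1 = {'Name': ['geneA'], 'Alias': ['x']}
    a2 = {'Alias': ['y'], 'Note': 'curated'}
    print(merge_attributes(a1, a2))
    # expected roughly: {'Name': ['geneA'], 'Alias': ['x', 'y'], 'Note': ['curated']}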
|
Please provide a description of the function:def dialect_compare(dialect1, dialect2):
orig = set(dialect1.items())
new = set(dialect2.items())
return dict(
added=dict(list(new.difference(orig))),
removed=dict(list(orig.difference(new)))
) | [
"\n Compares two dialects.\n "
]
|
Please provide a description of the function:def sanitize_gff_db(db, gid_field="gid"):
def sanitized_iterator():
# Iterate through the database by each gene's records
for gene_recs in db.iter_by_parent_childs():
# The gene's ID
gene_id = gene_recs[0].id
for rec in gene_recs:
# Fixup coordinates if necessary
if rec.start > rec.stop:
rec.start, rec.stop = rec.stop, rec.start
# Add a gene id field to each gene's records
rec.attributes[gid_field] = [gene_id]
yield rec
# Return sanitized GFF database
sanitized_db = \
gffutils.create_db(sanitized_iterator(), ":memory:",
verbose=False)
return sanitized_db | [
"\n Sanitize given GFF db. Returns a sanitized GFF db.\n\n Sanitizing means:\n\n - Ensuring that start < stop for all features\n - Standardizing gene units by adding a 'gid' attribute\n that makes the file grep-able\n\n TODO: Do something with negative coordinates?\n "
]
|
Please provide a description of the function:def sanitize_gff_file(gff_fname,
in_memory=True,
in_place=False):
db = None
if is_gff_db(gff_fname):
# It's a database filename, so load it
db = gffutils.FeatureDB(gff_fname)
else:
# Need to create a database for file
if in_memory:
db = gffutils.create_db(gff_fname, ":memory:",
verbose=False)
else:
db = get_gff_db(gff_fname)
if in_place:
gff_out = gffwriter.GFFWriter(gff_fname,
in_place=in_place)
else:
gff_out = gffwriter.GFFWriter(sys.stdout)
sanitized_db = sanitize_gff_db(db)
for gene_rec in sanitized_db.all_features(featuretype="gene"):
gff_out.write_gene_recs(sanitized_db, gene_rec.id)
gff_out.close() | [
"\n Sanitize a GFF file.\n "
]
|
Please provide a description of the function:def is_gff_db(db_fname):
if not os.path.isfile(db_fname):
return False
if db_fname.endswith(".db"):
return True
return False | [
"\n Return True if the given filename is a GFF database.\n\n For now, rely on .db extension.\n "
]
|
Please provide a description of the function:def get_gff_db(gff_fname,
ext=".db"):
if not os.path.isfile(gff_fname):
# Not sure how we should deal with errors normally in
# gffutils -- Ryan?
raise ValueError("GFF %s does not exist." % (gff_fname))
candidate_db_fname = "%s.%s" % (gff_fname, ext)
if os.path.isfile(candidate_db_fname):
# Standard .db file found, so return it
return candidate_db_fname
# Otherwise, we need to create a temporary but non-deleted
# file to store the db in. It'll be up to the user
# of the function the delete the file when done.
## NOTE: Ryan must have a good scheme for dealing with this
## since pybedtools does something similar under the hood, i.e.
## creating temporary files as needed without over proliferation
db_fname = tempfile.NamedTemporaryFile(delete=False)
# Create the database for the gff file (suppress output
# when using function internally)
print("Creating db for %s" % (gff_fname))
t1 = time.time()
db = gffutils.create_db(gff_fname, db_fname.name,
merge_strategy="merge",
verbose=False)
t2 = time.time()
print(" - Took %.2f seconds" % (t2 - t1))
return db | [
"\n Get db for GFF file. If the database has a .db file,\n load that. Otherwise, create a named temporary file,\n serialize the db to that, and return the loaded database.\n "
]
|
Please provide a description of the function:def bins(start, stop, fmt='gff', one=True):
# For very large coordinates, return 1 which is "somewhere on the
# chromosome".
if start >= MAX_CHROM_SIZE or stop >= MAX_CHROM_SIZE:
if one:
return 1
else:
return set([1])
# Jump to highest resolution bin that will fit these coords (depending on
# whether we have a BED or GFF-style coordinate).
#
# Some GFF files include negative coords, which will throw off this
# calculation. If negative coords, then set the bin to the largest
# possible.
if start < 0:
if one:
return 1
else:
return set([1])
if stop < 0:
if one:
return 1
else:
return set([1])
start = (start - COORD_OFFSETS[fmt]) >> FIRST_SHIFT
stop = (stop) >> FIRST_SHIFT
# We always at least fit within the chrom, which is bin 1.
bins = set([1])
for offset in OFFSETS:
# Since we're going from smallest to largest bins, the first one where
# the feature's start and stop positions are both within the same bin
# is the smallest one these coords fit within.
if one:
if start == stop:
# Note that at this point, because of the bit-shifting, `start`
# is the number of bins (at this current level). So we need to
# add it to `offset` to get the actual bin ID.
return offset + start
# See the Fig 7 reproduction above to see why range().
bins.update(list(range(offset + start, offset + stop + 1)))
# Move to the next level (8x larger bin size; i.e., 2**NEXT_SHIFT
# larger bin size)
start >>= NEXT_SHIFT
stop >>= NEXT_SHIFT
return bins | [
"\n Uses the definition of a \"genomic bin\" described in Fig 7 of\n http://genome.cshlp.org/content/12/6/996.abstract.\n\n Parameters\n ----------\n one : boolean\n If `one=True` (default), then only return the smallest bin that\n completely contains these coordinates (useful for assigning a single\n bin).\n\n If `one=False`, then return the set of *all* bins that overlap these\n coordinates (useful for looking for features that could intersect)\n\n fmt : 'gff' | 'bed'\n This specifies 1-based start coords (gff) or 0-based start coords (bed)\n "
]
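A minimal sketch of the two modes of bins(); the import path is an assumption:

    from gffutils.bins import bins   # import path assumed

    # Smallest single bin that completely contains a 1-based GFF interval
    print(bins(5000, 8000, fmt='gff', one=True))

    # All bins that could overlap that interval -- what a range query checks
    print(sorted(bins(5000, 8000, fmt='gff', one=False)))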
|
Please provide a description of the function:def print_bin_sizes():
for i, offset in enumerate(OFFSETS):
binstart = offset
try:
binstop = OFFSETS[i + 1]
except IndexError:
binstop = binstart
bin_size = 2 ** (FIRST_SHIFT + (i * NEXT_SHIFT))
actual_size = bin_size
# nice formatting
bin_size, suffix = bin_size / 1024, 'Kb'
if bin_size >= 1024:
bin_size, suffix = bin_size / 1024, 'Mb'
if bin_size >= 1024:
bin_size, suffix = bin_size / 1024, 'Gb'
size = '(%s %s)' % (bin_size, suffix)
actual_size = '%s bp' % (actual_size)
print('level: {i:1}; bins {binstart:<4} to {binstop:<4}; '
'size: {actual_size:<12} {size:<6}'.format(**locals())) | [
"\n Useful for debugging: how large is each bin, and what are the bin IDs?\n "
]
|
Please provide a description of the function:def _reconstruct(keyvals, dialect, keep_order=False,
sort_attribute_values=False):
if not dialect:
raise AttributeStringError()
if not keyvals:
return ""
parts = []
# Re-encode when reconstructing attributes
if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3':
attributes = keyvals
else:
attributes = {}
for k, v in keyvals.items():
attributes[k] = []
for i in v:
attributes[k].append(''.join([quoter[j] for j in i]))
# May need to split multiple values into multiple key/val pairs
if dialect['repeated keys']:
items = []
for key, val in attributes.items():
if len(val) > 1:
for v in val:
items.append((key, [v]))
else:
items.append((key, val))
else:
items = list(attributes.items())
def sort_key(x):
# sort keys by their order in the dialect; anything not in there will
# be in arbitrary order at the end.
try:
return dialect['order'].index(x[0])
except ValueError:
return 1e6
if keep_order:
items.sort(key=sort_key)
for key, val in items:
# Multival sep is usually a comma:
if val:
if sort_attribute_values:
val = sorted(val)
val_str = dialect['multival separator'].join(val)
if val_str:
# Surround with quotes if needed
if dialect['quoted GFF2 values']:
val_str = '"%s"' % val_str
# Typically "=" for GFF3 or " " otherwise
part = dialect['keyval separator'].join([key, val_str])
else:
if dialect['fmt'] == 'gtf':
part = dialect['keyval separator'].join([key, '""'])
else:
part = key
parts.append(part)
# Typically ";" or "; "
parts_str = dialect['field separator'].join(parts)
# Sometimes need to add this
if dialect['trailing semicolon']:
parts_str += ';'
return parts_str | [
"\n Reconstructs the original attributes string according to the dialect.\n\n Parameters\n ==========\n keyvals : dict\n Attributes from a GFF/GTF feature\n\n dialect : dict\n Dialect containing info on how to reconstruct a string version of the\n attributes\n\n keep_order : bool\n If True, then perform sorting of attribute keys to ensure they are in\n the same order as those provided in the original file. Default is\n False, which saves time especially on large data sets.\n\n sort_attribute_values : bool\n If True, then sort values to ensure they will always be in the same\n order. Mostly only useful for testing; default is False.\n "
]
|
Please provide a description of the function:def _split_keyvals(keyval_str, dialect=None):
def _unquote_quals(quals, dialect):
if not constants.ignore_url_escape_characters and dialect['fmt'] == 'gff3':
for key, vals in quals.items():
unquoted = [urllib.parse.unquote(v) for v in vals]
quals[key] = unquoted
return quals
infer_dialect = False
if dialect is None:
# Make a copy of default dialect so it can be modified as needed
dialect = copy.copy(constants.dialect)
infer_dialect = True
from gffutils import feature
quals = feature.dict_class()
if not keyval_str:
return quals, dialect
# If a dialect was provided, then use that directly.
if not infer_dialect:
if dialect['trailing semicolon']:
keyval_str = keyval_str.rstrip(';')
parts = keyval_str.split(dialect['field separator'])
kvsep = dialect['keyval separator']
if dialect['leading semicolon']:
pieces = []
for p in parts:
if p and p[0] == ';':
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
if dialect['fmt'] == 'gff3':
key_vals = [p.split(kvsep) for p in parts]
else:
leadingsemicolon = dialect['leading semicolon']
pieces = []
for i, p in enumerate(parts):
if i == 0 and leadingsemicolon:
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
quoted = dialect['quoted GFF2 values']
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
try:
quals[key]
except KeyError:
quals[key] = []
if quoted:
if (len(val) > 0 and val[0] == '"' and val[-1] == '"'):
val = val[1:-1]
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
quals[key].extend(vals)
quals = _unquote_quals(quals, dialect)
return quals, dialect
# If we got here, then we need to infer the dialect....
#
# Reset the order to an empty list so that it will only be populated with
# keys that are found in the file.
dialect['order'] = []
# ensembl GTF has trailing semicolon
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
dialect['trailing semicolon'] = True
# GFF2/GTF has a semicolon with at least one space after it.
# Spaces can be on both sides (e.g. wormbase)
# GFF3 works with no spaces.
# So split on the first one we can recognize...
for sep in (' ; ', '; ', ';'):
parts = keyval_str.split(sep)
if len(parts) > 1:
dialect['field separator'] = sep
break
# Is it GFF3? They have key-vals separated by "="
if gff3_kw_pat.match(parts[0]):
key_vals = [p.split('=') for p in parts]
dialect['fmt'] = 'gff3'
dialect['keyval separator'] = '='
# Otherwise, key-vals separated by space. Key is first item.
else:
dialect['keyval separator'] = " "
pieces = []
for p in parts:
# Fix misplaced semicolons in keys in some GFF2 files
if p and p[0] == ';':
p = p[1:]
dialect['leading semicolon'] = True
pieces.append(p.strip().split(' '))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
# Pathological cases where values of a key have within them the key-val
# separator, e.g.,
# Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
# Is the key already in there?
if key in quals:
dialect['repeated keys'] = True
else:
quals[key] = []
# Remove quotes in GFF2
if len(val) > 0 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
dialect['quoted GFF2 values'] = True
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
if (len(vals) > 1) and dialect['repeated keys']:
raise AttributeStringError(
"Internally inconsistent attributes formatting: "
"some have repeated keys, some do not.")
quals[key].extend(vals)
# keep track of the order of keys
dialect['order'].append(key)
if (
(dialect['keyval separator'] == ' ') and
(dialect['quoted GFF2 values'])
):
dialect['fmt'] = 'gtf'
quals = _unquote_quals(quals, dialect)
return quals, dialect | [
"\n Given the string attributes field of a GFF-like line, split it into an\n attributes dictionary and a \"dialect\" dictionary which contains information\n needed to reconstruct the original string.\n\n Lots of logic here to handle all the corner cases.\n\n If `dialect` is None, then do all the logic to infer a dialect from this\n attribute string.\n\n Otherwise, use the provided dialect (and return it at the end).\n ",
"\n Handles the unquoting (decoding) of percent-encoded characters.\n\n See notes on encoding/decoding above.\n "
]
|
Please provide a description of the function:def create_db(data, dbfn, id_spec=None, force=False, verbose=False,
checklines=10, merge_strategy='error', transform=None,
gtf_transcript_key='transcript_id', gtf_gene_key='gene_id',
gtf_subfeature='exon', force_gff=False,
force_dialect_check=False, from_string=False, keep_order=False,
text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None,
pragmas=constants.default_pragmas, sort_attribute_values=False,
dialect=None, _keep_tempfiles=False, infer_gene_extent=True,
disable_infer_genes=False, disable_infer_transcripts=False,
**kwargs):
_locals = locals()
# Check if any older kwargs made it in
deprecation_handler(kwargs)
kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs)
# First construct an iterator so that we can identify the file format.
# DataIterator figures out what kind of data was provided (string of lines,
# filename, or iterable of Features) and checks `checklines` lines to
# identify the dialect.
iterator = iterators.DataIterator(**kwargs)
kwargs.update(**_locals)
if dialect is None:
dialect = iterator.dialect
# However, a side-effect of this is that if `data` was a generator, then
# we've just consumed `checklines` items (see
# iterators.BaseIterator.__init__, which calls iterators.peek).
#
# But it also chains those consumed items back onto the beginning, and the
    # result is available as iterator._iter.
    #
    # That's what we should be using now for `data`:
kwargs['data'] = iterator._iter
kwargs['directives'] = iterator.directives
# Since we've already checked lines, we don't want to do it again
kwargs['checklines'] = 0
if force_gff or (dialect['fmt'] == 'gff3'):
cls = _GFFDBCreator
id_spec = id_spec or 'ID'
add_kwargs = dict(
id_spec=id_spec,
)
elif dialect['fmt'] == 'gtf':
cls = _GTFDBCreator
id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'}
add_kwargs = dict(
transcript_key=gtf_transcript_key,
gene_key=gtf_gene_key,
subfeature=gtf_subfeature,
id_spec=id_spec,
)
kwargs.update(**add_kwargs)
kwargs['dialect'] = dialect
c = cls(**kwargs)
c.create()
if dbfn == ':memory:':
db = interface.FeatureDB(c.conn,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
else:
db = interface.FeatureDB(c,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
return db | [
"\n Create a database from a GFF or GTF file.\n\n For more details on when and how to use the kwargs below, see the examples\n in the online documentation (:ref:`examples`).\n\n Parameters\n ----------\n data : string or iterable\n\n If a string (and `from_string` is False), then `data` is the path to\n the original GFF or GTF file.\n\n If a string and `from_string` is True, then assume `data` is the actual\n data to use.\n\n Otherwise, it's an iterable of Feature objects.\n\n dbfn : string\n\n Path to the database that will be created. Can be the special string\n \":memory:\" to create an in-memory database.\n\n id_spec : string, list, dict, callable, or None\n\n This parameter guides what will be used as the primary key for the\n database, which in turn determines how you will access individual\n features by name from the database.\n\n If `id_spec=None`, then auto-increment primary keys based on the\n feature type (e.g., \"gene_1\", \"gene_2\"). This is also the fallback\n behavior for the other values below.\n\n If `id_spec` is a string, then look for this key in the attributes. If\n it exists, then use its value as the primary key, otherwise\n autoincrement based on the feature type. For many GFF3 files, \"ID\"\n usually works well.\n\n If `id_spec` is a list or tuple of keys, then check for each one in\n order, using the first one found. For GFF3, this might be [\"ID\",\n \"Name\"], which would use the ID if it exists, otherwise the Name,\n otherwise autoincrement based on the feature type.\n\n If `id_spec` is a dictionary, then it is a mapping of feature types to\n what should be used as the ID. For example, for GTF files, `{'gene':\n 'gene_id', 'transcript': 'transcript_id'}` may be useful. The values\n of this dictionary can also be a list, e.g., `{'gene': ['gene_id',\n 'geneID']}`\n\n If `id_spec` is a callable object, then it accepts a dictionary from\n the iterator and returns one of the following:\n\n * None (in which case the feature type will be auto-incremented)\n * string (which will be used as the primary key)\n * special string starting with \"autoincrement:X\", where \"X\" is\n a string that will be used for auto-incrementing. For example,\n if \"autoincrement:chr10\", then the first feature will be\n \"chr10_1\", the second \"chr10_2\", and so on.\n\n force : bool\n\n If `False` (default), then raise an exception if `dbfn` already exists.\n Use `force=True` to overwrite any existing databases.\n\n verbose : bool\n\n Report percent complete and other feedback on how the db creation is\n progressing.\n\n In order to report percent complete, the entire file needs to be read\n once to see how many items there are; for large files you may want to\n use `verbose=False` to avoid this.\n\n checklines : int\n\n Number of lines to check the dialect.\n\n merge_strategy : str\n One of {merge, create_unique, error, warning, replace}.\n\n This parameter specifies the behavior when two items have an identical\n primary key.\n\n Using `merge_strategy=\"merge\"`, then there will be a single entry in\n the database, but the attributes of all features with the same primary\n key will be merged.\n\n Using `merge_strategy=\"create_unique\"`, then the first entry will use\n the original primary key, but the second entry will have a unique,\n autoincremented primary key assigned to it\n\n Using `merge_strategy=\"error\"`, a :class:`gffutils.DuplicateID`\n exception will be raised. 
This means you will have to edit the file\n yourself to fix the duplicated IDs.\n\n Using `merge_strategy=\"warning\"`, a warning will be printed to the\n logger, and the duplicate feature will be skipped.\n\n Using `merge_strategy=\"replace\"` will replace the entire existing\n feature with the new feature.\n\n transform : callable\n\n Function (or other callable object) that accepts a `Feature` object and\n returns a (possibly modified) `Feature` object.\n\n gtf_transcript_key, gtf_gene_key : string\n\n Which attribute to use as the transcript ID and gene ID respectively\n for GTF files. Default is `transcript_id` and `gene_id` according to\n the GTF spec.\n\n gtf_subfeature : string\n\n Feature type to use as a \"gene component\" when inferring gene and\n transcript extents for GTF files. Default is `exon` according to the\n GTF spec.\n\n force_gff : bool\n If True, do not do automatic format detection -- only use GFF.\n\n force_dialect_check : bool\n If True, the dialect will be checkef for every feature (instead of just\n `checklines` features). This can be slow, but may be necessary for\n inconsistently-formatted input files.\n\n from_string : bool\n If True, then treat `data` as actual data (rather than the path to\n a file).\n\n keep_order : bool\n\n If True, all features returned from this instance will have the\n order of their attributes maintained. This can be turned on or off\n database-wide by setting the `keep_order` attribute or with this\n kwarg, or on a feature-by-feature basis by setting the `keep_order`\n attribute of an individual feature.\n\n Note that a single order of attributes will be used for all features.\n Specifically, the order will be determined by the order of attribute\n keys in the first `checklines` of the input data. See\n helpers._choose_dialect for more information on this.\n\n Default is False, since this includes a sorting step that can get\n time-consuming for many features.\n\n infer_gene_extent : bool\n DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and\n `disable_infer_genes` for more granular control.\n\n disable_infer_transcripts, disable_infer_genes : bool\n Only used for GTF files. By default -- and according to the GTF spec --\n we assume that there are no transcript or gene features in the file.\n gffutils then infers the extent of each transcript based on its\n constituent exons and infers the extent of each gene bases on its\n constituent transcripts.\n\n This default behavior is problematic if the input file already contains\n transcript or gene features (like recent GENCODE GTF files for human),\n since 1) the work to infer extents is unnecessary, and 2)\n trying to insert an inferred feature back into the database triggers\n gffutils' feature-merging routines, which can get time consuming.\n\n The solution is to use `disable_infer_transcripts=True` if your GTF\n already has transcripts in it, and/or `disable_infer_genes=True` if it\n already has genes in it. This can result in dramatic (100x) speedup.\n\n Prior to version 0.8.4, setting `infer_gene_extents=False` would\n disable both transcript and gene inference simultaneously. As of\n version 0.8.4, these argument allow more granular control.\n\n force_merge_fields : list\n If merge_strategy=\"merge\", then features will only be merged if their\n non-attribute values are identical (same chrom, source, start, stop,\n score, strand, phase). Using `force_merge_fields`, you can override\n this behavior to allow merges even when fields are different. 
This\n list can contain one or more of ['seqid', 'source', 'featuretype',\n 'score', 'strand', 'frame']. The resulting merged fields will be\n strings of comma-separated values. Note that 'start' and 'end' are not\n available, since these fields need to be integers.\n\n text_factory : callable\n Text factory to use for the sqlite3 database. See\n https://docs.python.org/2/library/\\\n sqlite3.html#sqlite3.Connection.text_factory\n for details. The default sqlite3.OptimizedUnicode will return Unicode\n objects only for non-ASCII data, and bytestrings otherwise.\n\n pragmas : dict\n Dictionary of pragmas used when creating the sqlite3 database. See\n http://www.sqlite.org/pragma.html for a list of available pragmas. The\n defaults are stored in constants.default_pragmas, which can be used as\n a template for supplying a custom dictionary.\n\n sort_attribute_values : bool\n All features returned from the database will have their attribute\n values sorted. Typically this is only useful for testing, since this\n can get time-consuming for large numbers of features.\n\n _keep_tempfiles : bool or string\n False by default to clean up intermediate tempfiles created during GTF\n import. If True, then keep these tempfile for testing or debugging.\n If string, then keep the tempfile for testing, but also use the string\n as the suffix fo the tempfile. This can be useful for testing in\n parallel environments.\n\n Returns\n -------\n New :class:`FeatureDB` object.\n "
]
|
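A minimal usage sketch for create_db, assuming a hypothetical GENCODE-style GTF that already contains gene and transcript lines (file names are made up):

import gffutils

db = gffutils.create_db(
    "annotation.gtf", dbfn="annotation.db",
    force=True,                       # overwrite any existing database
    keep_order=True,
    disable_infer_genes=True,         # the GTF already has gene features
    disable_infer_transcripts=True,   # ... and transcript features
)
for gene in db.features_of_type("gene"):
    print(gene.id, gene.start, gene.end)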
Please provide a description of the function:def _id_handler(self, f):
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype) | [
"\n Given a Feature from self.iterator, figure out what the ID should be.\n\n This uses `self.id_spec` identify the ID.\n "
]
|
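A sketch of a callable id_spec of the kind handled above: it prefers the Name attribute and otherwise falls back to per-seqid autoincrement IDs such as "chr1_1", "chr1_2" (file names hypothetical):

import gffutils

def name_or_autoincrement(feature):
    # Use the Name attribute when present, otherwise autoincrement per seqid.
    try:
        return feature.attributes["Name"][0]
    except (KeyError, IndexError):
        return "autoincrement:" + feature.seqid

db = gffutils.create_db("annotation.gff3", "annotation.db",
                        id_spec=name_or_autoincrement, force=True)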
Please provide a description of the function:def _do_merge(self, f, merge_strategy, add_duplicate=False):
if merge_strategy == 'error':
raise ValueError("Duplicate ID {0.id}".format(f))
if merge_strategy == 'warning':
logger.warning(
"Duplicate lines in file for id '{0.id}'; "
"ignoring all but the first".format(f))
return None, merge_strategy
elif merge_strategy == 'replace':
return f, merge_strategy
# This is by far the most complicated strategy.
elif merge_strategy == 'merge':
# Recall that if we made it to this method, there was at least one
# ID collision.
# This will eventually contain the features that match ID AND that
# match non-attribute fields like start, stop, strand, etc.
features_to_merge = []
# Iterate through all features that have the same ID according to
# the id_spec provided.
if self.verbose == "debug":
logger.debug('candidates with same idspec: %s'
% ([i.id for i in self._candidate_merges(f)]))
# If force_merge_fields was provided, don't pay attention to these
# fields if they're different. We are assuming attributes will be
# different, hence the [:-1]
_gffkeys_to_check = list(
set(constants._gffkeys[:-1])
.difference(self.force_merge_fields))
for existing_feature in self._candidate_merges(f):
# Check other GFF fields (if not specified in
# self.force_merge_fields) to make sure they match.
other_attributes_same = True
for k in _gffkeys_to_check:
if getattr(existing_feature, k) != getattr(f, k):
other_attributes_same = False
break
if other_attributes_same:
# All the other GFF fields match. So this existing feature
# should be merged.
features_to_merge.append(existing_feature)
if self.verbose == 'debug':
logger.debug(
'same attributes between:\nexisting: %s'
'\nthis : %s'
% (existing_feature, f))
else:
# The existing feature's GFF fields don't match, so don't
# append anything.
if self.verbose == 'debug':
logger.debug(
'different attributes between:\nexisting: %s\n'
'this : %s'
% (existing_feature, f))
if (len(features_to_merge) == 0):
# No merge candidates found, so we should make a new ID for
# this feature. This can happen when idspecs match, but other
# fields (like start/stop) are different. Call this method
# again, but using the "create_unique" strategy, and then
# record the newly-created ID in the duplicates table.
orig_id = f.id
uniqued_feature, merge_strategy = self._do_merge(
f, merge_strategy='create_unique')
self._add_duplicate(orig_id, uniqued_feature.id)
return uniqued_feature, merge_strategy
# Whoo! Found some candidates to merge.
else:
if self.verbose == 'debug':
logger.debug('num candidates: %s' % len(features_to_merge))
# This is the attributes dictionary we'll be modifying.
merged_attributes = copy.deepcopy(f.attributes)
# Keep track of non-attribute fields (this will be an empty
# dict if no force_merge_fields)
final_fields = dict(
[(field, set([getattr(f, field)]))
for field in self.force_merge_fields])
# Update the attributes
for existing_feature in features_to_merge:
if self.verbose == 'debug':
logger.debug(
'\nmerging\n\n%s\n%s\n' % (f, existing_feature))
for k in existing_feature.attributes.keys():
v = merged_attributes.setdefault(k, [])
v.extend(existing_feature[k])
merged_attributes[k] = v
# Update the set of non-attribute fields found so far
for field in self.force_merge_fields:
final_fields[field].update(
[getattr(existing_feature, field)])
# Set the merged attributes
for k, v in merged_attributes.items():
merged_attributes[k] = list(set(v))
existing_feature.attributes = merged_attributes
# Set the final merged non-attributes
for k, v in final_fields.items():
setattr(existing_feature, k, ','.join(sorted(map(str, v))))
if self.verbose == 'debug':
logger.debug('\nMERGED:\n%s' % existing_feature)
return existing_feature, merge_strategy
elif merge_strategy == 'create_unique':
f.id = self._increment_featuretype_autoid(f.id)
return f, merge_strategy
else:
raise ValueError("Invalid merge strategy '%s'"
% (merge_strategy)) | [
"\n Different merge strategies upon name conflicts.\n\n \"error\":\n Raise error\n\n \"warning\"\n Log a warning\n\n \"merge\":\n Combine old and new attributes -- but only if everything else\n matches; otherwise error. This can be slow, but is thorough.\n\n \"create_unique\":\n Autoincrement based on the ID, always creating a new ID.\n\n \"replace\":\n Replaces existing database feature with `f`.\n "
]
|
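A sketch of the "merge" strategy combined with force_merge_fields, as handled above: duplicate IDs whose source fields differ are still merged, and the merged feature's source becomes a sorted, comma-separated union (file names hypothetical):

import gffutils

db = gffutils.create_db(
    "annotation.gff3", "merged.db",
    merge_strategy="merge",
    force_merge_fields=["source"],   # allow merging across differing sources
    force=True,
)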
Please provide a description of the function:def _add_duplicate(self, idspecid, newid):
c = self.conn.cursor()
try:
c.execute(
'''
INSERT INTO duplicates
(idspecid, newid)
VALUES (?, ?)''',
(idspecid, newid))
except sqlite3.ProgrammingError:
c.execute(
'''
INSERT INTO duplicates
(idspecid, newid)
VALUES (?, ?)''',
(idspecid.decode(self.default_encoding),
newid.decode(self.default_encoding))
)
if self.verbose == 'debug':
logger.debug('added id=%s; new=%s' % (idspecid, newid))
self.conn.commit() | [
"\n Adds a duplicate ID (as identified by id_spec) and its new ID to the\n duplicates table so that they can be later searched for merging.\n\n Parameters\n ----------\n newid : str\n The primary key used in the features table\n\n idspecid : str\n The ID identified by id_spec\n "
]
|
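A sketch of inspecting the duplicates table that _add_duplicate populates, using plain sqlite3 against an already-built database (the path is hypothetical):

import sqlite3

conn = sqlite3.connect("annotation.db")
for idspecid, newid in conn.execute("SELECT idspecid, newid FROM duplicates"):
    print(idspecid, "->", newid)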
Please provide a description of the function:def _candidate_merges(self, f):
candidates = [self._get_feature(f.id)]
c = self.conn.cursor()
results = c.execute(
constants._SELECT + '''
JOIN duplicates ON
duplicates.newid = features.id WHERE duplicates.idspecid = ?''',
(f.id,)
)
for i in results:
candidates.append(
feature.Feature(dialect=self.iterator.dialect, **i))
return list(set(candidates)) | [
"\n Identifies those features that originally had the same ID as `f`\n (according to the id_spec), but were modified because of duplicate\n IDs.\n "
]
|
Please provide a description of the function:def _init_tables(self):
c = self.conn.cursor()
v = sqlite3.sqlite_version_info
self.set_pragmas(self.pragmas)
c.executescript(constants.SCHEMA)
self.conn.commit() | [
"\n Table creation\n "
]
|
Please provide a description of the function:def _finalize(self):
c = self.conn.cursor()
directives = self.directives + self.iterator.directives
c.executemany('''
INSERT INTO directives VALUES (?)
''', ((i,) for i in directives))
c.execute(
'''
INSERT INTO meta (version, dialect)
VALUES (:version, :dialect)''',
dict(version=version.version,
dialect=helpers._jsonify(self.iterator.dialect))
)
c.executemany(
'''
INSERT OR REPLACE INTO autoincrements VALUES (?, ?)
''', list(self._autoincrements.items()))
# These indexes are *well* worth the effort and extra storage: over
# 500x speedup on code like this:
#
# genes = []
# for i in db.features_of_type('snoRNA'):
# for k in db.parents(i, level=1, featuretype='gene'):
# genes.append(k.id)
#
logger.info("Creating relations(parent) index")
c.execute('DROP INDEX IF EXISTS relationsparent')
c.execute('CREATE INDEX relationsparent ON relations (parent)')
logger.info("Creating relations(child) index")
c.execute('DROP INDEX IF EXISTS relationschild')
c.execute('CREATE INDEX relationschild ON relations (child)')
logger.info("Creating features(featuretype) index")
c.execute('DROP INDEX IF EXISTS featuretype')
c.execute('CREATE INDEX featuretype ON features (featuretype)')
logger.info("Creating features (seqid, start, end) index")
c.execute('DROP INDEX IF EXISTS seqidstartend')
c.execute('CREATE INDEX seqidstartend ON features (seqid, start, end)')
logger.info("Creating features (seqid, start, end, strand) index")
c.execute('DROP INDEX IF EXISTS seqidstartendstrand')
c.execute('CREATE INDEX seqidstartendstrand ON features (seqid, start, end, strand)')
# speeds computation 1000x in some cases
logger.info("Running ANALYSE features")
c.execute('ANALYZE features')
self.conn.commit()
self.warnings = self.iterator.warnings | [
"\n Various last-minute stuff to perform after file has been parsed and\n imported.\n\n In general, if you'll be adding stuff to the meta table, do it here.\n "
]
|
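A sketch of checking that the indexes created in _finalize exist, again using plain sqlite3 against a built database (path hypothetical; sqlite's own automatic indexes will also be listed):

import sqlite3

conn = sqlite3.connect("annotation.db")
for (name,) in conn.execute(
        "SELECT name FROM sqlite_master WHERE type = 'index' ORDER BY name"):
    print(name)   # expect relationsparent, relationschild, featuretype, ...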
Please provide a description of the function:def create(self):
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | [
"\n Calls various methods sequentially in order to fully build the\n database.\n "
]
|
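A sketch of the template-method contract that create() implies. The base-class name and import location (_DBCreator in gffutils.create) are assumptions, and the method bodies are placeholders:

from gffutils.create import _DBCreator   # assumed location of the base class

class _MyDBCreator(_DBCreator):           # hypothetical subclass
    def _populate_from_lines(self, lines):
        # insert each parsed Feature into the features table
        ...

    def _update_relations(self):
        # fill the relations table with parent/child links
        ...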