Code | Summary
---|---
Please provide a description of the function:def lss(inlist):
ss = 0
for item in inlist:
ss = ss + item*item
return ss | [
"\nSquares each value in the passed list, adds up these squares and\nreturns the result.\n\nUsage: lss(inlist)\n"
] |
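A quick doctest-style check of the summing behavior described above:

>>> lss([1, 2, 3])
14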
Please provide a description of the function:def lsummult (list1,list2):
if len(list1) != len(list2):
raise ValueError("Lists not equal length in summult.")
s = 0
for item1,item2 in pstat.abut(list1,list2):
s = s + item1*item2
return s | [
"\nMultiplies elements in list1 and list2, element by element, and\nreturns the sum of all resulting multiplications. Must provide equal\nlength lists.\n\nUsage: lsummult(list1,list2)\n"
] |
Please provide a description of the function:def lsumdiffsquared(x,y):
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds | [
"\nTakes pairwise differences of the values in lists x and y, squares\nthese differences, and returns the sum of these squares.\n\nUsage: lsumdiffsquared(x,y)\nReturns: sum[(x[i]-y[i])**2]\n"
] |
Please provide a description of the function:def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
suffix = '' # for *s after the p-value
try:
x = prob.shape
prob = prob[0]
except:
pass
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['Name','N','Mean','SD','Min','Max']]
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if not isinstance(fname, str) or len(fname)==0:
print()
print(statname)
print()
pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
print()
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl,fname,'a')
file = open(fname,'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
file.close()
return None | [
"\nPrints or write to a file stats for two groups, using the name, n,\nmean, sterr, min and max for each group, as well as the statistic name,\nits value, and the associated p-value.\n\nUsage: outputpairedstats(fname,writemode,\n name1,n1,mean1,stderr1,min1,max1,\n name2,n2,mean2,stderr2,min2,max2,\n statname,stat,prob)\nReturns: None\n"
] |
Please provide a description of the function:def GeneReader( fh, format='gff' ):
known_formats = ( 'gff', 'gtf', 'bed')
if format not in known_formats:
print('%s format not in %s' % (format, ",".join( known_formats )), file=sys.stderr)
raise Exception('?')
if format == 'bed':
for line in fh:
f = line.strip().split()
chrom = f[0]
chrom_start = int(f[1])
name = f[4]
strand = f[5]
cdsStart = int(f[6])
cdsEnd = int(f[7])
blockCount = int(f[9])
blockSizes = [ int(i) for i in f[10].strip(',').split(',') ]
blockStarts = [ chrom_start + int(i) for i in f[11].strip(',').split(',') ]
# grab cdsStart - cdsEnd
gene_exons = []
for base,offset in zip( blockStarts, blockSizes ):
exon_start = base
exon_end = base+offset
gene_exons.append( (exon_start, exon_end) )
yield chrom, strand, gene_exons, name
genelist = {}
grouplist = []
if format == 'gff' or format == 'gtf':
for line in fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len( fields ) < 9: continue
# fields
chrom = fields[0]
ex_st = int( fields[3] ) - 1 # make zero-centered
ex_end = int( fields[4] ) #+ 1 # make exclusive
strand = fields[6]
if format == 'gtf':
group = fields[8].split(';')[0]
else:
group = fields[8]
if group not in grouplist: grouplist.append( group )
if group not in genelist:
genelist[group] = (chrom, strand, [])
exons_i = 2
genelist[group][exons_i].append( ( ex_st, ex_end ) )
sp = lambda a,b: cmp( a[0], b[0] )
#for gene in genelist.values():
for gene in grouplist:
chrom, strand, gene_exons = genelist[ gene ]
gene_exons = bitset_union( gene_exons )
yield chrom, strand, gene_exons, gene | [
" yield chrom, strand, gene_exons, name "
] |
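A short usage sketch for the BED branch above, assuming a BED12 file handle (the file name is a placeholder):

with open('genes.bed') as fh:
    for chrom, strand, gene_exons, name in GeneReader(fh, format='bed'):
        print(name, chrom, strand, len(gene_exons))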
Please provide a description of the function:def CDSReader( fh, format='gff' ):
known_formats = ( 'gff', 'gtf', 'bed')
if format not in known_formats:
print('%s format not in %s' % (format, ",".join( known_formats )), file=sys.stderr)
raise Exception('?')
if format == 'bed':
for line in fh:
f = line.strip().split()
chrom = f[0]
chrom_start = int(f[1])
name = f[4]
strand = f[5]
cdsStart = int(f[6])
cdsEnd = int(f[7])
blockCount = int(f[9])
blockSizes = [ int(i) for i in f[10].strip(',').split(',') ]
blockStarts = [ chrom_start + int(i) for i in f[11].strip(',').split(',') ]
# grab cdsStart - cdsEnd
cds_exons = []
cds_seq = ''
genome_seq_index = []
for base,offset in zip( blockStarts, blockSizes ):
if (base + offset) < cdsStart: continue
if base > cdsEnd: continue
exon_start = max( base, cdsStart )
exon_end = min( base+offset, cdsEnd )
cds_exons.append( (exon_start, exon_end) )
yield chrom, strand, cds_exons, name
genelist = {}
grouplist = []
if format == 'gff' or format == 'gtf':
for line in fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len( fields ) < 9: continue
if fields[2] not in ('CDS', 'stop_codon', 'start_codon'): continue
# fields
chrom = fields[0]
ex_st = int( fields[3] ) - 1 # make zero-centered
ex_end = int( fields[4] ) #+ 1 # make exclusive
strand = fields[6]
if format == 'gtf':
group = fields[8].split(';')[0]
else:
group = fields[8]
if group not in grouplist: grouplist.append( group )
if group not in genelist:
genelist[group] = (chrom, strand, [])
genelist[group][2].append( ( ex_st, ex_end ) )
sp = lambda a,b: cmp( a[0], b[0] )
#for gene in genelist.values():
for gene in grouplist:
chrom, strand, cds_exons = genelist[ gene ]
seqlen = sum([ a[1]-a[0] for a in cds_exons ])
overhang = seqlen % 3
if overhang > 0:
#print >>sys.stderr, "adjusting ", gene
if strand == '+':
cds_exons[-1] = ( cds_exons[-1][0], cds_exons[-1][1] - overhang )
else:
cds_exons[0] = ( cds_exons[0][0] + overhang, cds_exons[0][1] )
cds_exons = bitset_union( cds_exons )
yield chrom, strand, cds_exons, gene | [
" yield chrom, strand, cds_exons, name "
] |
Please provide a description of the function:def FeatureReader( fh, format='gff', alt_introns_subtract="exons", gtf_parse=None):
known_formats = ( 'gff', 'gtf', 'bed')
if format not in known_formats:
print('%s format not in %s' % (format, ",".join( known_formats )), file=sys.stderr)
raise Exception('?')
if format == 'bed':
for line in fh:
f = line.strip().split()
chrom = f[0]
chrom_start = int(f[1])
name = f[4]
strand = f[5]
cdsStart = int(f[6])
cdsEnd = int(f[7])
blockCount = int(f[9])
blockSizes = [ int(i) for i in f[10].strip(',').split(',') ]
blockStarts = [ chrom_start + int(i) for i in f[11].strip(',').split(',') ]
# grab cdsStart - cdsEnd
cds_exons = []
exons = []
cds_seq = ''
genome_seq_index = []
for base,offset in zip( blockStarts, blockSizes ):
if (base + offset) < cdsStart: continue
if base > cdsEnd: continue
# exons
exon_start = base
exon_end = base+offset
exons.append( (exon_start, exon_end) )
# cds exons
exon_start = max( base, cdsStart )
exon_end = min( base+offset, cdsEnd )
cds_exons.append( (exon_start, exon_end) )
cds_exons = bitset_union( cds_exons )
exons = bitset_union( exons )
introns = bitset_complement( exons )
yield chrom, strand, cds_exons, introns, exons, name
genelist = {}
grouplist = []
if format == 'gff' or format == 'gtf':
for line in fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len( fields ) < 9: continue
# fields
chrom = fields[0]
ex_st = int( fields[3] ) - 1 # make zero-centered
ex_end = int( fields[4] ) #+ 1 # make exclusive
strand = fields[6]
if format == 'gtf':
if not gtf_parse:
group = fields[8].split(';')[0]
else:
group = gtf_parse( fields[8] )
else:
group = fields[8]
# Results are listed in the same order as encountered
if group not in grouplist: grouplist.append( group )
if group not in genelist:
# chrom, strand, cds_exons, introns, exons, cds_start, cds_end
genelist[group] = [chrom, strand, [], [], [], None, None]
if fields[2] == 'exon':
genelist[group][4].append( ( ex_st, ex_end ) )
elif fields[2] in ('CDS', 'stop_codon', 'start_codon'):
genelist[group][2].append( ( ex_st, ex_end ) )
if fields[2] == 'start_codon':
if strand == '+': genelist[group][5] = ex_st
else: genelist[group][5] = ex_end
if fields[2] == 'stop_codon':
if strand == '+': genelist[group][5] = ex_end
else: genelist[group][5] = ex_st
elif fields[2] == 'intron':
genelist[group][3].append( ( ex_st, ex_end ) )
for gene in grouplist:
chrom, strand, cds_exons, introns, exons, cds_start, cds_end = genelist[ gene ]
cds_exons = bitset_union( cds_exons )
exons = bitset_union( exons )
# assure that cds exons were within the cds range
if cds_start is not None and cds_end is not None:
if strand == '+':
cds_exons = bitset_intersect( cds_exons, [(cds_start,cds_end)] )
else:
cds_exons = bitset_intersect( cds_exons, [(cds_end,cds_start)] )
# assure that introns are non-overlapping with themselves or exons
if alt_introns_subtract:
if alt_introns_subtract == 'exons':
introns = bitset_subtract( introns, exons )
if alt_introns_subtract == 'cds_exons':
introns = bitset_subtract( introns, cds_exons )
else: introns = bitset_union( introns )
# assure CDS is a multiple of 3, trim from last exon if necessary
seqlen = sum([ a[1]-a[0] for a in cds_exons ])
overhang = seqlen % 3
if overhang > 0:
if strand == '+':
cds_exons[-1] = ( cds_exons[-1][0], cds_exons[-1][1] - overhang )
else:
cds_exons[0] = ( cds_exons[0][0] + overhang, cds_exons[0][1] )
yield chrom, strand, cds_exons, introns, exons, gene | [
" \n yield chrom, strand, cds_exons, introns, exons, name\n\n gtf_parse Example:\n # parse gene_id from transcript_id \"AC073130.2-001\"; gene_id \"TES\";\n gene_name = lambda s: s.split(';')[1].split()[1].strip('\"')\n\n for chrom, strand, cds_exons, introns, exons, name in FeatureReader( sys.stdin, format='gtf', gtf_parse=gene_name )\n "
] |
Please provide a description of the function:def throw_random_gap_list( lengths, mask, save_interval_func, allow_overlap=False ):
# Use mask to find the gaps; gaps is a list of (length,start,end)
lengths = [length for length in lengths if length > 0]
min_length = min( lengths )
gaps = []
start = end = 0
while 1:
start = mask.next_clear( end )
if start == mask.size: break
end = mask.next_set( start )
if end-start >= min_length:
gaps.append( ( end-start, start, None ) )
# Sort (long regions first)
gaps.sort()
gaps.reverse()
# Throw
throw_random_private( lengths, gaps, save_interval_func, allow_overlap, three_args=False ) | [
"\n Generates a set of non-overlapping random intervals from a length \n distribution.\n \n `lengths`: list containing the length of each interval to be generated.\n We expect this to be sorted by decreasing length to minimize\n the chance of failure (MaxtriesException) and for some\n performance gains when allow_overlap==True and there are\n duplicate lengths\n `mask`: a BitSet in which set bits represent regions not to place \n intervals. The size of the region is also determined from the\n mask.\n "
] |
Please provide a description of the function:def throw_random_intervals( lengths, regions, save_interval_func=None, allow_overlap=False ):
# Copy regions
regions = [( x[1]-x[0], x[0], x ) for x in regions]
# Sort (long regions first)
regions.sort()
regions.reverse()
# Throw
if (save_interval_func != None):
throw_random_private( lengths, regions, save_interval_func, allow_overlap )
return
else:
intervals = []
save_interval_func = lambda s, e, rgn: intervals.append( overwrite_start_end ( s, e, rgn ) )
throw_random_private( lengths, regions, save_interval_func, allow_overlap )
return intervals | [
"\n Generates a set of non-overlapping random intervals from a length \n distribution.\n \n `lengths`: list containing the length of each interval to be generated.\n We expect this to be sorted by decreasing length to minimize\n the chance of failure (MaxtriesException) and for some\n performance gains when allow_overlap==True and there are\n duplicate lengths.\n `regions`: A list of regions in which intervals can be placed. Elements\n are tuples or lists of the form (start, end, ...), where ...\n indicates any number of items (including zero).\n `save_interval_func`: A function accepting three arguments which will be \n passed the (start,stop,region) for each generated \n interval, where region is an entry in the regions\n list. If this is None, the generated intervals will\n be returned as a list of elements copied from the\n region with start and end modified.\n "
] |
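A minimal usage sketch: placing two length-10 intervals in a single 100 bp region. Region tuples follow the (start, end, ...) convention from the docstring; `overwrite_start_end` is assumed to be the module helper that copies a region record with new endpoints:

placed = throw_random_intervals([10, 10], [(0, 100)])
# placed holds two (start, end)-style records copied from (0, 100),
# non-overlapping because allow_overlap defaults to False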
Please provide a description of the function:def throw_random_private( lengths, regions, save_interval_func, allow_overlap=False, three_args=True ):
# Implementation:
# We keep a list of the regions, sorted from largest to smallest. We then
# place each length by following steps:
# (1) construct a candidate counts array (cc array)
# (2) choose a candidate at random
# (3) find region containing that candidate
# (4) map candidate to position in that region
# (5) split region if not allowing overlaps
# (6) report placed segment
#
# The cc array is only constructed if there's a change (different length
# to place, or the region list has changed). It contains, for each
# region, the total number of number of candidate positions in regions
# *preceding* it in the region list:
# cc[i] = sum over k in 0..(i-1) of (length[k] - L + 1)
# where L is the length being thrown.
# At the same time, we determine the total number of candidates (the total
# number of places the current length can be placed) and the index range
# of regions into which the length will fit.
#
# example:
# for L = 20
# i = 0 1 2 3 4 5 6 7 8 9
# length[i] = 96 66 56 50 48 40 29 17 11 8
# cc[i] = 0 77 124 161 192 221 242 X X X
# candidates = 252
# lo_rgn = 0
# hi_rgn = 6
#
# The candidate is chosen in (0..candidates-1). The candidate counts
# array allows us to do a binary search to locate the region that holds that
# candidate. Continuing the example above, we choose a random candidate
# s in (0..251). If s happens to be in (124..160), it will be mapped to
# region 2 at start position s-124.
#
# During the binary search, if we are looking at region 3, if s < cc[3]
# then the desired region is region 2 or lower. Otherwise it is region 3 or
# higher.
min_length = min( lengths )
prev_length = None # (force initial cc array construction)
cc = [0] * (len( regions ) + len(lengths) - 1)
num_thrown = 0
for length in lengths:
# construct cc array (only needed if length has changed or region list has
# changed)
if length != prev_length:
prev_length = length
assert len( cc ) >= len( regions )
candidates = 0
hi_rgn = 0
for region in regions:
rgn_len = region[0]
if rgn_len < length:
break
cc[hi_rgn] = candidates
candidates += rgn_len - length + 1
hi_rgn += 1
if candidates == 0:
raise MaxtriesException( "No region can fit an interval of length %d (we threw %d of %d)" \
% ( length, num_thrown,len( lengths ) ) )
hi_rgn -= 1
# Select a candidate
s = random.randrange( candidates )
#..
#..for ix in range( len( regions ) ):
#.. region = regions[ix]
#.. if ix <= hi_rgn: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], cc[ix] )
#.. else: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], "X" )
#..print "s = %s (of %s candidates)" % ( s, candidates )
# Locate region containing that candidate, by binary search
lo = 0
hi = hi_rgn
while hi > lo:
mid = (lo + hi + 1) // 2 # (we round up to prevent infinite loop; integer division)
if s < cc[mid]: hi = mid-1 # (s < num candidates from 0..mid-1)
else: lo = mid # (s >= num candidates from 0..mid-1)
s -= cc[lo]
# If we are not allowing overlaps we will remove the placed interval
# from the region list
if allow_overlap:
rgn_length, rgn_start, rgn_extra = regions[lo]
else:
# Remove the chosen region and split
rgn_length, rgn_start, rgn_extra = regions.pop( lo )
rgn_end = rgn_start + rgn_length
assert s >= 0
assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % ( rgn_start, s, length, rgn_start + s + length, rgn_end )
regions.reverse()
if s >= min_length:
bisect.insort( regions, ( s, rgn_start, rgn_extra ) )
if s + length <= rgn_length - min_length:
bisect.insort( regions, ( rgn_length - ( s + length ), rgn_start + s + length, rgn_extra ) )
regions.reverse()
prev_length = None # (force cc array construction)
# Save the new interval
if (three_args):
save_interval_func( rgn_start + s, rgn_start + s + length, rgn_extra )
else:
save_interval_func( rgn_start + s, rgn_start + s + length )
num_thrown += 1 | [
"\n (Internal function; we expect calls only through the interface functions\n above)\n \n `lengths`: A list containing the length of each interval to be generated.\n `regions`: A list of regions in which intervals can be placed, sorted by\n decreasing length. Elements are triples of the form (length,\n start, extra), This list CAN BE MODIFIED by this function.\n `save_interval_func`: A function accepting three arguments which will be \n passed the (start,stop,extra) for each generated \n interval.\n "
] |
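A standalone sketch of the candidate-count array and binary search described in the implementation comment above, reproducing its worked example (this mirrors the logic; it is not the library function itself):

import bisect

region_lengths = [96, 66, 56, 50, 48, 40, 29, 17, 11, 8]  # sorted longest first
L = 20
cc, candidates = [], 0
for rgn_len in region_lengths:
    if rgn_len < L:
        break  # regions are sorted, so nothing later can fit length L
    cc.append(candidates)
    candidates += rgn_len - L + 1
assert cc == [0, 77, 124, 161, 192, 221, 242] and candidates == 252
# candidate s = 130 falls in (124..160), i.e. region 2 at start offset 6
s = 130
lo = bisect.bisect_right(cc, s) - 1
assert lo == 2 and s - cc[lo] == 6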
Please provide a description of the function:def get(self, start, length):
# Check parameters
assert length >= 0, "Length must be non-negative (got %d)" % length
assert start >= 0,"Start must be non-negative (got %d)" % start
assert start + length <= self.length, \
"Interval beyond end of sequence (%s..%s > %s)" % ( start, start + length, self.length )
# Fetch sequence and reverse complement if necesary
if not self.revcomp:
return self.raw_fetch( start, length )
if self.revcomp == "-3'":
return self.reverse_complement(self.raw_fetch(start,length))
assert self.revcomp == "-5'", "unrecognized reverse complement scheme"
start = self.length - (start+length)
return self.reverse_complement(self.raw_fetch(start,length)) | [
"\n Fetch subsequence starting at position `start` with length `length`. \n This method is picky about parameters, the requested interval must \n have non-negative length and fit entirely inside the NIB sequence,\n the returned string will contain exactly 'length' characters, or an\n AssertionError will be generated.\n "
] |
Please provide a description of the function:def read_scoring_scheme( f, gap_open, gap_extend, gap1="-", gap2=None, **kwargs ):
close_it = False
if (type(f) == str):
f = open(f,"rt")
close_it = True
ss = build_scoring_scheme("".join([line for line in f]),gap_open, gap_extend, gap1=gap1, gap2=gap2, **kwargs)
if (close_it):
f.close()
return ss | [
"\n Initialize scoring scheme from a file containint a blastz style text blob.\n f can be either a file or the name of a file.\n "
] |
Please provide a description of the function:def build_scoring_scheme( s, gap_open, gap_extend, gap1="-", gap2=None, **kwargs ):
# perform initial parse to determine alphabets and locate scores
bad_matrix = ValueError( "invalid scoring matrix" ) # must be an exception instance to be raisable
s = s.rstrip( "\n" )
lines = s.split( "\n" )
rows = []
symbols2 = lines.pop(0).split()
symbols1 = None
rows_have_syms = False
a_la_blastz = True
for i, line in enumerate( lines ):
row_scores = line.split()
if len( row_scores ) == len( symbols2 ): # blastz-style row
if symbols1 == None:
if len( lines ) != len( symbols2 ):
raise bad_matrix
symbols1 = symbols2
elif (rows_have_syms):
raise bad_matrix
elif len( row_scores ) == len( symbols2 ) + 1: # row starts with symbol
if symbols1 == None:
symbols1 = []
rows_have_syms = True
a_la_blastz = False
elif not rows_have_syms:
raise bad_matrix
symbols1.append( row_scores.pop(0) )
else:
raise bad_matrix
rows.append( row_scores )
# convert alphabets from strings to characters
try:
alphabet1 = [sym_to_char( sym ) for sym in symbols1]
alphabet2 = [sym_to_char( sym ) for sym in symbols2]
except ValueError:
raise bad_matrix
if (alphabet1 != symbols1) or (alphabet2 != symbols2):
a_la_blastz = False
if a_la_blastz:
alphabet1 = [ch.upper() for ch in alphabet1]
alphabet2 = [ch.upper() for ch in alphabet2]
# decide if rows and/or columns should reflect case
if a_la_blastz:
foldcase1 = foldcase2 = True
else:
foldcase1 = "".join( alphabet1 ) == "ACGT"
foldcase2 = "".join( alphabet2 ) == "ACGT"
# create appropriately sized matrix
text1_range = text2_range = 128
if ord( max( alphabet1 ) ) >= 128: text1_range = 256
if ord( max( alphabet2 ) ) >= 128: text2_range = 256
typecode = int32
for i, row_scores in enumerate( rows ):
for j, score in enumerate( map( int_or_float, row_scores ) ):
if type( score ) == float:
typecode = float32
if type( gap_open ) == float:
typecode = float32
if type( gap_extend ) == float:
typecode = float32
ss = ScoringScheme( gap_open, gap_extend, alphabet1=alphabet1, alphabet2=alphabet2, gap1=gap1, gap2=gap2, text1_range=text1_range, text2_range=text2_range, typecode=typecode, **kwargs )
# fill matrix
for i, row_scores in enumerate( rows ):
for j, score in enumerate( map( int_or_float, row_scores ) ):
ss.set_score( ord( alphabet1[i] ), ord( alphabet2[j] ), score )
if foldcase1 and foldcase2:
ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j].upper() ), score )
ss.set_score( ord( alphabet1[i].upper() ), ord( alphabet2[j].lower() ), score )
ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j].lower() ), score )
elif foldcase1:
ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j] ), score )
elif foldcase2:
ss.set_score( ord( alphabet1[i] ), ord( alphabet2[j].lower() ), score )
return ss | [
"\n Initialize scoring scheme from a blastz style text blob, first line\n specifies the bases for each row/col, subsequent lines contain the\n corresponding scores. Slaw extensions allow for unusual and/or\n asymmetric alphabets. Symbols can be two digit hex, and each row\n begins with symbol. Note that a row corresponds to a symbol in text1\n and a column to a symbol in text2.\n\n examples:\n\n blastz slaw\n\n A C G T 01 02 A C G T\n 91 -114 -31 -123 01 200 -200 -50 100 -50 100\n -114 100 -125 -31 02 -200 200 100 -50 100 -50\n -31 -125 100 -114\n -123 -31 -114 91\n "
] |
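A usage sketch with the blastz-style matrix from the docstring; the gap penalties are illustrative values, not defaults taken from this module:

blob = ('   A    C    G    T\n'
        '  91 -114  -31 -123\n'
        '-114  100 -125  -31\n'
        ' -31 -125  100 -114\n'
        '-123  -31 -114   91\n')
ss = build_scoring_scheme(blob, gap_open=400, gap_extend=30)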
Please provide a description of the function:def accumulate_scores( scoring_scheme, text1, text2, skip_ref_gaps=False ):
if skip_ref_gaps:
rval = zeros( len( text1 ) - text1.count( scoring_scheme.gap1 ) )
else:
rval = zeros( len( text1 ) )
score = 0
pos = 0
last_gap_a = last_gap_b = False
for i in range( len( text1 ) ):
a = text1[i]
b = text2[i]
# Ignore gap/gap pair
if a == scoring_scheme.gap1 and b == scoring_scheme.gap2:
continue
# Gap in first species
elif a == scoring_scheme.gap1:
score -= scoring_scheme.gap_extend
if not last_gap_a:
score -= scoring_scheme.gap_open
last_gap_a = True
last_gap_b = False
# Gap in second species
elif b == scoring_scheme.gap2:
score -= scoring_scheme.gap_extend
if not last_gap_b:
score -= scoring_scheme.gap_open
last_gap_a = False
last_gap_b = True
# Aligned base
else:
score += scoring_scheme._get_score((ord(a),ord(b)))
last_gap_a = last_gap_b = False
if not( skip_ref_gaps ) or a != scoring_scheme.gap1:
rval[pos] = score
pos += 1
return rval | [
"\n Return cumulative scores for each position in alignment as a 1d array.\n \n If `skip_ref_gaps` is False positions in returned array correspond to each\n column in alignment, if True they correspond to each non-gap position (each\n base) in text1.\n "
] |
Please provide a description of the function:def shuffle_columns( a ):
mask = list( range( a.text_size ) ) # need a mutable sequence for random.shuffle
random.shuffle( mask )
for c in a.components:
c.text = ''.join( [ c.text[i] for i in mask ] ) | [
"Randomize the columns of an alignment"
] |
Please provide a description of the function:def slice_by_component( self, component_index, start, end ):
if type( component_index ) == type( 0 ):
ref = self.components[ component_index ]
elif type( component_index ) == type( "" ):
ref = self.get_component_by_src( component_index )
elif type( component_index ) == Component:
ref = component_index
else:
raise ValueError( "can't figure out what to do" )
start_col = ref.coord_to_col( start )
end_col = ref.coord_to_col( end )
if (ref.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col ) | [
"\n Return a slice of the alignment, corresponding to an coordinate interval in a specific component.\n\n component_index is one of\n an integer offset into the components list\n a string indicating the src of the desired component\n a component\n\n start and end are relative to the + strand, regardless of the component's strand.\n\n "
] |
Please provide a description of the function:def remove_all_gap_columns( self ):
seqs = []
for c in self.components:
try:
seqs.append( list( c.text ) )
except TypeError:
seqs.append( None )
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None: continue
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs:
if seq is None: continue
del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( self.components ) ):
if seqs[i] is None: continue
self.components[i].text = ''.join( seqs[i] )
self.text_size = text_size | [
"\n Remove any columns containing only gaps from alignment components,\n text of components is modified IN PLACE.\n "
] |
Please provide a description of the function:def slice_by_coord( self, start, end ):
start_col = self.coord_to_col( start )
end_col = self.coord_to_col( end )
if (self.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col ) | [
"\n Return the slice of the component corresponding to a coordinate interval.\n\n start and end are relative to the + strand, regardless of the component's strand.\n\n "
] |
Please provide a description of the function:def coord_to_col( self, pos ):
start,end = self.get_forward_strand_start(),self.get_forward_strand_end()
if pos < start or pos > end:
raise ValueError("Range error: %d not in %d-%d" % ( pos, start, end ))
if not self.index:
self.index = list()
if (self.strand == '-'):
# nota bene: for - strand self.index[x] maps to one column
# higher than is actually associated with the position; thus
# when slice_by_component() and slice_by_coord() flip the ends,
# the resulting slice is correct
for x in range( len(self.text)-1,-1,-1 ):
if not self.text[x] == '-':
self.index.append( x + 1 )
self.index.append( 0 )
else:
for x in range( len(self.text) ):
if not self.text[x] == '-':
self.index.append(x)
self.index.append( len(self.text) )
x = None
try:
x = self.index[ pos - start ]
except:
raise Exception("Error in index.")
return x | [
"\n Return the alignment column index corresponding to coordinate pos.\n\n pos is relative to the + strand, regardless of the component's strand.\n\n "
] |
Please provide a description of the function:def thread( mafs, species ):
for m in mafs:
new_maf = deepcopy( m )
new_components = get_components_for_species( new_maf, species )
if new_components:
remove_all_gap_columns( new_components )
new_maf.components = new_components
new_maf.score = 0.0
new_maf.text_size = len(new_components[0].text)
yield new_maf | [
"\n Restrict an list of alignments to a given list of species by:\n \n 1) Removing components for any other species \n 2) Remove any columns containing all gaps\n \n Example:\n \n >>> import bx.align.maf\n \n >>> block1 = bx.align.maf.from_string( '''\n ... a score=4964.0\n ... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC\n ... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA\n ... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT\n ... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT\n ... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc\n ... ''' )\n \n >>> block2 = bx.align.maf.from_string( '''\n ... a score=9151.0\n ... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC\n ... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG\n ... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT\n ... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa\n ... ''' )\n \n >>> mafs = [ block1, block2 ]\n \n >>> threaded = [ t for t in thread( mafs, [ \"hg18\", \"panTro1\" ] ) ]\n \n >>> len( threaded )\n 2\n \n >>> print(threaded[0])\n a score=0.0\n s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC\n s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT\n <BLANKLINE>\n\n >>> print(threaded[1])\n a score=0.0\n s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC\n s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT\n <BLANKLINE>\n \n "
] |
Please provide a description of the function:def get_components_for_species( alignment, species ):
# If the number of components in the alignment is less than the requested number
# of species we can immediately fail
if len( alignment.components ) < len( species ): return None
# Otherwise, build an index of components by species, then lookup
index = dict( [ ( c.src.split( '.' )[0], c ) for c in alignment.components ] )
try: return [ index[s] for s in species ]
except: return None | [
"Return the component for each species in the list `species` or None"
] |
Please provide a description of the function:def remove_all_gap_columns( components ):
seqs = [ list( c.text ) for c in components ]
i = 0
text_size = len( seqs[0] )
while i < text_size:
all_gap = True
for seq in seqs:
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs: del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( components ) ):
components[i].text = ''.join( seqs[i] ) | [
"\n Remove any columns containing only gaps from a set of alignment components,\n text of components is modified IN PLACE.\n \n TODO: Optimize this with Pyrex.\n "
] |
Please provide a description of the function:def read_next_maf( file, species_to_lengths=None, parse_e_rows=False ):
alignment = Alignment(species_to_lengths=species_to_lengths)
# Attributes line
line = readline( file, skip_blank=True )
if not line: return None
fields = line.split()
if fields[0] != 'a': raise Exception("Expected 'a ...' line")
alignment.attributes = parse_attributes( fields[1:] )
if 'score' in alignment.attributes:
alignment.score = alignment.attributes['score']
del alignment.attributes['score']
else:
alignment.score = 0
# Sequence lines
last_component = None
while 1:
line = readline( file )
# EOF or Blank line terminates alignment components
if not line or line.isspace(): break
# Parse row
fields = line.split()
if fields[0] == 's':
# An 's' row contains sequence for a component
component = Component()
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
if len(fields) > 6: component.text = fields[6].strip()
# Add to set
alignment.add_component( component )
last_component = component
elif fields[0] == 'e':
# An 'e' row, when no bases align for a given species this tells
# us something about the synteny
if parse_e_rows:
component = Component()
component.empty = True
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
component.text = None
synteny = fields[6].strip()
assert len( synteny ) == 1, \
"Synteny status in 'e' rows should be denoted with a single character code"
component.synteny_empty = synteny
alignment.add_component( component )
last_component = component
elif fields[0] == 'i':
# An 'i' row, indicates left and right synteny status for the
# previous component, we hope ;)
assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
last_component.synteny_left = ( fields[2], int( fields[3] ) )
last_component.synteny_right = ( fields[4], int( fields[5] ) )
elif fields[0] == 'q':
assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
# TODO: Should convert this to an integer array?
last_component.quality = fields[2]
return alignment | [
"\n Read the next MAF block from `file` and return as an `Alignment` \n instance. If `parse_i_rows` is true, empty components will be created \n when e rows are encountered.\n "
] |
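A typical read loop (the file name is a placeholder; the function returns None at end of file):

with open('alignments.maf') as f:
    while True:
        block = read_next_maf(f)
        if block is None:
            break
        print(block.score, len(block.components))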
Please provide a description of the function:def readline( file, skip_blank=False ):
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line | [
"Read a line from provided file, skipping any blank or comment lines"
] |
Please provide a description of the function:def parse_attributes( fields ):
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes | [
"Parse list of key=value strings into a dict"
] |
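For example, the attribute fields of a MAF 'a' line parse to string values:

>>> parse_attributes(['score=4964.0', 'pass=1'])
{'score': '4964.0', 'pass': '1'}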
Please provide a description of the function:def as_dict( self, key="id" ):
rval = {}
for motif in self:
rval[ getattr( motif, key ) ] = motif
return rval | [
"\n Return a dictionary containing all remaining motifs, using `key`\n as the dictionary key.\n "
] |
Please provide a description of the function:def parse_record( self, lines ):
# Break lines up
temp_lines = []
for line in lines:
fields = line.rstrip( "\r\n" ).split( None, 1 )
if len( fields ) == 1:
fields.append( "" )
temp_lines.append( fields )
lines = temp_lines
# Fill in motif from lines
motif = TransfacMotif()
current_line = 0
while 1:
# Done parsing if no more lines to consume
if current_line >= len( lines ):
break
# Remove prefix and first separator from line
prefix, rest = lines[ current_line ]
# No action for this prefix, just ignore the line
if prefix not in self.parse_actions:
current_line += 1
continue
# Get action for line
action = self.parse_actions[ prefix ]
# Store a single line value
if action[0] == "store_single":
key = action[1]
setattr( motif, key, rest )
current_line += 1
# Add a single line value to a list
if action[0] == "store_single_list":
key = action[1]
if not getattr( motif, key ):
setattr( motif, key, [] )
getattr( motif, key ).append( rest )
current_line += 1
# Add a single line value to a dictionary
if action[0] == "store_single_key_value":
key = action[1]
k, v = rest.strip().split( '=', 1 )
if not getattr( motif, key ):
setattr( motif, key, {} )
getattr( motif, key )[k] = v
current_line += 1
# Store a block of text
if action[0] == "store_block":
key = action[1]
value = []
while current_line < len( lines ) and lines[ current_line ][0] == prefix:
value.append( lines[current_line][1] )
current_line += 1
setattr( motif, key, str.join( "\n", value ) )
# Store a matrix
if action[0] == "store_matrix":
# First line is alphabet
alphabet = rest.split()
alphabet_size = len( alphabet )
rows = []
pattern = ""
current_line += 1
# Next lines are the rows of the matrix (we allow 0 rows)
while current_line < len( lines ):
prefix, rest = lines[ current_line ]
# Prefix should be a two digit 0 padded row number
if not prefix.isdigit():
break
# The first `alphabet_size` fields are the row values
values = rest.split()
rows.append( [ float(_) for _ in values[:alphabet_size] ] )
# TRANSFAC includes an extra column with the IUPAC code
if len( values ) > alphabet_size:
pattern += values[alphabet_size]
current_line += 1
# Only store the pattern if it is the correct length (meaning
# that every row had an extra field)
if len( pattern ) != len( rows ):
pattern = None
matrix = FrequencyMatrix.from_rows( alphabet, rows )
setattr( motif, action[1], matrix )
# Only return a motif if we saw at least ID or AC or NA
if motif.id or motif.accession or motif.name:
return motif | [
"\n Parse a TRANSFAC record out of `lines` and return a motif.\n "
] |
Please provide a description of the function:def bit_clone( bits ):
new = BitSet( bits.size )
new.ior( bits )
return new | [
"\n Clone a bitset\n "
] |
Please provide a description of the function:def throw_random( lengths, mask ):
saved = None
for i in range( maxtries ):
try:
return throw_random_bits( lengths, mask )
except MaxtriesException as e:
saved = e
continue
raise saved | [
"\n Try multiple times to run 'throw_random'\n "
] |
Please provide a description of the function:def as_bits( region_start, region_length, intervals ):
bits = BitSet( region_length )
for chr, start, stop in intervals:
bits.set_range( start - region_start, stop - start )
return bits | [
"\n Convert a set of intervals overlapping a region of a chromosome into \n a bitset for just that region with the bits covered by the intervals \n set.\n "
] |
Please provide a description of the function:def interval_lengths( bits ):
end = 0
while 1:
start = bits.next_set( end )
if start == bits.size: break
end = bits.next_clear( start )
yield end - start | [
"\n Get the length distribution of all contiguous runs of set bits from\n "
] |
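A small sketch tying `as_bits` and `interval_lengths` together, assuming the bx-python BitSet interface used above:

bits = as_bits(0, 50, [('chr1', 5, 10), ('chr1', 20, 30)])
assert list(interval_lengths(bits)) == [5, 10]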
Please provide a description of the function:def count_overlap( bits1, bits2 ):
b = BitSet( bits1.size )
b |= bits1
b &= bits2
return b.count_range( 0, b.size ) | [
"\n Count the number of bits that overlap between two sets\n "
] |
Please provide a description of the function:def overlapping_in_bed( fname, r_chr, r_start, r_stop ):
rval = []
for line in open( fname ):
if line.startswith( "#" ) or line.startswith( "track" ):
continue
fields = line.split()
chr, start, stop = fields[0], int( fields[1] ), int( fields[2] )
if chr == r_chr and start < r_stop and stop >= r_start:
rval.append( ( chr, max( start, r_start ), min( stop, r_stop ) ) )
return rval | [
"\n Get from a bed all intervals that overlap the region defined by\n r_chr, r_start, r_stop.\n "
] |
Please provide a description of the function:def tile_interval( sources, index, ref_src, start, end, seq_db=None ):
# First entry in sources should also be on the reference species
assert sources[0].split('.')[0] == ref_src.split('.')[0], \
"%s != %s" % ( sources[0].split('.')[0], ref_src.split('.')[0] )
base_len = end - start
blocks = index.get( ref_src, start, end )
# From low to high score
blocks.sort(key=lambda t: t.score)
mask = [ -1 ] * base_len
ref_src_size = None
for i, block in enumerate( blocks ):
ref = block.get_component_by_src_start( ref_src )
ref_src_size = ref.src_size
assert ref.strand == "+"
slice_start = max( start, ref.start )
slice_end = min( end, ref.end )
for j in range( slice_start, slice_end ):
mask[j-start] = i
tiled = []
for i in range( len( sources ) ):
tiled.append( [] )
for ss, ee, index in intervals_from_mask( mask ):
# Interval with no covering alignments
if index < 0:
# Get sequence if available, otherwise just use 'N'
if seq_db:
tiled[0].append( bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( start+ss, ee-ss ) )
else:
tiled[0].append( "N" * (ee-ss) )
# Gaps in all other species
for row in tiled[1:]:
row.append( "-" * ( ee - ss ) )
else:
slice_start = start + ss
slice_end = start + ee
block = blocks[index]
ref = block.get_component_by_src_start( ref_src )
sliced = block.slice_by_component( ref, slice_start, slice_end )
sliced = sliced.limit_to_species( sources )
sliced.remove_all_gap_columns()
for i, src in enumerate( sources ):
comp = sliced.get_component_by_src_start( src )
if comp:
tiled[i].append( comp.text )
else:
tiled[i].append( "-" * sliced.text_size )
return [ "".join( t ) for t in tiled ] | [
"\n Tile maf blocks onto an interval. The resulting block will span the interval\n exactly and contain the column from the highest scoring alignment at each\n position.\n\n `sources`: list of sequence source names to include in final block\n `index`: an instnace that can return maf blocks overlapping intervals\n `ref_src`: source name of the interval (ie, hg17.chr7)\n `start`: start of interval\n `end`: end of interval\n `seq_db`: a mapping for source names in the reference species to nib files\n "
] |
Please provide a description of the function:def MafMotifSelect(mafblock,pwm,motif=None,threshold=0):
if motif != None and len(motif) != len(pwm):
raise Exception("pwm and motif must be the same length")
# generic alignment
alignlist = [ c.text for c in mafblock.components ]
align = pwmx.Align( alignlist )
nrows,ncols = align.dims
#chr,chr_start,chr_stop = align.headers[0]
# required sequence length
minSeqLen = len( motif )
# record the text sizes from the alignment rows
align_match_lens = []
for start in range(ncols - minSeqLen):
if align.rows[0][start] == '-': continue
subseq = ""
pwm_score_vec = []
motif_score_vec = []
max_cols = 0
for ir in range(nrows):
expanded = align.rows[ir].count( '-', start, start+minSeqLen ) # count gaps within the window
subtext = align.rows[ir][ start : start+minSeqLen+expanded ]
max_cols = max( len(subtext), max_cols )
subseq = subtext.replace('-','')
revseq = pwmx.reverse_complement(subseq)
# pwm score
nill,f_score = pwm.score_seq( subseq )[0]
r_score, nill = pwm.score_seq( revseq )[0]
pwm_score_vec.append( max(f_score, r_score) )
# consensus score
if motif is not None:
for_score = int( pwmx.match_consensus(subseq,motif) )
rev_score = int( pwmx.match_consensus(revseq,motif) )
motif_score_vec.append( max(for_score, rev_score) )
#check threshold
try:
assert not isnan(max(pwm_score_vec) )
assert not isnan(max(motif_score_vec) )
except:
print(pwm_score_vec, motif_score_vec, file=sys.stderr)
print(len(subseq), len(pwm), file=sys.stderr)
if max(pwm_score_vec) < threshold: continue
if max(motif_score_vec) < threshold: continue
# chop block
col_start = start
col_end = max_cols + 1
motifmaf = mafblock.slice( col_start, col_end )
yield motifmaf, pwm_score_vec, motif_score_vec
| [
"\n for ir in range(nrows):\n # scan alignment row for motif subsequences\n for start in range(ncols):\n if align.rows[ir][start] == '-': continue\n elif align.rows[ir][start] == 'n': continue\n elif align.rows[ir][start] == 'N': continue\n # gather enough subseq for motif\n for ic in range(start,ncols):\n char = align.rows[ir][ic].upper()\n if char == '-' or char == 'N': continue\n else: subseq += char\n if len(subseq) == minSeqLen: \n revseq = pwmx.reverse_complement( subseq )\n align_match_lens.append( ic )\n # pwm score\n nill,f_score = pwm.score_seq( subseq )[0]\n r_score, nill = pwm.score_seq( revseq )[0]\n pwm_score_vec.append( max(f_score, r_score) )\n # consensus score\n if motif is not None:\n for_score = int( pwmx.match_consensus(subseq,motif) )\n rev_score = int( pwmx.match_consensus(revseq,motif) )\n motif_score_vec.append( max(for_score, rev_score) )\n #check threshold\n try:\n assert not isnan(max(pwm_score_vec) )\n assert not isnan(max(motif_score_vec) )\n except:\n print >>sys.stderr, pwm_score_vec, motif_score_vec\n print >>sys.stderr, len(subseq), len(pwm)\n if max(pwm_score_vec) < threshold: continue\n if max(motif_score_vec) < threshold: continue\n # chop block\n col_start = start\n col_end = max( align_match_lens ) + 1\n motifmaf = mafblock.slice( col_start, col_end )\n\n print subseq,revseq,ic\n print align_match_lens\n yield motifmaf, pwm_score_vec, motif_score_vec\n "
] |
Please provide a description of the function:def create_parser():
# Basic tokens
real = Combine( Word( "+-" + nums, nums ) +
Optional( "." + Optional( Word( nums ) ) ) +
Optional( CaselessLiteral( "E" ) + Word( "+-" + nums, nums ) ) )
lpar = Suppress( "(" )
rpar = Suppress( ")" )
colon = Suppress( ":" )
semi = Suppress( ";" )
quot = Suppress( "'" )
# Labels are either unquoted or single quoted, if unquoted underscores will be replaced with spaces
quoted_label = QuotedString( "'", None, "''" ).setParseAction( lambda s, l, t: t[0] )
simple_label = Word( alphas + nums + "_." ).setParseAction( lambda s, l, t: t[0].replace( "_", " " ) )
label = quoted_label | simple_label
# Branch length is a real number (note though that exponents are not in the spec!)
branch_length = real.setParseAction( lambda s, l, t: float( t[0] ) )
# Need to forward declare this due to circularity
node_list = Forward()
# A node might have an list of edges (for a subtree), a label, and/or a branch length
node = ( Optional( node_list, None ) + Optional( label, "" ) + Optional( colon + branch_length, None ) ) \
.setParseAction( lambda s, l, t: Edge( t[2], Tree( t[1] or None, t[0] ) ) )
node_list << ( lpar + delimitedList( node ) + rpar ) \
.setParseAction( lambda s, l, t: [ t.asList() ] )
# The root cannot have a branch length
tree = ( node_list + Optional( label, "" ) + semi )\
.setParseAction( lambda s, l, t: Tree( t[1] or None, t[0] ) )
# Return the outermost element
return tree | [
"\n Create a 'pyparsing' parser for newick format trees roughly based on the\n grammar here:\n http://evolution.genetics.washington.edu/phylip/newick_doc.html\n\n Problems:\n - Is a single leaf a valid tree?\n - Branch length on root? Doesn't make sense to me, and forces the root\n to be an edge.\n "
] |
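A brief usage sketch; `Tree` and `Edge` are the classes constructed by the parse actions above, and the newick string is illustrative:

parser = create_parser()
tree = parser.parseString("(B:6.0,(A:5.0,C:3.0)'interior node':1.0)root;")[0]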
Please provide a description of the function:def get_fill_char( maf_status ):
## assert maf_status not in ( maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS,
## maf.MAF_MAYBE_NEW_NESTED_STATUS ), \
## "Nested rows do not make sense in a single coverage MAF (or do they?)"
if maf_status in ( maf.MAF_NEW_STATUS, maf.MAF_MAYBE_NEW_STATUS,
maf.MAF_NEW_NESTED_STATUS, maf.MAF_MAYBE_NEW_NESTED_STATUS ):
return "*"
elif maf_status in ( maf.MAF_INVERSE_STATUS, maf.MAF_INSERT_STATUS ):
return "="
elif maf_status in ( maf.MAF_CONTIG_STATUS, maf.MAF_CONTIG_NESTED_STATUS ):
return "#"
elif maf_status == maf.MAF_MISSING_STATUS:
return "X"
else:
raise ValueError("Unknwon maf status") | [
"\n Return the character that should be used to fill between blocks\n having a given status\n "
] |
Please provide a description of the function:def guess_fill_char( left_comp, right_comp ):
# No left component, obviously new
# (note: this unconditional return makes the remainder of the function unreachable)
return "*"
# First check that the blocks have the same src (not just species) and
# orientation
if ( left_comp.src == right_comp.src and left_comp.strand != right_comp.strand ):
# Are they completely contiguous? Easy to call that a gap
if left_comp.end == right_comp.start:
return "-"
# TODO: should be able to make some guesses about short insertions
# here
# All other cases we have no clue about
return "*" | [
"\n For the case where there is no annotated synteny we will try to guess it\n "
] |
Please provide a description of the function:def remove_all_gap_columns( texts ):
seqs = [ list( t ) for t in texts ]
i = 0
text_size = len( texts[0] )
while i < text_size:
all_gap = True
for seq in seqs:
if seq[i] not in ( '-', '#', '*', '=', 'X', '@' ):
all_gap = False
if all_gap:
for seq in seqs:
del seq[i]
text_size -= 1
else:
i += 1
return [ ''.join( s ) for s in seqs ] | [
"\n Remove any columns containing only gaps from alignment texts\n "
] |
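For example, columns consisting only of fill characters (here '-' and '*'/'=') are dropped:

>>> remove_all_gap_columns(['A-C*T', 'G-A=T'])
['ACT', 'GAT']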
Please provide a description of the function:def cross_lists(*sets):
wheels = [iter(_) for _ in sets]
digits = [next(it) for it in wheels]
while True:
yield digits[:]
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(sets[i])
digits[i] = next(wheels[i])
else:
break | [
"Return the cross product of the arguments"
] |
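For example:

>>> list(cross_lists([1, 2], 'ab'))
[[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]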
Please provide a description of the function:def read_lengths_file( name ):
chrom_to_length = {}
f = open( name, "rt" )
for line in f:
line = line.strip()
if line == '' or line[0] == '#': continue
try:
fields = line.split()
if len(fields) != 2: raise
chrom = fields[0]
length = int( fields[1] )
except:
raise ValueError("bad length file line: %s" % line)
if chrom in chrom_to_length and length != chrom_to_length[chrom]:
raise ValueError("%s has more than one length!" % chrom)
chrom_to_length[chrom] = length
f.close()
return chrom_to_length | [
"\n Returns a hash from sequence name to length.\n "
] |
Please provide a description of the function:def IntervalReader( f ):
current_chrom = None
current_pos = None
current_step = None
# always for wiggle data
strand = '+'
mode = "bed"
for line in f:
if line.isspace() or line.startswith( "track" ) or line.startswith( "#" ) or line.startswith( "browser" ):
continue
elif line.startswith( "variableStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = None
current_step = None
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "variableStep"
elif line.startswith( "fixedStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = int( header['start'] ) - 1
current_step = int( header['step'] )
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "fixedStep"
elif mode == "bed":
fields = line.split()
if len( fields ) > 3:
if len( fields ) > 5:
yield fields[0], int( fields[1] ), int( fields[2] ), fields[5], float( fields[3] )
else:
yield fields[0], int( fields[1] ), int( fields[2] ), strand, float( fields[3] )
elif mode == "variableStep":
fields = line.split()
pos = int( fields[0] ) - 1
yield current_chrom, pos, pos + current_span, strand, float( fields[1] )
elif mode == "fixedStep":
yield current_chrom, current_pos, current_pos + current_span, strand, float( line.split()[0] )
current_pos += current_step
else:
raise ValueError("Unexpected input line: %s" % line.strip()) | [
"\n Iterator yielding chrom, start, end, strand, value.\n Values are zero-based, half-open.\n Regions which lack a score are ignored.\n "
] |
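A fixedStep sketch showing the conversion to zero-based, half-open intervals (this assumes the module's `parse_header` splits the key=value header fields, as the code above relies on):

from io import StringIO
wig = StringIO('fixedStep chrom=chr1 start=11 step=5 span=2\n1.0\n2.0\n')
assert list(IntervalReader(wig)) == [('chr1', 10, 12, '+', 1.0),
                                     ('chr1', 15, 17, '+', 2.0)]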
Please provide a description of the function:def read_and_unpack( self, format, byte_count=None ):
pattern = "%s%s" % ( self.endian_code, format )
if byte_count is None:
byte_count = struct.calcsize( pattern )
return struct.unpack( pattern, self.file.read( byte_count ) ) | [
"\n Read enough bytes to unpack according to `format` and return the\n tuple of unpacked values.\n "
] |
Please provide a description of the function:def read_c_string( self ):
rval = []
while 1:
ch = self.file.read(1)
assert len( ch ) == 1, "Unexpected end of file"
if ch == b'\0':
break
rval.append( ch )
return b''.join( rval ) | [
"\n Read a zero terminated (C style) string\n "
] |
Please provide a description of the function:def pack_and_write( self, format, value ):
pattern = "%s%s" % ( self.endian_code, format )
return self.file.write( struct.pack( pattern, value ) ) | [
"\n Read enough bytes to unpack according to `format` and return the\n tuple of unpacked values.\n "
] |
Please provide a description of the function:def write_c_string( self, value ):
self.file.write( value )
self.file.write( b'\0' ) | [
"\n Read a zero terminated (C style) string\n "
] |
Please provide a description of the function:def fuse_list( mafs ):
last = None
for m in mafs:
if last is None:
last = m
else:
fused = fuse( last, m )
if fused:
last = fused
else:
yield last
last = m
if last:
yield last | [
"\n Try to fuse a list of blocks by progressively fusing each adjacent pair.\n "
] |
Please provide a description of the function:def fuse( m1, m2 ):
# Check if the blocks are adjacent, return none if not.
if len( m1.components ) != len( m2.components ): return None
for c1, c2 in zip( m1.components, m2.components ):
if c1.src != c2.src: return None
if c1.strand != c2.strand: return None
if c1.end != c2.start: return None
# Try to fuse:
n = deepcopy( m1 )
for c1, c2 in zip( n.components, m2.components ):
c1.text += c2.text
c1.size += c2.size
n.text_size = len( n.components[0].text )
return n | [
"\n Attempt to fuse two blocks. If they can be fused returns a new block, \n otherwise returns None.\n \n Example:\n \n >>> import bx.align.maf\n \n >>> block1 = bx.align.maf.from_string( '''\n ... a score=0.0\n ... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC\n ... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT\n ... ''' )\n \n >>> block2 = bx.align.maf.from_string( '''\n ... a score=0.0\n ... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC\n ... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT\n ... ''' )\n \n >>> fused = fuse( block1, block2 )\n \n >>> print(fused)\n a score=0.0\n s hg18.chr10 52686 113 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGCGCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC\n s panTro1.chrUn_random 208115356 113 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGTGCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT\n <BLANKLINE>\n "
] |
Please provide a description of the function:def countedArray( expr ):
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = int(t[0])
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr ) | [
"Helper to define a counted list of expressions.\n This helper defines a pattern of the form::\n integer expr expr expr...\n where the leading integer tells how many expr expressions follow.\n The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.\n "
] |
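For example, the leading count is consumed and suppressed:

>>> countedArray(Word(alphas)).parseString('2 ab cd').asList()
[['ab', 'cd']]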
Please provide a description of the function:def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret | [
"Helper method for defining nested lists enclosed in opening and closing\n delimiters (\"(\" and \")\" are the default).\n\n Parameters:\n - opener - opening character for a nested list (default=\"(\"); can also be a pyparsing expression\n - closer - closing character for a nested list (default=\")\"); can also be a pyparsing expression\n - content - expression for items within the nested lists (default=None)\n - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)\n\n If an expression is not provided for the content argument, the nested\n expression will capture all whitespace-delimited content between delimiters\n as a list of separate values.\n\n Use the ignoreExpr argument to define expressions that may contain\n opening or closing characters that should not be treated as opening\n or closing characters for nesting, such as quotedString or a comment\n expression. Specify multiple expressions using an Or or MatchFirst.\n The default is quotedString, but if no expressions are to be ignored,\n then pass None for this argument.\n "
] |
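For example, with the default delimiters:

>>> nestedExpr().parseString('(a b (c d))').asList()
[['a', 'b', ['c', 'd']]]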
Please provide a description of the function:def setBreak(self,breakFlag = True):
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
_parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self | [
"Method to invoke the Python pdb debugger when this element is\n about to be parsed. Set breakFlag to True to enable, False to\n disable.\n "
] |
Please provide a description of the function:def _normalizeParseActionArgs( f ):
STAR_ARGS = 4
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if not _PY3K:
codeObj = f.func_code
else:
codeObj = f.code
if codeObj.co_flags & STAR_ARGS:
return f
numargs = codeObj.co_argcount
if not _PY3K:
if hasattr(f,"im_self"):
numargs -= 1
else:
if hasattr(f,"__self__"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
if not _PY3K:
call_im_func_code = f.__call__.im_func.func_code
else:
call_im_func_code = f.__code__
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if call_im_func_code.co_flags & STAR_ARGS:
return f
numargs = call_im_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 0
except AttributeError:
if not _PY3K:
call_func_code = f.__call__.func_code
else:
call_func_code = f.__call__.__code__
# not a bound method, get info directly from __call__ method
if call_func_code.co_flags & STAR_ARGS:
return f
numargs = call_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs > 3:
def tmp(s,l,t):
return f(f.__call__.__self__, s,l,t)
elif numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__doc__ = f.__doc__
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__dict__.update(f.__dict__)
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
return tmp | [
"Internal method used to decorate parse actions that take fewer than 3 arguments,\n so that all parse actions can be called as f(s,l,t)."
] |
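The practical effect is that parse actions may be written with any supported arity; a sketch:

from pyparsing import Word, nums

int_a = Word(nums)
int_a.setParseAction(lambda t: int(t[0]))        # tokens-only form
int_b = Word(nums)
int_b.setParseAction(lambda s, l, t: int(t[0]))  # full (string, loc, tokens) form
print(int_a.parseString("42")[0], int_b.parseString("42")[0])  # 42 42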
Please provide a description of the function:def parseString( self, instring, parseAll=False ):
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
loc, tokens = self._parse( instring, 0 )
if parseAll:
StringEnd()._parse( instring, loc )
return tokens | [
"Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set parseAll to True (equivalent to ending\n the grammar with StringEnd()).\n\n Note: parseString implicitly calls expandtabs() on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the loc argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n - calling parseWithTabs on your grammar before calling parseString\n (see L{I{parseWithTabs}<parseWithTabs>})\n - define your parse action using the full (s,loc,toks) signature, and\n reference the input string using the parse action's s argument\n - explictly expand the tabs in your input string before calling\n parseString\n "
] |
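Basic usage, e.g.:

from pyparsing import Word, alphas

greeting = Word(alphas) + "," + Word(alphas) + "!"
print(greeting.parseString("Hello, World!").asList())  # ['Hello', ',', 'World', '!']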
Please provide a description of the function:def transformString( self, instring ):
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out)) | [
"Extension to scanString, to modify matching text with modified tokens that may\n be returned from a parse action. To use transformString, define a grammar and\n attach a parse action to it that modifies the returned token list.\n Invoking transformString() on a target string will then scan for matches,\n and replace the matched text patterns according to the logic in the parse\n action. transformString() returns the resulting transformed string."
] |
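A small example of the scan-and-replace pattern described above:

from pyparsing import Word, alphas

word = Word(alphas)
word.setParseAction(lambda t: t[0].title())
print(word.transformString("now is the winter"))  # Now Is The Winter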
Please provide a description of the function:def searchString( self, instring, maxMatches=_MAX_INT ):
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) | [
"Another extension to scanString, simplifying the access to the tokens found\n to match the given parse expression. May be called with optional\n maxMatches argument, to clip searching after 'n' matches are found.\n "
] |
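For example:

from pyparsing import Word, nums

for match in Word(nums).searchString("12 apples, 7 pears"):
    print(match[0])  # prints 12, then 7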
Please provide a description of the function:def parseFile( self, file_or_filename ):
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents) | [
"Execute the parse expression on the given file or filename.\n If a filename is specified (instead of a file object),\n the entire file is opened, read, and closed before parsing.\n "
] |
Please provide a description of the function:def _strfactory(cls, line):
assert type(line) == str, "this is a factory from string"
line = line.rstrip().split()[1:] # the first component is the keyword "chain"
tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)]
return tuple.__new__(cls, tup) | [
"factory class method for Chain\n\n :param line: header of a chain (in .chain format)\n "
] |
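A hedged example with a typical UCSC .chain header (field order: score, tName, tSize, tStrand, tStart, tEnd, qName, qSize, qStrand, qStart, qEnd, id; assumes Chain is the namedtuple this module defines):

line = "chain 4900 chrY 58368225 + 25985403 25985638 chr5 151006098 - 43257292 43257528 1"
ch = Chain._strfactory(line)
# ch.score == 4900, ch.tName == 'chrY', ch.qStrand == '-', ch.id == '1'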
Please provide a description of the function:def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
# size, target, query arrays
S, T, Q = [], [], []
#the target strand of the chain must be on the forward strand
trg_intervals = trg_comp.intervals(reverse = trg_comp.strand == '-')
qr_intervals = qr_comp.intervals(reverse = trg_comp.strand == '-')
if len(trg_intervals) == 0 or len(qr_intervals) == 0:
log.warning("deletion/insertion only intervals")
return None
A, B = rem_dash(trg_intervals, qr_intervals)
# correct for when cigar starts/ends with dashes (in number of bases)
tr_start_correction = max(B[0][0] - A[0][0], 0)
tr_end_correction = max(A[-1][1] - B[-1][1], 0)
qr_start_correction = max(A[0][0] - B[0][0], 0)
qr_end_correction = max(B[-1][1] - A[-1][1], 0)
a, b = A.pop(0), B.pop(0)
# intervals are 0-based, half-open => lengths = coordinate difference
while A or B:
if a[1] < b[1]:
T.append(0); Q.append( A[0][0] - a[1] ); S.append( min(a[1], b[1]) - max(a[0], b[0]) )
a = A.pop(0)
elif b[1] < a[1]:
Q.append(0); T.append( B[0][0] - b[1] ); S.append( min(a[1], b[1]) - max(a[0], b[0]) )
b = B.pop(0)
elif A and B:
assert False, "there are dash columns"
else:
break
S.append( min(a[1], b[1]) - max(a[0], b[0]) )
assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))
tSize = trg_chrom_sizes[trg_comp.chrom]
qSize = qr_chrom_sizes[qr_comp.chrom]
## UCSC coordinates are 0-based, half-open and e! coordinates are 1-based, closed
## chain_start = epo_start - 1 and chain_end = epo_end
if qr_comp.strand == '+':
chain = Chain(0,
trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
qr_comp.gabid)
else:
chain = Chain(0,
trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
qr_comp.gabid)
# strand correction. in UCSC coordinates this is: size - coord
if chain.qStrand == '-':
chain = chain._replace(qEnd = chain.qSize - chain.qStart,
qStart = chain.qSize - chain.qEnd)
assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (str(chain),
chain.tEnd - chain.tStart, sum(S) + sum(T))
assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (str(chain),
chain.qEnd - chain.qStart, sum(S) + sum(Q))
return chain, S, T, Q | [
"crate a chain of collinear rings from the given components.\n\n The target of the chain will always be on the forward strand.\n This is done to avoid confusion when mapping psl files. So,\n if trg_comp.strand=-, qr_comp.strand=- (resp. +) the\n chain header will have tStrand=+, qStrand=+ (resp. -). No strand\n changes on the other cases.\n\n :param trg_comp: target (i.e, the first) component\n :type trg_comp: L{EPOitem}\n :param qr_comp: query (i.e, the second) component\n :type qr_comp: L{EPOitem}\n :param trg_chrom_sizes: chromosome sizes of the target\n :type trg_chrom_sizes: dictionary of the type (chrom) --> size\n :param qr_chrom_sizes: chromosome sizes of the query\n :type qr_chrom_sizes: dictionary of the type (chrom) --> size\n :return: A L{Chain} instance"
] |
Please provide a description of the function:def slice(self, who):
"return the slice entry (in a bed6 format), AS IS in the chain header"
assert who in ('t', 'q'), "who should be 't' or 'q'"
if who == 't':
return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand)
else:
return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand) | [] |
Please provide a description of the function:def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand) | [] |
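The minus-strand branch flips chain coordinates onto the forward strand; a worked example:

# qSize = 100, qStart = 10, qEnd = 30 on the '-' strand:
# BED interval = (qSize - qEnd, qSize - qStart) = (70, 90); the length 20 is preserved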
Please provide a description of the function:def _parse_file(cls, path, pickle=False):
fname = path
if fname.endswith(".gz"):
fname = path[:-3]
if fname.endswith('.pkl'):
#you asked for the pickled file. I'll give it to you
log.debug("loading pickled file %s ..." % fname)
return cPickle.load( open(fname, "rb") )
elif os.path.isfile("%s.pkl" % fname):
#there is a cached version I can give to you
log.info("loading pickled file %s.pkl ..." % fname)
if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime:
log.critical("*** pickled file %s.pkl is not up to date ***" % (path))
return cPickle.load( open("%s.pkl" % fname, "rb") )
data = fastLoadChain(path, cls._strfactory)
if pickle and not os.path.isfile('%s.pkl' % fname):
log.info("pckling to %s.pkl" % (fname))
with open('%s.pkl' % fname, 'wb') as fd:
cPickle.dump(data, fd)
return data | [
"parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...]\n\n :param fname: name of the file"
] |
Please provide a description of the function:def _strfactory(cls, line):
cmp = line.rstrip().split()
chrom = cmp[2]
if not chrom.startswith("chr"):
chrom = "chr%s" % chrom
instance = tuple.__new__(cls,
(cmp[0], cmp[1],
chrom, int(cmp[3]), int(cmp[4]),
{'1' : '+', '-1' : '-'}[cmp[5]], cmp[6]))
span = instance.end - instance.start + 1
m_num = sum( (t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False) )
if span != m_num:
log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num))
return None
return instance | [
"factory method for an EPOitem\n\n :param line: a line of input"
] |
Please provide a description of the function:def _parse_epo(cls, fname):
data = {}
with open(fname) as fd:
for el in (cls._strfactory(_) for _ in fd):
if el:
data.setdefault(el.gabid, []).append( el )
log.info("parsed %d elements from %s" % (len(data), fname))
return data | [
"Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]}\n\n :param fname: file name"
] |
Please provide a description of the function:def cigar_iter(self, reverse):
l = 0
P = self.cigar_pattern
data = []
cigar = self.cigar
parsed_cigar = re.findall(P, cigar)
if reverse:
parsed_cigar = parsed_cigar[::-1]
for _l, t in parsed_cigar:
# 1M is encoded as M
l = (_l and int(_l) or 1) # int(_l) cannot be 0
data.append( (l, t) )
return data | [
"self.cigar => [(length, type) ... ] iterate the cigar\n\n :param reverse: whether to iterate in the reverse direction (right-to-left)\n :type reverse: boolean\n\n :return a list of pairs of the type [(length, M/D) ..]\n "
] |
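A sketch of what the iteration yields, assuming cigar_pattern is the regex r'(\d*)([MD])' (a bare 'M' or 'D' means length 1):

import re

P = re.compile(r"(\d*)([MD])")
print([(int(n) if n else 1, t) for n, t in P.findall("4MD4M2DM")])
# [(4, 'M'), (1, 'D'), (4, 'M'), (2, 'D'), (1, 'M')]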
Please provide a description of the function:def intervals(self, reverse, thr=0):
d = [(thr,thr)]
dl = 0
for tup in self.cigar_iter(reverse):
if tup[1] == "D":
dl = tup[0]
else:
s = d[-1][1] + dl
d.append( (s, s+tup[0]) )
assert d[0] == (thr, thr)
# assert that nr. of Ms in the interval == sum of produced intervals
assert sum( t[0] for t in self.cigar_iter(False) if t[1] == "M" ) == sum( t[1]-t[0] for t in d )
d_sum = sum( t[1]-t[0] for t in d )
assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % (self.start, self.end,
self.end-self.start+1, d_sum)
return d[1:] | [
"return a list of (0-based half-open) intervals representing the match regions of the cigar\n\n for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)]\n 4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval)\n\n :param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter)\n :type reverse: boolean\n :param thr: shift all intervals by this much\n :type thr: integer\n\n :return: list of pairs"
] |
Please provide a description of the function:def do_interval( sources, index, out, ref_src, start, end, seq_db, missing_data, strand ):
ref_src_size = None
# Make sure the reference component is also the first in the source list
assert sources[0].split('.')[0] == ref_src.split('.')[0], "%s != %s" \
% ( sources[0].split('.')[0], ref_src.split('.')[0] )
# Determine the overall length of the interval
base_len = end - start
# Counter for the last reference species base we have processed
last_stop = start
# Rows in maf blocks come in arbitrary order; we'll convert things
# to the desired order of the tiled block
source_to_index = dict( ( name, i ) for ( i, name ) in enumerate( sources ) )
# This gets all the maf blocks overlapping our interval of interest
# NOTE: Unlike maf_tile we're expecting
# things to be single coverage in the reference species, so we won't
# sort by score and lay down.
blocks = index.get( ref_src, start, end )
# The last component seen for each species onto which we are tiling
last_components = [ None ] * len( sources )
last_status = [ None ] * len( sources )
cols_needing_fill = [ 0 ] * len( sources )
# The list of strings in which we build up the tiled alignment
tiled_rows = [ "" for i in range( len( sources ) ) ]
# Enumerate the (ordered) list of blocks
for i, block in enumerate( blocks ):
# Check for overlap in reference species
ref = block.get_component_by_src_start( ref_src )
if ref.start < last_stop:
if ref.end < last_stop:
continue
block = block.slice_by_component( ref, last_stop, min( end, ref.end ) )
ref = block.get_component_by_src_start( ref_src )
block = block.slice_by_component( ref, max( start, ref.start ), min( end, ref.end ) )
ref = block.get_component_by_src_start( ref_src )
# print block
assert last_components[0] is None or ref.start >= last_components[0].end, \
"MAF must be sorted and single coverage in reference species!"
assert ref.strand == "+", \
"MAF must have all reference species blocks on the plus strand"
# Store the size of the reference sequence for building fake block
if ref_src_size is None:
ref_src_size = ref.src_size
# Handle the reference component separately, it has no synteny status
# but we will try to fill in missing sequence
if ref.start > last_stop:
# Need to fill in some reference sequence
chunk_len = ref.start - last_stop
text = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
tiled_rows[0] += text
for source in sources[1:]:
cols_needing_fill[ source_to_index[ source ] ] += chunk_len
# Do reference component
chunk_len = len( ref.text )
tiled_rows[0] += ref.text
# Do each other component
for source in sources[1:]:
source_index = source_to_index[ source ]
comp = block.get_component_by_src_start( source )
if comp:
if comp.synteny_left is None:
left_status, left_length = None, -1
else:
left_status, left_length = comp.synteny_left
if comp.synteny_right is None:
right_status, right_length = None, -1
else:
right_status, right_length = comp.synteny_right
# We have a component, do we need to do some filling?
cols_to_fill = cols_needing_fill[ source_index ]
if cols_to_fill > 0:
# Adjacent components should have matching status
## assert last_status[ source_index ] is None or last_status[ source_index ] == left_status, \
## "left status (%s) does not match right status (%s) of last component for %s" \
## % ( left_status, last_status[ source_index ], source )
if left_status is None:
fill_char = guess_fill_char( last_components[source_index], comp )
else:
fill_char = get_fill_char( left_status )
tiled_rows[ source_index ] += ( fill_char * cols_to_fill )
cols_needing_fill[ source_index ] = 0
# Okay, filled up to current position, now append the text
tiled_rows[ source_index ] += comp.text
assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
"length of tiled row should match reference row"
last_components[ source_index ] = comp
last_status[ source_index ] = right_status
else:
# No component, we'll have to fill this region when we know
# the status
cols_needing_fill[ source_index ] += chunk_len
last_stop = ref.end
# No more components, clean up the ends
if last_stop < end:
# Need to fill in some reference sequence
chunk_len = end - last_stop
tiled_rows[0] += bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
for source in sources[1:]:
cols_needing_fill[ source_to_index[ source ] ] += chunk_len
# Any final filling that needs to be done?
for source in sources[1:]:
source_index = source_to_index[ source ]
fill_needed = cols_needing_fill[ source_index ]
if fill_needed > 0:
if last_components[ source_index ] is None:
# print >>sys.stderr, "Never saw any components for %s, filling with @" % source
fill_char = '@'
else:
if last_status[ source_index ] is None:
fill_char = '*'
else:
fill_char = get_fill_char( last_status[ source_index ] )
tiled_rows[ source_index ] += fill_char * fill_needed
assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
"length of tiled row should match reference row"
# Okay, now make up the fake alignment from the tiled rows.
tiled_rows = remove_all_gap_columns( tiled_rows )
a = align.Alignment()
for i, name in enumerate( sources ):
text = "".join( tiled_rows[i] )
size = len( text ) - text.count( "-" )
if i == 0:
if ref_src_size is None: ref_src_size = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).length
c = align.Component( ref_src, start, end-start, "+", ref_src_size, text )
else:
c = align.Component( name + ".fake", 0, size, "?", size, text )
a.add_component( c )
if strand == '-':
a = a.reverse_complement()
out.write( a ) | [
"\n Join together alignment blocks to create a semi human projected local \n alignment (small reference sequence deletions are kept as supported by \n the local alignment).\n "
] |
Please provide a description of the function:def binned_bitsets_from_file( f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream_pad=0, downstream_pad=0, lens={} ):
last_chrom = None
last_bitset = None
bitsets = dict()
for line in f:
if line.startswith("#") or line.isspace():
continue
fields = line.split()
strand = "+"
if len(fields) > strand_col:
if fields[strand_col] == "-": strand = "-"
chrom = fields[chrom_col]
if chrom != last_chrom:
if chrom not in bitsets:
if chrom in lens:
size = lens[chrom]
else:
size = MAX
bitsets[chrom] = BinnedBitSet( size )
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( fields[start_col] ), int( fields[end_col] )
if upstream_pad: start = max( 0, start - upstream_pad )
if downstream_pad: end = min( size, end + downstream_pad )
if start > end: warn( "Interval start after end!" )
last_bitset.set_range( start, end-start )
return bitsets | [
"\n Read a file into a dictionary of bitsets. The defaults arguments \n \n - 'f' should be a file like object (or any iterable containing strings)\n - 'chrom_col', 'start_col', and 'end_col' must exist in each line. \n - 'strand_col' is optional, any line without it will be assumed to be '+'\n - if 'lens' is provided bitset sizes will be looked up from it, otherwise\n chromosomes will be assumed to be the maximum size\n "
] |
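Typical usage, e.g. total covered bases per chromosome (assumes bx-python's BinnedBitSet.count_range):

bitsets = binned_bitsets_from_file(open("regions.bed"))
for chrom, bits in bitsets.items():
    print(chrom, bits.count_range(0, bits.size))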
Please provide a description of the function:def binned_bitsets_proximity( f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream=0, downstream=0 ):
last_chrom = None
last_bitset = None
bitsets = dict()
for line in f:
if line.startswith("#"): continue
# print "input=%s" % ( line ),
fields = line.split()
strand = "+"
if len(fields) >= strand_col + 1:
if fields[strand_col] == "-": strand = "-"
chrom = fields[chrom_col]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet( MAX )
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( fields[start_col] ), int( fields[end_col] )
if strand == "+":
if upstream: start = max( 0, start - upstream )
if downstream: end = min( MAX, end + downstream )
if strand == "-":
if upstream: end = min( MAX, end + upstream )
if downstream: start = max( 0, start - downstream )
# print "set: start=%d\tend=%d" % ( start, end )
if end-start > 0:
last_bitset.set_range( start, end-start )
return bitsets | [
"Read a file into a dictionary of bitsets"
] |
Please provide a description of the function:def binned_bitsets_from_list( list=[] ):
last_chrom = None
last_bitset = None
bitsets = dict()
for l in list:
chrom = l[0]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet(MAX)
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( l[1] ), int( l[2] )
last_bitset.set_range( start, end - start )
return bitsets | [
"Read a list into a dictionary of bitsets"
] |
Please provide a description of the function:def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2):
bitset = BinnedBitSet( MAX )
for line in f:
if line.startswith("#"): continue
fields = line.split()
if fields[chrom_col] == chrom:
start, end = int( fields[start_col] ), int( fields[end_col] )
bitset.set_range( start, end-start )
return bitset | [
"Read a file by chrom name into a bitset"
] |
Please provide a description of the function:def chop_list( blocks, src, start, end ):
new_blocks = []
for block in blocks:
ref = block.get_component_by_src( src )
# If the reference component is on the '-' strand we should complement the interval
if ref.strand == '-':
slice_start = max( ref.src_size - end, ref.start )
slice_end = max( ref.src_size - start, ref.end )
else:
slice_start = max( start, ref.start )
slice_end = min( end, ref.end )
sliced = block.slice_by_component( ref, slice_start, slice_end )
good = True
for c in sliced.components:
if c.size < 1:
good = False
if good:
new_blocks.append( sliced )
return new_blocks | [
"\n For each alignment block in the sequence `blocks`, chop out the portion\n of the block that overlaps the interval [`start`,`end`) in the\n component/species named `src`.\n "
] |
Please provide a description of the function:def _double_as_bytes(dval):
"Use struct.unpack to decode a double precision float into eight bytes"
tmp = list(struct.unpack('8B',struct.pack('d', dval)))
if not _big_endian:
tmp.reverse()
return tmp | [] |
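A worked example: 1.0 is 0x3FF0000000000000 in IEEE 754 double precision, so

print(_double_as_bytes(1.0))  # [63, 240, 0, 0, 0, 0, 0, 0], i.e. [0x3f, 0xf0, 0, ...]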
Please provide a description of the function:def _mantissa(dval):
bb = _double_as_bytes(dval)
mantissa = bb[1] & 0x0f << 48
mantissa += bb[2] << 40
mantissa += bb[3] << 32
mantissa += bb[4]
return mantissa | [
"Extract the _mantissa bits from a double-precision floating\n point value."
] |
Please provide a description of the function:def _zero_mantissa(dval):
bb = _double_as_bytes(dval)
return ((bb[1] & 0x0f) | reduce(operator.or_, bb[2:])) == 0 | [
"Determine whether the mantissa bits of the given double are all\n zero."
] |
Please provide a description of the function:def load_scores_wiggle( fname ):
scores_by_chrom = dict()
for chrom, pos, val in bx.wiggle.Reader( misc.open_compressed( fname ) ):
if chrom not in scores_by_chrom:
scores_by_chrom[chrom] = BinnedArray()
scores_by_chrom[chrom][pos] = val
return scores_by_chrom | [
"\n Read a wiggle file and return a dict of BinnedArray objects keyed \n by chromosome.\n "
] |
Please provide a description of the function:def offsets_for_max_size( max_size ):
for i, max in enumerate( reversed( BIN_OFFSETS_MAX ) ):
if max_size < max:
break
else:
raise Exception( "%d is larger than the maximum possible size (%d)" % ( max_size, BIN_OFFSETS_MAX[0] ) )
return BIN_OFFSETS[ ( len(BIN_OFFSETS) - i - 1 ) : ] | [
"\n Return the subset of offsets needed to contain intervals over (0,max_size)\n "
] |
Please provide a description of the function:def bin_for_range( start, end, offsets=None ):
if offsets is None:
offsets = BIN_OFFSETS
start_bin, end_bin = start, max(start, end - 1)
start_bin >>= BIN_FIRST_SHIFT
end_bin >>= BIN_FIRST_SHIFT
for offset in offsets:
if start_bin == end_bin:
return offset + start_bin
else:
start_bin >>= BIN_NEXT_SHIFT
end_bin >>= BIN_NEXT_SHIFT
raise Exception("Interval (%d,%d) out of range" % (start, end)) | [
"Find the smallest bin that can contain interval (start,end)"
] |
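A hedged walk-through with the usual UCSC-style constants (BIN_FIRST_SHIFT = 17, i.e. 128 kb leaf bins; BIN_NEXT_SHIFT = 3): an interval that fits inside one leaf bin returns on the first iteration:

# start, end = 1000, 2000
# start_bin = 1000 >> 17 == 0; end_bin = 1999 >> 17 == 0
# equal on the first pass, so the result is offsets[0] + 0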
Please provide a description of the function:def new( self, min, max ):
# Ensure the range will fit given the shifting strategy
assert MIN <= min <= max <= MAX
self.min = min
self.max = max
# Determine offsets to use
self.offsets = offsets_for_max_size( max )
# Determine the largest bin we will actually use
self.bin_count = bin_for_range( max - 1, max, offsets = self.offsets ) + 1
# Create empty bins
self.bins = [ [] for i in range( self.bin_count ) ] | [
"Create an empty index for intervals in the range min, max"
] |
Please provide a description of the function:def add( self, start, end, val ):
insort( self.bins[ bin_for_range( start, end, offsets=self.offsets ) ], ( start, end, val ) )
assert val >= 0
self.max_val = max(self.max_val,val) | [
"Add the interval (start,end) with associated value val to the index"
] |
Please provide a description of the function:def seek( self, offset, whence=0 ):
# Determine absolute target position
if whence == 0:
target_pos = offset
elif whence == 1:
target_pos = self.file_pos + offset
elif whence == 2:
target_pos = self.size - offset
else:
raise Exception( "Invalid `whence` argument: %r", whence )
# Check if this is a noop
if target_pos == self.file_pos:
return
# Verify it is valid
assert 0 <= target_pos < self.size, "Attempt to seek outside file"
# Move the position
self.file_pos = target_pos
# Mark as dirty, the next time a read is done we need to actually
# move the position in the bzip2 file
self.dirty = True | [
"\n Move the file pointer to a particular offset.\n "
] |
Please provide a description of the function:def mtime(self, key):
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
return node.mtime | [
"Return the last modification time for the cache record with key.\n May be useful for cache instances where the stored values can get\n 'stale', such as caching file or network resource contents."
] |
Please provide a description of the function:def class_space(classlevel=3):
"returns the calling class' name and dictionary"
frame = sys._getframe(classlevel)
classname = frame.f_code.co_name
classdict = frame.f_locals
return classname, classdict | [] |
Please provide a description of the function:def _attribute(permission='rwd', **kwds):
classname, classdict = class_space()
def _property(attrname, default):
propname, attrname = attrname, mangle(classname, attrname)
fget, fset, fdel, doc = None, None, None, propname
if 'r' in permission:
def fget(self):
value = default
try: value = getattr(self, attrname)
except AttributeError: setattr(self, attrname, default)
return value
if 'w' in permission:
def fset(self, value):
setattr(self, attrname, value)
if 'd' in permission:
def fdel(self):
try: delattr(self, attrname)
except AttributeError: pass
# calling fget can restore this attribute, so remove property
delattr(self.__class__, propname)
return property(fget=fget, fset=fset, fdel=fdel, doc=doc)
for attrname, default in kwds.items():
classdict[attrname] = _property(attrname, default) | [
"returns one property for each (key,value) pair in kwds;\n each property provides the specified level of access(permission):\n 'r': readable, 'w':writable, 'd':deletable\n "
] |
Please provide a description of the function:def parse_a_stanza(self):
# 's' line -- score, 1 field
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "s"), "s line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
try: score = int(fields[1])
except: score = float(fields[1])
# 'b' line -- begin positions in seqs, 2 fields
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "b"), "b line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
beg1 = int(fields[1]) - 1
beg2 = int(fields[2]) - 1
# 'e' line -- end positions in seqs, 2 fields
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "e"), "e line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
len1 = int(fields[1]) - beg1
len2 = int(fields[2]) - beg2
# 'l' lines
pieces = []
while (True):
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
if (fields[0] != "l"):
break
start1 = int(fields[1]) - 1
start2 = int(fields[2]) - 1
length = int(fields[3]) - start1
length2 = int(fields[4]) - start2
try: pctId = int(fields[5])
except: pctId = float(fields[5])
assert (length2 == length), "length mismatch in a-stanza"
pieces.append((start1+self.seq1_start,start2+self.seq2_start,length,pctId))
assert (line == "}"), "improper a-stanza terminator (line %d, \"%s\")" \
% (self.lineNumber,line)
return (score,pieces) | [
"returns the pair (score,pieces)\n\t\t where pieces is a list of ungapped segments (start1,start2,length,pctId)\n\t\t with start1,start2 origin-0"
] |
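For reference, a hedged sketch of the blastz/lav a-stanza this parser consumes (coordinates are origin-1 in the file; the bare '}' line terminates the 'l' records):

a {
  s 3500
  b 1 11
  e 100 110
  l 1 11 50 60 96
  l 51 61 100 110 94
}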
Please provide a description of the function:def build_alignment(self,score,pieces):
# build text
self.open_seqs()
text1 = text2 = ""
end1 = end2 = None
for (start1,start2,length,pctId) in pieces:
if (end1 != None):
if (start1 == end1): # insertion in sequence 2
text1 += self.seq1_gap * (start2-end2)
text2 += self.seq2_file.get(end2,start2-end2)
else: # insertion in sequence 1
text1 += self.seq1_file.get(end1,start1-end1)
text2 += self.seq2_gap * (start1-end1)
text1 += self.seq1_file.get(start1,length)
text2 += self.seq2_file.get(start2,length)
end1 = start1 + length
end2 = start2 + length
# create alignment
start1 = pieces[0][0]
start2 = pieces[0][1]
end1 = pieces[-1][0] + pieces[-1][2]
end2 = pieces[-1][1] + pieces[-1][2]
size1 = end1 - start1
size2 = end2 - start2
a = Alignment(score=score,species_to_lengths=self.species_to_lengths)
#if (self.seq1_strand == "-"): start1 = self.seq1_file.length - end1
a.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))
#if (self.seq2_strand == "-"): start2 = self.seq2_file.length - end2
a.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))
return a | [
"converts a score and pieces to an alignment"
] |
Please provide a description of the function:def bits_clear_in_range( bits, range_start, range_end ):
end = range_start
while 1:
start = bits.next_clear( end )
if start >= range_end: break
end = min( bits.next_set( start ), range_end )
yield start, end | [
"\n Yield start,end tuples for each span of clear bits in [range_start,range_end)\n "
] |
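Usage sketch, assuming bx-python's BitSet API (next_set returns the bitset size when no later bit is set):

from bx.bitset import BitSet

bits = BitSet(100)
bits.set_range(10, 5)                          # set bits [10, 15)
print(list(bits_clear_in_range(bits, 0, 30)))  # [(0, 10), (15, 30)]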
Please provide a description of the function:def iterprogress( sized_iterable ):
pb = ProgressBar( 0, len( sized_iterable ) )
for i, value in enumerate( sized_iterable ):
yield value
pb.update_and_print( i, sys.stderr ) | [
"\n Iterate something printing progress bar to stdout\n "
] |
Please provide a description of the function:def to_file( Class, dict, file, is_little_endian=True ):
io = BinaryFileWriter( file, is_little_endian=is_little_endian )
start_offset = io.tell()
# Header is of fixed length
io.seek( start_offset + ( 8 * 256 ) )
# For each item, key and value length (written as length prefixed
# strings). We also calculate the subtables on this pass.
# NOTE: This requires the key and value be byte strings, support for
# dealing with encoding specific value types should be
# added to this wrapper
subtables = [ [] for i in range(256) ]
for key, value in dict.items():
pair_offset = io.tell()
io.write_uint32( len( key ) )
io.write_uint32( len( value ) )
io.write( key )
io.write( value )
hash = cdbhash( key )
subtables[ hash % 256 ].append( ( hash, pair_offset ) )
# Save the offset where the subtables will start
subtable_offset = io.tell()
# Write subtables
for subtable in subtables:
if len( subtable ) > 0:
# Construct hashtable to be twice the size of the number
# of items in the subtable, and build it in memory
ncells = len( subtable ) * 2
cells = [ (0,0) for i in range( ncells ) ]
for hash, pair_offset in subtable:
index = ( hash >> 8 ) % ncells
while cells[index][1] != 0:
index = ( index + 1 ) % ncells
# Guaranteed to find a non-empty cell
cells[index] = ( hash, pair_offset )
# Write subtable
for hash, pair_offset in cells:
io.write_uint32( hash )
io.write_uint32( pair_offset )
# Go back and write the header
end_offset = io.tell()
io.seek( start_offset )
index = subtable_offset
for subtable in subtables:
io.write_uint32( index )
io.write_uint32( len( subtable ) * 2 )
# For each cell in the subtable, a hash and a pointer to a value
index += ( len( subtable ) * 2 ) * 8
# Leave fp at end of cdb
io.seek( end_offset ) | [
"\n For constructing a CDB structure in a file. Able to calculate size on\n disk and write to a file\n "
] |
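The cdbhash referenced above is, presumably, the classic DJB/cdb hash (h starts at 5381; h = ((h << 5) + h) ^ byte, kept to 32 bits); a minimal sketch for str keys -- the actual helper may differ:

def cdbhash(s):
    h = 5381
    for c in s:
        h = (((h << 5) + h) ^ ord(c)) & 0xffffffff
    return h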
Please provide a description of the function:def read_len( f ):
mapping = dict()
for line in f:
fields = line.split()
mapping[ fields[0] ] = int( fields[1] )
return mapping | [
"Read a 'LEN' file and return a mapping from chromosome to length"
] |
Please provide a description of the function:def freqs_to_heights( matrix ):
# Columns are sequence positions, rows are symbol counts/frequencies
f = matrix.values.transpose()
n, m = f.shape
# Ensure normalized
f = f / sum( f, axis=0 )
# Shannon entropy (the where replaces 0 with 1 so that '0 log 0 == 0')
H = - sum( f * log2( where( f, f, 1 ) ), axis=0 )
# Height
return transpose( f * ( log2( n ) - H ) ) | [
"\n Calculate logo height using the method of:\n \n Schneider TD, Stephens RM. \"Sequence logos: a new way to display consensus \n sequences.\" Nucleic Acids Res. 1990 Oct 25;18(20):6097-100.\n "
] |
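A worked single-column example: with n = 4 symbols, each symbol's height is f * (log2(n) - H), where H is the column's Shannon entropy:

import numpy as np

f = np.array([0.7, 0.1, 0.1, 0.1])
H = -np.sum(f * np.log2(np.where(f > 0, f, 1)))  # ~1.357 bits
print(f * (np.log2(4) - H))                      # per-symbol heights; total ~0.643 bits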
Please provide a description of the function:def eps_logo( matrix, base_width, height, colors=DNA_DEFAULT_COLORS ):
alphabet = matrix.sorted_alphabet
rval = StringIO()
# Read header and substitute in width / height
header = Template( pkg_resources.resource_string( __name__, "template.ps" ) )
rval.write( header.substitute( bounding_box_width = ceil( base_width * matrix.width ) + PAD,
bounding_box_height = ceil( height ) + PAD ) )
# Determine heights
heights = freqs_to_heights( matrix )
height_scale = height / log2( len( alphabet ) )
# Draw each "row" of the matrix
for i, row in enumerate( heights ):
x = ( i * base_width )
y = 0
for j, base_height in enumerate( row ):
char = alphabet[j]
page_height = height_scale * base_height
# print matrix.alphabet[j], base_height, height_scale, page_height
if page_height > 1:
# Draw letter
rval.write( "%s setrgbcolor\n" % colors.get( char, '0 0 0' ) )
rval.write( "%3.2f " % x )
rval.write( "%3.2f " % y )
rval.write( "%3.2f " % ( x + base_width ) )
rval.write( "%3.2f " % ( y + page_height ) )
rval.write( "(%s) textInBox\n" % char )
y += page_height
rval.write( "showpage" )
return rval.getvalue() | [
"\n Return an EPS document containing a sequence logo for matrix where each\n bases is shown as a column of `base_width` points and the total logo\n height is `height` points. If `colors` is provided it is a mapping from\n characters to rgb color strings. \n "
] |
Please provide a description of the function:def transform(elem, chain_CT_CQ, max_gap):
(chain, CT, CQ) = chain_CT_CQ
start, end = max(elem['start'], chain.tStart) - chain.tStart, min(elem['end'], chain.tEnd) - chain.tStart
assert np.all( (CT[:,1] - CT[:,0]) == (CQ[:,1] - CQ[:,0]) )
to_chrom = chain.qName
to_gab_start = chain.qStart
start_idx = np.where( CT[:,1] > start )[0][0]
end_idx = np.where( CT[:,0] < end )[0][-1]
if start_idx > end_idx: #maps to a gap region on the other species
return []
## apply the gap threshold
if max_gap >= 0 and start_idx < end_idx - 1:
if np.max(CT[(start_idx+1):end_idx,0] - CT[start_idx:(end_idx-1),1]) > max_gap or np.max(CQ[(start_idx+1):end_idx,0] - CQ[start_idx:(end_idx-1),1]) > max_gap:
return []
assert start < CT[start_idx, 1]
assert CT[end_idx, 0] < end
to_start = CQ[start_idx, 0] + max(0, start - CT[start_idx,0]) # correct if on middle of interval
to_end = CQ[end_idx, 1] - max(0, CT[end_idx, 1] - end) # idem
if start_idx == end_idx: #elem falls in a single run of matches
slices = [(to_start, to_end)]
else:
slices = [(to_start, CQ[start_idx,1])]
slices += [(CQ[i,0], CQ[i,1]) for i in range(start_idx+1, end_idx)]
slices.append( (CQ[end_idx,0], to_end) )
if chain.qStrand == '-':
Sz = chain.qEnd - chain.qStart
slices = [(Sz-t[1], Sz-t[0]) for t in slices]
return [(to_chrom, to_gab_start + t[0], to_gab_start + t[1], elem['id']) for t in slices] | [
"transform the coordinates of this elem into the other species.\n\n elem intersects this chain's ginterval.\n :return: a list of the type [(to_chr, start, end, elem[id]) ... ]"
] |
Please provide a description of the function:def union_elements(elements):
if len(elements) < 2: return elements
assert set( [e[3] for e in elements] ) == set( [elements[0][3]] ), "more than one id"
el_id = elements[0][3]
unioned_elements = []
for ch, chgrp in groupby(elements, key=itemgetter(0)):
for (s, e) in elem_u( np.array([itemgetter(1, 2)(_) for _ in chgrp], dtype=np.uint) ):
if (s < e):
unioned_elements.append( (ch, s, e, el_id) )
assert len(unioned_elements) <= len(elements)
return unioned_elements | [
"elements = [(chr, s, e, id), ...], this is to join elements that have a\n deletion in the 'to' species\n "
] |
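Expected behavior on a hypothetical input (assuming elem_u merges overlapping [start, end) rows per chromosome):

elems = [("chr1", 0, 10, "e1"), ("chr1", 5, 20, "e1"), ("chr2", 3, 7, "e1")]
# union_elements(elems) -> [("chr1", 0, 20, "e1"), ("chr2", 3, 7, "e1")]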
Please provide a description of the function:def transform_file(ELEMS, ofname, EPO, TREE, opt):
"transform/map the elements of this file and dump the output on 'ofname'"
BED4_FRM = "%s\t%d\t%d\t%s\n"
log.info("%s (%d) elements ..." % (opt.screen and "screening" or "transforming", ELEMS.shape[0]))
with open(ofname, 'w') as out_fd:
if opt.screen:
for elem in ELEMS.flat:
matching_blocks = [attrgetter("value")(_) for _ in TREE.find(elem['chrom'], elem['start'], elem['end'])]
assert set( matching_blocks ) <= set( EPO.keys() )
if matching_blocks:
out_fd.write(BED4_FRM % elem)
else:
for chrom in set( ELEMS['chrom'] ):
transform_by_chrom(EPO,
ELEMS[ELEMS['chrom'] == chrom],
TREE, chrom, opt, out_fd)
log.info("DONE!") | [] |
Please provide a description of the function:def loadChains(path):
"name says it."
EPO = epo.Chain._parse_file(path, True)
## convert coordinates w.r.t the forward strand (into slices)
## compute cummulative intervals
for i in range( len(EPO) ):
ch, S, T, Q = EPO[i]
if ch.tStrand == '-':
ch = ch._replace(tEnd = ch.tSize - ch.tStart,
tStart = ch.tSize - ch.tEnd)
if ch.qStrand == '-':
ch = ch._replace(qEnd = ch.qSize - ch.qStart,
qStart = ch.qSize - ch.qEnd)
EPO[i] = (ch,
epo.cummulative_intervals(S, T),
epo.cummulative_intervals(S, Q)
)
##now each element of epo is (chain_header, target_intervals, query_intervals)
assert all( t[0].tStrand == '+' for t in EPO ), "all target strands should be +"
return EPO | [] |
Please provide a description of the function:def loadFeatures(path, opt):
log.info("loading from %s ..." % path)
data = []
if opt.in_format == "BED":
with open(path) as fd:
for line in fd:
cols = line.split()
data.append( (cols[0], int(cols[1]), int(cols[2]), cols[3]) )
data = np.array(data, dtype=elem_t)
else:
with open(path) as fd:
for line in fd:
cols = line.split()
data.append( (cols[0], int(cols[1]), int(cols[2]), cols[3], int(cols[4]),
cols[5], float(cols[6]), float(cols[7]), float(cols[8]),
int(cols[-1])+int(cols[1])) )
data = np.array(data, dtype=narrowPeak_t)
return data | [
"\n Load features. For BED, only BED4 columns are loaded.\n For narrowPeak, all columns are loaded.\n "
] |
Please provide a description of the function:def add(self, chrom, element):
self._trees.setdefault(chrom, IntervalTree()).insert_interval( element ) | [
"insert an element. use this method as the IntervalTree one.\n this will simply call the IntervalTree.add method on the right tree\n\n :param chrom: chromosome\n :param element: the argument of IntervalTree.insert_interval\n :return: None\n "
] |
Please provide a description of the function:def find(self, chrom, start, end):
tree = self._trees.get( chrom, None )
if tree:
return tree.find( start, end )
#return always a list
return [] | [
"find the intersecting elements\n\n :param chrom: chromosome\n :param start: start\n :param end: end\n :return: a list of intersecting elements"
] |
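Usage sketch for the add/find pair, assuming bx-python's Interval type:

from bx.intervals.intersection import Interval

gtree = GIntervalTree()
gtree.add("chr1", Interval(100, 200, value="blockA"))
print([iv.value for iv in gtree.find("chr1", 150, 160)])  # ['blockA']
print(gtree.find("chr2", 0, 10))                          # [] for unknown chromosomes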
Please provide a description of the function:def from_rows( Class, alphabet, rows ):
# Sorted alphabet
sorted_alphabet = sorted( alphabet )
# Character to index mapping (initialized to -1)
char_to_index = zeros( (256), int16 ) - 1
for i, ch in enumerate( sorted_alphabet ):
char_to_index[ ord(ch) ] = i
# Array
values = zeros( ( len( rows) , len( alphabet ) ), float32 )
for i, row in enumerate( rows ):
assert len( row ) == len( alphabet )
for ch, val in zip( alphabet, row ):
values[i, char_to_index[ord(ch)]] = val
# Matrix
matrix = Class()
matrix.alphabet = alphabet
matrix.sorted_alphabet = sorted_alphabet
matrix.char_to_index = char_to_index
matrix.values = values
return matrix | [
"\n Create a new matrix for a sequence over alphabet `alphabet` taking \n values from `rows` which is a list whose length is the width of the\n matrix, and whose elements are lists of values associated with each\n character (in the order those characters appear in alphabet). \n "
] |
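A small sketch, using a hypothetical concrete subclass named Matrix:

m = Matrix.from_rows("ACGT", [[1, 2, 3, 4],
                              [4, 3, 2, 1]])
# m.values has shape (2, 4); columns follow sorted(alphabet)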
Please provide a description of the function:def create_from_other( Class, other, values=None ):
m = Class()
m.alphabet = other.alphabet
m.sorted_alphabet = other.sorted_alphabet
m.char_to_index = other.char_to_index
if values is not None:
m.values = values
else:
m.values = other.values
return m | [
"\n Create a new Matrix with attributes taken from `other` but with the \n values taken from `values` if provided\n "
] |