Columns: rem (removed code), add (added code), context (surrounding code).
msg['From'] = '[email protected]'
msg['From'] = '[email protected]'
def mail(self, to, subject, text): # http://kutuma.blogspot.com/2007/08/sending-emails-via-gmail-with-python.html msg = MIMEMultipart()
return os.getcwd()
return "Not implemented, for now"
def index(self, format='html'): """GET /uploads: All items in the collection""" # url('uploads') return os.getcwd()
permanent_file = open(archive.filename.lstrip(os.sep),'w')
permanent_file = open(os.path.join(self.main_root + '/trunk/openaddresses/uploads',archive.filename.lstrip(os.sep)), 'w')
def create(self): """POST /uploads: Create a new item""" archive = request.POST['uploaded_file'] email = request.POST['email'] permanent_file = open(archive.filename.lstrip(os.sep),'w') shutil.copyfileobj(archive.file, permanent_file) archive.file.close() permanent_file.close() self.mail(email,"OpenAddresses.org upload confirmation","The file " + permanent_file.name + " has been uploaded. Thanks ! The OpenAddresses.org team.") self.mail("[email protected]","OpenAddresses.org new file uploaded !","The file " + permanent_file.name + " has been uploaded by " + email) return dumps({"success": True})
return dumps({"success": True})
return dumps({"success": True, "filename": permanent_file.name})
def create(self): """POST /uploads: Create a new item""" archive = request.POST['uploaded_file'] email = request.POST['email'] permanent_file = open(archive.filename.lstrip(os.sep),'w') shutil.copyfileobj(archive.file, permanent_file) archive.file.close() permanent_file.close() self.mail(email,"OpenAddresses.org upload confirmation","The file " + permanent_file.name + " has been uploaded. Thanks ! The OpenAddresses.org team.") self.mail("[email protected]","OpenAddresses.org new file uploaded !","The file " + permanent_file.name + " has been uploaded by " + email) return dumps({"success": True})
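Note that both versions of the upload handler still derive the on-disk name from the client-supplied filename, and lstrip(os.sep) only trims leading separators, so a relative component such as '../' would survive. A minimal hardening sketch (hypothetical, not the project's code) that keeps only the final path component:

    import os

    def safe_upload_name(client_filename):
        # Normalize Windows-style separators, then keep only the last
        # component so '../../etc/passwd' collapses to 'passwd'.
        name = os.path.basename(client_filename.replace('\\', '/'))
        if not name or name in ('.', '..'):
            raise ValueError('unusable upload filename: %r' % client_filename)
        return name

    # e.g. open(os.path.join(upload_dir, safe_upload_name(archive.filename)), 'w')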
sqlQuery = sqlQuery + " WHERE tsvector_street_housenumber_city @@ to_tsquery('" + tsquery + "')"
sqlQuery = sqlQuery + " WHERE tsvector_street_housenumber_city @@ to_tsquery('english', '" + tsquery + "')"
def fullTextSearch(self,request): # addresses/fullTextSearch?fields=street,city,housenumber&query=ch%20du%2028&tolerance=0.005&easting=6.62379551&northing=46.51687241&limit=20&distinct=true # Read request parameters fields = request.params['fields']
sqlQuery = sqlQuery + " WHERE tsvector_street @@ to_tsquery('" + tsquery + "')"
sqlQuery = sqlQuery + " WHERE tsvector_street @@ to_tsquery('english','" + tsquery + "')"
def fullTextSearch(self,request): # addresses/fullTextSearch?fields=street,city,housenumber&query=ch%20du%2028&tolerance=0.005&easting=6.62379551&northing=46.51687241&limit=20&distinct=true # Read request parameters fields = request.params['fields']
sqlQuery = sqlQuery + " WHERE to_tsvector(" + tsvector + ") @@ to_tsquery('" + tsquery + "')"
sqlQuery = sqlQuery + " WHERE to_tsvector(" + tsvector + ") @@ to_tsquery('english','" + tsquery + "')"
def fullTextSearch(self,request): # addresses/fullTextSearch?fields=street,city,housenumber&query=ch%20du%2028&tolerance=0.005&easting=6.62379551&northing=46.51687241&limit=20&distinct=true # Read request parameters fields = request.params['fields']
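All three variants above still interpolate the user-derived tsquery string straight into the SQL text, which leaves the endpoint open to SQL injection. A minimal sketch of the same predicate with a bound parameter, assuming a psycopg2 cursor (the driver, DSN, and table name here are assumptions, not the project's code):

    import psycopg2

    conn = psycopg2.connect('dbname=openaddresses')  # hypothetical DSN
    cur = conn.cursor()
    tsquery = 'ch:* & du:* & 28'
    # The driver quotes the value; tsquery never touches the SQL text.
    cur.execute(
        "SELECT * FROM addresses"
        " WHERE tsvector_street_housenumber_city @@ to_tsquery('english', %s)",
        (tsquery,))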
responseElements = responseText.split('\'') housenumber = responseElements[7] street = responseElements[3] postcode = responseElements[5] city = responseElements[11]
responseElements = responseText.split('\n') for element in responseElements: if element.rfind('strname1') > -1: strname1_s = element.split('=') street = strname1_s[1].lstrip().lstrip('\'').rstrip().rstrip('\'') if element.rfind('plz4') > -1: plz4_s = element.split('=') postcode = plz4_s[1].lstrip().lstrip('\'').rstrip().rstrip('\'') if element.rfind('deinr') > -1: deinr_s = element.split('=') housenumber = deinr_s[1].lstrip().lstrip('\'').rstrip().rstrip('\'') if element.rfind('plzname') > -1: plzname_s = element.split('=') city = plzname_s[1].lstrip().lstrip('\'').rstrip().rstrip('\'')
def index(self): if 'latitude' in request.params and 'longitude' in request.params: latitude = float(request.params['latitude']) longitude = float(request.params['longitude']) if 'easting' in request.params: easting = float(request.params['easting']) if 'northing' in request.params: northing = float(request.params['northing'])
tsvector = 'tsvector_street_housenumber_city'
tsvector = "to_tsvector('english', coalesce(street,'') || ' ' || coalesce(housenumber,'') || ' ' || coalesce(city,''))"
def index(self, format='json'): """GET /: return all features.""" # If no filter argument is passed to the protocol index method # then the default MapFish filter is used. This default filter # is constructed based on the box, lon, lat, tolerance GET # params. # # If you need your own filter with application-specific params # taken into account, create your own filter and pass it to the # protocol index method. # # E.g. # # default_filter = create_default_filter( # request, Address # ) # compare_filter = comparison.Comparison( # comparison.Comparison.ILIKE, # Address.mycolumnname, # value=myvalue # ) # filter = logical.Logical(logical.Logical.AND, [default_filter, compare_filter]) # return self.protocol.index(request, response, format=format, filter=filter) # # # You can also create filters using sqlalchemy syntax. # It is possible for example to mix a custom sqlalchemy filter # with the default mapfish filter. # # E.g. # # from sqlalchemy.sql import and_ # # default_filter = create_default_filter( # request, Address # ) # compare_filter = Address.mycolumnname.ilike('%myvalue%') # if default_filter is not None: # filter = and_(default_filter.to_sql_expr(), compare_filter) # else: # filter = compare_filter # return self.protocol.index(request, response, format=format, filter=filter) if 'query' in request.params: # http://lowmanio.co.uk/blog/entries/postgresql-full-text-search-and-sqlalchemy/ terms = request.params.get('query').split() terms = ' & '.join([term + ('' if term.isdigit() else ':*') for term in terms])
tsvector = 'tsvector_street'
tsvector = "to_tsvector('english', coalesce(street,''))"
def index(self, format='json'): """GET /: return all features.""" # If no filter argument is passed to the protocol index method # then the default MapFish filter is used. This default filter # is constructed based on the box, lon, lat, tolerance GET # params. # # If you need your own filter with application-specific params # taken into account, create your own filter and pass it to the # protocol index method. # # E.g. # # default_filter = create_default_filter( # request, Address # ) # compare_filter = comparison.Comparison( # comparison.Comparison.ILIKE, # Address.mycolumnname, # value=myvalue # ) # filter = logical.Logical(logical.Logical.AND, [default_filter, compare_filter]) # return self.protocol.index(request, response, format=format, filter=filter) # # # You can also create filters using sqlalchemy syntax. # It is possible for example to mix a custom sqlalchemy filter # with the default mapfish filter. # # E.g. # # from sqlalchemy.sql import and_ # # default_filter = create_default_filter( # request, Address # ) # compare_filter = Address.mycolumnname.ilike('%myvalue%') # if default_filter is not None: # filter = and_(default_filter.to_sql_expr(), compare_filter) # else: # filter = compare_filter # return self.protocol.index(request, response, format=format, filter=filter) if 'query' in request.params: # http://lowmanio.co.uk/blog/entries/postgresql-full-text-search-and-sqlalchemy/ terms = request.params.get('query').split() terms = ' & '.join([term + ('' if term.isdigit() else ':*') for term in terms])
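For reference, the term-building line in the context above turns the example query from the URL comment into a prefix-matching tsquery, while purely numeric terms (house numbers) are kept exact. A quick interactive check:

    >>> terms = 'ch du 28'.split()
    >>> ' & '.join([term + ('' if term.isdigit() else ':*') for term in terms])
    'ch:* & du:* & 28'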
limit = request.params['limit']
limit = int(request.params['limit'])
def fullTextSearch(self,request): # addresses/fullTextSearch?fields=street,city,housenumber&query=ch%20du%2028&tolerance=0.005&easting=6.62379551&northing=46.51687241&limit=20&distinct=true # Read request parameters fields = request.params['fields']
yield UInt16(self, "left", "Text Grid Left") yield UInt16(self, "top", "Text Grid Top") yield UInt16(self, "width", "Text Grid Width") yield UInt16(self, "height", "Text Grid Height")
yield UInt16(parent, "left", "Text Grid Left") yield UInt16(parent, "top", "Text Grid Top") yield UInt16(parent, "width", "Text Grid Width") yield UInt16(parent, "height", "Text Grid Height")
def parseTextExtension(parent): yield UInt8(parent, "block_size", "Block Size") yield UInt16(self, "left", "Text Grid Left") yield UInt16(self, "top", "Text Grid Top") yield UInt16(self, "width", "Text Grid Width") yield UInt16(self, "height", "Text Grid Height") yield UInt8(parent, "cell_width", "Character Cell Width") yield UInt8(parent, "cell_height", "Character Cell Height") yield UInt8(parent, "fg_color", "Foreground Color Index") yield UInt8(parent, "bg_color", "Background Color Index") while True: field = PascalString8(parent, "comment[]", strip=" \0\r\n\t") yield field if field.length == 0: break
cvt_time=lambda v:datetime(2001,1,1) + timedelta(seconds=v)
def cvt_time(v): v=timedelta(seconds=v) epoch2001 = datetime(2001,1,1) epoch1970 = datetime(1970,1,1) if (epoch2001 + v - datetime.today()).days > 5*365: return epoch1970 + v return epoch2001 + v
def createFields(self): yield Enum(Bits(self, "marker_type", 4), {0: "Simple", 1: "Int", 2: "Real", 3: "Date", 4: "Data", 5: "ASCII String", 6: "UTF-16-BE String", 8: "UID", 10: "Array", 13: "Dict",}) markertype = self['marker_type'].value if markertype == 0: # Simple (Null) yield Enum(Bits(self, "value", 4), {0: "Null", 8: "False", 9: "True", 15: "Fill Byte",}) if self['value'].display == "False": self.xml=lambda prefix:prefix + "<false/>" elif self['value'].display == "True": self.xml=lambda prefix:prefix + "<true/>" else: self.xml=lambda prefix:prefix + ""
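The replacement cvt_time guesses between Apple's 2001 epoch (CFAbsoluteTime, the usual binary-plist convention) and the Unix 1970 epoch: an offset that would land more than roughly five years in the future when read against 2001 is assumed to be a Unix timestamp. A small self-contained demo of that heuristic:

    from datetime import datetime, timedelta

    def cvt_time(v):  # mirrors the patched helper above
        v = timedelta(seconds=v)
        epoch2001 = datetime(2001, 1, 1)
        epoch1970 = datetime(1970, 1, 1)
        if (epoch2001 + v - datetime.today()).days > 5 * 365:
            return epoch1970 + v
        return epoch2001 + v

    print(cvt_time(0))           # 2001-01-01 00:00:00 -- Apple epoch
    print(cvt_time(1500000000))  # 2017-07-14 02:40:00 -- read as Unix time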
self.xml=lambda prefix:prefix + "<date>%s</date>"%(cvt_time(self['value'].value).isoformat())
self.xml=lambda prefix:prefix + "<date>%sZ</date>"%(cvt_time(self['value'].value).isoformat())
def createFields(self): yield Enum(Bits(self, "marker_type", 4), {0: "Simple", 1: "Int", 2: "Real", 3: "Date", 4: "Data", 5: "ASCII String", 6: "UTF-16-BE String", 8: "UID", 10: "Array", 13: "Dict",}) markertype = self['marker_type'].value if markertype == 0: # Simple (Null) yield Enum(Bits(self, "value", 4), {0: "Null", 8: "False", 9: "True", 15: "Fill Byte",}) if self['value'].display == "False": self.xml=lambda prefix:prefix + "<false/>" elif self['value'].display == "True": self.xml=lambda prefix:prefix + "<true/>" else: self.xml=lambda prefix:prefix + ""
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('iso-8859-1'))
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&amp;').encode('iso-8859-1'))
def createFields(self): yield Enum(Bits(self, "marker_type", 4), {0: "Simple", 1: "Int", 2: "Real", 3: "Date", 4: "Data", 5: "ASCII String", 6: "UTF-16-BE String", 8: "UID", 10: "Array", 13: "Dict",}) markertype = self['marker_type'].value if markertype == 0: # Simple (Null) yield Enum(Bits(self, "value", 4), {0: "Null", 8: "False", 9: "True", 15: "Fill Byte",}) if self['value'].display == "False": self.xml=lambda prefix:prefix + "<false/>" elif self['value'].display == "True": self.xml=lambda prefix:prefix + "<true/>" else: self.xml=lambda prefix:prefix + ""
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('utf-8'))
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&amp;').encode('utf-8'))
def createFields(self): yield Enum(Bits(self, "marker_type", 4), {0: "Simple", 1: "Int", 2: "Real", 3: "Date", 4: "Data", 5: "ASCII String", 6: "UTF-16-BE String", 8: "UID", 10: "Array", 13: "Dict",}) markertype = self['marker_type'].value if markertype == 0: # Simple (Null) yield Enum(Bits(self, "value", 4), {0: "Null", 8: "False", 9: "True", 15: "Fill Byte",}) if self['value'].display == "False": self.xml=lambda prefix:prefix + "<false/>" elif self['value'].display == "True": self.xml=lambda prefix:prefix + "<true/>" else: self.xml=lambda prefix:prefix + ""
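Escaping only '&' fixes the most common breakage but still emits invalid XML for values containing '<' or '>'. The standard library already provides a complete escaper for all three characters; a minimal sketch:

    from xml.sax.saxutils import escape

    value = 'AT&T <staff>'
    print('<string>%s</string>' % escape(value))
    # -> <string>AT&amp;T &lt;staff&gt;</string>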
while field:
while field is not None:
def _getPath(self): if not self._parent: return '/' names = [] field = self while field: names.append(field._name) field = field._parent names[-1] = '' return '/'.join(reversed(names))
addr_text = ''
addr_text_list = []
def update_addr_view(self): addr_text = '' for i in xrange(self.view.get_height_chars()): addr_text += self.format_addr(self.pos+i*self.view.get_width_chars())+'\n' self.view.addr_view.SetValue(addr_text)
addr_text += self.format_addr(self.pos+i*self.view.get_width_chars())+'\n' self.view.addr_view.SetValue(addr_text)
addr_text_list.append( self.format_addr(self.pos+i*self.view.get_width_chars())+'\n') self.view.addr_view.SetValue(''.join(addr_text_list))
def update_addr_view(self): addr_text = '' for i in xrange(self.view.get_height_chars()): addr_text += self.format_addr(self.pos+i*self.view.get_width_chars())+'\n' self.view.addr_view.SetValue(addr_text)
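The change swaps repeated '+=' string concatenation, which can degrade to quadratic time as the buffer is recopied on every pass, for one join over accumulated parts. The idiom in isolation:

    parts = []
    for i in range(4):
        parts.append('row %d\n' % i)  # cheap amortized append
    text = ''.join(parts)             # one linear-time concatenation
    print(text)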
self.trackCommon(track, video)
def processVideo(self, track): video = Metadata(self) try: self.trackCommon(track, video) video.compression = track["CodecID/string"].value if "Video" in track: video.width = track["Video/PixelWidth/unsigned"].value video.height = track["Video/PixelHeight/unsigned"].value except MissingField: pass self.addGroup("video[]", video, "Video stream")
try: self.trackCommon(track, audio) if "Audio" in track: audio.sample_rate = track["Audio/SamplingFrequency/float"].value
self.trackCommon(track, audio) if "Audio" in track: frequency = self.getDouble(track, "Audio/SamplingFrequency") if frequency is not None: audio.sample_rate = frequency if "Audio/Channels/unsigned" in track:
def processAudio(self, track): audio = Metadata(self) try: self.trackCommon(track, audio) if "Audio" in track: audio.sample_rate = track["Audio/SamplingFrequency/float"].value audio.nb_channel = track["Audio/Channels/unsigned"].value audio.compression = track["CodecID/string"].value except MissingField: pass self.addGroup("audio[]", audio, "Audio stream")
except MissingField: pass
def processAudio(self, track): audio = Metadata(self) try: self.trackCommon(track, audio) if "Audio" in track: audio.sample_rate = track["Audio/SamplingFrequency/float"].value audio.nb_channel = track["Audio/Channels/unsigned"].value audio.compression = track["CodecID/string"].value except MissingField: pass self.addGroup("audio[]", audio, "Audio stream")
self.trackCommon(track, sub)
def processSubtitle(self, track): sub = Metadata(self) try: self.trackCommon(track, sub) sub.compression = track["CodecID/string"].value except MissingField: pass self.addGroup("subtitle[]", sub, "Subtitle")
@fault_tolerant def readDuration(self, duration, timecode_scale): seconds = duration * timecode_scale self.duration = timedelta(seconds=seconds)
def processSimpleTag(self, tag): if "TagName/unicode" not in tag \ or "TagString/unicode" not in tag: return name = tag["TagName/unicode"].value if name not in self.tag_key: return key = self.tag_key[name] value = tag["TagString/unicode"].value setattr(self, key, value)
timecode_scale = info["TimecodeScale/unsigned"].value * 1e-9 if "Duration/float" in info: self.readDuration(info["Duration/float"].value, timecode_scale) elif "Duration/double" in info: self.readDuration(info["Duration/double"].value, timecode_scale)
duration = self.getDouble(info, "Duration") if duration is not None: try: seconds = duration * info["TimecodeScale/unsigned"].value * 1e-9 self.duration = timedelta(seconds=seconds) except OverflowError: pass
def processInfo(self, info): if "TimecodeScale/unsigned" in info: timecode_scale = info["TimecodeScale/unsigned"].value * 1e-9 if "Duration/float" in info: self.readDuration(info["Duration/float"].value, timecode_scale) elif "Duration/double" in info: self.readDuration(info["Duration/double"].value, timecode_scale) if "DateUTC/date" in info: try: self.creation_date = dateToDatetime(info["DateUTC/date"].value) except OverflowError: pass if "WritingApp/unicode" in info: self.producer = info["WritingApp/unicode"].value if "MuxingApp/unicode" in info: self.producer = info["MuxingApp/unicode"].value if "Title/unicode" in info: self.title = info["Title/unicode"].value
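In Matroska, TimecodeScale is the tick length in nanoseconds (1,000,000 by default, i.e. one-millisecond ticks) and Duration is a tick count, hence the duration * scale * 1e-9 conversion used in both versions. A worked example with the default scale:

    from datetime import timedelta

    duration_ticks = 123456.0  # Duration/float from the Info element
    timecode_scale = 1000000   # TimecodeScale/unsigned, default: 1 ms ticks
    seconds = duration_ticks * timecode_scale * 1e-9
    print(timedelta(seconds=seconds))  # 0:02:03.456000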
"stbl": (AtomList, "stbl", ""),
"stbl": (AtomList, "stbl", "Sample Table"), "stco": (STCO, "stsd", "Sample Table Chunk Offset"), "stsd": (STSD, "stsd", "Sample Table Sample Description"), "stss": (STSS, "stss", "Sample Table Sync Samples"), "stsz": (STSZ, "stsz", "Sample Table Sizes"),
def createFields(self): yield UInt32(self, "unk") yield AtomList(self, "tags")
"file_ext": ("mka", "mkv"),
"file_ext": ("mka", "mkv", "webm"),
def createFields(self): yield RawInt(self, 'id') yield Unsigned(self, 'size') for val in self.val[1:]: if callable(val): yield val(self) else: while not self.eof: yield EBML(self, val)
return False return self.stream.searchBytes('\x42\x82\x88matroska', 5*8, first._size) is not None
return "First chunk size is invalid" if self[0]['DocType/string'].value not in ('matroska', 'webm'): return "Stream isn't a matroska document." return True
def validate(self): if self.stream.readBits(0, 32, self.endian) != self.EBML_SIGNATURE: return False try: first = self[0] except ParserError: return False if None < self._size < first._size: return False return self.stream.searchBytes('\x42\x82\x88matroska', 5*8, first._size) is not None
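The magic string scanned for in validate() is a serialized EBML DocType element: the two-byte element id 0x4282, a one-byte size with the high bit set (0x80 | 8 = 0x88, announcing eight payload bytes), then the ASCII payload. A sketch reconstructing the pattern:

    doc_type_id = '\x42\x82'                 # EBML id of the DocType element
    size_byte = chr(0x80 | len('matroska'))  # 1-byte EBML size field: '\x88'
    pattern = doc_type_id + size_byte + 'matroska'
    assert pattern == '\x42\x82\x88matroska'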
if hdr['DocType/string'].value != 'matroska': raise ParserError("Stream isn't a matroska document.")
def createFields(self): hdr = EBML(self, ebml) yield hdr if hdr['DocType/string'].value != 'matroska': raise ParserError("Stream isn't a matroska document.")
yield UInt32(parent, "gamma", "Gamma (x10,000)")
yield UInt32(parent, "gamma", "Gamma (x100,000)")
def gammaParse(parent): yield UInt32(parent, "gamma", "Gamma (x10,000)")
return float(parent["gamma"].value) / 10000
return float(parent["gamma"].value) / 100000
def gammaValue(parent): return float(parent["gamma"].value) / 10000
if index+3 < len(text) \
elif index+3 < len(text) \
def parseRange(text, start): r""" >>> parseRange('[a]b', 1) (<RegexRange '[a]'>, 3) >>> parseRange('[a-z]b', 1) (<RegexRange '[a-z]'>, 5) >>> parseRange('[^a-z-]b', 1) (<RegexRange '[^a-z-]'>, 7) >>> parseRange('[^]-]b', 1) (<RegexRange '[^]-]'>, 5) """ index = start char_range = [] exclude = False if text[index] == '^': exclude = True index += 1 if text[index] == ']': char_range.append(RegexRangeCharacter(']')) index += 1 while index < len(text) and text[index] != ']': if index+1 < len(text) \ and text[index] == '-' and text[index+1] == ']': break if index+3 < len(text) \ and text[index+1] == '-' \ and text[index+2] != ']': char_range.append(RegexRangeItem(ord(text[index]), ord(text[index+2]))) index += 3 else: char_range.append(RegexRangeCharacter(text[index])) index += 1 if index < len(text) and text[index] == '-': char_range.append(RegexRangeCharacter('-')) index += 1 if index == len(text) or text[index] != ']': raise SyntaxError('Invalid range: %s' % text[start-1:index]) return RegexRange(char_range, exclude), index+1
if "Duration/float" in info \ and "TimecodeScale/unsigned" in info \ and 0 < info["Duration/float"].value: try: seconds = info["Duration/float"].value * info["TimecodeScale/unsigned"].value * 1e-9 self.duration = timedelta(seconds=seconds) except OverflowError: pass
if "TimecodeScale/unsigned" in info: timecode_scale = info["TimecodeScale/unsigned"].value * 1e-9 if "Duration/float" in info: self.readDuration(info["Duration/float"].value, timecode_scale) elif "Duration/double" in info: self.readDuration(info["Duration/double"].value, timecode_scale)
def processInfo(self, info): if "Duration/float" in info \ and "TimecodeScale/unsigned" in info \ and 0 < info["Duration/float"].value: try: seconds = info["Duration/float"].value * info["TimecodeScale/unsigned"].value * 1e-9 self.duration = timedelta(seconds=seconds) except OverflowError: # Catch OverflowError for timedelta # (long int too large to convert to int) pass if "DateUTC/date" in info: try: self.creation_date = dateToDatetime(info["DateUTC/date"].value) except OverflowError: pass if "WritingApp/unicode" in info: self.producer = info["WritingApp/unicode"].value if "MuxingApp/unicode" in info: self.producer = info["MuxingApp/unicode"].value if "Title/unicode" in info: self.title = info["Title/unicode"].value
install_options["install_requires"] = "hachoir-core>=1.2.1"
install_options["install_requires"] = "hachoir-core>=1.3"
def main(): if "--setuptools" in argv: argv.remove("--setuptools") from setuptools import setup use_setuptools = True else: from distutils.core import setup use_setuptools = False hachoir_parser = load_source("version", path.join("hachoir_parser", "version.py")) PACKAGES = {"hachoir_parser": "hachoir_parser"} for name in MODULES: PACKAGES["hachoir_parser." + name] = "hachoir_parser/" + name install_options = { "name": hachoir_parser.PACKAGE, "version": hachoir_parser.__version__, "url": hachoir_parser.WEBSITE, "download_url": hachoir_parser.WEBSITE, "author": "Hachoir team (see AUTHORS file)", "description": "Package of Hachoir parsers used to open binary files", "long_description": getLongDescription(), "classifiers": CLASSIFIERS, "license": hachoir_parser.LICENSE, "packages": PACKAGES.keys(), "package_dir": PACKAGES, } if use_setuptools: install_options["install_requires"] = "hachoir-core>=1.2.1" install_options["zip_safe"] = True setup(**install_options)
title=title, desc=desc, tags=tags, search_hidden=not visible, safety=safety, is_public=is_public, is_family=is_family, is_friend=is_friend, content_type=content_type)
title=title, desc=desc, tags=tags, search_hidden=not visible, safety=safety, is_public=is_public, is_family=is_family, is_friend=is_friend, content_type=content_type, progress_tracker=self.upload_progress_tracker)
def upload(self, response=None): """Upload worker function, called by the File->Upload callback. As this calls itself in the deferred callback, it takes a response argument."""
search_hidden=not visible, safety=safety, is_public=is_public, is_family=is_family, is_friend=is_friend, content_type=content_type)
search_hidden=not visible, safety=safety, is_public=is_public, is_family=is_family, is_friend=is_friend, content_type=content_type, progress_tracker=self.upload_progress_tracker)
def upload(self, response=None): """Upload worker function, called by the File->Upload callback. As this calls itself in the deferred callback, it takes a response argument."""
lock = "org.gtk.PyUnique.lock"
lock = "%s.lock" % name
def __init__(self, name, startup_id=None): gobject.GObject.__init__(self) self._is_running = False self._name = name self._screen = gdk.screen_get_default()
self._check_for_errors(stderr)
def _download(self): self.log = '' self.information['status'] = DownloadStatus.RUNNING if self.information['download_type'] == DownloadTypes.TORRENT: # download torrent if necessary torrent_filename = os.path.join(self._config.get('general', 'folder_new_otrkeys'), self.filename + '.torrent') if not os.path.exists(torrent_filename): password = base64.b64decode(self._config.get('general', 'password')) hash = hashlib.md5(password).hexdigest() email = self._config.get('general', 'email') url = 'http://81.95.11.2/xbt/xbt_torrent_create.php?filename=%s&email=%s&mode=free&hash=%s' % (self.filename, email, hash) try: urllib.urlretrieve(url, torrent_filename) # read filename f = open(torrent_filename, 'r') line = f.readlines()[0] except IOError, error: self.information['status'] = DownloadStatus.ERROR self.information['message_short'] = 'Torrentdatei konnte nicht geladen werden.' yield "Torrentdatei konnte nicht heruntergeladen werden (%s)!" % error return if "Hash wrong" in line: os.remove(torrent_filename) self.information['status'] = DownloadStatus.ERROR self.information['message_short'] = 'OTR-Daten nicht korrekt!' yield 'OTR-Daten nicht korrekt!' return self.information['output'] = self._config.get('general', 'folder_new_otrkeys') command = self._config.get('downloader', 'aria2c_torrent') + ["-d", self.information['output'], "-T", torrent_filename] yield "Ausgeführt wird:\n%s\n" % " ".join(command) try: self.__process = subprocess.Popen(command, stdout=subprocess.PIPE) except OSError, error: self.information['status'] = DownloadStatus.ERROR self.information['message_short'] = 'Aria2c ist nicht installiert.' yield "Ist aria2c installiert? Der Befehl konnte nicht ausgeführt werden:\nFehlermeldung: %s" % error return while self.__process.poll() == None: line = self.__process.stdout.readline().strip() if "Checksum" in line: result = re.findall('Checksum:.*\((.*%)\)', line) if result: self.information['message_short'] = 'Überprüfen...%s' % result[0] elif "SEEDING" in line: self.information['message_short'] = 'Seeden...' self.information['status'] = DownloadStatus.SEEDING # _NOT_ DownloadStatus.FINISHED self.information['progress'] = 100 self.information['est'] = '' self.information['speed'] = '' self.information['seeders'] = None result = re.findall('ratio:(.*)\)', line) if result: print 'ratio: ', result[0] self.information['ratio'] = result[0] result = re.findall('UP:(.*)\((.*)\)', line) if result: self.information['upspeed'] = result[0][0] self.information['uploaded'] = result[0][1] elif "%" in line: self.information['message_short'] = '' # get size if not self.information['size']: try: # aria2c gives size always in MiB (hopefully) size = re.findall('SIZE:.*/(.*)MiB\(', line)[0] size = size.replace(',', '') size = int(round(float(size))) * 1024 * 1024 self.information['size'] = size yield line except: pass
This path is by default <mfm_lib_path>/../data/ in trunk and /usr/share/mfm in an installed version but this path
This path is by default <otrverwaltung_lib_path>/../data/ in trunk and /usr/share/otrverwaltung in an installed version but this path
def getdatapath(*args): """Retrieve otrverwaltung data path This path is by default <mfm_lib_path>/../data/ in trunk and /usr/share/mfm in an installed version but this path is specified at installation time. """ return os.path.join(os.path.dirname(__file__), data_dir, *args)
self.combobox_archive.fill(archive_directory) self.combobox_archive.set_active(0) self.combobox_archive.connect('changed', self._on_combobox_archive_changed)
if action != Action.DECODE: self.combobox_archive.fill(archive_directory) self.combobox_archive.set_active(0) self.combobox_archive.connect('changed', self._on_combobox_archive_changed)
def _run(self, file_conclusions, action, rename_by_schema, archive_directory): self.action = action self.rename_by_schema = rename_by_schema self.__file_conclusions = file_conclusions self.forward_clicks = 0 self.show_all() self.combobox_archive.fill(archive_directory) self.combobox_archive.set_active(0) self.combobox_archive.connect('changed', self._on_combobox_archive_changed) # basic show/hide widgets_hidden = [] if self.action == Action.DECODE: self.builder.get_object('box_buttons').show() # show buttons, but hide all except play button widgets_hidden = ['image_cut', 'label_cut', 'label_cut_status', 'button_play_cut', 'box_rating', 'check_delete_uncut', 'box_rename', 'box_archive'] elif self.action == Action.CUT: widgets_hidden = ['image_decode', 'label_decode', 'label_decode_status'] for widget in widgets_hidden: self.builder.get_object(widget).hide() self.show_conclusion(0)
self.builder.get_object('button_play_cut').props.visible = cut_ok
self.builder.get_object('button_play_cut').props.visible = cut_ok self.builder.get_object('box_archive').props.visible = cut_ok
def show_conclusion(self, new_iter): self.conclusion_iter = new_iter self.file_conclusion = self.__file_conclusions[self.conclusion_iter] self.builder.get_object('label_count').set_text("Zeige Datei %s/%s" % (str(new_iter + 1), len(self.__file_conclusions)))
url = "%sgetxml.php?ofsb=%s" % (server, str(size))
urls = ["%sgetxml.php?ofsb=%s" % (server, str(size)), "%sgetxml.php?ofsb=%s" % (server, str((size+2*1024**3)%(4*1024**3)- 2*1024**3))]
def download_cutlists(filename, server, choose_cutlists_by, cutlist_mp4_as_hq, error_cb=None, cutlist_found_cb=None): """ Downloads all cutlists for the given file. filename - movie filename server - cutlist server choose_cutlists_by - 0 by size, 1 by name error_cb - callback: an error occurs (message) cutlist_found_cb - callback: a cutlist is found (Cutlist instance) Returns: a list of Cutlist instances """ if choose_cutlists_by == 0: # by size size = fileoperations.get_size(filename) url = "%sgetxml.php?ofsb=%s" % (server, str(size)) else: # by name root, extension = os.path.splitext(os.path.basename(filename)) if cutlist_mp4_as_hq and extension == '.mp4': root += ".HQ" url = "%sgetxml.php?name=%s" % (server, root) print url try: handle = urllib.urlopen(url) except IOError: if error_cb: error_cb("Verbindungsprobleme") return "Verbindungsprobleme", None try: dom_cutlists = xml.dom.minidom.parse(handle) handle.close() dom_cutlists = dom_cutlists.getElementsByTagName('cutlist') except: if error_cb: error_cb("Keine Cutlists gefunden") return "Keine Cutlists gefunden", None cutlists = [] for cutlist in dom_cutlists: c = Cutlist() c.id = __read_value(cutlist, "id") c.author = __read_value(cutlist, "author") c.ratingbyauthor = __read_value(cutlist, "ratingbyauthor") c.rating = __read_value(cutlist, "rating") c.ratingcount = __read_value(cutlist, "ratingcount") c.countcuts = __read_value(cutlist, "cuts") c.actualcontent = __read_value(cutlist, "actualcontent") c.usercomment = __read_value(cutlist, "usercomment") c.filename = __read_value(cutlist, "filename") c.withframes = __read_value(cutlist, "withframes") c.withtime = __read_value(cutlist, "withtime") c.duration = __read_value(cutlist, "duration") c.errors = __read_value(cutlist, "errors") c.othererrordescription = __read_value(cutlist, "othererrordescription") c.downloadcount = __read_value(cutlist, "downloadcount") c.autoname = __read_value(cutlist, "autoname") c.filename_original = __read_value(cutlist, "filename_original") if cutlist_found_cb: cutlist_found_cb(c) cutlists.append(c) if len(cutlists) == 0: return "Keine Cutlists gefunden", None else: return None, cutlists
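The second URL added above re-expresses the file size folded into the signed 32-bit range, presumably so that cutlists whose recorded size overflowed a 32-bit integer still match; that reading is an inference from the arithmetic, not documented behavior. A worked example:

    size = 5 * 1024**3  # a 5 GiB recording
    wrapped = (size + 2*1024**3) % (4*1024**3) - 2*1024**3
    print(wrapped)      # 1073741824: 5 GiB after signed 32-bit overflow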
url = "%sgetxml.php?name=%s" % (server, root) print url try: handle = urllib.urlopen(url) except IOError: if error_cb: error_cb("Verbindungsprobleme") return "Verbindungsprobleme", None
urls = ["%sgetxml.php?name=%s" % (server, root)] cutlists = [] for url in urls: print "[Cutlists] Download by : %s" % url try: handle = urllib.urlopen(url) except IOError: if error_cb: error_cb("Verbindungsprobleme") return "Verbindungsprobleme", None try: dom_cutlists = xml.dom.minidom.parse(handle) handle.close() dom_cutlists = dom_cutlists.getElementsByTagName('cutlist') except: if error_cb: error_cb("Keine Cutlists gefunden") return "Keine Cutlists gefunden", None for cutlist in dom_cutlists: c = Cutlist() c.id = __read_value(cutlist, "id") c.author = __read_value(cutlist, "author") c.ratingbyauthor = __read_value(cutlist, "ratingbyauthor") c.rating = __read_value(cutlist, "rating") c.ratingcount = __read_value(cutlist, "ratingcount") c.countcuts = __read_value(cutlist, "cuts") c.actualcontent = __read_value(cutlist, "actualcontent") c.usercomment = __read_value(cutlist, "usercomment") c.filename = __read_value(cutlist, "filename") c.withframes = __read_value(cutlist, "withframes") c.withtime = __read_value(cutlist, "withtime") c.duration = __read_value(cutlist, "duration") c.errors = __read_value(cutlist, "errors") c.othererrordescription = __read_value(cutlist, "othererrordescription") c.downloadcount = __read_value(cutlist, "downloadcount") c.autoname = __read_value(cutlist, "autoname") c.filename_original = __read_value(cutlist, "filename_original") ids = [cutlist.id for cutlist in cutlists] if not c.id in ids: if cutlist_found_cb: cutlist_found_cb(c)
def download_cutlists(filename, server, choose_cutlists_by, cutlist_mp4_as_hq, error_cb=None, cutlist_found_cb=None): """ Downloads all cutlists for the given file. filename - movie filename server - cutlist server choose_cutlists_by - 0 by size, 1 by name error_cb - callback: an error occurs (message) cutlist_found_cb - callback: a cutlist is found (Cutlist instance) Returns: a list of Cutlist instances """ if choose_cutlists_by == 0: # by size size = fileoperations.get_size(filename) url = "%sgetxml.php?ofsb=%s" % (server, str(size)) else: # by name root, extension = os.path.splitext(os.path.basename(filename)) if cutlist_mp4_as_hq and extension == '.mp4': root += ".HQ" url = "%sgetxml.php?name=%s" % (server, root) print url try: handle = urllib.urlopen(url) except IOError: if error_cb: error_cb("Verbindungsprobleme") return "Verbindungsprobleme", None try: dom_cutlists = xml.dom.minidom.parse(handle) handle.close() dom_cutlists = dom_cutlists.getElementsByTagName('cutlist') except: if error_cb: error_cb("Keine Cutlists gefunden") return "Keine Cutlists gefunden", None cutlists = [] for cutlist in dom_cutlists: c = Cutlist() c.id = __read_value(cutlist, "id") c.author = __read_value(cutlist, "author") c.ratingbyauthor = __read_value(cutlist, "ratingbyauthor") c.rating = __read_value(cutlist, "rating") c.ratingcount = __read_value(cutlist, "ratingcount") c.countcuts = __read_value(cutlist, "cuts") c.actualcontent = __read_value(cutlist, "actualcontent") c.usercomment = __read_value(cutlist, "usercomment") c.filename = __read_value(cutlist, "filename") c.withframes = __read_value(cutlist, "withframes") c.withtime = __read_value(cutlist, "withtime") c.duration = __read_value(cutlist, "duration") c.errors = __read_value(cutlist, "errors") c.othererrordescription = __read_value(cutlist, "othererrordescription") c.downloadcount = __read_value(cutlist, "downloadcount") c.autoname = __read_value(cutlist, "autoname") c.filename_original = __read_value(cutlist, "filename_original") if cutlist_found_cb: cutlist_found_cb(c) cutlists.append(c) if len(cutlists) == 0: return "Keine Cutlists gefunden", None else: return None, cutlists
try: dom_cutlists = xml.dom.minidom.parse(handle) handle.close() dom_cutlists = dom_cutlists.getElementsByTagName('cutlist') except: if error_cb: error_cb("Keine Cutlists gefunden") return "Keine Cutlists gefunden", None cutlists = [] for cutlist in dom_cutlists: c = Cutlist() c.id = __read_value(cutlist, "id") c.author = __read_value(cutlist, "author") c.ratingbyauthor = __read_value(cutlist, "ratingbyauthor") c.rating = __read_value(cutlist, "rating") c.ratingcount = __read_value(cutlist, "ratingcount") c.countcuts = __read_value(cutlist, "cuts") c.actualcontent = __read_value(cutlist, "actualcontent") c.usercomment = __read_value(cutlist, "usercomment") c.filename = __read_value(cutlist, "filename") c.withframes = __read_value(cutlist, "withframes") c.withtime = __read_value(cutlist, "withtime") c.duration = __read_value(cutlist, "duration") c.errors = __read_value(cutlist, "errors") c.othererrordescription = __read_value(cutlist, "othererrordescription") c.downloadcount = __read_value(cutlist, "downloadcount") c.autoname = __read_value(cutlist, "autoname") c.filename_original = __read_value(cutlist, "filename_original") if cutlist_found_cb: cutlist_found_cb(c) cutlists.append(c)
cutlists.append(c)
def download_cutlists(filename, server, choose_cutlists_by, cutlist_mp4_as_hq, error_cb=None, cutlist_found_cb=None): """ Downloads all cutlists for the given file. filename - movie filename server - cutlist server choose_cutlists_by - 0 by size, 1 by name error_cb - callback: an error occurs (message) cutlist_found_cb - callback: a cutlist is found (Cutlist instance) Returns: a list of Cutlist instances """ if choose_cutlists_by == 0: # by size size = fileoperations.get_size(filename) url = "%sgetxml.php?ofsb=%s" % (server, str(size)) else: # by name root, extension = os.path.splitext(os.path.basename(filename)) if cutlist_mp4_as_hq and extension == '.mp4': root += ".HQ" url = "%sgetxml.php?name=%s" % (server, root) print url try: handle = urllib.urlopen(url) except IOError: if error_cb: error_cb("Verbindungsprobleme") return "Verbindungsprobleme", None try: dom_cutlists = xml.dom.minidom.parse(handle) handle.close() dom_cutlists = dom_cutlists.getElementsByTagName('cutlist') except: if error_cb: error_cb("Keine Cutlists gefunden") return "Keine Cutlists gefunden", None cutlists = [] for cutlist in dom_cutlists: c = Cutlist() c.id = __read_value(cutlist, "id") c.author = __read_value(cutlist, "author") c.ratingbyauthor = __read_value(cutlist, "ratingbyauthor") c.rating = __read_value(cutlist, "rating") c.ratingcount = __read_value(cutlist, "ratingcount") c.countcuts = __read_value(cutlist, "cuts") c.actualcontent = __read_value(cutlist, "actualcontent") c.usercomment = __read_value(cutlist, "usercomment") c.filename = __read_value(cutlist, "filename") c.withframes = __read_value(cutlist, "withframes") c.withtime = __read_value(cutlist, "withtime") c.duration = __read_value(cutlist, "duration") c.errors = __read_value(cutlist, "errors") c.othererrordescription = __read_value(cutlist, "othererrordescription") c.downloadcount = __read_value(cutlist, "downloadcount") c.autoname = __read_value(cutlist, "autoname") c.filename_original = __read_value(cutlist, "filename_original") if cutlist_found_cb: cutlist_found_cb(c) cutlists.append(c) if len(cutlists) == 0: return "Keine Cutlists gefunden", None else: return None, cutlists
def foreach(model, path, iter, data=None): index = model.get_value(iter, 0) stamp = self.app.planned_broadcasts[index].datetime if stamp < now: selection.select_iter(iter) self.builder.get_object('treeview_planning').get_model().foreach(foreach)
for row in self.builder.get_object('treeview_planning').get_model(): if row[0].datetime < now: selection.select_iter(row.iter)
def foreach(model, path, iter, data=None): index = model.get_value(iter, 0) stamp = self.app.planned_broadcasts[index].datetime
return "%s %s" % (self.user.username, self.group)
if self.user: username = self.user.username else: username = 'anonymous' return "%s %s" % (username, self.group)
def __unicode__(self): return "%s %s" % (self.user.username, self.group)
raise
if settings.DEBUG: raise l.warning("Can't find the GoalType named %s" % goal_name)
def record(cls, goal_name, experiment_user): try: return cls._record(goal_name, experiment_user) except GoalType.DoesNotExist: raise except Exception, e: l.error("Unexpected exception in GoalRecord.record:\n" "%s" % traceback.format_exc())
goal_types = [GoalType.objects.create(name=i) for i in range(3)]
goal_types = [GoalType.objects.create(name=str(i)) for i in range(3)]
def testParticipantConversionCalculator(self): goal_types = [GoalType.objects.create(name=i) for i in range(3)] anonymous_visitor = AnonymousVisitor.objects.create() participant = self.create_participant( anonymous_visitor=anonymous_visitor, experiment=self.experiment, enrollment_date=self.experiment.start_date + timedelta(days=2), group=Participant.TEST_GROUP) days = [datetime.combine(self.experiment.start_date + timedelta(days=i), time(hour=12)) for i in range(5)] nb_goal_records = GoalRecord.objects.all().count() self.create_goal_record(days[0], anonymous_visitor, goal_types[0]) self.create_goal_record(days[0], anonymous_visitor, goal_types[1]) self.create_goal_record(days[1], anonymous_visitor, goal_types[0]) self.create_goal_record(days[1], anonymous_visitor, goal_types[0]) self.create_goal_record(days[2], anonymous_visitor, goal_types[1]) self.create_goal_record(days[3], anonymous_visitor, goal_types[0]) self.create_goal_record(days[4], anonymous_visitor, goal_types[0]) self.create_goal_record(days[4], anonymous_visitor, goal_types[0]) self.assertEquals(nb_goal_records + 8, GoalRecord.objects.all().count()) # wasn't enrolled yet! self.assertEquals(calculate_participant_conversion(participant, goal_types[0], days[0]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[1], days[0]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[2], days[0]), 0) self.assertEquals(calculate_participant_conversion(participant, None, days[0]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[0], days[1]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[1], days[1]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[2], days[1]), 0) self.assertEquals(calculate_participant_conversion(participant, None, days[1]), 0) # now enrolled self.assertEquals(calculate_participant_conversion(participant, goal_types[0], days[2]), 0) self.assertEquals(calculate_participant_conversion(participant, goal_types[1], days[2]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[2], days[2]), 0) # "any" is one self.assertEquals(calculate_participant_conversion(participant, None, days[2]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[0], days[3]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[1], days[3]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[2], days[3]), 0) # "any" is one, even though two different goals were achieved self.assertEquals(calculate_participant_conversion(participant, None, days[3]), 1) # there were three conversions on this day for goal 0, but we only count the first! self.assertEquals(calculate_participant_conversion(participant, goal_types[0], days[4]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[1], days[4]), 1) self.assertEquals(calculate_participant_conversion(participant, goal_types[2], days[4]), 0) self.assertEquals(calculate_participant_conversion(participant, None, days[4]), 1)
l.error("Unexpected exception in GoalRecord.record:\n" "%s" % traceback.format_exc)
l.exception("Unexpected exception in GoalRecord.record")
def record(cls, goal_name, experiment_user): try: return cls._record(goal_name, experiment_user) except GoalType.DoesNotExist: if settings.DEBUG: raise l.warning("Can't find the GoalType named %s" % goal_name) except Exception, e: l.error("Unexpected exception in GoalRecord.record:\n" "%s" % traceback.format_exc())
scores a, and b. From Numerical Recipes, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. Originally written by Gary Strangman. Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
scores a, and b. Returns t-value, and prob. Originally written by Gary Strangman. Usage: lttest_ind(a,b)
def ttest_ind(a, b): """ Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores a, and b. From Numerical Recipes, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. Originally written by Gary Strangman. Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a') Returns: t-value, two-tailed prob """ x1 = mean(a) x2 = mean(b) v1 = stdev(a)**2 v2 = stdev(b)**2 n1 = len(a) n2 = len(b) df = n1+n2-2 svar = ((n1-1)*v1+(n2-1)*v2)/float(df) t = (x1-x2)/sqrt(svar*(1.0/n1 + 1.0/n2)) prob = betai(0.5*df,0.5,df/(df+t*t)) return t, prob
x1 = mean(a) x2 = mean(b) v1 = stdev(a)**2 v2 = stdev(b)**2 n1 = len(a) n2 = len(b)
x1, x2 = mean(a), mean(b) v1, v2 = stdev(a)**2, stdev(b)**2 n1, n2 = len(a), len(b)
def ttest_ind(a, b): """ Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores a, and b. From Numerical Recipes, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. Originally written by Gary Strangman. Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a') Returns: t-value, two-tailed prob """ x1 = mean(a) x2 = mean(b) v1 = stdev(a)**2 v2 = stdev(b)**2 n1 = len(a) n2 = len(b) df = n1+n2-2 svar = ((n1-1)*v1+(n2-1)*v2)/float(df) t = (x1-x2)/sqrt(svar*(1.0/n1 + 1.0/n2)) prob = betai(0.5*df,0.5,df/(df+t*t)) return t, prob
svar = ((n1-1)*v1+(n2-1)*v2)/float(df) t = (x1-x2)/sqrt(svar*(1.0/n1 + 1.0/n2))
try: svar = ((n1-1)*v1+(n2-1)*v2)/float(df) except ZeroDivisionError: return float('nan'), float('nan') try: t = (x1-x2)/sqrt(svar*(1.0/n1 + 1.0/n2)) except ZeroDivisionError: t = 1.0
def ttest_ind(a, b): """ Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores a, and b. From Numerical Recipes, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. Originally written by Gary Strangman. Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a') Returns: t-value, two-tailed prob """ x1 = mean(a) x2 = mean(b) v1 = stdev(a)**2 v2 = stdev(b)**2 n1 = len(a) n2 = len(b) df = n1+n2-2 svar = ((n1-1)*v1+(n2-1)*v2)/float(df) t = (x1-x2)/sqrt(svar*(1.0/n1 + 1.0/n2)) prob = betai(0.5*df,0.5,df/(df+t*t)) return t, prob
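As a sanity check of the pooled-variance formula kept across these revisions, here is the same computation on toy data using Python 3's statistics module (whose mean and sample standard deviation are assumed to match the module's own mean/stdev helpers):

    from math import sqrt
    from statistics import mean, stdev  # sample standard deviation

    a, b = [2.0, 4.0, 6.0], [1.0, 2.0, 3.0]
    n1, n2 = len(a), len(b)
    df = n1 + n2 - 2
    svar = ((n1-1)*stdev(a)**2 + (n2-1)*stdev(b)**2) / float(df)
    t = (mean(a) - mean(b)) / sqrt(svar * (1.0/n1 + 1.0/n2))
    print(round(t, 3))  # 1.549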
arg = len(names)
arg = len(proposals)
def code_assist(self, prefix): proposals = self._calculate_proposals() if prefix is not None: arg = self.env.prefix_value(prefix) if arg == 0: arg = len(names) common_start = self._calculate_prefix(proposals[:arg]) self.env.insert(common_start[self.offset - self.starting_offset:]) self._starting = common_start self._offset = self.starting_offset + len(common_start) prompt = 'Completion for %s: ' % self.expression proposals = map(self.env._completion_data, proposals) result = self.env.ask_completion(prompt, proposals, self.starting) if result is not None: self._apply_assist(result)
proposals = codeassist.sorted_proposals(proposals)
if self.env.get('sorted_completions', True): proposals = codeassist.sorted_proposals(proposals)
def _calculate_proposals(self): self.interface._check_project() resource = self.interface.resource maxfixes = self.env.get('codeassist_maxfixes') proposals = codeassist.code_assist( self.interface.project, self.source, self.offset, resource, maxfixes=maxfixes) proposals = codeassist.sorted_proposals(proposals) if self.autoimport is not None: if self.starting.strip() and '.' not in self.expression: import_assists = self.autoimport.import_assist(self.starting) for assist in import_assists: p = codeassist.CompletionProposal(' : '.join(assist), 'autoimport') proposals.append(p) return proposals
response = self.requestor.request('execute_cql_query', request_params)
try: response = self.requestor.request('execute_cql_query', request_params) except AvroRemoteException, are: raise CQLException(are)
def execute(self, query, compression=None): compress = compression is None and DEFAULT_COMPRESSION \ or compression.upper() if not compress in COMPRESSION_SCHEMES: raise InvalidCompressionScheme(compress) compressed_query = Connection.compress_query(query, compress) request_params = dict(query=compressed_query, compression=compress) response = self.requestor.request('execute_cql_query', request_params)
newSnpData = SNPData(col_id_ls=copy.deepcopy(snpData.col_id_ls), row_id_ls=[]) newSnpData.data_matrix = num.zeros([no_of_rows, no_of_cols], num.int8)
new_col_id_ls = copy.deepcopy(snpData.col_id_ls) new_row_id_ls = [] new_data_matrix = num.zeros([no_of_rows, no_of_cols], num.int8)
def keepRowsByRowID(cls, snpData, row_id_ls): """ 2009-05-19 keep certain rows in snpData given row_id_ls """ sys.stderr.write("Keeping rows given row_id_ls ...") no_of_rows = len(row_id_ls) row_id_wanted_set = set(row_id_ls) no_of_cols = len(snpData.col_id_ls) newSnpData = SNPData(col_id_ls=copy.deepcopy(snpData.col_id_ls), row_id_ls=[]) newSnpData.data_matrix = num.zeros([no_of_rows, no_of_cols], num.int8) row_index = 0 for i in range(len(snpData.row_id_ls)): row_id = snpData.row_id_ls[i] if row_id in row_id_wanted_set: newSnpData.row_id_ls.append(row_id) newSnpData.data_matrix[row_index] = snpData.data_matrix[i] row_index += 1 newSnpData.no_of_rows_filtered_by_mismatch = len(snpData.row_id_ls)-no_of_rows sys.stderr.write("%s rows discarded. Done.\n"%(newSnpData.no_of_rows_filtered_by_mismatch)) return newSnpData
newSnpData.row_id_ls.append(row_id) newSnpData.data_matrix[row_index] = snpData.data_matrix[i]
new_row_id_ls.append(row_id) new_data_matrix[row_index] = snpData.data_matrix[i]
def keepRowsByRowID(cls, snpData, row_id_ls): """ 2009-05-19 keep certain rows in snpData given row_id_ls """ sys.stderr.write("Keeping rows given row_id_ls ...") no_of_rows = len(row_id_ls) row_id_wanted_set = set(row_id_ls) no_of_cols = len(snpData.col_id_ls) newSnpData = SNPData(col_id_ls=copy.deepcopy(snpData.col_id_ls), row_id_ls=[]) newSnpData.data_matrix = num.zeros([no_of_rows, no_of_cols], num.int8) row_index = 0 for i in range(len(snpData.row_id_ls)): row_id = snpData.row_id_ls[i] if row_id in row_id_wanted_set: newSnpData.row_id_ls.append(row_id) newSnpData.data_matrix[row_index] = snpData.data_matrix[i] row_index += 1 newSnpData.no_of_rows_filtered_by_mismatch = len(snpData.row_id_ls)-no_of_rows sys.stderr.write("%s rows discarded. Done.\n"%(newSnpData.no_of_rows_filtered_by_mismatch)) return newSnpData
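The row-copy loop above can also be written with a numpy boolean mask, which preserves the same input-matrix row order; a hypothetical equivalent on toy data:

    import numpy as num

    row_id_ls_all = ['a', 'b', 'c', 'd']  # rows present in the matrix
    data_matrix = num.arange(8, dtype=num.int8).reshape(4, 2)
    row_id_wanted_set = set(['b', 'd'])
    mask = num.array([rid in row_id_wanted_set for rid in row_id_ls_all])
    kept_row_ids = [rid for rid, keep in zip(row_id_ls_all, mask) if keep]
    kept_matrix = data_matrix[mask]       # rows 1 and 3, in input order
    print(kept_row_ids)  # ['b', 'd']
    print(kept_matrix)   # [[2 3], [6 7]]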
report=1, run_type=1): """
cofactor_phenotype_id_ls=[], report=1, run_type=1,): """ 2010-1-11 add argument cofactor_phenotype_id_ls to have the functionality of treating some phenotypes as cofactors
def cofactorLM(cls, genotype_fname, phenotype_fname, phenotype_method_id_ls, output_fname_prefix, start_snp, stop_snp, cofactors=[],\ report=1, run_type=1): """ 2009-8-26 run_type 1: pure linear model by python run_type 2: EMMA run_type 3: pure linear model by R (Field test shows run_type 3 is same as 1.) start_snp and end_snp are on the same chromosome """ sys.stderr.write("Running association (pure linear model or EMMA) with cofactor ... \n") from Association import Association chromosome, start_pos = start_snp.split('_')[:2] start_pos = int(start_pos) stop_pos = int(stop_snp.split('_')[1]) eigen_vector_fname = '' test_type = 3 #Emma initData = Association.readInData(phenotype_fname, genotype_fname, eigen_vector_fname, phenotype_method_id_ls, test_type=test_type) which_phenotype_index_ls = initData.which_phenotype_ls environment_matrix = None data_matrix = initData.snpData.data_matrix min_data_point = 3 #create an index list of cofactor SNPs cofactors_indices = [] cofactors_set = set(cofactors) for i in range(len(initData.snpData.col_id_ls)): col_id = initData.snpData.col_id_ls[i] if col_id in cofactors_set: cofactors_indices.append(i) import numpy, rpy rpy.r.source(os.path.expanduser('~/script/variation/src/gwa/emma/R/emma.R')) for which_phenotype in which_phenotype_index_ls: phenotype_name = initData.phenData.col_id_ls[which_phenotype] phenotype_name = phenotype_name.replace('/', '_') #'/' will be recognized as directory in output_fname output_fname='%s_pheno_%s.tsv'%(os.path.splitext(output_fname_prefix)[0], phenotype_name) #make up a new name corresponding to this phenotype #create non NA phenotype phenotype_ls = initData.phenData.data_matrix[:, which_phenotype] non_phenotype_NA_row_index_ls = [] non_NA_phenotype_ls = [] for i in range(len(phenotype_ls)): if not numpy.isnan(phenotype_ls[i]): non_phenotype_NA_row_index_ls.append(i) non_NA_phenotype_ls.append(phenotype_ls[i]) non_NA_phenotype_ar = numpy.array(non_NA_phenotype_ls) new_data_matrix = data_matrix[non_phenotype_NA_row_index_ls,:] if run_type==2: kinship_matrix = Association.get_kinship_matrix(new_data_matrix) eig_L = rpy.r.emma_eigen_L(None, kinship_matrix) #to avoid repeating the computation of eig_L inside emma.REMLE else: kinship_matrix = None eig_L = None
run_type 1: pure linear model by python run_type 2: EMMA run_type 3: pure linear model by R (Field test shows run_type 3 is same as 1.)
one phenotype at a time: 1. create a new SNP matrix which includes accessions whose phenotypes (this phenotype + cofactor_phenotype_id_ls) are non-NA 2. one SNP at a time 1. add cofactor SNP matrix if cofactors are present 2. add cofactor phenotype matrix if cofactor_phenotype_id_ls exist 3. run association parameter run_type is passed to Association.linear_model() run_type 1: pure linear model by python run_type 2: EMMA run_type 3: pure linear model by R (Field test shows run_type 3 is same as 1.)
def cofactorLM(cls, genotype_fname, phenotype_fname, phenotype_method_id_ls, output_fname_prefix, start_snp, stop_snp, cofactors=[],\ report=1, run_type=1): """ 2009-8-26 run_type 1: pure linear model by python run_type 2: EMMA run_type 3: pure linear model by R (Field test shows run_type 3 is same as 1.) start_snp and end_snp are on the same chromosome """ sys.stderr.write("Running association (pure linear model or EMMA) with cofactor ... \n") from Association import Association chromosome, start_pos = start_snp.split('_')[:2] start_pos = int(start_pos) stop_pos = int(stop_snp.split('_')[1]) eigen_vector_fname = '' test_type = 3 #Emma initData = Association.readInData(phenotype_fname, genotype_fname, eigen_vector_fname, phenotype_method_id_ls, test_type=test_type) which_phenotype_index_ls = initData.which_phenotype_ls environment_matrix = None data_matrix = initData.snpData.data_matrix min_data_point = 3 #create an index list of cofactor SNPs cofactors_indices = [] cofactors_set = set(cofactors) for i in range(len(initData.snpData.col_id_ls)): col_id = initData.snpData.col_id_ls[i] if col_id in cofactors_set: cofactors_indices.append(i) import numpy, rpy rpy.r.source(os.path.expanduser('~/script/variation/src/gwa/emma/R/emma.R')) for which_phenotype in which_phenotype_index_ls: phenotype_name = initData.phenData.col_id_ls[which_phenotype] phenotype_name = phenotype_name.replace('/', '_') #'/' will be recognized as directory in output_fname output_fname='%s_pheno_%s.tsv'%(os.path.splitext(output_fname_prefix)[0], phenotype_name) #make up a new name corresponding to this phenotype #create non NA phenotype phenotype_ls = initData.phenData.data_matrix[:, which_phenotype] non_phenotype_NA_row_index_ls = [] non_NA_phenotype_ls = [] for i in range(len(phenotype_ls)): if not numpy.isnan(phenotype_ls[i]): non_phenotype_NA_row_index_ls.append(i) non_NA_phenotype_ls.append(phenotype_ls[i]) non_NA_phenotype_ar = numpy.array(non_NA_phenotype_ls) new_data_matrix = data_matrix[non_phenotype_NA_row_index_ls,:] if run_type==2: kinship_matrix = Association.get_kinship_matrix(new_data_matrix) eig_L = rpy.r.emma_eigen_L(None, kinship_matrix) #to avoid repeating the computation of eig_L inside emma.REMLE else: kinship_matrix = None eig_L = None
chromosome, start_pos = start_snp.split('_')[:2]
start_chr, start_pos = start_snp.split('_')[:2] start_chr = int(start_chr)
def cofactorLM(cls, genotype_fname, phenotype_fname, phenotype_method_id_ls, output_fname_prefix, start_snp, stop_snp, cofactors=[],\ report=1, run_type=1): """ 2009-8-26 run_type 1: pure linear model by python run_type 2: EMMA run_type 3: pure linear model by R (Field test shows run_type 3 is same as 1.) start_snp and end_snp are on the same chromosome """ sys.stderr.write("Running association (pure linear model or EMMA) with cofactor ... \n") from Association import Association chromosome, start_pos = start_snp.split('_')[:2] start_pos = int(start_pos) stop_pos = int(stop_snp.split('_')[1]) eigen_vector_fname = '' test_type = 3 #Emma initData = Association.readInData(phenotype_fname, genotype_fname, eigen_vector_fname, phenotype_method_id_ls, test_type=test_type) which_phenotype_index_ls = initData.which_phenotype_ls environment_matrix = None data_matrix = initData.snpData.data_matrix min_data_point = 3 #create an index list of cofactor SNPs cofactors_indices = [] cofactors_set = set(cofactors) for i in range(len(initData.snpData.col_id_ls)): col_id = initData.snpData.col_id_ls[i] if col_id in cofactors_set: cofactors_indices.append(i) import numpy, rpy rpy.r.source(os.path.expanduser('~/script/variation/src/gwa/emma/R/emma.R')) for which_phenotype in which_phenotype_index_ls: phenotype_name = initData.phenData.col_id_ls[which_phenotype] phenotype_name = phenotype_name.replace('/', '_') #'/' will be recognized as directory in output_fname output_fname='%s_pheno_%s.tsv'%(os.path.splitext(output_fname_prefix)[0], phenotype_name) #make up a new name corresponding to this phenotype #create non NA phenotype phenotype_ls = initData.phenData.data_matrix[:, which_phenotype] non_phenotype_NA_row_index_ls = [] non_NA_phenotype_ls = [] for i in range(len(phenotype_ls)): if not numpy.isnan(phenotype_ls[i]): non_phenotype_NA_row_index_ls.append(i) non_NA_phenotype_ls.append(phenotype_ls[i]) non_NA_phenotype_ar = numpy.array(non_NA_phenotype_ls) new_data_matrix = data_matrix[non_phenotype_NA_row_index_ls,:] if run_type==2: kinship_matrix = Association.get_kinship_matrix(new_data_matrix) eig_L = rpy.r.emma_eigen_L(None, kinship_matrix) #to avoid repeating the computation of eig_L inside emma.REMLE else: kinship_matrix = None eig_L = None
stop_pos = int(stop_snp.split('_')[1])
stop_chr, stop_pos = stop_snp.split('_')[:2]
stop_chr = int(stop_chr)
stop_pos = int(stop_pos)
if not numpy.isnan(phenotype_ls[i]):
this_row_has_NA_phenotype = False
if cofactor_phenotype_index_ls:
    for phenotype_index in cofactor_phenotype_index_ls:
        if numpy.isnan(initData.phenData.data_matrix[i, phenotype_index]):
            this_row_has_NA_phenotype = True
            break
if numpy.isnan(phenotype_ls[i]):
    this_row_has_NA_phenotype = True
if not this_row_has_NA_phenotype:
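The rem/add pair above extends the NA filter: a row now survives only if the target phenotype and every cofactor phenotype are non-missing. A minimal vectorized sketch of the same filter, assuming a numpy phenotype matrix that uses NaN for missing values; the helper name is hypothetical, not from this codebase:

import numpy

def non_NA_row_indices(phen_matrix, which_phenotype, cofactor_phenotype_index_ls=None):
    # keep a row only when the target phenotype column and all cofactor
    # phenotype columns are non-NaN in that row
    col_indices = [which_phenotype] + list(cofactor_phenotype_index_ls or [])
    keep_mask = ~numpy.isnan(phen_matrix[:, col_indices]).any(axis=1)
    return numpy.nonzero(keep_mask)[0]

# e.g. non_NA_row_indices(initData.phenData.data_matrix, which_phenotype, cofactor_phenotype_index_ls)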
if chr==chromosome and pos>=start_pos and pos<=stop_pos:
if chr>=start_chr and chr<=stop_chr and pos>=start_pos and pos<=stop_pos:
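The change above lifts the single-chromosome restriction, but note the new condition applies start_pos/stop_pos on every chromosome in the range, not just the boundary ones. A boundary-exact variant via lexicographic tuple comparison, sketched as a hypothetical helper:

def snp_in_range(chr, pos, start_chr, start_pos, stop_chr, stop_pos):
    # (chromosome, position) tuples compare lexicographically, so interior
    # chromosomes are included in full while the position bounds only bind
    # on the boundary chromosomes
    return (start_chr, start_pos) <= (chr, pos) <= (stop_chr, stop_pos)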
phenotype_method_id_ls = [43]
phenotype_method_id_ls = [285]
GWA.cofactorLM(genotype_fname, phenotype_fname, phenotype_method_id_ls, output_fname_prefix, start_snp, stop_snp, cofactors=cofactors)
cofactor_phenotype_id_ls = [77]
GWA.cofactorLM(genotype_fname, phenotype_fname, phenotype_method_id_ls, output_fname_prefix, start_snp, stop_snp, \
    cofactors=cofactors, cofactor_phenotype_id_ls=cofactor_phenotype_id_ls)
max_diff_perc=0.10, min_no_of_probes=5, count_embedded_segment_as_match=False):
    """
max_diff_perc=0.10, min_no_of_probes=5, count_embedded_segment_as_match=False, \
    min_reciprocal_overlap=0.6, report=True):
    """
    2010-1-26
        value of the ecotype_id2cnv_qc_call_data dictionary is a RBDict (RBTree dictionary) structure.
    2009-12-8
        add argument min_reciprocal_overlap
def compareCNVSegmentsAgainstQCHandler(cls, input_fname_ls, ecotype_id2cnv_qc_call_data, function_handler, param_obj, \ deletion_cutoff=None, max_boundary_diff=10000, \ max_diff_perc=0.10, min_no_of_probes=5, count_embedded_segment_as_match=False): """ 2009-11-4 a general handler to compare CNV segments from input_fname_ls with cnv_qc_call_data. Upon a match between a CNV segment from input_fname_ls and cnv_qc_call_data, function_handler would be called with param_obj as argument. If deletion_cutoff is None, all segments who have matches in ecotype_id2cnv_qc_call_data would be considered in function_handler(). If deletion_cutoff is not None (some float), only those segments whose amplitude is below this value would be considered in function_handler(). """ import fileinput from pymodule import getColName2IndexFromHeader, PassingData sys.stderr.write("Getting probe amplitude from %s ... \n"%repr(input_fname_ls)) amp_ls = [] array_id2array = {} counter = 0 real_counter = 0 no_of_deletions = 0 no_of_valid_deletions = 0 input_handler = fileinput.input(input_fname_ls) header = input_handler.readline().strip().split('\t') col_name2index = getColName2IndexFromHeader(header) for line in input_handler: if line.find("array_id")!=-1: continue line = line.strip() row = line.split('\t') ecotype_id_idx = col_name2index.get('ecotype_id', col_name2index.get('array_id')) cnv_ecotype_id = int(row[ecotype_id_idx]) array_id = int(row[col_name2index.get('array_id')]) #row[ecotype_id_idx] = cnv_ecotype_id counter += 1 if cnv_ecotype_id in ecotype_id2cnv_qc_call_data: # array is in CNVQCDat cnv_qc_call_data = ecotype_id2cnv_qc_call_data.get(cnv_ecotype_id) start_probe = row[col_name2index['start_probe']].split('_') # split chr_pos start_probe = map(int, start_probe) stop_probe = row[col_name2index['end_probe']].split('_') stop_probe = map(int, stop_probe) no_of_probes = int(row[col_name2index['length']]) if no_of_probes<min_no_of_probes: continue amplitude = float(row[col_name2index['amplitude']]) segment_chromosome = start_probe[0] segment_start_pos = start_probe[1]-12 segment_stop_pos = stop_probe[1]+12 segment_length = abs(segment_stop_pos-segment_start_pos+1) if deletion_cutoff is not None and amplitude>deletion_cutoff: continue no_of_deletions+=1 for cnv_qc_call in cnv_qc_call_data: qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3] cnv_qc_call_id = cnv_qc_call[-1] valid_match = False if qc_chromosome==segment_chromosome: boundary_diff1 = abs(segment_start_pos-qc_start) boundary_diff2 = abs(segment_stop_pos-qc_stop) diff1_perc = boundary_diff1/float(segment_length) diff2_perc = boundary_diff2/float(segment_length) if boundary_diff1<=max_boundary_diff and boundary_diff2<=max_boundary_diff and diff1_perc<=max_diff_perc and \ diff2_perc<=max_diff_perc: no_of_valid_deletions += 1 valid_match = True elif count_embedded_segment_as_match and segment_start_pos>=qc_start and segment_stop_pos<=qc_stop: #the segment doesn't match the criteria but very small and within no_of_valid_deletions += 1 valid_match = True if valid_match: cnv_segment_obj = PassingData(ecotype_id=cnv_ecotype_id, start_probe=start_probe, stop_probe=stop_probe,\ no_of_probes=no_of_probes, amplitude=amplitude, segment_length=segment_length,\ segment_chromosome=segment_chromosome, ) function_handler(cnv_segment_obj, cnv_qc_call, param_obj) elif qc_chromosome>segment_chromosome: break if counter%10000==0: sys.stderr.write('%s%s\t%s\t%s'%('\x08'*80, counter, no_of_deletions, no_of_valid_deletions)) setattr(param_obj, "no_of_deletions", no_of_deletions) 
setattr(param_obj, "no_of_valid_deletions", no_of_valid_deletions) sys.stderr.write("\n")
if boundary_diff1<=max_boundary_diff and boundary_diff2<=max_boundary_diff and diff1_perc<=max_diff_perc and \ diff2_perc<=max_diff_perc:
is_overlap = is_reciprocal_overlap([segment_start_pos, segment_stop_pos], [qc_start, qc_stop], \
    min_reciprocal_overlap=min_reciprocal_overlap)
if is_overlap:
elif count_embedded_segment_as_match and segment_start_pos>=qc_start and segment_stop_pos<=qc_stop:
    no_of_valid_deletions += 1
    valid_match = True
cnv_segment_obj = PassingData(ecotype_id=cnv_ecotype_id, start_probe=start_probe, stop_probe=stop_probe,\
    no_of_probes=no_of_probes, amplitude=amplitude, segment_length=segment_length,\
    segment_chromosome=segment_chromosome, )
function_handler(cnv_segment_obj, cnv_qc_call, param_obj)
function_handler(param_obj, cnv_segment_obj, cnv_qc_call, )
if counter%10000==0:
""" if report and counter%10000==0:
def getCNVQCDataFromDB(cls, data_source_id=1, ecotype_id=None, cnv_type_id=1, \
        min_QC_segment_size=200, min_no_of_probes=None):
    """
def getCNVQCDataFromDB(cls, data_source_id=1, ecotype_id=None, cnv_type_id=None, \
        min_QC_segment_size=None, min_no_of_probes=None, min_reciprocal_overlap=0.6):
    """
    2010-1-26
        replace the list structure of cnv_qc_call_data in ecotype_id2cnv_qc_call_data with binary_tree structure
    2009-12-9
        add no_of_probes_covered into returning data
        add cnv_type_id
def getCNVQCDataFromDB(cls, data_source_id=1, ecotype_id=None, cnv_type_id=1, \ min_QC_segment_size=200, min_no_of_probes=None): """ 2009-11-4 get CNV QC data from database """ sys.stderr.write("Getting CNV QC data ... \n") import Stock_250kDB sql_string = "select a.ecotype_id, c.chromosome, c.start, c.stop, c.size_affected, c.id from %s c,\ %s a where c.accession_id=a.id and a.data_source_id=%s and c.size_affected>=%s \ and c.cnv_type_id=%s"%\ (Stock_250kDB.CNVQCCalls.table.name, Stock_250kDB.CNVQCAccession.table.name, data_source_id,\ min_QC_segment_size, cnv_type_id) if ecotype_id is not None: sql_string += " and a.ecotype_id=%s"%ecotype_id if min_no_of_probes is not None: sql_string += " and c.no_of_probes_covered>=%s"%min_no_of_probes rows = db_250k.metadata.bind.execute(sql_string) count = 0 ecotype_id2cnv_qc_call_data = {} for row in rows: if row.ecotype_id not in ecotype_id2cnv_qc_call_data: ecotype_id2cnv_qc_call_data[row.ecotype_id] = [] cnv_qc_call_data = ecotype_id2cnv_qc_call_data[row.ecotype_id] cnv_qc_call_data.append((row.chromosome, row.start, row.stop, row.size_affected, row.id)) count += 1 for ecotype_id, cnv_qc_call_data in ecotype_id2cnv_qc_call_data.iteritems(): cnv_qc_call_data.sort() ecotype_id2cnv_qc_call_data[ecotype_id] = cnv_qc_call_data sys.stderr.write("%s cnv qc calls for %s ecotypes. Done.\n"%(count, len(ecotype_id2cnv_qc_call_data))) return ecotype_id2cnv_qc_call_data
sql_string = "select a.ecotype_id, c.chromosome, c.start, c.stop, c.size_affected, c.id from %s c,\ %s a where c.accession_id=a.id and a.data_source_id=%s and c.size_affected>=%s \ and c.cnv_type_id=%s"%\ (Stock_250kDB.CNVQCCalls.table.name, Stock_250kDB.CNVQCAccession.table.name, data_source_id,\ min_QC_segment_size, cnv_type_id)
sql_string = "select a.ecotype_id, c.chromosome, c.start, c.stop, c.size_affected, c.no_of_probes_covered, c.copy_number, c.id from %s c,\ %s a where c.accession_id=a.id and a.data_source_id=%s order by RAND()"%\ (Stock_250kDB.CNVQCCalls.table.name, Stock_250kDB.CNVQCAccession.table.name, data_source_id) if cnv_type_id is not None: sql_string += " and c.cnv_type_id=%s"%cnv_type_id
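One caveat in the add above: the cnv_type_id condition (and the unchanged ecotype_id / min_no_of_probes ones) is appended after "order by RAND()", so it is parsed as part of the ORDER BY expression rather than as a WHERE filter and no longer restricts the rows. A sketch that collects the conditions first and appends ORDER BY last, reusing the function's local names; this is illustrative, not the recorded diff:

conditions = ["c.accession_id=a.id", "a.data_source_id=%s"%data_source_id]
if cnv_type_id is not None:
    conditions.append("c.cnv_type_id=%s"%cnv_type_id)
if ecotype_id is not None:
    conditions.append("a.ecotype_id=%s"%ecotype_id)
if min_no_of_probes is not None:
    conditions.append("c.no_of_probes_covered>=%s"%min_no_of_probes)
sql_string = "select a.ecotype_id, c.chromosome, c.start, c.stop, c.size_affected, c.no_of_probes_covered, c.copy_number, c.id "\
    "from %s c, %s a where %s order by RAND()"%\
    (Stock_250kDB.CNVQCCalls.table.name, Stock_250kDB.CNVQCAccession.table.name, " and ".join(conditions))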
ecotype_id2cnv_qc_call_data[row.ecotype_id] = []
cnv_qc_call_data = ecotype_id2cnv_qc_call_data[row.ecotype_id]
cnv_qc_call_data.append((row.chromosome, row.start, row.stop, row.size_affected, row.id))
ecotype_id2cnv_qc_call_data[row.ecotype_id] = RBDict()
segmentKey = CNVSegmentBinarySearchTreeKey(chromosome=row.chromosome, span_ls=[row.start, row.stop], \
    min_reciprocal_overlap=min_reciprocal_overlap)
ecotype_id2cnv_qc_call_data[row.ecotype_id][segmentKey] = (row.chromosome, row.start, row.stop, row.size_affected, \
    row.no_of_probes_covered, row.copy_number, row.id)
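The add above swaps the per-ecotype list for an RBDict keyed by CNVSegmentBinarySearchTreeKey, so looking up a predicted segment finds a QC call that reciprocally overlaps it in O(log n) instead of a linear scan. A toy stand-in for that key class, built on the is_reciprocal_overlap sketch earlier; the real classes are in pymodule and their exact semantics are assumed here:

class SegmentKey(object):
    # keys compare equal when they sit on the same chromosome and
    # reciprocally overlap; otherwise they order by (chromosome, start)
    def __init__(self, chromosome, span_ls, min_reciprocal_overlap=0.6):
        self.chromosome = chromosome
        self.span_ls = span_ls
        self.min_reciprocal_overlap = min_reciprocal_overlap

    def __eq__(self, other):
        return self.chromosome == other.chromosome and \
            is_reciprocal_overlap(self.span_ls, other.span_ls, self.min_reciprocal_overlap)

    def __lt__(self, other):
        return not self.__eq__(other) and \
            (self.chromosome, self.span_ls[0]) < (other.chromosome, other.span_ls[0])

With such a key, tree[SegmentKey(chromosome=1, span_ls=[3000, 4500])] can retrieve a stored QC call whose span overlaps the query sufficiently, rather than requiring exact coordinates.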
for ecotype_id, cnv_qc_call_data in ecotype_id2cnv_qc_call_data.iteritems():
    cnv_qc_call_data.sort()
    ecotype_id2cnv_qc_call_data[ecotype_id] = cnv_qc_call_data
sys.stderr.write("%s cnv qc calls for %s ecotypes. Done.\n"%(count, len(ecotype_id2cnv_qc_call_data)))
import math
for ecotype_id, tree in ecotype_id2cnv_qc_call_data.iteritems():
    print "\tDepth of Ecotype %s's tree: %d" % (ecotype_id, tree.depth())
    print "\tOptimum Depth: %f (%d) (%f%% depth efficiency)" % (tree.optimumdepth(), math.ceil(tree.optimumdepth()), \
        math.ceil(tree.optimumdepth()) / tree.depth())
sys.stderr.write("\t%s cnv qc calls for %s ecotypes. Done.\n"%(count, len(ecotype_id2cnv_qc_call_data)))
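The new diagnostics print each tree's depth against its optimum; for a balanced binary search tree over n keys the minimum depth is ceil(log2(n+1)), which is presumably what tree.optimumdepth() reports (an assumption about the RBDict API, not taken from it). A one-liner to sanity-check the printed figures:

import math

def optimum_depth(n):
    # minimum achievable depth of a binary search tree holding n keys
    return int(math.ceil(math.log(n + 1, 2)))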
def countMatchedDeletionsFunctor(cls, cnv_segment_obj, cnv_qc_call, param_obj):
    """
def countMatchedDeletionsFunctor(cls, param_obj, cnv_segment_obj=None, cnv_qc_call=None):
    """
    2009-12-9
        store qc data in param_obj.array_id2qc_data
def countMatchedDeletionsFunctor(cls, cnv_segment_obj, cnv_qc_call, param_obj):
    """
    2009-11-4
        a functor to be called in compareCNVSegmentsAgainstQCHandler()
    """
    if not hasattr(param_obj, 'no_of_valid_deletions'):
        setattr(param_obj, 'no_of_valid_deletions', 0)
    qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3]
    cnv_qc_call_id = cnv_qc_call[-1]
    param_obj.cnv_qc_call_id_set.add(cnv_qc_call_id)
    param_obj.no_of_valid_deletions += 1
qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3]
cnv_qc_call_id = cnv_qc_call[-1]
param_obj.cnv_qc_call_id_set.add(cnv_qc_call_id)
param_obj.no_of_valid_deletions += 1
if not hasattr(param_obj, "array_id2qc_data"): param_obj.array_id2qc_data = {} if not hasattr(param_obj, "array_id2no_of_probes2qc_data"): param_obj.array_id2no_of_probes2qc_data = {} if not hasattr(param_obj, "array_id2qc_no_of_probes2qc_data"): param_obj.array_id2qc_no_of_probes2qc_data = {} array_id = cnv_segment_obj.array_id no_of_probes = cnv_segment_obj.no_of_probes if array_id not in param_obj.array_id2qc_data: param_obj.array_id2qc_data[array_id] = PassingData(ecotype_id=cnv_segment_obj.ecotype_id, \ no_of_valid_deletions=0,\ no_of_deletions=0,\ cnv_qc_call_id_set=set()) param_obj.array_id2no_of_probes2qc_data[array_id] = {} param_obj.array_id2qc_no_of_probes2qc_data[array_id] = {} if no_of_probes not in param_obj.array_id2no_of_probes2qc_data[array_id]: param_obj.array_id2no_of_probes2qc_data[array_id][no_of_probes] = PassingData(ecotype_id=cnv_segment_obj.ecotype_id, \ no_of_valid_deletions=0,\ no_of_deletions=0,\ cnv_qc_call_id_set=set()) param_obj.array_id2qc_data[array_id].no_of_deletions += 1 param_obj.array_id2no_of_probes2qc_data[array_id][no_of_probes].no_of_deletions += 1 if cnv_qc_call is not None: qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3] cnv_qc_call_id = cnv_qc_call[-1] param_obj.array_id2qc_data[array_id].cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.array_id2qc_data[array_id].no_of_valid_deletions += 1 qc_no_of_probes = cnv_qc_call[4] if qc_no_of_probes not in param_obj.array_id2qc_no_of_probes2qc_data[array_id]: param_obj.array_id2qc_no_of_probes2qc_data[array_id][qc_no_of_probes] = PassingData(ecotype_id=cnv_segment_obj.ecotype_id, \ cnv_qc_call_id_set=set()) param_obj.array_id2qc_no_of_probes2qc_data[array_id][qc_no_of_probes].cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.array_id2no_of_probes2qc_data[array_id][no_of_probes].cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.array_id2no_of_probes2qc_data[array_id][no_of_probes].no_of_valid_deletions += 1
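The add above grows the flat counters into per-array and per-probe-count breakdowns by lazily attaching nested dicts to param_obj via hasattr checks. The same bookkeeping reads more compactly with collections.defaultdict; the structure mirrors the diff, but this helper is not from the codebase:

from collections import defaultdict

def new_qc_data():
    # one bucket of counters per (array, probe-count) class
    return {"no_of_deletions": 0, "no_of_valid_deletions": 0, "cnv_qc_call_id_set": set()}

array_id2no_of_probes2qc_data = defaultdict(lambda: defaultdict(new_qc_data))

def record_segment(array_id, no_of_probes, cnv_qc_call=None):
    qc_data = array_id2no_of_probes2qc_data[array_id][no_of_probes]
    qc_data["no_of_deletions"] += 1
    if cnv_qc_call is not None:
        qc_data["no_of_valid_deletions"] += 1
        qc_data["cnv_qc_call_id_set"].add(cnv_qc_call[-1])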
no_of_QCCalls_matched = len(param_obj.cnv_qc_call_id_set)
no_of_total_QCCalls = sum(map(len, param_obj.ecotype_id2cnv_qc_call_data.values()))
false_negative_rate = (no_of_total_QCCalls-no_of_QCCalls_matched)/float(no_of_total_QCCalls)
sys.stderr.write("False negative rate: %s/%s(%s).\n"%(no_of_total_QCCalls-no_of_QCCalls_matched, no_of_total_QCCalls, false_negative_rate))
for array_id, qc_data in param_obj.array_id2qc_data.iteritems():
    no_of_QCCalls_matched = len(qc_data.cnv_qc_call_id_set)
    no_of_total_QCCalls = len(param_obj.ecotype_id2cnv_qc_call_data[qc_data.ecotype_id])
    false_negative_rate = (no_of_total_QCCalls-no_of_QCCalls_matched)/float(no_of_total_QCCalls)
    sys.stderr.write("Array %s false negative rate: %s/%s(%s).\n"%(array_id, \
        no_of_total_QCCalls-no_of_QCCalls_matched,\
        no_of_total_QCCalls, false_negative_rate))
    if getattr(param_obj, 'array_id2qc_no_of_probes2qc_data', None):
        qc_no_of_probes2qc_data = param_obj.array_id2qc_no_of_probes2qc_data[array_id]
        no_of_probes_ls = qc_no_of_probes2qc_data.keys()
        no_of_probes_ls.sort()
        for no_of_probes in no_of_probes_ls:
            qc_data = qc_no_of_probes2qc_data[no_of_probes]
            no_of_QCCalls_matched = len(qc_data.cnv_qc_call_id_set)
            no_of_total_QCCalls = len(param_obj.ecotype_id2qc_no_of_probes2cnv_qc_call_id_set[qc_data.ecotype_id][no_of_probes])
            false_negative_rate = (no_of_total_QCCalls-no_of_QCCalls_matched)/float(no_of_total_QCCalls)
            sys.stderr.write("\t%s\t%s\t%s\t%s\n"%(no_of_probes, \
                no_of_total_QCCalls-no_of_QCCalls_matched,\
                no_of_total_QCCalls, false_negative_rate))
def outputFalseNegativeRate(cls, param_obj): """ 2009-11-4 """ no_of_QCCalls_matched = len(param_obj.cnv_qc_call_id_set) no_of_total_QCCalls = sum(map(len, param_obj.ecotype_id2cnv_qc_call_data.values())) false_negative_rate = (no_of_total_QCCalls-no_of_QCCalls_matched)/float(no_of_total_QCCalls) sys.stderr.write("False negative rate: %s/%s(%s).\n"%(no_of_total_QCCalls-no_of_QCCalls_matched, no_of_total_QCCalls, false_negative_rate))
no_of_valid_deletions = param_obj.no_of_valid_deletions
no_of_deletions = param_obj.no_of_deletions
no_of_non_valid_deletions = no_of_deletions-no_of_valid_deletions
false_positive_rate = no_of_non_valid_deletions/float(no_of_deletions)
sys.stderr.write("False positive rate: %s/%s(%s).\n"%\
    (no_of_non_valid_deletions, no_of_deletions, false_positive_rate))
for array_id, qc_data in param_obj.array_id2qc_data.iteritems():
    no_of_valid_deletions = qc_data.no_of_valid_deletions
    no_of_deletions = qc_data.no_of_deletions
    no_of_non_valid_deletions = no_of_deletions-no_of_valid_deletions
    false_positive_rate = no_of_non_valid_deletions/float(no_of_deletions)
    sys.stderr.write("Array %s false positive rate: %s/%s(%s).\n"%(array_id, \
        no_of_non_valid_deletions, no_of_deletions, false_positive_rate))
    if getattr(param_obj, 'array_id2no_of_probes2qc_data', None):
        no_of_probes2qc_data = param_obj.array_id2no_of_probes2qc_data[array_id]
        no_of_probes_ls = no_of_probes2qc_data.keys()
        no_of_probes_ls.sort()
        for no_of_probes in no_of_probes_ls:
            qc_data = no_of_probes2qc_data[no_of_probes]
            no_of_valid_deletions = qc_data.no_of_valid_deletions
            no_of_deletions = qc_data.no_of_deletions
            no_of_non_valid_deletions = no_of_deletions-no_of_valid_deletions
            false_positive_rate = no_of_non_valid_deletions/float(no_of_deletions)
            sys.stderr.write("\t%s\t%s\t%s\t%s\n"%(no_of_probes, \
                no_of_non_valid_deletions, no_of_deletions, false_positive_rate))
def outputFalsePositiveRate(cls, param_obj): """ 2009-11-4 """ no_of_valid_deletions = param_obj.no_of_valid_deletions no_of_deletions = param_obj.no_of_deletions no_of_non_valid_deletions = no_of_deletions-no_of_valid_deletions false_positive_rate = no_of_non_valid_deletions/float(no_of_deletions) sys.stderr.write("False positive rate: %s/%s(%s).\n"%\ (no_of_non_valid_deletions, no_of_deletions, false_positive_rate))
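outputFalseNegativeRate and outputFalsePositiveRate above, and the per-array replacements in this batch, reduce to two ratios: FNR = (total QC calls - matched QC calls) / total QC calls, and FPR = (predicted deletions - validated deletions) / predicted deletions. The same formulas as standalone functions, with illustrative names:

def false_negative_rate(no_of_total_QCCalls, no_of_QCCalls_matched):
    # fraction of known QC deletions that the predicted segments missed
    return (no_of_total_QCCalls - no_of_QCCalls_matched)/float(no_of_total_QCCalls)

def false_positive_rate(no_of_deletions, no_of_valid_deletions):
    # fraction of predicted deletions that matched nothing in the QC data
    return (no_of_deletions - no_of_valid_deletions)/float(no_of_deletions)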
count_embedded_segment_as_match=True):
    """
count_embedded_segment_as_match=True, min_reciprocal_overlap=0.6):
    """
    2010-1-26
        pass min_reciprocal_overlap to cls.getCNVQCDataFromDB()
    2009-12-9
        calculate FNR for each class with same number of probes
def countNoOfCNVDeletionsMatchQC(cls, db_250k, input_fname_ls, ecotype_id=6909, data_source_id=3, cnv_type_id=1, \ min_QC_segment_size=200, deletion_cutoff=-0.33, max_boundary_diff=10000, \ max_diff_perc=0.10, min_no_of_probes=5,\ count_embedded_segment_as_match=True): """ 2009-10-29 for all CNV deletions, check how many are in QC dataset. """ ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, ecotype_id, cnv_type_id, min_QC_segment_size, min_no_of_probes) from pymodule import PassingData param_obj = PassingData(no_of_valid_deletions=0, cnv_qc_call_id_set=set()) cls.compareCNVSegmentsAgainstQCHandler(input_fname_ls, ecotype_id2cnv_qc_call_data, cls.countMatchedDeletionsFunctor, param_obj, \ deletion_cutoff, max_boundary_diff, max_diff_perc, min_no_of_probes, \ count_embedded_segment_as_match=count_embedded_segment_as_match) sys.stderr.write("For ecotype_id %s, data_source_id %s, min_QC_segment_size %s, deletion_cutoff: %s, min_no_of_probes: %s, max_boundary_diff: %s, max_diff_perc %s.\n"%\ (ecotype_id, \ data_source_id, min_QC_segment_size, deletion_cutoff, min_no_of_probes, max_boundary_diff, max_diff_perc)) param_obj.ecotype_id2cnv_qc_call_data = ecotype_id2cnv_qc_call_data cls.outputFalsePositiveRate(param_obj) cls.outputFalseNegativeRate(param_obj)
ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, ecotype_id, cnv_type_id, min_QC_segment_size, min_no_of_probes)
ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, ecotype_id, cnv_type_id, min_QC_segment_size, min_no_of_probes,\ min_reciprocal_overlap=min_reciprocal_overlap)
param_obj = PassingData(no_of_valid_deletions=0, cnv_qc_call_id_set=set())
param_obj = PassingData(no_of_valid_deletions=0, cnv_qc_call_id_set=set(), array_id2qc_data={})
count_embedded_segment_as_match=count_embedded_segment_as_match)
sys.stderr.write("For ecotype_id %s, data_source_id %s, min_QC_segment_size %s, deletion_cutoff: %s, min_no_of_probes: %s, max_boundary_diff: %s, max_diff_perc %s.\n"%\
count_embedded_segment_as_match=count_embedded_segment_as_match, \
    min_reciprocal_overlap=min_reciprocal_overlap, report=False)
sys.stderr.write("For ecotype_id %s, data_source_id %s, min_QC_segment_size %s, deletion_cutoff: %s, min_no_of_probes: %s, min_reciprocal_overlap: %s.\n"%\
data_source_id, min_QC_segment_size, deletion_cutoff, min_no_of_probes, max_boundary_diff, max_diff_perc))
data_source_id, min_QC_segment_size, deletion_cutoff, min_no_of_probes, min_reciprocal_overlap))
for max_boundary_diff in [10000]:
    for max_diff_perc in [0.20, 0.3]:
        CNV.countNoOfCNVDeletionsMatchQC(db_250k, input_fname_ls, ecotype_id=ecotype_id, data_source_id=data_source_id, \
            cnv_type_id=1,\
for min_reciprocal_overlap in [0.4, 0.6, 0.8]:
    CNV.countNoOfCNVDeletionsMatchQC(db_250k, input_fname_ls, ecotype_id=ecotype_id, data_source_id=data_source_id, \
        cnv_type_id=1,\
max_boundary_diff=max_boundary_diff, max_diff_perc=max_diff_perc, \
count_embedded_segment_as_match=count_embedded_segment_as_match) """ @classmethod def addAmplitudeFunctor(cls, cnv_segment_obj, cnv_qc_call, param_obj): """
count_embedded_segment_as_match=count_embedded_segment_as_match,\ min_reciprocal_overlap=min_reciprocal_overlap) input_fname_ls = [] for i in range(1,6): input_fname_ls.append(os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity_QNorm_sub_ref_chr%s.GADA_A0.5T4M5.tsv'%i)) ecotype_id_data_source_id_ls = [(6932, 7)] min_QC_segment_size = 5 min_no_of_probes = 5 count_embedded_segment_as_match = True for ecotype_id, data_source_id in ecotype_id_data_source_id_ls: for deletion_cutoff in [-0.33, -0.5]: for min_reciprocal_overlap in [0.4, 0.8]: CNV.countNoOfCNVDeletionsMatchQC(db_250k, input_fname_ls, ecotype_id=ecotype_id, data_source_id=data_source_id, \ cnv_type_id=1,\ min_QC_segment_size=min_QC_segment_size, deletion_cutoff=deletion_cutoff, \ min_no_of_probes=min_no_of_probes,\ count_embedded_segment_as_match=count_embedded_segment_as_match,\ min_reciprocal_overlap=min_reciprocal_overlap) """ @classmethod def addAmplitudeFunctor(cls, param_obj, cnv_segment_obj, cnv_qc_call=None): """ 2009-12-9 adjust argument order and process only if cnv_qc_call is not None
def countNoOfCNVDeletionsMatchQC(cls, db_250k, input_fname_ls, ecotype_id=6909, data_source_id=3, cnv_type_id=1, \ min_QC_segment_size=200, deletion_cutoff=-0.33, max_boundary_diff=10000, \ max_diff_perc=0.10, min_no_of_probes=5,\ count_embedded_segment_as_match=True): """ 2009-10-29 for all CNV deletions, check how many are in QC dataset. """ ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, ecotype_id, cnv_type_id, min_QC_segment_size, min_no_of_probes) from pymodule import PassingData param_obj = PassingData(no_of_valid_deletions=0, cnv_qc_call_id_set=set()) cls.compareCNVSegmentsAgainstQCHandler(input_fname_ls, ecotype_id2cnv_qc_call_data, cls.countMatchedDeletionsFunctor, param_obj, \ deletion_cutoff, max_boundary_diff, max_diff_perc, min_no_of_probes, \ count_embedded_segment_as_match=count_embedded_segment_as_match) sys.stderr.write("For ecotype_id %s, data_source_id %s, min_QC_segment_size %s, deletion_cutoff: %s, min_no_of_probes: %s, max_boundary_diff: %s, max_diff_perc %s.\n"%\ (ecotype_id, \ data_source_id, min_QC_segment_size, deletion_cutoff, min_no_of_probes, max_boundary_diff, max_diff_perc)) param_obj.ecotype_id2cnv_qc_call_data = ecotype_id2cnv_qc_call_data cls.outputFalsePositiveRate(param_obj) cls.outputFalseNegativeRate(param_obj)
qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3] cnv_qc_call_id = cnv_qc_call[-1] param_obj.cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.amp_ls.append(cnv_segment_obj.amplitude)
if cnv_qc_call is not None: qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3] cnv_qc_call_id = cnv_qc_call[-1] param_obj.cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.amp_ls.append(cnv_segment_obj.amplitude)
def addAmplitudeFunctor(cls, cnv_segment_obj, cnv_qc_call, param_obj): """ 2009-11-4 """ if not hasattr(param_obj, 'amp_ls'): setattr(param_obj, 'amp_ls', []) qc_chromosome, qc_start, qc_stop = cnv_qc_call[:3] cnv_qc_call_id = cnv_qc_call[-1] param_obj.cnv_qc_call_id_set.add(cnv_qc_call_id) param_obj.amp_ls.append(cnv_segment_obj.amplitude)
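The handler/functor pattern above means a new per-segment statistic only needs a small callback. Below is a sketch of a custom functor (the name is ours), assuming compareCNVSegmentsAgainstQCHandler invokes it once per CNV segment with the post-2009-12-9 argument order and passes cnv_qc_call=None when no QC call matches:

@classmethod
def countValidatedSegmentFunctor(cls, param_obj, cnv_segment_obj, cnv_qc_call=None):
    # hypothetical functor: count segments validated by at least one QC call
    if not hasattr(param_obj, 'no_of_validated_segments'):
        setattr(param_obj, 'no_of_validated_segments', 0)
    if cnv_qc_call is not None:    # None means no QC call matched this segment
        param_obj.no_of_validated_segments += 1
        param_obj.cnv_qc_call_id_set.add(cnv_qc_call[-1])    # last element is the QC call id, as in addAmplitudeFunctor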
max_diff_perc=0.10, count_embedded_segment_as_match=True):
max_diff_perc=0.10, count_embedded_segment_as_match=True, min_reciprocal_overlap=0.6):
def drawHistOfAmpOfValidatedDeletions(cls, db_250k, input_fname_ls, output_fname_prefix, data_source_id=1, cnv_type_id=1, \ min_QC_segment_size=200, min_no_of_probes=5, max_boundary_diff=10000, \ max_diff_perc=0.10, count_embedded_segment_as_match=True): """ 2009-11-4 draw histogram of amplitude of segments that are validated according to certain QC data """ ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, cnv_type_id=cnv_type_id, \ min_QC_segment_size=min_QC_segment_size,\ min_no_of_probes=min_no_of_probes) from pymodule import PassingData param_obj = PassingData(amp_ls=[], cnv_qc_call_id_set=set()) cls.compareCNVSegmentsAgainstQCHandler(input_fname_ls, ecotype_id2cnv_qc_call_data, cls.addAmplitudeFunctor, param_obj, \ deletion_cutoff=None, max_boundary_diff=max_boundary_diff, max_diff_perc=max_diff_perc,\ min_no_of_probes=min_no_of_probes, \ count_embedded_segment_as_match=count_embedded_segment_as_match)
min_no_of_probes=min_no_of_probes)
min_no_of_probes=min_no_of_probes, \ min_reciprocal_overlap=min_reciprocal_overlap)
def drawHistOfAmpOfValidatedDeletions(cls, db_250k, input_fname_ls, output_fname_prefix, data_source_id=1, cnv_type_id=1, \ min_QC_segment_size=200, min_no_of_probes=5, max_boundary_diff=10000, \ max_diff_perc=0.10, count_embedded_segment_as_match=True): """ 2009-11-4 draw histogram of amplitude of segments that are validated according to certain QC data """ ecotype_id2cnv_qc_call_data = cls.getCNVQCDataFromDB(data_source_id, cnv_type_id=cnv_type_id, \ min_QC_segment_size=min_QC_segment_size,\ min_no_of_probes=min_no_of_probes) from pymodule import PassingData param_obj = PassingData(amp_ls=[], cnv_qc_call_id_set=set()) cls.compareCNVSegmentsAgainstQCHandler(input_fname_ls, ecotype_id2cnv_qc_call_data, cls.addAmplitudeFunctor, param_obj, \ deletion_cutoff=None, max_boundary_diff=max_boundary_diff, max_diff_perc=max_diff_perc,\ min_no_of_probes=min_no_of_probes, \ count_embedded_segment_as_match=count_embedded_segment_as_match)
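The context above is truncated before the plotting step. A minimal sketch of what presumably follows, assuming pylab is available (bin count and output name are illustrative):

import pylab
pylab.clf()
pylab.hist(param_obj.amp_ls, bins=40)    # amplitudes collected by addAmplitudeFunctor
pylab.xlabel('segment amplitude')
pylab.ylabel('count')
pylab.savefig('%s.png'%output_fname_prefix, dpi=300)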
overlap_length = max(0, stop-self.start) - max(0, stop-self.stop) - max(0, start-self.start) overlap_length = float(overlap_length) overlap1 = overlap_length/(stop-start) overlap2 = overlap_length/self.segment_length if overlap1>=self.min_reciprocal_overlap and overlap2>=self.min_reciprocal_overlap:
is_overlap = is_reciprocal_overlap([start, stop], [self.start, self.stop], \ min_reciprocal_overlap=self.min_reciprocal_overlap) if is_overlap:
def addNewCNV(self, chromosome, start, stop, array_id=None): """ """ if self.chromosome is None: self.addOneCNV(chromosome, start, stop, array_id) elif self.chromosome is not None and chromosome!=self.chromosome: return False else: """ boundary_diff1 = abs(start-self.start) boundary_diff2 = abs(stop-self.stop) diff1_perc = boundary_diff1/float(self.segment_length) diff2_perc = boundary_diff2/float(self.segment_length) if boundary_diff1<=self.max_boundary_diff and boundary_diff2<=self.max_boundary_diff and \ diff1_perc<=self.max_diff_perc and diff2_perc<=self.max_diff_perc: self.addOneCNV(chromosome, start, stop, array_id) else: return False """ overlap_length = max(0, stop-self.start) - max(0, stop-self.stop) - max(0, start-self.start) # accommodates 6 scenarios overlap_length = float(overlap_length) overlap1 = overlap_length/(stop-start) overlap2 = overlap_length/self.segment_length if overlap1>=self.min_reciprocal_overlap and overlap2>=self.min_reciprocal_overlap: self.addOneCNV(chromosome, start, stop, array_id) else: return False
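The closed-form overlap expression above is easy to misread, so here is a small standalone check (the helper name is ours; values are illustrative):

def overlap_len(start, stop, other_start, other_stop):
    # same expression as in addNewCNV: length of the intersection when the
    # two segments overlap, zero or negative when they do not
    return max(0, stop - other_start) - max(0, stop - other_stop) - max(0, start - other_start)

print overlap_len(100, 200, 150, 300)    # 50, intersection is [150, 200]
print overlap_len(100, 200, 120, 180)    # 60, [120, 180] is embedded in [100, 200]
print overlap_len(100, 200, 300, 400)    # 0, disjoint (other segment downstream)
print overlap_len(300, 400, 100, 200)    # -100, disjoint (other segment upstream); fails the ratio test anyway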
2 functions: 1. detect deletions. make sure the deletion is covered by the sequencing.
Two functions: 1. deletion_only=True. make sure the deletion is covered by the sequencing.
def discoverLerDeletionDuplication(cls, db_250k, ler_blast_result_fname, output_fname, deletion_only=True, min_no_of_matches=25): """ 2009-12-7 ler_blast_result_fname is the output of blasting all CNV probes against Ler contigs http://www.arabidopsis.org/browse/Cereon/index.jsp. 2 functions: 1. detect deletions. make sure the deletion is covered by the sequencing. one naive criterion is if the boundary (two adjacent non-deleted probes) is within the same contig, then yes. 2. detect copy number changes. If two adjacent probes have different number of contigs, then it's a copy number change point. """ from pymodule import getColName2IndexFromHeader, PassingData, figureOutDelimiter sys.stderr.write("Reading from %s ... \n"%ler_blast_result_fname) counter = 0 real_counter = 0 import csv reader = csv.reader(open(ler_blast_result_fname), delimiter=figureOutDelimiter(ler_blast_result_fname)) header = reader.next() col_name2index = getColName2IndexFromHeader(header) probe_id2contig_id_ls = {} for row in reader: contig_label = row[col_name2index['Alignment_title']] probe_id = int(row[col_name2index['Probe_ID']]) no_of_matches = int(row[col_name2index['Number_matches']]) if no_of_matches>=min_no_of_matches: contig_id = ' '.join(contig_label.split()[1:]) if probe_id not in probe_id2contig_id_ls: probe_id2contig_id_ls[probe_id] = [] probe_id2contig_id_ls[probe_id].append(contig_id) sys.stderr.write("Done.\n") del reader import Stock_250kDB from DB_250k2Array import DB_250k2Array session = db_250k.session probes, xy_ls, chr_pos_ls, total_probe_id_ls = DB_250k2Array.get_probes(db_250k.metadata.bind, Stock_250kDB.Probes.table.name, \ snps=None, run_type=2) chr2xy_ls, chr2probe_id_ls = DB_250k2Array.organizeProbesIntoChromosome(xy_ls, chr_pos_ls, total_probe_id_ls) writer = csv.writer(open(output_fname, 'w'), delimiter='\t') header_row = ['start_probe_id', 'start_chr_pos', 'stop_probe_id', 'stop_chr_pos', 'no_of_probes', 'length', 'copy_number'] writer.writerow(header_row) sys.stderr.write("Discovering deletions ...\n") counter = 0 real_counter = 0 for chr, probe_id_ls in chr2probe_id_ls.iteritems(): no_of_probes = len(probe_id_ls) index_of_prev_probe_within_a_contig = None index_of_prev_probe_with_a_different_copy_number = None for i in range(no_of_probes): probe_id = probe_id_ls[i] contig_id_ls = probe_id2contig_id_ls.get(probe_id,[]) copy_number = len(contig_id_ls) if i==0: index_of_prev_probe_with_a_different_copy_number = -1 # set before the first probe (index=0) else: prev_probe_contig_id_ls = probe_id2contig_id_ls.get(probe_id_ls[i-1],[]) prev_copy_number = len(prev_probe_contig_id_ls) if not deletion_only: if copy_number != prev_copy_number: # a change point of copy number if index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i-1] stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number-1, stop_probe.pos-start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_with_a_different_copy_number = i-1 else: # look for deletion only. The only difference from above is to make sure the deletion is covered by the sequencing.
#one naive criterion is if the boundary is within the same contig, then yes. if prev_copy_number>0 and copy_number==0: # from non-deletion to deletion index_of_prev_probe_within_a_contig = i-1 elif prev_copy_number==0 and copy_number>0: # from deletion to non-deletion if index_of_prev_probe_within_a_contig is not None: # found a potential deletion current_contig_id_set = set(contig_id_ls) prev_non_deleted_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig] prev_non_deleted_probe_contig_id_ls = probe_id2contig_id_ls.get(prev_non_deleted_probe_id, []) prev_non_deleted_probe_contig_id_set = set(prev_non_deleted_probe_contig_id_ls) if len(prev_non_deleted_probe_contig_id_set&current_contig_id_set)>0: #share at least one contig. deletion confirmed deletion_start_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig+1] deletion_start_probe = probes.get_one_probe(deletion_start_probe_id) deletion_start_chr_pos = '%s_%s'%(deletion_start_probe.chr, deletion_start_probe.pos) deletion_stop_probe_id = probe_id_ls[i-1] deletion_stop_probe = probes.get_one_probe(deletion_stop_probe_id) deletion_stop_chr_pos = '%s_%s'%(deletion_stop_probe.chr, deletion_stop_probe.pos) row = [deletion_start_probe_id, deletion_start_chr_pos, deletion_stop_probe_id, deletion_stop_chr_pos, \ i-index_of_prev_probe_within_a_contig-1, deletion_stop_probe.pos-deletion_start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_within_a_contig = i elif prev_copy_number>0 and copy_number>0: # from non-deletion to non-deletion index_of_prev_probe_within_a_contig = i counter += 1 if counter%10000==0: sys.stderr.write("%s%s\t%s"%('\x08'*80, counter, real_counter)) # don't forget the last segment if it's not in the deletion_only mode. if not deletion_only and index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i] # watch: not i-1. stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number, stop_probe.pos-start_probe.pos, copy_number] # watch no -1, and it's copy_number writer.writerow(row) real_counter += 1 sys.stderr.write("Done.\n") del writer
2. detect copy number changes. If two adjacent probes have different number of contigs, then it's a copy number change point.
2. deletion_only=False, detect copy number changes. If two adjacent probes have different number of contigs, then it's a copy number change point.
def discoverLerDeletionDuplication(cls, db_250k, ler_blast_result_fname, output_fname, deletion_only=True, min_no_of_matches=25): """ 2009-12-7 ler_blast_result_fname is the output of blasting all CNV probes against Ler contigs http://www.arabidopsis.org/browse/Cereon/index.jsp. 2 functions: 1. detect deletions. make sure the deletion is covered by the sequencing. one naive criterion is if the boundary (two adjacent non-deleted probes) is within the same contig, then yes. 2. detect copy number changes. If two adjacent probes have different number of contigs, then it's a copy number change point. """ from pymodule import getColName2IndexFromHeader, PassingData, figureOutDelimiter sys.stderr.write("Reading from %s ... \n"%ler_blast_result_fname) counter = 0 real_counter = 0 import csv reader = csv.reader(open(ler_blast_result_fname), delimiter=figureOutDelimiter(ler_blast_result_fname)) header = reader.next() col_name2index = getColName2IndexFromHeader(header) probe_id2contig_id_ls = {} for row in reader: contig_label = row[col_name2index['Alignment_title']] probe_id = int(row[col_name2index['Probe_ID']]) no_of_matches = int(row[col_name2index['Number_matches']]) if no_of_matches>=min_no_of_matches: contig_id = ' '.join(contig_label.split()[1:]) if probe_id not in probe_id2contig_id_ls: probe_id2contig_id_ls[probe_id] = [] probe_id2contig_id_ls[probe_id].append(contig_id) sys.stderr.write("Done.\n") del reader import Stock_250kDB from DB_250k2Array import DB_250k2Array session = db_250k.session probes, xy_ls, chr_pos_ls, total_probe_id_ls = DB_250k2Array.get_probes(db_250k.metadata.bind, Stock_250kDB.Probes.table.name, \ snps=None, run_type=2) chr2xy_ls, chr2probe_id_ls = DB_250k2Array.organizeProbesIntoChromosome(xy_ls, chr_pos_ls, total_probe_id_ls) writer = csv.writer(open(output_fname, 'w'), delimiter='\t') header_row = ['start_probe_id', 'start_chr_pos', 'stop_probe_id', 'stop_chr_pos', 'no_of_probes', 'length', 'copy_number'] writer.writerow(header_row) sys.stderr.write("Discovering deletions ...\n") counter = 0 real_counter = 0 for chr, probe_id_ls in chr2probe_id_ls.iteritems(): no_of_probes = len(probe_id_ls) index_of_prev_probe_within_a_contig = None index_of_prev_probe_with_a_different_copy_number = None for i in range(no_of_probes): probe_id = probe_id_ls[i] contig_id_ls = probe_id2contig_id_ls.get(probe_id,[]) copy_number = len(contig_id_ls) if i==0: index_of_prev_probe_with_a_different_copy_number = -1 # set before the first probe (index=0) else: prev_probe_contig_id_ls = probe_id2contig_id_ls.get(probe_id_ls[i-1],[]) prev_copy_number = len(prev_probe_contig_id_ls) if not deletion_only: if copy_number != prev_copy_number: # a change point of copy number if index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i-1] stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number-1, stop_probe.pos-start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_with_a_different_copy_number = i-1 else: # look for deletion only. The only difference from above is to make sure the deletion is covered by the sequencing.
#one naive criterion is if the boundary is within the same contig, then yes. if prev_copy_number>0 and copy_number==0: # from non-deletion to deletion index_of_prev_probe_within_a_contig = i-1 elif prev_copy_number==0 and copy_number>0: # from deletion to non-deletion if index_of_prev_probe_within_a_contig is not None: # found a potential deletion current_contig_id_set = set(contig_id_ls) prev_non_deleted_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig] prev_non_deleted_probe_contig_id_ls = probe_id2contig_id_ls.get(prev_non_deleted_probe_id, []) prev_non_deleted_probe_contig_id_set = set(prev_non_deleted_probe_contig_id_ls) if len(prev_non_deleted_probe_contig_id_set&current_contig_id_set)>0: #share at least one contig. deletion confirmed deletion_start_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig+1] deletion_start_probe = probes.get_one_probe(deletion_start_probe_id) deletion_start_chr_pos = '%s_%s'%(deletion_start_probe.chr, deletion_start_probe.pos) deletion_stop_probe_id = probe_id_ls[i-1] deletion_stop_probe = probes.get_one_probe(deletion_stop_probe_id) deletion_stop_chr_pos = '%s_%s'%(deletion_stop_probe.chr, deletion_stop_probe.pos) row = [deletion_start_probe_id, deletion_start_chr_pos, deletion_stop_probe_id, deletion_stop_chr_pos, \ i-index_of_prev_probe_within_a_contig-1, deletion_stop_probe.pos-deletion_start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_within_a_contig = i elif prev_copy_number>0 and copy_number>0: # from non-deletion to non-deletion index_of_prev_probe_within_a_contig = i counter += 1 if counter%10000==0: sys.stderr.write("%s%s\t%s"%('\x08'*80, counter, real_counter)) # don't forget the last segment if it's not in the deletion_only mode. if not deletion_only and index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i] # watch: not i-1. stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number, stop_probe.pos-start_probe.pos, copy_number] # watch no -1, and it's copy_number writer.writerow(row) real_counter += 1 sys.stderr.write("Done.\n") del writer
session = db_250k.session
def discoverLerDeletionDuplication(cls, db_250k, ler_blast_result_fname, output_fname, deletion_only=True, min_no_of_matches=25): """ 2009-12-7 ler_blast_result_fname is the output of blasting all CNV probes against Ler contigs http://www.arabidopsis.org/browse/Cereon/index.jsp. 2 functions: 1. detect deletions. make sure the deletion is covered by the sequencing. one naive criterion is if the boundary (two adjacent non-deleted probes) is within the same contig, then yes. 2. detect copy number changes. If two adjacent probes have different number of contigs, then it's a copy number change point. """ from pymodule import getColName2IndexFromHeader, PassingData, figureOutDelimiter sys.stderr.write("Reading from %s ... \n"%ler_blast_result_fname) counter = 0 real_counter = 0 import csv reader = csv.reader(open(ler_blast_result_fname), delimiter=figureOutDelimiter(ler_blast_result_fname)) header = reader.next() col_name2index = getColName2IndexFromHeader(header) probe_id2contig_id_ls = {} for row in reader: contig_label = row[col_name2index['Alignment_title']] probe_id = int(row[col_name2index['Probe_ID']]) no_of_matches = int(row[col_name2index['Number_matches']]) if no_of_matches>=min_no_of_matches: contig_id = ' '.join(contig_label.split()[1:]) if probe_id not in probe_id2contig_id_ls: probe_id2contig_id_ls[probe_id] = [] probe_id2contig_id_ls[probe_id].append(contig_id) sys.stderr.write("Done.\n") del reader import Stock_250kDB from DB_250k2Array import DB_250k2Array session = db_250k.session probes, xy_ls, chr_pos_ls, total_probe_id_ls = DB_250k2Array.get_probes(db_250k.metadata.bind, Stock_250kDB.Probes.table.name, \ snps=None, run_type=2) chr2xy_ls, chr2probe_id_ls = DB_250k2Array.organizeProbesIntoChromosome(xy_ls, chr_pos_ls, total_probe_id_ls) writer = csv.writer(open(output_fname, 'w'), delimiter='\t') header_row = ['start_probe_id', 'start_chr_pos', 'stop_probe_id', 'stop_chr_pos', 'no_of_probes', 'length', 'copy_number'] writer.writerow(header_row) sys.stderr.write("Discovering deletions ...\n") counter = 0 real_counter = 0 for chr, probe_id_ls in chr2probe_id_ls.iteritems(): no_of_probes = len(probe_id_ls) index_of_prev_probe_within_a_contig = None index_of_prev_probe_with_a_different_copy_number = None for i in range(no_of_probes): probe_id = probe_id_ls[i] contig_id_ls = probe_id2contig_id_ls.get(probe_id,[]) copy_number = len(contig_id_ls) if i==0: index_of_prev_probe_with_a_different_copy_number = -1 # set before the first probe (index=0) else: prev_probe_contig_id_ls = probe_id2contig_id_ls.get(probe_id_ls[i-1],[]) prev_copy_number = len(prev_probe_contig_id_ls) if not deletion_only: if copy_number != prev_copy_number: # a change point of copy number if index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i-1] stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number-1, stop_probe.pos-start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_with_a_different_copy_number = i-1 else: # look for deletion only. The only difference from above is to make sure the deletion is covered by the sequencing.
#one naive criterion is if the boundary is within the same contig, then yes. if prev_copy_number>0 and copy_number==0: # from non-deletion to deletion index_of_prev_probe_within_a_contig = i-1 elif prev_copy_number==0 and copy_number>0: # from deletion to non-deletion if index_of_prev_probe_within_a_contig is not None: # found a potential deletion current_contig_id_set = set(contig_id_ls) prev_non_deleted_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig] prev_non_deleted_probe_contig_id_ls = probe_id2contig_id_ls.get(prev_non_deleted_probe_id, []) prev_non_deleted_probe_contig_id_set = set(prev_non_deleted_probe_contig_id_ls) if len(prev_non_deleted_probe_contig_id_set&current_contig_id_set)>0: #share at least one contig. deletion confirmed deletion_start_probe_id = probe_id_ls[index_of_prev_probe_within_a_contig+1] deletion_start_probe = probes.get_one_probe(deletion_start_probe_id) deletion_start_chr_pos = '%s_%s'%(deletion_start_probe.chr, deletion_start_probe.pos) deletion_stop_probe_id = probe_id_ls[i-1] deletion_stop_probe = probes.get_one_probe(deletion_stop_probe_id) deletion_stop_chr_pos = '%s_%s'%(deletion_stop_probe.chr, deletion_stop_probe.pos) row = [deletion_start_probe_id, deletion_start_chr_pos, deletion_stop_probe_id, deletion_stop_chr_pos, \ i-index_of_prev_probe_within_a_contig-1, deletion_stop_probe.pos-deletion_start_probe.pos, prev_copy_number] writer.writerow(row) real_counter += 1 index_of_prev_probe_within_a_contig = i elif prev_copy_number>0 and copy_number>0: # from non-deletion to non-deletion index_of_prev_probe_within_a_contig = i counter += 1 if counter%10000==0: sys.stderr.write("%s%s\t%s"%('\x08'*80, counter, real_counter)) # don't forget the last segment if it's not in the deletion_only mode. if not deletion_only and index_of_prev_probe_with_a_different_copy_number is not None: start_probe_id = probe_id_ls[index_of_prev_probe_with_a_different_copy_number+1] start_probe = probes.get_one_probe(start_probe_id) start_chr_pos = '%s_%s'%(start_probe.chr, start_probe.pos) stop_probe_id = probe_id_ls[i] # watch: not i-1. stop_probe = probes.get_one_probe(stop_probe_id) stop_chr_pos = '%s_%s'%(stop_probe.chr, stop_probe.pos) row = [start_probe_id, start_chr_pos, stop_probe_id, stop_chr_pos, \ i-index_of_prev_probe_with_a_different_copy_number, stop_probe.pos-start_probe.pos, copy_number] # watch no -1, and it's copy_number writer.writerow(row) real_counter += 1 sys.stderr.write("Done.\n") del writer
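A toy illustration of the change-point walk in discoverLerDeletionDuplication (a self-contained sketch, not the production code): per-probe copy numbers are scanned left to right, and a segment is emitted whenever the copy number changes.

copy_number_ls = [1, 1, 1, 0, 0, 2, 2, 2]    # hypothetical per-probe contig-match counts
segments = []    # (start_index, stop_index, copy_number)
start_index = 0
for i in range(1, len(copy_number_ls)):
    if copy_number_ls[i] != copy_number_ls[i-1]:    # a change point of copy number
        segments.append((start_index, i-1, copy_number_ls[i-1]))
        start_index = i
segments.append((start_index, len(copy_number_ls)-1, copy_number_ls[-1]))    # don't forget the last segment
print segments    # [(0, 2, 1), (3, 4, 0), (5, 7, 2)]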
snpData = SNPData(input_fname=inputFname, turn_into_array=1)
row_id_key_set = set([row_id1, row_id2]) snpData = SNPData(input_fname=inputFname, turn_into_array=1, row_id_key_set=row_id_key_set)
def cmpOneRowToTheOther(cls, inputFname, row_id1, row_id2): """ 2009-6-17 compare SNP data of one accession to the other in the same dataset """ sys.stderr.write("Comparing one row to the other ... \n") from pymodule import SNPData, TwoSNPData, PassingData snpData = SNPData(input_fname=inputFname, turn_into_array=1) twoSNPData = TwoSNPData(SNPData1=snpData, SNPData2=snpData) print twoSNPData.cmpOneRow(row_id1, row_id2)
inputFname = '/Network/Data/250k/db/dataset/call_method_29.tsv' row_id1 = ('6910', '62') row_id2 = ('8290', '181') AnalyzeSNPData.cmpOneRowToTheOther(inputFname, row_id1, row_id2)
inputFname = '/Network/Data/250k/db/dataset/call_method_29.tsv' row_id1 = ('6910', '62') row_id2 = ('8290', '181') AnalyzeSNPData.cmpOneRowToTheOther(inputFname, row_id1, row_id2) inputFname = os.path.expanduser('~/mnt2/panfs/NPUTE_data/input/250k_l3_y.85_20091208.tsv') row_id1 = ('7034', '1338') row_id2 = ('7035', '336') AnalyzeSNPData.cmpOneRowToTheOther(inputFname, row_id1, row_id2) """ @classmethod def cmpAllDuplicatesOfOneEcotype(cls, inputFname, ecotype_id_ls): """ 2009-12-11 For each ecotype_id in ecotype_id_ls, compare mismatch-rates between duplicates """ sys.stderr.write("Comparing one row to the other ... \n") from pymodule import SNPData, TwoSNPData, PassingData ecotype_id_set = set(ecotype_id_ls) def row_id_hash_func(row_id): return int(row_id[0]) snpData = SNPData(input_fname=inputFname, turn_into_array=1, row_id_key_set=ecotype_id_set, row_id_hash_func=row_id_hash_func) ecotype_id2row_id_to_check_ls = {} for row_id in snpData.row_id_ls: ecotype_id = int(row_id[0]) if ecotype_id in ecotype_id_set: if ecotype_id not in ecotype_id2row_id_to_check_ls: ecotype_id2row_id_to_check_ls[ecotype_id] = [] ecotype_id2row_id_to_check_ls[ecotype_id].append(row_id) twoSNPData = TwoSNPData(SNPData1=snpData, SNPData2=snpData) for ecotype_id, row_id_to_check_ls in ecotype_id2row_id_to_check_ls.iteritems(): if len(row_id_to_check_ls)>1: print "ecotype_id: %s"%ecotype_id no_of_arrays = len(row_id_to_check_ls) for i in range(no_of_arrays): for j in range(i+1, no_of_arrays): row_id1 = row_id_to_check_ls[i] row_id2 = row_id_to_check_ls[j] print "row_id1 %s vs row_id2 %s"%(row_id1, row_id2) print twoSNPData.cmpOneRow(row_id1, row_id2) """ inputFname = os.path.expanduser('~/mnt2/panfs/NPUTE_data/input/250k_l3_y.85_20091208.tsv') ecotype_id_ls = [8297, 7317, 6910, 8274, 6911, 6905, 7034, 6909, 6962, 7373, 7270, 6983, 6899] AnalyzeSNPData.cmpAllDuplicatesOfOneEcotype(inputFname, ecotype_id_ls)
def cmpOneRowToTheOther(cls, inputFname, row_id1, row_id2): """ 2009-6-17 compare SNP data of one accession to the other in the same dataset """ sys.stderr.write("Comparing one row to the other ... \n") from pymodule import SNPData, TwoSNPData, PassingData snpData = SNPData(input_fname=inputFname, turn_into_array=1) twoSNPData = TwoSNPData(SNPData1=snpData, SNPData2=snpData) print twoSNPData.cmpOneRow(row_id1, row_id2)
ler_blast_result_fname = '/Network/Data/250k/tmp-dazhe/tair9_raw.csv' output_fname = '/tmp/Col-copy-number.tsv' CNV.discoverLerDeletionDuplication(db_250k, ler_blast_result_fname, output_fname, deletion_only=False) """ cnv_intensity_fname = os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity.tsv') aAlpha_ls = [0.2, 0.5, 0.8, 1.0, 2.0] T_ls = [2.0,4.0,8.0,12.0,14.0] M_ls = [5,10] deletion_cutoff_ls = [-0.33, -0.4, -0.5, -0.6] max_boundary_diff = 20000 max_diff_perc = 0.2 for aAlpha in aAlpha_ls: for T in T_ls: for M in M_ls: for deletion_cutoff in deletion_cutoff_ls: for min_no_of_probes in [5,10,20,40]: for min_reciprocal_overlap in [0.4, 0.6, 0.8]: GADA_output_fname_ls = [] for chr in range(1,6): fname = os.path.expanduser('~/mnt2/panfs/250k/CNV/GADA_output/call_method_48_CNV_intensity_QNorm_sub_ref_chr%s_A%sT%sM%s.tsv'%(chr, aAlpha, T, M)) if os.path.isfile(fname): GADA_output_fname_ls.append(fname) output_fname_prefix = '/Network/Data/250k/tmp-yh/CNV/CNVOccurrenceByOverlap/call_48_A%sT%sM%s_delCutoff%s_min_p%s_mdist%s_mperc%s_moverlap%s'%\ (aAlpha, T, M, deletion_cutoff, min_no_of_probes, max_boundary_diff, max_diff_perc, min_reciprocal_overlap) CNV.plotCNVOccurrenceInReplicatesHist(db_250k, cnv_intensity_fname, GADA_output_fname_ls, \ output_fname_prefix, \ min_no_of_replicates=5, \ min_no_of_probes=min_no_of_probes, deletion_cutoff=deletion_cutoff, \ max_boundary_diff=max_boundary_diff, \ max_diff_perc=max_diff_perc, min_reciprocal_overlap=min_reciprocal_overlap) """
ler_blast_result_fname = '/Network/Data/250k/tmp-dazhe/ler_raw_CNV_QC.csv' max_delta_ratio = 0.4 max_length_delta = 10000 for max_length_delta in range(1,7): max_length_delta = max_length_delta*10000 output_fname = '/tmp/Ler-span-over-Col-mdr%s-mld%s.tsv'%(max_delta_ratio, max_length_delta) CNV.discoverLerContigSpanOverCol(ler_blast_result_fname, output_fname, min_no_of_matches=25, max_delta_ratio=max_delta_ratio, max_length_delta=max_length_delta) """ max_copy_number = 16 input_fname_ls = [] for i in range(1,6): input_fname_ls.append(os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity_QNorm_sub_ref_chr%s.tsv'%i)) output_fname_prefix = os.path.expanduser('~/tmp/call_48_Col_intensity_QNorm_sub_ref_vs_true_copy_number_m%s'%max_copy_number) CNV.drawIntensityVsProbeTrueCopyNumber(db_250k, input_fname_ls, output_fname_prefix, \ ecotype_id=6909, data_source_id=9, cnv_type_id=None, max_copy_number=max_copy_number) input_fname_ls = [os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity.tsv')] output_fname_prefix = os.path.expanduser('~/tmp/call_48_Col_intensity_vs_true_copy_number_m%s'%max_copy_number) CNV.drawIntensityVsProbeTrueCopyNumber(db_250k, input_fname_ls, output_fname_prefix, \ ecotype_id=6909, data_source_id=9, cnv_type_id=None, max_copy_number=max_copy_number) input_fname_ls = [os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity.tsv')] output_fname_prefix = os.path.expanduser('~/tmp/call_48_Ler_intensity_vs_true_copy_number_m%s'%max_copy_number) CNV.drawIntensityVsProbeTrueCopyNumber(db_250k, input_fname_ls, output_fname_prefix, \ ecotype_id=6932, data_source_id=8, cnv_type_id=None, max_copy_number=max_copy_number) input_fname_ls = [] for i in range(1,6): input_fname_ls.append(os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity_QNorm_sub_ref_chr%s.tsv'%i)) output_fname_prefix = os.path.expanduser('~/tmp/call_48_Ler_intensity_QNorm_sub_ref_vs_true_copy_number_m%s'%max_copy_number) CNV.drawIntensityVsProbeTrueCopyNumber(db_250k, input_fname_ls, output_fname_prefix, \ ecotype_id=6932, data_source_id=8, cnv_type_id=None, max_copy_number=max_copy_number) """
def linkEcotypeIDFromSuziPhenotype(cls, fname_with_ID, fname_with_phenotype, output_fname): """ 2009-7-31 she gave me two files: one has phenotype data and accession names but no ecotype ID; the 2nd is a map from accession name to ecotype ID """ sys.stderr.write("Linking accession names to ecotype ID ... ") import csv inf_phenotype = csv.reader(open(fname_with_phenotype, 'r'), delimiter='\t') accession_name_ls = [] #skip two lines inf_phenotype.next() inf_phenotype.next() for row in inf_phenotype: accession_name_ls.append(row[0]) del inf_phenotype inf_with_ID = csv.reader(open(fname_with_ID), delimiter='\t') inf_with_ID.next() accession_name2ecotype_id = {} for row in inf_with_ID: ecotype_id = row[0] accession_name = row[5] accession_name2ecotype_id[accession_name] = ecotype_id del inf_with_ID print accession_name2ecotype_id writer = csv.writer(open(output_fname, 'w'), delimiter='\t') for accession_name in accession_name_ls: ecotype_id = accession_name2ecotype_id.get(accession_name) writer.writerow([accession_name, ecotype_id]) del writer sys.stderr.write("Done.\n")
overlap1 = overlap_length/(qc_stop-qc_start) overlap2 = overlap_length/(segment_stop_pos-segment_start_pos)
overlap1 = overlap_length/(segment_stop_pos-segment_start_pos) overlap2 = overlap_length/(qc_stop-qc_start)
def get_overlap_ratio(span1_ls, span2_ls): """ 2009-12-13 calculate the two overlap ratios for two segments """ segment_start_pos, segment_stop_pos = span1_ls qc_start, qc_stop = span2_ls overlap_length = max(0, segment_stop_pos - qc_start) - max(0, segment_stop_pos - qc_stop) - max(0, segment_start_pos - qc_start) # accommodates 6 scenarios overlap_length = float(overlap_length) overlap1 = overlap_length/(qc_stop-qc_start) overlap2 = overlap_length/(segment_stop_pos-segment_start_pos) return overlap1, overlap2
a key designed to represent a CNV segment in a binary search tree (BinarySearchTree.py), which could be used to do == or >, or < operators
a key designed to represent a CNV segment in the node of a binary search tree (BinarySearchTree.py) or RBTree (RBTree.py). It has a custom comparison function based on the is_reciprocal_overlap() function.
def is_reciprocal_overlap(span1_ls, span2_ls, min_reciprocal_overlap=0.6): """ 2009-12-12 return True if both overlap ratios are above the min_reciprocal_overlap """ overlap1, overlap2 = get_overlap_ratio(span1_ls, span2_ls) if overlap1>=min_reciprocal_overlap and overlap2>=min_reciprocal_overlap: return True else: return False
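A quick worked example for the two helpers above (values are illustrative; note the diff at this record swaps which ratio is measured against which span, but is_reciprocal_overlap is unaffected since both ratios must clear the threshold):

print get_overlap_ratio([100, 200], [150, 300])    # intersection length 50 -> ratios 0.5 and ~0.333 (pairing depends on the version)
print is_reciprocal_overlap([100, 200], [150, 300], min_reciprocal_overlap=0.6)    # False: 0.333 < 0.6
print is_reciprocal_overlap([100, 200], [120, 210], min_reciprocal_overlap=0.6)    # True: intersection 80 -> 0.8 and ~0.889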
return self.span_ls[0]>=other.span_ls[0] and self.span_ls[1]<=other.span_ls[0]
return self.span_ls[0]<=other.span_ls[0] and self.span_ls[1]>=other.span_ls[0]
def __eq__(self, other): """ 2009-12-12 """ if self.chromosome==other.chromosome: if len(self.span_ls)==1: if len(other.span_ls)==1: return self.span_ls[0]==other.span_ls[0] elif len(other.span_ls)>1: return self.span_ls[0]>=other.span_ls[0] and self.span_ls[0]<=other.span_ls[1] # equal if self is within the "other" segment else: return None elif len(self.span_ls)>1: if len(other.span_ls)==1: # self is a segment. other is a point position. return self.span_ls[0]>=other.span_ls[0] and self.span_ls[1]<=other.span_ls[0] # if self includes the point position, yes it's equal elif len(other.span_ls)>1: # need to calculate min_reciprocal_overlap return is_reciprocal_overlap(self.span_ls, other.span_ls, min_reciprocal_overlap=self.min_reciprocal_overlap) #return self.span_ls[1]<other.span_ls[0] # whether the stop of this segment is ahead of the start of other else: return None else: return False
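A sketch of how the key's equality behaves (the class name and constructor here are hypothetical; the semantics follow __eq__ above, using the corrected segment-vs-point test from this record's diff):

k_segment = CNVSegmentKey(chromosome=1, span_ls=[100, 200], min_reciprocal_overlap=0.6)    # hypothetical class name
k_point = CNVSegmentKey(chromosome=1, span_ls=[150])    # a single position
print k_segment == k_point    # True: position 150 falls within [100, 200]
k_overlap = CNVSegmentKey(chromosome=1, span_ls=[120, 210], min_reciprocal_overlap=0.6)
print k_segment == k_overlap    # True: reciprocal overlaps 0.8 and ~0.889 are both >= 0.6
k_other_chr = CNVSegmentKey(chromosome=2, span_ls=[100, 200], min_reciprocal_overlap=0.6)
print k_segment == k_other_chr    # False: different chromosome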
import os, sys
import os, sys, math
def getCNVDataFromFileInGWA(input_fname_ls, array_id, max_amp=-0.33, min_amp=-0.33, min_size=50, min_no_of_probes=None, report=False): """ 2009-10-31 get deletion (below max_amp) or duplication (above min_amp) from files (output by RunGADA.py) """ sys.stderr.write("Getting CNV calls for array %s, min_size %s, min_no_of_probes %s from %s ..."%\ (array_id, min_size, min_no_of_probes, repr(input_fname_ls))) gwr_name = "(a-id %s)"%(array_id) gwr = GenomeWideResult(name=gwr_name) gwr.data_obj_ls = [] #list and dictionary are crazy references. gwr.data_obj_id2index = {} genome_wide_result_id = id(gwr) amp_ls = [] array_id2array = {} counter = 0 real_counter = 0 no_of_segments = 0 input_handler = fileinput.input(input_fname_ls) header = input_handler.readline().strip().split('\t') col_name2index = getColName2IndexFromHeader(header) ecotype_id = None for line in input_handler: if line.find("array_id")!=-1: continue line = line.strip() row = line.split('\t') cnv_array_id = int(row[col_name2index['array_id']]) cnv_ecotype_id = int(row[col_name2index.get('ecotype_id', col_name2index['array_id'])]) counter += 1 if cnv_array_id==array_id: no_of_segments += 1 if ecotype_id is None: ecotype_id = cnv_ecotype_id start_probe = row[col_name2index['start_probe']].split('_') # split chr_pos start_probe = map(int, start_probe) start_probe_id = row[col_name2index.get('start_probe_id', col_name2index['start_probe'])] stop_probe = row[col_name2index['end_probe']].split('_') stop_probe = map(int, stop_probe) end_probe_id = row[col_name2index.get('end_probe_id', col_name2index['end_probe'])] no_of_probes = int(row[col_name2index['length']]) if min_no_of_probes is not None and no_of_probes<min_no_of_probes: continue amplitude = float(row[col_name2index['amplitude']]) segment_chromosome = start_probe[0] segment_start_pos = start_probe[1]-12 segment_stop_pos = stop_probe[1]+12 segment_length = abs(segment_stop_pos-segment_start_pos) if min_size is not None and segment_length<min_size: continue if amplitude<=max_amp or amplitude>=min_amp: real_counter += 1 data_obj = DataObject(chromosome=segment_chromosome, position=segment_start_pos, stop_position=segment_stop_pos, \ value=amplitude) data_obj.comment = 'start probe-id %s, end probe-id %s, no of probes %s'%\ (start_probe_id, end_probe_id, no_of_probes) data_obj.genome_wide_result_id = genome_wide_result_id gwr.add_one_data_obj(data_obj) if report and counter%10000==0: sys.stderr.write('%s%s\t%s\t%s'%('\x08'*80, counter, no_of_segments, real_counter)) sys.stderr.write("\n") if gwr.max_value<3: # insertion at y=3 gwr.max_value=3 if gwr.min_value>-1: # deletion at y = -1 gwr.min_value = -1 gwr.name = '%s '%ecotype_id + gwr.name setattr(gwr, 'ecotype_id', ecotype_id) sys.stderr.write(" %s segments. Done.\n"%(len(gwr.data_obj_ls))) return gwr
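Hypothetical usage, assuming getCNVDataFromFileInGWA is in scope (file paths are illustrative, modeled on the GADA output names elsewhere in this file). Note that with the defaults max_amp=min_amp=-0.33 the test amplitude<=max_amp or amplitude>=min_amp accepts every segment, so callers presumably raise min_amp (or lower max_amp) to select one class of segments:

import os
input_fname_ls = [os.path.expanduser('~/mnt2/panfs/250k/CNV/call_method_48_CNV_intensity_QNorm_sub_ref_chr%s.GADA_A0.5T4M5.tsv'%i) for i in range(1, 6)]
gwr = getCNVDataFromFileInGWA(input_fname_ls, array_id=3, max_amp=-0.33, min_amp=0.5, min_size=50, min_no_of_probes=5, report=True)
print gwr.name, len(gwr.data_obj_ls)    # e.g. '<ecotype_id> (a-id 3)' and the number of retained segments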