| repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
|---|---|---|---|---|---|---|---|
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.__clientDefer | 
	def __clientDefer(self, c):
        """Return a deferred for a HTTP client, after handling incoming headers"""
        def handle_headers(r):
            self.gotHeaders(c.response_headers)
            return r
        return c.deferred.addBoth(handle_headers) | 
	python | 
	def __clientDefer(self, c):
        """Return a deferred for a HTTP client, after handling incoming headers"""
        def handle_headers(r):
            self.gotHeaders(c.response_headers)
            return r
        return c.deferred.addBoth(handle_headers) | 
	Return a deferred for a HTTP client, after handling incoming headers | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L231-L237 | 
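The record above hinges on Twisted's `Deferred.addBoth`: the header-handling callback runs on success and failure alike and must return its argument unchanged so later callbacks still see the original result. A minimal standalone sketch of that pass-through pattern (plain Twisted, no Twitter client involved):

```python
from twisted.internet import defer

def handle_headers(result):
    # fires for success or failure; returning the result unchanged keeps the
    # callback chain transparent, just as __clientDefer does around gotHeaders()
    print("headers would be processed here")
    return result

d = defer.Deferred()
d.addBoth(handle_headers)
d.addCallback(lambda body: print("page body:", body))
d.callback("<html>...</html>")
```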
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.__doDownloadPage | 
	def __doDownloadPage(self, *args, **kwargs):
        """Works like client.downloadPage(), but handle incoming headers
        """
        logger.debug("download page: %r, %r", args, kwargs)
        return self.__clientDefer(downloadPage(*args, **kwargs)) | 
	python | 
	def __doDownloadPage(self, *args, **kwargs):
        """Works like client.downloadPage(), but handle incoming headers
        """
        logger.debug("download page: %r, %r", args, kwargs)
        return self.__clientDefer(downloadPage(*args, **kwargs)) | 
	Works like client.downloadPage(), but handle incoming headers | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L271-L276 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.verify_credentials | 
	def verify_credentials(self, delegate=None):
        "Verify a user's credentials."
        parser = txml.Users(delegate)
        return self.__downloadPage('/account/verify_credentials.xml', parser) | 
	python | 
	def verify_credentials(self, delegate=None):
        "Verify a user's credentials."
        parser = txml.Users(delegate)
        return self.__downloadPage('/account/verify_credentials.xml', parser) | 
	Verify a user's credentials. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L304-L307 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.update | 
	def update(self, status, source=None, params={}):
        "Update your status.  Returns the ID of the new post."
        params = params.copy()
        params['status'] = status
        if source:
            params['source'] = source
        return self.__parsed_post(self.__post('/statuses/update.xml', params),
            txml.parseUpdateResponse) | 
	python | 
	def update(self, status, source=None, params={}):
        "Update your status.  Returns the ID of the new post."
        params = params.copy()
        params['status'] = status
        if source:
            params['source'] = source
        return self.__parsed_post(self.__post('/statuses/update.xml', params),
            txml.parseUpdateResponse) | 
	Update your status.  Returns the ID of the new post. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L315-L322 | 
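A hedged usage sketch for `update()`: `tw` is assumed to be an already-authenticated twitty-twister `Twitter` instance (its constructor arguments are not part of this record), and per the docstring the returned Deferred fires with the ID of the new post.

```python
# `tw` is assumed to be a configured, authenticated Twitter client instance.
d = tw.update("Hello from Twisted!", source="my-client")
d.addCallback(lambda post_id: print("new status id:", post_id))
d.addErrback(lambda failure: failure.printTraceback())
```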
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.retweet | 
	def retweet(self, id, delegate):
        """Retweet a post
        Returns the retweet status info back to the given delegate
        """
        parser = txml.Statuses(delegate)
        return self.__postPage('/statuses/retweet/%s.xml' % (id), parser) | 
	python | 
	def retweet(self, id, delegate):
        """Retweet a post
        Returns the retweet status info back to the given delegate
        """
        parser = txml.Statuses(delegate)
        return self.__postPage('/statuses/retweet/%s.xml' % (id), parser) | 
	Retweet a post
        Returns the retweet status info back to the given delegate | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L324-L330 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.friends | 
	def friends(self, delegate, params={}, extra_args=None):
        """Get updates from friends.
        Calls the delgate once for each status object received."""
        return self.__get('/statuses/friends_timeline.xml', delegate, params,
            txml.Statuses, extra_args=extra_args) | 
	python | 
	def friends(self, delegate, params={}, extra_args=None):
        """Get updates from friends.
        Calls the delgate once for each status object received."""
        return self.__get('/statuses/friends_timeline.xml', delegate, params,
            txml.Statuses, extra_args=extra_args) | 
	Get updates from friends.
        Calls the delgate once for each status object received. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L332-L337 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.home_timeline | 
	def home_timeline(self, delegate, params={}, extra_args=None):
        """Get updates from friends.
        Calls the delgate once for each status object received."""
        return self.__get('/statuses/home_timeline.xml', delegate, params,
            txml.Statuses, extra_args=extra_args) | 
	python | 
	def home_timeline(self, delegate, params={}, extra_args=None):
        """Get updates from friends.
        Calls the delgate once for each status object received."""
        return self.__get('/statuses/home_timeline.xml', delegate, params,
            txml.Statuses, extra_args=extra_args) | 
	Get updates from friends.
        Calls the delgate once for each status object received. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L339-L344 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.user_timeline | 
	def user_timeline(self, delegate, user=None, params={}, extra_args=None):
        """Get the most recent updates for a user.
        If no user is specified, the statuses for the authenticating user are
        returned.
        See search for example of how results are returned."""
        if user:
            params['id'] = user
        return self.__get('/statuses/user_timeline.xml', delegate, params,
                          txml.Statuses, extra_args=extra_args) | 
	python | 
	def user_timeline(self, delegate, user=None, params={}, extra_args=None):
        """Get the most recent updates for a user.
        If no user is specified, the statuses for the authenticating user are
        returned.
        See search for example of how results are returned."""
        if user:
            params['id'] = user
        return self.__get('/statuses/user_timeline.xml', delegate, params,
                          txml.Statuses, extra_args=extra_args) | 
	Get the most recent updates for a user.
        If no user is specified, the statuses for the authenticating user are
        returned.
        See search for example of how results are returned. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L350-L360 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.public_timeline | 
	def public_timeline(self, delegate, params={}, extra_args=None):
        "Get the most recent public timeline."
        return self.__get('/statuses/public_timeline.atom', delegate, params,
                          extra_args=extra_args) | 
	python | 
	def public_timeline(self, delegate, params={}, extra_args=None):
        "Get the most recent public timeline."
        return self.__get('/statuses/public_timeline.atom', delegate, params,
                          extra_args=extra_args) | 
	Get the most recent public timeline. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L367-L371 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.direct_messages | 
	def direct_messages(self, delegate, params={}, extra_args=None):
        """Get direct messages for the authenticating user.
        Search results are returned one message at a time a DirectMessage
        objects"""
        return self.__get('/direct_messages.xml', delegate, params,
                          txml.Direct, extra_args=extra_args) | 
	python | 
	def direct_messages(self, delegate, params={}, extra_args=None):
        """Get direct messages for the authenticating user.
        Search results are returned one message at a time a DirectMessage
        objects"""
        return self.__get('/direct_messages.xml', delegate, params,
                          txml.Direct, extra_args=extra_args) | 
	Get direct messages for the authenticating user.
        Search results are returned one message at a time a DirectMessage
        objects | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L373-L379 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.send_direct_message | 
	def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}):
        """Send a direct message
        """
        params = params.copy()
        if user is not None:
            params['user'] = user
        if user_id is not None:
            params['user_id'] = user_id
        if screen_name is not None:
            params['screen_name'] = screen_name
        params['text'] = text
        parser = txml.Direct(delegate)
        return self.__postPage('/direct_messages/new.xml', parser, params) | 
	python | 
	def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}):
        """Send a direct message
        """
        params = params.copy()
        if user is not None:
            params['user'] = user
        if user_id is not None:
            params['user_id'] = user_id
        if screen_name is not None:
            params['screen_name'] = screen_name
        params['text'] = text
        parser = txml.Direct(delegate)
        return self.__postPage('/direct_messages/new.xml', parser, params) | 
	Send a direct message | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L381-L393 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.replies | 
	def replies(self, delegate, params={}, extra_args=None):
        """Get the most recent replies for the authenticating user.
        See search for example of how results are returned."""
        return self.__get('/statuses/replies.atom', delegate, params,
                          extra_args=extra_args) | 
	python | 
	def replies(self, delegate, params={}, extra_args=None):
        """Get the most recent replies for the authenticating user.
        See search for example of how results are returned."""
        return self.__get('/statuses/replies.atom', delegate, params,
                          extra_args=extra_args) | 
	Get the most recent replies for the authenticating user.
        See search for example of how results are returned. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L395-L400 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.follow_user | 
	def follow_user(self, user, delegate):
        """Follow the given user.
        Returns the user info back to the given delegate
        """
        parser = txml.Users(delegate)
        return self.__postPage('/friendships/create/%s.xml' % (user), parser) | 
	python | 
	def follow_user(self, user, delegate):
        """Follow the given user.
        Returns the user info back to the given delegate
        """
        parser = txml.Users(delegate)
        return self.__postPage('/friendships/create/%s.xml' % (user), parser) | 
	Follow the given user.
        Returns the user info back to the given delegate | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L414-L420 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.unfollow_user | 
	def unfollow_user(self, user, delegate):
        """Unfollow the given user.
        Returns the user info back to the given delegate
        """
        parser = txml.Users(delegate)
        return self.__postPage('/friendships/destroy/%s.xml' % (user), parser) | 
	python | 
	def unfollow_user(self, user, delegate):
        """Unfollow the given user.
        Returns the user info back to the given delegate
        """
        parser = txml.Users(delegate)
        return self.__postPage('/friendships/destroy/%s.xml' % (user), parser) | 
	Unfollow the given user.
        Returns the user info back to the given delegate | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L422-L428 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.list_friends | 
	def list_friends(self, delegate, user=None, params={}, extra_args=None, page_delegate=None):
        """Get the list of friends for a user.
        Calls the delegate with each user object found."""
        if user:
            url = '/statuses/friends/' + user + '.xml'
        else:
            url = '/statuses/friends.xml'
        return self.__get_maybe_paging(url, delegate, params, txml.PagedUserList, extra_args, page_delegate) | 
	python | 
	def list_friends(self, delegate, user=None, params={}, extra_args=None, page_delegate=None):
        """Get the list of friends for a user.
        Calls the delegate with each user object found."""
        if user:
            url = '/statuses/friends/' + user + '.xml'
        else:
            url = '/statuses/friends.xml'
        return self.__get_maybe_paging(url, delegate, params, txml.PagedUserList, extra_args, page_delegate) | 
	Get the list of friends for a user.
        Calls the delegate with each user object found. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L457-L466 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.show_user | 
	def show_user(self, user):
        """Get the info for a specific user.
        Returns a delegate that will receive the user in a callback."""
        url = '/users/show/%s.xml' % (user)
        d = defer.Deferred()
        self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
            .addErrback(lambda e: d.errback(e))
        return d | 
	python | 
	def show_user(self, user):
        """Get the info for a specific user.
        Returns a delegate that will receive the user in a callback."""
        url = '/users/show/%s.xml' % (user)
        d = defer.Deferred()
        self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
            .addErrback(lambda e: d.errback(e))
        return d | 
	Get the info for a specific user.
        Returns a delegate that will receive the user in a callback. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L488-L499 | 
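A usage sketch for `show_user()`, again assuming a configured `tw` instance; the callback simply prints the parsed user object rather than guessing its attribute names.

```python
# `tw` is assumed to be a configured Twitter client instance.
d = tw.show_user("dustin")
d.addCallback(lambda user: print("fetched user:", user))
d.addErrback(lambda failure: failure.printTraceback())
```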
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	Twitter.search | 
	def search(self, query, delegate, args=None, extra_args=None):
        """Perform a search query.
        Results are given one at a time to the delegate.  An example delegate
        may look like this:
        def exampleDelegate(entry):
            print entry.title"""
        if args is None:
            args = {}
        args['q'] = query
        return self.__doDownloadPage(self.search_url + '?' + self._urlencode(args),
            txml.Feed(delegate, extra_args), agent=self.agent) | 
	python | 
	def search(self, query, delegate, args=None, extra_args=None):
        """Perform a search query.
        Results are given one at a time to the delegate.  An example delegate
        may look like this:
        def exampleDelegate(entry):
            print entry.title"""
        if args is None:
            args = {}
        args['q'] = query
        return self.__doDownloadPage(self.search_url + '?' + self._urlencode(args),
            txml.Feed(delegate, extra_args), agent=self.agent) | 
	Perform a search query.
        Results are given one at a time to the delegate.  An example delegate
        may look like this:
        def exampleDelegate(entry):
            print entry.title | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L501-L513 | 
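The docstring's delegate example is Python 2. A Python 3 equivalent, assuming a configured `tw` instance and that parsed feed entries expose a `title` attribute as the docstring suggests:

```python
def example_delegate(entry):
    # called once for each search result
    print(entry.title)

d = tw.search("twisted python", example_delegate)
d.addErrback(lambda failure: failure.printTraceback())
```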
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor.startService | 
	def startService(self):
        """
        Start the service.
        This causes a transition to the C{'idle'} state, and then calls
        L{connect} to attempt an initial conection.
        """
        service.Service.startService(self)
        self._toState('idle')
        try:
            self.connect()
        except NoConsumerError:
            pass | 
	python | 
	def startService(self):
        """
        Start the service.
        This causes a transition to the C{'idle'} state, and then calls
        L{connect} to attempt an initial conection.
        """
        service.Service.startService(self)
        self._toState('idle')
        try:
            self.connect()
        except NoConsumerError:
            pass | 
	Start the service.
        This causes a transition to the C{'idle'} state, and then calls
        L{connect} to attempt an initial conection. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L880-L893 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor.connect | 
	def connect(self, forceReconnect=False):
        """
        Check current conditions and initiate connection if possible.
        This is called to check preconditions for starting a new connection,
        and initating the connection itself.
        If the service is not running, this will do nothing.
        @param forceReconnect: Drop an existing connection to reconnnect.
        @type forceReconnect: C{False}
        @raises L{ConnectError}: When a connection (attempt) is already in
            progress, unless C{forceReconnect} is set.
        @raises L{NoConsumerError}: When there is no consumer for incoming
        tweets. No further connection attempts will be made, unless L{connect}
        is called again.
        """
        if self._state == 'stopped':
            raise Error("This service is not running. Not connecting.")
        if self._state == 'connected':
            if forceReconnect:
                self._toState('disconnecting')
                return True
            else:
                raise ConnectError("Already connected.")
        elif self._state == 'aborting':
            raise ConnectError("Aborting connection in progress.")
        elif self._state == 'disconnecting':
            raise ConnectError("Disconnect in progress.")
        elif self._state == 'connecting':
            if forceReconnect:
                self._toState('aborting')
                return True
            else:
                raise ConnectError("Connect in progress.")
        if self.delegate is None:
            if self._state != 'idle':
                self._toState('idle')
            raise NoConsumerError()
        if self._state == 'waiting':
            if self._reconnectDelayedCall.called:
                self._reconnectDelayedCall = None
                pass
            else:
                self._reconnectDelayedCall.reset(0)
                return True
        self._toState('connecting')
        return True | 
	python | 
	def connect(self, forceReconnect=False):
        """
        Check current conditions and initiate connection if possible.
        This is called to check preconditions for starting a new connection,
        and initating the connection itself.
        If the service is not running, this will do nothing.
        @param forceReconnect: Drop an existing connection to reconnnect.
        @type forceReconnect: C{False}
        @raises L{ConnectError}: When a connection (attempt) is already in
            progress, unless C{forceReconnect} is set.
        @raises L{NoConsumerError}: When there is no consumer for incoming
        tweets. No further connection attempts will be made, unless L{connect}
        is called again.
        """
        if self._state == 'stopped':
            raise Error("This service is not running. Not connecting.")
        if self._state == 'connected':
            if forceReconnect:
                self._toState('disconnecting')
                return True
            else:
                raise ConnectError("Already connected.")
        elif self._state == 'aborting':
            raise ConnectError("Aborting connection in progress.")
        elif self._state == 'disconnecting':
            raise ConnectError("Disconnect in progress.")
        elif self._state == 'connecting':
            if forceReconnect:
                self._toState('aborting')
                return True
            else:
                raise ConnectError("Connect in progress.")
        if self.delegate is None:
            if self._state != 'idle':
                self._toState('idle')
            raise NoConsumerError()
        if self._state == 'waiting':
            if self._reconnectDelayedCall.called:
                self._reconnectDelayedCall = None
                pass
            else:
                self._reconnectDelayedCall.reset(0)
                return True
        self._toState('connecting')
        return True | 
	Check current conditions and initiate connection if possible.
        This is called to check preconditions for starting a new connection,
        and initating the connection itself.
        If the service is not running, this will do nothing.
        @param forceReconnect: Drop an existing connection to reconnnect.
        @type forceReconnect: C{False}
        @raises L{ConnectError}: When a connection (attempt) is already in
            progress, unless C{forceReconnect} is set.
        @raises L{NoConsumerError}: When there is no consumer for incoming
        tweets. No further connection attempts will be made, unless L{connect}
        is called again. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L906-L958 | 
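A sketch of driving `connect()` from calling code, assuming `monitor` is a running `TwitterMonitor` and that `ConnectError` and `NoConsumerError` can be imported from `twittytwister.twitter` (both are raised in the code above):

```python
from twittytwister.twitter import ConnectError, NoConsumerError

try:
    # drop any current or in-progress connection and start a new one
    monitor.connect(forceReconnect=True)
except ConnectError:
    pass  # a connect or disconnect is already being handled
except NoConsumerError:
    pass  # no delegate set yet; assign one and call connect() again
```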
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor.makeConnection | 
	def makeConnection(self, protocol):
        """
        Called when the connection has been established.
        This method is called when an HTTP 200 response has been received,
        with the protocol that decodes the individual Twitter stream elements.
        That protocol will call the consumer for all Twitter entries received.
        The protocol, stored in L{protocol}, has a deferred that fires when
        the connection is closed, causing a transition to the
        C{'disconnected'} state.
        @param protocol: The Twitter stream protocol.
        @type protocol: L{TwitterStream}
        """
        self._errorState = None
        def cb(result):
            self.protocol = None
            if self._state == 'stopped':
                # Don't transition to any other state. We are stopped.
                pass
            else:
                if isinstance(result, failure.Failure):
                    reason = result
                else:
                    reason = None
                self._toState('disconnected', reason)
        self.protocol = protocol
        d = protocol.deferred
        d.addBoth(cb) | 
	python | 
	def makeConnection(self, protocol):
        """
        Called when the connection has been established.
        This method is called when an HTTP 200 response has been received,
        with the protocol that decodes the individual Twitter stream elements.
        That protocol will call the consumer for all Twitter entries received.
        The protocol, stored in L{protocol}, has a deferred that fires when
        the connection is closed, causing a transition to the
        C{'disconnected'} state.
        @param protocol: The Twitter stream protocol.
        @type protocol: L{TwitterStream}
        """
        self._errorState = None
        def cb(result):
            self.protocol = None
            if self._state == 'stopped':
                # Don't transition to any other state. We are stopped.
                pass
            else:
                if isinstance(result, failure.Failure):
                    reason = result
                else:
                    reason = None
                self._toState('disconnected', reason)
        self.protocol = protocol
        d = protocol.deferred
        d.addBoth(cb) | 
	Called when the connection has been established.
        This method is called when an HTTP 200 response has been received,
        with the protocol that decodes the individual Twitter stream elements.
        That protocol will call the consumer for all Twitter entries received.
        The protocol, stored in L{protocol}, has a deferred that fires when
        the connection is closed, causing a transition to the
        C{'disconnected'} state.
        @param protocol: The Twitter stream protocol.
        @type protocol: L{TwitterStream} | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L969-L1000 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor._reconnect | 
	def _reconnect(self, errorState):
        """
        Attempt to reconnect.
        If the current back-off delay is 0, L{connect} is called. Otherwise,
        it will cause a transition to the C{'waiting'} state, ultimately
        causing a call to L{connect} when the delay expires.
        """
        def connect():
            if self.noisy:
                log.msg("Reconnecting now.")
            self.connect()
        backOff = self.backOffs[errorState]
        if self._errorState != errorState or self._delay is None:
            self._errorState = errorState
            self._delay = backOff['initial']
        else:
            self._delay = min(backOff['max'], self._delay * backOff['factor'])
        if self._delay == 0:
            connect()
        else:
            self._reconnectDelayedCall = self.reactor.callLater(self._delay,
                                                                connect)
            self._toState('waiting') | 
	python | 
	def _reconnect(self, errorState):
        """
        Attempt to reconnect.
        If the current back-off delay is 0, L{connect} is called. Otherwise,
        it will cause a transition to the C{'waiting'} state, ultimately
        causing a call to L{connect} when the delay expires.
        """
        def connect():
            if self.noisy:
                log.msg("Reconnecting now.")
            self.connect()
        backOff = self.backOffs[errorState]
        if self._errorState != errorState or self._delay is None:
            self._errorState = errorState
            self._delay = backOff['initial']
        else:
            self._delay = min(backOff['max'], self._delay * backOff['factor'])
        if self._delay == 0:
            connect()
        else:
            self._reconnectDelayedCall = self.reactor.callLater(self._delay,
                                                                connect)
            self._toState('waiting') | 
	Attempt to reconnect.
        If the current back-off delay is 0, L{connect} is called. Otherwise,
        it will cause a transition to the C{'waiting'} state, ultimately
        causing a call to L{connect} when the delay expires. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1003-L1029 | 
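A standalone illustration of the back-off rule in `_reconnect()`: the delay starts at the error state's `initial` value, is multiplied by `factor` on each consecutive error of the same kind, and is capped at `max`. The numbers below are made up; the real values live in `TwitterMonitor.backOffs`.

```python
back_off = {'initial': 0.25, 'factor': 2, 'max': 16}  # illustrative values only

delay = None
for attempt in range(8):
    if delay is None:
        delay = back_off['initial']
    else:
        delay = min(back_off['max'], delay * back_off['factor'])
    print(f"attempt {attempt}: wait {delay}s before reconnecting")
# prints delays 0.25, 0.5, 1, 2, 4, 8, 16, 16
```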
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor._toState | 
	def _toState(self, state, *args, **kwargs):
        """
        Transition to the next state.
        @param state: Name of the next state.
        """
        try:
            method = getattr(self, '_state_%s' % state)
        except AttributeError:
            raise ValueError("No such state %r" % state)
        log.msg("%s: to state %r" % (self.__class__.__name__, state))
        self._state = state
        method(*args, **kwargs) | 
	python | 
	def _toState(self, state, *args, **kwargs):
        """
        Transition to the next state.
        @param state: Name of the next state.
        """
        try:
            method = getattr(self, '_state_%s' % state)
        except AttributeError:
            raise ValueError("No such state %r" % state)
        log.msg("%s: to state %r" % (self.__class__.__name__, state))
        self._state = state
        method(*args, **kwargs) | 
	Transition to the next state.
        @param state: Name of the next state. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1032-L1045 | 
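The `getattr`-based dispatch in `_toState()` is a compact state-machine idiom: each state name maps to a `_state_<name>` method and unknown names fail loudly. A minimal standalone sketch:

```python
class Machine:
    _state = None

    def _to_state(self, state, *args, **kwargs):
        try:
            method = getattr(self, '_state_%s' % state)
        except AttributeError:
            raise ValueError("No such state %r" % state)
        self._state = state
        method(*args, **kwargs)

    def _state_idle(self):
        print("now idle")

m = Machine()
m._to_state('idle')      # prints "now idle"
# m._to_state('bogus')   # would raise ValueError("No such state 'bogus'")
```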
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor._state_stopped | 
	def _state_stopped(self):
        """
        The service is not running.
        This is the initial state, and the state after L{stopService} was
        called. To get out of this state, call L{startService}. If there is a
        current connection, we disconnect.
        """
        if self._reconnectDelayedCall:
            self._reconnectDelayedCall.cancel()
            self._reconnectDelayedCall = None
        self.loseConnection() | 
	python | 
	def _state_stopped(self):
        """
        The service is not running.
        This is the initial state, and the state after L{stopService} was
        called. To get out of this state, call L{startService}. If there is a
        current connection, we disconnect.
        """
        if self._reconnectDelayedCall:
            self._reconnectDelayedCall.cancel()
            self._reconnectDelayedCall = None
        self.loseConnection() | 
	The service is not running.
        This is the initial state, and the state after L{stopService} was
        called. To get out of this state, call L{startService}. If there is a
        current connection, we disconnect. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1048-L1059 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor._state_connecting | 
	def _state_connecting(self):
        """
        A connection is being started.
        A succesful attempt results in the state C{'connected'} when the
        first response from Twitter has been received. Transitioning
        to the state C{'aborting'} will cause an immediate disconnect instead,
        by transitioning to C{'disconnecting'}.
        Errors will cause a transition to the C{'error'} state.
        """
        def responseReceived(protocol):
            self.makeConnection(protocol)
            if self._state == 'aborting':
                self._toState('disconnecting')
            else:
                self._toState('connected')
        def trapError(failure):
            self._toState('error', failure)
        def onEntry(entry):
            if self.delegate:
                try:
                    self.delegate(entry)
                except:
                    log.err()
            else:
                pass
        d = self.api(onEntry, self.args)
        d.addCallback(responseReceived)
        d.addErrback(trapError) | 
	python | 
	def _state_connecting(self):
        """
        A connection is being started.
        A succesful attempt results in the state C{'connected'} when the
        first response from Twitter has been received. Transitioning
        to the state C{'aborting'} will cause an immediate disconnect instead,
        by transitioning to C{'disconnecting'}.
        Errors will cause a transition to the C{'error'} state.
        """
        def responseReceived(protocol):
            self.makeConnection(protocol)
            if self._state == 'aborting':
                self._toState('disconnecting')
            else:
                self._toState('connected')
        def trapError(failure):
            self._toState('error', failure)
        def onEntry(entry):
            if self.delegate:
                try:
                    self.delegate(entry)
                except:
                    log.err()
            else:
                pass
        d = self.api(onEntry, self.args)
        d.addCallback(responseReceived)
        d.addErrback(trapError) | 
	A connection is being started.
        A succesful attempt results in the state C{'connected'} when the
        first response from Twitter has been received. Transitioning
        to the state C{'aborting'} will cause an immediate disconnect instead,
        by transitioning to C{'disconnecting'}.
        Errors will cause a transition to the C{'error'} state. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1081-L1114 | 
| 
	dustin/twitty-twister | 
	twittytwister/twitter.py | 
	TwitterMonitor._state_error | 
	def _state_error(self, reason):
        """
        The connection attempt resulted in an error.
        Attempt a reconnect with a back-off algorithm.
        """
        log.err(reason)
        def matchException(failure):
            for errorState, backOff in self.backOffs.iteritems():
                if 'errorTypes' not in backOff:
                    continue
                if failure.check(*backOff['errorTypes']):
                    return errorState
            return 'other'
        errorState = matchException(reason)
        self._reconnect(errorState) | 
	python | 
	def _state_error(self, reason):
        """
        The connection attempt resulted in an error.
        Attempt a reconnect with a back-off algorithm.
        """
        log.err(reason)
        def matchException(failure):
            for errorState, backOff in self.backOffs.iteritems():
                if 'errorTypes' not in backOff:
                    continue
                if failure.check(*backOff['errorTypes']):
                    return errorState
            return 'other'
        errorState = matchException(reason)
        self._reconnect(errorState) | 
	The connection attempt resulted in an error.
        Attempt a reconnect with a back-off algorithm. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1168-L1186 | 
| 
	dustin/twitty-twister | 
	twittytwister/streaming.py | 
	LengthDelimitedStream.lineReceived | 
	def lineReceived(self, line):
        """
        Called when a line is received.
        We expect a length in bytes or an empty line for keep-alive. If
        we got a length, switch to raw mode to receive that amount of bytes.
        """
        if line and line.isdigit():
            self._expectedLength = int(line)
            self._rawBuffer = []
            self._rawBufferLength = 0
            self.setRawMode()
        else:
            self.keepAliveReceived() | 
	python | 
	def lineReceived(self, line):
        """
        Called when a line is received.
        We expect a length in bytes or an empty line for keep-alive. If
        we got a length, switch to raw mode to receive that amount of bytes.
        """
        if line and line.isdigit():
            self._expectedLength = int(line)
            self._rawBuffer = []
            self._rawBufferLength = 0
            self.setRawMode()
        else:
            self.keepAliveReceived() | 
	Called when a line is received.
        We expect a length in bytes or an empty line for keep-alive. If
        we got a length, switch to raw mode to receive that amount of bytes. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L35-L48 | 
| 
	dustin/twitty-twister | 
	twittytwister/streaming.py | 
	LengthDelimitedStream.rawDataReceived | 
	def rawDataReceived(self, data):
        """
        Called when raw data is received.
        Fill the raw buffer C{_rawBuffer} until we have received at least
        C{_expectedLength} bytes. Call C{datagramReceived} with the received
        byte string of the expected size. Then switch back to line mode with
        the remainder of the buffer.
        """
        self._rawBuffer.append(data)
        self._rawBufferLength += len(data)
        if self._rawBufferLength >= self._expectedLength:
            receivedData = ''.join(self._rawBuffer)
            expectedData = receivedData[:self._expectedLength]
            extraData = receivedData[self._expectedLength:]
            self._rawBuffer = None
            self._rawBufferLength = None
            self._expectedLength = None
            self.datagramReceived(expectedData)
            self.setLineMode(extraData) | 
	python | 
	def rawDataReceived(self, data):
        """
        Called when raw data is received.
        Fill the raw buffer C{_rawBuffer} until we have received at least
        C{_expectedLength} bytes. Call C{datagramReceived} with the received
        byte string of the expected size. Then switch back to line mode with
        the remainder of the buffer.
        """
        self._rawBuffer.append(data)
        self._rawBufferLength += len(data)
        if self._rawBufferLength >= self._expectedLength:
            receivedData = ''.join(self._rawBuffer)
            expectedData = receivedData[:self._expectedLength]
            extraData = receivedData[self._expectedLength:]
            self._rawBuffer = None
            self._rawBufferLength = None
            self._expectedLength = None
            self.datagramReceived(expectedData)
            self.setLineMode(extraData) | 
	Called when raw data is received.
        Fill the raw buffer C{_rawBuffer} until we have received at least
        C{_expectedLength} bytes. Call C{datagramReceived} with the received
        byte string of the expected size. Then switch back to line mode with
        the remainder of the buffer. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L51-L73 | 
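The two methods above implement length-delimited framing: a line carrying a decimal byte count, then exactly that many raw bytes, with empty lines serving as keep-alives. A standalone, non-Twisted sketch of the same count-then-read idea over a complete in-memory buffer (it does not claim to reproduce the exact Twitter stream wire format):

```python
def split_frames(buf: bytes):
    # frames look like b'<decimal length>\r\n<payload>'; an empty line is a keep-alive
    frames = []
    while buf:
        line, _, rest = buf.partition(b'\r\n')
        if not line:                      # keep-alive line
            buf = rest
            continue
        length = int(line.decode('ascii'))
        frames.append(rest[:length])      # the advertised number of raw bytes
        buf = rest[length:]
    return frames

print(split_frames(b'\r\n5\r\nhello3\r\nfoo'))   # [b'hello', b'foo']
```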
| 
	dustin/twitty-twister | 
	twittytwister/streaming.py | 
	TwitterObject.fromDict | 
	def fromDict(cls, data):
        """
        Fill this objects attributes from a dict for known properties.
        """
        obj = cls()
        obj.raw = data
        for name, value in data.iteritems():
            if cls.SIMPLE_PROPS and name in cls.SIMPLE_PROPS:
                setattr(obj, name, value)
            elif cls.COMPLEX_PROPS and name in cls.COMPLEX_PROPS:
                value = cls.COMPLEX_PROPS[name].fromDict(value)
                setattr(obj, name, value)
            elif cls.LIST_PROPS and name in cls.LIST_PROPS:
                value = [cls.LIST_PROPS[name].fromDict(item)
                         for item in value]
                setattr(obj, name, value)
        return obj | 
	python | 
	def fromDict(cls, data):
        """
        Fill this objects attributes from a dict for known properties.
        """
        obj = cls()
        obj.raw = data
        for name, value in data.iteritems():
            if cls.SIMPLE_PROPS and name in cls.SIMPLE_PROPS:
                setattr(obj, name, value)
            elif cls.COMPLEX_PROPS and name in cls.COMPLEX_PROPS:
                value = cls.COMPLEX_PROPS[name].fromDict(value)
                setattr(obj, name, value)
            elif cls.LIST_PROPS and name in cls.LIST_PROPS:
                value = [cls.LIST_PROPS[name].fromDict(item)
                         for item in value]
                setattr(obj, name, value)
        return obj | 
	Fill this objects attributes from a dict for known properties. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L102-L119 | 
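A Python 3 sketch of the `SIMPLE_PROPS` / `COMPLEX_PROPS` / `LIST_PROPS` convention behind `fromDict()` (the original is Python 2, uses `iteritems()`, and guards against the class attributes being `None`; here they default to empty containers instead). The class and field names are hypothetical.

```python
class Base:
    SIMPLE_PROPS = set()
    COMPLEX_PROPS = {}
    LIST_PROPS = {}
    raw = None

    @classmethod
    def from_dict(cls, data):
        obj = cls()
        obj.raw = data
        for name, value in data.items():   # Python 3 spelling of iteritems()
            if name in cls.SIMPLE_PROPS:
                setattr(obj, name, value)
            elif name in cls.COMPLEX_PROPS:
                setattr(obj, name, cls.COMPLEX_PROPS[name].from_dict(value))
            elif name in cls.LIST_PROPS:
                setattr(obj, name, [cls.LIST_PROPS[name].from_dict(i) for i in value])
        return obj

class DemoHashtag(Base):
    SIMPLE_PROPS = {'text'}

class DemoStatus(Base):
    SIMPLE_PROPS = {'id', 'text'}
    LIST_PROPS = {'hashtags': DemoHashtag}

s = DemoStatus.from_dict({'id': 1, 'text': 'hi #py', 'hashtags': [{'text': 'py'}]})
print(s.id, s.text, s.hashtags[0].text)   # 1 hi #py py
```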
| 
	dustin/twitty-twister | 
	twittytwister/streaming.py | 
	TwitterStream.datagramReceived | 
	def datagramReceived(self, data):
        """
        Decode the JSON-encoded datagram and call the callback.
        """
        try:
            obj = json.loads(data)
        except ValueError, e:
            log.err(e, 'Invalid JSON in stream: %r' % data)
            return
        if u'text' in obj:
            obj = Status.fromDict(obj)
        else:
            log.msg('Unsupported object %r' % obj)
            return
        self.callback(obj) | 
	python | 
	def datagramReceived(self, data):
        """
        Decode the JSON-encoded datagram and call the callback.
        """
        try:
            obj = json.loads(data)
        except ValueError, e:
            log.err(e, 'Invalid JSON in stream: %r' % data)
            return
        if u'text' in obj:
            obj = Status.fromDict(obj)
        else:
            log.msg('Unsupported object %r' % obj)
            return
        self.callback(obj) | 
	Decode the JSON-encoded datagram and call the callback. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L306-L322 | 
| 
	dustin/twitty-twister | 
	twittytwister/streaming.py | 
	TwitterStream.connectionLost | 
	def connectionLost(self, reason):
        """
        Called when the body is complete or the connection was lost.
        @note: As the body length is usually not known at the beginning of the
        response we expect a L{PotentialDataLoss} when Twitter closes the
        stream, instead of L{ResponseDone}. Other exceptions are treated
        as error conditions.
        """
        self.setTimeout(None)
        if reason.check(ResponseDone, PotentialDataLoss):
            self.deferred.callback(None)
        else:
            self.deferred.errback(reason) | 
	python | 
	def connectionLost(self, reason):
        """
        Called when the body is complete or the connection was lost.
        @note: As the body length is usually not known at the beginning of the
        response we expect a L{PotentialDataLoss} when Twitter closes the
        stream, instead of L{ResponseDone}. Other exceptions are treated
        as error conditions.
        """
        self.setTimeout(None)
        if reason.check(ResponseDone, PotentialDataLoss):
            self.deferred.callback(None)
        else:
            self.deferred.errback(reason) | 
	Called when the body is complete or the connection was lost.
        @note: As the body length is usually not known at the beginning of the
        response we expect a L{PotentialDataLoss} when Twitter closes the
        stream, instead of L{ResponseDone}. Other exceptions are treated
        as error conditions. | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L325-L338 | 
| 
	dustin/twitty-twister | 
	twittytwister/txml.py | 
	simpleListFactory | 
	def simpleListFactory(list_type):
    """Used for simple parsers that support only one type of object"""
    def create(delegate, extra_args=None):
        """Create a Parser object for the specific tag type, on the fly"""
        return listParser(list_type, delegate, extra_args)
    return create | 
	python | 
	def simpleListFactory(list_type):
    """Used for simple parsers that support only one type of object"""
    def create(delegate, extra_args=None):
        """Create a Parser object for the specific tag type, on the fly"""
        return listParser(list_type, delegate, extra_args)
    return create | 
	Used for simple parsers that support only one type of object | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/txml.py#L305-L310 | 
| 
	dustin/twitty-twister | 
	twittytwister/txml.py | 
	BaseXMLHandler.setSubDelegates | 
	def setSubDelegates(self, namelist, before=None, after=None):
        """Set a delegate for a sub-sub-item, according to a list of names"""
        if len(namelist) > 1:
            def set_sub(i):
                i.setSubDelegates(namelist[1:], before, after)
            self.setBeforeDelegate(namelist[0], set_sub)
        elif len(namelist) == 1:
            self.setDelegate(namelist[0], before, after) | 
	python | 
	def setSubDelegates(self, namelist, before=None, after=None):
        """Set a delegate for a sub-sub-item, according to a list of names"""
        if len(namelist) > 1:
            def set_sub(i):
                i.setSubDelegates(namelist[1:], before, after)
            self.setBeforeDelegate(namelist[0], set_sub)
        elif len(namelist) == 1:
            self.setDelegate(namelist[0], before, after) | 
	Set a delegate for a sub-sub-item, according to a list of names | 
	https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/txml.py#L48-L55 | 
| 
	jupyter/jupyter-drive | 
	jupyterdrive/mixednbmanager.py | 
	_split_path | 
	def _split_path(path):
    """split a path return by the api
    return
        - the sentinel:
        - the rest of the path as a list.
        - the original path stripped of / for normalisation.
    """
    path = path.strip('/')
    list_path = path.split('/')
    sentinel = list_path.pop(0)
    return sentinel, list_path, path | 
	python | 
	def _split_path(path):
    """split a path return by the api
    return
        - the sentinel:
        - the rest of the path as a list.
        - the original path stripped of / for normalisation.
    """
    path = path.strip('/')
    list_path = path.split('/')
    sentinel = list_path.pop(0)
    return sentinel, list_path, path | 
	split a path return by the api
    return
        - the sentinel:
        - the rest of the path as a list.
        - the original path stripped of / for normalisation. | 
	https://github.com/jupyter/jupyter-drive/blob/545813377cb901235e8ea81f83b0ac7755dbd7a9/jupyterdrive/mixednbmanager.py#L21-L32 | 
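A quick usage sketch of `_split_path()`, assuming it is imported from `jupyterdrive.mixednbmanager`; the example path is made up.

```python
from jupyterdrive.mixednbmanager import _split_path

sentinel, rest, stripped = _split_path('/gdrive/notebooks/demo.ipynb')
print(sentinel)   # 'gdrive'  -> selects the backing contents manager
print(rest)       # ['notebooks', 'demo.ipynb']
print(stripped)   # 'gdrive/notebooks/demo.ipynb'
```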
| 
	jupyter/jupyter-drive | 
	jupyterdrive/mixednbmanager.py | 
	MixedContentsManager.path_dispatch_rename | 
	def path_dispatch_rename(rename_like_method):
        """
        decorator for rename-like function, that need dispatch on 2 arguments
        """
        def _wrapper_method(self, old_path, new_path):
            old_path, _old_path, old_sentinel =  _split_path(old_path);
            new_path, _new_path, new_sentinel =  _split_path(new_path);
            if old_sentinel != new_sentinel:
                raise ValueError('Does not know how to move things across contents manager mountpoints')
            else:
                sentinel = new_sentinel
            man = self.managers.get(sentinel, None)
            if man is not None:
                rename_meth = getattr(man, rename_like_method.__name__)
                sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
                return sub
            else :
                return rename_meth(self, old_path, new_path)
        return _wrapper_method | 
	python | 
	def path_dispatch_rename(rename_like_method):
        """
        decorator for rename-like function, that need dispatch on 2 arguments
        """
        def _wrapper_method(self, old_path, new_path):
            old_path, _old_path, old_sentinel =  _split_path(old_path);
            new_path, _new_path, new_sentinel =  _split_path(new_path);
            if old_sentinel != new_sentinel:
                raise ValueError('Does not know how to move things across contents manager mountpoints')
            else:
                sentinel = new_sentinel
            man = self.managers.get(sentinel, None)
            if man is not None:
                rename_meth = getattr(man, rename_like_method.__name__)
                sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
                return sub
            else :
                return rename_meth(self, old_path, new_path)
        return _wrapper_method | 
	decorator for rename-like function, that need dispatch on 2 arguments | 
	https://github.com/jupyter/jupyter-drive/blob/545813377cb901235e8ea81f83b0ac7755dbd7a9/jupyterdrive/mixednbmanager.py#L186-L208 | 
| 
	jupyter/jupyter-drive | 
	jupyterdrive/__init__.py | 
	deactivate | 
	def deactivate(profile='default'):
    """should be a matter of just unsetting the above keys
    """
    with jconfig(profile) as config:
        deact = True;
        if not getattr(config.NotebookApp.contents_manager_class, 'startswith',lambda x:False)('jupyterdrive'):
            deact=False
        if 'gdrive' not in getattr(config.NotebookApp.tornado_settings,'get', lambda _,__:'')('contents_js_source',''):
            deact=False
        if deact:
            del config['NotebookApp']['tornado_settings']['contents_js_source']
            del config['NotebookApp']['contents_manager_class'] | 
	python | 
	def deactivate(profile='default'):
    """should be a matter of just unsetting the above keys
    """
    with jconfig(profile) as config:
        deact = True;
        if not getattr(config.NotebookApp.contents_manager_class, 'startswith',lambda x:False)('jupyterdrive'):
            deact=False
        if 'gdrive' not in getattr(config.NotebookApp.tornado_settings,'get', lambda _,__:'')('contents_js_source',''):
            deact=False
        if deact:
            del config['NotebookApp']['tornado_settings']['contents_js_source']
            del config['NotebookApp']['contents_manager_class'] | 
	should be a matter of just unsetting the above keys | 
	https://github.com/jupyter/jupyter-drive/blob/545813377cb901235e8ea81f83b0ac7755dbd7a9/jupyterdrive/__init__.py#L111-L122 | 
| 
	klavinslab/coral | 
	coral/analysis/utils.py | 
	sequence_type | 
	def sequence_type(seq):
    '''Validates a coral.sequence data type.
    :param sequence_in: input DNA sequence.
    :type sequence_in: any
    :returns: The material - 'dna', 'rna', or 'peptide'.
    :rtype: str
    :raises: ValueError
    '''
    if isinstance(seq, coral.DNA):
        material = 'dna'
    elif isinstance(seq, coral.RNA):
        material = 'rna'
    elif isinstance(seq, coral.Peptide):
        material = 'peptide'
    else:
        raise ValueError('Input was not a recognized coral.sequence object.')
    return material | 
	python | 
	def sequence_type(seq):
    '''Validates a coral.sequence data type.
    :param sequence_in: input DNA sequence.
    :type sequence_in: any
    :returns: The material - 'dna', 'rna', or 'peptide'.
    :rtype: str
    :raises: ValueError
    '''
    if isinstance(seq, coral.DNA):
        material = 'dna'
    elif isinstance(seq, coral.RNA):
        material = 'rna'
    elif isinstance(seq, coral.Peptide):
        material = 'peptide'
    else:
        raise ValueError('Input was not a recognized coral.sequence object.')
    return material | 
	Validates a coral.sequence data type.
    :param sequence_in: input DNA sequence.
    :type sequence_in: any
    :returns: The material - 'dna', 'rna', or 'peptide'.
    :rtype: str
    :raises: ValueError | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/utils.py#L5-L23 | 
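A small usage sketch for `sequence_type()`, assuming coral is installed and that `coral.DNA` accepts a plain sequence string (an assumption; the constructor is not part of this record).

```python
import coral
from coral.analysis.utils import sequence_type

print(sequence_type(coral.DNA('ATGC')))   # 'dna'
try:
    sequence_type('just a plain string')  # not a coral sequence object
except ValueError as err:
    print(err)
```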
| 
	klavinslab/coral | 
	coral/reaction/_pcr.py | 
	pcr | 
	def pcr(template, primer1, primer2, min_tm=50.0, min_primer_len=14):
    '''Simulate a PCR.
    :param template: DNA template from which to PCR.
    :type template: coral.DNA
    :param primer1: First PCR primer.
    :type primer1: coral.Primer
    :param primer2: First PCR primer.
    :type primer2: coral.Primer
    :param min_tm: Minimum melting temperature (Tm) at which primers must bind
                   to the template.
    :type min_tm: float
    :param min_primer_len: Minimum amount of template homology required at the
                           3' end of each primer.
    :type min_primer_len: int
    :returns: A dsDNA Amplicon.
    :rtype: coral.DNA
    :raises: PrimingError if a primer binds more than once on the template,
             primers bind in overlapping sequence of the template, there are no
             forward primer binding sites or reverse priming sites, or if the
             PCR would work only on a circular version of the template (if
             template is linear).
    '''
    # Find match in top or bottom strands for each primer
    p1_matches = coral.analysis.anneal(template, primer1, min_tm=min_tm,
                                       min_len=min_primer_len)
    p2_matches = coral.analysis.anneal(template, primer2, min_tm=min_tm,
                                       min_len=min_primer_len)
    p1_binding_locations = [m[0] for strand in p1_matches for m in strand]
    p2_binding_locations = [m[0] for strand in p2_matches for m in strand]
    # Ensure unique top and bottom matches
    if len(p1_binding_locations) > 1:
        primer_msg = 'Multiple primer 1 binding locations: {}'
        raise PrimingError(primer_msg.format(p1_binding_locations))
    if len(p2_binding_locations) > 1:
        primer_msg = 'Multiple primer 2 binding locations: {}'
        raise PrimingError(primer_msg.format(p2_binding_locations))
    if not p1_binding_locations and not p2_binding_locations:
        raise PrimingError('Neither primer binds the template')
    if not p1_binding_locations:
        raise PrimingError('Primer 1 does not bind the template')
    if not p2_binding_locations:
        raise PrimingError('Primer 2 does not bind the template')
    # Check that primers bind on opposite strands of the template
    tops = p1_matches[0] + p2_matches[0]
    bottoms = p1_matches[1] + p2_matches[1]
    if not tops:
        raise PrimingError('No primers bind the template\'s top strand.')
    if not bottoms:
        raise PrimingError('No primers bind the template\'s bottom strand.')
    # Figure out which primer matches the top strand
    if p1_matches[0]:
        # primer1 is top
        fwd = primer1
        rev = primer2
    else:
        # primer2 matches top strand
        fwd = primer2
        rev = primer1
    # Now we can simulate the PCR. If primer locations are overlapping, we
    # throw an error. If the primers won't amplify a product (e.g. a linear
    # template with primers facing away from one another), throw a different
    # error. Otherwise, amplify the product, including any overhangs.
    # 3' locations, annealing region length
    fwd_3, fwd_len = tops[0]
    rev_3, rev_len = bottoms[0]
    # 5' locations
    fwd_5 = fwd_3 - fwd_len
    rev_5 = rev_3 - rev_len
    # location on the top strand where the 'reverse' primer ends (its 3' end)
    rev_3_top = len(template) - rev_3
    rev_5_top = len(template) - rev_5
    # TODO: Use % operator?
    if rev_5_top > len(template):
        rev_5_top = rev_5_top - len(template)
    # overhangs
    fwd_overhang = fwd.primer()[:-fwd_len]
    rev_overhang = rev.primer()[:-rev_len]
    # TODO: what about searching substrings over circulate templates?
    # Cases:
    # 1)  Primers point towards one another - overlapping is fine
    #       -> isolate region between 5' annealing regions and tack on the
    #          rest of the overhang.
    # 2)  Primers point away from one another, template is linear
    #       -> error
    # 3)  Primers point away from one another, template is circular
    #       a) Primers don't overlap
    #           -> rotate to 'top' primer start, extract sequence
    #       b) Primers overlap
    #           -> Extract whole sequence as linear fragment, tack on rest of
    #              'bottom' primer. May disrupt features.
    if template.circular:
        # Circular template - primers always point towards one another
        if rev_3_top > fwd_3:
            # Inter-primer region doesn't go over the origin (index 0)
            # However, the 'anneal' region may extend over it.
            # FIXME: handle case where 'anneal' region extends over origin
            # FIXME: simplify - just generate 'before' and 'after', then
            # combine with preamplicon later
            if rev_3_top + rev_len > len(template):
                # Reverse primer extends over the origin
                if fwd_5 - fwd_len < 0:
                    # Forward primer extends over the origin
                    preamplicon = template.linearize()
                    # Add extra anneal regions
                    before = template[fwd_5:]
                    after = template[:rev_5_top]
                    preamplicon = before + preamplicon + after
                else:
                    # Only the reverse primer extends over the origin
                    preamplicon = template[fwd_5:]
                    after = template[:rev_5_top]
                    preamplicon = preamplicon + after
            elif fwd_5 - fwd_len < 0:
                # Only the forward primer extends over the origin
                before = template[fwd_5:]
                preamplicon = before + template[:rev_5_top]
            else:
                # Extract like normal
                preamplicon = template[fwd_5:len(template) - rev_5]
        else:
            # Inter-primer region goes over the origin (index 0)
            preamplicon_len = len(template) - fwd_5 + rev_5_top
            preamplicon = template.rotate(-fwd_5)[:preamplicon_len]
    else:
        # Linear template
        if rev_3_top < fwd_5 or fwd_3 > rev_5_top:
            # Primers point away from one another.
            raise PrimingError('Primers point away from one another.')
        else:
            # Primers point towards each other.
            preamplicon = template[fwd_5:len(template) - rev_5]
    # Add overhangs
    amplicon = (fwd_overhang.to_ds() +
                preamplicon +
                rev_overhang.to_ds().reverse_complement())
    return amplicon | 
	python | 
	def pcr(template, primer1, primer2, min_tm=50.0, min_primer_len=14):
    '''Simulate a PCR.
    :param template: DNA template from which to PCR.
    :type template: coral.DNA
    :param primer1: First PCR primer.
    :type primer1: coral.Primer
    :param primer2: Second PCR primer.
    :type primer2: coral.Primer
    :param min_tm: Minimum melting temperature (Tm) at which primers must bind
                   to the template.
    :type min_tm: float
    :param min_primer_len: Minimum amount of template homology required at the
                           3' end of each primer.
    :type min_primer_len: int
    :returns: A dsDNA Amplicon.
    :rtype: coral.DNA
    :raises: PrimingError if a primer binds more than once on the template,
             primers bind in overlapping sequence of the template, there are no
             forward primer binding sites or reverse priming sites, or if the
             PCR would work only on a circular version of the template (if
             template is linear).
    '''
    # Find match in top or bottom strands for each primer
    p1_matches = coral.analysis.anneal(template, primer1, min_tm=min_tm,
                                       min_len=min_primer_len)
    p2_matches = coral.analysis.anneal(template, primer2, min_tm=min_tm,
                                       min_len=min_primer_len)
    p1_binding_locations = [m[0] for strand in p1_matches for m in strand]
    p2_binding_locations = [m[0] for strand in p2_matches for m in strand]
    # Ensure unique top and bottom matches
    if len(p1_binding_locations) > 1:
        primer_msg = 'Multiple primer 1 binding locations: {}'
        raise PrimingError(primer_msg.format(p1_binding_locations))
    if len(p2_binding_locations) > 1:
        primer_msg = 'Multiple primer 2 binding locations: {}'
        raise PrimingError(primer_msg.format(p2_binding_locations))
    if not p1_binding_locations and not p2_binding_locations:
        raise PrimingError('Neither primer binds the template')
    if not p1_binding_locations:
        raise PrimingError('Primer 1 does not bind the template')
    if not p2_binding_locations:
        raise PrimingError('Primer 2 does not bind the template')
    # Check that primers bind on opposite strands of the template
    tops = p1_matches[0] + p2_matches[0]
    bottoms = p1_matches[1] + p2_matches[1]
    if not tops:
        raise PrimingError('No primers bind the template\'s top strand.')
    if not bottoms:
        raise PrimingError('No primers bind the template\'s bottom strand.')
    # Figure out which primer matches the top strand
    if p1_matches[0]:
        # primer1 is top
        fwd = primer1
        rev = primer2
    else:
        # primer2 matches top strand
        fwd = primer2
        rev = primer1
    # Now we can simulate the PCR. If primer locations are overlapping, we
    # throw an error. If the primers won't amplify a product (e.g. a linear
    # template with primers facing away from one another), throw a different
    # error. Otherwise, amplify the product, including any overhangs.
    # 3' locations, annealing region length
    fwd_3, fwd_len = tops[0]
    rev_3, rev_len = bottoms[0]
    # 5' locations
    fwd_5 = fwd_3 - fwd_len
    rev_5 = rev_3 - rev_len
    # location on the top strand where the 'reverse' primer ends (its 3' end)
    rev_3_top = len(template) - rev_3
    rev_5_top = len(template) - rev_5
    # TODO: Use % operator?
    if rev_5_top > len(template):
        rev_5_top = rev_5_top - len(template)
    # overhangs
    fwd_overhang = fwd.primer()[:-fwd_len]
    rev_overhang = rev.primer()[:-rev_len]
    # TODO: what about searching substrings over circular templates?
    # Cases:
    # 1)  Primers point towards one another - overlapping is fine
    #       -> isolate region between 5' annealing regions and tack on the
    #          rest of the overhang.
    # 2)  Primers point away from one another, template is linear
    #       -> error
    # 3)  Primers point away from one another, template is circular
    #       a) Primers don't overlap
    #           -> rotate to 'top' primer start, extract sequence
    #       b) Primers overlap
    #           -> Extract whole sequence as linear fragment, tack on rest of
    #              'bottom' primer. May disrupt features.
    if template.circular:
        # Circular template - primers always point towards one another
        if rev_3_top > fwd_3:
            # Inter-primer region doesn't go over the origin (index 0)
            # However, the 'anneal' region may extend over it.
            # FIXME: handle case where 'anneal' region extends over origin
            # FIXME: simplify - just generate 'before' and 'after', then
            # combine with preamplicon later
            if rev_3_top + rev_len > len(template):
                # Reverse primer extends over the origin
                if fwd_5 - fwd_len < 0:
                    # Forward primer extends over the origin
                    preamplicon = template.linearize()
                    # Add extra anneal regions
                    before = template[fwd_5:]
                    after = template[:rev_5_top]
                    preamplicon = before + preamplicon + after
                else:
                    # Only the reverse primer extends over the origin
                    preamplicon = template[fwd_5:]
                    after = template[:rev_5_top]
                    preamplicon = preamplicon + after
            elif fwd_5 - fwd_len < 0:
                # Only the forward primer extends over the origin
                before = template[fwd_5:]
                preamplicon = before + template[:rev_5_top]
            else:
                # Extract like normal
                preamplicon = template[fwd_5:len(template) - rev_5]
        else:
            # Inter-primer region goes over the origin (index 0)
            preamplicon_len = len(template) - fwd_5 + rev_5_top
            preamplicon = template.rotate(-fwd_5)[:preamplicon_len]
    else:
        # Linear template
        if rev_3_top < fwd_5 or fwd_3 > rev_5_top:
            # Primers point away from one another.
            raise PrimingError('Primers point away from one another.')
        else:
            # Primers point towards each other.
            preamplicon = template[fwd_5:len(template) - rev_5]
    # Add overhangs
    amplicon = (fwd_overhang.to_ds() +
                preamplicon +
                rev_overhang.to_ds().reverse_complement())
    return amplicon | 
	Simulate a PCR.
    :param template: DNA template from which to PCR.
    :type template: coral.DNA
    :param primer1: First PCR primer.
    :type primer1: coral.Primer
    :param primer2: Second PCR primer.
    :type primer2: coral.Primer
    :param min_tm: Minimum melting temperature (Tm) at which primers must bind
                   to the template.
    :type min_tm: float
    :param min_primer_len: Minimum amount of template homology required at the
                           3' end of each primer.
    :type min_primer_len: int
    :returns: A dsDNA Amplicon.
    :rtype: coral.DNA
    :raises: PrimingError if a primer binds more than once on the template,
             primers bind in overlapping sequence of the template, there are no
             forward primer binding sites or reverse priming sites, or if the
             PCR would work only on a circular version of the template (if
             template is linear). | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_pcr.py#L9-L161 | 
| 
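A minimal usage sketch for the pcr record above. It assumes the function is exposed as coral.reaction.pcr and that the primer-design helper documented later in this set is exposed as coral.design.primers (both inferred from their module paths); the template sequence is made up for illustration.

import coral

# Illustrative 60 bp linear template (made-up sequence).
template = coral.DNA('ATGGTGAGCAAGGGCGAGGAGCTGTTCACCGGGGTGGTGCCCATCCTGGTCGAGCTGGAC')

# Design a primer pair against the template, then simulate the PCR.
fwd, rev = coral.design.primers(template, tm=60)
amplicon = coral.reaction.pcr(template, fwd, rev, min_tm=50.0, min_primer_len=14)

# With no overhangs the amplicon should reproduce the template.
print(len(amplicon), len(template))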
	klavinslab/coral | 
	coral/reaction/_restriction.py | 
	digest | 
	def digest(dna, restriction_enzyme):
    '''Restriction endonuclease reaction.
    :param dna: DNA template to digest.
    :type dna: coral.DNA
    :param restriction_enzyme: Restriction enzyme to use.
    :type restriction_enzyme: RestrictionSite
    :returns: list of digested DNA fragments.
    :rtype: coral.DNA list
    '''
    pattern = restriction_enzyme.recognition_site
    located = dna.locate(pattern)
    if not located[0] and not located[1]:
        return [dna]
    # Bottom strand indices are relative to the bottom strand 5' end.
    # Convert to same type as top strand
    pattern_len = len(pattern)
    r_indices = [len(dna) - index - pattern_len for index in
                 located[1]]
    # If the sequence is a palindrome, remove redundant results
    if pattern.is_palindrome():
        r_indices = [index for index in r_indices if index not in
                     located[0]]
    # Flatten cut site indices
    cut_sites = sorted(located[0] + r_indices)
    # Go through each cut site starting at highest one
    # Cut remaining template once, generating remaining + new
    current = [dna]
    for cut_site in cut_sites[::-1]:
        new = _cut(current, cut_site, restriction_enzyme)
        current.append(new[1])
        current.append(new[0])
    current.reverse()
    # Combine first and last back together if digest was circular
    if dna.circular:
        current[0] = current.pop() + current[0]
    return current | 
	python | 
	def digest(dna, restriction_enzyme):
    '''Restriction endonuclease reaction.
    :param dna: DNA template to digest.
    :type dna: coral.DNA
    :param restriction_enzyme: Restriction enzyme to use.
    :type restriction_enzyme: RestrictionSite
    :returns: list of digested DNA fragments.
    :rtype: coral.DNA list
    '''
    pattern = restriction_enzyme.recognition_site
    located = dna.locate(pattern)
    if not located[0] and not located[1]:
        return [dna]
    # Bottom strand indices are relative to the bottom strand 5' end.
    # Convert to same type as top strand
    pattern_len = len(pattern)
    r_indices = [len(dna) - index - pattern_len for index in
                 located[1]]
    # If the sequence is a palindrome, remove redundant results
    if pattern.is_palindrome():
        r_indices = [index for index in r_indices if index not in
                     located[0]]
    # Flatten cut site indices
    cut_sites = sorted(located[0] + r_indices)
    # Go through each cut site starting at highest one
    # Cut remaining template once, generating remaining + new
    current = [dna]
    for cut_site in cut_sites[::-1]:
        new = _cut(current, cut_site, restriction_enzyme)
        current.append(new[1])
        current.append(new[0])
    current.reverse()
    # Combine first and last back together if digest was circular
    if dna.circular:
        current[0] = current.pop() + current[0]
    return current | 
	Restriction endonuclease reaction.
    :param dna: DNA template to digest.
    :type dna: coral.DNA
    :param restriction_enzyme: Restriction enzyme to use.
    :type restriction_enzyme: RestrictionSite
    :returns: list of digested DNA fragments.
    :rtype: coral.DNA list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_restriction.py#L5-L42 | 
| 
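A short sketch of calling digest. The coral.reaction.digest exposure is inferred from the module path, and the RestrictionSite constructor arguments are assumed (a recognition sequence plus a (top, bottom) cut-offset tuple matching the .recognition_site and .cut_site attributes the code reads); the sequence is illustrative.

import coral

# Illustrative linear sequence containing one EcoRV site (GATATC).
seq = coral.DNA('AAAAAGATATCTTTTT')

# Assumed constructor: recognition site plus (top, bottom) cut offsets.
# EcoRV cuts bluntly (GAT^ATC), so both offsets are 3.
ecorv = coral.RestrictionSite(coral.DNA('GATATC'), (3, 3))

fragments = coral.reaction.digest(seq, ecorv)
for fragment in fragments:
    print(fragment)   # expect AAAAAGAT and ATCTTTTT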
	klavinslab/coral | 
	coral/reaction/_restriction.py | 
	_cut | 
	def _cut(dna, index, restriction_enzyme):
    '''Cuts template once at the specified index.
    :param dna: DNA to cut
    :type dna: coral.DNA
    :param index: index at which to cut
    :type index: int
    :param restriction_enzyme: Enzyme with which to cut
    :type restriction_enzyme: coral.RestrictionSite
    :returns: 2-element list of digested sequence, including any overhangs.
    :rtype: list
    '''
    # TODO: handle case where cut site is outside of recognition sequence,
    # for both circular and linear cases where site is at index 0
    # Find absolute indices at which to cut
    cut_site = restriction_enzyme.cut_site
    top_cut = index + cut_site[0]
    bottom_cut = index + cut_site[1]
    # Isolate left and right sequences
    to_cut = dna.pop()
    max_cut = max(top_cut, bottom_cut)
    min_cut = min(top_cut, bottom_cut)
    left = to_cut[:max_cut]
    right = to_cut[min_cut:]
    # If applicable, leave overhangs
    diff = top_cut - bottom_cut
    if not diff:
        # Blunt-end cutter, no adjustment necessary
        pass
    elif diff > 0:
        # 3' overhangs
        left = coral.reaction.five_resect(left.flip(), diff).flip()
        right = coral.reaction.five_resect(right, diff)
    else:
        # 5' overhangs
        left = coral.reaction.three_resect(left, abs(diff))
        right = coral.reaction.three_resect(right.flip(), abs(diff)).flip()
    return [left, right] | 
	python | 
	def _cut(dna, index, restriction_enzyme):
    '''Cuts template once at the specified index.
    :param dna: DNA to cut
    :type dna: coral.DNA
    :param index: index at which to cut
    :type index: int
    :param restriction_enzyme: Enzyme with which to cut
    :type restriction_enzyme: coral.RestrictionSite
    :returns: 2-element list of digested sequence, including any overhangs.
    :rtype: list
    '''
    # TODO: handle case where cut site is outside of recognition sequence,
    # for both circular and linear cases where site is at index 0
    # Find absolute indices at which to cut
    cut_site = restriction_enzyme.cut_site
    top_cut = index + cut_site[0]
    bottom_cut = index + cut_site[1]
    # Isolate left and right sequences
    to_cut = dna.pop()
    max_cut = max(top_cut, bottom_cut)
    min_cut = min(top_cut, bottom_cut)
    left = to_cut[:max_cut]
    right = to_cut[min_cut:]
    # If applicable, leave overhangs
    diff = top_cut - bottom_cut
    if not diff:
        # Blunt-end cutter, no adjustment necessary
        pass
    elif diff > 0:
        # 3' overhangs
        left = coral.reaction.five_resect(left.flip(), diff).flip()
        right = coral.reaction.five_resect(right, diff)
    else:
        # 5' overhangs
        left = coral.reaction.three_resect(left, abs(diff))
        right = coral.reaction.three_resect(right.flip(), abs(diff)).flip()
    return [left, right] | 
	Cuts template once at the specified index.
    :param dna: DNA to cut
    :type dna: coral.DNA
    :param index: index at which to cut
    :type index: int
    :param restriction_enzyme: Enzyme with which to cut
    :type restriction_enzyme: coral.RestrictionSite
    :returns: 2-element list of digested sequence, including any overhangs.
    :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_restriction.py#L45-L86 | 
| 
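The subtle part of _cut is how the enzyme's cut_site offsets determine blunt ends versus 5' or 3' overhangs. A standalone sketch of just that offset arithmetic (no coral objects involved):

# Offset arithmetic mirroring the diff logic in _cut (illustrative only).
def cut_positions(site_index, cut_site):
    '''Return (top_cut, bottom_cut, diff) for a recognition site.

    diff == 0 means blunt ends, diff > 0 means 3' overhangs, and diff < 0
    means 5' overhangs, matching the branches in _cut above.
    '''
    top_cut = site_index + cut_site[0]
    bottom_cut = site_index + cut_site[1]
    return top_cut, bottom_cut, top_cut - bottom_cut

# EcoRI-style site (G^AATTC / CTTAA^G) found at index 10:
print(cut_positions(10, (1, 5)))   # -> (11, 15, -4): four-base 5' overhangs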
	klavinslab/coral | 
	bin/ipynb2rst.py | 
	ipynb_to_rst | 
	def ipynb_to_rst(directory, filename):
    """Converts a given file in a directory to an rst in the same directory."""
    print(filename)
    os.chdir(directory)
    subprocess.Popen(["ipython", "nbconvert", "--to", "rst",
                      filename],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
                     cwd=directory) | 
	python | 
	def ipynb_to_rst(directory, filename):
    """Converts a given file in a directory to an rst in the same directory."""
    print(filename)
    os.chdir(directory)
    subprocess.Popen(["ipython", "nbconvert", "--to", "rst",
                      filename],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
                     cwd=directory) | 
	Converts a given file in a directory to an rst in the same directory. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/bin/ipynb2rst.py#L13-L21 | 
| 
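A one-off call to ipynb_to_rst, assuming bin/ is importable (so the script can be loaded as a module) and that an IPython installation with the legacy 'ipython nbconvert' command is available; the notebook name and directory are hypothetical.

import os
from ipynb2rst import ipynb_to_rst   # assumes bin/ is on sys.path

# Convert a single (hypothetical) notebook in place.
ipynb_to_rst(os.path.abspath('docs/ipython_examples'), 'cloning_tutorial.ipynb')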
	klavinslab/coral | 
	bin/ipynb2rst.py | 
	convert_ipynbs | 
	def convert_ipynbs(directory):
    """Recursively converts all ipynb files in a directory into rst files in
    the same directory."""
    # The ipython_examples dir has to be in the same dir as this script
    for root, subfolders, files in os.walk(os.path.abspath(directory)):
        for f in files:
            if ".ipynb_checkpoints" not in root:
                if f.endswith("ipynb"):
                    ipynb_to_rst(root, f) | 
	python | 
	def convert_ipynbs(directory):
    """Recursively converts all ipynb files in a directory into rst files in
    the same directory."""
    # The ipython_examples dir has to be in the same dir as this script
    for root, subfolders, files in os.walk(os.path.abspath(directory)):
        for f in files:
            if ".ipynb_checkpoints" not in root:
                if f.endswith("ipynb"):
                    ipynb_to_rst(root, f) | 
	Recursively converts all ipynb files in a directory into rst files in
    the same directory. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/bin/ipynb2rst.py#L24-L32 | 
| 
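And the recursive variant, under the same import assumption; the directory is hypothetical.

from ipynb2rst import convert_ipynbs

# Convert every notebook under the docs tree, skipping .ipynb_checkpoints.
convert_ipynbs('docs/ipython_examples')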
	klavinslab/coral | 
	coral/analysis/_structure/structure_windows.py | 
	_context_walk | 
	def _context_walk(dna, window_size, context_len, step):
    '''Generate context-dependent 'non-boundedness' scores for a DNA sequence.
    :param dna: Sequence to score.
    :type dna: coral.DNA
    :param window_size: Window size in base pairs.
    :type window_size: int
    :param context_len: The number of bases of context to use when analyzing
                        each window.
    :type context_len: int
    :param step: The number of base pairs to move for each new window.
    :type step: int
    '''
    # Generate window indices
    window_start_ceiling = len(dna) - context_len - window_size
    window_starts = range(context_len - 1, window_start_ceiling, step)
    window_ends = [start + window_size for start in window_starts]
    # Generate left and right in-context subsequences
    l_starts = [step * i for i in range(len(window_starts))]
    l_seqs = [dna[start:end] for start, end in zip(l_starts, window_ends)]
    r_ends = [x + window_size + context_len for x in window_starts]
    r_seqs = [dna[start:end].reverse_complement() for start, end in
              zip(window_starts, r_ends)]
    # Combine and calculate nupack pair probabilities
    seqs = l_seqs + r_seqs
    pairs_run = coral.analysis.nupack_multi(seqs, 'dna', 'pairs', {'index': 0})
    # Focus on pair probabilities that matter - those in the window
    pairs = [run[-window_size:] for run in pairs_run]
    # Score by average pair probability
    lr_scores = [sum(pair) / len(pair) for pair in pairs]
    # Split into left-right contexts again and sum for each window
    l_scores = lr_scores[0:len(seqs) / 2]
    r_scores = lr_scores[len(seqs) / 2:]
    scores = [(l + r) / 2 for l, r in zip(l_scores, r_scores)]
    # Summarize and return window indices and score
    summary = zip(window_starts, window_ends, scores)
    return summary | 
	python | 
	def _context_walk(dna, window_size, context_len, step):
    '''Generate context-dependent 'non-boundedness' scores for a DNA sequence.
    :param dna: Sequence to score.
    :type dna: coral.DNA
    :param window_size: Window size in base pairs.
    :type window_size: int
    :param context_len: The number of bases of context to use when analyzing
                        each window.
    :type context_len: int
    :param step: The number of base pairs to move for each new window.
    :type step: int
    '''
    # Generate window indices
    window_start_ceiling = len(dna) - context_len - window_size
    window_starts = range(context_len - 1, window_start_ceiling, step)
    window_ends = [start + window_size for start in window_starts]
    # Generate left and right in-context subsequences
    l_starts = [step * i for i in range(len(window_starts))]
    l_seqs = [dna[start:end] for start, end in zip(l_starts, window_ends)]
    r_ends = [x + window_size + context_len for x in window_starts]
    r_seqs = [dna[start:end].reverse_complement() for start, end in
              zip(window_starts, r_ends)]
    # Combine and calculate nupack pair probabilities
    seqs = l_seqs + r_seqs
    pairs_run = coral.analysis.nupack_multi(seqs, 'dna', 'pairs', {'index': 0})
    # Focus on pair probabilities that matter - those in the window
    pairs = [run[-window_size:] for run in pairs_run]
    # Score by average pair probability
    lr_scores = [sum(pair) / len(pair) for pair in pairs]
    # Split into left-right contexts again and sum for each window
    l_scores = lr_scores[0:len(seqs) / 2]
    r_scores = lr_scores[len(seqs) / 2:]
    scores = [(l + r) / 2 for l, r in zip(l_scores, r_scores)]
    # Summarize and return window indices and score
    summary = zip(window_starts, window_ends, scores)
    return summary | 
	Generate context-dependent 'non-boundedness' scores for a DNA sequence.
    :param dna: Sequence to score.
    :type dna: coral.DNA
    :param window_size: Window size in base pairs.
    :type window_size: int
    :param context_len: The number of bases of context to use when analyzing
                        each window.
    :type context_len: int
    :param step: The number of base pairs to move for each new window.
    :type step: int | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/structure_windows.py#L56-L98 | 
| 
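The index bookkeeping is the least obvious part of _context_walk: each window of window_size bases is scored twice, once with context_len bases of left context and once, reverse-complemented, with right context. A standalone sketch of just that windowing arithmetic, with the NUPACK scoring left out (parameters are illustrative):

# Window/context indices as computed by _context_walk.
seq_len, window_size, context_len, step = 500, 60, 90, 10

window_starts = list(range(context_len - 1, seq_len - context_len - window_size, step))
window_ends = [start + window_size for start in window_starts]
left_starts = [step * i for i in range(len(window_starts))]
right_ends = [start + window_size + context_len for start in window_starts]

# Window i is scored from seq[left_starts[i]:window_ends[i]] and from the
# reverse complement of seq[window_starts[i]:right_ends[i]].
print(window_starts[:3], window_ends[:3], left_starts[:3], right_ends[:3])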
	klavinslab/coral | 
	coral/analysis/_structure/structure_windows.py | 
	StructureWindows.windows | 
	def windows(self, window_size=60, context_len=90, step=10):
        '''Walk through the sequence of interest in windows of window_size,
        evaluate free (unbound) pair probabilities.
        :param window_size: Window size in base pairs.
        :type window_size: int
        :param context_len: The number of bases of context to use when
                            analyzing each window.
        :type context_len: int
        :param step: The number of base pairs to move for each new window.
        :type step: int
        '''
        self.walked = _context_walk(self.template, window_size, context_len,
                                    step)
        self.core_starts, self.core_ends, self.scores = zip(*self.walked)
        return self.walked | 
	python | 
	def windows(self, window_size=60, context_len=90, step=10):
        '''Walk through the sequence of interest in windows of window_size,
        evaluate free (unbound) pair probabilities.
        :param window_size: Window size in base pairs.
        :type window_size: int
        :param context_len: The number of bases of context to use when
                            analyzing each window.
        :type context_len: int
        :param step: The number of base pairs to move for each new window.
        :type step: int
        '''
        self.walked = _context_walk(self.template, window_size, context_len,
                                    step)
        self.core_starts, self.core_ends, self.scores = zip(*self.walked)
        return self.walked | 
	Walk through the sequence of interest in windows of window_size,
        evaluate free (unbound) pair probabilities.
        :param window_size: Window size in base pairs.
        :type window_size: int
        :param context_len: The number of bases of context to use when
                            analyzing each window.
        :type context_len: int
        :param step: The number of base pairs to move for each new window.
        :type step: int | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/structure_windows.py#L20-L36 | 
| 
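A sketch of driving the scan through the class. It assumes StructureWindows lives under coral.analysis and takes the sequence as its only constructor argument (storing it as self.template, as the method above implies), and that NUPACK is installed; the sequence is illustrative.

import coral

# Hypothetical 500 bp sequence to scan for accessible (unbound) windows.
dna = coral.DNA('ATGC' * 125)

walker = coral.analysis.StructureWindows(dna)   # assumed constructor
results = walker.windows(window_size=60, context_len=90, step=10)
for start, end, score in list(results)[:5]:
    print(start, end, round(score, 3))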
	klavinslab/coral | 
	coral/analysis/_structure/structure_windows.py | 
	StructureWindows.plot | 
	def plot(self):
        '''Plot the results of the run method.'''
        try:
            from matplotlib import pylab
        except ImportError:
            raise ImportError('Optional dependency matplotlib not installed.')
        if self.walked:
            fig = pylab.figure()
            ax1 = fig.add_subplot(111)
            ax1.plot(self.core_starts, self.scores, 'bo-')
            pylab.xlabel('Core sequence start position (base pairs).')
            pylab.ylabel('Score - Probability of being unbound.')
            pylab.show()
        else:
            raise Exception('Run calculate() first so there\'s data to plot!') | 
	python | 
	def plot(self):
        '''Plot the results of the run method.'''
        try:
            from matplotlib import pylab
        except ImportError:
            raise ImportError('Optional dependency matplotlib not installed.')
        if self.walked:
            fig = pylab.figure()
            ax1 = fig.add_subplot(111)
            ax1.plot(self.core_starts, self.scores, 'bo-')
            pylab.xlabel('Core sequence start position (base pairs).')
            pylab.ylabel('Score - Probability of being unbound.')
            pylab.show()
        else:
            raise Exception('Run calculate() first so there\'s data to plot!') | 
	Plot the results of the run method. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/structure_windows.py#L38-L53 | 
| 
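Continuing the sketch above, plotting afterwards only needs matplotlib:

# Line plot of window start position vs. probability of being unbound.
walker.plot()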
	klavinslab/coral | 
	coral/analysis/_sequence/anneal.py | 
	anneal | 
	def anneal(template, primer, min_tm=50.0, min_len=10):
    '''Simulates a primer binding event. Will find the maximum subset
    of bases in the primer that binds to the template, including overhang
    sequences. **Note**: Primer binding locations indicate the 3' end of the
    primer, not the beginning of the annealing sequence.
    :param template: DNA template for which to bind a primer.
    :type template: coral.DNA
    :param primer: Primer to bind to template.
    :type primer: coral.Primer
    :param min_tm: The cutoff melting temperature for primer binding - a binder
                   with a lower Tm will be rejected.
    :type min_tm: float
    :param min_len: The cutoff for bases required for binding - a binder with
                    fewer bases will be rejected.
    :type min_len: int
    :returns: A length 2 list (top and bottom strands) of matches. Each
              match is itself a 2-tuple indicating (1) the location on the
              template of the 3' end of the primer binding site and (2) the
              length of the match (number of bases), e.g. [[(25, 15)],[]] would
              indicate a single top-strand match at template position 25 with
              15 bases of 3' primer homology.
    :rtype: list
    :raises: PrimerLengthError if primer length is too small.
             AnnealError if inputs are of the wrong type.
    '''
    # TODO: add possibility for primer basepair mismatch
    if len(primer) < min_len:
        msg = 'Primer length is shorter than min_len argument.'
        raise PrimerLengthError(msg)
    if len(template) < min_len:
        msg = 'Template is shorter than the min_len argument.'
        raise AnnealError(msg)
    # Strategy: locate all min-length matches, then extend them until they
    # no longer match. This provides an advantage over the previous strategy of
    # updating a dictionary with indices from coral.DNA.locate() as keys, as
    # the latter's indices may actually move for a given primer as it passes
    # over the origin
    def update_match_linear(base, location_length, anneal_seq):
        '''Increase the location and length of binding site, if applicable.'''
        # TODO: this is very inefficient - should stop updating once the first
        # mismatch occurs.
        location, length = location_length
        if location == 0:
            return location_length
        location_next = location - 1
        length_next = length + 1
        seq = base[location_next:location_next + length_next]
        if seq == anneal_seq:
            return (location_next, length_next)
        else:
            return location_length
    def update_match_circular(base, location_length, anneal_seq):
        '''Increase the location and length of binding site, if applicable.'''
        # TODO: this is very inefficient - should stop updating once the first
        # mismatch occurs.
        base_len = len(base)
        location, length = location_length
        if location == 0:
            location_next = base_len - 1
        else:
            location_next = location - 1
        length_next = length + 1
        if (location_next + length_next) > base_len:
            upstream = base[location_next:]
            downstream = base[:length_next - (base_len - location_next)]
            seq = upstream + downstream
        else:
            # No need to 'rotate' sequence
            seq = base[location_next:location_next + length_next]
        if seq == anneal_seq:
            return (location_next, length_next)
        else:
            return location_length
    if template.circular:
        update_fun = update_match_circular
    else:
        update_fun = update_match_linear
    # Maximum annealing length to test (can't exceed template length)
    max_len = min(len(template), len(primer))
    primer_dna = primer.to_ds()
    anneal_len = min_len
    anneal_seq = primer_dna[-anneal_len:]
    binding_data = []
    for k, strand_locs in enumerate(template.locate(anneal_seq)):
        matches = zip(strand_locs, [min_len] * len(strand_locs))
        for i in range(anneal_len + 1, max_len + 1):
            anneal_seq = primer_dna[-i:]
            for j, match in enumerate(matches):
                if k == 0:
                    matches[j] = update_fun(template.top, match, anneal_seq)
                else:
                    matches[j] = update_fun(template.bottom, match,
                                            anneal_seq)
        binding_data.append(matches)
    # Now, filter out all the matches that are too short
    for i in reversed(range(len(primer_dna) + 1)):
        min_len = i + 1
        tm = primer_dna[-min_len:].tm()
        if tm < min_tm:
            break
    for strand in binding_data:
        for i in reversed(range(len(strand))):
            if strand[i][1] < min_len:
                strand.pop(i)
    # Finally, adjust the position to be the 3' end
    for strand in binding_data:
        for i, match in enumerate(strand):
            length = match[1]
            loc_new = match[0] + length
            if loc_new > len(template):
                # Circularly permute
                loc_new = loc_new - len(template)
            strand[i] = [loc_new, length]
    # Overwriting dictionary keys ensures uniqueness
    # fwd_matches = {}
    # rev_matches = {}
    # for i in range(len(primer) - min_len + 1)[::-1]:
    #     primer_dna = primer.overhang + primer.anneal
    #     annealing = primer_dna[i:]
    #     anneal_temp = annealing.tm()
    #     anneal_len = len(annealing)
    #     if anneal_temp > min_tm:
    #         p_matches = template.locate(annealing)
    #         for match in p_matches[0]:
    #             fwd_matches[match + anneal_len] = anneal_len
    #         for match in p_matches[1]:
    #             rev_matches[match + anneal_len] = anneal_len
    # # Convert dictionaries to lists
    # fwds = [[key, val] for key, val in fwd_matches.iteritems()]
    # revs = [[key, val] for key, val in rev_matches.iteritems()]
    return binding_data | 
	python | 
	def anneal(template, primer, min_tm=50.0, min_len=10):
    '''Simulates a primer binding event. Will find the maximum subset
    of bases in the primer that binds to the template, including overhang
    sequences. **Note**: Primer binding locations indicate the 3' end of the
    primer, not the beginning of the annealing sequence.
    :param template: DNA template for which to bind a primer.
    :type template: coral.DNA
    :param primer: Primer to bind to template.
    :type primer: coral.Primer
    :param min_tm: The cutoff melting temperature for primer binding - a binder
                   with a lower Tm will be rejected.
    :type min_tm: float
    :param min_len: The cutoff for bases required for binding - a binder with
                    fewer bases will be rejected.
    :type min_len: int
    :returns: A length 2 list (top and bottom strands) of matches. Each
              match is itself a 2-tuple indicating (1) the location on the
              template of the 3' end of the primer binding site and (2) the
              length of the match (number of bases), e.g. [[(25, 15)],[]] would
              indicate a single top-strand match at template position 25 with
              15 bases of 3' primer homology.
    :rtype: list
    :raises: PrimerLengthError if primer length is too small.
             AnnealError if inputs are of the wrong type.
    '''
    # TODO: add possibility for primer basepair mismatch
    if len(primer) < min_len:
        msg = 'Primer length is shorter than min_len argument.'
        raise PrimerLengthError(msg)
    if len(template) < min_len:
        msg = 'Template is shorter than the min_len argument.'
        raise AnnealError(msg)
    # Strategy: locate all min-length matches, then extend them until they
    # no longer match. This provides an advantage over the previous strategy of
    # updating a dictionary with indices from coral.DNA.locate() as keys, as
    # the latter's indices may actually move for a given primer as it passes
    # over the origin
    def update_match_linear(base, location_length, anneal_seq):
        '''Increase the location and length of binding site, if applicable.'''
        # TODO: this is very inefficient - should stop updating once the first
        # mismatch occurs.
        location, length = location_length
        if location == 0:
            return location_length
        location_next = location - 1
        length_next = length + 1
        seq = base[location_next:location_next + length_next]
        if seq == anneal_seq:
            return (location_next, length_next)
        else:
            return location_length
    def update_match_circular(base, location_length, anneal_seq):
        '''Increase the location and length of binding site, if applicable.'''
        # TODO: this is very inefficient - should stop updating once the first
        # mismatch occurs.
        base_len = len(base)
        location, length = location_length
        if location == 0:
            location_next = base_len - 1
        else:
            location_next = location - 1
        length_next = length + 1
        if (location_next + length_next) > base_len:
            upstream = base[location_next:]
            downstream = base[:length_next - (base_len - location_next)]
            seq = upstream + downstream
        else:
            # No need to 'rotate' sequence
            seq = base[location_next:location_next + length_next]
        if seq == anneal_seq:
            return (location_next, length_next)
        else:
            return location_length
    if template.circular:
        update_fun = update_match_circular
    else:
        update_fun = update_match_linear
    # Maximum annealing length to test (can't exceed template length)
    max_len = min(len(template), len(primer))
    primer_dna = primer.to_ds()
    anneal_len = min_len
    anneal_seq = primer_dna[-anneal_len:]
    binding_data = []
    for k, strand_locs in enumerate(template.locate(anneal_seq)):
        matches = zip(strand_locs, [min_len] * len(strand_locs))
        for i in range(anneal_len + 1, max_len + 1):
            anneal_seq = primer_dna[-i:]
            for j, match in enumerate(matches):
                if k == 0:
                    matches[j] = update_fun(template.top, match, anneal_seq)
                else:
                    matches[j] = update_fun(template.bottom, match,
                                            anneal_seq)
        binding_data.append(matches)
    # Now, filter out all the matches that are too short
    for i in reversed(range(len(primer_dna) + 1)):
        min_len = i + 1
        tm = primer_dna[-min_len:].tm()
        if tm < min_tm:
            break
    for strand in binding_data:
        for i in reversed(range(len(strand))):
            if strand[i][1] < min_len:
                strand.pop(i)
    # Finally, adjust the position to be the 3' end
    for strand in binding_data:
        for i, match in enumerate(strand):
            length = match[1]
            loc_new = match[0] + length
            if loc_new > len(template):
                # Circularly permute
                loc_new = loc_new - len(template)
            strand[i] = [loc_new, length]
    # Overwriting dictionary keys ensures uniqueness
    # fwd_matches = {}
    # rev_matches = {}
    # for i in range(len(primer) - min_len + 1)[::-1]:
    #     primer_dna = primer.overhang + primer.anneal
    #     annealing = primer_dna[i:]
    #     anneal_temp = annealing.tm()
    #     anneal_len = len(annealing)
    #     if anneal_temp > min_tm:
    #         p_matches = template.locate(annealing)
    #         for match in p_matches[0]:
    #             fwd_matches[match + anneal_len] = anneal_len
    #         for match in p_matches[1]:
    #             rev_matches[match + anneal_len] = anneal_len
    # # Convert dictionaries to lists
    # fwds = [[key, val] for key, val in fwd_matches.iteritems()]
    # revs = [[key, val] for key, val in rev_matches.iteritems()]
    return binding_data | 
	Simulates a primer binding event. Will find the maximum subset
    of bases in the primer that binds to the template, including overhang
    sequences. **Note**: Primer binding locations indicate the 3' end of the
    primer, not the beginning of the annealing sequence.
    :param template: DNA template for which to bind a primer.
    :type template: coral.DNA
    :param primer: Primer to bind to template.
    :type primer: coral.Primer
    :param min_tm: The cutoff melting temperature for primer binding - a binder
                   with a lower Tm will be rejected.
    :type min_tm: float
    :param min_len: The cutoff for bases required for binding - a binder with
                    fewer bases will be rejected.
    :type min_len: int
    :returns: A length 2 list (top and bottom strands) of matches. Each
              match is itself a 2-tuple indicating (1) the location on the
              template of the 3' end of the primer binding site and (2) the
              length of the match (number of bases), e.g. [[(25, 15)],[]] would
              indicate a single top-strand match at template position 25 with
              15 bases of 3' primer homology.
    :rtype: list
    :raises: PrimerLengthError if primer length is too small.
             AnnealError if inputs are of the wrong type. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequence/anneal.py#L12-L160 | 
| 
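A minimal anneal sketch with a made-up template. It designs the primer with the helper documented below (exposure as coral.design.primer is assumed from its path) and then locates its binding site; coral.analysis.anneal itself is the call path already used inside pcr above.

import coral

# Illustrative linear template.
template = coral.DNA('ATGGCTAGCTAGGTCGACTGCAGGCATGCAAGCTTGGCACTGGCCGTCGTTTTACAACGT')

fwd = coral.design.primer(template, tm=60)
top_matches, bottom_matches = coral.analysis.anneal(template, fwd,
                                                    min_tm=50.0, min_len=10)
# Each match is (location of the primer's 3' end, annealing length).
print(top_matches, bottom_matches)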
	klavinslab/coral | 
	coral/design/_primers.py | 
	primer | 
	def primer(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
           end_gc=False, tm_parameters='cloning', overhang=None,
           structure=False):
    '''Design primer to a nearest-neighbor Tm setpoint.
    :param dna: Sequence for which to design a primer.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhang: Append the primer to this overhang sequence.
    :type overhang: coral.DNA
    :param structure: Evaluate primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A primer.
    :rtype: coral.Primer
    :raises: ValueError if the Tm of the input sequence is lower than the Tm
             settings allow.
             ValueError if a primer ending with G or C can't be found given
             the Tm settings.
    '''
    # Check Tm of input sequence to see if it's already too low
    seq_tm = coral.analysis.tm(dna, parameters=tm_parameters)
    if seq_tm < (tm - tm_undershoot):
        msg = 'Input sequence Tm is lower than primer Tm setting'
        raise ValueError(msg)
    # Focus on first 90 bases - shouldn't need more than 90bp to anneal
    dna = dna[0:90]
    # Generate primers from min_len to 'tm' + tm_overshoot
    # TODO: this is a good place for optimization. Only calculate as many
    # primers as are needed. Use binary search.
    primers_tms = []
    last_tm = 0
    bases = min_len
    while last_tm <= tm + tm_overshoot and bases != len(dna):
        next_primer = dna[0:bases]
        last_tm = coral.analysis.tm(next_primer, parameters=tm_parameters)
        primers_tms.append((next_primer, last_tm))
        bases += 1
    # Trim primer list based on tm_undershoot and end_gc
    primers_tms = [(primer, melt) for primer, melt in primers_tms if
                   melt >= tm - tm_undershoot]
    if end_gc:
        primers_tms = [pair for pair in primers_tms if
                       pair[0][-1] == coral.DNA('C') or
                       pair[0][-1] == coral.DNA('G')]
    if not primers_tms:
        raise ValueError('No primers could be generated using these settings')
    # Find the primer closest to the set Tm, make it single stranded
    tm_diffs = [abs(melt - tm) for primer, melt in primers_tms]
    best_index = tm_diffs.index(min(tm_diffs))
    best_primer, best_tm = primers_tms[best_index]
    best_primer = best_primer.top
    # Apply overhang
    if overhang:
        overhang = overhang.top
    output_primer = coral.Primer(best_primer, best_tm, overhang=overhang)
    def _structure(primer):
        '''Check annealing sequence for structure.
        :param primer: Primer for which to evaluate structure
        :type primer: sequence.Primer
        '''
        # Check whole primer for high-probability structure, focus in on
        # annealing sequence, report average
        nupack = coral.analysis.Nupack(primer.primer())
        pairs = nupack.pairs(0)
        anneal_len = len(primer.anneal)
        pairs_mean = sum(pairs[-anneal_len:]) / anneal_len
        if pairs_mean < 0.5:
            warnings.warn('High probability structure', Warning)
        return pairs_mean
    if structure:
        _structure(output_primer)
    return output_primer | 
	python | 
	def primer(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
           end_gc=False, tm_parameters='cloning', overhang=None,
           structure=False):
    '''Design primer to a nearest-neighbor Tm setpoint.
    :param dna: Sequence for which to design a primer.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhang: Append the primer to this overhang sequence.
    :type overhang: coral.DNA
    :param structure: Evaluate primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A primer.
    :rtype: coral.Primer
    :raises: ValueError if the Tm of the input sequence is lower than the Tm
             settings allow.
             ValueError if a primer ending with G or C can't be found given
             the Tm settings.
    '''
    # Check Tm of input sequence to see if it's already too low
    seq_tm = coral.analysis.tm(dna, parameters=tm_parameters)
    if seq_tm < (tm - tm_undershoot):
        msg = 'Input sequence Tm is lower than primer Tm setting'
        raise ValueError(msg)
    # Focus on first 90 bases - shouldn't need more than 90bp to anneal
    dna = dna[0:90]
    # Generate primers from min_len to 'tm' + tm_overshoot
    # TODO: this is a good place for optimization. Only calculate as many
    # primers as are needed. Use binary search.
    primers_tms = []
    last_tm = 0
    bases = min_len
    while last_tm <= tm + tm_overshoot and bases != len(dna):
        next_primer = dna[0:bases]
        last_tm = coral.analysis.tm(next_primer, parameters=tm_parameters)
        primers_tms.append((next_primer, last_tm))
        bases += 1
    # Trim primer list based on tm_undershoot and end_gc
    primers_tms = [(primer, melt) for primer, melt in primers_tms if
                   melt >= tm - tm_undershoot]
    if end_gc:
        primers_tms = [pair for pair in primers_tms if
                       pair[0][-1] == coral.DNA('C') or
                       pair[0][-1] == coral.DNA('G')]
    if not primers_tms:
        raise ValueError('No primers could be generated using these settings')
    # Find the primer closest to the set Tm, make it single stranded
    tm_diffs = [abs(melt - tm) for primer, melt in primers_tms]
    best_index = tm_diffs.index(min(tm_diffs))
    best_primer, best_tm = primers_tms[best_index]
    best_primer = best_primer.top
    # Apply overhang
    if overhang:
        overhang = overhang.top
    output_primer = coral.Primer(best_primer, best_tm, overhang=overhang)
    def _structure(primer):
        '''Check annealing sequence for structure.
        :param primer: Primer for which to evaluate structure
        :type primer: sequence.Primer
        '''
        # Check whole primer for high-probability structure, focus in on
        # annealing sequence, report average
        nupack = coral.analysis.Nupack(primer.primer())
        pairs = nupack.pairs(0)
        anneal_len = len(primer.anneal)
        pairs_mean = sum(pairs[-anneal_len:]) / anneal_len
        if pairs_mean < 0.5:
            warnings.warn('High probability structure', Warning)
        return pairs_mean
    if structure:
        _structure(output_primer)
    return output_primer | 
	Design primer to a nearest-neighbor Tm setpoint.
    :param dna: Sequence for which to design a primer.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhang: Append the primer to this overhang sequence.
    :type overhang: coral.DNA
    :param structure: Evaluate primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A primer.
    :rtype: coral.Primer
    :raises: ValueError if the Tm of the input sequence is lower than the Tm
             settings allow.
             ValueError if a primer ending with G or C can't be found given
             the Tm settings. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_primers.py#L6-L98 | 
| 
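A sketch of single-primer design with an overhang. The overhang is passed as coral.DNA (the function reads overhang.top, so a plain string would not work); the coral.design.primer exposure is assumed from the module path and the BamHI overhang is purely illustrative.

import coral

template = coral.DNA('ATGGCTAGCTAGGTCGACTGCAGGCATGCAAGCTTGGCACTGGCCGTCGTTTTACAACGT')

# Forward primer near a 65 C Tm with a hypothetical BamHI-site overhang.
fwd = coral.design.primer(template, tm=65, min_len=10,
                          overhang=coral.DNA('GGATCC'))
print(fwd.primer())   # overhang + annealing sequence
print(fwd.anneal)     # annealing region only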
	klavinslab/coral | 
	coral/design/_primers.py | 
	primers | 
	def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
            end_gc=False, tm_parameters='cloning', overhangs=None,
            structure=False):
    '''Design primers for PCR amplifying any arbitrary sequence.
    :param dna: Input sequence.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhangs: 2-tuple of overhang sequences.
    :type overhangs: tuple
    :param structure: Evaluate each primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A list of primers (the output of primer).
    :rtype: list
    '''
    if not overhangs:
        overhangs = [None, None]
    templates = [dna, dna.reverse_complement()]
    primer_list = []
    for template, overhang in zip(templates, overhangs):
        primer_i = primer(template, tm=tm, min_len=min_len,
                          tm_undershoot=tm_undershoot,
                          tm_overshoot=tm_overshoot, end_gc=end_gc,
                          tm_parameters=tm_parameters,
                          overhang=overhang, structure=structure)
        primer_list.append(primer_i)
    return primer_list | 
	python | 
	def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
            end_gc=False, tm_parameters='cloning', overhangs=None,
            structure=False):
    '''Design primers for PCR amplifying any arbitrary sequence.
    :param dna: Input sequence.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhangs: 2-tuple of overhang sequences.
    :type overhangs: tuple
    :param structure: Evaluate each primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A list of primers (the output of primer).
    :rtype: list
    '''
    if not overhangs:
        overhangs = [None, None]
    templates = [dna, dna.reverse_complement()]
    primer_list = []
    for template, overhang in zip(templates, overhangs):
        primer_i = primer(template, tm=tm, min_len=min_len,
                          tm_undershoot=tm_undershoot,
                          tm_overshoot=tm_overshoot, end_gc=end_gc,
                          tm_parameters=tm_parameters,
                          overhang=overhang, structure=structure)
        primer_list.append(primer_i)
    return primer_list | 
	Design primers for PCR amplifying any arbitrary sequence.
    :param dna: Input sequence.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhangs: 2-tuple of overhang sequences.
    :type overhangs: tuple
    :param structure: Evaluate each primer for structure, with warning for high
                      structure.
    :type structure: bool
    :returns: A list of primers (the output of primer).
    :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_primers.py#L101-L140 | 
| 
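primers wraps the single-primer designer for both strands; a sketch with a pair of hypothetical homology-arm overhangs (same exposure assumption as above):

import coral

template = coral.DNA('ATGGCTAGCTAGGTCGACTGCAGGCATGCAAGCTTGGCACTGGCCGTCGTTTTACAACGT')

# Hypothetical homology arms supplied as a 2-tuple of coral.DNA overhangs.
overhangs = (coral.DNA('CAGTCAGTCAGT'), coral.DNA('TGACTGACTGAC'))
fwd, rev = coral.design.primers(template, tm=65, overhangs=overhangs)
print(fwd.primer())
print(rev.primer())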
	klavinslab/coral | 
	coral/reaction/_oligo_assembly.py | 
	assemble_oligos | 
	def assemble_oligos(dna_list, reference=None):
    '''Given a list of DNA sequences, assemble into a single construct.
    :param dna_list: List of DNA sequences - they must be single-stranded.
    :type dna_list: coral.DNA list
    :param reference: Expected sequence - once assembly is completed, this will
    be used to reorient the DNA (assembly could potentially occur from either
    side of a linear DNA construct if oligos are in a random order). If this
    fails, an AssemblyError is raised.
    :type reference: coral.DNA
    :raises: AssemblyError if it can't assemble for any reason.
    :returns: A single assembled DNA sequence
    :rtype: coral.DNA
    '''
    # FIXME: this protocol currently only supports 5' ends on the assembly
    # Find all matches for every oligo. If more than 2 per side, error.
    # Self-oligo is included in case the 3' end is self-complementary.
    # 1) Find all unique 3' binders (and non-binders).
    match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
               enumerate(dna_list)]
    # 2) Find all unique 5' binders (and non-binders).
    match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
               enumerate(dna_list)]
    # Assemble into 2-tuple
    zipped = zip(match_5, match_3)
    # 3) If none found, error out with 'oligo n has no binders'
    for i, oligo_match in enumerate(zipped):
        if not any(oligo_match):
            error = 'Oligo {} has no binding partners.'.format(i + 1)
            raise AssemblyError(error)
    # 4) There should be exactly 2 oligos that bind at 3' end but
    # not 5'.
    ends = []
    for i, (five, three) in enumerate(zipped):
        if five is None and three is not None:
            ends.append(i)
    # 5) If more than 2, error with 'too many ends'.
    if len(ends) > 2:
        raise AssemblyError('Too many (>2) end oligos found.')
    # 6) If fewer than 2, error with 'not enough ends'.
    if len(ends) < 2:
        raise AssemblyError('Not enough (<2) end oligos found.')
    # NOTE:If 1-4 are satisfied, unique linear assembly has been found (proof?)
    # 8) Start with first end and build iteratively
    last_index = ends[0]
    assembly = dna_list[last_index]
    flip = True
    # This would be slightly less complicated if the sequences were tied to
    # their match info in a tuple
    # Append next region n - 1 times
    for i in range(len(dna_list) - 1):
        if flip:
            # Next oligo needs to be flipped before concatenation
            # Grab 3' match from last oligo's info
            current_index, matchlen = zipped[last_index][1]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement for concatenation
            next_oligo = next_oligo.reverse_complement()
            # Don't reverse complement the next one
            flip = False
        else:
            # Grab 5' match from last oligo's info
            current_index, matchlen = zipped[last_index][0]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement the next one
            flip = True
        # Trim overlap from new sequence
        next_oligo = next_oligo[(matchlen - 1):]
        # Concatenate and update last oligo's information
        assembly += next_oligo
        last_index = current_index
    if reference:
        if assembly == reference or assembly == reference.reverse_complement():
            return assembly
        else:
            raise AssemblyError('Assembly did not match reference')
    else:
        return assembly | 
	python | 
	def assemble_oligos(dna_list, reference=None):
    '''Given a list of DNA sequences, assemble into a single construct.
    :param dna_list: List of DNA sequences - they must be single-stranded.
    :type dna_list: coral.DNA list
    :param reference: Expected sequence - once assembly is completed, this will
    be used to reorient the DNA (assembly could potentially occur from either
    side of a linear DNA construct if oligos are in a random order). If this
    fails, an AssemblyError is raised.
    :type reference: coral.DNA
    :raises: AssemblyError if it can't assemble for any reason.
    :returns: A single assembled DNA sequence
    :rtype: coral.DNA
    '''
    # FIXME: this protocol currently only supports 5' ends on the assembly
    # Find all matches for every oligo. If more than 2 per side, error.
    # Self-oligo is included in case the 3' end is self-complementary.
    # 1) Find all unique 3' binders (and non-binders).
    match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
               enumerate(dna_list)]
    # 2) Find all unique 5' binders (and non-binders).
    match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
               enumerate(dna_list)]
    # Assemble into 2-tuple
    zipped = zip(match_5, match_3)
    # 3) If none found, error out with 'oligo n has no binders'
    for i, oligo_match in enumerate(zipped):
        if not any(oligo_match):
            error = 'Oligo {} has no binding partners.'.format(i + 1)
            raise AssemblyError(error)
    # 4) There should be exactly 2 oligos that bind at 3' end but
    # not 5'.
    ends = []
    for i, (five, three) in enumerate(zipped):
        if five is None and three is not None:
            ends.append(i)
    # 5) If more than 2, error with 'too many ends'.
    if len(ends) > 2:
        raise AssemblyError('Too many (>2) end oligos found.')
    # 6) If fewer than 2, error with 'not enough ends'.
    if len(ends) < 2:
        raise AssemblyError('Not enough (<2) end oligos found.')
    # NOTE: if 1-6 hold, a unique linear assembly has been found (proof?)
    # 7) Start with the first end oligo and build iteratively
    last_index = ends[0]
    assembly = dna_list[last_index]
    flip = True
    # This would be slightly less complicated if the sequences were tied to
    # their match info in a tuple
    # Append next region n - 1 times
    for i in range(len(dna_list) - 1):
        if flip:
            # Next oligo needs to be flipped before concatenation
            # Grab 3' match from last oligo's info
            current_index, matchlen = zipped[last_index][1]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement for concatenation
            next_oligo = next_oligo.reverse_complement()
            # Don't reverse complement the next one
            flip = False
        else:
            # Grab 5' match from last oligo's info
            current_index, matchlen = zipped[last_index][0]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement the next one
            flip = True
        # Trim overlap from new sequence
        next_oligo = next_oligo[(matchlen - 1):]
        # Concatenate and update last oligo's information
        assembly += next_oligo
        last_index = current_index
    if reference:
        if assembly == reference or assembly == reference.reverse_complement():
            return assembly
        else:
            raise AssemblyError('Assembly did not match reference')
    else:
        return assembly | 
	Given a list of DNA sequences, assemble into a single construct.
    :param dna_list: List of DNA sequences - they must be single-stranded.
    :type dna_list: coral.DNA list
    :param reference: Expected sequence - once assembly is completed, this will
    be used to reorient the DNA (assembly could potentially occur from either
    side of a linear DNA construct if oligos are in a random order). If this
    fails, an AssemblyError is raised.
    :type reference: coral.DNA
    :raises: AssemblyError if it can't assemble for any reason.
    :returns: A single assembled DNA sequence
    :rtype: coral.DNA | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_oligo_assembly.py#L12-L92 | 
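A minimal usage sketch of assemble_oligos (hypothetical sequences; the export path coral.reaction.assemble_oligos and whether inputs need an explicit single-stranded conversion are assumptions that may differ by coral version):

import random

import coral

random.seed(0)
# Hypothetical 90 bp target built from random bases so that overlaps are unique
target = coral.DNA(''.join(random.choice('ATGC') for _ in range(90)))
# Two oligos on opposite strands sharing a 20 bp overlap at their 3' ends
oligo_1 = target[:55]
oligo_2 = target[35:].reverse_complement()
# assemble_oligos expects single-stranded inputs (see the docstring above);
# conversion to ssDNA is omitted here and depends on the coral version in use.
assembled = coral.reaction.assemble_oligos([oligo_1, oligo_2], reference=target)
print(len(assembled))  # 90 if the assembly matched the reference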
| 
	klavinslab/coral | 
	coral/reaction/_oligo_assembly.py | 
	bind_unique | 
	def bind_unique(reference, query_list, min_overlap=12, right=True):
    '''Find the 5' or 3' region on the reference sequence that uniquely
    matches the reverse complement of the associated (5' or 3') region of one
    sequence in a list of query sequences.
    :param reference: Reference sequence.
    :type reference: coral.DNA
    :param query_list: List of query sequences.
    :type query_list: coral.DNA list
    :param min_overlap: Minimum overlap for a match (in bp).
    :type min_overlap: int
    :param right: Check right side of sequence (3'). False results in 5' check.
    :type right: bool
    :returns: Tuple of the index of the matching query and the size of the
              match in bp, or None if no match is found.
    :rtype: tuple of ints or None
    :raises: AssemblyError if more than one match is found.
    '''
    size = min_overlap
    found = []
    # Reverse complementing here provides massive speedup?
    rev_query = [seq.reverse_complement() for seq in query_list]
    while not found and not size > len(reference):
        for i, seq in enumerate(rev_query):
            if right:
                # FIXME: these getitems are the slowest part of assembly
                # Easiest speedup?
                if reference.endswith(seq[:size]):
                    found.append(i)
            else:
                if reference.startswith(seq[-size:]):
                    found.append(i)
        size += 1
    if len(found) > 1:
        raise AssemblyError('Ambiguous oligo binding')
    if not found:
        return None
    else:
        return found[0], size | 
	python | 
	def bind_unique(reference, query_list, min_overlap=12, right=True):
    '''Find the 5' or 3' region on the reference sequence that uniquely
    matches the reverse complement of the associated (5' or 3') region of one
    sequence in a list of query sequences.
    :param reference: Reference sequence.
    :type reference: coral.DNA
    :param query_list: List of query sequences.
    :type query_list: coral.DNA list
    :param min_overlap: Minimum overlap for a match (in bp).
    :type min_overlap: int
    :param right: Check right side of sequence (3'). False results in 5' check.
    :type right: bool
    :returns: Tuple of the index of the matching query and the size of the
              match in bp, or None if no match is found.
    :rtype: tuple of ints or None
    :raises: AssemblyError if more than one match is found.
    '''
    size = min_overlap
    found = []
    # Reverse complementing here provides massive speedup?
    rev_query = [seq.reverse_complement() for seq in query_list]
    while not found and not size > len(reference):
        for i, seq in enumerate(rev_query):
            if right:
                # FIXME: these getitems are the slowest part of assembly
                # Easiest speedup?
                if reference.endswith(seq[:size]):
                    found.append(i)
            else:
                if reference.startswith(seq[-size:]):
                    found.append(i)
        size += 1
    if len(found) > 1:
        raise AssemblyError('Ambiguous oligo binding')
    if not found:
        return None
    else:
        return found[0], size | 
	Find the 5' or 3' region on the reference sequence that uniquely
    matches the reverse complement of the associated (5' or 3') region of one
    sequence in a list of query sequences.
    :param reference: Reference sequence.
    :type reference: coral.DNA
    :param query_list: List of query sequences.
    :type query_list: coral.DNA list
    :param min_overlap: Minimum overlap for a match (in bp).
    :type min_overlap: int
    :param right: Check right side of sequence (3'). False results in 5' check.
    :type right: bool
    :returns: Tuple of the index of the matching query and the size of the
              match in bp, or None if no match is found.
    :rtype: tuple of ints or None
    :raises: AssemblyError if more than one match is found. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_oligo_assembly.py#L95-L134 | 
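A sketch of bind_unique on its own (it lives in the internal module shown in the URL above; sequences are hypothetical):

import coral
from coral.reaction._oligo_assembly import bind_unique  # internal module path

ref = coral.DNA('ATGGCTACCGTTAAGGCTGACTTACG')
# A query whose 3' end is the reverse complement of ref's last 12 bases
partner = coral.DNA('TTGACCAGT' + str(ref[-12:].reverse_complement()))
unrelated = coral.DNA('CCCCAAAATTTTGGGGCCCCAAAA')
print(bind_unique(ref, [partner, unrelated], min_overlap=12, right=True))
# -> (0, 13): query index 0 anneals to the 3' end; the reported size is one
#    past the 12 bp overlap because size is incremented before the loop exits.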
| 
	klavinslab/coral | 
	coral/analysis/_sequencing/sanger.py | 
	Sanger.nonmatches | 
	def nonmatches(self):
        '''Report mismatches, indels, and coverage.'''
        # For every result, keep a dictionary of mismatches, insertions, and
        # deletions
        report = []
        for result in self.aligned_results:
            report.append(self._analyze_single(self.aligned_reference, result))
        return report | 
	python | 
	def nonmatches(self):
        '''Report mismatches, indels, and coverage.'''
        # For every result, keep a dictionary of mismatches, insertions, and
        # deletions
        report = []
        for result in self.aligned_results:
            report.append(self._analyze_single(self.aligned_reference, result))
        return report | 
	Report mismatches, indels, and coverage. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/sanger.py#L72-L80 | 
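A sketch of how the per-read reports might be consumed (the Sanger constructor signature and export path are assumptions; file names are hypothetical):

import coral

reference = coral.seqio.read_dna('plasmid.gb')               # hypothetical file
reads = [coral.seqio.read_dna('read1.ab1'),
         coral.seqio.read_dna('read2.ab1')]
sanger = coral.analysis.Sanger(reference, reads)             # assumed signature
for name, report in zip(sanger.names, sanger.nonmatches()):
    start, stop = report['coverage']
    print('{}: covers {}-{}, {} mismatches'.format(
        name, start, stop, len(report['mismatches'])))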
| 
	klavinslab/coral | 
	coral/analysis/_sequencing/sanger.py | 
	Sanger.plot | 
	def plot(self):
        '''Make a summary plot of the alignment and highlight nonmatches.'''
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
        # Constants to use throughout drawing
        n = len(self.results)
        nbases = len(self.aligned_reference)
        barheight = 0.4
        # Vary height of figure based on number of results
        figheight = 3 + 3 * (n - 1)
        fig = plt.figure(figsize=(9, figheight))
        ax1 = fig.add_subplot(111)
        # Plot bars to represent coverage area
        # Reference sequence
        ax1.add_patch(patches.Rectangle((0, 0), nbases, barheight,
                                        facecolor='black'))
        # Results
        for i, report in enumerate(self.nonmatches()):
            j = i + 1
            start, stop = report['coverage']
            patch = patches.Rectangle((start, j), stop - start, barheight,
                                      facecolor='darkgray')
            ax1.add_patch(patch)
            # Draw a vertical line for each type of result
            plt.vlines(report['mismatches'], j, j + barheight,
                       colors='b')
            plt.vlines(report['insertions'], j, j + barheight,
                       colors='r')
            # Terminal trailing deletions shouldn't be added
            deletions = []
            crange = range(*report['coverage'])
            deletions = [idx for idx in report['deletions'] if idx in crange]
            plt.vlines(deletions, j, j + barheight,
                       colors='g')
        ax1.set_xlim((0, nbases))
        ax1.set_ylim((-0.3, n + 1))
        ax1.set_yticks([i + barheight / 2 for i in range(n + 1)])
        ax1.set_yticklabels(['Reference'] + self.names)
        # Add legend
        mismatch_patch = patches.Patch(color='blue', label='Mismatch')
        insertion_patch = patches.Patch(color='red', label='Insertion')
        deletion_patch = patches.Patch(color='green', label='Deletion')
        plt.legend(handles=[mismatch_patch, insertion_patch, deletion_patch],
                   loc=1, ncol=3, mode='expand', borderaxespad=0.)
        plt.show() | 
	python | 
	def plot(self):
        '''Make a summary plot of the alignment and highlight nonmatches.'''
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
        # Constants to use throughout drawing
        n = len(self.results)
        nbases = len(self.aligned_reference)
        barheight = 0.4
        # Vary height of figure based on number of results
        figheight = 3 + 3 * (n - 1)
        fig = plt.figure(figsize=(9, figheight))
        ax1 = fig.add_subplot(111)
        # Plot bars to represent coverage area
        # Reference sequence
        ax1.add_patch(patches.Rectangle((0, 0), nbases, barheight,
                                        facecolor='black'))
        # Results
        for i, report in enumerate(self.nonmatches()):
            j = i + 1
            start, stop = report['coverage']
            patch = patches.Rectangle((start, j), stop - start, barheight,
                                      facecolor='darkgray')
            ax1.add_patch(patch)
            # Draw a vertical line for each type of result
            plt.vlines(report['mismatches'], j, j + barheight,
                       colors='b')
            plt.vlines(report['insertions'], j, j + barheight,
                       colors='r')
            # Terminal trailing deletions shouldn't be added
            deletions = []
            crange = range(*report['coverage'])
            deletions = [idx for idx in report['deletions'] if idx in crange]
            plt.vlines(deletions, j, j + barheight,
                       colors='g')
        ax1.set_xlim((0, nbases))
        ax1.set_ylim((-0.3, n + 1))
        ax1.set_yticks([i + barheight / 2 for i in range(n + 1)])
        ax1.set_yticklabels(['Reference'] + self.names)
        # Add legend
        mismatch_patch = patches.Patch(color='blue', label='Mismatch')
        insertion_patch = patches.Patch(color='red', label='Insertion')
        deletion_patch = patches.Patch(color='green', label='Deletion')
        plt.legend(handles=[mismatch_patch, insertion_patch, deletion_patch],
                   loc=1, ncol=3, mode='expand', borderaxespad=0.)
        plt.show() | 
	Make a summary plot of the alignment and highlight nonmatches. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/sanger.py#L82-L134 | 
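A short sketch of the plot call (same assumed constructor as in the sketch above; requires matplotlib):

import coral

reference = coral.seqio.read_dna('plasmid.gb')        # hypothetical file
reads = [coral.seqio.read_dna('read1.ab1')]
sanger = coral.analysis.Sanger(reference, reads)      # assumed constructor
sanger.plot()   # black reference bar, gray coverage bars, colored ticks for
                # mismatches (blue), insertions (red), and deletions (green)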
| 
	klavinslab/coral | 
	coral/analysis/_sequencing/sanger.py | 
	Sanger._analyze_single | 
	def _analyze_single(self, reference, result):
        '''Report mismatches and indels for a single (aligned) reference and
        result.'''
        # TODO: Recalculate coverage based on reference (e.g. sequencing result
        # longer than template)
        reference_str = str(reference)
        result_str = str(result)
        report = {'mismatches': [], 'insertions': [], 'deletions': []}
        for i, (ref, res) in enumerate(zip(reference_str, result_str)):
            if ref != res:
                # It's a mismatch or indel
                if ref == '-':
                    report['insertions'].append(i)
                elif res == '-':
                    report['deletions'].append(i)
                else:
                    report['mismatches'].append(i)
        # Coverage spans from the first to the last non-gap base of the result
        start = len(result_str) - len(result_str.lstrip('-'))
        stop = len(result_str.rstrip('-'))
        report['coverage'] = [start, stop]
        return report | 
	python | 
	def _analyze_single(self, reference, result):
        '''Report mismatches and indels for a single (aligned) reference and
        result.'''
        # TODO: Recalculate coverage based on reference (e.g. sequencing result
        # longer than template)
        reference_str = str(reference)
        result_str = str(result)
        report = {'mismatches': [], 'insertions': [], 'deletions': []}
        for i, (ref, res) in enumerate(zip(reference_str, result_str)):
            if ref != res:
                # It's a mismatch or indel
                if ref == '-':
                    report['insertions'].append(i)
                elif res == '-':
                    report['deletions'].append(i)
                else:
                    report['mismatches'].append(i)
        # Coverage spans from the first to the last non-gap base of the result
        start = len(result_str) - len(result_str.lstrip('-'))
        stop = len(result_str.rstrip('-'))
        report['coverage'] = [start, stop]
        return report | 
	Report mismatches and indels for a single (aligned) reference and
        result. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/sanger.py#L136-L158 | 
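The classification rule, restated as standalone Python on two hypothetical aligned strings (this is not the coral API, just the same position-by-position logic):

aligned_ref = 'ATGCATGCATGC'
aligned_res = '--GCTTGCATGC'
report = {'mismatches': [], 'insertions': [], 'deletions': []}
for i, (ref, res) in enumerate(zip(aligned_ref, aligned_res)):
    if ref != res:
        if ref == '-':            # gap in the reference: insertion in the read
            report['insertions'].append(i)
        elif res == '-':          # gap in the read: deletion
            report['deletions'].append(i)
        else:                     # both bases present but different: mismatch
            report['mismatches'].append(i)
print(report)   # mismatch at 4, deletions at 0 and 1 (the uncovered leading gap)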
| 
	klavinslab/coral | 
	coral/analysis/_sequencing/sanger.py | 
	Sanger._remove_n | 
	def _remove_n(self):
        '''Remove terminal Ns from sequencing results.'''
        for i, result in enumerate(self.results):
            largest = max(str(result).split('N'), key=len)
            start = result.locate(largest)[0][0]
            stop = start + len(largest)
            if start != stop:
                self.results[i] = self.results[i][start:stop] | 
	python | 
	def _remove_n(self):
        '''Remove terminal Ns from sequencing results.'''
        for i, result in enumerate(self.results):
            largest = max(str(result).split('N'), key=len)
            start = result.locate(largest)[0][0]
            stop = start + len(largest)
            if start != stop:
                self.results[i] = self.results[i][start:stop] | 
	Remove terminal Ns from sequencing results. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/sanger.py#L160-L167 | 
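The trimming rule is simple enough to restate on a bare string (keep the longest N-free stretch):

read = 'NNNATGCGTANNATGCCGTTACGNN'
largest = max(read.split('N'), key=len)
print(largest)   # 'ATGCCGTTACG' - the longest stretch without an N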
| 
	klavinslab/coral | 
	coral/design/_sequence_generation/random_sequences.py | 
	random_dna | 
	def random_dna(n):
    '''Generate a random DNA sequence.
    :param n: Output sequence length.
    :type n: int
    :returns: Random DNA sequence of length n.
    :rtype: coral.DNA
    '''
    return coral.DNA(''.join([random.choice('ATGC') for i in range(n)])) | 
	python | 
	def random_dna(n):
    '''Generate a random DNA sequence.
    :param n: Output sequence length.
    :type n: int
    :returns: Random DNA sequence of length n.
    :rtype: coral.DNA
    '''
    return coral.DNA(''.join([random.choice('ATGC') for i in range(n)])) | 
	Generate a random DNA sequence.
    :param n: Output sequence length.
    :type n: int
    :returns: Random DNA sequence of length n.
    :rtype: coral.DNA | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_sequence_generation/random_sequences.py#L7-L16 | 
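Quick sketch (assumes the function is exported as coral.design.random_dna):

import coral

seq = coral.design.random_dna(25)   # assumed export path
print(str(seq))                     # e.g. 'GATTCCAGTCAAGCTTGACCGTAAC'
print(len(seq))                     # 25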
| 
	klavinslab/coral | 
	coral/design/_sequence_generation/random_sequences.py | 
	random_codons | 
	def random_codons(peptide, frequency_cutoff=0.0, weighted=False, table=None):
    '''Generate randomized codons given a peptide sequence.
    :param peptide: Peptide sequence for which to generate randomized
                    codons.
    :type peptide: coral.Peptide
    :param frequency_cutoff: Relative codon usage cutoff - codons that
                             are rarer will not be used. Frequency is
                             relative to average over all codons for a
                             given amino acid.
    :type frequency_cutoff: float
    :param weighted: Select codons with a probability weighted by their
                     frequency in the table.
    :type weighted: bool
    :param table: Codon frequency table to use. Table should be organized
                  by amino acid, then be a dict of codon: frequency.
                  Only relevant if weighted=True or frequency_cutoff > 0.
                  Tables available:
                  constants.molecular_bio.CODON_FREQ_BY_AA['sc'] (default)
    :type table: dict
    :returns: Randomized sequence of codons (RNA) that code for the input
              peptide.
    :rtype: coral.RNA
    :raises: ValueError if frequency_cutoff is set so high that there are no
             codons available for an amino acid in the input peptide.
    '''
    if table is None:
        table = CODON_FREQ_BY_AA['sc']
    # Process codon table using frequency_cutoff
    new_table = _cutoff(table, frequency_cutoff)
    # Select codons randomly or using weighted distribution
    rna = ''
    for amino_acid in str(peptide):
        codons = new_table[amino_acid.upper()]
        if not codons:
            raise ValueError('No {} codons at freq cutoff'.format(amino_acid))
        if weighted:
            cumsum = []
            running_sum = 0
            for codon, frequency in codons.iteritems():
                running_sum += frequency
                cumsum.append(running_sum)
            random_num = random.uniform(0, max(cumsum))
            for codon, value in zip(codons, cumsum):
                if value > random_num:
                    selection = codon
                    break
        else:
            selection = random.choice(codons.keys())
        rna += selection
    return coral.RNA(rna) | 
	python | 
	def random_codons(peptide, frequency_cutoff=0.0, weighted=False, table=None):
    '''Generate randomized codons given a peptide sequence.
    :param peptide: Peptide sequence for which to generate randomized
                    codons.
    :type peptide: coral.Peptide
    :param frequency_cutoff: Relative codon usage cutoff - codons that
                             are rarer will not be used. Frequency is
                             relative to average over all codons for a
                             given amino acid.
    :type frequency_cutoff: float
    :param weighted: Select codons with a probability weighted by their
                     frequency in the table.
    :type weighted: bool
    :param table: Codon frequency table to use. Table should be organized
                  by amino acid, then be a dict of codon: frequency.
                  Only relevant if weighted=True or frequency_cutoff > 0.
                  Tables available:
                  constants.molecular_bio.CODON_FREQ_BY_AA['sc'] (default)
    :type table: dict
    :returns: Randomized sequence of codons (RNA) that code for the input
              peptide.
    :rtype: coral.RNA
    :raises: ValueError if frequency_cutoff is set so high that there are no
             codons available for an amino acid in the input peptide.
    '''
    if table is None:
        table = CODON_FREQ_BY_AA['sc']
    # Process codon table using frequency_cutoff
    new_table = _cutoff(table, frequency_cutoff)
    # Select codons randomly or using weighted distribution
    rna = ''
    for amino_acid in str(peptide):
        codons = new_table[amino_acid.upper()]
        if not codons:
            raise ValueError('No {} codons at freq cutoff'.format(amino_acid))
        if weighted:
            cumsum = []
            running_sum = 0
            for codon, frequency in codons.iteritems():
                running_sum += frequency
                cumsum.append(running_sum)
            random_num = random.uniform(0, max(cumsum))
            for codon, value in zip(codons, cumsum):
                if value > random_num:
                    selection = codon
                    break
        else:
            selection = random.choice(codons.keys())
        rna += selection
    return coral.RNA(rna) | 
	Generate randomized codons given a peptide sequence.
    :param peptide: Peptide sequence for which to generate randomized
                    codons.
    :type peptide: coral.Peptide
    :param frequency_cutoff: Relative codon usage cutoff - codons that
                             are rarer will not be used. Frequency is
                             relative to average over all codons for a
                             given amino acid.
    :type frequency_cutoff: float
    :param weighted: Select codons with a probability weighted by their
                     frequency in the table.
    :type weighted: bool
    :param table: Codon frequency table to use. Table should be organized
                  by amino acid, then be a dict of codon: frequency.
                  Only relevant if weighted=True or frequency_cutoff > 0.
                  Tables available:
                  constants.molecular_bio.CODON_FREQ_BY_AA['sc'] (default)
    :type table: dict
    :returns: Randomized sequence of codons (RNA) that code for the input
              peptide.
    :rtype: coral.RNA
    :raises: ValueError if frequency_cutoff is set so high that there are no
             codons available for an amino acid in the input peptide. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_sequence_generation/random_sequences.py#L19-L70 | 
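A sketch of reverse-translating a short peptide (assumes a coral.Peptide string constructor and that the function is exported as coral.design.random_codons; the dict.iteritems() and dict.keys() calls above make this Python 2 code):

import coral

peptide = coral.Peptide('MKV')                  # hypothetical 3-residue peptide
rna = coral.design.random_codons(peptide, weighted=True)
print(len(str(rna)))    # 9 - one codon per residue, chosen by codon frequency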
| 
	klavinslab/coral | 
	coral/design/_sequence_generation/random_sequences.py | 
	_cutoff | 
	def _cutoff(table, frequency_cutoff):
    '''Generate new codon frequency table given a mean cutoff.
    :param table: codon frequency table of form {amino acid: codon: frequency}
    :type table: dict
    :param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
    :type frequency_cutoff: float
    :returns: A codon frequency table with some codons removed.
    :rtype: dict
    '''
    new_table = {}
    # IDEA: cutoff should be relative to most-frequent codon, not average?
    for amino_acid, codons in table.iteritems():
        average_cutoff = frequency_cutoff * sum(codons.values()) / len(codons)
        new_table[amino_acid] = {}
        for codon, frequency in codons.iteritems():
            if frequency > average_cutoff:
                new_table[amino_acid][codon] = frequency
    return new_table | 
	python | 
	def _cutoff(table, frequency_cutoff):
    '''Generate new codon frequency table given a mean cutoff.
    :param table: codon frequency table of form {amino acid: codon: frequency}
    :type table: dict
    :param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
    :type frequency_cutoff: float
    :returns: A codon frequency table with some codons removed.
    :rtype: dict
    '''
    new_table = {}
    # IDEA: cutoff should be relative to most-frequent codon, not average?
    for amino_acid, codons in table.iteritems():
        average_cutoff = frequency_cutoff * sum(codons.values()) / len(codons)
        new_table[amino_acid] = {}
        for codon, frequency in codons.iteritems():
            if frequency > average_cutoff:
                new_table[amino_acid][codon] = frequency
    return new_table | 
	Generate new codon frequency table given a mean cutoff.
    :param table: codon frequency table of form {amino acid: codon: frequency}
    :type table: dict
    :param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
    :type frequency_cutoff: float
    :returns: A codon frequency table with some codons removed.
    :rtype: dict | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_sequence_generation/random_sequences.py#L73-L92 | 
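A toy table to illustrate the cutoff rule (codons at or below frequency_cutoff times the per-amino-acid average are dropped). _cutoff is a private helper, so the import below just uses the module location shown above; like the rest of the module it relies on Python 2 iteritems():

from coral.design._sequence_generation.random_sequences import _cutoff

table = {'K': {'AAA': 0.58, 'AAG': 0.42},
         'W': {'UGG': 1.0}}
print(_cutoff(table, 0.9))
# For K the cutoff is 0.9 * 0.5 = 0.45, so AAG (0.42) is dropped and AAA kept;
# for W the cutoff is 0.9 * 1.0 = 0.9, so UGG (1.0) survives.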
| 
	klavinslab/coral | 
	coral/database/_entrez.py | 
	fetch_genome | 
	def fetch_genome(genome_id):
    '''Acquire a genome from Entrez
    '''
    # TODO: Can strandedness be found in fetched genome attributes?
    # TODO: skip read/write step?
    # Using a dummy email for now - does this violate NCBI guidelines?
    email = '[email protected]'
    Entrez.email = email
    print 'Downloading Genome...'
    handle = Entrez.efetch(db='nucleotide', id=str(genome_id), rettype='gb',
                           retmode='text')
    print 'Genome Downloaded...'
    tmpfile = os.path.join(mkdtemp(), 'tmp.gb')
    with open(tmpfile, 'w') as f:
        f.write(handle.read())
    genome = coral.seqio.read_dna(tmpfile)
    return genome | 
	python | 
	def fetch_genome(genome_id):
    '''Acquire a genome from Entrez
    '''
    # TODO: Can strandedness be found in fetched genome attributes?
    # TODO: skip read/write step?
    # Using a dummy email for now - does this violate NCBI guidelines?
    email = '[email protected]'
    Entrez.email = email
    print 'Downloading Genome...'
    handle = Entrez.efetch(db='nucleotide', id=str(genome_id), rettype='gb',
                           retmode='text')
    print 'Genome Downloaded...'
    tmpfile = os.path.join(mkdtemp(), 'tmp.gb')
    with open(tmpfile, 'w') as f:
        f.write(handle.read())
    genome = coral.seqio.read_dna(tmpfile)
    return genome | 
	Acquire a genome from Entrez | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/database/_entrez.py#L15-L34 | 
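A sketch of a small fetch (needs network access and Biopython; assumes the function is exported as coral.database.fetch_genome; the accession is just an example):

import coral

phix = coral.database.fetch_genome('NC_001422.1')   # phiX174, ~5.4 kb
print(len(phix))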
| 
	klavinslab/coral | 
	coral/analysis/_structure/viennarna.py | 
	ViennaRNA.cofold | 
	def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False,
               nogu=False, noclosinggu=False, constraints=None,
               canonicalbponly=False, partition=-1, pfscale=None, gquad=False):
        '''Run the RNAcofold command and retrieve the result in a dictionary.
        :param strand1: Strand 1 for running RNAcofold.
        :type strand1: coral.DNA or coral.RNA
        :param strand2: Strand 2 for running RNAcofold.
        :type strand2: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Calculates the partition function for the sequence.
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict
        '''
        cmd_args = []
        cmd_kwargs = {'--temp=': str(temp)}
        cmd_kwargs['--dangles='] = dangles
        if nolp:
            cmd_args.append('--noLP')
        if nogu:
            cmd_args.append('--noGU')
        if noclosinggu:
            cmd_args.append('--noClosingGU')
        if constraints is not None:
            cmd_args.append('--constraint')
            if canonicalbponly:
                cmd_args.append('--canonicalBPonly')
        if partition:
            cmd_args.append('--partfunc')
        if pfscale is not None:
            cmd_kwargs['pfScale'] = float(pfscale)
        if gquad:
            cmd_args.append('--gquad')
        inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))]
        if constraints is not None:
            inputs.append(constraints)
        rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs)
        # Process the output
        output = {}
        lines = rnafold_output.splitlines()
        # Line 1 is the name of the sequence input, line 2 is the sequence
        lines.pop(0)
        lines.pop(0)
        # Line 3 is the dotbracket + mfe for strand1
        line3 = lines.pop(0)
        output['dotbracket'] = self._lparse(line3, '^(.*) \(')
        output['mfe'] = float(self._lparse(line3, ' \((.*)\)$'))
        # Optional outputs
        if partition:
            # Line 4 is 'a coarse representation of the pair probabilities' and
            # the ensemble free energy
            line4 = lines.pop(0)
            output['coarse'] = self._lparse(line4, '^(.*) \[')
            output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$'))
            # Line 5 is the frequency of the MFE structure in the ensemble and
            # the delta G of binding
            line5 = lines.pop(0)
            output['frequency'] = float(self._lparse(line5, 'ensemble (.*),'))
            output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$'))
            # Parse the postscript file (the only place the probability matrix
            # is)
            with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f:
                pattern = 'start of base pair probability data\n(.*)\nshowpage'
                dotplot_file = f.read()
                dotplot_data = re.search(pattern, dotplot_file,
                                         flags=re.DOTALL).group(1).split('\n')
                # Dimension of the dotplot - compares seq1, seq2 to self and
                # to each other (concatenation of seq1 and seq2 = axis)
                dim = len(strand1) + len(strand2)
                ensemble_probs = np.zeros((dim, dim))
                optimal_probs = np.zeros((dim, dim))
                for point in dotplot_data:
                    point_split = point.split(' ')
                    # Use zero indexing
                    i = int(point_split[0]) - 1
                    j = int(point_split[1]) - 1
                    sqprob = float(point_split[2])
                    probtype = point_split[3]
                    if probtype == 'ubox':
                        ensemble_probs[i][j] = sqprob**2
                    else:
                        optimal_probs[i][j] = sqprob**2
                output['ensemble_matrix'] = ensemble_probs
                output['optimal_matrix'] = optimal_probs
        return output | 
	python | 
	def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False,
               nogu=False, noclosinggu=False, constraints=None,
               canonicalbponly=False, partition=-1, pfscale=None, gquad=False):
        '''Run the RNAcofold command and retrieve the result in a dictionary.
        :param strand1: Strand 1 for running RNAcofold.
        :type strand1: coral.DNA or coral.RNA
        :param strand2: Strand 2 for running RNAcofold.
        :type strand2: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Calculates the partition function for the sequence.
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict
        '''
        cmd_args = []
        cmd_kwargs = {'--temp=': str(temp)}
        cmd_kwargs['--dangles='] = dangles
        if nolp:
            cmd_args.append('--noLP')
        if nogu:
            cmd_args.append('--noGU')
        if noclosinggu:
            cmd_args.append('--noClosingGU')
        if constraints is not None:
            cmd_args.append('--constraint')
            if canonicalbponly:
                cmd_args.append('--canonicalBPonly')
        if partition:
            cmd_args.append('--partfunc')
        if pfscale is not None:
            cmd_kwargs['pfScale'] = float(pfscale)
        if gquad:
            cmd_args.append('--gquad')
        inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))]
        if constraints is not None:
            inputs.append(constraints)
        rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs)
        # Process the output
        output = {}
        lines = rnafold_output.splitlines()
        # Line 1 is the name of the sequence input, line 2 is the sequence
        lines.pop(0)
        lines.pop(0)
        # Line 3 is the dotbracket + mfe for strand1
        line3 = lines.pop(0)
        output['dotbracket'] = self._lparse(line3, '^(.*) \(')
        output['mfe'] = float(self._lparse(line3, ' \((.*)\)$'))
        # Optional outputs
        if partition:
            # Line 4 is 'a coarse representation of the pair probabilities' and
            # the ensemble free energy
            line4 = lines.pop(0)
            output['coarse'] = self._lparse(line4, '^(.*) \[')
            output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$'))
            # Line 5 is the frequency of the MFE structure in the ensemble and
            # the delta G of binding
            line5 = lines.pop(0)
            output['frequency'] = float(self._lparse(line5, 'ensemble (.*),'))
            output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$'))
            # Parse the postscript file (the only place the probability matrix
            # is)
            with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f:
                pattern = 'start of base pair probability data\n(.*)\nshowpage'
                dotplot_file = f.read()
                dotplot_data = re.search(pattern, dotplot_file,
                                         flags=re.DOTALL).group(1).split('\n')
                # Dimension of the dotplot - compares seq1, seq2 to self and
                # to each other (concatenation of seq1 and seq2 = axis)
                dim = len(strand1) + len(strand2)
                ensemble_probs = np.zeros((dim, dim))
                optimal_probs = np.zeros((dim, dim))
                for point in dotplot_data:
                    point_split = point.split(' ')
                    # Use zero indexing
                    i = int(point_split[0]) - 1
                    j = int(point_split[1]) - 1
                    sqprob = float(point_split[2])
                    probtype = point_split[3]
                    if probtype == 'ubox':
                        ensemble_probs[i][j] = sqprob**2
                    else:
                        optimal_probs[i][j] = sqprob**2
                output['ensemble_matrix'] = ensemble_probs
                output['optimal_matrix'] = optimal_probs
        return output | 
	Run the RNAcofold command and retrieve the result in a dictionary.
        :param strand1: Strand 1 for running RNAcofold.
        :type strand1: coral.DNA or coral.RNA
        :param strand2: Strand 2 for running RNAcofold.
        :type strand2: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Calculates the partition function for the sequence.
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/viennarna.py#L18-L140 | 
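A minimal cofold sketch (requires the RNAcofold binary on PATH; the class is assumed to be exported as coral.analysis.ViennaRNA and to take no constructor arguments; sequences are hypothetical). Note that partition defaults to -1, which is truthy, so pass partition=False for an MFE-only run:

import coral

vienna = coral.analysis.ViennaRNA()          # assumed export and constructor
a = coral.RNA('GGGGAAAACCCC')
b = coral.RNA('GGGGUUUUCCCC')
result = vienna.cofold(a, b, temp=37.0, partition=False)
print(result['dotbracket'])   # structure for the two strands joined by '&'
print(result['mfe'])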
| 
	klavinslab/coral | 
	coral/analysis/_structure/viennarna.py | 
	ViennaRNA.fold | 
	def fold(self, strand, temp=37.0, dangles=2, nolp=False, nogu=False,
             noclosinggu=False, constraints=None, canonicalbponly=False,
             partition=False, pfscale=None, imfeelinglucky=False, gquad=False):
        '''Run the RNAfold command and retrieve the result in a dictionary.
        :param strand: The DNA or RNA sequence on which to run RNAfold.
        :type strand: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Generates the partition function, generating a coarse
                          grain structure ('coarse') in the format described at
                          http://www.itc.univie.ac.at/~ivo/RNA/RNAlib/PF-Fold.h
                          tml, the ensemble free energy ('ensemble'), the
                          centroid structure ('centroid'), the free energy of
                          the centroid structure ('centroid_fe'), and its
                          distance from the ensemble ('centroid_d').
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param imfeelinglucky: Returns the one secondary structure from the
                               Boltzmann equilibrium according to its
                               probability in the ensemble.
        :type imfeelinglucky: bool
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict
        '''
        cmd_args = []
        cmd_kwargs = {'--temp=': str(temp)}
        cmd_kwargs['--dangles='] = dangles
        if nolp:
            cmd_args.append('--noLP')
        if nogu:
            cmd_args.append('--noGU')
        if noclosinggu:
            cmd_args.append('--noClosingGU')
        if constraints is not None:
            cmd_args.append('--constraint')
            if canonicalbponly:
                cmd_args.append('--canonicalBPonly')
        if partition:
            cmd_args.append('--partfunc')
        if pfscale is not None:
            cmd_kwargs['pfScale'] = float(pfscale)
        if gquad:
            cmd_args.append('--gquad')
        inputs = [str(strand)]
        if constraints is not None:
            inputs.append(constraints)
        if strand.circular:
            cmd_args.append('--circ')
        rnafold_output = self._run('RNAfold', inputs, cmd_args, cmd_kwargs)
        # Process the output
        output = {}
        lines = rnafold_output.splitlines()
        # Line 1 is the sequence as RNA
        lines.pop(0)
        # Line 2 is the dotbracket + mfe
        line2 = lines.pop(0)
        output['dotbracket'] = self._lparse(line2, '^(.*) \(')
        output['mfe'] = float(self._lparse(line2, ' \((.*)\)$'))
        # Optional outputs
        if partition:
            # Line 3 is 'a coarse representation of the pair probabilities' and
            # the ensemble free energy
            line3 = lines.pop(0)
            output['coarse'] = self._lparse(line3, '^(.*) \[')
            output['ensemble'] = float(self._lparse(line3, ' \[(.*)\]$'))
            # Line 4 is the centroid structure, its free energy, and distance
            # to the ensemble
            line4 = lines.pop(0)
            output['centroid'] = self._lparse(line4, '^(.*) \{')
            output['centroid_fe'] = float(self._lparse(line4, '^.*{(.*) d'))
            output['centroid_d'] = float(self._lparse(line4, 'd=(.*)}$'))
        return output | 
	python | 
	def fold(self, strand, temp=37.0, dangles=2, nolp=False, nogu=False,
             noclosinggu=False, constraints=None, canonicalbponly=False,
             partition=False, pfscale=None, imfeelinglucky=False, gquad=False):
        '''Run the RNAfold command and retrieve the result in a dictionary.
        :param strand: The DNA or RNA sequence on which to run RNAfold.
        :type strand: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Generates the partition function, generating a coarse
                          grain structure ('coarse') in the format described at
                          http://www.itc.univie.ac.at/~ivo/RNA/RNAlib/PF-Fold.h
                          tml, the ensemble free energy ('ensemble'), the
                          centroid structure ('centroid'), the free energy of
                          the centroid structure ('centroid_fe'), and its
                          distance from the ensemble ('centroid_d').
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param imfeelinglucky: Returns the one secondary structure from the
                               Boltzmann equilibrium according to its
                               probability in the ensemble.
        :type imfeelinglucky: bool
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict
        '''
        cmd_args = []
        cmd_kwargs = {'--temp=': str(temp)}
        cmd_kwargs['--dangles='] = dangles
        if nolp:
            cmd_args.append('--noLP')
        if nogu:
            cmd_args.append('--noGU')
        if noclosinggu:
            cmd_args.append('--noClosingGU')
        if constraints is not None:
            cmd_args.append('--constraint')
            if canonicalbponly:
                cmd_args.append('--canonicalBPonly')
        if partition:
            cmd_args.append('--partfunc')
        if pfscale is not None:
            cmd_kwargs['pfScale'] = float(pfscale)
        if gquad:
            cmd_args.append('--gquad')
        inputs = [str(strand)]
        if constraints is not None:
            inputs.append(constraints)
        if strand.circular:
            cmd_args.append('--circ')
        rnafold_output = self._run('RNAfold', inputs, cmd_args, cmd_kwargs)
        # Process the output
        output = {}
        lines = rnafold_output.splitlines()
        # Line 1 is the sequence as RNA
        lines.pop(0)
        # Line 2 is the dotbracket + mfe
        line2 = lines.pop(0)
        output['dotbracket'] = self._lparse(line2, '^(.*) \(')
        output['mfe'] = float(self._lparse(line2, ' \((.*)\)$'))
        # Optional outputs
        if partition:
            # Line 3 is 'a coarse representation of the pair probabilities' and
            # the ensemble free energy
            line3 = lines.pop(0)
            output['coarse'] = self._lparse(line3, '^(.*) \[')
            output['ensemble'] = float(self._lparse(line3, ' \[(.*)\]$'))
            # Line 4 is the centroid structure, its free energy, and distance
            # to the ensemble
            line4 = lines.pop(0)
            output['centroid'] = self._lparse(line4, '^(.*) \{')
            output['centroid_fe'] = float(self._lparse(line4, '^.*{(.*) d'))
            output['centroid_d'] = float(self._lparse(line4, 'd=(.*)}$'))
        return output | 
	Run the RNAfold command and retrieve the result in a dictionary.
        :param strand: The DNA or RNA sequence on which to run RNAfold.
        :type strand: coral.DNA or coral.RNA
        :param temp: Temperature at which to run the calculations.
        :type temp: float
        :param dangles: How to treat dangling end energies. Set to 0 to ignore
                        dangling ends. Set to 1 to limit unpaired bases to
                        at most one dangling end (default for MFE calc). Set to
                        2 (the default) to remove the limit in 1. Set to 3 to
                        allow coaxial stacking of adjacent helices in
                        multi-loops.
        :type dangles: int
        :param nolp: Produce structures without lonely pairs (isolated single
                     base pairs).
        :type nolp: bool
        :param nogu: Do not allow GU pairs.
        :type nogu: bool
        :param noclosinggu: Do not allow GU pairs at the end of helices.
        :type noclosinggu: bool
        :param constraints: Any structural constraints to use. Format is
                            defined at
                            http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
        :type constraints: str
        :param canonicalbponly: Remove non-canonical base pairs from the
                                structure constraint (if applicable).
        :type canonicalbponly: bool
        :param partition: Generates the partition function, generating a coarse
                          grain structure ('coarse') in the format described at
                          http://www.itc.univie.ac.at/~ivo/RNA/RNAlib/PF-Fold.h
                          tml, the ensemble free energy ('ensemble'), the
                          centroid structure ('centroid'), the free energy of
                          the centroid structure ('centroid_fe'), and its
                          distance from the ensemble ('centroid_d').
        :type partition: int
        :param pfscale: Scaling factor for the partition function.
        :type pfscale: float
        :param imfeelinglucky: Returns the one secondary structure from the
                               Boltzmann equilibrium according to its
                               probability in the ensemble.
        :type imfeelinglucky: bool
        :param gquad: Incorporate G-Quadruplex formation into the structure
                      prediction.
        :type gquad: bool
        :returns: Dictionary of calculated values, defaulting to values of MFE
                  ('mfe': float) and dotbracket structure ('dotbracket': str).
                  More keys are added depending on keyword arguments.
        :rtype: dict | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/viennarna.py#L143-L248 | 
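The single-strand fold call looks much the same (RNAfold binary on PATH; same assumed export). Passing partition=True additionally returns the 'coarse', 'ensemble', 'centroid', 'centroid_fe', and 'centroid_d' keys parsed above:

import coral

vienna = coral.analysis.ViennaRNA()          # assumed export and constructor
hairpin = coral.RNA('GGGGAAAACCCC')
folded = vienna.fold(hairpin, temp=37.0)
print(folded['dotbracket'], folded['mfe'])   # e.g. '((((....))))' and its MFE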
| 
	klavinslab/coral | 
	coral/analysis/_structure/dimers.py | 
	dimers | 
	def dimers(primer1, primer2, concentrations=[5e-7, 3e-11]):
    '''Calculate expected fraction of primer dimers.
    :param primer1: Forward primer.
    :type primer1: coral.DNA
    :param primer2: Reverse primer.
    :type primer2: coral.DNA
    :param concentrations: list of concentrations for primers and the
                           template. Defaults are those for PCR with 1kb
                           template.
    :type concentrations: list
    :returns: Fraction of dimers versus the total amount of primer added.
    :rtype: float
    '''
    # It is not reasonable (yet) to use a long template for doing these
    # computations directly, as NUPACK does an exhaustive calculation and
    # would take too long without a cluster.
    # Instead, this function compares primer-primer binding to
    # primer-complement binding
    # Simulate binding of template vs. primers
    nupack = coral.analysis.NUPACK([primer1.primer(), primer2.primer(),
                                    primer1.primer().reverse_complement(),
                                    primer2.primer().reverse_complement()])
    # Include reverse complement concentration
    primer_concs = [concentrations[0]] * 2
    template_concs = [concentrations[1]] * 2
    concs = primer_concs + template_concs
    nupack_concs = nupack.concentrations(2, conc=concs)
    dimer_conc = nupack_concs['concentrations'][5]
    #primer1_template = nupack_concs['concentrations'][6]
    #primer2_template = nupack_concs['concentrations'][10]
    return dimer_conc / concs[0] | 
	python | 
	def dimers(primer1, primer2, concentrations=[5e-7, 3e-11]):
    '''Calculate expected fraction of primer dimers.
    :param primer1: Forward primer.
    :type primer1: coral.DNA
    :param primer2: Reverse primer.
    :type primer2: coral.DNA
    :param concentrations: list of concentrations for primers and the
                           template. Defaults are those for PCR with 1kb
                           template.
    :type concentrations: list
    :returns: Fraction of dimers versus the total amount of primer added.
    :rtype: float
    '''
    # It is not reasonable (yet) to use a long template for doing these
    # computations directly, as NUPACK does an exhaustive calculation and
    # would take too long without a cluster.
    # Instead, this function compares primer-primer binding to
    # primer-complement binding
    # Simulate binding of template vs. primers
    nupack = coral.analysis.NUPACK([primer1.primer(), primer2.primer(),
                                    primer1.primer().reverse_complement(),
                                    primer2.primer().reverse_complement()])
    # Include reverse complement concentration
    primer_concs = [concentrations[0]] * 2
    template_concs = [concentrations[1]] * 2
    concs = primer_concs + template_concs
    nupack_concs = nupack.concentrations(2, conc=concs)
    dimer_conc = nupack_concs['concentrations'][5]
    #primer1_template = nupack_concs['concentrations'][6]
    #primer2_template = nupack_concs['concentrations'][10]
    return dimer_conc / concs[0] | 
	Calculate expected fraction of primer dimers.
    :param primer1: Forward primer.
    :type primer1: coral.DNA
    :param primer2: Reverse primer.
    :type primer2: coral.DNA
    :param concentrations: list of concentrations for primers and the
                           template. Defaults are those for PCR with 1kb
                           template.
    :type concentrations: list
    :returns: Fraction of dimers versus the total amount of primer added.
    :rtype: float | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/dimers.py#L5-L40 | 
| 
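A minimal usage sketch for dimers(), assuming coral exposes it as coral.analysis.dimers and that a working NUPACK installation is available; the template sequence below is hypothetical and only long enough for illustration:

import coral

# Hypothetical template; real templates are usually much longer.
template = coral.DNA('ATGGCTAGCTTACGATCGATCGGCTAGCTAGGATCCAGGCTTACGATCGAT'
                     'CGGCTAGCTAGGATCCAGGCTTACGATCGATCGGCTAGCTAGGATCCATAA')
fwd = coral.design.primer(template)
rev = coral.design.primer(template.reverse_complement())

# Fraction of added primer expected to form primer-primer dimers, using the
# default concentrations (5e-7 M primers, 3e-11 M template).
print(coral.analysis.dimers(fwd, rev))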
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	read_dna | 
	def read_dna(path):
    '''Read DNA from file. Uses BioPython and coerces to coral format.
    :param path: Full path to input file.
    :type path: str
    :returns: DNA sequence.
    :rtype: coral.DNA
    '''
    filename, ext = os.path.splitext(os.path.split(path)[-1])
    genbank_exts = ['.gb', '.ape']
    fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
    abi_exts = ['.abi', '.ab1']
    if any([ext == extension for extension in genbank_exts]):
        file_format = 'genbank'
    elif any([ext == extension for extension in fasta_exts]):
        file_format = 'fasta'
    elif any([ext == extension for extension in abi_exts]):
        file_format = 'abi'
    else:
        raise ValueError('File format not recognized.')
    seq = SeqIO.read(path, file_format)
    dna = coral.DNA(str(seq.seq))
    if seq.name == '.':
        dna.name = filename
    else:
        dna.name = seq.name
    # Features
    for feature in seq.features:
        try:
            dna.features.append(_seqfeature_to_coral(feature))
        except FeatureNameError:
            pass
    dna.features = sorted(dna.features, key=lambda feature: feature.start)
    # Used to use data_file_division, but it's inconsistent (not always the
    # molecule type)
    dna.circular = False
    with open(path) as f:
        first_line = f.read().split()
        for word in first_line:
            if word == 'circular':
                dna.circular = True
    return dna | 
	python | 
	def read_dna(path):
    '''Read DNA from file. Uses BioPython and coerces to coral format.
    :param path: Full path to input file.
    :type path: str
    :returns: DNA sequence.
    :rtype: coral.DNA
    '''
    filename, ext = os.path.splitext(os.path.split(path)[-1])
    genbank_exts = ['.gb', '.ape']
    fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
    abi_exts = ['.abi', '.ab1']
    if any([ext == extension for extension in genbank_exts]):
        file_format = 'genbank'
    elif any([ext == extension for extension in fasta_exts]):
        file_format = 'fasta'
    elif any([ext == extension for extension in abi_exts]):
        file_format = 'abi'
    else:
        raise ValueError('File format not recognized.')
    seq = SeqIO.read(path, file_format)
    dna = coral.DNA(str(seq.seq))
    if seq.name == '.':
        dna.name = filename
    else:
        dna.name = seq.name
    # Features
    for feature in seq.features:
        try:
            dna.features.append(_seqfeature_to_coral(feature))
        except FeatureNameError:
            pass
    dna.features = sorted(dna.features, key=lambda feature: feature.start)
    # Used to use data_file_division, but it's inconsistent (not always the
    # molecule type)
    dna.circular = False
    with open(path) as f:
        first_line = f.read().split()
        for word in first_line:
            if word == 'circular':
                dna.circular = True
    return dna | 
	Read DNA from file. Uses BioPython and coerces to coral format.
    :param path: Full path to input file.
    :type path: str
    :returns: DNA sequence.
    :rtype: coral.DNA | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L22-L69 | 
| 
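A short usage sketch for read_dna(), assuming it is exported as coral.seqio.read_dna; 'plasmid.gb' is a hypothetical GenBank file:

import coral

# The format is inferred from the extension (.gb/.ape, .fasta/.fa/.fsa/.seq,
# .abi/.ab1).
plasmid = coral.seqio.read_dna('plasmid.gb')
print(plasmid.name, len(plasmid), plasmid.circular)
for feature in plasmid.features:
    print(feature.name, feature.start, feature.stop)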
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	read_sequencing | 
	def read_sequencing(directory):
    '''Read .seq and .abi/.ab1 results files from a dir.
    :param directory: Path to directory containing sequencing files.
    :type directory: str
    :returns: A list of DNA sequences.
    :rtype: coral.DNA list
    '''
    dirfiles = os.listdir(directory)
    seq_exts = ['.seq', '.abi', '.ab1']
    # Exclude files that aren't sequencing results
    seq_paths = [x for x in dirfiles if os.path.splitext(x)[1] in seq_exts]
    paths = [os.path.join(directory, x) for x in seq_paths]
    sequences = [read_dna(x) for x in paths]
    return sequences | 
	python | 
	def read_sequencing(directory):
    '''Read .seq and .abi/.ab1 results files from a dir.
    :param directory: Path to directory containing sequencing files.
    :type directory: str
    :returns: A list of DNA sequences.
    :rtype: coral.DNA list
    '''
    dirfiles = os.listdir(directory)
    seq_exts = ['.seq', '.abi', '.ab1']
    # Exclude files that aren't sequencing results
    seq_paths = [x for x in dirfiles if os.path.splitext(x)[1] in seq_exts]
    paths = [os.path.join(directory, x) for x in seq_paths]
    sequences = [read_dna(x) for x in paths]
    return sequences | 
	Read .seq and .abi/.ab1 results files from a dir.
    :param directory: Path to directory containing sequencing files.
    :type directory: str
    :returns: A list of DNA sequences.
    :rtype: coral.DNA list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L72-L88 | 
| 
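A usage sketch for read_sequencing(), assuming the same coral.seqio export; the directory path is hypothetical:

import coral

# Every .seq/.abi/.ab1 file in the directory becomes a coral.DNA object.
reads = coral.seqio.read_sequencing('sequencing_results/')
print('{} reads'.format(len(reads)))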
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	write_dna | 
	def write_dna(dna, path):
    '''Write DNA to a file (genbank or fasta).
    :param dna: DNA sequence to write to file
    :type dna: coral.DNA
    :param path: file path to write. Has to be genbank or fasta file.
    :type path: str
    '''
    # Check if path filetype is valid, remember for later
    ext = os.path.splitext(path)[1]
    if ext == '.gb' or ext == '.ape':
        filetype = 'genbank'
    elif ext == '.fa' or ext == '.fasta':
        filetype = 'fasta'
    else:
        raise ValueError('Only genbank or fasta files are supported.')
    # Convert features to Biopython form
    # Information lost on conversion:
    #     specificity of feature type
    #     strandedness
    #     topology
    features = []
    for feature in dna.features:
        features.append(_coral_to_seqfeature(feature))
    # Biopython doesn't like 'None' here
    # FIXME: this is a legacy feature - remove?
    bio_id = dna.id if hasattr(dna, 'id') else ''
    # Maximum length of name is 16
    seq = SeqRecord(Seq(str(dna), alphabet=ambiguous_dna), id=bio_id,
                    name=dna.name[0:16].replace(' ', '_'), features=features,
                    description=dna.name)
    if dna.circular:
        seq.annotations['data_file_division'] = 'circular'
    else:
        seq.annotations['data_file_division'] = 'linear'
    if filetype == 'genbank':
        SeqIO.write(seq, path, 'genbank')
    elif filetype == 'fasta':
        SeqIO.write(seq, path, 'fasta') | 
	python | 
	def write_dna(dna, path):
    '''Write DNA to a file (genbank or fasta).
    :param dna: DNA sequence to write to file
    :type dna: coral.DNA
    :param path: file path to write. Has to be genbank or fasta file.
    :type path: str
    '''
    # Check if path filetype is valid, remember for later
    ext = os.path.splitext(path)[1]
    if ext == '.gb' or ext == '.ape':
        filetype = 'genbank'
    elif ext == '.fa' or ext == '.fasta':
        filetype = 'fasta'
    else:
        raise ValueError('Only genbank or fasta files are supported.')
    # Convert features to Biopython form
    # Information lost on conversion:
    #     specificity of feature type
    #     strandedness
    #     topology
    features = []
    for feature in dna.features:
        features.append(_coral_to_seqfeature(feature))
    # Biopython doesn't like 'None' here
    # FIXME: this is a legacy feature - remove?
    bio_id = dna.id if hasattr(dna, 'id') else ''
    # Maximum length of name is 16
    seq = SeqRecord(Seq(str(dna), alphabet=ambiguous_dna), id=bio_id,
                    name=dna.name[0:16].replace(' ', '_'), features=features,
                    description=dna.name)
    if dna.circular:
        seq.annotations['data_file_division'] = 'circular'
    else:
        seq.annotations['data_file_division'] = 'linear'
    if filetype == 'genbank':
        SeqIO.write(seq, path, 'genbank')
    elif filetype == 'fasta':
        SeqIO.write(seq, path, 'fasta') | 
	Write DNA to a file (genbank or fasta).
    :param dna: DNA sequence to write to file
    :type dna: coral.DNA
    :param path: file path to write. Has to be genbank or fasta file.
    :type path: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L91-L132 | 
| 
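A sketch of writing a sequence with write_dna(), assuming the coral.seqio export; the sequence, name, and output path are hypothetical:

import coral

insert = coral.DNA('ATGAAAGGTGAAGAACTGTTCACCGGTTAA')
insert.name = 'demo_insert'
insert.circular = False
# The extension picks the output format: .gb/.ape -> genbank, .fa/.fasta -> fasta.
coral.seqio.write_dna(insert, 'demo_insert.gb')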
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	write_primers | 
	def write_primers(primer_list, path, names=None, notes=None):
    '''Write a list of primers out to a csv file. The first three columns are
    compatible with the current IDT order form (name, sequence, notes). By
    default there are no notes, which is an optional parameter.
    :param primer_list: A list of primers.
    :type primer_list: coral.Primer list
    :param path: A path to the csv you want to write.
    :type path: str
    :param names: A list of strings to name each oligo. Must be the same length
                  as the primer_list.
    :type names: str list
    :param notes: A list of strings to provide a note for each oligo. Must be
                  the same length as the primer_list.
    :type notes: str list
    '''
    # Check for notes and names having the right length, apply them to primers
    if names is not None:
        if len(names) != len(primer_list):
            names_msg = 'Mismatch in number of names and primers.'
            raise PrimerAnnotationError(names_msg)
        for i, name in enumerate(names):
            primer_list[i].name = name
    if notes is not None:
        if len(notes) != len(primer_list):
            notes_msg = 'Mismatch in number of notes and primers.'
            raise PrimerAnnotationError(notes_msg)
        for i, note in enumerate(notes):
            primer_list[i].note = note
    # Write to csv
    with open(path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['name', 'sequence', 'notes'])
        for primer in primer_list:
            string_rep = str(primer.overhang).lower() + str(primer.anneal)
            writer.writerow([primer.name, string_rep, primer.note]) | 
	python | 
	def write_primers(primer_list, path, names=None, notes=None):
    '''Write a list of primers out to a csv file. The first three columns are
    compatible with the current IDT order form (name, sequence, notes). By
    default there are no notes, which is an optional parameter.
    :param primer_list: A list of primers.
    :type primer_list: coral.Primer list
    :param path: A path to the csv you want to write.
    :type path: str
    :param names: A list of strings to name each oligo. Must be the same length
                  as the primer_list.
    :type names: str list
    :param notes: A list of strings to provide a note for each oligo. Must be
                  the same length as the primer_list.
    :type notes: str list
    '''
    # Check for notes and names having the right length, apply them to primers
    if names is not None:
        if len(names) != len(primer_list):
            names_msg = 'Mismatch in number of names and primers.'
            raise PrimerAnnotationError(names_msg)
        for i, name in enumerate(names):
            primer_list[i].name = name
    if notes is not None:
        if len(notes) != len(primer_list):
            notes_msg = 'Mismatch in number of notes and primers.'
            raise PrimerAnnotationError(notes_msg)
        for i, note in enumerate(notes):
            primer_list[i].note = note
    # Write to csv
    with open(path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['name', 'sequence', 'notes'])
        for primer in primer_list:
            string_rep = str(primer.overhang).lower() + str(primer.anneal)
            writer.writerow([primer.name, string_rep, primer.note]) | 
	Write a list of primers out to a csv file. The first three columns are
    compatible with the current IDT order form (name, sequence, notes). By
    default there are no notes, which is an optional parameter.
    :param primer_list: A list of primers.
    :type primer_list: coral.Primer list
    :param path: A path to the csv you want to write.
    :type path: str
    :param names: A list of strings to name each oligo. Must be the same length
                  as the primer_list.
    :type names: str list
    :param notes: A list of strings to provide a note for each oligo. Must be
                  the same length as the primer_list.
    :type notes: str list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L135-L172 | 
| 
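A sketch of exporting primers with write_primers(), assuming the coral.seqio export; the template, names, and notes are hypothetical:

import coral

template = coral.DNA('ATGGCTAGCTTACGATCGATCGGCTAGCTAGGATCCAGGCTTACGATCGTAA')
fwd = coral.design.primer(template)
rev = coral.design.primer(template.reverse_complement())
# Writes an IDT-compatible CSV with name, sequence, and notes columns.
coral.seqio.write_primers([fwd, rev], 'order.csv',
                          names=['demo_F', 'demo_R'],
                          notes=['forward', 'reverse'])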
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	_process_feature_type | 
	def _process_feature_type(feature_type, bio_to_coral=True):
    '''Translate genbank feature types into usable ones (currently identical).
    The feature table is derived from the official genbank spec (gbrel.txt)
    available at http://www.insdc.org/documents/feature-table
    :param feature_type: feature to convert
    :type feature_type: str
    :param bio_to_coral: from Biopython to coral (True) or the other direction
                         (False)
    :type bio_to_coral: bool
    :returns: coral version of genbank feature_type, or vice-versa.
    :rtype: str
    '''
    err_msg = 'Unrecognized feature type: {}'.format(feature_type)
    if bio_to_coral:
        try:
            name = coral.constants.genbank.TO_CORAL[feature_type]
        except KeyError:
            raise ValueError(err_msg)
    else:
        try:
            name = coral.constants.genbank.TO_BIO[feature_type]
        except KeyError:
            raise ValueError(err_msg)
    return name | 
	python | 
	def _process_feature_type(feature_type, bio_to_coral=True):
    '''Translate genbank feature types into usable ones (currently identical).
    The feature table is derived from the official genbank spec (gbrel.txt)
    available at http://www.insdc.org/documents/feature-table
    :param feature_type: feature to convert
    :type feature_type: str
    :param bio_to_coral: from Biopython to coral (True) or the other direction
                         (False)
    :type bio_to_coral: bool
    :returns: coral version of genbank feature_type, or vice-versa.
    :rtype: str
    '''
    err_msg = 'Unrecognized feature type: {}'.format(feature_type)
    if bio_to_coral:
        try:
            name = coral.constants.genbank.TO_CORAL[feature_type]
        except KeyError:
            raise ValueError(err_msg)
    else:
        try:
            name = coral.constants.genbank.TO_BIO[feature_type]
        except KeyError:
            raise ValueError(err_msg)
    return name | 
	Translate genbank feature types into usable ones (currently identical).
    The feature table is derived from the official genbank spec (gbrel.txt)
    available at http://www.insdc.org/documents/feature-table
    :param feature_type: feature to convert
    :type feature_type: str
    :param bio_to_coral: from Biopython to coral (True) or the other direction
                         (False)
    :type bio_to_coral: bool
    :returns: coral version of genbank feature_type, or vice-versa.
    :rtype: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L175-L201 | 
| 
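A small illustration of the two lookup directions, assuming 'CDS' appears in both the TO_CORAL and TO_BIO tables:

# Biopython/genbank name -> coral name (default direction)
print(_process_feature_type('CDS'))
# coral name -> Biopython/genbank name
print(_process_feature_type('CDS', bio_to_coral=False))
# Unrecognized types raise ValueError
try:
    _process_feature_type('not_a_real_type')
except ValueError as err:
    print(err)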
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	_seqfeature_to_coral | 
	def _seqfeature_to_coral(feature):
    '''Convert a Biopython SeqFeature to a coral.Feature.
    :param feature: Biopython SeqFeature
    :type feature: Bio.SeqFeature
    '''
    # Some genomic sequences don't have a label attribute
    # TODO: handle genomic cases differently than others. Some features lack
    # a label but should still be incorporated somehow.
    qualifiers = feature.qualifiers
    if 'label' in qualifiers:
        feature_name = qualifiers['label'][0]
    elif 'locus_tag' in qualifiers:
        feature_name = qualifiers['locus_tag'][0]
    else:
        raise FeatureNameError('Unrecognized feature name')
    # Features with gaps are special, require looking at subfeatures
    # Assumption: subfeatures are never more than one level deep
    if feature.location_operator == 'join':
        # Feature has gaps. Have to figure out start/stop from subfeatures,
        # calculate gap indices. A nested feature model may be required
        # eventually.
        # Reorder the sub_feature list by start location
        # Assumption: none of the subfeatures overlap so the last entry in
        # the reordered list also has the final stop point of the feature.
        # FIXME: Getting a deprecation warning about using sub_features
        # instead of feature.location being a CompoundFeatureLocation
        reordered = sorted(feature.location.parts,
                           key=lambda location: location.start)
        starts = [int(location.start) for location in reordered]
        stops = [int(location.end) for location in reordered]
        feature_start = starts.pop(0)
        feature_stop = stops.pop(-1)
        starts = [start - feature_start for start in starts]
        stops = [stop - feature_start for stop in stops]
        feature_gaps = list(zip(stops, starts))
    else:
        # Feature doesn't have gaps. Ignore subfeatures.
        feature_start = int(feature.location.start)
        feature_stop = int(feature.location.end)
        feature_gaps = []
    feature_type = _process_feature_type(feature.type)
    if feature.location.strand == -1:
        feature_strand = 1
    else:
        feature_strand = 0
    if 'gene' in qualifiers:
        gene = qualifiers['gene']
    else:
        gene = []
    if 'locus_tag' in qualifiers:
        locus_tag = qualifiers['locus_tag']
    else:
        locus_tag = []
    coral_feature = coral.Feature(feature_name, feature_start,
                                  feature_stop, feature_type,
                                  gene=gene, locus_tag=locus_tag,
                                  qualifiers=qualifiers,
                                  strand=feature_strand,
                                  gaps=feature_gaps)
    return coral_feature | 
	python | 
	def _seqfeature_to_coral(feature):
    '''Convert a Biopython SeqFeature to a coral.Feature.
    :param feature: Biopython SeqFeature
    :type feature: Bio.SeqFeature
    '''
    # Some genomic sequences don't have a label attribute
    # TODO: handle genomic cases differently than others. Some features lack
    # a label but should still be incorporated somehow.
    qualifiers = feature.qualifiers
    if 'label' in qualifiers:
        feature_name = qualifiers['label'][0]
    elif 'locus_tag' in qualifiers:
        feature_name = qualifiers['locus_tag'][0]
    else:
        raise FeatureNameError('Unrecognized feature name')
    # Features with gaps are special, require looking at subfeatures
    # Assumption: subfeatures are never more than one level deep
    if feature.location_operator == 'join':
        # Feature has gaps. Have to figure out start/stop from subfeatures,
        # calculate gap indices. A nested feature model may be required
        # eventually.
        # Reorder the sub_feature list by start location
        # Assumption: none of the subfeatures overlap so the last entry in
        # the reordered list also has the final stop point of the feature.
        # FIXME: Getting a deprecation warning about using sub_features
        # instead of feature.location being a CompoundFeatureLocation
        reordered = sorted(feature.location.parts,
                           key=lambda location: location.start)
        starts = [int(location.start) for location in reordered]
        stops = [int(location.end) for location in reordered]
        feature_start = starts.pop(0)
        feature_stop = stops.pop(-1)
        starts = [start - feature_start for start in starts]
        stops = [stop - feature_start for stop in stops]
        feature_gaps = list(zip(stops, starts))
    else:
        # Feature doesn't have gaps. Ignore subfeatures.
        feature_start = int(feature.location.start)
        feature_stop = int(feature.location.end)
        feature_gaps = []
    feature_type = _process_feature_type(feature.type)
    if feature.location.strand == -1:
        feature_strand = 1
    else:
        feature_strand = 0
    if 'gene' in qualifiers:
        gene = qualifiers['gene']
    else:
        gene = []
    if 'locus_tag' in qualifiers:
        locus_tag = qualifiers['locus_tag']
    else:
        locus_tag = []
    coral_feature = coral.Feature(feature_name, feature_start,
                                  feature_stop, feature_type,
                                  gene=gene, locus_tag=locus_tag,
                                  qualifiers=qualifiers,
                                  strand=feature_strand,
                                  gaps=feature_gaps)
    return coral_feature | 
	Convert a Biopython SeqFeature to a coral.Feature.
    :param feature: Biopython SeqFeature
    :type feature: Bio.SeqFeature | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L204-L265 | 
| 
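A sketch converting a hand-built Biopython SeqFeature, assuming the Biopython version coral targets and a 'label' qualifier so FeatureNameError is not raised:

from Bio.SeqFeature import SeqFeature, FeatureLocation, ExactPosition

bio_feature = SeqFeature(FeatureLocation(ExactPosition(10), ExactPosition(40),
                                         strand=1),
                         type='CDS', qualifiers={'label': ['demo_cds']})
coral_feature = _seqfeature_to_coral(bio_feature)
print(coral_feature.name, coral_feature.start, coral_feature.stop)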
	klavinslab/coral | 
	coral/seqio/_dna.py | 
	_coral_to_seqfeature | 
	def _coral_to_seqfeature(feature):
    '''Convert a coral.Feature to a Biopython SeqFeature.
    :param feature: coral Feature.
    :type feature: coral.Feature
    '''
    bio_strand = 1 if feature.strand == 1 else -1
    ftype = _process_feature_type(feature.feature_type, bio_to_coral=False)
    sublocations = []
    if feature.gaps:
        # There are gaps. Have to define location_operator and  add subfeatures
        location_operator = 'join'
        # Feature location means nothing for 'join' sequences?
        # TODO: verify
        location = FeatureLocation(ExactPosition(0), ExactPosition(1),
                                   strand=bio_strand)
        # Reconstruct start/stop indices for each subfeature
        stops, starts = zip(*feature.gaps)
        starts = [feature.start] + [start + feature.start for start in starts]
        stops = [stop + feature.start for stop in stops] + [feature.stop]
        # Build subfeatures
        for start, stop in zip(starts, stops):
            sublocation = FeatureLocation(ExactPosition(start),
                                          ExactPosition(stop),
                                          strand=bio_strand)
            sublocations.append(sublocation)
        location = CompoundLocation(sublocations, operator='join')
    else:
        # No gaps, feature is simple
        location_operator = ''
        location = FeatureLocation(ExactPosition(feature.start),
                                   ExactPosition(feature.stop),
                                   strand=bio_strand)
    qualifiers = feature.qualifiers
    qualifiers['label'] = [feature.name]
    seqfeature = SeqFeature(location, type=ftype,
                            qualifiers=qualifiers,
                            location_operator=location_operator)
    return seqfeature | 
	python | 
	def _coral_to_seqfeature(feature):
    '''Convert a coral.Feature to a Biopython SeqFeature.
    :param feature: coral Feature.
    :type feature: coral.Feature
    '''
    bio_strand = 1 if feature.strand == 1 else -1
    ftype = _process_feature_type(feature.feature_type, bio_to_coral=False)
    sublocations = []
    if feature.gaps:
        # There are gaps. Have to define location_operator and  add subfeatures
        location_operator = 'join'
        # Feature location means nothing for 'join' sequences?
        # TODO: verify
        location = FeatureLocation(ExactPosition(0), ExactPosition(1),
                                   strand=bio_strand)
        # Reconstruct start/stop indices for each subfeature
        stops, starts = zip(*feature.gaps)
        starts = [feature.start] + [start + feature.start for start in starts]
        stops = [stop + feature.start for stop in stops] + [feature.stop]
        # Build subfeatures
        for start, stop in zip(starts, stops):
            sublocation = FeatureLocation(ExactPosition(start),
                                          ExactPosition(stop),
                                          strand=bio_strand)
            sublocations.append(sublocation)
        location = CompoundLocation(sublocations, operator='join')
    else:
        # No gaps, feature is simple
        location_operator = ''
        location = FeatureLocation(ExactPosition(feature.start),
                                   ExactPosition(feature.stop),
                                   strand=bio_strand)
    qualifiers = feature.qualifiers
    qualifiers['label'] = [feature.name]
    seqfeature = SeqFeature(location, type=ftype,
                            qualifiers=qualifiers,
                            location_operator=location_operator)
    return seqfeature | 
	Convert a coral.Feature to a Biopython SeqFeature.
    :param feature: coral Feature.
    :type feature: coral.Feature | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L268-L307 | 
| 
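The reverse conversion, assuming coral.Feature fills in qualifiers and gaps with empty defaults when they are omitted:

import coral

feat = coral.Feature('demo_cds', 10, 40, 'CDS', strand=0)
seqfeature = _coral_to_seqfeature(feat)
print(seqfeature.type, seqfeature.location.start, seqfeature.location.end)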
	klavinslab/coral | 
	coral/analysis/_sequencing/align.py | 
	as_ord_matrix | 
	def as_ord_matrix(matrix, alphabet):
    '''Given the SubstitutionMatrix input, generate an equivalent matrix that
    is indexed by the ASCII number of each residue (e.g. A -> 65).'''
    ords = [ord(c) for c in alphabet]
    ord_matrix = np.zeros((max(ords) + 1, max(ords) + 1), dtype=int)
    for i, row_ord in enumerate(ords):
        for j, col_ord in enumerate(ords):
            ord_matrix[row_ord, col_ord] = matrix[i, j]
    return ord_matrix | 
	python | 
	def as_ord_matrix(matrix, alphabet):
    '''Given the SubstitutionMatrix input, generate an equivalent matrix that
    is indexed by the ASCII number of each residue (e.g. A -> 65).'''
    ords = [ord(c) for c in alphabet]
    ord_matrix = np.zeros((max(ords) + 1, max(ords) + 1), dtype=int)
    for i, row_ord in enumerate(ords):
        for j, col_ord in enumerate(ords):
            ord_matrix[row_ord, col_ord] = matrix[i, j]
    return ord_matrix | 
	Given the SubstitutionMatrix input, generate an equivalent matrix that
    is indexed by the ASCII number of each residue (e.g. A -> 65). | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/align.py#L6-L15 | 
| 
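A tiny demonstration using a hypothetical 2x2 match/mismatch matrix over the alphabet 'AT':

import numpy as np

matrix = np.array([[1, -1],
                   [-1, 1]])
ord_matrix = as_ord_matrix(matrix, 'AT')
# Scores are now indexed by ASCII code, e.g. ord('A') == 65, ord('T') == 84.
print(ord_matrix[ord('A'), ord('A')], ord_matrix[ord('A'), ord('T')])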
	klavinslab/coral | 
	coral/analysis/_sequencing/align.py | 
	aligner | 
	def aligner(seqj, seqi, method='global', gap_open=-7, gap_extend=-7,
            gap_double=-7, matrix=submat.DNA_SIMPLE.matrix,
            alphabet=submat.DNA_SIMPLE.alphabet):
    '''Calculates the alignment of two sequences. The global method uses
    a global Needleman-Wunsch algorithm, local does a local
    Smith-Waterman alignment, global_cfe does a global alignment with
    cost-free ends and glocal does an alignment which is global only with
    respect to the shorter sequence, also known as a semi-global
    alignment. Returns the aligned (sub)sequences as character arrays.
    Gotoh, O. (1982). J. Mol. Biol. 162, 705-708.
    Needleman, S. & Wunsch, C. (1970). J. Mol. Biol. 48(3), 443-53.
    Smith, T.F. & Waterman M.S. (1981). J. Mol. Biol. 147, 195-197.
    :param seqj: First sequence.
    :type seqj: str
    :param seqi: Second sequence.
    :type seqi: str
    :param method: Type of alignment: 'global', 'global_cfe', 'local', or
                   'glocal'.
    :type method: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: float
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: float
    :param gap_double: The gap-opening cost if a gap is already open in the
                       other sequence (negative number).
    :type gap_double: float
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    :type matrix: str
    :param alphabet: The characters corresponding to matrix rows/columns.
    :type alphabet: str
    '''
    amatrix = as_ord_matrix(matrix, alphabet)
    NONE, LEFT, UP, DIAG = range(4)  # NONE is 0
    max_j = len(seqj)
    max_i = len(seqi)
    if max_j > max_i:
        flip = 1
        seqi, seqj = seqj, seqi
        max_i, max_j = max_j, max_i
    else:
        flip = 0
    F = np.zeros((max_i + 1, max_j + 1), dtype=np.float32)
    I = np.ndarray((max_i + 1, max_j + 1), dtype=np.float32)
    I.fill(-np.inf)
    J = np.ndarray((max_i + 1, max_j + 1), dtype=np.float32)
    J.fill(-np.inf)
    pointer = np.zeros((max_i + 1, max_j + 1), dtype=np.uint)  # NONE
    if method == 'global':
        pointer[0, 1:] = LEFT
        pointer[1:, 0] = UP
        F[0, 1:] = gap_open + gap_extend * np.arange(0, max_j,
                                                     dtype=np.float32)
        F[1:, 0] = gap_open + gap_extend * np.arange(0, max_i,
                                                     dtype=np.float32)
    elif method == 'global_cfe':
        pointer[0, 1:] = LEFT
        pointer[1:, 0] = UP
    elif method == 'glocal':
        pointer[0, 1:] = LEFT
        F[0, 1:] = gap_open + gap_extend * np.arange(0, max_j,
                                                     dtype=np.float32)
    seqi_ord = [ord(base) for base in seqi]
    seqj_ord = [ord(base) for base in seqj]
    for i in range(1, max_i + 1):
        ci = seqi_ord[i - 1]
        for j in range(1, max_j + 1):
            cj = seqj_ord[j - 1]
            # I
            I[i, j] = max(F[i, j - 1] + gap_open,
                          I[i, j - 1] + gap_extend,
                          J[i, j - 1] + gap_double)
            # J
            J[i, j] = max(F[i - 1, j] + gap_open,
                          J[i - 1, j] + gap_extend,
                          I[i - 1, j] + gap_double)
            # F
            diag_score = F[i - 1, j - 1] + amatrix[ci, cj]
            left_score = I[i, j]
            up_score = J[i, j]
            max_score = max(diag_score, up_score, left_score)
            F[i, j] = max(0, max_score) if method == 'local' else max_score
            if method == 'local':
                if F[i, j] == 0:
                    pass  # point[i,j] = NONE
                elif max_score == diag_score:
                    pointer[i, j] = DIAG
                elif max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == left_score:
                    pointer[i, j] = LEFT
            elif method == 'glocal':
                # In a semi-global alignment we want to consume as much as
                # possible of the longer sequence.
                if max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == diag_score:
                    pointer[i, j] = DIAG
                elif max_score == left_score:
                    pointer[i, j] = LEFT
            else:
                # global
                if max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == left_score:
                    pointer[i, j] = LEFT
                else:
                    pointer[i, j] = DIAG
    align_j = []
    align_i = []
    if method == 'local':
        # max anywhere
        i, j = max_index(F)
    elif method == 'glocal':
        # max in last col
        i, j = (F[:, -1].argmax(), max_j)
    elif method == 'global_cfe':
        # from i,j to max(max(last row), max(last col)) for free
        row_max, col_idx = F[-1].max(), F[-1].argmax()
        col_max, row_idx = F[:, -1].max(), F[:, -1].argmax()
        if row_max > col_max:
            pointer[-1, col_idx + 1:] = LEFT
        else:
            pointer[row_idx + 1:, -1] = UP
    p = pointer[i, j]
    while p != NONE:
        if p == DIAG:
            i -= 1
            j -= 1
            align_j.append(seqj[j])
            align_i.append(seqi[i])
        elif p == LEFT:
            j -= 1
            align_j.append(seqj[j])
            align_i.append('-')
        elif p == UP:
            i -= 1
            align_j.append('-')
            align_i.append(seqi[i])
        else:
            raise Exception('wtf!')
        p = pointer[i, j]
    align_i = ''.join(align_i[::-1])
    align_j = ''.join(align_j[::-1])
    # np.array(align_i.reverse())
    return ((align_i, align_j) if flip else (align_j, align_i)) | 
	python | 
	def aligner(seqj, seqi, method='global', gap_open=-7, gap_extend=-7,
            gap_double=-7, matrix=submat.DNA_SIMPLE.matrix,
            alphabet=submat.DNA_SIMPLE.alphabet):
    '''Calculates the alignment of two sequences. The global method uses
    a global Needleman-Wunsch algorithm, local does a local
    Smith-Waterman alignment, global_cfe does a global alignment with
    cost-free ends and glocal does an alignment which is global only with
    respect to the shorter sequence, also known as a semi-global
    alignment. Returns the aligned (sub)sequences as character arrays.
    Gotoh, O. (1982). J. Mol. Biol. 162, 705-708.
    Needleman, S. & Wunsch, C. (1970). J. Mol. Biol. 48(3), 443-53.
    Smith, T.F. & Waterman M.S. (1981). J. Mol. Biol. 147, 195-197.
    :param seqj: First sequence.
    :type seqj: str
    :param seqi: Second sequence.
    :type seqi: str
    :param method: Type of alignment: 'global', 'global_cfe', 'local', or
                   'glocal'.
    :type method: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: float
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: float
    :param gap_double: The gap-opening cost if a gap is already open in the
                       other sequence (negative number).
    :type gap_double: float
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    :type matrix: str
    :param alphabet: The characters corresponding to matrix rows/columns.
    :type alphabet: str
    '''
    amatrix = as_ord_matrix(matrix, alphabet)
    NONE, LEFT, UP, DIAG = range(4)  # NONE is 0
    max_j = len(seqj)
    max_i = len(seqi)
    if max_j > max_i:
        flip = 1
        seqi, seqj = seqj, seqi
        max_i, max_j = max_j, max_i
    else:
        flip = 0
    F = np.zeros((max_i + 1, max_j + 1), dtype=np.float32)
    I = np.ndarray((max_i + 1, max_j + 1), dtype=np.float32)
    I.fill(-np.inf)
    J = np.ndarray((max_i + 1, max_j + 1), dtype=np.float32)
    J.fill(-np.inf)
    pointer = np.zeros((max_i + 1, max_j + 1), dtype=np.uint)  # NONE
    if method == 'global':
        pointer[0, 1:] = LEFT
        pointer[1:, 0] = UP
        F[0, 1:] = gap_open + gap_extend * np.arange(0, max_j,
                                                     dtype=np.float32)
        F[1:, 0] = gap_open + gap_extend * np.arange(0, max_i,
                                                     dtype=np.float32)
    elif method == 'global_cfe':
        pointer[0, 1:] = LEFT
        pointer[1:, 0] = UP
    elif method == 'glocal':
        pointer[0, 1:] = LEFT
        F[0, 1:] = gap_open + gap_extend * np.arange(0, max_j,
                                                     dtype=np.float32)
    seqi_ord = [ord(base) for base in seqi]
    seqj_ord = [ord(base) for base in seqj]
    for i in range(1, max_i + 1):
        ci = seqi_ord[i - 1]
        for j in range(1, max_j + 1):
            cj = seqj_ord[j - 1]
            # I
            I[i, j] = max(F[i, j - 1] + gap_open,
                          I[i, j - 1] + gap_extend,
                          J[i, j - 1] + gap_double)
            # J
            J[i, j] = max(F[i - 1, j] + gap_open,
                          J[i - 1, j] + gap_extend,
                          I[i - 1, j] + gap_double)
            # F
            diag_score = F[i - 1, j - 1] + amatrix[ci, cj]
            left_score = I[i, j]
            up_score = J[i, j]
            max_score = max(diag_score, up_score, left_score)
            F[i, j] = max(0, max_score) if method == 'local' else max_score
            if method == 'local':
                if F[i, j] == 0:
                    pass  # point[i,j] = NONE
                elif max_score == diag_score:
                    pointer[i, j] = DIAG
                elif max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == left_score:
                    pointer[i, j] = LEFT
            elif method == 'glocal':
                # In a semi-global alignment we want to consume as much as
                # possible of the longer sequence.
                if max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == diag_score:
                    pointer[i, j] = DIAG
                elif max_score == left_score:
                    pointer[i, j] = LEFT
            else:
                # global
                if max_score == up_score:
                    pointer[i, j] = UP
                elif max_score == left_score:
                    pointer[i, j] = LEFT
                else:
                    pointer[i, j] = DIAG
    align_j = []
    align_i = []
    if method == 'local':
        # max anywhere
        i, j = max_index(F)
    elif method == 'glocal':
        # max in last col
        i, j = (F[:, -1].argmax(), max_j)
    elif method == 'global_cfe':
        # from i,j to max(max(last row), max(last col)) for free
        row_max, col_idx = F[-1].max(), F[-1].argmax()
        col_max, row_idx = F[:, -1].max(), F[:, -1].argmax()
        if row_max > col_max:
            pointer[-1, col_idx + 1:] = LEFT
        else:
            pointer[row_idx + 1:, -1] = UP
    p = pointer[i, j]
    while p != NONE:
        if p == DIAG:
            i -= 1
            j -= 1
            align_j.append(seqj[j])
            align_i.append(seqi[i])
        elif p == LEFT:
            j -= 1
            align_j.append(seqj[j])
            align_i.append('-')
        elif p == UP:
            i -= 1
            align_j.append('-')
            align_i.append(seqi[i])
        else:
            raise Exception('wtf!')
        p = pointer[i, j]
    align_i = ''.join(align_i[::-1])
    align_j = ''.join(align_j[::-1])
    # np.array(align_i.reverse())
    return ((align_i, align_j) if flip else (align_j, align_i)) | 
	Calculates the alignment of two sequences. The global method uses
    a global Needleman-Wunsch algorithm, local does a local
    Smith-Waterman alignment, global_cfe does a global alignment with
    cost-free ends and glocal does an alignment which is global only with
    respect to the shorter sequence, also known as a semi-global
    alignment. Returns the aligned (sub)sequences as character arrays.
    Gotoh, O. (1982). J. Mol. Biol. 162, 705-708.
    Needleman, S. & Wunsch, C. (1970). J. Mol. Biol. 48(3), 443-53.
    Smith, T.F. & Waterman M.S. (1981). J. Mol. Biol. 147, 195-197.
    :param seqj: First sequence.
    :type seqj: str
    :param seqi: Second sequence.
    :type seqi: str
    :param method: Type of alignment: 'global', 'global_cfe', 'local', or
                   'glocal'.
    :type method: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: float
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: float
    :param gap_double: The gap-opening cost if a gap is already open in the
                       other sequence (negative number).
    :type gap_double: float
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    :type matrix: str
    :param alphabet: The characters corresponding to matrix rows/columns.
    :type alphabet: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/align.py#L29-L185 | 
| 
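A usage sketch aligning two short hypothetical sequences with the default DNA_SIMPLE scoring; the return value is a pair of gapped strings in the same order as the inputs:

top, bottom = aligner('ATCGTACGATCG', 'ATCGTACCGATCG', method='global')
print(top)
print(bottom)

# Semi-global alignment: global with respect to the shorter sequence only.
read, ref = aligner('TACGAT', 'ATCGTACGATCG', method='glocal')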
	klavinslab/coral | 
	coral/analysis/_sequencing/align.py | 
	score_alignment | 
	def score_alignment(a, b, gap_open, gap_extend, matrix):
    '''Calculate the alignment score from two aligned sequences.
    :param a: The first aligned sequence.
    :type a: str
    :param b: The second aligned sequence.
    :type b: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: int
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: int.
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    '''
    al = a
    bl = b
    l = len(al)
    score = 0
    assert len(bl) == l, 'Alignment lengths must be the same'
    # as_ord_matrix needs the alphabet that indexes the matrix; assume the
    # module's default DNA alphabet here.
    mat = as_ord_matrix(matrix, submat.DNA_SIMPLE.alphabet)
    gap_started = 0
    for i in range(l):
        if al[i] == '-' or bl[i] == '-':
            score += gap_extend if gap_started else gap_open
            gap_started = 1
        else:
            score += mat[ord(al[i]), ord(bl[i])]
            gap_started = 0
    return score | 
	python | 
	def score_alignment(a, b, gap_open, gap_extend, matrix):
    '''Calculate the alignment score from two aligned sequences.
    :param a: The first aligned sequence.
    :type a: str
    :param b: The second aligned sequence.
    :type b: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: int
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: int.
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    '''
    al = a
    bl = b
    l = len(al)
    score = 0
    assert len(bl) == l, 'Alignment lengths must be the same'
    # as_ord_matrix needs the alphabet that indexes the matrix; assume the
    # module's default DNA alphabet here.
    mat = as_ord_matrix(matrix, submat.DNA_SIMPLE.alphabet)
    gap_started = 0
    for i in range(l):
        if al[i] == '-' or bl[i] == '-':
            score += gap_extend if gap_started else gap_open
            gap_started = 1
        else:
            score += mat[ord(al[i]), ord(bl[i])]
            gap_started = 0
    return score | 
	Calculate the alignment score from two aligned sequences.
    :param a: The first aligned sequence.
    :type a: str
    :param b: The second aligned sequence.
    :type b: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: int
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: int.
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/align.py#L188-L219 | 
| 
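Scoring a pre-computed alignment with the same gap penalties used to build it; the import path for the substitution matrices is an assumption based on this file's layout:

# Hypothetical import path for the matrices referenced by align.py.
from coral.analysis._sequencing import substitution_matrices as submat

a, b = aligner('ATCGTACGATCG', 'ATCGTACCGATCG', method='global')
score = score_alignment(a, b, gap_open=-7, gap_extend=-7,
                        matrix=submat.DNA_SIMPLE.matrix)
print(score)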
	klavinslab/coral | 
	bin/build_sphinx_docs.py | 
	build_docs | 
	def build_docs(directory):
    """Builds sphinx docs from a given directory."""
    os.chdir(directory)
    process = subprocess.Popen(["make", "html"], cwd=directory)
    process.communicate() | 
	python | 
	def build_docs(directory):
    """Builds sphinx docs from a given directory."""
    os.chdir(directory)
    process = subprocess.Popen(["make", "html"], cwd=directory)
    process.communicate() | 
	Builds sphinx docs from a given directory. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/bin/build_sphinx_docs.py#L11-L15 | 
| 
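A one-line usage sketch; the docs directory path is hypothetical:

build_docs('/path/to/coral/docs')  # runs `make html` in that directory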
	klavinslab/coral | 
	coral/design/_gibson.py | 
	gibson_primers | 
	def gibson_primers(dna1, dna2, overlap='mixed', maxlen=80, overlap_tm=65.0,
                   insert=None, primer_kwargs=None):
    '''Design Gibson primers given two DNA sequences (connect left to right)
    :param dna1: First piece of DNA for which to design primers. Once Gibsoned,
                 would be connected at its right side to dna2.
    :type dna1: coral.DNA
    :param dna2: Second piece of DNA for which to design primers. Once
                 Gibsoned, would be connected at its left side to dna1.
    :type dna2: coral.DNA
    :param overlap: Specifies location of overlap. 'left' puts it on the 'dna1'
                    side (i.e. the primer to amplify dna2). 'right' puts it on
                    the dna2 side, and 'mixed' does a ~50:50 split
    :type overlap: str
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param insert: A DNA insert to add with primers and use as assembly
                   homology. This overrides the 'overlap' argument.
    :type insert: coral.DNA
    :param primer_kwargs: keyword arguments to pass to design_primer()
    :type primer_kwargs: dict
    :returns: Reverse, then forward primer for bridging the two sequences.
              Note that the forward primer binds dna2, reverse dna1.
    :rtype: A sequence.Primer tuple
    :raises: ValueError if the overlap parameter is an invalid string.
    '''
    if primer_kwargs is None:
        primer_kwargs = {}
    # Annealing sequences
    # DNA 2 primer is a forward primer
    fwd_anneal = coral.design.primer(dna2, **primer_kwargs)
    # DNA 1 primer is a reverse primer
    rev_anneal = coral.design.primer(dna1.flip(), **primer_kwargs)
    # Overhangs
    if insert is None:
        # No insert, so follow split argument
        if overlap == 'left':
            # If splitting left, put overhang on forward primer
            overlap_revcomp = coral.design.primer(dna1.flip(), tm=overlap_tm,
                                                  tm_undershoot=0)
            fwd_overhang = overlap_revcomp.primer().reverse_complement()
            rev_overhang = None
        elif overlap == 'right':
            # If splitting right, put overhang on reverse primer
            overlap = coral.design.primer(dna2, tm=overlap_tm, tm_undershoot=0)
            fwd_overhang = None
            rev_overhang = overlap.primer().reverse_complement()
        elif overlap == 'mixed':
            # If mixed, grow size of both until overlap Tm is reached
            overlap_l = dna1[0:0]  # Empty sequence.DNA
            overlap_r = dna2[0]  # First base
            overlap_melt = overlap_r.tm()
            while overlap_melt < overlap_tm:
                rlen = len(overlap_r)
                llen = len(overlap_l)
                if rlen > llen:
                    # Increase left side of overlap
                    overlap_l = dna1[-(rlen + 1):]
                else:
                    # Increase right side of overlap
                    overlap_r = dna2[:(llen + 1)]
                overlap = overlap_l + overlap_r
                overlap_melt = overlap.tm()
            fwd_overhang = overlap_l
            rev_overhang = overlap_r.reverse_complement()
        else:
            raise ValueError('overlap argument must be left, right, or mixed')
        # Generate primers using anneal, overhang, and tm data
        fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                           overhang=fwd_overhang)
        rev = coral.Primer(rev_anneal.primer(), tm=rev_anneal.tm,
                           overhang=rev_overhang)
    else:
        # There's an insert to use as the overhang
        overlap = insert
        fwd_overhang = insert.primer()
        rev_overhang = insert.reverse_complement().primer()
        # Generate primers using anneal, overhang, and tm data
        fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                           overhang=fwd_overhang)
        rev = coral.Primer(rev_anneal.primer(), tm=rev_anneal.tm,
                           overhang=rev_overhang)
        left_trim = 0
        # If either primer is too long, try trimming the overhang
        while len(fwd) > maxlen:
            # Generate new overlap
            overlap = insert[left_trim:]
            # Tm must be above overlap_tm
            if coral.analysis.tm(overlap) < overlap_tm:
                raise TmError('Right primer is too long with this Tm setting.')
            # Regenerate forward overhang
            fwd_overhang = overlap.primer()
            # Regenerate primer with new overhang
            fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                               overhang=fwd_overhang)
            # Increase 'trimming' index
            left_trim += 1
        right_trim = 0
        while len(rev) > maxlen:
            # Generate new overlap
            overlap = insert[:len(insert) - right_trim]
            # Tm must be above overlap_tm
            if coral.analysis.tm(overlap) < overlap_tm:
                raise TmError('Left primer is too long with this Tm setting.')
            # Regenerate reverse overhang
            rev_overhang = overlap.reverse_complement().to_ss()
            rev = coral.Primer(rev_anneal.to_ss(), tm=rev_anneal.tm,
                               overhang=rev_overhang)
            # Increase 'trimming' index
            right_trim += 1
    # Check primer lengths
    if any([len(primer) > maxlen for primer in (fwd, rev)]):
        raise LengthError('At least one of the primers is longer than maxlen.')
    return rev, fwd | 
	python | 
	def gibson_primers(dna1, dna2, overlap='mixed', maxlen=80, overlap_tm=65.0,
                   insert=None, primer_kwargs=None):
    '''Design Gibson primers given two DNA sequences (connect left to right)
    :param dna1: First piece of DNA for which to design primers. Once Gibsoned,
                 would be connected at its right side to dna2.
    :type dna1: coral.DNA
    :param dna2: Second piece of DNA for which to design primers. Once
                 Gibsoned, would be connected at its left side to dna1.
    :type dna2: coral.DNA
    :param overlap: Specifies location of overlap. 'left' puts it on the 'dna1'
                    side (i.e. the primer to amplify dna2). 'right' puts it on
                    the dna2 side, and 'mixed' does a ~50:50 split
    :type overlap: str
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param insert: A DNA insert to add with primers and use as assembly
                   homology. This overrides the 'overlap' argument.
    :type insert: coral.DNA
    :param primer_kwargs: keyword arguments to pass to design_primer()
    :type primer_kwargs: dict
    :returns: Reverse, then forward primer for bridging the two sequences.
              Note that the forward primer binds dna2, reverse dna1.
    :rtype: A sequence.Primer tuple
    :raises: ValueError if the overlap parameter is an invalid string.
    '''
    if primer_kwargs is None:
        primer_kwargs = {}
    # Annealing sequences
    # DNA 2 primer is a forward primer
    fwd_anneal = coral.design.primer(dna2, **primer_kwargs)
    # DNA 1 primer is a reverse primer
    rev_anneal = coral.design.primer(dna1.flip(), **primer_kwargs)
    # Overhangs
    if insert is None:
        # No insert, so follow split argument
        if overlap == 'left':
            # If splitting left, put overhang on forward primer
            overlap_revcomp = coral.design.primer(dna1.flip(), tm=overlap_tm,
                                                  tm_undershoot=0)
            fwd_overhang = overlap_revcomp.primer().reverse_complement()
            rev_overhang = None
        elif overlap == 'right':
            # If splitting right, put overhang on reverse primer
            overlap = coral.design.primer(dna2, tm=overlap_tm, tm_undershoot=0)
            fwd_overhang = None
            rev_overhang = overlap.primer().reverse_complement()
        elif overlap == 'mixed':
            # If mixed, grow size of both until overlap Tm is reached
            overlap_l = dna1[0:0]  # Empty sequence.DNA
            overlap_r = dna2[0]  # First base
            overlap_melt = overlap_r.tm()
            while overlap_melt < overlap_tm:
                rlen = len(overlap_r)
                llen = len(overlap_l)
                if rlen > llen:
                    # Increase left side of overlap
                    overlap_l = dna1[-(rlen + 1):]
                else:
                    # Increase right side of overlap
                    overlap_r = dna2[:(llen + 1)]
                overlap = overlap_l + overlap_r
                overlap_melt = overlap.tm()
            fwd_overhang = overlap_l
            rev_overhang = overlap_r.reverse_complement()
        else:
            raise ValueError('overlap argument must be left, right, or mixed')
        # Generate primers using anneal, overhang, and tm data
        fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                           overhang=fwd_overhang)
        rev = coral.Primer(rev_anneal.primer(), tm=rev_anneal.tm,
                           overhang=rev_overhang)
    else:
        # There's an insert to use as the overhang
        overlap = insert
        fwd_overhang = insert.primer()
        rev_overhang = insert.reverse_complement().primer()
        # Generate primers using anneal, overhang, and tm data
        fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                           overhang=fwd_overhang)
        rev = coral.Primer(rev_anneal.primer(), tm=rev_anneal.tm,
                           overhang=rev_overhang)
        left_trim = 0
        # If either primer is too long, try trimming the overhang
        while len(fwd) > maxlen:
            # Generate new overlap
            overlap = insert[left_trim:]
            # Tm must be above overlap_tm
            if coral.analysis.tm(overlap) < overlap_tm:
                raise TmError('Right primer is too long with this Tm setting.')
            # Regenerate forward overhang
            fwd_overhang = overlap.primer()
            # Regenerate primer with new overhang
            fwd = coral.Primer(fwd_anneal.primer(), tm=fwd_anneal.tm,
                               overhang=fwd_overhang)
            # Increase 'trimming' index
            left_trim += 1
        right_trim = 0
        while len(rev) > maxlen:
            # Generate new overlap
            overlap = insert[:len(insert) - right_trim]
            # Tm must be above overlap_tm
            if coral.analysis.tm(overlap) < overlap_tm:
                raise TmError('Left primer is too long with this Tm setting.')
            # Regenerate reverse overhang
            rev_overhang = overlap.reverse_complement().to_ss()
            rev = coral.Primer(rev_anneal.to_ss(), tm=rev_anneal.tm,
                               overhang=rev_overhang)
            # Increase 'trimming' index
            right_trim += 1
    # Check primer lengths
    if any([len(primer) > maxlen for primer in (fwd, rev)]):
        raise LengthError('At least one of the primers is longer than maxlen.')
    return rev, fwd | 
	Design Gibson primers given two DNA sequences (connect left to right)
    :param dna1: First piece of DNA for which to design primers. Once Gibsoned,
                 would be connected at its right side to dna2.
    :type dna1: coral.DNA
    :param dna2: Second piece of DNA for which to design primers. Once
                 Gibsoned, would be connected at its left side to dna1.
    :type dna2: coral.DNA
    :param overlap: Specifies location of overlap. 'left' puts it on the 'dna1'
                    side (i.e. the primer to amplify dna2). 'right' puts it on
                    the dna2 side, and 'mixed' does a ~50:50 split
    :type overlap: str
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param insert: A DNA insert to add with primers and use as assembly
                   homology. This overrides the 'overlap' argument.
    :type insert: coral.DNA
    :param primer_kwargs: keyword arguments to pass to design_primer()
    :type primer_kwargs: dict
    :returns: Reverse, then forward primer for bridging the two sequences.
              Note that the forward primer binds dna2, reverse dna1.
    :rtype: A sequence.Primer tuple
    :raises: ValueError if split parameter is an invalid string. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_gibson.py#L15-L133 | 
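The trimming loops above shave the insert-derived overhang one base at a time until the whole primer fits under maxlen, aborting if the remaining overlap's Tm would drop below overlap_tm. A standalone Python 3 sketch of that control flow follows; crude_tm and the placeholder sequences are illustrative stand-ins, not coral's nearest-neighbor Tm calculation.

def crude_tm(seq):
    # Wallace rule (2 degrees per A/T, 4 per G/C) - a rough stand-in for coral.analysis.tm.
    return 2 * (seq.count('A') + seq.count('T')) + 4 * (seq.count('G') + seq.count('C'))

def trim_overhang(anneal, insert, maxlen=80, overlap_tm=65.0):
    overhang = insert
    left_trim = 0
    # Shrink the overhang from the left until the full primer fits under maxlen.
    while len(anneal) + len(overhang) > maxlen:
        overhang = insert[left_trim:]
        if crude_tm(overhang) < overlap_tm:
            raise ValueError('Primer is too long with this Tm setting.')
        left_trim += 1
    return overhang + anneal

print(trim_overhang('ATGCATGCATGCATGCATGC', 'GGGCCCGGGCCCGGGCCCGGGCCCGGGCCC', maxlen=40))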
| 
	klavinslab/coral | 
	coral/design/_gibson.py | 
	gibson | 
	def gibson(seq_list, circular=True, overlaps='mixed', overlap_tm=65,
           maxlen=80, terminal_primers=True, primer_kwargs=None):
    '''Design Gibson primers given a set of sequences
    :param seq_list: List of DNA sequences to stitch together
    :type seq_list: list containing coral.DNA
    :param circular: If true, designs primers for making a circular construct.
                     If false, designs primers for a linear construct.
    :type circular: bool
    :param overlaps: Specifies locations of overlap. Must be either a single
                     entry of the same type as the 'split' parameter in
                     gibson_primers or a list of those types of the appropriate
                     length (for circular construct, len(seq_list), for
                     linear construct, len(seq_list) - 1)
    :type overlaps: str or list of str
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param terminal_primers: If the output is not circular, will design
                             non-Gibson primers for amplifying the first and
                             last fragments sans homology. If False, there will
                             be one less set of primers returned.
    :type terminal_primers: bool
    :param primer_kwargs: keyword arguments to pass to design.primer
    :type primer_kwargs: dict
    :returns: Forward and reverse primers for amplifying every fragment.
    :rtype: a list of sequence.Primer tuples
    :raises: ValueError if the overlaps parameter is an invalid string or wrong size.
    '''
    # Input checking
    if circular:
        n_overlaps = len(seq_list)
    else:
        n_overlaps = len(seq_list) - 1
    if type(overlaps) is str:
        overlaps = [overlaps] * n_overlaps
    else:
        if len(overlaps) != n_overlaps:
            raise ValueError('Incorrect number of \'overlaps\' entries.')
        else:
            for overlap in overlaps:
                if overlap not in ['left', 'right', 'mixed']:
                    raise ValueError('Invalid \'overlaps\' setting.')
    if primer_kwargs is None:
        primer_kwargs = {}
    # If here, inputs were good
    # Design primers for linear constructs:
    primers_list = []
    for i, (left, right) in enumerate(zip(seq_list[:-1], seq_list[1:])):
        primers_list.append(gibson_primers(left, right, overlaps[i],
                                           overlap_tm=overlap_tm,
                                           primer_kwargs=primer_kwargs))
    if circular:
        primers_list.append(gibson_primers(seq_list[-1], seq_list[0],
                                           overlaps[-1],
                                           overlap_tm=overlap_tm,
                                           primer_kwargs=primer_kwargs))
    else:
        if terminal_primers:
            primer_f = coral.design.primer(seq_list[0], **primer_kwargs)
            primer_r = coral.design.primer(seq_list[-1].reverse_complement(),
                                           **primer_kwargs)
            primers_list.append((primer_r, primer_f))
    # Primers are now in order of 'reverse for seq1, forward for seq2' config
    # Should be in 'forward and reverse primers for seq1, then seq2', etc
    # Just need to rotate one to the right
    flat = [y for x in primers_list for y in x]
    flat = [flat[-1]] + flat[:-1]
    grouped_primers = [(flat[2 * i], flat[2 * i + 1]) for i in
                       range(len(flat) / 2)]
    return grouped_primers | 
	python | 
	def gibson(seq_list, circular=True, overlaps='mixed', overlap_tm=65,
           maxlen=80, terminal_primers=True, primer_kwargs=None):
    '''Design Gibson primers given a set of sequences
    :param seq_list: List of DNA sequences to stitch together
    :type seq_list: list containing coral.DNA
    :param circular: If true, designs primers for making a circular construct.
                     If false, designs primers for a linear construct.
    :type circular: bool
    :param overlaps: Specifies locations of overlap. Must be either a single
                     entry of the same type as the 'split' parameter in
                     gibson_primers or a list of those types of the appropriate
                     length (for circular construct, len(seq_list), for
                     linear construct, len(seq_list) - 1)
    :type overlaps: str or list of str
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param terminal_primers: If the output is not circular, will design
                             non-Gibson primers for amplifying the first and
                             last fragments sans homology. If False, there will
                             be one less set of primers returned.
    :type terminal_primers: bool
    :param primer_kwargs: keyword arguments to pass to design.primer
    :type primer_kwargs: dict
    :returns: Forward and reverse primers for amplifying every fragment.
    :rtype: a list of sequence.Primer tuples
    :raises: ValueError if the overlaps parameter is an invalid string or wrong size.
    '''
    # Input checking
    if circular:
        n_overlaps = len(seq_list)
    else:
        n_overlaps = len(seq_list) - 1
    if type(overlaps) is str:
        overlaps = [overlaps] * n_overlaps
    else:
        if len(overlaps) != n_overlaps:
            raise ValueError('Incorrect number of \'overlaps\' entries.')
        else:
            for overlap in overlaps:
                if overlap not in ['left', 'right', 'mixed']:
                    raise ValueError('Invalid \'overlaps\' setting.')
    if primer_kwargs is None:
        primer_kwargs = {}
    # If here, inputs were good
    # Design primers for linear constructs:
    primers_list = []
    for i, (left, right) in enumerate(zip(seq_list[:-1], seq_list[1:])):
        primers_list.append(gibson_primers(left, right, overlaps[i],
                                           overlap_tm=overlap_tm,
                                           primer_kwargs=primer_kwargs))
    if circular:
        primers_list.append(gibson_primers(seq_list[-1], seq_list[0],
                                           overlaps[-1],
                                           overlap_tm=overlap_tm,
                                           primer_kwargs=primer_kwargs))
    else:
        if terminal_primers:
            primer_f = coral.design.primer(seq_list[0], **primer_kwargs)
            primer_r = coral.design.primer(seq_list[-1].reverse_complement(),
                                           **primer_kwargs)
            primers_list.append((primer_r, primer_f))
    # Primers are now in order of 'reverse for seq1, forward for seq2' config
    # Should be in 'forward and reverse primers for seq1, then seq2', etc
    # Just need to rotate one to the right
    flat = [y for x in primers_list for y in x]
    flat = [flat[-1]] + flat[:-1]
    grouped_primers = [(flat[2 * i], flat[2 * i + 1]) for i in
                       range(len(flat) / 2)]
    return grouped_primers | 
	Design Gibson primers given a set of sequences
    :param seq_list: List of DNA sequences to stitch together
    :type seq_list: list containing coral.DNA
    :param circular: If true, designs primers for making a circular construct.
                     If false, designs primers for a linear construct.
    :type circular: bool
    :param overlaps: Specifies locations of overlap. Must be either a single
                     entry of the same type as the 'split' parameter in
                     gibson_primers or a list of those types of the appropriate
                     length (for circular construct, len(seq_list), for
                     linear construct, len(seq_list) - 1)
    :type overlaps: str or list of str
    :param overlap_tm: Minimum Tm of overlap
    :type overlap_tm: float
    :param maxlen: Maximum length of each primer.
    :type maxlen: int
    :param terminal_primers: If the output is not circular, will design
                             non-Gibson primers for amplifying the first and
                             last fragments sans homology. If False, there will
                             be one less set of primers returned.
    :type terminal_primers: bool
    :param primer_kwargs: keyword arguments to pass to design.primer
    :type primer_kwargs: dict
    :returns: Forward and reverse primers for amplifying every fragment.
    :rtype: a list of sequence.Primer tuples
    :raises: ValueError if the overlaps parameter is an invalid string or wrong size. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_gibson.py#L136-L214 | 
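The final regrouping step above flattens the (reverse, forward) primer pairs and rotates the list right by one so each tuple holds the forward and reverse primer for a single fragment. A self-contained sketch with labelled placeholders; integer division is written as // here for Python 3 (the source's / is Python 2 floor division).

# Three circular fragments: each gibson_primers call returns (rev for frag i, fwd for frag i+1).
pairs = [('rev1', 'fwd2'), ('rev2', 'fwd3'), ('rev3', 'fwd1')]
flat = [primer for pair in pairs for primer in pair]
flat = [flat[-1]] + flat[:-1]
grouped = [(flat[2 * i], flat[2 * i + 1]) for i in range(len(flat) // 2)]
print(grouped)  # [('fwd1', 'rev1'), ('fwd2', 'rev2'), ('fwd3', 'rev3')]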
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	_decompose | 
	def _decompose(string, n):
    '''Given string and multiplier n, find the 2**m decomposition.
    :param string: input string
    :type string: str
    :param n: multiplier
    :type n: int
    :returns: generator that produces string * 2**m for each set bit m of n
    :rtype: generator of str
    '''
    binary = [int(x) for x in bin(n)[2:]]
    new_string = string
    counter = 1
    while counter <= len(binary):
        if binary[-counter]:
            yield new_string
        new_string += new_string
        counter += 1 | 
	python | 
	def _decompose(string, n):
    '''Given string and multiplier n, find the 2**m decomposition.
    :param string: input string
    :type string: str
    :param n: multiplier
    :type n: int
    :returns: generator that produces string * 2**m for each set bit m of n
    :rtype: generator of str
    '''
    binary = [int(x) for x in bin(n)[2:]]
    new_string = string
    counter = 1
    while counter <= len(binary):
        if binary[-counter]:
            yield new_string
        new_string += new_string
        counter += 1 | 
	Given string and multiplier n, find the 2**m decomposition.
    :param string: input string
    :type string: str
    :param n: multiplier
    :type n: int
    :returns: generator that produces string * 2**m for each set bit m of n
    :rtype: generator of str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L244-L262 | 
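A standalone restatement of the generator above: each set bit m of n contributes string * 2**m, so joining the yields reproduces string * n. Rewritten with a for loop for clarity; the names are illustrative.

def decompose(string, n):
    # Yield string * 2**m for every set bit m of n, least-significant bit first.
    bits = [int(x) for x in bin(n)[2:]]
    chunk = string
    for bit in reversed(bits):
        if bit:
            yield chunk
        chunk += chunk

print(''.join(decompose('AT', 5)) == 'AT' * 5)  # True: 5 = 2**0 + 2**2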
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	reverse_complement | 
	def reverse_complement(sequence, material):
    '''Reverse complement a sequence.
    :param sequence: Sequence to reverse complement
    :type sequence: str
    :param material: dna, rna, or peptide.
    :type material: str
    '''
    code = dict(COMPLEMENTS[material])
    reverse_sequence = sequence[::-1]
    return ''.join([code[str(base)] for base in reverse_sequence]) | 
	python | 
	def reverse_complement(sequence, material):
    '''Reverse complement a sequence.
    :param sequence: Sequence to reverse complement
    :type sequence: str
    :param material: dna, rna, or peptide.
    :type material: str
    '''
    code = dict(COMPLEMENTS[material])
    reverse_sequence = sequence[::-1]
    return ''.join([code[str(base)] for base in reverse_sequence]) | 
	Reverse complement a sequence.
    :param sequence: Sequence to reverse complement
    :type sequence: str
    :param material: dna, rna, or peptide.
    :type material: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L386-L396 | 
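A minimal, dependency-free sketch of the same operation; the complement table here covers only unambiguous DNA bases, whereas coral's COMPLEMENTS also handles ambiguity codes and other materials.

COMPLEMENTS = {'dna': {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}}

def reverse_complement(sequence, material):
    # Reverse the sequence, then substitute each base with its complement.
    code = COMPLEMENTS[material]
    return ''.join(code[base] for base in reversed(sequence))

print(reverse_complement('ATGC', 'dna'))  # GCAT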
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	check_alphabet | 
	def check_alphabet(seq, material):
    '''Verify that a given string is valid DNA, RNA, or peptide characters.
    :param seq: DNA, RNA, or peptide sequence.
    :type seq: str
    :param material: Input material - 'dna', 'rna', or 'peptide'.
    :type material: str
    :returns: Whether the `seq` is a valid string of `material`.
    :rtype: bool
    :raises: ValueError if `material` isn't \'dna\', \'rna\', or \'peptide\'.
             ValueError if `seq` contains invalid characters for its
             material type.
    '''
    errs = {'dna': 'DNA', 'rna': 'RNA', 'peptide': 'peptide'}
    if material == 'dna' or material == 'rna' or material == 'peptide':
        alphabet = ALPHABETS[material]
        err_msg = errs[material]
    else:
        msg = 'Input material must be \'dna\', \'rna\', or \'peptide\'.'
        raise ValueError(msg)
    # This is a bottleneck when modifying sequence - hence the run_checks
    # optional parameter in sequence objects..
    # First attempt with cython was slower. Could also try pypy.
    if re.search('[^' + alphabet + ']', seq):
        raise ValueError('Encountered a non-%s character' % err_msg) | 
	python | 
	def check_alphabet(seq, material):
    '''Verify that a given string is valid DNA, RNA, or peptide characters.
    :param seq: DNA, RNA, or peptide sequence.
    :type seq: str
    :param material: Input material - 'dna', 'rna', or 'peptide'.
    :type material: str
    :returns: Whether the `seq` is a valid string of `material`.
    :rtype: bool
    :raises: ValueError if `material` isn't \'dna\', \'rna\', or \'peptide\'.
             ValueError if `seq` contains invalid characters for its
             material type.
    '''
    errs = {'dna': 'DNA', 'rna': 'RNA', 'peptide': 'peptide'}
    if material == 'dna' or material == 'rna' or material == 'peptide':
        alphabet = ALPHABETS[material]
        err_msg = errs[material]
    else:
        msg = 'Input material must be \'dna\', \'rna\', or \'peptide\'.'
        raise ValueError(msg)
    # This is a bottleneck when modifying sequence - hence the run_checks
    # optional parameter in sequence objects..
    # First attempt with cython was slower. Could also try pypy.
    if re.search('[^' + alphabet + ']', seq):
        raise ValueError('Encountered a non-%s character' % err_msg) | 
	Verify that a given string is valid DNA, RNA, or peptide characters.
    :param seq: DNA, RNA, or peptide sequence.
    :type seq: str
    :param material: Input material - 'dna', 'rna', or 'peptide'.
    :type material: str
    :returns: Whether the `seq` is a valid string of `material`.
    :rtype: bool
    :raises: ValueError if `material` isn't \'dna\', \'rna\', or \'peptide\'.
             ValueError if `seq` contains invalid characters for its
             material type. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L399-L424 | 
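A trimmed-down sketch of the same regex-based check; the ALPHABETS entry here is an assumption covering only plain DNA characters, not coral's full alphabet tables.

import re

ALPHABETS = {'dna': 'ACGTNacgtn'}  # stand-in; coral's table also covers RNA and peptides

def check_alphabet(seq, material):
    if material not in ALPHABETS:
        raise ValueError("Input material must be 'dna', 'rna', or 'peptide'.")
    # Any character outside the allowed alphabet triggers the error.
    if re.search('[^' + ALPHABETS[material] + ']', seq):
        raise ValueError('Encountered a non-%s character' % material.upper())

check_alphabet('ATGC', 'dna')    # passes silently
# check_alphabet('ATXZ', 'dna')  # would raise ValueError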
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	process_seq | 
	def process_seq(seq, material):
    '''Validate and process sequence inputs.
    :param seq: input sequence
    :type seq: str
    :param material: DNA, RNA, or peptide
    :type material: str
    :returns: Uppercase version of `seq` with the alphabet checked by
              check_alphabet().
    :rtype: str
    '''
    check_alphabet(seq, material)
    seq = seq.upper()
    return seq | 
	python | 
	def process_seq(seq, material):
    '''Validate and process sequence inputs.
    :param seq: input sequence
    :type seq: str
    :param material: DNA, RNA, or peptide
    :type material: str
    :returns: Uppercase version of `seq` with the alphabet checked by
              check_alphabet().
    :rtype: str
    '''
    check_alphabet(seq, material)
    seq = seq.upper()
    return seq | 
	Validate and process sequence inputs.
    :param seq: input sequence
    :type seq: str
    :param material: DNA, RNA, or peptide
    :type material: str
    :returns: Uppercase version of `seq` with the alphabet checked by
              check_alphabet().
    :rtype: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L427-L441 | 
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	palindrome | 
	def palindrome(seq):
    '''Test whether a sequence is a palindrome.
    :param seq: Sequence to analyze (DNA or RNA).
    :type seq: coral.DNA or coral.RNA
    :returns: Whether a sequence is a palindrome.
    :rtype: bool
    '''
    seq_len = len(seq)
    if seq_len % 2 == 0:
        # Sequence has even number of bases, can test non-overlapping seqs
        wing = seq_len / 2
        l_wing = seq[0: wing]
        r_wing = seq[wing:]
        if l_wing == r_wing.reverse_complement():
            return True
        else:
            return False
    else:
        # Sequence has odd number of bases and cannot be a palindrome
        return False | 
	python | 
	def palindrome(seq):
    '''Test whether a sequence is a palindrome.
    :param seq: Sequence to analyze (DNA or RNA).
    :type seq: coral.DNA or coral.RNA
    :returns: Whether a sequence is a palindrome.
    :rtype: bool
    '''
    seq_len = len(seq)
    if seq_len % 2 == 0:
        # Sequence has even number of bases, can test non-overlapping seqs
        wing = seq_len / 2
        l_wing = seq[0: wing]
        r_wing = seq[wing:]
        if l_wing == r_wing.reverse_complement():
            return True
        else:
            return False
    else:
        # Sequence has odd number of bases and cannot be a palindrome
        return False | 
	Test whether a sequence is a palindrome.
    :param seq: Sequence to analyze (DNA or RNA).
    :type seq: coral.DNA or coral.RNA
    :returns: Whether a sequence is a palindrome.
    :rtype: bool | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L444-L465 | 
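A standalone version of the even-length reverse-complement check above, using Python 3's str.maketrans in place of coral's sequence objects; GAATTC (the EcoRI site) is a classic palindromic example.

def palindrome(seq):
    # A DNA palindrome equals its own reverse complement; odd lengths never qualify.
    if len(seq) % 2:
        return False
    wing = len(seq) // 2
    rc_right = seq[wing:].translate(str.maketrans('ATGC', 'TACG'))[::-1]
    return seq[:wing] == rc_right

print(palindrome('GAATTC'))  # True
print(palindrome('GATTC'))   # False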
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	Sequence.copy | 
	def copy(self):
        '''Create a copy of the current instance.
        :returns: A safely editable copy of the current sequence.
        '''
        # Significant performance improvements by skipping alphabet check
        return type(self)(self.seq, self.material, run_checks=False) | 
	python | 
	def copy(self):
        '''Create a copy of the current instance.
        :returns: A safely editable copy of the current sequence.
        '''
        # Significant performance improvements by skipping alphabet check
        return type(self)(self.seq, self.material, run_checks=False) | 
	Create a copy of the current instance.
        :returns: A safely editable copy of the current sequence. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L40-L47 | 
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	Sequence.locate | 
	def locate(self, pattern):
        '''Find sequences matching a pattern.
        :param pattern: Sequence for which to find matches.
        :type pattern: str
        :returns: Indices of pattern matches.
        :rtype: list of ints
        '''
        if len(pattern) > len(self):
            raise ValueError('Search pattern longer than searchable ' +
                             'sequence.')
        seq = self.seq
        pattern = str(pattern).upper()
        re_pattern = '(?=' + pattern + ')'
        matches = [index.start() % len(self) for index in
                   re.finditer(re_pattern, seq)]
        return matches | 
	python | 
	def locate(self, pattern):
        '''Find sequences matching a pattern.
        :param pattern: Sequence for which to find matches.
        :type pattern: str
        :returns: Indices of pattern matches.
        :rtype: list of ints
        '''
        if len(pattern) > len(self):
            raise ValueError('Search pattern longer than searchable ' +
                             'sequence.')
        seq = self.seq
        pattern = str(pattern).upper()
        re_pattern = '(?=' + pattern + ')'
        matches = [index.start() % len(self) for index in
                   re.finditer(re_pattern, seq)]
        return matches | 
	Find sequences matching a pattern.
        :param pattern: Sequence for which to find matches.
        :type pattern: str
        :returns: Indices of pattern matches.
        :rtype: list of ints | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L49-L68 | 
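The lookahead regex is what makes the matches overlapping. A standalone sketch on plain strings; the modulo by the sequence length in the method above matters only for coral's circular-sequence handling and is omitted here.

import re

def locate(seq, pattern):
    if len(pattern) > len(seq):
        raise ValueError('Search pattern longer than searchable sequence.')
    # The zero-width lookahead lets finditer report overlapping hits.
    return [m.start() for m in re.finditer('(?=' + pattern.upper() + ')', seq.upper())]

print(locate('atatat', 'ATA'))  # [0, 2] - overlapping hits are both reported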
| 
	klavinslab/coral | 
	coral/sequence/_sequence.py | 
	Feature.copy | 
	def copy(self):
        '''Return a copy of the Feature.
        :returns: A safely editable copy of the current feature.
        :rtype: coral.Feature
        '''
        return type(self)(self.name, self.start, self.stop, self.feature_type,
                          gene=self.gene, locus_tag=self.locus_tag,
                          qualifiers=self.qualifiers, strand=self.strand) | 
	python | 
	def copy(self):
        '''Return a copy of the Feature.
        :returns: A safely editable copy of the current feature.
        :rtype: coral.Feature
        '''
        return type(self)(self.name, self.start, self.stop, self.feature_type,
                          gene=self.gene, locus_tag=self.locus_tag,
                          qualifiers=self.qualifiers, strand=self.strand) | 
	Return a copy of the Feature.
        :returns: A safely editable copy of the current feature.
        :rtype: coral.Feature | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L334-L343 | 
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	nupack_multi | 
	def nupack_multi(seqs, material, cmd, arguments, report=True):
    '''Split Nupack commands over processors.
    :param seqs: List of sequences, same format as for coral.analysis.Nupack.
    :type seqs: list
    :param material: Input material: 'dna' or 'rna'.
    :type material: str
    :param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
    :type cmd: str
    :param arguments: Arguments for the command.
    :type arguments: str
    :returns: A list of the same return value you would get from `cmd`.
    :rtype: list
    '''
    nupack_pool = multiprocessing.Pool()
    try:
        args = [{'seq': seq,
                 'cmd': cmd,
                 'material': material,
                 'arguments': arguments} for seq in seqs]
        nupack_iterator = nupack_pool.imap(run_nupack, args)
        total = len(seqs)
        msg = ' calculations complete.'
        passed = 4
        while report:
            completed = nupack_iterator._index
            if (completed == total):
                break
            else:
                if passed >= 4:
                    print '({0}/{1}) '.format(completed, total) + msg
                    passed = 0
                passed += 1
                time.sleep(1)
        multi_output = [x for x in nupack_iterator]
        nupack_pool.close()
        nupack_pool.join()
    except KeyboardInterrupt:
        nupack_pool.terminate()
        nupack_pool.close()
        raise KeyboardInterrupt
    return multi_output | 
	python | 
	def nupack_multi(seqs, material, cmd, arguments, report=True):
    '''Split Nupack commands over processors.
    :param seqs: List of sequences, same format as for coral.analysis.Nupack.
    :type seqs: list
    :param material: Input material: 'dna' or 'rna'.
    :type material: str
    :param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
    :type cmd: str
    :param arguments: Arguments for the command.
    :type arguments: str
    :returns: A list of the same return value you would get from `cmd`.
    :rtype: list
    '''
    nupack_pool = multiprocessing.Pool()
    try:
        args = [{'seq': seq,
                 'cmd': cmd,
                 'material': material,
                 'arguments': arguments} for seq in seqs]
        nupack_iterator = nupack_pool.imap(run_nupack, args)
        total = len(seqs)
        msg = ' calculations complete.'
        passed = 4
        while report:
            completed = nupack_iterator._index
            if (completed == total):
                break
            else:
                if passed >= 4:
                    print '({0}/{1}) '.format(completed, total) + msg
                    passed = 0
                passed += 1
                time.sleep(1)
        multi_output = [x for x in nupack_iterator]
        nupack_pool.close()
        nupack_pool.join()
    except KeyboardInterrupt:
        nupack_pool.terminate()
        nupack_pool.close()
        raise KeyboardInterrupt
    return multi_output | 
	Split Nupack commands over processors.
    :param seqs: List of sequences, same format as for coral.analysis.Nupack.
    :type seqs: list
    :param material: Input material: 'dna' or 'rna'.
    :type material: str
    :param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
    :type cmd: str
    :param arguments: Arguments for the command.
    :type arguments: str
    :returns: A list of the same return value you would get from `cmd`.
    :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1490-L1533 | 
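The fan-out above relies on run_nupack being a module-level function that takes a single dict, so multiprocessing.Pool can pickle it; the progress report polls the pool iterator's private _index attribute. A generic, NUPACK-free sketch of the same pattern with a trivial stand-in worker:

import multiprocessing

def worker(kwargs):
    # Module-level and single-argument, so Pool can pickle it (like run_nupack).
    return kwargs['seq'][::-1]

if __name__ == '__main__':
    seqs = ['ATGC', 'GGCC', 'TTAA']
    pool = multiprocessing.Pool()
    try:
        results = pool.map(worker, [{'seq': s} for s in seqs])
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        # Mirror the cleanup in nupack_multi: kill workers before re-raising.
        pool.terminate()
        pool.close()
        raise
    print(results)  # ['CGTA', 'CCGG', 'AATT']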
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	run_nupack | 
	def run_nupack(kwargs):
    '''Run picklable Nupack command.
    :param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
    :returns: Variable - whatever `cmd` returns.
    '''
    run = NUPACK(kwargs['seq'])
    output = getattr(run, kwargs['cmd'])(**kwargs['arguments'])
    return output | 
	python | 
	def run_nupack(kwargs):
    '''Run picklable Nupack command.
    :param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
    :returns: Variable - whatever `cmd` returns.
    '''
    run = NUPACK(kwargs['seq'])
    output = getattr(run, kwargs['cmd'])(**kwargs['arguments'])
    return output | 
	Run picklable Nupack command.
    :param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
    :returns: Variable - whatever `cmd` returns. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1536-L1545 | 
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.pfunc_multi | 
	def pfunc_multi(self, strands, permutation=None, temp=37.0, pseudo=False,
                    material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the partition function for an ordered complex of strands.
        Runs the \'pfunc\' command.
        :param strands: List of strands to use as inputs to pfunc -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A 2-tuple of the free energy of the ordered complex
                  (float) and the partition function (float).
        :rtype: tuple
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        # Set up the input file and run the command
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        stdout = self._run('pfunc', cmd_args, lines).split('\n')
        return (float(stdout[-3]), float(stdout[-2])) | 
	python | 
	def pfunc_multi(self, strands, permutation=None, temp=37.0, pseudo=False,
                    material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the partition function for an ordered complex of strands.
        Runs the \'pfunc\' command.
        :param strands: List of strands to use as inputs to pfunc -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A 2-tuple of the free energy of the ordered complex
                  (float) and the partition function (float).
        :rtype: tuple
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        # Set up the input file and run the command
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        stdout = self._run('pfunc', cmd_args, lines).split('\n')
        return (float(stdout[-3]), float(stdout[-2])) | 
	Compute the partition function for an ordered complex of strands.
        Runs the \'pfunc\' command.
        :param strands: List of strands to use as inputs to pfunc -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A 2-tuple of the free energy of the ordered complex
                  (float) and the partition function (float).
        :rtype: tuple | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L97-L150 | 
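The return statement above slices the free energy and partition function out of the last two non-empty lines of pfunc's stdout. A toy illustration with a fabricated stdout string (real pfunc output carries a longer header):

stdout = '%% NUPACK pfunc header (placeholder)\n-12.34\n5.67e8\n'
lines = stdout.split('\n')   # the trailing newline leaves an empty final element
energy, pfunc = float(lines[-3]), float(lines[-2])
print(energy, pfunc)         # -12.34 567000000.0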
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.pairs | 
	def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False,
              material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strand: Strand on which to run pairs. Strands must be either
                       coral.DNA or coral.RNA).
        :type strand: list
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The probability matrix, where the (i, j)th entry
                  is the probability that base i is bound to base j. The matrix
                  is augmented (it's N+1 by N+1, where N is the number of bases
                  in the sequence) with an (N+1)th column containing the
                  probability that each base is unpaired.
        :rtype: numpy.array
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand)]
        self._run('pairs', cmd_args, lines)
        # Read the output from file
        ppairs = self._read_tempfile('pairs.ppairs')
        data = re.search('\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
        N = len(strand)
        data_lines = [line.split('\t') for line in data.split('\n') if line]
        prob_matrix = self._pairs_to_np(data_lines, N)
        return prob_matrix | 
	python | 
	def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False,
              material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strand: Strand on which to run pairs. Strands must be either
                       coral.DNA or coral.RNA).
        :type strand: list
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The probability matrix, where the (i, j)th entry
                  is the probability that base i is bound to base j. The matrix
                  is augmented (it's N+1 by N+1, where N is the number of bases
                  in the sequence) with an (N+1)th column containing the
                  probability that each base is unpaired.
        :rtype: numpy.array
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand)]
        self._run('pairs', cmd_args, lines)
        # Read the output from file
        ppairs = self._read_tempfile('pairs.ppairs')
        data = re.search('\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
        N = len(strand)
        data_lines = [line.split('\t') for line in data.split('\n') if line]
        prob_matrix = self._pairs_to_np(data_lines, N)
        return prob_matrix | 
	Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strand: Strand on which to run pairs. Strands must be either
                       coral.DNA or coral.RNA).
        :type strand: list
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The probability matrix, where the (i, j)th entry
                  is the probability that base i is bound to base j. The matrix
                  is augmented (it's N+1 by N+1, where N is the number of bases
                  in the sequence) with an (N+1)th column containing the
                  probability that each base is unpaired.
        :rtype: numpy.array | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L153-L213 | 
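_pairs_to_np itself is not shown in this record; the sketch below is one plausible reading of it based purely on the docstring (an augmented matrix whose last column holds the unpaired probabilities), with made-up sparse rows in NUPACK's 1-indexed (i, j, probability) format.

import numpy as np

def pairs_to_np(pairlist, dim):
    # Guessed behavior only: fill a (dim + 1) x (dim + 1) matrix from sparse rows;
    # j == dim + 1 means "base i is unpaired".
    mat = np.zeros((dim + 1, dim + 1))
    for i, j, prob in pairlist:
        mat[int(i) - 1, int(j) - 1] = float(prob)
    return mat

rows = [['1', '4', '0.9'], ['2', '3', '0.8'], ['1', '5', '0.1']]
print(pairs_to_np(rows, 4))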
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.pairs_multi | 
	def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
                    pseudo=False, material=None, dangles='some', sodium=1.0,
                    magnesium=0.0):
        '''Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strands: List of strands to use as inputs to pairs -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :returns: Two probability matrices: The probability matrix as in the
                  pairs method (but with a dimension equal to the sum of the
                  lengths of the sequences in the permutation), and a similar
                  probability matrix where multiple strands of the same species
                  are considered to be indistinguishable.
        :rtype: list
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        # Set up the input file and run the command. Note: no STDOUT
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        self._run('pairs', cmd_args, lines)
        # Read the output from file
        N = sum([len(s) for s in strands])
        matrices = []
        for mat_type in ['ppairs', 'epairs']:
            data = self._read_tempfile('pairs.' + mat_type)
            probs = re.search('\n\n\d*\n(.*)', data, flags=re.DOTALL).group(1)
            lines = probs.split('\n')
            # Remove the last line (empty)
            lines.pop()
            pairlist = [line.split('\t') for line in lines]
            prob_matrix = self._pairs_to_np(pairlist, N)
            matrices.append(prob_matrix)
        return matrices | 
	python | 
	def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
                    pseudo=False, material=None, dangles='some', sodium=1.0,
                    magnesium=0.0):
        '''Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strands: List of strands to use as inputs to pairs -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :returns: Two probability matrices: The probability matrix as in the
                  pairs method (but with a dimension equal to the sum of the
                  lengths of the sequences in the permutation), and a similar
                  probability matrix where multiple strands of the same species
                  are considered to be indistinguishable.
        :rtype: list
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        # Set up the input file and run the command. Note: no STDOUT
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        self._run('pairs', cmd_args, lines)
        # Read the output from file
        N = sum([len(s) for s in strands])
        matrices = []
        for mat_type in ['ppairs', 'epairs']:
            data = self._read_tempfile('pairs.' + mat_type)
            probs = re.search('\n\n\d*\n(.*)', data, flags=re.DOTALL).group(1)
            lines = probs.split('\n')
            # Remove the last line (empty)
            lines.pop()
            pairlist = [line.split('\t') for line in lines]
            prob_matrix = self._pairs_to_np(pairlist, N)
            matrices.append(prob_matrix)
        return matrices | 
	Compute the pair probabilities for an ordered complex of strands.
        Runs the \'pairs\' command.
        :param strands: List of strands to use as inputs to pairs -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param cutoff: Only probabilities above this cutoff appear in the
                       output.
        :type cutoff: float
        :returns: Two probability matrices: The probability matrix as in the
                  pairs method (but with a dimension equal to the sum of the
                  lengths of the sequences in the permutation), and a similar
                  probability matrix where multiple strands of the same species
                  are considered to be indistinguishable.
        :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L216-L290 | 
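The regex-and-split parsing above can be exercised on a tiny fabricated .ppairs body; the raw string below is a placeholder, not real NUPACK output.

import re

ppairs = 'header line\n\n4\n1\t4\t0.9\n2\t3\t0.8\n'
# Skip past the blank line and record count, keep everything after.
probs = re.search(r'\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
lines = probs.split('\n')
lines.pop()  # drop the empty string left by the trailing newline
pairlist = [line.split('\t') for line in lines]
print(pairlist)  # [['1', '4', '0.9'], ['2', '3', '0.8']]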
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.mfe | 
	def mfe(self, strand, degenerate=False, temp=37.0, pseudo=False,
            material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strand: Strand on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA).
        :type strand: coral.DNA or coral.RNA
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        if degenerate:
            cmd_args.append('-degenerate')
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand)]
        self._run('mfe', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('mfe.mfe'))
        if degenerate:
            return structures
        else:
            return structures[0] | 
	python | 
	def mfe(self, strand, degenerate=False, temp=37.0, pseudo=False,
            material=None, dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strand: Strand on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA).
        :type strand: coral.DNA or coral.RNA
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        if degenerate:
            cmd_args.append('-degenerate')
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand)]
        self._run('mfe', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('mfe.mfe'))
        if degenerate:
            return structures
        else:
            return structures[0] | 
	Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strand: Strand on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L293-L354 | 
| 
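A minimal usage sketch for NUPACK.mfe above, assuming coral is importable and the NUPACK executables are installed; the coral.analysis.NUPACK() constructor call and the example sequence are illustrative assumptions, not part of the row:

import coral

strand = coral.DNA('GCGCGCTTTTGCGCGC')   # made-up hairpin-forming sequence
nupack = coral.analysis.NUPACK()         # assumed default constructor
result = nupack.mfe(strand, temp=37.0, dangles='some')
print(result['mfe'])                     # minimum free energy (float)
print(result['dotparens'])               # MFE structure in dot-parens notation
print(result['pairlist'])                # pair list; empty if unstructured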
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.mfe_multi | 
	def mfe_multi(self, strands, permutation=None, degenerate=False, temp=37.0,
                  pseudo=False, material=None, dangles='some', sodium=1.0,
                  magnesium=0.0):
        '''Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strands: Strands on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        if degenerate:
            cmd_args.append('-degenerate')
        # Set up the input file and run the command. Note: no STDOUT
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        self._run('mfe', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('mfe.mfe'))
        if degenerate:
            return structures
        else:
            return structures[0] | 
	python | 
	def mfe_multi(self, strands, permutation=None, degenerate=False, temp=37.0,
                  pseudo=False, material=None, dangles='some', sodium=1.0,
                  magnesium=0.0):
        '''Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strands: Strands on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strands, material, multi=True)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=True)
        if degenerate:
            cmd_args.append('-degenerate')
        # Set up the input file and run the command. Note: no STDOUT
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        self._run('mfe', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('mfe.mfe'))
        if degenerate:
            return structures
        else:
            return structures[0] | 
	Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
        command.
        :param strands: Strands on which to run mfe. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :param degenerate: Setting to True will result in returning a list of
                           dictionaries associated with structures having the
                           same, minimal MFE value.
        :type degenerate: bool
        :returns: A dictionary with keys for 'mfe' (a float), 'dotparens'
                  (dot-parens notation of the MFE structure), and 'pairlist'
                  (a pair list notation of the MFE structure). Note that the
                  pair list will be an empty list if the MFE is unstructured.
        :rtype: dict | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L357-L427 | 
| 
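A sketch of NUPACK.mfe_multi on a two-strand complex, under the same assumptions as above (imports, constructor, and sequences are illustrative):

import coral

a = coral.DNA('ATGCATGCATGC')            # made-up sequences
b = coral.DNA('GCATGCATGCAT')
nupack = coral.analysis.NUPACK()         # assumed constructor
# Evaluate the complex in input order; [1, 2] is also the default permutation.
result = nupack.mfe_multi([a, b], permutation=[1, 2], temp=37.0)
print(result['mfe'], result['dotparens'])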
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.subopt | 
	def subopt(self, strand, gap, temp=37.0, pseudo=False, material=None,
               dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the suboptimal structures within a defined energy gap of the
        MFE. Runs the \'subopt\' command.
        :param strand: Strand on which to run subopt. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param gap: Energy gap within which to restrict results, e.g. 0.1.
        :type gap: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries of the type returned by .mfe().
        :rtype: list
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand), str(gap)]
        self._run('subopt', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('subopt.subopt'))
        return structures | 
	python | 
	def subopt(self, strand, gap, temp=37.0, pseudo=False, material=None,
               dangles='some', sodium=1.0, magnesium=0.0):
        '''Compute the suboptimal structures within a defined energy gap of the
        MFE. Runs the \'subopt\' command.
        :param strand: Strand on which to run subopt. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param gap: Energy gap within which to restrict results, e.g. 0.1.
        :type gap: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries of the type returned by .mfe().
        :rtype: list
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand), str(gap)]
        self._run('subopt', cmd_args, lines)
        # Read the output from file
        structures = self._process_mfe(self._read_tempfile('subopt.subopt'))
        return structures | 
	Compute the suboptimal structures within a defined energy gap of the
        MFE. Runs the \'subopt\' command.
        :param strand: Strand on which to run subopt. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param gap: Energy gap within which to restrict results, e.g. 0.1.
        :type gap: float
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries of the type returned by .mfe().
        :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L430-L481 | 
| 
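A sketch of NUPACK.subopt under the same assumptions; the 1.5 kcal/mol gap is an arbitrary example value:

import coral

strand = coral.DNA('GCGCGCTTTTGCGCGC')        # illustrative sequence
nupack = coral.analysis.NUPACK()              # assumed constructor
structures = nupack.subopt(strand, gap=1.5)   # structures within 1.5 of the MFE
for s in structures:
    print(s['mfe'], s['dotparens'])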
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.count | 
	def count(self, strand, pseudo=False):
        '''Enumerates the total number of secondary structures over the
        structural ensemble Ω(π). Runs the \'count\' command.
        :param strand: Strand on which to run count. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble.
        :rtype: int
        '''
        # Set up command flags
        if pseudo:
            cmd_args = ['-pseudo']
        else:
            cmd_args = []
        # Set up the input file and run the command
        stdout = self._run('count', cmd_args, [str(strand)]).split('\n')
        # Return the count
        return int(float(stdout[-2])) | 
	python | 
	def count(self, strand, pseudo=False):
        '''Enumerates the total number of secondary structures over the
        structural ensemble Ω(π). Runs the \'count\' command.
        :param strand: Strand on which to run count. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble.
        :rtype: int
        '''
        # Set up command flags
        if pseudo:
            cmd_args = ['-pseudo']
        else:
            cmd_args = []
        # Set up the input file and run the command
        stdout = self._run('count', cmd_args, [str(strand)]).split('\n')
        # Return the count
        return int(float(stdout[-2])) | 
	Enumerates the total number of secondary structures over the
        structural ensemble Ω(π). Runs the \'count\' command.
        :param strand: Strand on which to run count. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble.
        :rtype: int | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L548-L572 | 
| 
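A sketch of NUPACK.count under the same assumptions, with and without pseudoknots:

import coral

strand = coral.DNA('ATGCATGCATGC')        # illustrative sequence
nupack = coral.analysis.NUPACK()          # assumed constructor
n = nupack.count(strand)                  # size of the unpseudoknotted ensemble
n_pk = nupack.count(strand, pseudo=True)  # include pseudoknotted structures
print(n, n_pk)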
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.count_multi | 
	def count_multi(self, strands, permutation=None, pseudo=False):
        '''Enumerates the total number of secondary structures over the
        structural ensemble Ω(π) with an ordered permutation of strands. Runs
        the \'count\' command.
        :param strands: List of strands to use as inputs to count -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble for the ordered complex.
        :rtype: int
        '''
        # Set up command flags
        cmd_args = ['-multi']
        if pseudo:
            cmd_args.append('-pseudo')
        # Set up the input file and run the command
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        stdout = self._run('count', cmd_args, lines).split('\n')
        return int(float(stdout[-2])) | 
	python | 
	def count_multi(self, strands, permutation=None, pseudo=False):
        '''Enumerates the total number of secondary structures over the
        structural ensemble Ω(π) with an ordered permutation of strands. Runs
        the \'count\' command.
        :param strands: List of strands to use as inputs to count -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble for the ordered complex.
        :rtype: int
        '''
        # Set up command flags
        cmd_args = ['-multi']
        if pseudo:
            cmd_args.append('-pseudo')
        # Set up the input file and run the command
        if permutation is None:
            permutation = range(1, len(strands) + 1)
        lines = self._multi_lines(strands, permutation)
        stdout = self._run('count', cmd_args, lines).split('\n')
        return int(float(stdout[-2])) | 
	Enumerates the total number of secondary structures over the
        structural ensemble Ω(π) with an ordered permutation of strands. Runs
        the \'count\' command.
        :param strands: List of strands to use as inputs to count -multi.
        :type strands: list
        :param permutation: The circular permutation of strands to test in
                            complex. e.g. to test in the order that was input
                            for 4 strands, the permutation would be [1,2,3,4].
                            If set to None, defaults to the order of the
                            input strands.
        :type permutation: list
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :returns: The count of the number of structures in the structural
                  ensemble for the ordered complex.
        :rtype: int | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L575-L615 | 
| 
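A sketch of NUPACK.count_multi for an ordered two-strand complex, under the same assumptions:

import coral

a = coral.DNA('ATGCATGC')                # illustrative sequences
b = coral.DNA('GCATGCAT')
nupack = coral.analysis.NUPACK()         # assumed constructor
n = nupack.count_multi([a, b])           # default permutation [1, 2]
print(n)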
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.energy | 
	def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
               dangles='some', sodium=1.0, magnesium=0.0):
        '''Calculate the free energy of a given sequence structure. Runs the
        \'energy\' command.
        :param strand: Strand on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param dotparens: The structure in dotparens notation.
        :type dotparens: str
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The free energy of the sequence with the specified secondary
                  structure.
        :rtype: float
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand), dotparens]
        stdout = self._run('energy', cmd_args, lines).split('\n')
        # Return the energy
        return float(stdout[-2]) | 
	python | 
	def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
               dangles='some', sodium=1.0, magnesium=0.0):
        '''Calculate the free energy of a given sequence structure. Runs the
        \'energy\' command.
        :param strand: Strand on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param dotparens: The structure in dotparens notation.
        :type dotparens: str
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The free energy of the sequence with the specified secondary
                  structure.
        :rtype: float
        '''
        # Set the material (will be used to set command material flag)
        material = self._set_material(strand, material)
        # Set up command flags
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        # Set up the input file and run the command. Note: no STDOUT
        lines = [str(strand), dotparens]
        stdout = self._run('energy', cmd_args, lines).split('\n')
        # Return the energy
        return float(stdout[-2]) | 
	Calculate the free energy of a given sequence structure. Runs the
        \'energy\' command.
        :param strand: Strand on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strand: coral.DNA or coral.RNA
        :param dotparens: The structure in dotparens notation.
        :type dotparens: str
        :param temp: Temperature setting for the computation. Negative values
                     are not allowed.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: The free energy of the sequence with the specified secondary
                  structure.
        :rtype: float | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L618-L668 | 
| 
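A sketch of NUPACK.energy under the same assumptions; the dot-parens string must match the strand length (16 bases here), and the sequence is illustrative:

import coral

strand = coral.DNA('GCGCGCTTTTGCGCGC')   # illustrative 16-base sequence
nupack = coral.analysis.NUPACK()         # assumed constructor
dg = nupack.energy(strand, '((((((....))))))', temp=37.0)
print(dg)                                # free energy of that structure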
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.complexes | 
	def complexes(self, strands, max_size, ordered=False, pairs=False,
                  mfe=False, cutoff=0.001, degenerate=False, temp=37.0,
                  pseudo=False, material=None, dangles='some', sodium=1.0,
                  magnesium=0.0):
        '''
        :param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param degenerate: Applies only when \'mfe\' is set to True. If
                           set to True, the 'mfe' value associated with each
                           complex will be a list of degenerate MFEs (as in
                           the case of .mfe()).
        :type degenerate: bool
        :param temp: Temperature.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries containing at least 'energy',
                  'complex', and 'strands' keys. If 'ordered' is True, the
                  different possible ordered permutations of complexes are
                  considered. In addition, with 'ordered' set to True, an
                  additional 'order' key describing the exact order of strands
                  and a 'permutation' index (integer) are added. If 'pairs' is
                  True, there is an additional 'epairs' key containing the
                  base-pairing expectation values. If 'mfe' is selected,
                  'mfe', 'dotparens', and 'pairlist' keys are added, the same
                  as in .mfe(). In addition, 'mfe' sets the -ordered flag, so
                  the same keys as when 'ordered' is set to True are added.
        :rtype: list
        '''
        # TODO: Consider returning a pandas dataframe in this (and other)
        # situations to make sorting/selection between results easier.
        material = self._set_material(strands, material, multi=True)
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        cmd_args.append('-quiet')
        if mfe:
            cmd_args.append('-mfe')
            ordered = True
            if degenerate:
                cmd_args.append('-degenerate')
        if ordered:
            cmd_args.append('-ordered')
        if pairs:
            cmd_args.append('-pairs')
            cmd_args.append('-cutoff')
            cmd_args.append(cutoff)
        dim = sum([len(s) for s in strands])
        nstrands = len(strands)
        # Set up the input file and run the command
        lines = self._multi_lines(strands, [max_size])
        self._run('complexes', cmd_args, lines)
        # Read the output from file(s)
        if ordered:
            ocx_lines = self._read_tempfile('complexes.ocx').split('\n')
            # Process each lines
            output = []
            for line in ocx_lines:
                if line and not line.startswith('%'):
                    data = line.split('\t')
                    energy = float(data[-1])
                    complexes = [int(d) for d in data[2:2 + nstrands]]
                    permutation = int(data[1])
                    output.append({'energy': energy, 'complex': complexes,
                                   'permutation': permutation})
            key_lines = self._read_tempfile('complexes.ocx-key').split('\n')
            data_lines = [l for l in key_lines if not l.startswith('%')]
            data_lines.pop()
            for i, line in enumerate(data_lines):
                data = line.split('\t')
                keys = [int(d) for d in data[2:-1]]
                output[i]['order'] = keys
            if pairs:
                epairs_data = self._read_tempfile('complexes.ocx-epairs')
                pairslist = self._process_epairs(epairs_data)
                for i, pairs in enumerate(pairslist):
                    output[i]['epairs'] = self._pairs_to_np(pairs, dim)
                # TODO: add ocx-ppairs as well
            if mfe:
                mfe_data = self._read_tempfile('complexes.ocx-mfe')
                if degenerate:
                    raise NotImplementedError('Not implemented for complexes')
                else:
                    mfe_output = self._process_mfe(mfe_data, complexes=True)
                    for i, mfedat in enumerate(mfe_output):
                        output[i]['mfe'] = mfedat['mfe']
                        output[i]['dotparens'] = mfedat['dotparens']
                        output[i]['pairlist'] = mfedat['pairlist']
        else:
            cx_lines = self._read_tempfile('complexes.cx').split('\n')
            # Remove empty last line
            cx_lines.pop()
            output = []
            for line in cx_lines:
                if not line.startswith('%'):
                    data = line.split('\t')
                    energy = float(data[-1])
                    complexes = [int(d) for d in data[1:1 + len(strands)]]
                    output.append({'energy': energy, 'complex': complexes})
            if pairs:
                # Process epairs
                epairs_data = self._read_tempfile('complexes.cx-epairs')
                pairslist = self._process_epairs(epairs_data)
                for i, pairs in enumerate(pairslist):
                    proba_mat = self._pairs_to_np(pairs, dim)
                    output[i]['epairs'] = proba_mat
        # Add strands (for downstream concentrations)
        for cx in output:
            cx['strands'] = [s.copy() for s in strands]
        return output | 
	python | 
	def complexes(self, strands, max_size, ordered=False, pairs=False,
                  mfe=False, cutoff=0.001, degenerate=False, temp=37.0,
                  pseudo=False, material=None, dangles='some', sodium=1.0,
                  magnesium=0.0):
        '''
        :param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param degenerate: Applies only when \'mfe\' is set to True. If
                           set to True, the 'mfe' value associated with each
                           complex will be a list of degenerate MFEs (as in
                           the case of .mfe()).
        :type degenerate: bool
        :param temp: Temperature.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries containing at least 'energy',
                  'complex', and 'strands' keys. If 'ordered' is True, the
                  different possible ordered permutations of complexes are
                  considered. In addition, with 'ordered' set to True, an
                  additional 'order' key describing the exact order of strands
                  and a 'permutation' index (integer) are added. If 'pairs' is
                  True, there is an additional 'epairs' key containing the
                  base-pairing expectation values. If 'mfe' is selected,
                  'mfe', 'dotparens', and 'pairlist' keys are added, the same
                  as in .mfe(). In addition, 'mfe' sets the -ordered flag, so
                  the same keys as when 'ordered' is set to True are added.
        :rtype: list
        '''
        # TODO: Consider returning a pandas dataframe in this (and other)
        # situations to make sorting/selection between results easier.
        material = self._set_material(strands, material, multi=True)
        cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                       magnesium, multi=False)
        cmd_args.append('-quiet')
        if mfe:
            cmd_args.append('-mfe')
            ordered = True
            if degenerate:
                cmd_args.append('-degenerate')
        if ordered:
            cmd_args.append('-ordered')
        if pairs:
            cmd_args.append('-pairs')
            cmd_args.append('-cutoff')
            cmd_args.append(cutoff)
        dim = sum([len(s) for s in strands])
        nstrands = len(strands)
        # Set up the input file and run the command
        lines = self._multi_lines(strands, [max_size])
        self._run('complexes', cmd_args, lines)
        # Read the output from file(s)
        if ordered:
            ocx_lines = self._read_tempfile('complexes.ocx').split('\n')
            # Process each lines
            output = []
            for line in ocx_lines:
                if line and not line.startswith('%'):
                    data = line.split('\t')
                    energy = float(data[-1])
                    complexes = [int(d) for d in data[2:2 + nstrands]]
                    permutation = int(data[1])
                    output.append({'energy': energy, 'complex': complexes,
                                   'permutation': permutation})
            key_lines = self._read_tempfile('complexes.ocx-key').split('\n')
            data_lines = [l for l in key_lines if not l.startswith('%')]
            data_lines.pop()
            for i, line in enumerate(data_lines):
                data = line.split('\t')
                keys = [int(d) for d in data[2:-1]]
                output[i]['order'] = keys
            if pairs:
                epairs_data = self._read_tempfile('complexes.ocx-epairs')
                pairslist = self._process_epairs(epairs_data)
                for i, pairs in enumerate(pairslist):
                    output[i]['epairs'] = self._pairs_to_np(pairs, dim)
                # TODO: add ocx-ppairs as well
            if mfe:
                mfe_data = self._read_tempfile('complexes.ocx-mfe')
                if degenerate:
                    raise NotImplementedError('Not implemented for complexes')
                else:
                    mfe_output = self._process_mfe(mfe_data, complexes=True)
                    for i, mfedat in enumerate(mfe_output):
                        output[i]['mfe'] = mfedat['mfe']
                        output[i]['dotparens'] = mfedat['dotparens']
                        output[i]['pairlist'] = mfedat['pairlist']
        else:
            cx_lines = self._read_tempfile('complexes.cx').split('\n')
            # Remove empty last line
            cx_lines.pop()
            output = []
            for line in cx_lines:
                if not line.startswith('%'):
                    data = line.split('\t')
                    energy = float(data[-1])
                    complexes = [int(d) for d in data[1:1 + len(strands)]]
                    output.append({'energy': energy, 'complex': complexes})
            if pairs:
                # Process epairs
                epairs_data = self._read_tempfile('complexes.cx-epairs')
                pairslist = self._process_epairs(epairs_data)
                for i, pairs in enumerate(pairslist):
                    proba_mat = self._pairs_to_np(pairs, dim)
                    output[i]['epairs'] = proba_mat
        # Add strands (for downstream concentrations)
        for cx in output:
            cx['strands'] = [s.copy() for s in strands]
        return output | 
	:param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param degenerate: Applies only when \'mfe\' is set to True. If
                           set to True, the 'mfe' value associated with each
                           complex will be a list of degenerate MFEs (as in
                           the case of .mfe()).
        :type degenerate: bool
        :param temp: Temperature.
        :type temp: float
        :param pseudo: Enable pseudoknots.
        :type pseudo: bool
        :param material: The material setting to use in the computation. If set
                         to None (the default), the material type is inferred
                         from the strands. Other settings available: 'dna' for
                         DNA parameters, 'rna' for RNA (1995) parameters, and
                         'rna1999' for the RNA 1999 parameters.
        :type material: str
        :param dangles: How to treat dangles in the computation. From the
                        user guide: For \'none\': Dangle energies are ignored.
                        For \'some\': \'A dangle energy is incorporated for
                        each unpaired base flanking a duplex\'. For 'all': all
                        dangle energy is considered.
        :type dangles: str
        :param sodium: Sodium concentration in solution (molar), only applies
                       to DNA.
        :type sodium: float
        :param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
        :type magnesium: float
        :returns: A list of dictionaries containing at least 'energy',
                  'complex', and 'strands' keys. If 'ordered' is True, the
                  different possible ordered permutations of complexes are
                  considered. In addition, with 'ordered' set to True, an
                  additional 'order' key describing the exact order of strands
                  and a 'permutation' index (integer) are added. If 'pairs' is
                  True, there is an additional 'epairs' key containing the
                  base-pairing expectation values. If 'mfe' is selected,
                  'mfe', 'dotparens', and 'pairlist' keys are added, the same
                  as in .mfe(). In addition, 'mfe' sets the -ordered flag, so
                  the same keys as when 'ordered' is set to True are added.
        :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L976-L1124 | 
| 
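A sketch of NUPACK.complexes under the same assumptions; per the docstring, mfe=True implies -ordered, so each entry also carries 'dotparens' and 'pairlist' (sequences and constructor are illustrative):

import coral

a = coral.DNA('ATGCATGCATGC')            # illustrative sequences
b = coral.DNA('GCATGCATGCAT')
nupack = coral.analysis.NUPACK()         # assumed constructor
cxs = nupack.complexes([a, b], max_size=2, mfe=True)
for cx in cxs:
    print(cx['complex'], cx['energy'], cx['dotparens'])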
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.complexes_timeonly | 
	def complexes_timeonly(self, strands, max_size):
        '''Estimate the amount of time it will take to calculate all the
        partition functions for each circular permutation - estimate the time
        the actual \'complexes\' command will take to run.
        :param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :returns: The estimated time to run complexes' partition functions, in
                  seconds.
        :rtype: float
        '''
        cmd_args = ['-quiet', '-timeonly']
        lines = self._multi_lines(strands, [max_size])
        stdout = self._run('complexes', cmd_args, lines)
        return float(re.search(r'calculation: (.*) seconds', stdout).group(1)) | 
	python | 
	def complexes_timeonly(self, strands, max_size):
        '''Estimate the amount of time it will take to calculate all the
        partition functions for each circular permutation - estimate the time
        the actual \'complexes\' command will take to run.
        :param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :returns: The estimated time to run complexes' partition functions, in
                  seconds.
        :rtype: float
        '''
        cmd_args = ['-quiet', '-timeonly']
        lines = self._multi_lines(strands, [max_size])
        stdout = self._run('complexes', cmd_args, lines)
        return float(re.search(r'calculation: (.*) seconds', stdout).group(1)) | 
	Estimate the amount of time it will take to calculate all the
        partition functions for each circular permutation - estimate the time
        the actual \'complexes\' command will take to run.
        :param strands: Strands on which to run energy. Strands must be either
                       coral.DNA or coral.RNA.
        :type strands: list of coral.DNA or coral.RNA
        :param max_size: Maximum complex size to consider (maximum number of
                         strand species in complex).
        :type max_size: int
        :returns: The estimated time to run complexes' partition functions, in
                  seconds.
        :rtype: float | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1127-L1146 | 
| 
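A sketch of NUPACK.complexes_timeonly under the same assumptions, used to budget a larger complexes run before committing to it:

import coral

strands = [coral.DNA('ATGCATGCATGC'), coral.DNA('GCATGCATGCAT')]  # illustrative
nupack = coral.analysis.NUPACK()         # assumed constructor
seconds = nupack.complexes_timeonly(strands, max_size=3)
if seconds < 60:                         # only run the full job if it is quick
    cxs = nupack.complexes(strands, max_size=3)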
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.concentrations | 
	def concentrations(self, complexes, concs, ordered=False, pairs=False,
                       cutoff=0.001, temp=37.0):
        '''
        :param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param concs: The concentration(s) of each strand species in the
                      initial complex. If they are all the same, a single
                      float can be used here.
        :type concs: list of floats or float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries, each containing (at least) 'complex'
                  and 'concentration' keys. If 'pairs' is True, an 'fpairs'
                  key is added.
        :rtype: list
        '''
        # Check inputs
        nstrands = len(complexes[0]['strands'])
        try:
            if len(concs) != nstrands:
                raise ValueError('concs argument not same length as strands.')
        except TypeError:
            # A single float was given - use it for every strand species
            concs = [concs for i in range(nstrands)]
        # Set up command-line arguments
        cmd_args = ['-quiet']
        if ordered:
            cmd_args.append('-ordered')
        # Write .con file (one initial concentration per line)
        with open(os.path.join(self._tempdir, 'concentrations.con'), 'w') as f:
            f.writelines(str(conc) + '\n' for conc in concs)
        # Write .cx or .ocx file
        header = ['%t Number of strands: {}'.format(nstrands),
                  '%\tid\tsequence']
        for i, strand in enumerate(complexes[0]['strands']):
            header.append('%\t{}\t{}'.format(i + 1, strand))
        header.append('%\tT = {}'.format(temp))
        body = []
        for i, cx in enumerate(complexes):
            permutation = '\t'.join(str(s) for s in cx['complex'])
            line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
            body.append(line)
        if ordered:
            cxfile = os.path.join(self._tempdir, 'concentrations.ocx')
        else:
            cxfile = os.path.join(self._tempdir, 'concentrations.cx')
        with open(cxfile, 'w') as f:
            f.writelines(line + '\n' for line in header + body)
        # Run 'concentrations'
        self._run('concentrations', cmd_args, None)
        # Parse the .eq (concentrations) file
        eq_lines = self._read_tempfile('concentrations.eq').split('\n')
        tsv_lines = [l for l in eq_lines if l.strip() and not l.startswith('%')]
        output = []
        for line in tsv_lines:
            # It's a TSV
            data = line.split('\t')
            # Column 0 is an index
            # Columns 1 through nstrands are the complex
            cx = [int(c) for c in data[1:nstrands + 1]]
            # Column nstrands + 1 is the complex energy
            # Column nstrands + 2 is the equilibrium concentration
            eq = float(data[nstrands + 2])
            output.append({'complex': cx, 'concentration': eq})
        if pairs:
            # Read the .fpairs file
            fpairs_data = self._read_tempfile('concentrations.fpairs')
            pairs_tsv = [l for l in fpairs_data.split('\n')
                         if l.strip() and not l.startswith('%')]
            # Remove the first line (the matrix dimension)
            dim = int(pairs_tsv.pop(0))
            pprob = [[int(fields[0]), int(fields[1]), float(fields[2])]
                     for fields in (l.split('\t') for l in pairs_tsv)]
            # Convert to augmented numpy matrix
            fpairs_mat = self._pairs_to_np(pprob, dim)
            for i, out in enumerate(output):
                output[i]['fpairs'] = fpairs_mat
        return output | 
	python | 
	def concentrations(self, complexes, concs, ordered=False, pairs=False,
                       cutoff=0.001, temp=37.0):
        '''
        :param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param concs: The concentration(s) of each strand species in the
                      initial complex. If they are all the same, a single
                      float can be used here.
        :type concs: list of floats or float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries, each containing (at least) 'complex'
                  and 'concentration' keys. If 'pairs' is True, an 'fpairs'
                  key is added.
        :rtype: list
        '''
        # Check inputs
        nstrands = len(complexes[0]['strands'])
        try:
            if len(concs) != nstrands:
                raise ValueError('concs argument not same length as strands.')
        except TypeError:
            # A single float was given - use it for every strand species
            concs = [concs for i in range(nstrands)]
        # Set up command-line arguments
        cmd_args = ['-quiet']
        if ordered:
            cmd_args.append('-ordered')
        # Write .con file (one initial concentration per line)
        with open(os.path.join(self._tempdir, 'concentrations.con'), 'w') as f:
            f.writelines(str(conc) + '\n' for conc in concs)
        # Write .cx or .ocx file
        header = ['%t Number of strands: {}'.format(nstrands),
                  '%\tid\tsequence']
        for i, strand in enumerate(complexes[0]['strands']):
            header.append('%\t{}\t{}'.format(i + 1, strand))
        header.append('%\tT = {}'.format(temp))
        body = []
        for i, cx in enumerate(complexes):
            permutation = '\t'.join(str(s) for s in cx['complex'])
            line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
            body.append(line)
        if ordered:
            cxfile = os.path.join(self._tempdir, 'concentrations.ocx')
        else:
            cxfile = os.path.join(self._tempdir, 'concentrations.cx')
        with open(cxfile, 'w') as f:
            f.writelines(line + '\n' for line in header + body)
        # Run 'concentrations'
        self._run('concentrations', cmd_args, None)
        # Parse the .eq (concentrations) file
        eq_lines = self._read_tempfile('concentrations.eq').split('\n')
        tsv_lines = [l for l in eq_lines if l.strip() and not l.startswith('%')]
        output = []
        for line in tsv_lines:
            # It's a TSV
            data = line.split('\t')
            # Column 0 is an index
            # Columns 1 through nstrands are the complex
            cx = [int(c) for c in data[1:nstrands + 1]]
            # Column nstrands + 1 is the complex energy
            # Column nstrands + 2 is the equilibrium concentration
            eq = float(data[nstrands + 2])
            output.append({'complex': cx, 'concentration': eq})
        if pairs:
            # Read the .fpairs file
            fpairs_data = self._read_tempfile('concentrations.fpairs')
            pairs_tsv = [l for l in fpairs_data.split('\n')
                         if l.strip() and not l.startswith('%')]
            # Remove the first line (the matrix dimension)
            dim = int(pairs_tsv.pop(0))
            pprob = [[int(fields[0]), int(fields[1]), float(fields[2])]
                     for fields in (l.split('\t') for l in pairs_tsv)]
            # Convert to augmented numpy matrix
            fpairs_mat = self._pairs_to_np(pprob, dim)
            for i, out in enumerate(output):
                output[i]['fpairs'] = fpairs_mat
        return output | 
	:param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param concs: The concentration(s) of each strand species in the
                      initial complex. If they are all the same, a single
                      float can be used here.
        :type concs: list of floats or float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param pairs: Calculate base-pairing observables as with .pairs().
        :type pairs: bool
        :param cutoff: A setting when pairs is set to True - only probabilities
                       above this threshold will be returned.
        :type cutoff: float
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries, each containing (at least) 'complex'
                  and 'concentration' keys. If 'pairs' is True, an 'fpairs'
                  key is added.
        :rtype: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1149-L1242 | 
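A hedged sketch of chaining the complexes() output (documented elsewhere in this table) into concentrations(); the complexes() call, the no-argument NUPACK() constructor, and the 1 uM initial concentration are illustrative assumptions:

import coral
from coral.analysis import NUPACK

strands = [coral.DNA('ATGCATGCATGC'), coral.DNA('GCATGCATGCAT')]
nupack = NUPACK()
cx = nupack.complexes(strands, max_size=2)
# A single float is broadcast to every strand species.
results = nupack.concentrations(cx, concs=1e-6)
for entry in results:
    print(entry['complex'], entry['concentration'])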
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK.distributions | 
	def distributions(self, complexes, counts, volume, maxstates=1e7,
                      ordered=False, temp=37.0):
        '''Runs the \'distributions\' NUPACK command. Note: this is intended
        for a relatively small number of species (on the order of ~20
        total strands for complex size ~14).
        :param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param counts: A list of the exact number of molecules of each initial
                       species (the strands in the complexes command).
        :type counts: list of ints
        :param volume: The volume, in liters, of the container.
        :type volume: float
        :param maxstates: Maximum number of states to be enumerated, needed
                          as allowing too many states can lead to a segfault.
                          In NUPACK, this is referred to as lambda.
        :type maxstates: float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries containing (at least) a 'complex'
                  key for the unique complex, an 'ev' key for the expected
                  value of the complex population and a 'probcols' list
                  indicating the probability that a given complex has
                  population 0, 1, ... max(pop) at equilibrium.
        :rtype: list
        :raises: LambdaError if maxstates is exceeded.
        '''
        # Check inputs
        nstrands = len(complexes[0]['strands'])
        if len(counts) != nstrands:
            raise ValueError('counts argument not same length as strands.')
        # Set up command-line arguments
        cmd_args = []
        if ordered:
            cmd_args.append('-ordered')
        # Write .count file (one count per line, followed by the volume)
        countpath = os.path.join(self._tempdir, 'distributions.count')
        count_lines = [str(c) for c in counts] + [str(volume)]
        with open(countpath, 'w') as f:
            f.writelines(line + '\n' for line in count_lines)
        # Write .cx or .ocx file
        header = ['%t Number of strands: {}'.format(nstrands),
                  '%\tid\tsequence']
        for i, strand in enumerate(complexes[0]['strands']):
            header.append('%\t{}\t{}'.format(i + 1, strand))
        header.append('%\tT = {}'.format(temp))
        body = []
        for i, cx in enumerate(complexes):
            permutation = '\t'.join(str(s) for s in cx['complex'])
            line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
            body.append(line)
        if ordered:
            cxfile = os.path.join(self._tempdir, 'distributions.ocx')
        else:
            cxfile = os.path.join(self._tempdir, 'distributions.cx')
        with open(cxfile, 'w') as f:
            f.writelines(line + '\n' for line in header + body)
        # Run 'distributions'
        stdout = self._run('distributions', cmd_args, None)
        # Parse STDOUT
        stdout_lines = stdout.split('\n')
        if stdout_lines[0].startswith('Exceeded maximum number'):
            raise LambdaError('Exceeded maxstates combinations.')
        # pop_search = re.search('There are (*) pop', stdout_lines[0]).group(1)
        # populations = int(pop_search)
        # kT_search = re.search('of the box: (*) kT', stdout_lines[1]).group(1)
        # kT = float(kT_search)
        # Parse .dist file (comments header + TSV)
        dist_lines = self._read_tempfile('distributions.dist').split('\n')
        tsv_lines = [l for l in dist_lines if l.strip() and not l.startswith('%')]
        output = []
        for line in tsv_lines:
            data = line.split('\t')
            # Column 0 is an index
            # Columns 1 through nstrands are the complex
            cx = [int(d) for d in data[1:nstrands + 1]]
            # Column nstrands + 1 is the expected value of the complex
            ev = float(data[nstrands + 1])
            # Columns nstrands + 2 and on are probability columns
            probcols = [float(d) for d in data[nstrands + 2:]]
            output.append({'complex': cx, 'ev': ev, 'probcols': probcols})
        return output | 
	python | 
	def distributions(self, complexes, counts, volume, maxstates=1e7,
                      ordered=False, temp=37.0):
        '''Runs the \'distributions\' NUPACK command. Note: this is intended
        for a relatively small number of species (on the order of ~20
        total strands for complex size ~14).
        :param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param counts: A list of the exact number of molecules of each initial
                       species (the strands in the complexes command).
        :type counts: list of ints
        :param volume: The volume, in liters, of the container.
        :type volume: float
        :param maxstates: Maximum number of states to be enumerated, needed
                          as allowing too many states can lead to a segfault.
                          In NUPACK, this is referred to as lambda.
        :type maxstates: float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries containing (at least) a 'complex'
                  key for the unique complex, an 'ev' key for the expected
                  value of the complex population and a 'probcols' list
                  indicating the probability that a given complex has
                  population 0, 1, ... max(pop) at equilibrium.
        :rtype: list
        :raises: LambdaError if maxstates is exceeded.
        '''
        # Check inputs
        nstrands = len(complexes[0]['strands'])
        if len(counts) != nstrands:
            raise ValueError('counts argument not same length as strands.')
        # Set up command-line arguments
        cmd_args = []
        if ordered:
            cmd_args.append('-ordered')
        # Write .count file (one count per line, followed by the volume)
        countpath = os.path.join(self._tempdir, 'distributions.count')
        count_lines = [str(c) for c in counts] + [str(volume)]
        with open(countpath, 'w') as f:
            f.writelines(line + '\n' for line in count_lines)
        # Write .cx or .ocx file
        header = ['%t Number of strands: {}'.format(nstrands),
                  '%\tid\tsequence']
        for i, strand in enumerate(complexes[0]['strands']):
            header.append('%\t{}\t{}'.format(i + 1, strand))
        header.append('%\tT = {}'.format(temp))
        body = []
        for i, cx in enumerate(complexes):
            permutation = '\t'.join(str(s) for s in cx['complex'])
            line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
            body.append(line)
        if ordered:
            cxfile = os.path.join(self._tempdir, 'distributions.ocx')
        else:
            cxfile = os.path.join(self._tempdir, 'distributions.cx')
        with open(cxfile, 'w') as f:
            f.writelines(line + '\n' for line in header + body)
        # Run 'distributions'
        stdout = self._run('distributions', cmd_args, None)
        # Parse STDOUT
        stdout_lines = stdout.split('\n')
        if stdout_lines[0].startswith('Exceeded maximum number'):
            raise LambdaError('Exceeded maxstates combinations.')
        # pop_search = re.search('There are (*) pop', stdout_lines[0]).group(1)
        # populations = int(pop_search)
        # kT_search = re.search('of the box: (*) kT', stdout_lines[1]).group(1)
        # kT = float(kT_search)
        # Parse .dist file (comments header + TSV)
        dist_lines = self._read_tempfile('distributions.dist').split('\n')
        tsv_lines = [l for l in dist_lines if l.strip() and not l.startswith('%')]
        output = []
        for line in tsv_lines:
            data = line.split('\t')
            # Column 0 is an index
            # Columns 1 through nstrands are the complex
            cx = [int(d) for d in data[1:nstrands + 1]]
            # Column nstrands + 1 is the expected value of the complex
            ev = float(data[nstrands + 1])
            # Columns nstrands + 2 and on are probability columns
            probcols = [float(d) for d in data[nstrands + 2:]]
            output.append({'complex': cx, 'ev': ev, 'probcols': probcols})
        return output | 
	Runs the \'distributions\' NUPACK command. Note: this is intended
        for a relatively small number of species (on the order of ~20
        total strands for complex size ~14).
        :param complexes: A list of the type returned by the complexes()
                          method.
        :type complexes: list
        :param counts: A list of the exact number of molecules of each initial
                       species (the strands in the complexes command).
        :type counts: list of ints
        :param volume: The volume, in liters, of the container.
        :type volume: float
        :param maxstates: Maximum number of states to be enumerated, needed
                          as allowing too many states can lead to a segfault.
                          In NUPACK, this is referred to as lambda.
        :type maxstates: float
        :param ordered: Consider distinct ordered complexes - all distinct
                        circular permutations of each complex.
        :type ordered: bool
        :param temp: Temperature in C.
        :type temp: float
        :returns: A list of dictionaries containing (at least) a 'complex'
                  key for the unique complex, an 'ev' key for the expected
                  value of the complex population and a 'probcols' list
                  indicating the probability that a given complex has
                  population 0, 1, ... max(pop) at equilibrium.
        :rtype: list
        :raises: LambdaError if maxstates is exceeded. | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1245-L1343 | 
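A hedged sketch of distributions() on the same complexes() output; the exact counts, the 1 fL volume, and the no-argument NUPACK() constructor are illustrative assumptions only:

import coral
from coral.analysis import NUPACK

strands = [coral.DNA('ATGCATGCATGC'), coral.DNA('GCATGCATGCAT')]
nupack = NUPACK()
cx = nupack.complexes(strands, max_size=2)
# Ten molecules of each strand species in a 1 fL box.
dists = nupack.distributions(cx, counts=[10, 10], volume=1e-15)
for entry in dists:
    print(entry['complex'], entry['ev'], entry['probcols'])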
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK._multi_lines | 
	def _multi_lines(self, strands, permutation):
        '''Prepares lines to write to file for pfunc command input.
        :param strands: Strand input (cr.DNA or cr.RNA).
        :type strands: list of cr.DNA or cr.RNA
        :param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
                            by pfunc_multi.
        :type permutation: list
        '''
        lines = []
        # Write the total number of distinct strands
        lines.append(str(len(strands)))
        # Write the distinct strands
        lines += [str(strand) for strand in strands]
        # Write the permutation
        lines.append(' '.join(str(p) for p in permutation))
        return lines | 
	python | 
	def _multi_lines(self, strands, permutation):
        '''Prepares lines to write to file for pfunc command input.
        :param strands: Strand input (cr.DNA or cr.RNA).
        :type strands: list of cr.DNA or cr.RNA
        :param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
                            by pfunc_multi.
        :type permutation: list
        '''
        lines = []
        # Write the total number of distinct strands
        lines.append(str(len(strands)))
        # Write the distinct strands
        lines += [str(strand) for strand in strands]
        # Write the permutation
        lines.append(' '.join(str(p) for p in permutation))
        return lines | 
	Prepares lines to write to file for pfunc command input.
        :param strands: Strand input (cr.DNA or cr.RNA).
        :type strands: list of cr.DNA or cr.RNA
        :param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
                            by pfunc_multi.
        :type permutation: list | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1346-L1363 | 
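_multi_lines is a private helper; this short sketch shows the input block it builds for the multi-strand NUPACK executables, assuming the no-argument NUPACK() constructor and illustrative sequences:

import coral
from coral.analysis import NUPACK

nupack = NUPACK()
strands = [coral.DNA('ATGC'), coral.DNA('GCAT')]
lines = nupack._multi_lines(strands, [1, 2])
# lines == ['2', 'ATGC', 'GCAT', '1 2']
# (strand count, then one sequence per line, then the permutation)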
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK._read_tempfile | 
	def _read_tempfile(self, filename):
        '''Read in and return file that's in the tempdir.
        :param filename: Name of the file to read.
        :type filename: str
        '''
        with open(os.path.join(self._tempdir, filename)) as f:
            return f.read() | 
	python | 
	def _read_tempfile(self, filename):
        '''Read in and return file that's in the tempdir.
        :param filename: Name of the file to read.
        :type filename: str
        '''
        with open(os.path.join(self._tempdir, filename)) as f:
            return f.read() | 
	Read in and return file that's in the tempdir.
        :param filename: Name of the file to read.
        :type filename: str | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1366-L1374 | 
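Another private helper; this sketch assumes a prior NUPACK run (for example 'concentrations') has already written output into the instance's temporary directory, and that NUPACK() can be constructed with defaults:

from coral.analysis import NUPACK

nupack = NUPACK()
# Read back raw text a NUPACK executable wrote into self._tempdir.
eq_text = nupack._read_tempfile('concentrations.eq')
data_lines = [l for l in eq_text.split('\n') if l and not l.startswith('%')]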
| 
	klavinslab/coral | 
	coral/analysis/_structure/nupack.py | 
	NUPACK._pairs_to_np | 
	def _pairs_to_np(self, pairlist, dim):
        '''Given a set of pair probability lines, construct a numpy array.
        :param pairlist: a list of pair probability triples
        :type pairlist: list
        :returns: An upper triangular matrix of pair probabilities augmented
                  with one extra column that represents the unpaired
                  probabilities.
        :rtype: numpy.array
        '''
        mat = np.zeros((dim, dim + 1))
        for line in pairlist:
            i = int(line[0]) - 1
            j = int(line[1]) - 1
            prob = float(line[2])
            mat[i, j] = prob
        return mat | 
	python | 
	def _pairs_to_np(self, pairlist, dim):
        '''Given a set of pair probability lines, construct a numpy array.
        :param pairlist: a list of pair probability triples
        :type pairlist: list
        :returns: An upper triangular matrix of pair probabilities augmented
                  with one extra column that represents the unpaired
                  probabilities.
        :rtype: numpy.array
        '''
        mat = np.zeros((dim, dim + 1))
        for line in pairlist:
            i = int(line[0]) - 1
            j = int(line[1]) - 1
            prob = float(line[2])
            mat[i, j] = prob
        return mat | 
	Given a set of pair probability lines, construct a numpy array.
        :param pairlist: a list of pair probability triples
        :type pairlist: list
        :returns: An upper triangular matrix of pair probabilities augmented
                  with one extra column that represents the unpaired
                  probabilities.
        :rtype: numpy.array | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1376-L1393 | 
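A small worked example of the augmented matrix _pairs_to_np builds: with dim = 3, a 1-indexed pair (i, 4) lands in the extra fourth column that holds the unpaired probabilities. The triples below are made up for illustration, and the no-argument NUPACK() constructor is assumed:

from coral.analysis import NUPACK

nupack = NUPACK()
triples = [[1, 2, 0.9], [1, 4, 0.1], [2, 4, 0.1], [3, 4, 1.0]]
mat = nupack._pairs_to_np(triples, dim=3)
# mat has shape (3, 4); mat[0, 1] == 0.9, and mat[:, 3] holds unpaired probabilities.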
| 
	klavinslab/coral | 
	coral/sequence/_dna.py | 
	_flip_feature | 
	def _flip_feature(self, feature, parent_len):
    '''Adjust a feature's location when flipping DNA.
    :param feature: The feature to flip.
    :type feature: coral.Feature
    :param parent_len: The length of the sequence to which the feature belongs.
    :type parent_len: int
    '''
    copy = feature.copy()
    # Put on the other strand
    if copy.strand == 0:
        copy.strand = 1
    else:
        copy.strand = 0
    # Adjust locations - guarantee that start is always less than end
    copy.start = parent_len - copy.start
    copy.stop = parent_len - copy.stop
    copy.start, copy.stop = copy.stop, copy.start
    return copy | 
	python | 
	def _flip_feature(self, feature, parent_len):
    '''Adjust a feature's location when flipping DNA.
    :param feature: The feature to flip.
    :type feature: coral.Feature
    :param parent_len: The length of the sequence to which the feature belongs.
    :type parent_len: int
    '''
    copy = feature.copy()
    # Put on the other strand
    if copy.strand == 0:
        copy.strand = 1
    else:
        copy.strand = 0
    # Adjust locations - guarantee that start is always less than end
    copy.start = parent_len - copy.start
    copy.stop = parent_len - copy.stop
    copy.start, copy.stop = copy.stop, copy.start
    return copy | 
	Adjust a feature's location when flipping DNA.
    :param feature: The feature to flip.
    :type feature: coral.Feature
    :param parent_len: The length of the sequence to which the feature belongs.
    :type parent_len: int | 
	https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_dna.py#L734-L753 | 
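A hedged sketch of _flip_feature. It is a private function in coral.sequence._dna whose leading self argument is unused in the body (None is passed here), and the Feature constructor arguments shown are illustrative assumptions:

import coral
from coral.sequence._dna import _flip_feature

feat = coral.Feature('promoter', 10, 40, 'misc_feature')
flipped = _flip_feature(None, feat, parent_len=100)
# start/stop become 100 - 40 = 60 and 100 - 10 = 90, and the strand is toggled.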