signature (stringlengths 8–3.44k) | body (stringlengths 0–1.41M) | docstring (stringlengths 1–122k) | id (stringlengths 5–17) |
---|---|---|---|
@property<EOL><INDENT>def canvasCharHeight(self):<DEDENT> | return self.visibleBox.h*<NUM_LIT:4>/self.plotviewBox.h<EOL> | Height in canvas units of a single char in the terminal | f1838:c3:m11 |
def polyline(self, vertexes, attr=<NUM_LIT:0>, row=None): | self.polylines.append((vertexes, attr, row))<EOL> | adds lines for (x,y) vertexes of a polygon | f1838:c3:m16 |
def polygon(self, vertexes, attr=<NUM_LIT:0>, row=None): | self.polylines.append((vertexes + [vertexes[<NUM_LIT:0>]], attr, row))<EOL> | adds lines for (x,y) vertexes of a polygon | f1838:c3:m17 |
def qcurve(self, vertexes, attr=<NUM_LIT:0>, row=None): | assert len(vertexes) == <NUM_LIT:3>, len(vertexes)<EOL>x1, y1 = vertexes[<NUM_LIT:0>]<EOL>x2, y2 = vertexes[<NUM_LIT:1>]<EOL>x3, y3 = vertexes[<NUM_LIT:2>]<EOL>self.point(x1, y1, attr, row)<EOL>self._recursive_bezier(x1, y1, x2, y2, x3, y3, attr, row)<EOL>self.point(x3, y3, attr, row)<EOL> | quadratic curve from vertexes[0] to vertexes[2] with control point at vertexes[1] | f1838:c3:m18 |
def _recursive_bezier(self, x1, y1, x2, y2, x3, y3, attr, row, level=<NUM_LIT:0>): | m_approximation_scale = <NUM_LIT><EOL>m_distance_tolerance = (<NUM_LIT:0.5> / m_approximation_scale) ** <NUM_LIT:2><EOL>m_angle_tolerance = <NUM_LIT:1> * <NUM_LIT:2>*math.pi/<NUM_LIT> <EOL>curve_angle_tolerance_epsilon = <NUM_LIT><EOL>curve_recursion_limit = <NUM_LIT:32><EOL>curve_collinearity_epsilon = <NUM_LIT><EOL>if level > curve_recursion_limit:<EOL><INDENT>return<EOL><DEDENT>x12 = (x1 + x2) / <NUM_LIT:2><EOL>y12 = (y1 + y2) / <NUM_LIT:2><EOL>x23 = (x2 + x3) / <NUM_LIT:2><EOL>y23 = (y2 + y3) / <NUM_LIT:2><EOL>x123 = (x12 + x23) / <NUM_LIT:2><EOL>y123 = (y12 + y23) / <NUM_LIT:2><EOL>dx = x3-x1<EOL>dy = y3-y1<EOL>d = abs(((x2 - x3) * dy - (y2 - y3) * dx))<EOL>if d > curve_collinearity_epsilon:<EOL><INDENT>if d*d <= m_distance_tolerance * (dx*dx + dy*dy):<EOL><INDENT>if m_angle_tolerance < curve_angle_tolerance_epsilon:<EOL><INDENT>self.point(x123, y123, attr, row)<EOL>return<EOL><DEDENT>da = abs(math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))<EOL>if da >= math.pi:<EOL><INDENT>da = <NUM_LIT:2>*math.pi - da<EOL><DEDENT>if da < m_angle_tolerance:<EOL><INDENT>self.point(x123, y123, attr, row)<EOL>return<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>dx = x123 - (x1 + x3) / <NUM_LIT:2><EOL>dy = y123 - (y1 + y3) / <NUM_LIT:2><EOL>if dx*dx + dy*dy <= m_distance_tolerance:<EOL><INDENT>self.point(x123, y123, attr, row)<EOL>return<EOL><DEDENT><DEDENT>self._recursive_bezier(x1, y1, x12, y12, x123, y123, attr, row, level + <NUM_LIT:1>)<EOL>self._recursive_bezier(x123, y123, x23, y23, x3, y3, attr, row, level + <NUM_LIT:1>)<EOL> | from http://www.antigrain.com/research/adaptive_bezier/ | f1838:c3:m19 |
def fixPoint(self, plotterPoint, canvasPoint): | self.visibleBox.xmin = canvasPoint.x - self.canvasW(plotterPoint.x-self.plotviewBox.xmin)<EOL>self.visibleBox.ymin = canvasPoint.y - self.canvasH(plotterPoint.y-self.plotviewBox.ymin)<EOL>self.refresh()<EOL> | adjust visibleBox.xymin so that canvasPoint is plotted at plotterPoint | f1838:c3:m21 |
def zoomTo(self, bbox): | self.fixPoint(self.plotviewBox.xymin, bbox.xymin)<EOL>self.zoomlevel=max(bbox.w/self.canvasBox.w, bbox.h/self.canvasBox.h)<EOL> | set visible area to bbox, maintaining aspectRatio if applicable | f1838:c3:m22 |
def checkCursor(self): | if self.cursorBox:<EOL><INDENT>if self.cursorBox.h < self.canvasCharHeight:<EOL><INDENT>self.cursorBox.h = self.canvasCharHeight*<NUM_LIT:3>/<NUM_LIT:4><EOL><DEDENT>if self.cursorBox.w < self.canvasCharWidth:<EOL><INDENT>self.cursorBox.w = self.canvasCharWidth*<NUM_LIT:3>/<NUM_LIT:4><EOL><DEDENT><DEDENT>return False<EOL> | override Sheet.checkCursor | f1838:c3:m26 |
def scaleX(self, x): | return round(self.plotviewBox.xmin+(x-self.visibleBox.xmin)*self.xScaler)<EOL> | returns plotter x coordinate | f1838:c3:m29 |
def scaleY(self, y): | return round(self.plotviewBox.ymin+(y-self.visibleBox.ymin)*self.yScaler)<EOL> | returns plotter y coordinate | f1838:c3:m30 |
def canvasW(self, plotter_width): | return plotter_width/self.xScaler<EOL> | plotter X units to canvas units | f1838:c3:m31 |
def canvasH(self, plotter_height): | return plotter_height/self.yScaler<EOL> | plotter Y units to canvas units | f1838:c3:m32 |
def refresh(self): | self.needsRefresh = True<EOL> | triggers render() on next draw() | f1838:c3:m33 |
def render(self, h, w): | self.needsRefresh = False<EOL>cancelThread(*(t for t in self.currentThreads if t.name == '<STR_LIT>'))<EOL>self.labels.clear()<EOL>self.resetCanvasDimensions(h, w)<EOL>self.render_async()<EOL> | resets plotter, cancels previous render threads, spawns a new render | f1838:c3:m34 |
def render_sync(self): | self.setZoom()<EOL>bb = self.visibleBox<EOL>xmin, ymin, xmax, ymax = bb.xmin, bb.ymin, bb.xmax, bb.ymax<EOL>xfactor, yfactor = self.xScaler, self.yScaler<EOL>plotxmin, plotymin = self.plotviewBox.xmin, self.plotviewBox.ymin<EOL>for vertexes, attr, row in Progress(self.polylines, '<STR_LIT>'):<EOL><INDENT>if len(vertexes) == <NUM_LIT:1>: <EOL><INDENT>x1, y1 = vertexes[<NUM_LIT:0>]<EOL>x1, y1 = float(x1), float(y1)<EOL>if xmin <= x1 <= xmax and ymin <= y1 <= ymax:<EOL><INDENT>x = plotxmin+(x1-xmin)*xfactor<EOL>y = plotymin+(y1-ymin)*yfactor<EOL>self.plotpixel(round(x), round(y), attr, row)<EOL><DEDENT>continue<EOL><DEDENT>prev_x, prev_y = vertexes[<NUM_LIT:0>]<EOL>for x, y in vertexes[<NUM_LIT:1>:]:<EOL><INDENT>r = clipline(prev_x, prev_y, x, y, xmin, ymin, xmax, ymax)<EOL>if r:<EOL><INDENT>x1, y1, x2, y2 = r<EOL>x1 = plotxmin+float(x1-xmin)*xfactor<EOL>y1 = plotymin+float(y1-ymin)*yfactor<EOL>x2 = plotxmin+float(x2-xmin)*xfactor<EOL>y2 = plotymin+float(y2-ymin)*yfactor<EOL>self.plotline(x1, y1, x2, y2, attr, row)<EOL><DEDENT>prev_x, prev_y = x, y<EOL><DEDENT><DEDENT>for x, y, text, attr, row in Progress(self.gridlabels, '<STR_LIT>'):<EOL><INDENT>self.plotlabel(self.scaleX(x), self.scaleY(y), text, attr, row)<EOL><DEDENT> | plots points and lines and text onto the Plotter | f1838:c3:m36 |
def detect_command(cmdlist): | for platform, command, args in cmdlist:<EOL><INDENT>if platform is None or sys.platform == platform:<EOL><INDENT>path = shutil.which(command)<EOL>if path:<EOL><INDENT>return '<STR_LIT:U+0020>'.join([path, args])<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'<EOL> | Detect available clipboard util and return cmdline to copy data to the system clipboard.
cmdlist is a list of (platform, progname, argstr) tuples. | f1839:m0 |
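The row above masks its literals as `<STR_LIT>`, so the actual candidate commands are not visible. Below is a minimal sketch of the detection idea with purely hypothetical (platform, progname, argstr) entries, not the project's real table:

```python
import shutil
import sys

# Hypothetical candidate list; the real entries are masked as <STR_LIT> in the row above.
# Format: (platform or None for "any platform", program name, argument string)
CLIPBOARD_COMMANDS = [
    ('darwin', 'pbcopy', ''),
    ('win32', 'clip', ''),
    (None, 'xclip', '-selection clipboard'),
    (None, 'xsel', '--clipboard --input'),
]

def detect_command(cmdlist):
    """Return 'path args' for the first candidate whose platform matches and whose binary is installed."""
    for platform, command, args in cmdlist:
        if platform is None or sys.platform == platform:
            path = shutil.which(command)
            if path:
                return ' '.join([path, args])
    return ''

print(detect_command(CLIPBOARD_COMMANDS))
```

First match wins, so platform-specific entries should come before the `None` wildcards.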
@functools.lru_cache()<EOL>def clipboard(): | if not options.clipboard_copy_cmd:<EOL><INDENT>options.clipboard_copy_cmd = detect_clipboard_command()<EOL><DEDENT>return _Clipboard()<EOL> | Detect cmd and set option at first use, to allow option to be changed by user later. | f1839:m1 |
def copyToClipboard(value): | clipboard().copy(value)<EOL>status('<STR_LIT>')<EOL> | copy single value to system clipboard | f1839:m2 |
@asyncthread<EOL>def saveToClipboard(sheet, rows, filetype=None): | filetype = filetype or options.save_filetype<EOL>vs = copy(sheet)<EOL>vs.rows = rows<EOL>status('<STR_LIT>')<EOL>clipboard().save(vs, filetype)<EOL> | copy rows from sheet to system clipboard | f1839:m3 |
@property<EOL><INDENT>def command(self):<DEDENT> | cmd = options.clipboard_copy_cmd or fail('<STR_LIT>')<EOL>return cmd.split()<EOL> | Return cmdline cmd+args (as list for Popen) to copy data to the system clipboard. | f1839:c0:m0 |
def copy(self, value): | with tempfile.NamedTemporaryFile() as temp:<EOL><INDENT>with open(temp.name, '<STR_LIT:w>', encoding=options.encoding) as fp:<EOL><INDENT>fp.write(str(value))<EOL><DEDENT>p = subprocess.Popen(<EOL>self.command,<EOL>stdin=open(temp.name, '<STR_LIT:r>', encoding=options.encoding),<EOL>stdout=subprocess.DEVNULL)<EOL>p.communicate()<EOL><DEDENT> | Copy a cell to the system clipboard. | f1839:c0:m1 |
def save(self, vs, filetype): | <EOL>with tempfile.NamedTemporaryFile(suffix='<STR_LIT:.>'+filetype) as temp:<EOL><INDENT>saveSheets(temp.name, vs)<EOL>sync(<NUM_LIT:1>)<EOL>p = subprocess.Popen(<EOL>self.command,<EOL>stdin=open(temp.name, '<STR_LIT:r>', encoding=options.encoding),<EOL>stdout=subprocess.DEVNULL,<EOL>close_fds=True)<EOL>p.communicate()<EOL><DEDENT> | Copy rows to the system clipboard. | f1839:c0:m2 |
@functools.wraps(vd().toplevelTryFunc)<EOL>def threadProfileCode(func, *args, **kwargs): | with ThreadProfiler(threading.current_thread()) as prof:<EOL><INDENT>try:<EOL><INDENT>prof.thread.status = threadProfileCode.__wrapped__(func, *args, **kwargs)<EOL><DEDENT>except EscapeException as e:<EOL><INDENT>prof.thread.status = e<EOL><DEDENT><DEDENT> | Toplevel thread profile wrapper. | f1840:m2 |
def scaleY(self, canvasY): | plotterY = super().scaleY(canvasY)<EOL>return (self.plotviewBox.ymax-plotterY+<NUM_LIT:4>)<EOL> | returns plotter y coordinate, with y-axis inverted | f1841:c0:m2 |
def joinSheetnames(*sheetnames): | return '<STR_LIT:_>'.join(str(x) for x in sheetnames)<EOL> | Concatenate sheet names in a standard way | f1843:m0 |
def moveListItem(L, fromidx, toidx): | r = L.pop(fromidx)<EOL>L.insert(toidx, r)<EOL>return toidx<EOL> | Move element within list `L` and return element's new index. | f1843:m1 |
def urlcache(url, cachesecs=<NUM_LIT>*<NUM_LIT>*<NUM_LIT>): | p = Path(os.path.join(options.visidata_dir, '<STR_LIT>', urllib.parse.quote(url, safe='<STR_LIT>')))<EOL>if p.exists():<EOL><INDENT>secs = time.time() - p.stat().st_mtime<EOL>if secs < cachesecs:<EOL><INDENT>return p<EOL><DEDENT><DEDENT>if not p.parent.exists():<EOL><INDENT>os.makedirs(p.parent.resolve(), exist_ok=True)<EOL><DEDENT>assert p.parent.is_dir(), p.parent<EOL>req = urllib.request.Request(url, headers={'<STR_LIT>': __version_info__})<EOL>with urllib.request.urlopen(req) as fp:<EOL><INDENT>ret = fp.read().decode('<STR_LIT:utf-8>').strip()<EOL>with p.open_text(mode='<STR_LIT:w>') as fpout:<EOL><INDENT>fpout.write(ret)<EOL><DEDENT><DEDENT>return p<EOL> | Returns Path object to local cache of url contents. | f1845:m0 |
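A minimal sketch of the caching pattern in the row above; the cache directory, the omitted request header, and the default `cachesecs` (masked as `<NUM_LIT>` in the row) are assumptions rather than the project's actual values:

```python
import os
import time
import urllib.parse
import urllib.request

def urlcache(url, cachedir='~/.visidata/cache', cachesecs=24*60*60):
    """Return a local path holding the (possibly cached) contents of url."""
    path = os.path.join(os.path.expanduser(cachedir), urllib.parse.quote(url, safe=''))
    if os.path.exists(path) and time.time() - os.stat(path).st_mtime < cachesecs:
        return path                                   # cache hit: file is still fresh
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with urllib.request.urlopen(url) as resp:
        text = resp.read().decode('utf-8').strip()
    with open(path, 'w', encoding='utf-8') as f:      # (re)write the cache file
        f.write(text)
    return path
```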
@asyncthread<EOL><INDENT>def reload(self):<DEDENT> | self.rows = []<EOL><INDENT>if len(self.origCols) == <NUM_LIT:1> and self.origCols[<NUM_LIT:0>].type in (int, float, currency):<EOL><INDENT>self.numericBinning()<EOL><DEDENT>else:<EOL><DEDENT>self.discreteBinning()<EOL>for c in self.nonKeyVisibleCols:<EOL><INDENT>c._cachedValues = collections.OrderedDict()<EOL><DEDENT> | Generate histrow for each row and then reverse-sort by length. | f1848:c0:m5 |
def git_all(*args, git=maybeloggit, **kwargs): | try:<EOL><INDENT>cmd = git(*args, _err_to_out=True, _decode_errors='<STR_LIT:replace>', **kwargs)<EOL>out = cmd.stdout<EOL><DEDENT>except sh.ErrorReturnCode as e:<EOL><INDENT>status('<STR_LIT>' % e.exit_code)<EOL>out = e.stdout<EOL><DEDENT>out = out.decode('<STR_LIT:utf-8>')<EOL>return out<EOL> | Return entire output of git command. | f1852:m2 |
def git_lines(*args, git=maybeloggit, **kwargs): | err = io.StringIO()<EOL>try:<EOL><INDENT>for line in git('<STR_LIT>', _err=err, *args, _decode_errors='<STR_LIT:replace>', _iter=True, _bg_exc=False, **kwargs):<EOL><INDENT>yield line[:-<NUM_LIT:1>] <EOL><DEDENT><DEDENT>except sh.ErrorReturnCode as e:<EOL><INDENT>status('<STR_LIT>' % e.exit_code)<EOL><DEDENT>errlines = err.getvalue().splitlines()<EOL>if len(errlines) < <NUM_LIT:3>:<EOL><INDENT>for line in errlines:<EOL><INDENT>status(line)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>vd().push(TextSheet('<STR_LIT>' + '<STR_LIT:U+0020>'.join(args), errlines))<EOL><DEDENT> | Generator of stdout lines from given git command | f1852:m3 |
def git_iter(sep, *args, git=maybeloggit, **kwargs): | bufsize = <NUM_LIT><EOL>err = io.StringIO()<EOL>chunks = []<EOL>try:<EOL><INDENT>for data in git('<STR_LIT>', *args, _decode_errors='<STR_LIT:replace>', _out_bufsize=bufsize, _iter=True, _err=err, **kwargs):<EOL><INDENT>while True:<EOL><INDENT>i = data.find(sep)<EOL>if i < <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>chunks.append(data[:i])<EOL>data = data[i+<NUM_LIT:1>:]<EOL>yield '<STR_LIT>'.join(chunks)<EOL>chunks.clear()<EOL><DEDENT>chunks.append(data)<EOL><DEDENT><DEDENT>except sh.ErrorReturnCode as e:<EOL><INDENT>status('<STR_LIT>' % e.exit_code)<EOL><DEDENT>r = '<STR_LIT>'.join(chunks)<EOL>if r:<EOL><INDENT>yield r<EOL><DEDENT>errlines = err.getvalue().splitlines()<EOL>if len(errlines) < <NUM_LIT:3>:<EOL><INDENT>for line in errlines:<EOL><INDENT>status(line)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>vd().push(TextSheet('<STR_LIT>' + '<STR_LIT:U+0020>'.join(args), errlines))<EOL><DEDENT> | Generator of chunks of stdout from given git command, delineated by sep character | f1852:m4 |
def git_status(self, r): | ret = self._cachedStatus.get(r.filename, None) if r else None<EOL>return ret if ret else ["<STR_LIT>", None, None]<EOL> | return tuple of (status, adds, dels).
status like !! ??
adds and dels are lists of additions and deletions. | f1852:c3:m4 |
def getDiffSheet(fn, *refs): | | one column per ref | f1854:m0 |
def amendPrevious(self, targethash): | prevBranch = loggit_all('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>').strip()<EOL>ret = loggit_all('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'+targethash) <EOL>newChanges = loggit_all('<STR_LIT>', '<STR_LIT>').strip()<EOL>ret += loggit_all('<STR_LIT>', '<STR_LIT>', '<STR_LIT>') <EOL>with GitUndo('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>tmpBranch = randomBranchName()<EOL>ret += loggit_all('<STR_LIT>', '<STR_LIT>', tmpBranch) <EOL>with GitUndo('<STR_LIT>', prevBranch), GitUndo('<STR_LIT>', '<STR_LIT>', tmpBranch):<EOL><INDENT>ret += loggit_all('<STR_LIT>', '<STR_LIT>', targethash) <EOL>ret += loggit_all('<STR_LIT>', '<STR_LIT>', newChanges) <EOL>ret += loggit_all('<STR_LIT>', '<STR_LIT>', '<STR_LIT>') <EOL>ret += loggit_all('<STR_LIT>', '<STR_LIT>', tmpBranch, '<STR_LIT>', prevBranch) <EOL><DEDENT><DEDENT>return ret.splitlines()<EOL> | amend targethash with current index, then rebase newer commits on top | f1857:c2:m0 |
@functools.lru_cache()<EOL>def currency_multiplier(src_currency, dest_currency): | if src_currency == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>usd_mult = currency_rates()[src_currency]<EOL>if dest_currency == '<STR_LIT>':<EOL><INDENT>return usd_mult<EOL><DEDENT>return usd_mult/currency_rates()[dest_currency]<EOL> | returns equivalent value in USD for an amt of currency_code | f1861:m2 |
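A worked sketch of the cross-rate arithmetic in the row above, assuming the masked string literals are 'USD' and that the rate table maps a currency code to its value in USD; the figures below are hypothetical illustration values:

```python
# Hypothetical rate table: value of one unit of each currency expressed in USD.
rates = {'USD': 1.0, 'EUR': 1.08, 'GBP': 1.27, 'SEK': 0.095}

def currency_multiplier(src, dest):
    """Multiplier converting an amount in src currency to dest currency."""
    if src == 'USD':               # mirrors the masked body: USD source short-circuits to 1.0
        return 1.0
    usd_mult = rates[src]          # src -> USD
    if dest == 'USD':
        return usd_mult
    return usd_mult / rates[dest]  # src -> USD -> dest

print(round(100 * currency_multiplier('EUR', 'GBP'), 1))   # 100 EUR is roughly 85.0 GBP
```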
def parse_dossier_data(data, ep): | changed = False<EOL>doc_changed = False<EOL>ref = data['<STR_LIT>']['<STR_LIT>']<EOL>logger.debug('<STR_LIT>', ref)<EOL>with transaction.atomic():<EOL><INDENT>try:<EOL><INDENT>dossier = Dossier.objects.get(reference=ref)<EOL><DEDENT>except Dossier.DoesNotExist:<EOL><INDENT>dossier = Dossier(reference=ref)<EOL>logger.debug('<STR_LIT>')<EOL>changed = True<EOL><DEDENT>if dossier.title != data['<STR_LIT>']['<STR_LIT:title>']:<EOL><INDENT>logger.debug('<STR_LIT>', dossier.title,<EOL>data['<STR_LIT>']['<STR_LIT:title>'])<EOL>dossier.title = data['<STR_LIT>']['<STR_LIT:title>']<EOL>changed = True<EOL><DEDENT>if changed:<EOL><INDENT>logger.info('<STR_LIT>', ref)<EOL>dossier.save()<EOL><DEDENT>source = data['<STR_LIT>']['<STR_LIT:source>'].replace('<STR_LIT>', '<STR_LIT>')<EOL>try:<EOL><INDENT>doc = Document.objects.get(dossier=dossier, kind='<STR_LIT>')<EOL><DEDENT>except Document.DoesNotExist:<EOL><INDENT>doc = Document(dossier=dossier, kind='<STR_LIT>', chamber=ep)<EOL>logger.debug('<STR_LIT>', ref)<EOL>doc_changed = True<EOL><DEDENT>if doc.link != source:<EOL><INDENT>logger.debug('<STR_LIT>', doc.link, source)<EOL>doc.link = source<EOL>doc_changed = True<EOL><DEDENT>if doc_changed:<EOL><INDENT>logger.info('<STR_LIT>', doc.link, ref)<EOL>doc.save()<EOL><DEDENT><DEDENT>if '<STR_LIT>' in data.keys() and '<STR_LIT>' in data['<STR_LIT>']:<EOL><INDENT>command = Command()<EOL>command.init_cache()<EOL>command.parse_vote_data(data['<STR_LIT>'])<EOL><DEDENT> | Parse data from parltarck dossier export (1 dossier) Update dossier
if it existed before. This function's goal is to import and update a
dossier, not to import all parltrack data. | f1865:m0 |
def parse_vote_data(self, vote_data): | if '<STR_LIT>' not in vote_data.keys():<EOL><INDENT>logger.debug('<STR_LIT>',<EOL>vote_data['<STR_LIT:title>'])<EOL>return<EOL><DEDENT>dossier_pk = self.get_dossier(vote_data['<STR_LIT>'])<EOL>if not dossier_pk:<EOL><INDENT>logger.debug('<STR_LIT>',<EOL>vote_data['<STR_LIT>'])<EOL>return<EOL><DEDENT>return self.parse_proposal_data(<EOL>proposal_data=vote_data,<EOL>dossier_pk=dossier_pk<EOL>)<EOL> | Parse data from parltrack votes db dumps (1 proposal) | f1867:c0:m1 |
@transaction.atomic<EOL><INDENT>def parse_proposal_data(self, proposal_data, dossier_pk):<DEDENT> | proposal_display = '<STR_LIT>'.format(proposal_data['<STR_LIT:title>'].encode(<EOL>'<STR_LIT:utf-8>'), proposal_data.get('<STR_LIT>', '<STR_LIT>').encode('<STR_LIT:utf-8>'))<EOL>if '<STR_LIT>' not in proposal_data.keys():<EOL><INDENT>logger.debug('<STR_LIT>',<EOL>proposal_data['<STR_LIT>'])<EOL>return<EOL><DEDENT>changed = False<EOL>try:<EOL><INDENT>proposal = Proposal.objects.get(title=proposal_data['<STR_LIT:title>'])<EOL><DEDENT>except Proposal.DoesNotExist:<EOL><INDENT>proposal = Proposal(title=proposal_data['<STR_LIT:title>'])<EOL>changed = True<EOL><DEDENT>data_map = dict(<EOL>title=proposal_data['<STR_LIT:title>'],<EOL>datetime=_parse_date(proposal_data['<STR_LIT>']),<EOL>dossier_id=dossier_pk,<EOL>reference=proposal_data.get('<STR_LIT>'),<EOL>kind=proposal_data.get('<STR_LIT>')<EOL>)<EOL>for position in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>position_data = proposal_data.get(position, {})<EOL>position_total = position_data.get('<STR_LIT>', <NUM_LIT:0>)<EOL>if isinstance(position_total, str) and position_total.isdigit():<EOL><INDENT>position_total = int(position_total)<EOL><DEDENT>data_map['<STR_LIT>' % position.lower()] = position_total<EOL><DEDENT>for key, value in data_map.items():<EOL><INDENT>if value != getattr(proposal, key, None):<EOL><INDENT>setattr(proposal, key, value)<EOL>changed = True<EOL><DEDENT><DEDENT>if changed:<EOL><INDENT>proposal.save()<EOL><DEDENT>responses = vote_pre_import.send(sender=self, vote_data=proposal_data)<EOL>for receiver, response in responses:<EOL><INDENT>if response is False:<EOL><INDENT>logger.debug(<EOL>'<STR_LIT>', proposal_data.get(<EOL>'<STR_LIT>', proposal_data['<STR_LIT:title>']))<EOL>return<EOL><DEDENT><DEDENT>positions = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>logger.info(<EOL>'<STR_LIT>'.format(proposal_display))<EOL>for position in positions:<EOL><INDENT>for group_vote_data in proposal_data.get(<EOL>position,<EOL>{}).get(<EOL>'<STR_LIT>',<EOL>{}):<EOL><INDENT>for vote_data in group_vote_data['<STR_LIT>']:<EOL><INDENT>if not isinstance(vote_data, dict):<EOL><INDENT>logger.error('<STR_LIT>',<EOL>vote_data, proposal_data['<STR_LIT>'])<EOL>continue<EOL><DEDENT>representative_pk = self.get_representative(vote_data)<EOL>if representative_pk is None:<EOL><INDENT>logger.error('<STR_LIT>', vote_data)<EOL>continue<EOL><DEDENT>representative_name = vote_data.get('<STR_LIT>', '<STR_LIT>')<EOL>changed = False<EOL>try:<EOL><INDENT>vote = Vote.objects.get(<EOL>representative_id=representative_pk,<EOL>proposal_id=proposal.pk)<EOL><DEDENT>except Vote.DoesNotExist:<EOL><INDENT>vote = Vote(proposal_id=proposal.pk,<EOL>representative_id=representative_pk)<EOL>changed = True<EOL><DEDENT>if vote.position != position.lower():<EOL><INDENT>changed = True<EOL>vote.position = position.lower()<EOL><DEDENT>if vote.representative_name != representative_name:<EOL><INDENT>changed = True<EOL>vote.representative_name = representative_name<EOL><DEDENT>if changed:<EOL><INDENT>vote.save()<EOL>logger.debug('<STR_LIT>',<EOL>vote.pk, representative_pk, proposal_data['<STR_LIT:title>'],<EOL>proposal.pk, position)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return proposal<EOL> | Get or Create a proposal model from raw data | f1867:c0:m2 |
def find_dossier(data): | changed = False<EOL>dossier = None<EOL>reffield = None<EOL>for field in [k for k in ('<STR_LIT>', '<STR_LIT>') if k in data]:<EOL><INDENT>try:<EOL><INDENT>dossier = Dossier.objects.get(reference=data[field])<EOL>reffield = field<EOL>break<EOL><DEDENT>except Dossier.DoesNotExist:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if dossier is None:<EOL><INDENT>reffield = '<STR_LIT>' if '<STR_LIT>' in data else '<STR_LIT>'<EOL>dossier = Dossier(reference=data[reffield])<EOL>logger.debug('<STR_LIT>' % data[reffield])<EOL>changed = True<EOL><DEDENT>if '<STR_LIT>' in data and reffield != '<STR_LIT>':<EOL><INDENT>logger.debug('<STR_LIT>' % data['<STR_LIT>'])<EOL>dossier.reference = data['<STR_LIT>']<EOL>changed = True<EOL><DEDENT>return dossier, changed<EOL> | Find dossier with reference matching either 'ref_an' or 'ref_sen',
create it if not found. Ensure its reference is 'ref_an' if both fields
are present. | f1868:m1 |
def chambers(self): | <EOL>return set(sorted([d.chamber for d in self.documents.all()]))<EOL> | Return distinct chambers. You probably want to prefetch
documents__chamber before calling that. | f1891:c0:m1 |
def execute_from_file(self, url, file_var): | if isinstance(file_var, file):<EOL><INDENT>f = file_var<EOL><DEDENT>elif isinstance(file_var, str):<EOL><INDENT>try:<EOL><INDENT>f = open(file_var)<EOL><DEDENT>except IOError as e:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>")<EOL><DEDENT>inputs = json.loads(f.read())<EOL>resp = self.execute(url, inputs)<EOL>return resp<EOL> | Identical to WebPypeClient.execute(), except this function accepts a
file path or file type instead of a dictionary. | f1897:c1:m1 |
def execute_from_url(self, url, input_url): | inputs = self._request(input_url)<EOL>resp = self.execute(url, inputs)<EOL>return resp<EOL> | Identical to WebPypeClient.execute(), except this function accepts a
url instead of a dictionary or string. | f1897:c1:m2 |
def intercontacttimes(tnet): | <EOL>tnet = process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'], '<STR_LIT>')<EOL>if tnet.nettype[<NUM_LIT:0>] == '<STR_LIT:w>':<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>contacts = np.array([[None] * tnet.netshape[<NUM_LIT:0>]] * tnet.netshape[<NUM_LIT:0>])<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>for i in range(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:0>]):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, tnet.netshape[<NUM_LIT:0>]):<EOL><INDENT>edge_on = tnet.get_network_when(i=i, j=j)['<STR_LIT:t>'].values<EOL>if len(edge_on) > <NUM_LIT:0>:<EOL><INDENT>edge_on_diff = edge_on[<NUM_LIT:1>:] - edge_on[:-<NUM_LIT:1>]<EOL>contacts[i, j] = np.array(edge_on_diff)<EOL>contacts[j, i] = np.array(edge_on_diff)<EOL><DEDENT>else:<EOL><INDENT>contacts[i, j] = []<EOL>contacts[j, i] = []<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>for i in range(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:0>]):<EOL><INDENT>for j in range(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:0>]):<EOL><INDENT>edge_on = tnet.get_network_when(i=i, j=j)['<STR_LIT:t>'].values<EOL>if len(edge_on) > <NUM_LIT:0>:<EOL><INDENT>edge_on_diff = edge_on[<NUM_LIT:1>:] - edge_on[:-<NUM_LIT:1>]<EOL>contacts[i, j] = np.array(edge_on_diff)<EOL><DEDENT>else:<EOL><INDENT>contacts[i, j] = []<EOL><DEDENT><DEDENT><DEDENT><DEDENT>out = {}<EOL>out['<STR_LIT>'] = contacts<EOL>out['<STR_LIT>'] = tnet.nettype<EOL>return out<EOL> | Calculates the intercontacttimes of each edge in a network.
Parameters
-----------
tnet : array, dict
Temporal network (graphlet or contact). Nettype: 'bu', 'bd'
Returns
---------
contacts : dict
Intercontact times as numpy array in dictionary. contacts['intercontacttimes']
Notes
------
The inter-contact times are calculated from the time between consecutive "active" edges (where active means
that the value is 1 in a binary network).
Examples
--------
This example goes through how inter-contact times are calculated.
>>> import teneto
>>> import numpy as np
Make a network with 2 nodes and 4 time-points with 4 edges spaced out.
>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1
The network visualised below make it clear what the inter-contact times are between the two nodes:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([2,2,10])
edge_on = [1,3,5,9]
G[0,1,edge_on] = 1
fig, ax = plt.subplots(1, figsize=(4,2))
teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
ax.set_ylim(-0.25, 1.25)
plt.tight_layout()
fig.show()
Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1.
>>> ict = teneto.networkmeasures.intercontacttimes(G)
The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN.
So the icts between nodes 0 and 1 are found by:
>>> ict['intercontacttimes'][0,1]
array([2, 2, 4]) | f1924:m0 |
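The inter-contact times in the example above reduce to a single difference over the activation times, which can be checked in two lines (a sketch, not the library call):

```python
import numpy as np

edge_on = np.array([1, 3, 5, 9])   # time points at which the (0,1) edge is active
ict = np.diff(edge_on)             # same as edge_on[1:] - edge_on[:-1]
print(ict)                         # [2 2 4]
```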
def fluctuability(netin, calc='<STR_LIT>'): | <EOL>netin, _ = process_input(netin, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'])<EOL>netin[netin != <NUM_LIT:0>] = <NUM_LIT:1><EOL>unique_edges = np.sum(netin, axis=<NUM_LIT:2>)<EOL>unique_edges[unique_edges > <NUM_LIT:0>] = <NUM_LIT:1><EOL>unique_edges[unique_edges == <NUM_LIT:0>] = <NUM_LIT:0><EOL>fluct = (np.sum(unique_edges)) / np.sum(netin)<EOL>return fluct<EOL> | r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. 'global'
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is higher when those are repeated edges among a smaller set of edges
and lower when they are distributed across more edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indicies.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indicies.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_] | f1925:m0 |
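A direct numpy transcription of the formula F = sum(H) / sum(G), reproducing the two example values from the docstring above (a sketch under the stated binary-network assumption, not the library's implementation):

```python
import numpy as np

def fluctuability(G):
    G = (G != 0).astype(int)                # binarize
    H = (G.sum(axis=2) > 0).astype(int)     # 1 where an edge exists at any time point
    return H.sum() / G.sum()                # unique edges / total temporal edges

G_highF = np.zeros([3, 3, 3]); G_highF[[0, 0, 1], [1, 2, 2], [1, 2, 2]] = 1
G_lowF = np.zeros([3, 3, 3]);  G_lowF[[0, 0, 0], [1, 1, 1], [0, 1, 2]] = 1
print(fluctuability(G_highF), fluctuability(G_lowF))   # 1.0 0.333...
```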
def temporal_betweenness_centrality(tnet=None, paths=None, calc='<STR_LIT:time>'): | if tnet is not None and paths is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is None and paths is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is not None:<EOL><INDENT>paths = shortest_temporal_path(tnet)<EOL><DEDENT>bet = np.zeros([paths[['<STR_LIT>', '<STR_LIT:to>']].max().max() +<EOL><NUM_LIT:1>, paths['<STR_LIT>'].max()+<NUM_LIT:1>])<EOL>for row in paths.iterrows():<EOL><INDENT>if (np.isnan(row[<NUM_LIT:1>]['<STR_LIT>'])).all():<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>nodes_in_path = np.unique(np.concatenate(<EOL>row[<NUM_LIT:1>]['<STR_LIT>'])).astype(int).tolist()<EOL>nodes_in_path.remove(row[<NUM_LIT:1>]['<STR_LIT>'])<EOL>nodes_in_path.remove(row[<NUM_LIT:1>]['<STR_LIT:to>'])<EOL>if len(nodes_in_path) > <NUM_LIT:0>:<EOL><INDENT>bet[nodes_in_path, row[<NUM_LIT:1>]['<STR_LIT>']] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>bet = (<NUM_LIT:1>/((bet.shape[<NUM_LIT:0>]-<NUM_LIT:1>)*(bet.shape[<NUM_LIT:0>]-<NUM_LIT:2>))) * bet<EOL>if calc == '<STR_LIT>':<EOL><INDENT>bet = np.mean(bet, axis=<NUM_LIT:1>)<EOL><DEDENT>return bet<EOL> | Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node) | f1926:m0 |
def volatility(tnet, distance_func_name='<STR_LIT:default>', calc='<STR_LIT>', communities=None, event_displacement=None): | <EOL>tnet, netinfo = process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'])<EOL>distance_func_name = check_distance_funciton_input(<EOL>distance_func_name, netinfo)<EOL>if not isinstance(distance_func_name, str):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if netinfo['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>ind = np.triu_indices(tnet.shape[<NUM_LIT:0>], k=-tnet.shape[<NUM_LIT:0>])<EOL><DEDENT>elif netinfo['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>ind = np.triu_indices(tnet.shape[<NUM_LIT:0>], k=<NUM_LIT:1>)<EOL><DEDENT>if calc == '<STR_LIT>':<EOL><INDENT>communities = np.array(communities)<EOL>if len(communities) != netinfo['<STR_LIT>'][<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if communities.min() < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>distance_func = getDistanceFunction(distance_func_name)<EOL>if calc == '<STR_LIT>':<EOL><INDENT>vol = np.mean([distance_func(tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], t], tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], t + <NUM_LIT:1>])<EOL>for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)])<EOL><DEDENT>elif calc == '<STR_LIT:time>':<EOL><INDENT>vol = [distance_func(tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], t], tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], t + <NUM_LIT:1>])<EOL>for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)]<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>vol = [distance_func(tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], event_displacement],<EOL>tnet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], t]) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>])]<EOL><DEDENT>elif calc == '<STR_LIT>' or calc == '<STR_LIT>':<EOL><INDENT>vol = np.zeros([tnet.shape[<NUM_LIT:0>], tnet.shape[<NUM_LIT:1>]])<EOL>for i in ind[<NUM_LIT:0>]:<EOL><INDENT>for j in ind[<NUM_LIT:1>]:<EOL><INDENT>vol[i, j] = np.mean([distance_func(<EOL>tnet[i, j, t], tnet[i, j, t + <NUM_LIT:1>]) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)])<EOL><DEDENT><DEDENT>if netinfo['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>vol = vol + np.transpose(vol)<EOL><DEDENT>if calc == '<STR_LIT>':<EOL><INDENT>vol = np.mean(vol, axis=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>net_id = set(communities)<EOL>vol = np.zeros([max(net_id) + <NUM_LIT:1>, max(net_id) +<EOL><NUM_LIT:1>, netinfo['<STR_LIT>'][-<NUM_LIT:1>] - <NUM_LIT:1>])<EOL>for net1 in net_id:<EOL><INDENT>for net2 in net_id:<EOL><INDENT>if net1 != net2:<EOL><INDENT>vol[net1, net2, :] = [distance_func(tnet[communities == net1][:, communities == net2, t].flatten(),<EOL>tnet[communities == net1][:, communities == net2, t + <NUM_LIT:1>].flatten()) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)]<EOL><DEDENT>else:<EOL><INDENT>nettmp = tnet[communities ==<EOL>net1][:, communities == net2, :]<EOL>triu = np.triu_indices(nettmp.shape[<NUM_LIT:0>], k=<NUM_LIT:1>)<EOL>nettmp = nettmp[triu[<NUM_LIT:0>], triu[<NUM_LIT:1>], :]<EOL>vol[net1, net2, :] = [distance_func(nettmp[:, t].flatten(<EOL>), nettmp[:, t + <NUM_LIT:1>].flatten()) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>withi = np.array([[ind[<NUM_LIT:0>][n], ind[<NUM_LIT:1>][n]] for n in range(<EOL><NUM_LIT:0>, len(ind[<NUM_LIT:0>])) if communities[ind[<NUM_LIT:0>][n]] 
== communities[ind[<NUM_LIT:1>][n]]])<EOL>vol = [distance_func(tnet[withi[:, <NUM_LIT:0>], withi[:, <NUM_LIT:1>], t], tnet[withi[:, <NUM_LIT:0>],<EOL>withi[:, <NUM_LIT:1>], t + <NUM_LIT:1>]) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)]<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>beti = np.array([[ind[<NUM_LIT:0>][n], ind[<NUM_LIT:1>][n]] for n in range(<EOL><NUM_LIT:0>, len(ind[<NUM_LIT:0>])) if communities[ind[<NUM_LIT:0>][n]] != communities[ind[<NUM_LIT:1>][n]]])<EOL>vol = [distance_func(tnet[beti[:, <NUM_LIT:0>], beti[:, <NUM_LIT:1>], t], tnet[beti[:, <NUM_LIT:0>],<EOL>beti[:, <NUM_LIT:1>], t + <NUM_LIT:1>]) for t in range(<NUM_LIT:0>, tnet.shape[-<NUM_LIT:1>] - <NUM_LIT:1>)]<EOL><DEDENT>return vol<EOL> | r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (the difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
D : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point).
'edge' - average distance between consecutive time points for each edge). Takes considerably longer
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point
'communities' - returns volatility per community (see communities argument). Also returned per time-point; this may change in the future (with additional options)
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of community indices (either (node) or (node,time) dimensions).
event_displacement : int
if calc = 'event_displacement', specify the temporal index that all other time-points are calculated in relation to.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Community indexes are returned "as is" with a shape of [max(communities)+1, max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returned V[1,2] will correspond to indexes 1 and 2, and missing indexes (e.g. here 0 and 4) will be NaNs in their rows and columns. If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 chance of going "on", and once on, a 0.2 chance of going "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges are certain to disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array | f1927:m0 |
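As a sketch of the global case only, V_t = D(G_t, G_{t+1}) with a Hamming distance over the upper-triangle edges of a binary undirected network can be written as follows (illustrative, not the masked body above):

```python
import numpy as np

def volatility_global(G):
    """Mean Hamming distance between consecutive snapshots of a binary undirected (node, node, time) array."""
    i, j = np.triu_indices(G.shape[0], k=1)       # one entry per undirected edge
    hamming = lambda a, b: np.mean(a != b)        # fraction of edges that changed state
    return np.mean([hamming(G[i, j, t], G[i, j, t + 1])
                    for t in range(G.shape[-1] - 1)])
```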
def sid(tnet, communities, axis=<NUM_LIT:0>, calc='<STR_LIT>', decay=<NUM_LIT:0>): | tnet, netinfo = utils.process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'])<EOL>D = temporal_degree_centrality(<EOL>tnet, calc='<STR_LIT:time>', communities=communities, decay=decay)<EOL>network_ids = np.unique(communities)<EOL>communities_size = np.array([sum(communities == n) for n in network_ids])<EOL>sid = np.zeros([network_ids.max()+<NUM_LIT:1>, network_ids.max()+<NUM_LIT:1>, tnet.shape[-<NUM_LIT:1>]])<EOL>for n in network_ids:<EOL><INDENT>for m in network_ids:<EOL><INDENT>betweenmodulescaling = <NUM_LIT:1>/(communities_size[n]*communities_size[m])<EOL>if netinfo['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>withinmodulescaling = <NUM_LIT:1> /(communities_size[n]*communities_size[n])<EOL><DEDENT>elif netinfo['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>withinmodulescaling = <NUM_LIT:2> /(communities_size[n]*(communities_size[n]-<NUM_LIT:1>))<EOL>if n == m:<EOL><INDENT>betweenmodulescaling = withinmodulescaling<EOL><DEDENT><DEDENT>sid[n, m, :] = withinmodulescaling *D[n, n, :] - betweenmodulescaling * D[n, m, :]<EOL><DEDENT><DEDENT>sid[np.isnan(sid)] = <NUM_LIT:0><EOL>if calc == '<STR_LIT>':<EOL><INDENT>return np.sum(np.sum(sid, axis=<NUM_LIT:1>), axis=<NUM_LIT:0>)<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>return np.sum(sid, axis=axis)<EOL><DEDENT>else:<EOL><INDENT>return sid<EOL><DEDENT> | r"""
Segregation integration difference (SID). An estimate, per community or globally, of the difference between within-community and between-community strength.[sid-1]_
Parameters
----------
tnet: array, dict
Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'
communities : array
a Nx1 vector or NxT array of community assignment.
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
'global' returns temporal degree centrality (a 1xnode vector) (default);
'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing;
'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities.
decay: int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$.
Returns
-------
sid: array
segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time)
Notes
------
SID tries to quantify whether there is more segregation or integration compared to other time-points.
If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.
There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'),
the third is a value for each community-community pairing (calc='community_pairs').
First we calculate the temporal strength for each node. This is calculated by
.. math:: S_{i,t} = \sum_j G_{i,j,t}
The pairwise SID, when the network is undirected, is calculated by
.. math:: SID_{A,B,t} = {2 \over {N_A (N_A - 1)}} S_{A,t} - {1 \over {N_A N_B}} S_{A,B,t}
Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A.
When calculating the SID for a community, it is calculated by:
.. math:: SID_{A,t} = \sum_b^C \left( {2 \over {N_A (N_A - 1)}} S_{A,t} - {1 \over {N_A N_b}} S_{A,b,t} \right)
Where C is the number of communities.
When calculating the SID globally, it is calculated by:
.. math:: SID_{t} = \sum_a^C \sum_b^C \left( {2 \over {N_a (N_a - 1)}} S_{a,t} - {1 \over {N_a N_b}} S_{a,b,t} \right)
References
-----------
.. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] | f1928:m0 |
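A compact sketch of the pairwise SID formula above for an undirected network; `D` and `sizes` are hypothetical inputs standing for a precomputed (community, community, time) strength array and the per-community node counts, and every community is assumed to have at least two nodes:

```python
import numpy as np

def sid_pairwise(D, sizes):
    """SID per community pair; D is (community, community, time) strength, sizes the node counts."""
    sid = np.zeros_like(D, dtype=float)
    for a in range(len(sizes)):
        for b in range(len(sizes)):
            within = 2 / (sizes[a] * (sizes[a] - 1))            # assumes sizes[a] >= 2
            between = within if a == b else 1 / (sizes[a] * sizes[b])
            sid[a, b] = within * D[a, a] - between * D[a, b]    # broadcasts over time
    return sid
```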
def bursty_coeff(data, calc='<STR_LIT>', nodes='<STR_LIT:all>', communities=None, threshold_type=None, threshold_level=None, threshold_params=None): | if threshold_type is not None:<EOL><INDENT>if threshold_params is None: <EOL><INDENT>threshold_params = {}<EOL><DEDENT>data = binarize(data, threshold_type,<EOL>threshold_level, **threshold_params)<EOL><DEDENT>if calc == '<STR_LIT>' and communities is None:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>")<EOL><DEDENT>ict = <NUM_LIT:0> <EOL>if isinstance(data, dict):<EOL><INDENT>if [k for k in list(data.keys()) if k == '<STR_LIT>'] == ['<STR_LIT>']:<EOL><INDENT>ict = <NUM_LIT:1><EOL><DEDENT><DEDENT>if ict == <NUM_LIT:0>:<EOL><INDENT>data = intercontacttimes(data)<EOL><DEDENT>ict_shape = data['<STR_LIT>'].shape<EOL>if len(ict_shape) == <NUM_LIT:2>:<EOL><INDENT>node_len = ict_shape[<NUM_LIT:0>] * ict_shape[<NUM_LIT:1>]<EOL><DEDENT>elif len(ict_shape) == <NUM_LIT:1>:<EOL><INDENT>node_len = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(nodes, list) and len(ict_shape) > <NUM_LIT:1>:<EOL><INDENT>node_combinations = [[list(set(nodes))[t], list(set(nodes))[tt]] for t in range(<EOL><NUM_LIT:0>, len(nodes)) for tt in range(<NUM_LIT:0>, len(nodes)) if t != tt]<EOL>do_nodes = [np.ravel_multi_index(n, ict_shape)<EOL>for n in node_combinations]<EOL><DEDENT>else:<EOL><INDENT>do_nodes = np.arange(<NUM_LIT:0>, node_len)<EOL><DEDENT>if calc == '<STR_LIT>':<EOL><INDENT>ict = np.concatenate(data['<STR_LIT>']<EOL>[do_nodes, do_nodes], axis=<NUM_LIT:1>)<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>unique_communities = np.unique(communities)<EOL>ict_shape = (len(unique_communities), len(unique_communities))<EOL>ict = np.array([[None] * ict_shape[<NUM_LIT:0>]] * ict_shape[<NUM_LIT:1>])<EOL>for i, s1 in enumerate(unique_communities):<EOL><INDENT>for j, s2 in enumerate(unique_communities):<EOL><INDENT>if s1 == s2:<EOL><INDENT>ind = np.triu_indices(sum(communities == s1), k=<NUM_LIT:1>)<EOL>ict[i, j] = np.concatenate(<EOL>data['<STR_LIT>'][ind[<NUM_LIT:0>], ind[<NUM_LIT:1>]])<EOL><DEDENT>else:<EOL><INDENT>ict[i, j] = np.concatenate(np.concatenate(<EOL>data['<STR_LIT>'][communities == s1, :][:, communities == s2]))<EOL><DEDENT><DEDENT><DEDENT>data['<STR_LIT>'] = ict<EOL>do_nodes = np.arange(<NUM_LIT:0>, ict_shape[<NUM_LIT:0>]*ict_shape[<NUM_LIT:1>])<EOL><DEDENT>if len(ict_shape) > <NUM_LIT:1>:<EOL><INDENT>ict = data['<STR_LIT>'].reshape(ict_shape[<NUM_LIT:0>] * ict_shape[<NUM_LIT:1>])<EOL>b_coeff = np.zeros(len(ict)) * np.nan<EOL><DEDENT>else:<EOL><INDENT>b_coeff = np.zeros(<NUM_LIT:1>) * np.nan<EOL>ict = [data['<STR_LIT>']]<EOL><DEDENT>for i in do_nodes:<EOL><INDENT>if isinstance(ict[i], np.ndarray):<EOL><INDENT>mu_ict = np.mean(ict[i])<EOL>sigma_ict = np.std(ict[i])<EOL>b_coeff[i] = (sigma_ict - mu_ict) / (sigma_ict + mu_ict)<EOL><DEDENT>else:<EOL><INDENT>b_coeff[i] = np.nan<EOL><DEDENT><DEDENT>if len(ict_shape) > <NUM_LIT:1>:<EOL><INDENT>b_coeff = b_coeff.reshape(ict_shape)<EOL><DEDENT>return b_coeff<EOL> | r"""
Calculates the bursty coefficient.[1][2]
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
A weighted network can be applied if you specify threshold_type and threshold_value which will make it binary.
calc : str
What to calculate the bursty coeff over. Options include 'edge': calculate B on all ICTs between node i and j (default); 'node': calculate B on all ICTs connected to node i;
'communities': calculate B for each communities (argument communities then required);
'meanEdgePerNode': first calculate the ICTs between node i and j, then take the mean over all j.
nodes: list or str
Options: 'all': do for all nodes (default) or list of node indexes to calculate.
communities : array, optional
None (default) or Nx1 vector of communities assignment. This returns a "centrality" per communities instead of per node.
threshold_type : str, optional
If input is weighted. Specify binarizing threshold type. See teneto.utils.binarize
threshold_level : str, optional
If input is weighted. Specify binarizing threshold level. See teneto.utils.binarize
threshold_params : dict
If input is weighted. Dictionary with kwargs for teneto.utils.binarize
Returns
-------
B : array
Bursty coefficient per (edge or node) measure.
Notes
------
The burstiness coefficient, B, is defined in refs [1,2] as:
.. math:: B = {{\sigma_{ICT} - \mu_{ICT}} \over {\sigma_{ICT} + \mu_{ICT}}}
Where :math:`\sigma_{ICT}` and :math:`\mu_{ICT}` are the standard deviation and mean of the inter-contact times respectively (see teneto.networkmeasures.intercontacttimes)
When B > 0, indicates bursty intercontact times. When B < 0, indicates periodic/tonic intercontact times. When B = 0, indicates random.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create 2 temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
The two networks look like this:
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
G_bursty = np.zeros([2, 2, 60])
G_bursty[:,:,ts_bursty] = 1
G_periodic = np.zeros([2, 2, 60])
ts_periodic = np.arange(0, 60, 2)
G_periodic[:,:,ts_periodic] = 1
fig,ax = plt.subplots(2, 1, figsize=(10,3))
teneto.plot.slice_plot(G_bursty, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
teneto.plot.slice_plot(G_periodic, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
ax[0].set_title('G_bursty')
ax[1].set_title('G_periodic')
ax[0].set_ylim([-0.25,1.25])
ax[1].set_ylim([-0.25,1.25])
ax[0].set_xticklabels([])
ax[1].set_xticklabels([])
plt.tight_layout()
fig.show()
Now we call bursty_coeff.
>>> B_periodic = teneto.networkmeasures.bursty_coeff(G_periodic)
>>> B_periodic
array([[nan, -1.],
[-1., nan]])
Above we can see that between node 0 and 1, B=-1 (the diagonal is nan).
Doing the same for the second example:
>>> B_bursty = teneto.networkmeasures.bursty_coeff(G_bursty)
>>> B_bursty
array([[ nan, 0.13311003],
[0.13311003, nan]])
gives a positive value, indicating the inter-contact times between node 0 and 1 is bursty.
References
----------
.. [1] Goh, KI & Barabasi, AL (2008) Burstiness and Memory in Complex Systems. EPL (Europhysics Letters), 81: 4 [`Link <https://arxiv.org/pdf/physics/0610233.pdf>`_]
.. [2] Holme, P & Saramäki J (2012) Temporal networks. Physics Reports. 519: 3. [`Link <https://arxiv.org/pdf/1108.1780.pdf>`_] (Discrete formulation used here) | f1930:m0 |
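The defining formula is short enough to check by hand; the sketch below reproduces the two example values from the docstring above directly from the inter-contact times:

```python
import numpy as np

def burstiness(ict):
    """B = (sigma_ICT - mu_ICT) / (sigma_ICT + mu_ICT)."""
    mu, sigma = np.mean(ict), np.std(ict)
    return (sigma - mu) / (sigma + mu)

ict_periodic = np.diff(np.arange(0, 60, 2))                        # every gap is 2
ict_bursty = np.diff([1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55])
print(burstiness(ict_periodic))   # -1.0, perfectly periodic
print(burstiness(ict_bursty))     # ~0.133, bursty
```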
def topological_overlap(tnet, calc='<STR_LIT:time>'): | tnet = process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'])[<NUM_LIT:0>]<EOL>numerator = np.sum(tnet[:, :, :-<NUM_LIT:1>] * tnet[:, :, <NUM_LIT:1>:], axis=<NUM_LIT:1>)<EOL>denominator = np.sqrt(<EOL>np.sum(tnet[:, :, :-<NUM_LIT:1>], axis=<NUM_LIT:1>) * np.sum(tnet[:, :, <NUM_LIT:1>:], axis=<NUM_LIT:1>))<EOL>topo_overlap = numerator / denominator<EOL>topo_overlap[np.isnan(topo_overlap)] = <NUM_LIT:0><EOL>if calc == '<STR_LIT:time>':<EOL><INDENT>topo_overlap = np.hstack(<EOL>[topo_overlap, np.zeros([topo_overlap.shape[<NUM_LIT:0>], <NUM_LIT:1>])*np.nan])<EOL><DEDENT>else:<EOL><INDENT>topo_overlap = np.mean(topo_overlap, axis=<NUM_LIT:1>)<EOL>if calc == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>topo_overlap = np.mean(topo_overlap)<EOL><DEDENT><DEDENT>return topo_overlap<EOL> | r"""
Topological overlap quantifies the persistency of edges through time. If two consecutive time-points have similar edges, this becomes high (max 1). If there is a lot of change, this becomes 0.
References: [topo-1]_, [topo-2]_
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Nettype: 'bu'.
calc: str
which version of topological overlap to calculate:
'node' - calculates for each node, averaging over time.
'time' - (default) calculates for each node per time point.
'global' - calculates a single value for the whole network (the temporal correlation coefficient).
Returns
-------
topo_overlap : array
if calc = 'time', array is (node,time) in size.
if calc = 'node', array is (node) in size.
if calc = 'global', array is (1) in size. The final time point returns as nan.
Notes
------
When edges persist over time, the topological overlap increases. It can be calculated as a global value, per node, or per node and time point.
When calc='time', then the topological overlap is:
.. math:: TopoOverlap_{i,t} = {\sum_j G_{i,j,t} G_{i,j,t+1} \over \sqrt{\sum_j G_{i,j,t} \sum_j G_{i,j,t+1}}}
When calc='node', then the topological overlap is the mean of math:`TopoOverlap_{i,t}`:
.. math:: AvgTopoOverlap_{i} = {1 \over T-1} \sum_t TopoOverlap_{i,t}
where T is the number of time-points. This is called the *average topological overlap*.
When calc='global', the *temporal-correlation coefficient* is calculated
.. math:: TempCorrCoeff = {1 \over N} \sum_i AvgTopoOverlap_i
where N is the number of nodes.
For all the three measures above, the value is between 0 and 1 where 0 entails "all edges changes" and 1 entails "no edges change".
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Then make an temporal network with 3 nodes and 4 time-points.
>>> G = np.zeros([3, 3, 3])
>>> i_ind = np.array([0, 0, 0, 0,])
>>> j_ind = np.array([1, 1, 1, 2,])
>>> t_ind = np.array([0, 1, 2, 2,])
>>> G[i_ind, j_ind, t_ind] = 1
>>> G = G + G.transpose([1,0,2]) # Make symmetric
Now the topological overlap can be calculated:
>>> topo_overlap = teneto.networkmeasures.topological_overlap(G)
This returns *topo_overlap* which is a (node,time) array. Looking above at how we defined G,
when t = 0, there is only the edge (0,1). When t = 1, this edge still remains. This means
topo_overlap should equal 1 for node 0 at t=0 and 0 for node 2:
>>> topo_overlap[0,0]
1.0
>>> topo_overlap[2,0]
0.0
At t=2, there is now also an edge between (0,2), this means node 0's topological overlap at t=1 decreases as
its edges have decreased in their persistency at the next time point (i.e. some change has occurred). It equals ca. 0.71
>>> topo_overlap[0,1]
0.7071067811865475
If we want the average topological overlap, we simply add the calc argument to be 'node'.
>>> avg_topo_overlap = teneto.networkmeasures.topological_overlap(G, calc='node')
Now this is an array with a length of 3 (one per node).
>>> avg_topo_overlap
array([0.85355339, 1. , 0. ])
Here we see that node 1 had all its connections persist, node 2 had no connections persisting, and node 0 was in between.
To calculate the temporal correlation coefficient,
>>> temp_corr_coeff = teneto.networkmeasures.topological_overlap(G, calc='global')
This produces one value reflecting all of G
>>> temp_corr_coeff
0.617851130197758
References
----------
.. [topo-1] Tang et al (2010) Small-world behavior in time-varying graphs. Phys. Rev. E 81, 055101(R) [`arxiv link <https://arxiv.org/pdf/0909.1712.pdf>`_]
.. [topo-2] Nicosia et al (2013) "Graph Metrics for Temporal Networks" In: Holme P., Saramäki J. (eds) Temporal Networks. Understanding Complex Systems. Springer.
[`arxiv link <https://arxiv.org/pdf/1306.0493.pdf>`_] | f1931:m0 |
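A numpy sketch of the per-node, per-time formula above for a binary (node, node, time) array, treating 0/0 as 0 as the masked body also appears to do:

```python
import numpy as np

def topological_overlap_time(G):
    """Per-node, per-time topological overlap for a binary (node, node, time) array."""
    num = np.sum(G[:, :, :-1] * G[:, :, 1:], axis=1)
    den = np.sqrt(np.sum(G[:, :, :-1], axis=1) * np.sum(G[:, :, 1:], axis=1))
    with np.errstate(invalid='ignore'):
        topo = num / den
    topo[np.isnan(topo)] = 0   # isolated nodes give 0/0; treat as no overlap
    return topo                # shape (node, time-1); average over axes for 'node'/'global'
```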
def temporal_efficiency(tnet=None, paths=None, calc='<STR_LIT>'): | if tnet is not None and paths is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is None and paths is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is not None:<EOL><INDENT>paths = shortest_temporal_path(tnet)<EOL><DEDENT>pathmat = np.zeros([paths[['<STR_LIT>', '<STR_LIT:to>']].max().max(<EOL>)+<NUM_LIT:1>, paths[['<STR_LIT>', '<STR_LIT:to>']].max().max()+<NUM_LIT:1>, paths[['<STR_LIT>']].max().max()+<NUM_LIT:1>]) * np.nan<EOL>pathmat[paths['<STR_LIT>'].values, paths['<STR_LIT:to>'].values,<EOL>paths['<STR_LIT>'].values] = paths['<STR_LIT>']<EOL>if calc == '<STR_LIT>':<EOL><INDENT>eff = <NUM_LIT:1> / np.nanmean(pathmat)<EOL><DEDENT>elif calc == '<STR_LIT>' or calc == '<STR_LIT>':<EOL><INDENT>eff = <NUM_LIT:1> / np.nanmean(np.nanmean(pathmat, axis=<NUM_LIT:2>), axis=<NUM_LIT:1>)<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>eff = <NUM_LIT:1> / np.nanmean(np.nanmean(pathmat, axis=<NUM_LIT:2>), axis=<NUM_LIT:0>)<EOL><DEDENT>return eff<EOL> | r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
Returns
-------
E : array
Global temporal efficiency | f1932:m0 |
def temporal_degree_centrality(tnet, axis=<NUM_LIT:0>, calc='<STR_LIT>', communities=None, decay=<NUM_LIT:0>, ignorediagonal=True): | <EOL>tnet = process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'], '<STR_LIT>')<EOL>if axis == <NUM_LIT:1>:<EOL><INDENT>fromax = '<STR_LIT>'<EOL>toax = '<STR_LIT:i>'<EOL><DEDENT>else:<EOL><INDENT>fromax = '<STR_LIT:i>'<EOL>toax = '<STR_LIT>'<EOL><DEDENT>if tnet.nettype[<NUM_LIT:0>] == '<STR_LIT:b>':<EOL><INDENT>tnet.network['<STR_LIT>'] = <NUM_LIT:1><EOL><DEDENT>if calc == '<STR_LIT:time>' and communities is None:<EOL><INDENT>tdeg = np.zeros([tnet.netshape[<NUM_LIT:0>], tnet.netshape[<NUM_LIT:1>]])<EOL>df = tnet.network.groupby([fromax, '<STR_LIT:t>']).sum().reset_index()<EOL>tdeg[df[fromax], df['<STR_LIT:t>']] = df['<STR_LIT>']<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>df = tnet.network.groupby([toax, '<STR_LIT:t>']).sum().reset_index()<EOL>tdeg[df[toax], df['<STR_LIT:t>']] += df['<STR_LIT>']<EOL><DEDENT><DEDENT>elif calc == '<STR_LIT>' and communities is None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>elif calc != '<STR_LIT:time>' and communities is None:<EOL><INDENT>tdeg = np.zeros([tnet.netshape[<NUM_LIT:0>]])<EOL>df = tnet.network.groupby([fromax])['<STR_LIT>'].sum().reset_index()<EOL>tdeg[df[fromax]] += df['<STR_LIT>']<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>df = tnet.network.groupby([toax])['<STR_LIT>'].sum().reset_index()<EOL>tdeg[df[toax]] += df['<STR_LIT>']<EOL><DEDENT><DEDENT>elif calc == '<STR_LIT>' and communities is not None:<EOL><INDENT>tdeg = np.zeros([tnet.netshape[<NUM_LIT:0>], tnet.netshape[<NUM_LIT:1>]])<EOL>for t in range(tnet.netshape[<NUM_LIT:1>]):<EOL><INDENT>if len(communities.shape) == <NUM_LIT:2>:<EOL><INDENT>C = communities[:, t]<EOL><DEDENT>else:<EOL><INDENT>C = communities<EOL><DEDENT>for c in np.unique(C):<EOL><INDENT>k_i = np.sum(tnet.df_to_array()[<EOL>:, C == c, t][C == c], axis=axis)<EOL>tdeg[C == c, t] = (k_i - np.mean(k_i)) / np.std(k_i)<EOL><DEDENT><DEDENT>tdeg[np.isnan(tdeg) == <NUM_LIT:1>] = <NUM_LIT:0><EOL><DEDENT>elif calc == '<STR_LIT:time>' and communities is not None:<EOL><INDENT>tdeg_communities = np.zeros(<EOL>[communities.max()+<NUM_LIT:1>, communities.max()+<NUM_LIT:1>, communities.shape[-<NUM_LIT:1>]])<EOL>if len(communities.shape) == <NUM_LIT:2>:<EOL><INDENT>for t in range(len(communities[-<NUM_LIT:1>])):<EOL><INDENT>C = communities[:, t]<EOL>unique_communities = np.unique(C)<EOL>for s1 in unique_communities:<EOL><INDENT>for s2 in unique_communities:<EOL><INDENT>tdeg_communities[s1, s2, t] = np.sum(<EOL>np.sum(tnet.df_to_array()[C == s1, :, t][:, C == s2], axis=<NUM_LIT:1>), axis=<NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>unique_communities = np.unique(communities)<EOL>tdeg_communities = [np.sum(np.sum(tnet.df_to_array()[communities == s1, :, :][:, communities == s2, :], axis=<NUM_LIT:1>), axis=<NUM_LIT:0>)<EOL>for s1 in unique_communities for s2 in unique_communities]<EOL><DEDENT>tdeg = np.array(tdeg_communities)<EOL>tdeg = np.reshape(tdeg, [len(np.unique(communities)), len(<EOL>np.unique(communities)), tnet.netshape[-<NUM_LIT:1>]])<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>for s in range(tdeg.shape[<NUM_LIT:0>]):<EOL><INDENT>tdeg[s, s, :] = tdeg[s, s, :]/<NUM_LIT:2><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if decay > <NUM_LIT:0> and calc == '<STR_LIT:time>':<EOL><INDENT>tdeg = tdeg.transpose(<EOL>np.hstack([len(tdeg.shape)-<NUM_LIT:1>, 
np.arange(len(tdeg.shape)-<NUM_LIT:1>)]))<EOL>for n in range(<NUM_LIT:1>, tdeg.shape[<NUM_LIT:0>]):<EOL><INDENT>tdeg[n] = np.exp(-decay)*tdeg[n-<NUM_LIT:1>] + tdeg[n]<EOL><DEDENT>tdeg = tdeg.transpose(np.hstack([np.arange(<NUM_LIT:1>, len(tdeg.shape)), <NUM_LIT:0>]))<EOL><DEDENT>elif decay > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>return tdeg<EOL> | temporal degree of network. Sum of all connections each node has through time.
Parameters
-----------
net : array, dict
Temporal network input (graphlet or contact). Can have nettype: 'bu', 'bd', 'wu', 'wd'
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
Can be following alternatives:
'avg' : (returns temporal degree centrality (a 1xnode vector))
'time' : (returns a node x time matrix),
'module_degree_zscore' : returns the Z-scored within community degree centrality
(communities argument required). This is done for each time-point
i.e. 'time' returns static degree centrality per time-point.
ignorediagonal : bool
if true, diagonal is made to 0.
communities : array (Nx1)
Vector of community assignment.
If this is given and calc='time', then the strength within and between each community is returned (technically not degree centrality).
decay : int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^{-decay}$ such that $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. If
decay is 0 then the final D will equal D when calc='avg', if decay = inf
then this will equal calc='time'.
Returns
---------
D : array
temporal degree centrality (nodal measure). Array is 1D ('avg'), 2D ('time', 'module_degree_zscore') or 3D ('time' + communities (non-nodal/community measures)) | f1934:m0 |
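A minimal usage sketch of the 'avg' and 'time' options, assuming a binary undirected graphlet array can be passed directly (as in the other examples in this corpus):

>>> import teneto
>>> import numpy as np
>>> G = np.zeros([3, 3, 3])           # 3 nodes, 3 time-points
>>> G[0, 1, [0, 2]] = 1               # node 0 contacts node 1 at t=0 and t=2
>>> G[0, 2, 1] = 1                    # node 0 contacts node 2 at t=1
>>> G = G + G.transpose([1, 0, 2])    # symmetrize so the network is undirected
>>> D_avg = teneto.networkmeasures.temporal_degree_centrality(G, calc='avg')
>>> D_time = teneto.networkmeasures.temporal_degree_centrality(G, calc='time')

With this toy network D_avg should be [3, 2, 1] (node 0 takes part in three contacts in total), and D_time should be a 3 x 3 node-by-time matrix.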
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False): | if communities is None:<EOL><INDENT>if isinstance(tnet, dict):<EOL><INDENT>if '<STR_LIT>' in tnet.keys():<EOL><INDENT>communities = tnet['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>tnet = process_input(tnet, ['<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'], '<STR_LIT>')<EOL>if tnet.nettype[<NUM_LIT:0>] == '<STR_LIT:w>':<EOL><INDENT>if tnet.hdf5 == False:<EOL><INDENT>if sum(tnet.network['<STR_LIT>'] < <NUM_LIT:0>) > <NUM_LIT:0> and not removeneg:<EOL><INDENT>print(<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>tnet.network['<STR_LIT>'][tnet.network['<STR_LIT>'] < <NUM_LIT:0>] = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>part = np.zeros([tnet.netshape[<NUM_LIT:0>], tnet.netshape[<NUM_LIT:1>]])<EOL>if len(communities.shape) == <NUM_LIT:1>:<EOL><INDENT>for t in np.arange(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:1>]):<EOL><INDENT>C = communities<EOL>snapshot = tnet.get_network_when(t=t)<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>i_at_t = snapshot['<STR_LIT:i>'].values<EOL><DEDENT>else:<EOL><INDENT>i_at_t = np.concatenate(<EOL>[snapshot['<STR_LIT:i>'].values, snapshot['<STR_LIT>'].values])<EOL><DEDENT>i_at_t = np.unique(i_at_t).tolist()<EOL>i_at_t = list(map(int, i_at_t))<EOL>for i in i_at_t:<EOL><INDENT>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>df = tnet.get_network_when(i=i, t=t)<EOL>j_at_t = df['<STR_LIT>'].values<EOL>if tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = df['<STR_LIT>'].sum()<EOL><DEDENT>elif tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = len(df)<EOL><DEDENT><DEDENT>elif tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>df = tnet.get_network_when(ij=i, t=t)<EOL>j_at_t = np.concatenate([df['<STR_LIT:i>'].values, df['<STR_LIT>'].values])<EOL>if tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = df['<STR_LIT>'].sum()<EOL><DEDENT>elif tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = len(df)<EOL><DEDENT><DEDENT>j_at_t = list(map(int, j_at_t))<EOL>for c in np.unique(C[j_at_t]):<EOL><INDENT>ci = np.where(C == c)[<NUM_LIT:0>].tolist()<EOL>k_is = tnet.get_network_when(i=i, j=ci, t=t)<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>k_is2 = tnet.get_network_when(j=i, i=ci, t=t)<EOL>k_is = pd.concat([k_is, k_is2])<EOL><DEDENT>if len(k_is) > <NUM_LIT:0>:<EOL><INDENT>if tnet.nettype[<NUM_LIT:0>] == '<STR_LIT:b>':<EOL><INDENT>k_is = len(k_is)<EOL><DEDENT>else:<EOL><INDENT>k_is = k_is['<STR_LIT>'].sum()<EOL><DEDENT>part[i, t] += np.square(k_is/k_i)<EOL><DEDENT><DEDENT><DEDENT>part[i_at_t, t] = <NUM_LIT:1> - part[i_at_t, t]<EOL>if decay is not None and t > <NUM_LIT:0>:<EOL><INDENT>part[i_at_t, t] += decay*part[i_at_t, t-<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for t in np.arange(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:1>]):<EOL><INDENT>snapshot = tnet.get_network_when(t=t)<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>i_at_t = snapshot['<STR_LIT:i>'].values<EOL><DEDENT>else:<EOL><INDENT>i_at_t = np.concatenate(<EOL>[snapshot['<STR_LIT:i>'].values, snapshot['<STR_LIT>'].values])<EOL><DEDENT>i_at_t = np.unique(i_at_t).tolist()<EOL>i_at_t = list(map(int, i_at_t))<EOL>for i in i_at_t:<EOL><INDENT>for tc in np.arange(<NUM_LIT:0>, tnet.netshape[<NUM_LIT:1>]):<EOL><INDENT>C = communities[:, tc]<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>df = tnet.get_network_when(i=i, t=t)<EOL>j_at_t = 
df['<STR_LIT>'].values<EOL>if tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = df['<STR_LIT>'].sum()<EOL><DEDENT>elif tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = len(df)<EOL><DEDENT><DEDENT>elif tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>df = tnet.get_network_when(ij=i, t=t)<EOL>j_at_t = np.concatenate(<EOL>[df['<STR_LIT:i>'].values, df['<STR_LIT>'].values])<EOL>if tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = df['<STR_LIT>'].sum()<EOL><DEDENT>elif tnet.nettype == '<STR_LIT>':<EOL><INDENT>k_i = len(df)<EOL><DEDENT><DEDENT>j_at_t = list(map(int, j_at_t))<EOL>for c in np.unique(C[j_at_t]):<EOL><INDENT>ci = np.where(C == c)[<NUM_LIT:0>].tolist()<EOL>k_is = tnet.get_network_when(i=i, j=ci, t=t)<EOL>if tnet.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>k_is2 = tnet.get_network_when(j=i, i=ci, t=t)<EOL>k_is = pd.concat([k_is, k_is2])<EOL><DEDENT>if tnet.nettype[<NUM_LIT:0>] == '<STR_LIT:b>':<EOL><INDENT>k_is = len(k_is)<EOL><DEDENT>else:<EOL><INDENT>k_is = k_is['<STR_LIT>'].sum()<EOL><DEDENT>part[i, t] += np.square(k_is/k_i)<EOL><DEDENT><DEDENT>part[i, t] = part[i, t] / tnet.netshape[<NUM_LIT:1>]<EOL><DEDENT>part[i_at_t, t] = <NUM_LIT:1> - part[i_at_t, t]<EOL>if decay is not None and t > <NUM_LIT:0>:<EOL><INDENT>part[i_at_t, t] += decay*part[i_at_t, t-<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>part[np.isnan(part) == <NUM_LIT:1>] = <NUM_LIT:0><EOL>return part<EOL> | r'''
Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Only positive matrices considered.
communities : array
community vector. Either 1D (node) community index or 2D (node,time).
removeneg : bool (default false)
If true, all values < 0 are made to be 0.
Returns
-------
P : array
participation coefficient
Notes
-----
Static participation coefficient is:
.. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2
Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_
This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t.
If directed, function sums axis=1, so tnet may need to be transposed beforehand depending on which type of directed part_coef you are interested in.
References
----------
.. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_] | f1935:m0 |
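A minimal sketch of the temporal participation coefficient, assuming a graphlet array and a 1D community vector are accepted as described above; the network and community split are illustrative only:

>>> import teneto
>>> import numpy as np
>>> G = np.zeros([4, 4, 2])
>>> G[0, 1, :] = 1                    # node 0 - node 1 (same community)
>>> G[0, 2, :] = 1                    # node 0 - node 2 (other community)
>>> G = G + G.transpose([1, 0, 2])    # make the network undirected
>>> communities = np.array([0, 0, 1, 1])
>>> P = teneto.networkmeasures.temporal_participation_coeff(G, communities)

Here P[0] should be 0.5 at both time-points (node 0 splits its contacts evenly between the two communities), while node 1, which only contacts its own community, should get 0.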
def reachability_latency(tnet=None, paths=None, rratio=<NUM_LIT:1>, calc='<STR_LIT>'): | if tnet is not None and paths is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is None and paths is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is not None:<EOL><INDENT>paths = shortest_temporal_path(tnet)<EOL><DEDENT>pathmat = np.zeros([paths[['<STR_LIT>', '<STR_LIT:to>']].max().max(<EOL>)+<NUM_LIT:1>, paths[['<STR_LIT>', '<STR_LIT:to>']].max().max()+<NUM_LIT:1>, paths[['<STR_LIT>']].max().max()+<NUM_LIT:1>]) * np.nan<EOL>pathmat[paths['<STR_LIT>'].values, paths['<STR_LIT:to>'].values,<EOL>paths['<STR_LIT>'].values] = paths['<STR_LIT>']<EOL>netshape = pathmat.shape<EOL>edges_to_reach = netshape[<NUM_LIT:0>] - np.round(netshape[<NUM_LIT:0>] * rratio)<EOL>reach_lat = np.zeros([netshape[<NUM_LIT:1>], netshape[<NUM_LIT:2>]]) * np.nan<EOL>for t_ind in range(<NUM_LIT:0>, netshape[<NUM_LIT:2>]):<EOL><INDENT>paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=<NUM_LIT:1>)<EOL>reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]<EOL><DEDENT>if calc == '<STR_LIT>':<EOL><INDENT>reach_lat = np.nansum(reach_lat)<EOL>reach_lat = reach_lat / ((netshape[<NUM_LIT:0>]) * netshape[<NUM_LIT:2>])<EOL><DEDENT>elif calc == '<STR_LIT>':<EOL><INDENT>reach_lat = np.nansum(reach_lat, axis=<NUM_LIT:1>)<EOL>reach_lat = reach_lat / (netshape[<NUM_LIT:2>])<EOL><DEDENT>return reach_lat<EOL> | Reachability latency. This is the r-th longest temporal path.
Parameters
---------
data : array or dict
Can either be a network (graphlet or contact), binary undirected only. Alternatively, it can be a paths dictionary (output of teneto.networkmeasures.shortest_temporal_path)
rratio: float (default: 1)
reachability ratio that the latency is calculated in relation to.
Value must be over 0 and up to 1.
1 (default) - all nodes must be reached.
Other values (e.g. 0.5) imply that 50% of nodes are reached.
This is rounded to the nearest whole node.
E.g. if there are 6 nodes [1,2,3,4,5,6], it will be node 4 (due to rounding upwards)
calc : str
what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.
Returns
--------
reach_lat : array
Reachability latency
Notes
------
Reachability latency calculates how long the temporal paths take to reach the specified ratio (rratio) of nodes. | f1936:m0 |
def local_variation(data): | ict = <NUM_LIT:0> <EOL>if isinstance(data, dict):<EOL><INDENT>if [k for k in list(data.keys()) if k == '<STR_LIT>'] == ['<STR_LIT>']:<EOL><INDENT>ict = <NUM_LIT:1><EOL><DEDENT><DEDENT>if ict == <NUM_LIT:0>:<EOL><INDENT>data = intercontacttimes(data)<EOL><DEDENT>if data['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>ind = np.triu_indices(data['<STR_LIT>'].shape[<NUM_LIT:0>], k=<NUM_LIT:1>)<EOL><DEDENT>if data['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>triu = np.triu_indices(data['<STR_LIT>'].shape[<NUM_LIT:0>], k=<NUM_LIT:1>)<EOL>tril = np.tril_indices(data['<STR_LIT>'].shape[<NUM_LIT:0>], k=-<NUM_LIT:1>)<EOL>ind = [[], []]<EOL>ind[<NUM_LIT:0>] = np.concatenate([tril[<NUM_LIT:0>], triu[<NUM_LIT:0>]])<EOL>ind[<NUM_LIT:1>] = np.concatenate([tril[<NUM_LIT:1>], triu[<NUM_LIT:1>]])<EOL>ind = tuple(ind)<EOL><DEDENT>ict_shape = data['<STR_LIT>'].shape<EOL>lv = np.zeros(ict_shape)<EOL>for n in range(len(ind[<NUM_LIT:0>])):<EOL><INDENT>icts = data['<STR_LIT>'][ind[<NUM_LIT:0>][n], ind[<NUM_LIT:1>][n]]<EOL>if len(icts) > <NUM_LIT:0>:<EOL><INDENT>lv_nonnorm = np.sum(<EOL>np.power((icts[:-<NUM_LIT:1>] - icts[<NUM_LIT:1>:]) / (icts[:-<NUM_LIT:1>] + icts[<NUM_LIT:1>:]), <NUM_LIT:2>))<EOL>lv[ind[<NUM_LIT:0>][n], ind[<NUM_LIT:1>][n]] = (<NUM_LIT:3>/len(icts)) * lv_nonnorm<EOL><DEDENT>else:<EOL><INDENT>lv[ind[<NUM_LIT:0>][n], ind[<NUM_LIT:1>][n]] = np.nan<EOL><DEDENT><DEDENT>if data['<STR_LIT>'][<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>lv = lv + lv.transpose()<EOL><DEDENT>for n in range(lv.shape[<NUM_LIT:0>]):<EOL><INDENT>lv[n, n] = np.nan<EOL><DEDENT>return lv<EOL> | r"""
Calculates the local variation of inter-contact times. [LV-1]_, [LV-2]_
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
Returns
-------
LV : array
Local variation per edge.
Notes
------
The local variation is like the bursty coefficient and quantifies if a series of inter-contact times are periodic, random or Poisson distributed or bursty.
It is defined as:
.. math:: LV = {3 \over {n-1}}\sum_{i=1}^{n-1}{{{\iota_i - \iota_{i+1}} \over {\iota_i + \iota_{i+1}}}^2}
Where :math:`\iota` are inter-contact times and i is the index of the inter-contact time (not a node index). n is the number of events, making n-1 the number of inter-contact times.
The possible range is: :math:`0 \leq LV < 3`.
When the process is periodic, LV=0; when Poisson, LV=1. Larger LVs indicate a bursty process.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create 2 temporal network of 2 nodes and 60 time points. The first has periodict edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
Now we call local variation for each edge.
>>> LV_periodic = teneto.networkmeasures.local_variation(G_periodic)
>>> LV_periodic
array([[nan, 0.],
[ 0., nan]])
Above we can see that between node 0 and 1, LV=0 (the diagonal is nan).
This is indicative of a periodic contacts (which is what we defined).
Doing the same for the second example:
>>> LV_bursty = teneto.networkmeasures.local_variation(G_bursty)
>>> LV_bursty
array([[ nan, 1.28748748],
[1.28748748, nan]])
When the value is greater than 1, it indicates a bursty process.
nans are returned if there are no intercontacttimes
References
----------
.. [LV-1] Shinomoto et al (2003) Differences in spiking patterns among cortical neurons. Neural Computation 15.12 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/089976603322518759>`_]
.. [LV-2] Followed eq., 4.34 in Masuda N & Lambiotte (2016) A guide to temporal networks. World Scientific. Series on Complex Networks. Vol 4 [`Link <https://www.worldscientific.com/doi/abs/10.1142/9781786341150_0001>`_] | f1937:m0 |
def temporal_closeness_centrality(tnet=None, paths=None): | if tnet is not None and paths is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is None and paths is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if tnet is not None:<EOL><INDENT>paths = shortest_temporal_path(tnet)<EOL><DEDENT>pathmat = np.zeros([paths[['<STR_LIT>', '<STR_LIT:to>']].max().max(<EOL>)+<NUM_LIT:1>, paths[['<STR_LIT>', '<STR_LIT:to>']].max().max()+<NUM_LIT:1>, paths[['<STR_LIT>']].max().max()+<NUM_LIT:1>]) * np.nan<EOL>pathmat[paths['<STR_LIT>'].values, paths['<STR_LIT:to>'].values,<EOL>paths['<STR_LIT>'].values] = paths['<STR_LIT>']<EOL>closeness = np.nansum(<NUM_LIT:1> / np.nanmean(pathmat, axis=<NUM_LIT:2>),<EOL>axis=<NUM_LIT:1>) / (pathmat.shape[<NUM_LIT:1>] - <NUM_LIT:1>)<EOL>return closeness<EOL> | Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
close : array
temporal closeness centrality (nodal measure) | f1938:m0 |
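A minimal call sketch, assuming the shortest temporal paths are computed internally when a binary undirected graphlet array is passed as tnet (as the tnet branch above indicates); the toy network is illustrative only:

>>> import teneto
>>> import numpy as np
>>> G = np.zeros([3, 3, 4])
>>> G[0, 1, [0, 2]] = 1
>>> G[1, 2, [1, 3]] = 1
>>> G = G + G.transpose([1, 0, 2])
>>> close = teneto.networkmeasures.temporal_closeness_centrality(tnet=G)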
def allegiance(community): | N = community.shape[<NUM_LIT:0>]<EOL>C = community.shape[<NUM_LIT:1>]<EOL>T = P = np.zeros([N, N])<EOL>for t in range(len(community[<NUM_LIT:0>, :])):<EOL><INDENT>for i in range(len(community[:, <NUM_LIT:0>])):<EOL><INDENT>for j in range(len(community[:, <NUM_LIT:0>])):<EOL><INDENT>if i == j:<EOL><INDENT>continue<EOL><DEDENT>if community[i][t] == community[j][t]:<EOL><INDENT>T[i, j] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>P = (<NUM_LIT:1>/C)*T<EOL>return P<EOL> | Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1 | f1939:m0 |
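A short worked sketch, assuming allegiance is importable from teneto.networkmeasures like the other measures here: a pair of nodes that shares a community label at every time-point gets allegiance 1, and a pair that agrees at one of three time-points gets 1/3.

>>> import numpy as np
>>> from teneto.networkmeasures import allegiance
>>> communities = np.array([[0, 0, 1],
...                         [0, 0, 1],
...                         [1, 1, 1]])
>>> P = allegiance(communities)

P[0, 1] should be 1.0 while P[0, 2] and P[1, 2] should be 1/3 (the diagonal stays 0, since i == j pairs are skipped).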
def recruitment(temporalcommunities, staticcommunities): | <EOL>if staticcommunities.shape[<NUM_LIT:0>] != temporalcommunities.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError( <EOL>'<STR_LIT>')<EOL><DEDENT>alleg = allegiance(temporalcommunities)<EOL>Rcoeff = np.zeros(len(staticcommunities))<EOL>for i, statcom in enumerate(staticcommunities):<EOL><INDENT>Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])<EOL><DEDENT>return Rcoeff<EOL> | Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. | f1940:m0 |
def flexibility(communities): | <EOL>flex = np.zeros(communities.shape[<NUM_LIT:0>])<EOL>for t in range(<NUM_LIT:1>, communities.shape[<NUM_LIT:1>]):<EOL><INDENT>flex[communities[:, t] != communities[:, t-<NUM_LIT:1>]] += <NUM_LIT:1><EOL><DEDENT>flex = flex / (communities.shape[<NUM_LIT:1>] - <NUM_LIT:1>)<EOL>return flex<EOL> | Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
Array with the flexibility of each node.
Notes
-----
Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6. | f1941:m0 |
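A small worked sketch, assuming flexibility is importable from teneto.networkmeasures: a node that changes its community label at every step has flexibility 1, and a node that never changes has flexibility 0.

>>> import numpy as np
>>> from teneto.networkmeasures import flexibility
>>> communities = np.array([[0, 1, 0, 1],
...                         [0, 0, 0, 0]])
>>> flexibility(communities)
array([1., 0.])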
def integration(temporalcommunities, staticcommunities): | <EOL>if staticcommunities.shape[<NUM_LIT:0>] != temporalcommunities.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>alleg = allegiance(temporalcommunities)<EOL>Icoeff = np.zeros(len(staticcommunities))<EOL>for i, statcom in enumerate(staticcommunities):<EOL><INDENT>Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])<EOL><DEDENT>return Icoeff<EOL> | Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | f1943:m0 |
def __init__(self, BIDS_dir, pipeline=None, pipeline_subdir=None, parcellation=None, bids_tags=None, bids_suffix=None, bad_subjects=None, confound_pipeline=None, raw_data_exists=True, njobs=None): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>self.contact = []<EOL>if raw_data_exists:<EOL><INDENT>self.BIDS = BIDSLayout(BIDS_dir, validate=False)<EOL><DEDENT>else: <EOL><INDENT>self.BIDS = None<EOL><DEDENT>self.BIDS_dir = os.path.abspath(BIDS_dir)<EOL>self.pipeline = pipeline<EOL>self.confound_pipeline = confound_pipeline<EOL>self.raw_data_exists = raw_data_exists<EOL>if not pipeline_subdir:<EOL><INDENT>self.pipeline_subdir = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self.pipeline_subdir = pipeline_subdir<EOL><DEDENT>self.parcellation = parcellation<EOL>if self.BIDS_dir[-<NUM_LIT:1>] != '<STR_LIT:/>':<EOL><INDENT>self.BIDS_dir = self.BIDS_dir + '<STR_LIT:/>'<EOL><DEDENT>if not bids_suffix:<EOL><INDENT>self.bids_suffix = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self.bids_suffix = bids_suffix<EOL><DEDENT>if bad_subjects == None:<EOL><INDENT>self.bad_subjects = None<EOL><DEDENT>else:<EOL><INDENT>self.set_bad_subjects(bad_subjects)<EOL><DEDENT>if not njobs:<EOL><INDENT>self.njobs = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self.njobs = njobs<EOL><DEDENT>self.bad_files = []<EOL>self.confounds = None<EOL>self.set_bids_tags()<EOL>if bids_tags:<EOL><INDENT>self.set_bids_tags(bids_tags)<EOL><DEDENT>self.tvc_data_ = []<EOL>self.parcellation_data_ = []<EOL>self.participent_data_ = []<EOL>self.temporalnetwork_data_ = []<EOL>self.fc_data_ = []<EOL>self.tvc_trialinfo_ = []<EOL>self.parcellation_trialinfo_ = []<EOL>self.temporalnetwork_trialinfo_ = []<EOL>self.fc_trialinfo_ = []<EOL> | Parameters
----------
BIDS_dir : str
string to BIDS directory
pipeline : str
the directory that is in the BIDS_dir/derivatives/<pipeline>/
pipeline_subdir : str, optional
the directory that is in the BIDS_dir/derivatives/<pipeline>/sub-<subjectnr/[ses-<sesnr>]/func/<pipeline_subdir>
parcellation : str, optional
parcellation name
space : str, optional
different normalized spaces
subjects : str or list, optional
can be part of the BIDS file name
sessions : str or list, optional
can be part of the BIDS file name
runs : str or list, optional
can be part of the BIDS file name
tasks : str or list, optional
can be part of the BIDS file name
bad_subjects : list or str, optional
Removes these subjects from the analysis
confound_pipeline : str, optional
If the confounds file is in another derivatives directory than the pipeline directory, set it here.
raw_data_exists : bool, optional
Default is True. If the unpreprocessed data is not present in BIDS_dir, set to False. Some BIDS functionality will be lost.
njobs : int, optional
How many parallel jobs to run. Default: 1. The set value can be overruled in individual functions. | f1946:c0:m0 |
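A hedged construction sketch; the directory and pipeline names below are placeholders rather than values taken from this source:

>>> import teneto
>>> # '/data/mybids' and 'fmriprep' are hypothetical; point them at your own
>>> # BIDS directory and preprocessing derivative.
>>> tnet = teneto.TenetoBIDS('/data/mybids', pipeline='fmriprep')

Subjects, sessions, tasks and runs can then be narrowed with the bids_tags argument or with set_bids_tags before any processing step is run.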
def add_history(self, fname, fargs, init=<NUM_LIT:0>): | if init == <NUM_LIT:1>:<EOL><INDENT>self.history = []<EOL><DEDENT>self.history.append([fname, fargs])<EOL> | Adds a processing step to TenetoBIDS.history. | f1946:c0:m1 |
def export_history(self, dirname): | mods = [(m.__name__, m.__version__)<EOL>for m in sys.modules.values() if m if hasattr(m, '<STR_LIT>')]<EOL>with open(dirname + '<STR_LIT>', '<STR_LIT:w>') as f:<EOL><INDENT>for m in mods:<EOL><INDENT>m = list(m)<EOL>if not isinstance(m[<NUM_LIT:1>], str):<EOL><INDENT>m[<NUM_LIT:1>] = m[<NUM_LIT:1>].decode("<STR_LIT:utf-8>")<EOL><DEDENT>f.writelines(m[<NUM_LIT:0>] + '<STR_LIT>' + m[<NUM_LIT:1>] + '<STR_LIT:\n>')<EOL><DEDENT><DEDENT>with open(dirname + '<STR_LIT>', '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines('<STR_LIT>')<EOL>for func, args in self.history:<EOL><INDENT>f.writelines(func + '<STR_LIT>' + str(args) + '<STR_LIT>')<EOL><DEDENT><DEDENT>with open(dirname + '<STR_LIT>', '<STR_LIT:w>') as f:<EOL><INDENT>json.dump(self.tenetoinfo, f)<EOL><DEDENT> | Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname
Parameters
---------
dirname : str
directory to export entire TenetoBIDS history. | f1946:c0:m2 |
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=<NUM_LIT:1>, confound_corr_report=True): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>confound_files = self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>')<EOL>if confound_files:<EOL><INDENT>confounds_exist = True<EOL><DEDENT>else:<EOL><INDENT>confounds_exist = False<EOL><DEDENT>if not confound_corr_report:<EOL><INDENT>confounds_exist = False<EOL><DEDENT>if not tag:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>tag = '<STR_LIT>' + tag<EOL><DEDENT>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,<EOL>confounds_exist, confound_files) for i, f in enumerate(files) if f}<EOL>for j in as_completed(job):<EOL><INDENT>j.result()<EOL><DEDENT><DEDENT>if update_pipeline == True:<EOL><INDENT>if not self.confound_pipeline and len(self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>')) > <NUM_LIT:0>:<EOL><INDENT>self.set_confound_pipeline = self.pipeline<EOL><DEDENT>self.set_pipeline('<STR_LIT>' + teneto.__version__)<EOL>self.set_pipeline_subdir('<STR_LIT>')<EOL>self.set_bids_suffix('<STR_LIT>')<EOL><DEDENT> | Derive time-varying connectivity on the selected files.
Parameters
----------
params : dict.
See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions)
update_pipeline : bool
If true, the object updates the selected files with those derived here.
njobs : int
How many parallel jobs to run
confound_corr_report : bool
If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.
tag : str
any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'
Returns
-------
dfc : files
saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy | f1946:c0:m3 |
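A hedged call sketch; it assumes 'jackknife' is one of the method names accepted by teneto.timeseries.derive_temporalnetwork (the structure of params is documented there, not here), and that tnet is a TenetoBIDS object already pointing at parcellated time series:

>>> params = {'method': 'jackknife'}
>>> tnet.derive_temporalnetwork(params, update_pipeline=True, njobs=1)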
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files): | data = load_tabular_file(f, index_col=True, header=True)<EOL>fs, _ = drop_bids_suffix(f)<EOL>save_name, save_dir, _ = self._save_namepaths_bids_derivatives(<EOL>fs, tag, '<STR_LIT>', '<STR_LIT>')<EOL>if '<STR_LIT>' in params.keys():<EOL><INDENT>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>fc_files = self.get_selected_files(<EOL>quiet=<NUM_LIT:1>, pipeline='<STR_LIT>', forfile=f)<EOL>if len(fc_files) == <NUM_LIT:1>:<EOL><INDENT>params['<STR_LIT>'] = load_tabular_file(<EOL>fc_files[<NUM_LIT:0>]).values<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' in params.keys():<EOL><INDENT>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>fc_files = self.get_selected_files(<EOL>quiet=<NUM_LIT:1>, pipeline='<STR_LIT>', forfile=f)<EOL>if len(fc_files) == <NUM_LIT:1>:<EOL><INDENT>params['<STR_LIT>'] = load_tabular_file(<EOL>fc_files[<NUM_LIT:0>]).values<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>params['<STR_LIT>'] = '<STR_LIT:yes>'<EOL>params['<STR_LIT>'] = save_dir + '<STR_LIT>'<EOL>params['<STR_LIT>'] = save_name + '<STR_LIT>'<EOL>if not os.path.exists(params['<STR_LIT>']):<EOL><INDENT>os.makedirs(params['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' not in params:<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)<EOL>dfc_net = TemporalNetwork(from_array=dfc, nettype='<STR_LIT>')<EOL>dfc_net.network.to_csv(save_dir + save_name + '<STR_LIT>', sep='<STR_LIT:\t>')<EOL>sidecar = get_sidecar(f)<EOL>sidecar['<STR_LIT>'] = params<EOL>if '<STR_LIT>' in sidecar['<STR_LIT>']:<EOL><INDENT>sidecar['<STR_LIT>']['<STR_LIT>'] = True<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = fc_files<EOL><DEDENT>if '<STR_LIT>' in sidecar['<STR_LIT>']:<EOL><INDENT>sidecar['<STR_LIT>']['<STR_LIT>'] = True<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = fc_files<EOL><DEDENT>sidecar['<STR_LIT>']['<STR_LIT>'] = f<EOL>sidecar['<STR_LIT>']['<STR_LIT:description>'] = '<STR_LIT>'<EOL>with open(save_dir + save_name + '<STR_LIT>', '<STR_LIT:w>') as fs:<EOL><INDENT>json.dump(sidecar, fs)<EOL><DEDENT>if confounds_exist:<EOL><INDENT>analysis_step = '<STR_LIT>'<EOL>df = pd.read_csv(confound_files[i], sep='<STR_LIT:\t>')<EOL>df = df.fillna(df.median())<EOL>ind = np.triu_indices(dfc.shape[<NUM_LIT:0>], k=<NUM_LIT:1>)<EOL>dfc_df = pd.DataFrame(dfc[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], :].transpose())<EOL>if len(df) != len(dfc_df):<EOL><INDENT>df = df.iloc[int(np.round((params['<STR_LIT>']-<NUM_LIT:1>)/<NUM_LIT:2>)): int(np.round((params['<STR_LIT>']-<NUM_LIT:1>)/<NUM_LIT:2>)+len(dfc_df))]<EOL>df.reset_index(inplace=True, drop=True)<EOL><DEDENT>dfc_df_z = (dfc_df - dfc_df.mean())<EOL>df_z = (df - df.mean())<EOL>R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(<EOL>df_z.std(ddof=<NUM_LIT:0>)).div(dfc_df_z.std(ddof=<NUM_LIT:0>), axis=<NUM_LIT:0>)<EOL>R_df_describe = R_df.describe()<EOL>desc_index = R_df_describe.index<EOL>confound_report_dir = params['<STR_LIT>'] +'<STR_LIT:/>' + save_name + '<STR_LIT>'<EOL>confound_report_figdir = confound_report_dir + '<STR_LIT>'<EOL>if not os.path.exists(confound_report_figdir):<EOL><INDENT>os.makedirs(confound_report_figdir)<EOL><DEDENT>report = '<STR_LIT>'<EOL>report += '<STR_LIT>' + analysis_step + '<STR_LIT>'<EOL>for c in R_df.columns:<EOL><INDENT>fig, ax = plt.subplots(<NUM_LIT:1>)<EOL>ax = sns.distplot(<EOL>R_df[c], hist=False, color='<STR_LIT:m>', ax=ax, 
kde_kws={"<STR_LIT>": True})<EOL>fig.savefig(confound_report_figdir + c + '<STR_LIT>')<EOL>plt.close(fig)<EOL>report += '<STR_LIT>' + c + '<STR_LIT>'<EOL>for ind_name, r in enumerate(R_df_describe[c]):<EOL><INDENT>report += str(desc_index[ind_name]) + '<STR_LIT>'<EOL>report += str(r) + '<STR_LIT>'<EOL><DEDENT>report += '<STR_LIT>'<EOL>report += '<STR_LIT>' +os.path.abspath(confound_report_figdir) +'<STR_LIT:/>' + c + '<STR_LIT>'<EOL><DEDENT>report += '<STR_LIT>'<EOL>with open(confound_report_dir + save_name + '<STR_LIT>', '<STR_LIT:w>') as file:<EOL><INDENT>file.write(report)<EOL><DEDENT><DEDENT> | Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing. | f1946:c0:m4 |
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>R_group = []<EOL>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(<EOL>self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}<EOL>for j in as_completed(job):<EOL><INDENT>R_group.append(j.result())<EOL><DEDENT><DEDENT>if returngroup:<EOL><INDENT>R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=<NUM_LIT:0>))<EOL>return np.array(R_group)<EOL><DEDENT> | Makes connectivity matrix for each of the subjects.
Parameters
----------
returngroup : bool, default=False
If true, returns the group average connectivity matrix.
njobs : int
How many parallel jobs to run
file_idx : bool
Default False. Set to True to ignore the index column in the loaded file.
file_hdr : bool
Default False. Set to True to ignore the header row in the loaded file.
Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
if returngroup is true, the average connectivity matrix is returned. | f1946:c0:m6 |
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None): | file_name = f.split('<STR_LIT:/>')[-<NUM_LIT:1>].split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>if tag != '<STR_LIT>':<EOL><INDENT>tag = '<STR_LIT:_>' + tag<EOL><DEDENT>if suffix:<EOL><INDENT>file_name, _ = drop_bids_suffix(file_name)<EOL>save_name = file_name + tag<EOL>save_name += '<STR_LIT:_>' + suffix<EOL><DEDENT>else:<EOL><INDENT>save_name = file_name + tag<EOL><DEDENT>paths_post_pipeline = f.split(self.pipeline)<EOL>if self.pipeline_subdir:<EOL><INDENT>paths_post_pipeline = paths_post_pipeline[<NUM_LIT:1>].split(self.pipeline_subdir)[<EOL><NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>paths_post_pipeline = paths_post_pipeline[<NUM_LIT:1>].split(file_name)[<NUM_LIT:0>]<EOL><DEDENT>base_dir = self.BIDS_dir + '<STR_LIT>' + '<STR_LIT>' +teneto.__version__ + '<STR_LIT:/>' + paths_post_pipeline + '<STR_LIT:/>'<EOL>save_dir = base_dir + '<STR_LIT:/>' + save_directory + '<STR_LIT:/>'<EOL>if not os.path.exists(save_dir):<EOL><INDENT>try:<EOL><INDENT>os.makedirs(save_dir)<EOL><DEDENT>except:<EOL><INDENT>time.sleep(<NUM_LIT:2>)<EOL><DEDENT><DEDENT>if not os.path.exists(self.BIDS_dir + '<STR_LIT>' + '<STR_LIT>' + teneto.__version__ + '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>with open(self.BIDS_dir + '<STR_LIT>' + '<STR_LIT>' + teneto.__version__ + '<STR_LIT>', '<STR_LIT:w>') as fs:<EOL><INDENT>json.dump(self.tenetoinfo, fs)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>time.sleep(<NUM_LIT:2>)<EOL><DEDENT><DEDENT>return save_name, save_dir, base_dir<EOL> | Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/]) | f1946:c0:m8 |
def get_tags(self, tag, quiet=<NUM_LIT:1>): | if not self.pipeline:<EOL><INDENT>print('<STR_LIT>')<EOL>self.get_pipeline_alternatives(quiet)<EOL><DEDENT>else:<EOL><INDENT>if tag == '<STR_LIT>':<EOL><INDENT>datapath = self.BIDS_dir + '<STR_LIT>' + self.pipeline + '<STR_LIT:/>'<EOL>tag_alternatives = [<EOL>f.split('<STR_LIT>')[<NUM_LIT:1>] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and '<STR_LIT>' in f]<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>tag_alternatives = []<EOL>for sub in self.bids_tags['<STR_LIT>']:<EOL><INDENT>tag_alternatives += [f.split('<STR_LIT>')[<NUM_LIT:1>] for f in os.listdir(<EOL>self.BIDS_dir + '<STR_LIT>' + self.pipeline + '<STR_LIT:/>' + '<STR_LIT>' + sub) if '<STR_LIT>' in f]<EOL><DEDENT>tag_alternatives = set(tag_alternatives)<EOL><DEDENT>else:<EOL><INDENT>files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>tag_alternatives = []<EOL>for f in files:<EOL><INDENT>f = f.split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>f = f.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>tag_alternatives += [t.split('<STR_LIT:->')[<NUM_LIT:1>]<EOL>for t in f.split('<STR_LIT:_>') if t.split('<STR_LIT:->')[<NUM_LIT:0>] == tag]<EOL><DEDENT>tag_alternatives = set(tag_alternatives)<EOL><DEDENT>if quiet == <NUM_LIT:0>:<EOL><INDENT>print(tag + '<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(tag_alternatives))<EOL><DEDENT>return list(tag_alternatives)<EOL><DEDENT> | Returns which tag alternatives can be identified in the BIDS derivatives structure. | f1946:c0:m9 |
def get_pipeline_alternatives(self, quiet=<NUM_LIT:0>): | if not os.path.exists(self.BIDS_dir + '<STR_LIT>'):<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>pipeline_alternatives = os.listdir(self.BIDS_dir + '<STR_LIT>')<EOL>if quiet == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(pipeline_alternatives))<EOL><DEDENT>return list(pipeline_alternatives)<EOL><DEDENT> | The pipeline are the different outputs that are placed in the ./derivatives directory.
get_pipeline_alternatives gets those which are found in the specified BIDS directory structure. | f1946:c0:m10 |
def get_pipeline_subdir_alternatives(self, quiet=<NUM_LIT:0>): | if not self.pipeline:<EOL><INDENT>print('<STR_LIT>')<EOL>self.get_pipeline_alternatives()<EOL><DEDENT>else:<EOL><INDENT>pipeline_subdir_alternatives = []<EOL>for s in self.bids_tags['<STR_LIT>']:<EOL><INDENT>derdir_files = os.listdir(<EOL>self.BIDS_dir + '<STR_LIT>' + self.pipeline + '<STR_LIT>' + s + '<STR_LIT>')<EOL>pipeline_subdir_alternatives += [<EOL>f for f in derdir_files if os.path.isdir(self.BIDS_dir + '<STR_LIT>' + self.pipeline + '<STR_LIT>' + s + '<STR_LIT>' + f)]<EOL><DEDENT>pipeline_subdir_alternatives = set(pipeline_subdir_alternatives)<EOL>if quiet == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(pipeline_subdir_alternatives))<EOL><DEDENT>return list(pipeline_subdir_alternatives)<EOL><DEDENT> | Note
-----
This function currently returns the wrong folders and will be fixed in the future.
This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir
But it does not care about ses-yy at the moment. | f1946:c0:m11 |
def get_selected_files(self, pipeline='<STR_LIT>', forfile=None, quiet=<NUM_LIT:0>, allowedfileformats='<STR_LIT:default>'): | <EOL>file_dict = dict(self.bids_tags)<EOL>if allowedfileformats == '<STR_LIT:default>':<EOL><INDENT>allowedfileformats = ['<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>if forfile:<EOL><INDENT>if isinstance(forfile, str):<EOL><INDENT>forfile = get_bids_tag(forfile, '<STR_LIT:all>')<EOL><DEDENT>for n in forfile.keys():<EOL><INDENT>file_dict[n] = [forfile[n]]<EOL><DEDENT><DEDENT>non_entries = []<EOL>for n in file_dict:<EOL><INDENT>if not file_dict[n]:<EOL><INDENT>non_entries.append(n)<EOL><DEDENT><DEDENT>for n in non_entries:<EOL><INDENT>file_dict.pop(n)<EOL><DEDENT>file_components = []<EOL>for k in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if k in file_dict:<EOL><INDENT>file_components.append([k + '<STR_LIT:->' + t for t in file_dict[k]])<EOL><DEDENT><DEDENT>file_list = list(itertools.product(*file_components))<EOL>if pipeline == '<STR_LIT>':<EOL><INDENT>mdir = self.BIDS_dir + '<STR_LIT>' + self.pipeline<EOL><DEDENT>elif pipeline == '<STR_LIT>' and self.confound_pipeline:<EOL><INDENT>mdir = self.BIDS_dir + '<STR_LIT>' + self.confound_pipeline<EOL><DEDENT>elif pipeline == '<STR_LIT>':<EOL><INDENT>mdir = self.BIDS_dir + '<STR_LIT>' + self.pipeline<EOL><DEDENT>elif pipeline == '<STR_LIT>':<EOL><INDENT>mdir = self.BIDS_dir + '<STR_LIT>' + teneto.__version__<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>found_files = []<EOL>for f in file_list:<EOL><INDENT>wdir = str(mdir)<EOL>sub = [t for t in f if t.startswith('<STR_LIT>')]<EOL>ses = [t for t in f if t.startswith('<STR_LIT>')]<EOL>wdir += '<STR_LIT:/>' + sub[<NUM_LIT:0>] + '<STR_LIT:/>'<EOL>if ses:<EOL><INDENT>wdir += '<STR_LIT:/>' + ses[<NUM_LIT:0>] + '<STR_LIT:/>'<EOL><DEDENT>wdir += '<STR_LIT>'<EOL>if pipeline == '<STR_LIT>':<EOL><INDENT>wdir += '<STR_LIT:/>' + self.pipeline_subdir + '<STR_LIT:/>'<EOL>fileending = [self.bids_suffix +<EOL>f for f in allowedfileformats]<EOL><DEDENT>elif pipeline == '<STR_LIT>':<EOL><INDENT>wdir += '<STR_LIT>'<EOL>fileending = ['<STR_LIT>' + f for f in allowedfileformats]<EOL><DEDENT>elif pipeline == '<STR_LIT>':<EOL><INDENT>fileending = ['<STR_LIT>' + f for f in allowedfileformats]<EOL><DEDENT>if os.path.exists(wdir):<EOL><INDENT>found = []<EOL>for ff in os.listdir(wdir):<EOL><INDENT>ftags = get_bids_tag(ff, '<STR_LIT:all>')<EOL>t = [t for t in ftags if t in file_dict and ftags[t]<EOL>in file_dict[t]]<EOL>if len(t) == len(file_dict):<EOL><INDENT>found.append(ff)<EOL><DEDENT><DEDENT>found = [f for f in found for e in fileending if f.endswith(e)]<EOL>if pipeline == '<STR_LIT>':<EOL><INDENT>found = [i for i in found if '<STR_LIT>' in i]<EOL><DEDENT>else:<EOL><INDENT>found = [i for i in found if '<STR_LIT>' not in i]<EOL><DEDENT>found = list(<EOL>map(str.__add__, [re.sub('<STR_LIT>', '<STR_LIT:/>', wdir)]*len(found), found))<EOL>found = [i for i in found if not any(<EOL>[bf in i for bf in self.bad_files])]<EOL>if found:<EOL><INDENT>found_files += found<EOL><DEDENT><DEDENT>if quiet == -<NUM_LIT:1>:<EOL><INDENT>print(wdir)<EOL><DEDENT><DEDENT>found_files = list(set(found_files))<EOL>if quiet == <NUM_LIT:0>:<EOL><INDENT>print(found_files)<EOL><DEDENT>return found_files<EOL> | Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, self in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints results. If 1, no results printed.
forfile : str or dict
A filename or dictionary of file tags. If this is set, only files that match those tags are selected.
accepted_fileformat : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files which are currently selected with the current using the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects etc. There are the files that will generally be used if calling a make_ function. | f1946:c0:m12 |
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='<STR_LIT>'): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if isinstance(confound, str):<EOL><INDENT>confound = [confound]<EOL><DEDENT>if isinstance(exclusion_criteria, str):<EOL><INDENT>exclusion_criteria = [exclusion_criteria]<EOL><DEDENT>if isinstance(confound_stat, str):<EOL><INDENT>confound_stat = [confound_stat]<EOL><DEDENT>if len(exclusion_criteria) != len(confound):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if len(confound_stat) != len(confound):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>relex, crit = process_exclusion_criteria(exclusion_criteria)<EOL>files = sorted(self.get_selected_files(quiet=<NUM_LIT:1>))<EOL>confound_files = sorted(<EOL>self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>'))<EOL>files, confound_files = confound_matching(files, confound_files)<EOL>bad_files = []<EOL>bs = <NUM_LIT:0><EOL>foundconfound = []<EOL>foundreason = []<EOL>for s, cfile in enumerate(confound_files):<EOL><INDENT>df = load_tabular_file(cfile, index_col=None)<EOL>found_bad_subject = False<EOL>for i, _ in enumerate(confound):<EOL><INDENT>if confound_stat[i] == '<STR_LIT>':<EOL><INDENT>if relex[i](df[confound[i]].median(), crit[i]):<EOL><INDENT>found_bad_subject = True<EOL><DEDENT><DEDENT>elif confound_stat[i] == '<STR_LIT>':<EOL><INDENT>if relex[i](df[confound[i]].mean(), crit[i]):<EOL><INDENT>found_bad_subject = True<EOL><DEDENT><DEDENT>elif confound_stat[i] == '<STR_LIT>':<EOL><INDENT>if relex[i](df[i][confound[i]].std(), crit[i]):<EOL><INDENT>found_bad_subject = True<EOL><DEDENT><DEDENT>if found_bad_subject:<EOL><INDENT>foundconfound.append(confound[i])<EOL>foundreason.append(exclusion_criteria[i])<EOL><DEDENT><DEDENT>if found_bad_subject:<EOL><INDENT>bad_files.append(files[s])<EOL>bs += <NUM_LIT:1><EOL><DEDENT><DEDENT>self.set_bad_files(<EOL>bad_files, reason='<STR_LIT>')<EOL>for i, f in enumerate(bad_files):<EOL><INDENT>sidecar = get_sidecar(f)<EOL>sidecar['<STR_LIT>'] = {}<EOL>sidecar['<STR_LIT>'] = foundconfound[i]<EOL>sidecar['<STR_LIT>'] = foundreason[i]<EOL>for af in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>f = f.split(af)[<NUM_LIT:0>]<EOL><DEDENT>f += '<STR_LIT>'<EOL>with open(f, '<STR_LIT:w>') as fs:<EOL><INDENT>json.dump(sidecar, fs)<EOL><DEDENT><DEDENT>print('<STR_LIT>' + str(bs) + '<STR_LIT>')<EOL> | Excludes subjects given a certain exclusion criteria.
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files
exclusion_criteria : str or list
for each confound, an exclusion_criteria should be expressed as a string. It starts with >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every subject whose average of that confound is greater than 0.2 will be rejected.
confound_stat : str or list
Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time points. If multiple confounds are specified, this has to be a list.).
Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. | f1946:c0:m13 |
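An illustrative call; the confound column name 'framewise_displacement' is a placeholder for whatever column exists in your confound files:

>>> # Flag any file whose mean framewise displacement exceeds 0.5.
>>> tnet.set_exclusion_file('framewise_displacement', '>0.5', confound_stat='mean')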
def set_exclusion_timepoint(self, confound, exclusion_criteria, replace_with, tol=<NUM_LIT:1>, overwrite=True, desc=None): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if isinstance(confound, str):<EOL><INDENT>confound = [confound]<EOL><DEDENT>if isinstance(exclusion_criteria, str):<EOL><INDENT>exclusion_criteria = [exclusion_criteria]<EOL><DEDENT>if len(exclusion_criteria) != len(confound):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>relex, crit = process_exclusion_criteria(exclusion_criteria)<EOL>files = sorted(self.get_selected_files(quiet=<NUM_LIT:1>))<EOL>confound_files = sorted(<EOL>self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>'))<EOL>files, confound_files = confound_matching(files, confound_files)<EOL>bad_files = []<EOL>for i, cfile in enumerate(confound_files):<EOL><INDENT>data = load_tabular_file(files[i]).values<EOL>df = load_tabular_file(cfile, index_col=None)<EOL>ind = []<EOL>for ci, c in enumerate(confound):<EOL><INDENT>ind = df[relex[ci](df[c], crit[ci])].index<EOL>if replace_with == '<STR_LIT>':<EOL><INDENT>if <NUM_LIT:0> in ind:<EOL><INDENT>ind = np.delete(ind, np.where(ind == <NUM_LIT:0>))<EOL><DEDENT>if df.index.max():<EOL><INDENT>ind = np.delete(ind, np.where(ind == df.index.max()))<EOL><DEDENT><DEDENT>data[:, ind.astype(int)] = np.nan<EOL><DEDENT>nanind = np.where(np.isnan(data[<NUM_LIT:0>, :]))[<NUM_LIT:0>]<EOL>badpoints_n = len(nanind)<EOL>if badpoints_n / np.array(len(df)) > tol:<EOL><INDENT>bad_files.append(files[i])<EOL><DEDENT>nonnanind = np.where(np.isnan(data[<NUM_LIT:0>, :]) == <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>nanind = nanind[nanind > nonnanind.min()]<EOL>nanind = nanind[nanind < nonnanind.max()]<EOL>if replace_with == '<STR_LIT>':<EOL><INDENT>for n in range(data.shape[<NUM_LIT:0>]):<EOL><INDENT>interp = interp1d(<EOL>nonnanind, data[n, nonnanind], kind='<STR_LIT>')<EOL>data[n, nanind] = interp(nanind)<EOL><DEDENT><DEDENT>data = pd.DataFrame(data)<EOL>sname, _ = drop_bids_suffix(files[i])<EOL>if self.pipeline != '<STR_LIT>' + teneto.__version__:<EOL><INDENT>sname = sname.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>spath = self.BIDS_dir + '<STR_LIT>' + '<STR_LIT>' + teneto.__version__ + '<STR_LIT:/>'<EOL>tags = get_bids_tag(sname, ['<STR_LIT>', '<STR_LIT>'])<EOL>spath += '<STR_LIT>' + tags['<STR_LIT>'] + '<STR_LIT:/>'<EOL>if '<STR_LIT>' in tags:<EOL><INDENT>spath += '<STR_LIT>' + tags['<STR_LIT>'] + '<STR_LIT:/>'<EOL><DEDENT>spath += '<STR_LIT>'<EOL>if self.pipeline_subdir:<EOL><INDENT>spath += self.pipeline_subdir + '<STR_LIT:/>'<EOL><DEDENT>make_directories(spath)<EOL>sname = spath + sname<EOL><DEDENT>if '<STR_LIT>' in sname and desc:<EOL><INDENT>desctag = get_bids_tag(sname.split('<STR_LIT:/>')[-<NUM_LIT:1>], '<STR_LIT>')<EOL>sname = '<STR_LIT>'.join(sname.split('<STR_LIT>' + desctag['<STR_LIT>']))<EOL>sname += '<STR_LIT>' + desc<EOL><DEDENT>if os.path.exists(sname + self.bids_suffix + '<STR_LIT>') and overwrite == False:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>data.to_csv(sname + '<STR_LIT:_>' + self.bids_suffix + '<STR_LIT>', sep='<STR_LIT:\t>')<EOL>sidecar = get_sidecar(files[i])<EOL>sidecar['<STR_LIT>'] = {}<EOL>sidecar['<STR_LIT>']['<STR_LIT:description>'] = '<STR_LIT>'<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT:U+002C>'.join(confound)<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT:U+002C>'.join(<EOL>exclusion_criteria)<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = replace_with<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = 
badpoints_n<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = badpoints_n /np.array(len(df))<EOL>sidecar['<STR_LIT>']['<STR_LIT>'] = tol<EOL>with open(sname + '<STR_LIT:_>' + self.bids_suffix + '<STR_LIT>', '<STR_LIT:w>') as fs:<EOL><INDENT>json.dump(sidecar, fs)<EOL><DEDENT><DEDENT>self.set_bad_files(<EOL>bad_files, reason='<STR_LIT>')<EOL>self.set_pipeline('<STR_LIT>' + teneto.__version__)<EOL>if desc:<EOL><INDENT>self.set_bids_tags({'<STR_LIT>': desc.split('<STR_LIT:->')[<NUM_LIT:1>]})<EOL><DEDENT> | Excludes subjects given a certain exclusion criteria. Does not work on nifti files, only csv, numpy or tsc. Assumes data is node,time
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files. Assumes data is node,time
exclusion_criteria : str or list
for each confound, an exclusion_criteria should be expressed as a string. It starts with >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every time-point where that confound is greater than 0.2 will be excluded.
replace_with : str
Can be 'nan' (bad values become nans) or 'cubicspline' (bad values are interpolated). If bad value occurs at 0 or -1 index, then these values are kept and no interpolation occurs.
tol : float
Tolerance of excluded time-points allowed before becoming a bad subject.
overwrite : bool (default=True)
If true and there are files in the teneto derivatives directory with the same name, these will be overwritten by this step.
The json sidecar is updated with the new information about the file.
desc : str
String to add desc tag to filenames if overwrite is set to true.
Returns
------
Loads the TenetoBIDS.selected_files and replaces any instances of confound meeting the exclusion_criteria with replace_with. | f1946:c0:m14 |
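The per-time-point variant, sketched with the same placeholder confound name:

>>> # Censor time-points with framewise displacement above 0.5, interpolate over
>>> # them, and tolerate at most 20% censored volumes per file.
>>> tnet.set_exclusion_timepoint('framewise_displacement', '>0.5',
...                              replace_with='cubicspline', tol=0.2)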
def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='<STR_LIT>', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>parc_name = parcellation.split('<STR_LIT:_>')[<NUM_LIT:0>].lower()<EOL>if not self.confounds and removeconfounds:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if update_pipeline == False and removeconfounds:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>self.set_network_communities(parcellation, netn=yeonetworkn)<EOL>if not tag:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>tag = '<STR_LIT>' + tag<EOL><DEDENT>if not parc_params:<EOL><INDENT>parc_params = {}<EOL><DEDENT>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,<EOL>parc_name, parc_type, parc_params) for i, f in enumerate(files)}<EOL>for j in as_completed(job):<EOL><INDENT>j.result()<EOL><DEDENT><DEDENT>if update_pipeline == True:<EOL><INDENT>if not self.confound_pipeline and len(self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>')) > <NUM_LIT:0>:<EOL><INDENT>self.set_confound_pipeline(self.pipeline)<EOL><DEDENT>self.set_pipeline('<STR_LIT>' + teneto.__version__)<EOL>self.set_pipeline_subdir('<STR_LIT>')<EOL>if tag:<EOL><INDENT>self.set_bids_tags({'<STR_LIT>': tag.split('<STR_LIT:->')[<NUM_LIT:1>]})<EOL><DEDENT>self.set_bids_suffix('<STR_LIT>')<EOL>if removeconfounds:<EOL><INDENT>self.removeconfounds(<EOL>clean_params=clean_params, transpose=None, njobs=njobs)<EOL><DEDENT><DEDENT> | Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.
Parameters
-----------
parcellation : str
specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
network : str
if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
if true, regresses out confounds that are specified in self.set_confounds with linear regression.
update_pipeline : bool
TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
any additional tag that must be in file name. After the tag there must either be an underscore or period (following bids).
clean_params : dict
**kwargs for nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
Only relevant for when parcellation is schaeffer2018. Use 7 or 17 template networks
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication. | f1946:c0:m16 |
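A hedged example call, using one of the parcellation names listed above:

>>> tnet.make_parcellation('gordon2014_333', parc_type='region',
...                        removeconfounds=False, update_pipeline=True)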
def communitydetection(self, community_detection_params, community_type='<STR_LIT>', tag=None, file_hdr=False, file_idx=False, njobs=None): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if not tag:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>tag = '<STR_LIT>' + tag<EOL><DEDENT>if community_type == '<STR_LIT>':<EOL><INDENT>files = self.get_selected_files(quiet=True)<EOL>for f in files:<EOL><INDENT>if '<STR_LIT>' not in f:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>elif community_type == '<STR_LIT>':<EOL><INDENT>files = self.get_selected_files(<EOL>quiet=True, pipeline='<STR_LIT>')<EOL><DEDENT>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr,<EOL>file_idx, tag) for i, f in enumerate(files) if all([t + '<STR_LIT:_>' in f or t + '<STR_LIT:.>' in f for t in tag])}<EOL>for j in as_completed(job):<EOL><INDENT>j.result()<EOL><DEDENT><DEDENT> | Calls temporal_louvain_with_consensus on connectivity data
Parameters
----------
community_detection_params : dict
kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
Either 'temporal' or 'static'. If temporal, a community partition is made for each time-point.
file_idx : bool (default false)
if true, an index column is present in the data and will be ignored
file_hdr : bool (default false)
if true, a header row is present in the data and will be ignored
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Note
----
All non-positive edges are made to zero.
Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ | f1946:c0:m18 |
def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if not self.confounds and not confounds:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if not tag:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>tag = '<STR_LIT>' + tag<EOL><DEDENT>if confounds:<EOL><INDENT>self.set_confounds(confounds)<EOL><DEDENT>files = sorted(self.get_selected_files(quiet=<NUM_LIT:1>))<EOL>confound_files = sorted(<EOL>self.get_selected_files(quiet=<NUM_LIT:1>, pipeline='<STR_LIT>'))<EOL>files, confound_files = confound_matching(files, confound_files)<EOL>if not clean_params:<EOL><INDENT>clean_params = {}<EOL><DEDENT>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(<EOL>self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}<EOL>for j in as_completed(job):<EOL><INDENT>j.result()<EOL><DEDENT><DEDENT>self.set_pipeline('<STR_LIT>' + teneto.__version__)<EOL>self.set_bids_suffix('<STR_LIT>')<EOL>if tag:<EOL><INDENT>self.set_bids_tags({'<STR_LIT>': tag.split('<STR_LIT:->')[<NUM_LIT:1>]})<EOL><DEDENT> | Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kwargs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. Otherwise TenetoBIDS.njobs is used.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Saves all TenetoBIDS.get_selected_files with confounds removed, with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data. | f1946:c0:m20 |
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None): | if not njobs:<EOL><INDENT>njobs = self.njobs<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if isinstance(measure, str):<EOL><INDENT>measure = [measure]<EOL><DEDENT>if isinstance(measure_params, dict):<EOL><INDENT>measure_params = [measure_params]<EOL><DEDENT>if measure_params and len(measure) != len(measure_params):<EOL><INDENT>raise ValueError('<STR_LIT>' + str(len(measure_params)) +<EOL>'<STR_LIT>' + str(len(measure)) + '<STR_LIT>')<EOL><DEDENT>files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>if not tag:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>tag = '<STR_LIT>' + tag<EOL><DEDENT>with ProcessPoolExecutor(max_workers=njobs) as executor:<EOL><INDENT>job = {executor.submit(<EOL>self._run_networkmeasures, f, tag, measure, measure_params) for f in files}<EOL>for j in as_completed(job):<EOL><INDENT>j.result()<EOL><DEDENT><DEDENT> | Calculates a network measure
For available functions see: teneto.networkmeasures
Parameters
----------
measure : str or list
Name of function(s) from teneto.networkmeasures that will be run.
measure_params : dict or list of dictionaries
Containing kwargs for the argument in measure.
See note regarding Communities key.
tag : str
Add additional tag to saved filenames.
Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'.
These options must be precalculated. If template, Teneto tries to load default for parcellation. If static, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy
Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func//temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure | f1946:c0:m22 |
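A minimal usage sketch of the call pattern described above; the BIDS directory, the pipeline name and the chosen measure and kwargs ('volatility' with calc='global') are illustrative placeholders, not defaults from the source.
>>> import teneto
>>> tnet = teneto.TenetoBIDS('/path/to/BIDS_dir', pipeline='teneto-derive-temporalnetwork')  # placeholder setup
>>> tnet.networkmeasures(measure='volatility', measure_params={'calc': 'global'})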
def set_confound_pipeline(self, confound_pipeline): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if not os.path.exists(self.BIDS_dir + '<STR_LIT>' + confound_pipeline):<EOL><INDENT>print('<STR_LIT>')<EOL>self.get_pipeline_alternatives()<EOL><DEDENT>else:<EOL><INDENT>self.confound_pipeline = confound_pipeline<EOL><DEDENT> | There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is. | f1946:c0:m26 |
def set_network_communities(self, parcellation, netn=<NUM_LIT>): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>subcortical = '<STR_LIT>'<EOL>cerebellar = '<STR_LIT>'<EOL>if '<STR_LIT:+>' in parcellation:<EOL><INDENT>parcin = parcellation<EOL>parcellation = parcellation.split('<STR_LIT:+>')[<NUM_LIT:0>]<EOL>if '<STR_LIT>' in parcin:<EOL><INDENT>subcortical = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in parcin:<EOL><INDENT>cerebellar = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>subcortical = None<EOL><DEDENT>if parcellation.split('<STR_LIT:_>')[<NUM_LIT:0>] != '<STR_LIT>':<EOL><INDENT>net_path = teneto.__path__[<EOL><NUM_LIT:0>] + '<STR_LIT>' + parcellation + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>roin = parcellation.split('<STR_LIT:_>')[<NUM_LIT:1>].split('<STR_LIT>')[<NUM_LIT:0>]<EOL>net_path = teneto.__path__[<EOL><NUM_LIT:0>] + '<STR_LIT>' + roin + '<STR_LIT>' + str(netn) + '<STR_LIT>'<EOL><DEDENT>nn = <NUM_LIT:0><EOL>if os.path.exists(net_path):<EOL><INDENT>self.communitytemplate_ = pd.read_csv(<EOL>net_path, index_col=<NUM_LIT:0>, sep='<STR_LIT:\t>')<EOL>self.communitytemplate_info_ = self.communitytemplate_[['<STR_LIT>', '<STR_LIT>']].drop_duplicates(<EOL>).sort_values('<STR_LIT>').reset_index(drop=True)<EOL>self.communitytemplate_info_[<EOL>'<STR_LIT>'] = self.communitytemplate_.groupby('<STR_LIT>').count()['<STR_LIT>']<EOL><DEDENT>elif os.path.exists(parcellation):<EOL><INDENT>self.communitytemplate_ = pd.read_csv(<EOL>parcellation, index_col=<NUM_LIT:0>, sep='<STR_LIT:\t>')<EOL>self.communitytemplate_info_ = self.communitytemplate_.drop_duplicates(<EOL>).sort_values('<STR_LIT>').reset_index(drop=True)<EOL>self.communitytemplate_info_[<EOL>'<STR_LIT>'] = self.communitytemplate_.groupby('<STR_LIT>').count()<EOL><DEDENT>else:<EOL><INDENT>nn = <NUM_LIT:1><EOL>print('<STR_LIT>')<EOL><DEDENT>if subcortical == '<STR_LIT>' and nn == <NUM_LIT:0>:<EOL><INDENT>node_num = <NUM_LIT><EOL>sub = pd.DataFrame(data={'<STR_LIT>': ['<STR_LIT>']*node_num, '<STR_LIT>': np.repeat(<EOL>self.communitytemplate_['<STR_LIT>'].max()+<NUM_LIT:1>, node_num)})<EOL>self.communitytemplate_ = self.communitytemplate_.append(sub)<EOL>self.communitytemplate_.reset_index(drop=True, inplace=True)<EOL><DEDENT>if cerebellar == '<STR_LIT>' and nn == <NUM_LIT:0>:<EOL><INDENT>node_num = <NUM_LIT><EOL>sub = pd.DataFrame(data={'<STR_LIT>': ['<STR_LIT>']*node_num, '<STR_LIT>': np.repeat(<EOL>self.communitytemplate_['<STR_LIT>'].max()+<NUM_LIT:1>, node_num)})<EOL>self.communitytemplate_ = self.communitytemplate_.append(sub)<EOL>self.communitytemplate_.reset_index(drop=True, inplace=True)<EOL><DEDENT> | parcellation : str
path to csv or name of default parcellation.
netn : int
only when yeo atlas is used, specifies either 7 or 17. | f1946:c0:m28 |
def set_bids_suffix(self, bids_suffix): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>self.bids_suffix = bids_suffix<EOL> | The last analysis step is the final tag that is present in files. | f1946:c0:m29 |
def set_pipeline(self, pipeline): | self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>if not os.path.exists(self.BIDS_dir + '<STR_LIT>' + pipeline):<EOL><INDENT>print('<STR_LIT>')<EOL>self.get_pipeline_alternatives()<EOL><DEDENT>else:<EOL><INDENT>self.pipeline = pipeline<EOL><DEDENT> | Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string. | f1946:c0:m30
def print_dataset_summary(self): | print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>if self.raw_data_exists:<EOL><INDENT>if self.BIDS.get_subjects():<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.BIDS.get_subjects())))<EOL>print('<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(self.BIDS.get_subjects()))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>print('<STR_LIT>' +<EOL>str(len(self.bids_tags['<STR_LIT>'])))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.bids_tags['<STR_LIT>']))<EOL>if isinstance(self.bad_subjects, list):<EOL><INDENT>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.bad_subjects))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>print('<STR_LIT>')<EOL>if self.raw_data_exists:<EOL><INDENT>if self.BIDS.get_tasks():<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.BIDS.get_tasks())))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.BIDS.get_tasks()))<EOL><DEDENT><DEDENT>if '<STR_LIT>' in self.bids_tags:<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.bids_tags['<STR_LIT>'])))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.bids_tags['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>print('<STR_LIT>')<EOL>if self.raw_data_exists:<EOL><INDENT>if self.BIDS.get_runs():<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.BIDS.get_runs())))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.BIDS.get_runs()))<EOL><DEDENT><DEDENT>if '<STR_LIT>' in self.bids_tags:<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.bids_tags['<STR_LIT>'])))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.bids_tags['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>print('<STR_LIT>')<EOL>if self.raw_data_exists:<EOL><INDENT>if self.BIDS.get_sessions():<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.BIDS.get_sessions())))<EOL>print('<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(self.BIDS.get_sessions()))<EOL><DEDENT><DEDENT>if '<STR_LIT>' in self.bids_tags:<EOL><INDENT>print('<STR_LIT>' +<EOL>str(len(self.bids_tags['<STR_LIT>'])))<EOL>print('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.bids_tags['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>print('<STR_LIT>')<EOL>if not self.pipeline:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' + self.pipeline)<EOL><DEDENT>if self.pipeline_subdir:<EOL><INDENT>print('<STR_LIT>' + self.pipeline_subdir)<EOL><DEDENT>selected_files = self.get_selected_files(quiet=<NUM_LIT:1>)<EOL>if selected_files:<EOL><INDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>' + str(len(selected_files)))<EOL>print('<STR_LIT>'.join(selected_files))<EOL><DEDENT> | Prints information about the the BIDS data and the files currently selected. | f1946:c0:m32 |
@classmethod<EOL><INDENT>def load_frompickle(cls, fname, reload_object=False):<DEDENT> | if fname[-<NUM_LIT:4>:] != '<STR_LIT>':<EOL><INDENT>fname += '<STR_LIT>'<EOL><DEDENT>with open(fname, '<STR_LIT:rb>') as f:<EOL><INDENT>tnet = pickle.load(f)<EOL><DEDENT>if reload_object:<EOL><INDENT>reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix,<EOL>bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs)<EOL>reloadnet.history = tnet.history<EOL>tnet = reloadnet<EOL><DEDENT>return tnet<EOL> | Load a saved instance of TenetoBIDS from a pickle file.
fname : str
path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
reloads object by calling teneto.TenetoBIDS (some information lost, for development)
Returns
-------
self :
TenetoBIDS instance | f1946:c0:m34 |
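A minimal usage sketch of the classmethod above; the file name is a placeholder.
>>> import teneto
>>> tnet = teneto.TenetoBIDS.load_frompickle('tenetobids.pkl')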
def load_data(self, datatype='<STR_LIT>', tag=None, measure='<STR_LIT>'): | if datatype == '<STR_LIT>' and not measure:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>self.add_history(inspect.stack()[<NUM_LIT:0>][<NUM_LIT:3>], locals(), <NUM_LIT:1>)<EOL>data_list = []<EOL>trialinfo_list = []<EOL>for s in self.bids_tags['<STR_LIT>']:<EOL><INDENT>base_path, file_list, datainfo = self._get_filelist(<EOL>datatype, s, tag, measure=measure)<EOL>if base_path:<EOL><INDENT>for f in file_list:<EOL><INDENT>try:<EOL><INDENT>filetags = get_bids_tag(f, '<STR_LIT:all>')<EOL>data_list.append(load_tabular_file(base_path + f))<EOL>if datainfo == '<STR_LIT>':<EOL><INDENT>trialinfo_list.append(<EOL>pd.DataFrame(filetags, index=[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>except pd.errors.EmptyDataError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if datatype == '<STR_LIT>' and len(data_list) == <NUM_LIT:1>:<EOL><INDENT>data_list = data_list[<NUM_LIT:0>]<EOL><DEDENT>if measure:<EOL><INDENT>data_list = {measure: data_list}<EOL><DEDENT>setattr(self, datatype + '<STR_LIT>', data_list)<EOL>if trialinfo_list:<EOL><INDENT>out_trialinfo = pd.concat(trialinfo_list)<EOL>out_trialinfo.reset_index(inplace=True, drop=True)<EOL>setattr(self, datatype + '<STR_LIT>', out_trialinfo)<EOL><DEDENT> | Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function.
The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory.
Data is placed in teneto.tvc_data_
Parameters
----------
datatype : str
\'tvc\', \'parcellation\', \'participant\', \'temporalnetwork\'
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
measure : str
required when datatype is temporalnetwork. A networkmeasure that should be loaded.
Returns
-------
tvc_data_ : numpy array
Containing the parcellation data. Each file is appended to the first dimension of the numpy array.
tvc_trialinfo_ : pandas data frame
Containing the subject info (all BIDS tags) for each entry in the numpy array. | f1946:c0:m35
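A minimal sketch of the loading pattern described above; the BIDS directory and pipeline name are placeholders and only the attribute names documented in the Returns section are assumed.
>>> import teneto
>>> tnet = teneto.TenetoBIDS('/path/to/BIDS_dir', pipeline='teneto-derive-temporalnetwork')  # placeholder setup
>>> tnet.load_data('tvc')
>>> estimates = tnet.tvc_data_      # loaded connectivity estimates
>>> info = tnet.tvc_trialinfo_      # BIDS tags for each loaded file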
def __init__(self, N=None, T=None, nettype=None, from_df=None, from_array=None, from_dict=None, from_edgelist=None, timetype=None, diagonal=False,<EOL>timeunit=None, desc=None, starttime=None, nodelabels=None, timelabels=None, hdf5=False, hdf5path=None): | <EOL>if nettype:<EOL><INDENT>if nettype not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>inputvars = locals()<EOL>if sum([<NUM_LIT:1> for n in inputvars.keys() if '<STR_LIT>' in n and inputvars[n] is not None]) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if from_array is not None:<EOL><INDENT>teneto.utils.check_TemporalNetwork_input(from_array, '<STR_LIT>')<EOL><DEDENT>if from_dict is not None:<EOL><INDENT>teneto.utils.check_TemporalNetwork_input(from_dict, '<STR_LIT>')<EOL><DEDENT>if from_edgelist is not None:<EOL><INDENT>teneto.utils.check_TemporalNetwork_input(from_edgelist, '<STR_LIT>')<EOL><DEDENT>if N:<EOL><INDENT>if not isinstance(N, int):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if T:<EOL><INDENT>if not isinstance(T, int):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if N is None:<EOL><INDENT>self.N = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>self.N = int(N)<EOL><DEDENT>if T is None:<EOL><INDENT>self.T = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>self.T = int(T)<EOL><DEDENT>if timetype:<EOL><INDENT>if timetype not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>self.timetype = timetype<EOL><DEDENT>if hdf5:<EOL><INDENT>if hdf5path is None:<EOL><INDENT>hdf5path = '<STR_LIT>'<EOL><DEDENT>if hdf5path[:-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>hdf5path = hdf5path[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>self.diagonal = diagonal<EOL>if nodelabels:<EOL><INDENT>self.nodelabels = nodelabels<EOL><DEDENT>else:<EOL><INDENT>self.nodelabels = None<EOL><DEDENT>if timelabels:<EOL><INDENT>self.timelabels = timelabels<EOL><DEDENT>else:<EOL><INDENT>self.timelabels = None<EOL><DEDENT>if timeunit:<EOL><INDENT>self.timeunit = timeunit<EOL><DEDENT>else:<EOL><INDENT>self.timeunit = None<EOL><DEDENT>if starttime:<EOL><INDENT>self.starttime = starttime<EOL><DEDENT>else:<EOL><INDENT>self.starttime = <NUM_LIT:0><EOL><DEDENT>if desc:<EOL><INDENT>self.desc = desc<EOL><DEDENT>else:<EOL><INDENT>self.desc = None<EOL><DEDENT>if nettype:<EOL><INDENT>self.nettype = nettype<EOL><DEDENT>if from_df is not None:<EOL><INDENT>self.network_from_df(from_df)<EOL><DEDENT>if from_edgelist is not None:<EOL><INDENT>self.network_from_edgelist(from_edgelist)<EOL><DEDENT>elif from_array is not None:<EOL><INDENT>self.network_from_array(from_array)<EOL><DEDENT>elif from_dict is not None:<EOL><INDENT>self.network_from_dict(from_dict)<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>if nettype:<EOL><INDENT>if nettype[<NUM_LIT:0>] == '<STR_LIT:w>':<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>', '<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>']<EOL><DEDENT><DEDENT>else:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>']<EOL><DEDENT>self.network = pd.DataFrame(columns=colnames)<EOL><DEDENT>self._calc_netshape()<EOL>if not self.diagonal:<EOL><INDENT>self._drop_diagonal()<EOL><DEDENT>if nettype:<EOL><INDENT>if nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>self._drop_duplicate_ij()<EOL><DEDENT><DEDENT>self.hdf5 = False<EOL>if hdf5:<EOL><INDENT>self.hdf5_setup(hdf5path)<EOL><DEDENT> | N : int
number of nodes in network
T : int
number of time-points in network
nettype : str
description of network. Can be: bu, bd, wu, wd where the letters stand for binary, weighted, undirected and directed. Default is weighted undirected
from_df : pandas df
input data frame with i,j,t,[weight] columns
from_array : array
input data from an array with dimensions node,node,time
from_dict : dict
input data is a contact sequence dictionary.
from_edgelist : list
input data is a list of lists where each item in main list consists of [i,j,t,[weight]].
timetype : str
discrete or continuous
diagonal : bool
if the diagonal should be included in the edge list.
timeunit : str
string (used in plots)
desc : str
string to describe network.
starttime : int
integer represents time of first index.
nodelabels : list
list of labels for naming the nodes
timelabels : list
list of labels for time-points
hdf5 : bool
if true, pandas dataframe is stored and queried as a h5 file.
hdf5path : str
Where the h5 file is saved if hdf5 is True. If left unset, the default is ./teneto_temporalnetwork.h5 | f1948:c0:m0
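A small sketch of constructing the class from two of the input formats listed above (edgelist and array); the node and time indices are arbitrary examples.
>>> import numpy as np
>>> import teneto
>>> # From an edgelist: each item is [i, j, t] (binary) or [i, j, t, weight]
>>> tnet = teneto.TemporalNetwork(from_edgelist=[[0, 1, 0], [0, 2, 1], [1, 2, 2]])
>>> # From a node x node x time array
>>> G = np.zeros([3, 3, 3])
>>> G[0, 1, 0] = 1
>>> tnet = teneto.TemporalNetwork(from_array=G, nettype='bu')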
def network_from_array(self, array): | if len(array.shape) == <NUM_LIT:2>:<EOL><INDENT>array = np.array(array, ndmin=<NUM_LIT:3>).transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL><DEDENT>teneto.utils.check_TemporalNetwork_input(array, '<STR_LIT>')<EOL>uvals = np.unique(array)<EOL>if len(uvals) == <NUM_LIT:2> and <NUM_LIT:1> in uvals and <NUM_LIT:0> in uvals:<EOL><INDENT>i, j, t = np.where(array == <NUM_LIT:1>)<EOL>self.network = pd.DataFrame(data={'<STR_LIT:i>': i, '<STR_LIT>': j, '<STR_LIT:t>': t})<EOL><DEDENT>else:<EOL><INDENT>i, j, t = np.where(array != <NUM_LIT:0>)<EOL>w = array[array != <NUM_LIT:0>]<EOL>self.network = pd.DataFrame(<EOL>data={'<STR_LIT:i>': i, '<STR_LIT>': j, '<STR_LIT:t>': t, '<STR_LIT>': w})<EOL><DEDENT>self.N = int(array.shape[<NUM_LIT:0>])<EOL>self.T = int(array.shape[-<NUM_LIT:1>])<EOL>self._update_network()<EOL> | Defines a network from an array.
Parameters
----------
array : array
3D numpy array. | f1948:c0:m2 |
def network_from_df(self, df): | teneto.utils.check_TemporalNetwork_input(df, '<STR_LIT>')<EOL>self.network = df<EOL>self._update_network()<EOL> | Defines a network from a pandas dataframe.
Parameters
----------
df : pandas dataframe
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indices and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge. | f1948:c0:m4 |
def network_from_edgelist(self, edgelist): | teneto.utils.check_TemporalNetwork_input(edgelist, '<STR_LIT>')<EOL>if len(edgelist[<NUM_LIT:0>]) == <NUM_LIT:4>:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>', '<STR_LIT>']<EOL><DEDENT>elif len(edgelist[<NUM_LIT:0>]) == <NUM_LIT:3>:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>']<EOL><DEDENT>self.network = pd.DataFrame(edgelist, columns=colnames)<EOL>self._update_network()<EOL> | Defines a network from an edgelist.
Parameters
----------
edgelist : list of lists.
A list of lists which are 3 or 4 in length. For binary networks each sublist should be [i, j, t] where i and j are node indices and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight]. | f1948:c0:m5 |
def _drop_duplicate_ij(self): | self.network['<STR_LIT>'] = list(map(lambda x: tuple(sorted(x)), list(<EOL>zip(*[self.network['<STR_LIT:i>'].values, self.network['<STR_LIT>'].values]))))<EOL>self.network.drop_duplicates(['<STR_LIT>', '<STR_LIT:t>'], inplace=True)<EOL>self.network.reset_index(inplace=True, drop=True)<EOL>self.network.drop('<STR_LIT>', inplace=True, axis=<NUM_LIT:1>)<EOL> | Drops duplicate entries from the network dataframe. | f1948:c0:m7 |
def _drop_diagonal(self): | self.network = self.network.where(<EOL>self.network['<STR_LIT:i>'] != self.network['<STR_LIT>']).dropna()<EOL>self.network.reset_index(inplace=True, drop=True)<EOL> | Drops self-contacts from the network dataframe. | f1948:c0:m8 |
def add_edge(self, edgelist): | if not isinstance(edgelist[<NUM_LIT:0>], list):<EOL><INDENT>edgelist = [edgelist]<EOL><DEDENT>teneto.utils.check_TemporalNetwork_input(edgelist, '<STR_LIT>')<EOL>if len(edgelist[<NUM_LIT:0>]) == <NUM_LIT:4>:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>', '<STR_LIT>']<EOL><DEDENT>elif len(edgelist[<NUM_LIT:0>]) == <NUM_LIT:3>:<EOL><INDENT>colnames = ['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT:t>']<EOL><DEDENT>if self.hdf5:<EOL><INDENT>with pd.HDFStore(self.network) as hdf:<EOL><INDENT>rows = hdf.get_storer('<STR_LIT>').nrows<EOL>hdf.append('<STR_LIT>', pd.DataFrame(edgelist, columns=colnames, index=np.arange(<EOL>rows, rows+len(edgelist))), format='<STR_LIT>', data_columns=True)<EOL><DEDENT>edgelist = np.array(edgelist)<EOL>if np.max(edgelist[:, :<NUM_LIT:2>]) > self.netshape[<NUM_LIT:0>]:<EOL><INDENT>self.netshape[<NUM_LIT:0>] = np.max(edgelist[:, :<NUM_LIT:2>])<EOL><DEDENT>if np.max(edgelist[:, <NUM_LIT:2>]) > self.netshape[<NUM_LIT:1>]:<EOL><INDENT>self.netshape[<NUM_LIT:1>] = np.max(edgelist[:, <NUM_LIT:2>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>newedges = pd.DataFrame(edgelist, columns=colnames)<EOL>self.network = pd.concat(<EOL>[self.network, newedges], ignore_index=True, sort=True)<EOL>self._update_network()<EOL><DEDENT> | Adds an edge from network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i, j and t indices to be added. For weighted networks the list should also contain a 'weight' value.
Returns
--------
Updates TemporalNetwork.network dataframe with the new edge | f1948:c0:m10
def drop_edge(self, edgelist): | if not isinstance(edgelist[<NUM_LIT:0>], list):<EOL><INDENT>edgelist = [edgelist]<EOL><DEDENT>teneto.utils.check_TemporalNetwork_input(edgelist, '<STR_LIT>')<EOL>if self.hdf5:<EOL><INDENT>with pd.HDFStore(self.network) as hdf:<EOL><INDENT>for e in edgelist:<EOL><INDENT>hdf.remove(<EOL>'<STR_LIT>', '<STR_LIT>' + str(e[<NUM_LIT:0>]) + '<STR_LIT>' + '<STR_LIT>' + str(e[<NUM_LIT:1>]) + '<STR_LIT>' + '<STR_LIT>' + str(e[<NUM_LIT:2>]))<EOL><DEDENT><DEDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>for e in edgelist:<EOL><INDENT>idx = self.network[(self.network['<STR_LIT:i>'] == e[<NUM_LIT:0>]) & (<EOL>self.network['<STR_LIT>'] == e[<NUM_LIT:1>]) & (self.network['<STR_LIT:t>'] == e[<NUM_LIT:2>])].index<EOL>self.network.drop(idx, inplace=True)<EOL><DEDENT>self.network.reset_index(inplace=True, drop=True)<EOL>self._update_network()<EOL><DEDENT> | Removes an edge from network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i, j and t indices to be removed.
Returns
--------
Updates TemporalNetwork.network dataframe | f1948:c0:m11
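An illustrative sketch combining add_edge and drop_edge on a small binary network; the indices are arbitrary.
>>> import teneto
>>> tnet = teneto.TemporalNetwork(from_edgelist=[[0, 1, 0], [1, 2, 1]])
>>> tnet.add_edge([0, 2, 2])   # single [i, j, t] edge
>>> tnet.drop_edge([1, 2, 1])  # remove an existing contact
>>> len(tnet.network)
2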
def calc_networkmeasure(self, networkmeasure, **measureparams): | availablemeasures = [f for f in dir(<EOL>teneto.networkmeasures) if not f.startswith('<STR_LIT>')]<EOL>if networkmeasure not in availablemeasures:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(availablemeasures))<EOL><DEDENT>funs = inspect.getmembers(teneto.networkmeasures)<EOL>funs = {m[<NUM_LIT:0>]: m[<NUM_LIT:1>] for m in funs if not m[<NUM_LIT:0>].startswith('<STR_LIT>')}<EOL>measure = funs[networkmeasure](self, **measureparams)<EOL>return measure<EOL> | Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasures.[networkmeasure] | f1948:c0:m12
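A hedged example of calling a measure through this wrapper; 'temporal_degree_centrality' is one of the functions in teneto.networkmeasures, and calling it without extra kwargs assumes its defaults suffice.
>>> import teneto
>>> tnet = teneto.TemporalNetwork(from_edgelist=[[0, 1, 0], [0, 1, 1], [1, 2, 1]])
>>> degree = tnet.calc_networkmeasure('temporal_degree_centrality')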
def generatenetwork(self, networktype, **networkparams): | availabletypes = [f for f in dir(<EOL>teneto.generatenetwork) if not f.startswith('<STR_LIT>')]<EOL>if networktype not in availabletypes:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(availabletypes))<EOL><DEDENT>funs = inspect.getmembers(teneto.generatenetwork)<EOL>funs = {m[<NUM_LIT:0>]: m[<NUM_LIT:1>] for m in funs if not m[<NUM_LIT:0>].startswith('<STR_LIT>')}<EOL>network = funs[networktype](**networkparams)<EOL>self.network_from_array(network)<EOL>if self.nettype[<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>self._drop_duplicate_ij()<EOL><DEDENT> | Generate a network
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
networkparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
TemporalNetwork.network is populated with the generated network. | f1948:c0:m13
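An illustrative call matching the description above, using the rand_binomial generator documented later in this file; the size and prob kwargs follow that generator's own signature.
>>> import teneto
>>> tnet = teneto.TemporalNetwork(nettype='bu')
>>> tnet.generatenetwork('rand_binomial', size=[5, 10], prob=0.25)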
def save_aspickle(self, fname): | if fname[-<NUM_LIT:4>:] != '<STR_LIT>':<EOL><INDENT>fname += '<STR_LIT>'<EOL><DEDENT>with open(fname, '<STR_LIT:wb>') as f:<EOL><INDENT>pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT> | Saves object as pickle.
fname : str
file path. | f1948:c0:m15 |
def rand_poisson(nnodes, ncontacts, lam=<NUM_LIT:1>, nettype='<STR_LIT>', netinfo=None, netrep='<STR_LIT>'): | if isinstance(ncontacts, list):<EOL><INDENT>if len(ncontacts) != nnodes:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if isinstance(lam, list):<EOL><INDENT>if len(lam) != nnodes:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if nettype == '<STR_LIT>':<EOL><INDENT>edgen = int((nnodes*(nnodes-<NUM_LIT:1>))/<NUM_LIT:2>)<EOL><DEDENT>elif nettype == '<STR_LIT>':<EOL><INDENT>edgen = int(nnodes*nnodes)<EOL><DEDENT>if not isinstance(lam, list) and not isinstance(ncontacts, list):<EOL><INDENT>icts = np.random.poisson(lam, size=(edgen, ncontacts))<EOL>net = np.zeros([edgen, icts.sum(axis=<NUM_LIT:1>).max()+<NUM_LIT:1>])<EOL>for n in range(edgen):<EOL><INDENT>net[n, np.unique(np.cumsum(icts[n]))] = <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>icts = []<EOL>ict_max = <NUM_LIT:0><EOL>for n in range(edgen):<EOL><INDENT>icts.append(np.random.poisson(lam[n], size=ncontacts[n]))<EOL>if sum(icts[-<NUM_LIT:1>]) > ict_max:<EOL><INDENT>ict_max = sum(icts[-<NUM_LIT:1>])<EOL><DEDENT><DEDENT>net = np.zeros([nnodes, ict_max+<NUM_LIT:1>])<EOL>for n in range(nnodes):<EOL><INDENT>net[n, np.unique(np.cumsum(icts[n]))] = <NUM_LIT:1><EOL><DEDENT><DEDENT>if nettype == '<STR_LIT>':<EOL><INDENT>nettmp = np.zeros([nnodes, nnodes, net.shape[-<NUM_LIT:1>]])<EOL>ind = np.triu_indices(nnodes, k=<NUM_LIT:1>)<EOL>nettmp[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], :] = net<EOL>net = nettmp + nettmp.transpose([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>])<EOL><DEDENT>elif nettype == '<STR_LIT>':<EOL><INDENT>net = net.reshape([nnodes, nnodes, net.shape[-<NUM_LIT:1>]], order='<STR_LIT:F>')<EOL>net = set_diagonal(net, <NUM_LIT:0>)<EOL><DEDENT>if netrep == '<STR_LIT>':<EOL><INDENT>if not netinfo:<EOL><INDENT>netinfo = {}<EOL><DEDENT>netinfo['<STR_LIT>'] = '<STR_LIT:b>' + nettype[-<NUM_LIT:1>]<EOL>net = graphlet2contact(net, netinfo)<EOL><DEDENT>return net<EOL> | Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed. | f1949:m0 |
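A short sketch of the generator described above; the netrep values 'graphlet'/'contact' are assumed to follow the same convention documented for rand_binomial below.
>>> import teneto
>>> G = teneto.generatenetwork.rand_poisson(nnodes=5, ncontacts=20, lam=2, nettype='bu', netrep='graphlet')
>>> G.shape[0:2]
(5, 5)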
def rand_binomial(size, prob, netrep='<STR_LIT>', nettype='<STR_LIT>', initialize='<STR_LIT>', netinfo=None, randomseed=None): | size = np.atleast_1d(size)<EOL>prob = np.atleast_1d(prob)<EOL>if len(size) == <NUM_LIT:2> or (len(size) == <NUM_LIT:3> and size[<NUM_LIT:0>] == size[<NUM_LIT:1>]):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(prob) > <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if prob.min() < <NUM_LIT:0> or prob.max() > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if nettype[-<NUM_LIT:1>] == '<STR_LIT:u>' or nettype[-<NUM_LIT:1>] == '<STR_LIT:d>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>network_size = size[<NUM_LIT:0>]<EOL>nr_time_points = size[-<NUM_LIT:1>]<EOL>connmat = network_size * network_size<EOL>if randomseed:<EOL><INDENT>np.random.seed(randomseed)<EOL><DEDENT>if len(prob) == <NUM_LIT:1>:<EOL><INDENT>net = np.random.binomial(<NUM_LIT:1>, prob, connmat * nr_time_points)<EOL>net = net.reshape(network_size * network_size, nr_time_points)<EOL><DEDENT>if len(prob) == <NUM_LIT:2>:<EOL><INDENT>net = np.zeros([connmat, nr_time_points])<EOL>if initialize == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>edgesat0 = np.random.randint(<EOL><NUM_LIT:0>, connmat, int(np.round(initialize * (connmat))))<EOL>net[edgesat0, <NUM_LIT:0>] = <NUM_LIT:1><EOL><DEDENT>for t_ind in range(<NUM_LIT:0>, nr_time_points - <NUM_LIT:1>):<EOL><INDENT>edges_off = np.where(net[:, t_ind] == <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>edges_on = np.where(net[:, t_ind] == <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>update_edges_on = np.random.binomial(<NUM_LIT:1>, prob[<NUM_LIT:0>], len(edges_off))<EOL>update_edge_off = np.random.binomial(<NUM_LIT:1>, prob[<NUM_LIT:1>], len(edges_on))<EOL>net[edges_off, t_ind + <NUM_LIT:1>] = update_edges_on<EOL>net[edges_on, t_ind + <NUM_LIT:1>] = update_edge_off<EOL><DEDENT><DEDENT>net[np.arange(<NUM_LIT:0>, network_size * network_size, network_size + <NUM_LIT:1>), :] = <NUM_LIT:0><EOL>net = net.reshape([network_size, network_size, nr_time_points])<EOL>if nettype[-<NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>unet = np.zeros(net.shape)<EOL>ind = np.triu_indices(network_size)<EOL>unet[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], :] = np.array(net[ind[<NUM_LIT:0>], ind[<NUM_LIT:1>], :])<EOL>unet = unet + np.transpose(unet, [<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>])<EOL>net = unet<EOL><DEDENT>if netrep == '<STR_LIT>':<EOL><INDENT>if not netinfo:<EOL><INDENT>netinfo = {}<EOL><DEDENT>netinfo['<STR_LIT>'] = '<STR_LIT:b>' + nettype[-<NUM_LIT:1>]<EOL>net = graphlet2contact(net, netinfo)<EOL><DEDENT>return net<EOL> | Creates a random binary network following a binomial distribution.
Parameters
----------
size : list or array of length 2 or 3.
Input [n,t] generates n number of nodes and t number of time points.
Can also be of length 3 (node x node x time) but number of nodes in 3-tuple must be identical.
prob : int or list/array of length 2.
If int, this indicates the probability of each edge becoming active (equal for all edges).
If tuple/list of length 2, this indicates different probabilities for edges to become active/inactive.
The first value is "birth rate". The probability of an absent connection becoming present.
The second value is the "death rate". This dictates the probability of an active edge remaining present.
example : [40,60] means there is a 40% chance that a 0 will become a 1 and a 60% chance that a 1 stays a 1.
netrep : str
network representation: 'graphlet' (default) or 'contact'.
nettype : str
Weighted or directed network. String 'bu' or 'bd' (accepts 'u' and 'd' as well, since 'b' is implicit)
initialize : float or str
Input percentage (in decimal) for how many edges start activated. Alternatively, specify 'zero' (default) for all edges to start deactivated.
netinfo : dict
Dictionary for contact representation information.
randomseed : int
Set random seed.
Returns
-------
net : array or dict
Generated network. Format depends on netrep input argument.
Notes
------
The idea of this function is to randomly determine if an edge is present.
Option 2 of the "prob" parameter can be used to create a small autocorrelaiton or make sure that, once an edge has been present, it never disapears. [rb-1]_
Examples
--------
>>> import teneto
>>> import numpy as np
>>> import matplotlib.pyplot as plt
To make the networks a little more complex, the probabilities of rand_binomial can be set differently for edges that have previously been active.
Instead of passing a single value to prob, you can pass a list of 2 values.
The first value is the probability that edges which were inactive (0) at t-1 will be active at t (sometimes called the birth-rate).
The second (optional) value is the probability that edges which were active (1) at t-1 will be active at t (sometimes called the death-rate).
The latter value helps create an autocorrelation. Without it, connections will have no autocorrelation.
**Example with just birthrate**
Below we create a network with 5 nodes and 10 time-points. Edges have a 25% chance to appear.
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> birth_rate = 0.25
>>> G = teneto.generatenetwork.rand_binomial([N,N,T], [birth_rate])
We can see that the edges appear randomly:
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(G, ax, cmap='Set2')
>>> fig.tight_layout()
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
birth_rate = 0.25
G = teneto.generatenetwork.rand_binomial([N,N,T], [birth_rate])
fig,ax = plt.subplots(figsize=(10,3))
ax = teneto.plot.slice_plot(G, ax, cmap='Set2')
fig.tight_layout()
fig.show()
**Example with birthrate and deathrate**
Below we create a network with 5 nodes and 10 time-points. Edges have a 25% chance to appear and have a 75% chance to remain.
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> birth_rate = 0.25
>>> death_rate = 0.75
>>> G = teneto.generatenetwork.rand_binomial([N,N,T], [birth_rate, death_rate])
We can see the autocorrelation that this creates by plotting the network:
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(G, ax, cmap='Set2')
>>> fig.tight_layout()
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
birth_rate = 0.25
death_rate = 0.75
G = teneto.generatenetwork.rand_binomial([N,N,T], [birth_rate, death_rate])
fig,ax = plt.subplots(figsize=(10,3))
ax = teneto.plot.slice_plot(G, ax, cmap='Set2')
fig.tight_layout()
fig.show()
References
---------
.. [rb-1] Clementi et al (2008) Flooding Time in edge-Markovian Dynamic Graphs *PODC* This function was written without reference to this paper. But this paper discusses a lot of properties of these types of graphs. | f1950:m0 |
def slice_plot(netin, ax, nodelabels=None, timelabels=None, communities=None, plotedgeweights=False, edgeweightscalar=<NUM_LIT:1>, timeunit='<STR_LIT>', linestyle='<STR_LIT>', cmap=None, nodesize=<NUM_LIT:100>, nodekwargs=None, edgekwargs=None): | <EOL>inputType = checkInput(netin)<EOL>if inputType == '<STR_LIT>':<EOL><INDENT>netin = graphlet2contact(netin)<EOL>inputType = '<STR_LIT:C>'<EOL><DEDENT>edgelist = [tuple(np.array(e[<NUM_LIT:0>:<NUM_LIT:2>]) + e[<NUM_LIT:2>] * netin['<STR_LIT>'][<NUM_LIT:0>])<EOL>for e in netin['<STR_LIT>']]<EOL>if nodelabels is not None and len(nodelabels) == netin['<STR_LIT>'][<NUM_LIT:0>]:<EOL><INDENT>pass<EOL><DEDENT>elif nodelabels is not None and len(nodelabels) != netin['<STR_LIT>'][<NUM_LIT:0>]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif nodelabels is None and netin['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>nodelabels = np.arange(<NUM_LIT:1>, netin['<STR_LIT>'][<NUM_LIT:0>] + <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>nodelabels = netin['<STR_LIT>']<EOL><DEDENT>if timelabels is not None and len(timelabels) == netin['<STR_LIT>'][-<NUM_LIT:1>]:<EOL><INDENT>pass<EOL><DEDENT>elif timelabels is not None and len(timelabels) != netin['<STR_LIT>'][-<NUM_LIT:1>]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif timelabels is None and str(netin['<STR_LIT>']) == '<STR_LIT>':<EOL><INDENT>timelabels = np.arange(<NUM_LIT:1>, netin['<STR_LIT>'][-<NUM_LIT:1>] + <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>timelabels = np.arange(netin['<STR_LIT>'], netin['<STR_LIT>'] *<EOL>netin['<STR_LIT>'][-<NUM_LIT:1>] + netin['<STR_LIT>'], netin['<STR_LIT>'])<EOL><DEDENT>if timeunit is None:<EOL><INDENT>timeunit = netin['<STR_LIT>']<EOL><DEDENT>timeNum = len(timelabels)<EOL>nodeNum = len(nodelabels)<EOL>posy = np.tile(list(range(<NUM_LIT:0>, nodeNum)), timeNum)<EOL>posx = np.repeat(list(range(<NUM_LIT:0>, timeNum)), nodeNum)<EOL>if nodekwargs is None:<EOL><INDENT>nodekwargs = {}<EOL><DEDENT>if edgekwargs is None:<EOL><INDENT>edgekwargs = {}<EOL><DEDENT>if cmap:<EOL><INDENT>nodekwargs['<STR_LIT>'] = cmap<EOL><DEDENT>if '<STR_LIT:c>' not in nodekwargs:<EOL><INDENT>nodekwargs['<STR_LIT:c>'] = posy<EOL><DEDENT>if communities is not None:<EOL><INDENT>if len(communities.shape) == <NUM_LIT:1>:<EOL><INDENT>nodekwargs['<STR_LIT:c>'] = np.tile(communities, timeNum)<EOL><DEDENT>else:<EOL><INDENT>nodekwargs['<STR_LIT:c>'] = communities.flatten(order='<STR_LIT:F>')<EOL><DEDENT><DEDENT>for ei, edge in enumerate(edgelist):<EOL><INDENT>if plotedgeweights == True and netin['<STR_LIT>'][<NUM_LIT:0>] == '<STR_LIT:w>':<EOL><INDENT>edgekwargs['<STR_LIT>'] = netin['<STR_LIT>'][ei] * edgeweightscalar<EOL><DEDENT>bvx, bvy = bezier_points(<EOL>(posx[edge[<NUM_LIT:0>]], posy[edge[<NUM_LIT:0>]]), (posx[edge[<NUM_LIT:1>]], posy[edge[<NUM_LIT:1>]]), nodeNum, <NUM_LIT:20>)<EOL>ax.plot(bvx, bvy, linestyle, **edgekwargs)<EOL><DEDENT>ax.set_yticks(range(<NUM_LIT:0>, len(nodelabels)))<EOL>ax.set_xticks(range(<NUM_LIT:0>, len(timelabels)))<EOL>ax.set_yticklabels(nodelabels)<EOL>ax.set_xticklabels(timelabels)<EOL>ax.grid()<EOL>ax.set_frame_on(False)<EOL>ax.spines['<STR_LIT>'].set_visible(False)<EOL>ax.spines['<STR_LIT:right>'].set_visible(False)<EOL>ax.get_xaxis().tick_bottom()<EOL>ax.get_yaxis().tick_left()<EOL>ax.set_xlim([min(posx) - <NUM_LIT:1>, max(posx) + <NUM_LIT:1>])<EOL>ax.set_ylim([min(posy) - <NUM_LIT:1>, max(posy) + <NUM_LIT:1>])<EOL>ax.scatter(posx, posy, s=nodesize, zorder=<NUM_LIT:10>, **nodekwargs)<EOL>if timeunit != '<STR_LIT>':<EOL><INDENT>timeunit = '<STR_LIT>' + timeunit + 
'<STR_LIT:)>'<EOL><DEDENT>ax.set_xlabel('<STR_LIT>' + timeunit)<EOL>return ax<EOL> | r'''
Function draws a "slice graph" and exports axis handles
Parameters
----------
netin : array, dict
temporal network input (graphlet or contact)
ax : matplotlib figure handles.
nodelabels : list
node labels. List of strings.
timelabels : list
labels of the dimension the graph is expressed across. List of strings.
communities : array
array of size: (time) or (node,time). Nodes will be coloured accordingly.
plotedgeweights : bool
if True, edges will vary in size (default False)
edgeweightscalar : int
scalar to multiply all edges if tweaking is needed.
timeunit : string
unit time axis is in.
linestyle : string
line style of Bezier curves.
nodesize : int
size of nodes
nodekwargs : dict
any additional kwargs for matplotlib.plt.scatter for the nodes
edgekwargs : dict
any additional kwargs for matplotlib.plt.plots for the edges
Returns
---------
ax : axis handle of slice graph
Examples
---------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot'] # Node names
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(C, ax, cmap='Pastel2')
>>> plt.tight_layout()
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot']
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Pastel2'
ax = teneto.plot.slice_plot(C,ax,cmap=cmap)
plt.tight_layout()
fig.show() | f1953:m0 |
def circle_plot(netIn, ax, nodelabels=None, linestyle='<STR_LIT>', nodesize=<NUM_LIT:1000>, cmap='<STR_LIT>'): | <EOL>inputType = checkInput(netIn, conMat=<NUM_LIT:1>)<EOL>if nodelabels is None:<EOL><INDENT>nodelabels = []<EOL><DEDENT>if inputType == '<STR_LIT:M>':<EOL><INDENT>shape = np.shape(netIn)<EOL>edg = np.where(np.abs(netIn) > <NUM_LIT:0>)<EOL>contacts = [tuple([edg[<NUM_LIT:0>][i], edg[<NUM_LIT:1>][i]])<EOL>for i in range(<NUM_LIT:0>, len(edg[<NUM_LIT:0>]))]<EOL>netIn = {}<EOL>netIn['<STR_LIT>'] = contacts<EOL>netIn['<STR_LIT>'] = shape<EOL><DEDENT>elif inputType == '<STR_LIT>':<EOL><INDENT>netIn = graphlet2contact(netIn)<EOL>inputType = '<STR_LIT:C>'<EOL><DEDENT>if inputType == '<STR_LIT:C>':<EOL><INDENT>edgeList = [tuple(np.array(e[<NUM_LIT:0>:<NUM_LIT:2>]) + e[<NUM_LIT:2>] * netIn['<STR_LIT>'][<NUM_LIT:0>])<EOL>for e in netIn['<STR_LIT>']]<EOL><DEDENT>elif inputType == '<STR_LIT:M>':<EOL><INDENT>edgeList = netIn['<STR_LIT>']<EOL><DEDENT>n = netIn['<STR_LIT>'][<NUM_LIT:0>]<EOL>posx = [math.cos((<NUM_LIT:2> * math.pi * i) / n) for i in range(<NUM_LIT:0>, n)]<EOL>posy = [math.sin((<NUM_LIT:2> * math.pi * i) / n) for i in range(<NUM_LIT:0>, n)]<EOL>cmap = cm.get_cmap(cmap)(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, n))<EOL>for edge in edgeList:<EOL><INDENT>bvx, bvy = bezier_circle(<EOL>(posx[edge[<NUM_LIT:0>]], posy[edge[<NUM_LIT:0>]]), (posx[edge[<NUM_LIT:1>]], posy[edge[<NUM_LIT:1>]]), <NUM_LIT:20>)<EOL>ax.plot(bvx, bvy, linestyle, zorder=<NUM_LIT:0>)<EOL><DEDENT>for i in range(n):<EOL><INDENT>ax.scatter(posx[i], posy[i], s=nodesize, c=cmap[i], zorder=<NUM_LIT:1>)<EOL><DEDENT>ax.set_yticklabels([])<EOL>ax.set_xticklabels([])<EOL>ax.set_yticks([])<EOL>ax.set_xticks([])<EOL>ax.set_frame_on(False)<EOL>x0, x1 = ax.get_xlim()<EOL>y0, y1 = ax.get_ylim()<EOL>ax.set_aspect((x1 - x0) / (y1 - y0))<EOL>ax.spines['<STR_LIT>'].set_visible(False)<EOL>ax.spines['<STR_LIT:right>'].set_visible(False)<EOL>ax.spines['<STR_LIT:left>'].set_visible(False)<EOL>ax.spines['<STR_LIT>'].set_visible(False)<EOL>return ax<EOL> | r'''
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet or contact)
ax : matplotlib ax handles.
nodelabels : list
node labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show() | f1954:m0 |
def graphlet_stack_plot(netin, ax, q=<NUM_LIT:10>, cmap='<STR_LIT>', gridcolor='<STR_LIT:k>', borderwidth=<NUM_LIT:2>, bordercolor=None, Fs=<NUM_LIT:1>, timeunit='<STR_LIT>', t0=<NUM_LIT:1>, sharpen='<STR_LIT:yes>', vminmax='<STR_LIT>'): | <EOL>inputType = checkInput(netin)<EOL>if inputType == '<STR_LIT>':<EOL><INDENT>netin = netin.contact<EOL>inputType = '<STR_LIT:C>'<EOL><DEDENT>if inputType == '<STR_LIT:C>':<EOL><INDENT>if timeunit == '<STR_LIT>':<EOL><INDENT>timeunit = netin['<STR_LIT>']<EOL><DEDENT>if t0 == <NUM_LIT:1>:<EOL><INDENT>t0 = netin['<STR_LIT>']<EOL><DEDENT>if Fs == <NUM_LIT:1>:<EOL><INDENT>Fs = netin['<STR_LIT>']<EOL><DEDENT>netin = contact2graphlet(netin)<EOL><DEDENT>if timeunit != '<STR_LIT>':<EOL><INDENT>timeunit = '<STR_LIT>' + timeunit + '<STR_LIT:)>'<EOL><DEDENT>if bordercolor == None:<EOL><INDENT>bordercolor = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL><DEDENT>if not isinstance(borderwidth, int):<EOL><INDENT>borderwidth = int(borderwidth)<EOL>print('<STR_LIT>')<EOL><DEDENT>v = np.arange(<NUM_LIT:0>, netin.shape[<NUM_LIT:0>] + <NUM_LIT:1>)<EOL>vr = np.arange(netin.shape[<NUM_LIT:0>], -<NUM_LIT:1>, -<NUM_LIT:1>)<EOL>if vminmax == '<STR_LIT>' or vminmax == '<STR_LIT>' or vminmax == '<STR_LIT>':<EOL><INDENT>vminmax = [-np.nanmax(np.abs(netin)), np.nanmax(np.abs(netin))]<EOL><DEDENT>elif vminmax == '<STR_LIT>':<EOL><INDENT>vminmax = [np.nanmin(netin), np.nanmax(netin)]<EOL><DEDENT>qb = q * borderwidth<EOL>figmat = np.zeros([<NUM_LIT> * q + (qb * <NUM_LIT:2>), int(((netin.shape[-<NUM_LIT:1>]) *<EOL>(<NUM_LIT> * q) + (qb * <NUM_LIT:2>)) - ((netin.shape[-<NUM_LIT:1>] - <NUM_LIT:1>) * q * <NUM_LIT>) / <NUM_LIT:2>), <NUM_LIT:4>])<EOL>for n in range(<NUM_LIT:0>, netin.shape[-<NUM_LIT:1>]):<EOL><INDENT>figtmp, axtmp = plt.subplots(<EOL><NUM_LIT:1>, facecolor='<STR_LIT>', figsize=(q, q), dpi=<NUM_LIT>)<EOL>axtmp.pcolormesh(v, vr, netin[:, :, n], cmap=cmap, edgecolor=gridcolor,<EOL>linewidth=q * <NUM_LIT:2>, vmin=vminmax[<NUM_LIT:0>], vmax=vminmax[<NUM_LIT:1>])<EOL>axtmp.set_xticklabels('<STR_LIT>')<EOL>axtmp.set_yticklabels('<STR_LIT>')<EOL>axtmp.set_xticks([])<EOL>axtmp.set_yticks([])<EOL>x0, x1 = axtmp.get_xlim()<EOL>y0, y1 = axtmp.get_ylim()<EOL>axtmp.set_aspect((x1 - x0) / (y1 - y0))<EOL>axtmp.spines['<STR_LIT:left>'].set_visible(False)<EOL>axtmp.spines['<STR_LIT:right>'].set_visible(False)<EOL>axtmp.spines['<STR_LIT>'].set_visible(False)<EOL>axtmp.spines['<STR_LIT>'].set_visible(False)<EOL>plt.subplots_adjust(left=<NUM_LIT:0>, bottom=<NUM_LIT:0>, right=<NUM_LIT:1>,<EOL>top=<NUM_LIT:1>, wspace=<NUM_LIT:0>, hspace=<NUM_LIT:0>)<EOL>figtmp.canvas.draw()<EOL>figmattmp = np.fromstring(<EOL>figtmp.canvas.tostring_rgb(), dtype=np.uint8, sep='<STR_LIT>')<EOL>figmattmp = figmattmp.reshape(<EOL>figtmp.canvas.get_width_height()[::-<NUM_LIT:1>] + (<NUM_LIT:3>,))<EOL>plt.close(figtmp)<EOL>figmattmp_withborder = np.zeros(<EOL>[figmattmp.shape[<NUM_LIT:0>] + (qb * <NUM_LIT:2>), figmattmp.shape[<NUM_LIT:1>] + (qb * <NUM_LIT:2>), <NUM_LIT:3>]) + (np.array(bordercolor) * <NUM_LIT:255>)<EOL>figmattmp_withborder[qb:-qb, qb:-qb, :] = figmattmp<EOL>y, x = np.ogrid[-qb: qb + <NUM_LIT:1>, -qb: qb + <NUM_LIT:1>]<EOL>mask = x * x + y * y <= qb * qb<EOL>Mq1 = np.vstack([[mask[:qb, :qb] == <NUM_LIT:0>], [mask[:qb, :qb] == <NUM_LIT:0>], [<EOL>mask[:qb, :qb] == <NUM_LIT:0>]]).transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL>figmattmp_withborder[:qb, :qb, :][Mq1] = <NUM_LIT:255><EOL>Mq1 = np.vstack([[mask[:qb, -qb:] == <NUM_LIT:0>], [mask[:qb, -qb:]<EOL>== <NUM_LIT:0>], [mask[:qb, -qb:] == 
<NUM_LIT:0>]]).transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL>figmattmp_withborder[:qb, -qb:, :][Mq1] = <NUM_LIT:255><EOL>Mq1 = np.vstack([[mask[-qb:, :qb] == <NUM_LIT:0>], [mask[-qb:, :qb]<EOL>== <NUM_LIT:0>], [mask[-qb:, :qb] == <NUM_LIT:0>]]).transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL>figmattmp_withborder[-qb:, :qb, :][Mq1] = <NUM_LIT:255><EOL>Mq1 = np.vstack([[mask[-qb:, -qb:] == <NUM_LIT:0>], [mask[-qb:, -qb:]<EOL>== <NUM_LIT:0>], [mask[-qb:, -qb:] == <NUM_LIT:0>]]).transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL>figmattmp_withborder[-qb:, -qb:, :][Mq1] = <NUM_LIT:255><EOL>scale = np.matrix([[<NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:3>, <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>sheer = np.matrix([[<NUM_LIT:1>, np.tan(np.pi / <NUM_LIT:12>), <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>figmattmp = ndimage.affine_transform(<EOL>figmattmp_withborder, sheer * (scale), offset=[-<NUM_LIT> * q, <NUM_LIT:0>, <NUM_LIT:0>], cval=<NUM_LIT:255>)<EOL>trans = np.where(np.sum(figmattmp, axis=<NUM_LIT:2>) == <NUM_LIT:255> * <NUM_LIT:3>)<EOL>alphamat = np.ones([figmattmp.shape[<NUM_LIT:0>], figmattmp.shape[<NUM_LIT:0>]])<EOL>alphamat[trans[<NUM_LIT:0>], trans[<NUM_LIT:1>]] = <NUM_LIT:0><EOL>figmattmp = np.dstack([figmattmp, alphamat])<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>figmat[:, n * (<NUM_LIT> * q):((n + <NUM_LIT:1>) * (<NUM_LIT> * q) + (qb * <NUM_LIT:2>))] = figmattmp<EOL><DEDENT>else:<EOL><INDENT>figmat[:, n * (<NUM_LIT> * q) - int((n * q * <NUM_LIT>) / <NUM_LIT:2>):int(((n + <NUM_LIT:1>)<EOL>* (<NUM_LIT> * q) + (qb * <NUM_LIT:2>)) - (n * q * <NUM_LIT>) / <NUM_LIT:2>)] = figmattmp<EOL><DEDENT><DEDENT>figmat[:, :, <NUM_LIT:0>:<NUM_LIT:3>] = figmat[:, :, <NUM_LIT:0>:<NUM_LIT:3>] / <NUM_LIT:255><EOL>figmat = figmat[:, :-int((q / <NUM_LIT:2>) * <NUM_LIT>), :]<EOL>fid = np.where(figmat[:, :, -<NUM_LIT:1>] > <NUM_LIT:0>)<EOL>fargmin = np.argmin(fid[<NUM_LIT:0>])<EOL>ymax = np.max(fid[<NUM_LIT:0>])<EOL>yright = np.max(np.where(figmat[:, fid[<NUM_LIT:1>][fargmin], -<NUM_LIT:1>] > <NUM_LIT:0>))<EOL>xtickloc = np.where(figmat[ymax, :, -<NUM_LIT:1>] > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>xtickloc = np.delete(xtickloc, np.where(np.diff(xtickloc) == <NUM_LIT:1>)[<NUM_LIT:0>] + <NUM_LIT:1>)<EOL>fid = np.where(figmat[:, :, -<NUM_LIT:1>] > <NUM_LIT:0>)<EOL>ymin = np.min(fid[<NUM_LIT:0>])<EOL>topfig = np.where(figmat[ymin, :, -<NUM_LIT:1>] > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>topfig = topfig[<NUM_LIT:0>:len(topfig):int(len(topfig) / netin.shape[-<NUM_LIT:1>])]<EOL>if sharpen == '<STR_LIT:yes>':<EOL><INDENT>figmat[:, :, :-<NUM_LIT:1>] = ndimage.median_filter(figmat[:, :, :-<NUM_LIT:1>], <NUM_LIT:3>)<EOL><DEDENT>ax.imshow(figmat[:, :, :-<NUM_LIT:1>], zorder=<NUM_LIT:1>)<EOL>ax.spines['<STR_LIT:left>'].set_visible(False)<EOL>ax.spines['<STR_LIT:right>'].set_visible(False)<EOL>ax.spines['<STR_LIT>'].set_visible(False)<EOL>ax.spines['<STR_LIT>'].set_visible(False)<EOL>ax.set_xticklabels('<STR_LIT>')<EOL>ax.set_yticklabels('<STR_LIT>')<EOL>ax.set_xticks([])<EOL>ax.set_yticks([])<EOL>L = int((((netin.shape[-<NUM_LIT:1>] - <NUM_LIT:3>) + <NUM_LIT:1>) * (<NUM_LIT> * q) +<EOL>(qb * <NUM_LIT:2>)) - ((netin.shape[-<NUM_LIT:1>] - <NUM_LIT:3>) * q * <NUM_LIT>) / <NUM_LIT:2> - q)<EOL>_ = [ax.plot(range(topfig[i], xt), np.zeros(len(range(topfig[i], xt))) + yright,<EOL>color='<STR_LIT:k>', linestyle='<STR_LIT::>', zorder=<NUM_LIT:2>) for i, xt in enumerate(xtickloc[<NUM_LIT:1>:])]<EOL>ax.plot(range(<NUM_LIT:0>, L), 
np.zeros(L) + ymax,<EOL>color='<STR_LIT:k>', linestyle='<STR_LIT::>', zorder=<NUM_LIT:2>)<EOL>_ = [ax.plot(np.zeros(q * <NUM_LIT:10>) + xt, np.arange(ymax, ymax + q * <NUM_LIT:10>),<EOL>color='<STR_LIT:k>', linestyle='<STR_LIT::>', zorder=<NUM_LIT:2>) for xt in xtickloc]<EOL>_ = [ax.text(xt, ymax + q * <NUM_LIT:20>, str(round((i + t0) * Fs, <NUM_LIT:5>)),<EOL>horizontalalignment='<STR_LIT>',) for i, xt in enumerate(xtickloc)]<EOL>ylim = ax.axes.get_ylim()<EOL>xlim = ax.axes.get_xlim()<EOL>ax.set_ylim(ylim[<NUM_LIT:0>] + q * <NUM_LIT:15>, <NUM_LIT:0>)<EOL>ax.set_xlim(xlim[<NUM_LIT:0>] - q * <NUM_LIT:20>, xlim[<NUM_LIT:1>])<EOL>ax.set_xlabel('<STR_LIT>' + timeunit)<EOL>return ax<EOL> | r'''
Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack.
Parameters
----------
netin : array, dict
network input (graphlet or contact)
ax : matplotlib ax handles.
q : int
Quality. Increasing this will lead to a smoother axis but takes up more memory.
cmap : str
Colormap (matplotlib) of graphlets
Fs : int
Sampling rate. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
timeunit : str
Unit of time for xlabel. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
t0 : int
What should the first time point be called. Should be integer. Default 1.
gridcolor : str
The color of the grid section of the graphlets. Set to 'none' if not wanted.
borderwidth : int
Scales the size of border. (at the moment it cannot be set to 0.)
bordercolor :
color of the border (at the moment it must be in RGB values between 0 and 1 -> this will be changed sometime in the future). Default: black.
vminmax : str
'maxabs', 'minmax' (default), or list/array with length of 2. Specifies the min and max colormap value of graphlets. Maxabs entails [-max(abs(G)),max(abs(G))], minmax entails [min(G), max(G)].
Returns
--------
ax : matplotlib ax handle
Note
------
This function can require a lot of RAM with larger networks.
Note
------
At the moment bordercolor cannot be set to zero. To remove the border, set borderwidth=1 and bordercolor=[1,1,1] as a temporary workaround.
Examples
-------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap='Greys')
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Greys'
ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap=cmap)
fig.show() | f1955:m0 |
def partition_inference(tctc_mat, comp, tau, sigma, kappa): | communityinfo = {}<EOL>communityinfo['<STR_LIT>'] = []<EOL>communityinfo['<STR_LIT:start>'] = np.empty(<NUM_LIT:0>)<EOL>communityinfo['<STR_LIT:end>'] = np.empty(<NUM_LIT:0>)<EOL>communityinfo['<STR_LIT:size>'] = np.empty(<NUM_LIT:0>)<EOL>for i, tcomp in enumerate(comp):<EOL><INDENT>if len(tcomp) > <NUM_LIT:0>:<EOL><INDENT>for traj in tcomp:<EOL><INDENT>ignore = <NUM_LIT:0><EOL>preexisting = <NUM_LIT:0><EOL>if i != <NUM_LIT:0>:<EOL><INDENT>cutoff = i-<NUM_LIT:1>-kappa<EOL>if cutoff < <NUM_LIT:0>:<EOL><INDENT>cutoff = <NUM_LIT:0><EOL><DEDENT>if np.any(np.sum(np.sum(tctc_mat[traj, :, cutoff:i][:, traj], axis=<NUM_LIT:0>), axis=<NUM_LIT:0>) == np.power(len(traj), <NUM_LIT:2>)):<EOL><INDENT>for checknode in np.where(communityinfo['<STR_LIT:end>']>=cutoff)[<NUM_LIT:0>]:<EOL><INDENT>if traj == communityinfo['<STR_LIT>'][checknode]:<EOL><INDENT>ignore = <NUM_LIT:1><EOL><DEDENT><DEDENT>if ignore == <NUM_LIT:0>: <EOL><INDENT>for checknode in np.where(communityinfo['<STR_LIT:end>']>=cutoff)[<NUM_LIT:0>]:<EOL><INDENT>if set(communityinfo['<STR_LIT>'][checknode]).issuperset(traj):<EOL><INDENT>preexisting = <NUM_LIT:1> <EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if ignore == <NUM_LIT:0>:<EOL><INDENT>approxmaxlength = tau*<NUM_LIT:2><EOL>a = np.sum(<EOL>np.sum(tctc_mat[traj, :, i:i+approxmaxlength][:, traj], axis=<NUM_LIT:0>), axis=<NUM_LIT:0>)<EOL>if len(traj)*len(traj)*approxmaxlength == a.sum():<EOL><INDENT>ok = <NUM_LIT:0><EOL>ii = <NUM_LIT:1><EOL>while ok == <NUM_LIT:0>:<EOL><INDENT>b = np.sum(np.sum(<EOL>tctc_mat[traj, :, i+(approxmaxlength*ii):i+(approxmaxlength*(ii+<NUM_LIT:1>))][:, traj], axis=<NUM_LIT:0>), axis=<NUM_LIT:0>)<EOL>a = np.append(a, b)<EOL>if len(traj)*len(traj)*approxmaxlength != b.sum():<EOL><INDENT>ok = <NUM_LIT:1><EOL><DEDENT>if i+(approxmaxlength*(ii+<NUM_LIT:1>)) > tctc_mat.shape[-<NUM_LIT:1>]:<EOL><INDENT>ok = <NUM_LIT:1><EOL><DEDENT>ii += <NUM_LIT:1><EOL><DEDENT><DEDENT>a = np.where(a == np.power(len(traj), <NUM_LIT:2>))[<NUM_LIT:0>]<EOL>if len(a) == <NUM_LIT:1>:<EOL><INDENT>stopind = i + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>a = np.append(a, a.max()+kappa+<NUM_LIT:2>)<EOL>stopind = i + np.split(a, np.where(<EOL>np.diff(a) > kappa+<NUM_LIT:1>)[<NUM_LIT:0>]+<NUM_LIT:1>)[<NUM_LIT:0>][-<NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT>if ((stopind - i) >= tau or preexisting == <NUM_LIT:1>) and len(traj) >= sigma:<EOL><INDENT>communityinfo['<STR_LIT>'].append(sorted(traj))<EOL>communityinfo['<STR_LIT:start>'] = np.append(communityinfo['<STR_LIT:start>'], int(i))<EOL>communityinfo['<STR_LIT:end>'] = np.append(<EOL>communityinfo['<STR_LIT:end>'], int(stopind))<EOL>communityinfo['<STR_LIT:size>'] = np.append(communityinfo['<STR_LIT:size>'], len(traj))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>communityinfo = pd.DataFrame(communityinfo)<EOL>communityinfo['<STR_LIT:start>'] = communityinfo['<STR_LIT:start>'].astype(int)<EOL>communityinfo['<STR_LIT:end>'] = communityinfo['<STR_LIT:end>'].astype(int)<EOL>badrows = []<EOL>for v in communityinfo.iterrows():<EOL><INDENT>skipselrule = (communityinfo['<STR_LIT:end>'] == v[<NUM_LIT:1>]['<STR_LIT:end>'])<EOL>for u in communityinfo[skipselrule].iterrows():<EOL><INDENT>a = <NUM_LIT:1><EOL>if u[<NUM_LIT:1>]['<STR_LIT:start>'] > v[<NUM_LIT:1>]['<STR_LIT:start>'] and sorted(u[<NUM_LIT:1>]['<STR_LIT>']) == sorted(v[<NUM_LIT:1>]['<STR_LIT>']):<EOL><INDENT>badrows.append(u[<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>communityinfo = communityinfo.drop(badrows)<EOL>for v in 
communityinfo.iterrows():<EOL><INDENT>skipselrule = (communityinfo['<STR_LIT:end>'] <= v[<NUM_LIT:1>]['<STR_LIT:start>']) & (<EOL>communityinfo['<STR_LIT:end>']+kappa >= v[<NUM_LIT:1>]['<STR_LIT:start>'])<EOL>for u in communityinfo[skipselrule].iterrows():<EOL><INDENT>a = <NUM_LIT:1><EOL>if set(u[<NUM_LIT:1>]['<STR_LIT>']).issuperset(v[<NUM_LIT:1>]['<STR_LIT>']):<EOL><INDENT>communityinfo.loc[v[<NUM_LIT:0>], '<STR_LIT:start>'] = u[<NUM_LIT:1>]['<STR_LIT:start>']<EOL><DEDENT><DEDENT><DEDENT>communityinfo['<STR_LIT>'] = np.array(communityinfo['<STR_LIT:end>']) - np.array(communityinfo['<STR_LIT:start>'])<EOL>communityinfo = communityinfo[communityinfo['<STR_LIT>'] >= tau]<EOL>communityinfo = communityinfo[communityinfo['<STR_LIT:size>'] >= sigma]<EOL>badrows = []<EOL>if kappa > <NUM_LIT:0>:<EOL><INDENT>for v in communityinfo.iterrows():<EOL><INDENT>skipselrule = (communityinfo['<STR_LIT:end>'] == v[<NUM_LIT:1>]['<STR_LIT:end>']) & (<EOL>communityinfo['<STR_LIT:start>'] < v[<NUM_LIT:1>]['<STR_LIT:start>'])<EOL>for u in communityinfo[skipselrule].iterrows():<EOL><INDENT>if set(v[<NUM_LIT:1>]['<STR_LIT>']).issubset(u[<NUM_LIT:1>]['<STR_LIT>']):<EOL><INDENT>badrows.append(v[<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>communityinfo = communityinfo.drop(badrows)<EOL><DEDENT>return communityinfo<EOL> | r"""
Takes a TCTC trajectory matrix and returns a dataframe where all multi-label communities are listed.
Can take a little bit of time with large datasets; optimization could remove some for loops. | f1957:m0