Dataset schema (column, type, observed range):

repo               stringlengths   7 - 55
path               stringlengths   4 - 127
func_name          stringlengths   1 - 88
original_string    stringlengths   75 - 19.8k
language           stringclasses   1 value
code               stringlengths   75 - 19.8k
code_tokens        list
docstring          stringlengths   3 - 17.3k
docstring_tokens   list
sha                stringlengths   40 - 40
url                stringlengths   87 - 242
partition          stringclasses   1 value
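Each record below follows this schema. As a minimal sketch of how such records could be inspected (assuming the rows are stored as JSON Lines; the file name data.jsonl and the loading approach are assumptions, not part of the dataset description):

import json

# Load one record per line from a JSON Lines file (hypothetical path).
with open("data.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # Each record carries the fields described above.
        print(record["repo"], record["path"], record["func_name"])
        print(record["url"])
        # `code` holds the raw source; `code_tokens` is its token list.
        print(len(record["code_tokens"]), "code tokens")
        break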
emre/storm
storm/__main__.py
web
def web(port, debug=False, theme="modern", ssh_config=None): """Starts the web UI.""" from storm import web as _web _web.run(port, debug, theme, ssh_config)
python
def web(port, debug=False, theme="modern", ssh_config=None): """Starts the web UI.""" from storm import web as _web _web.run(port, debug, theme, ssh_config)
[ "def", "web", "(", "port", ",", "debug", "=", "False", ",", "theme", "=", "\"modern\"", ",", "ssh_config", "=", "None", ")", ":", "from", "storm", "import", "web", "as", "_web", "_web", ".", "run", "(", "port", ",", "debug", ",", "theme", ",", "ssh_config", ")" ]
Starts the web UI.
[ "Starts", "the", "web", "UI", "." ]
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L310-L313
train
diging/tethne
tethne/writers/collection.py
_strip_list_attributes
def _strip_list_attributes(graph_): """Converts lists attributes to strings for all nodes and edges in G.""" for n_ in graph_.nodes(data=True): for k,v in n_[1].iteritems(): if type(v) is list: graph_.node[n_[0]][k] = unicode(v) for e_ in graph_.edges(data=True): for k,v in e_[2].iteritems(): if type(v) is list: graph_.edge[e_[0]][e_[1]][k] = unicode(v) return graph_
python
def _strip_list_attributes(graph_): """Converts lists attributes to strings for all nodes and edges in G.""" for n_ in graph_.nodes(data=True): for k,v in n_[1].iteritems(): if type(v) is list: graph_.node[n_[0]][k] = unicode(v) for e_ in graph_.edges(data=True): for k,v in e_[2].iteritems(): if type(v) is list: graph_.edge[e_[0]][e_[1]][k] = unicode(v) return graph_
[ "def", "_strip_list_attributes", "(", "graph_", ")", ":", "for", "n_", "in", "graph_", ".", "nodes", "(", "data", "=", "True", ")", ":", "for", "k", ",", "v", "in", "n_", "[", "1", "]", ".", "iteritems", "(", ")", ":", "if", "type", "(", "v", ")", "is", "list", ":", "graph_", ".", "node", "[", "n_", "[", "0", "]", "]", "[", "k", "]", "=", "unicode", "(", "v", ")", "for", "e_", "in", "graph_", ".", "edges", "(", "data", "=", "True", ")", ":", "for", "k", ",", "v", "in", "e_", "[", "2", "]", ".", "iteritems", "(", ")", ":", "if", "type", "(", "v", ")", "is", "list", ":", "graph_", ".", "edge", "[", "e_", "[", "0", "]", "]", "[", "e_", "[", "1", "]", "]", "[", "k", "]", "=", "unicode", "(", "v", ")", "return", "graph_" ]
Converts lists attributes to strings for all nodes and edges in G.
[ "Converts", "lists", "attributes", "to", "strings", "for", "all", "nodes", "and", "edges", "in", "G", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/writers/collection.py#L189-L200
train
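The function above uses Python 2 idioms (iteritems, unicode) and the pre-2.0 NetworkX attribute API (graph_.node, graph_.edge). A hedged Python 3 sketch of the same idea on current NetworkX follows; the helper name is hypothetical and this is not Tethne's implementation:

import networkx as nx

def strip_list_attributes(graph):
    # Replace list-valued attributes with their string form so that writers
    # which require scalar attribute values do not choke on them.
    for node, attrs in graph.nodes(data=True):
        for key, value in attrs.items():
            if isinstance(value, list):
                graph.nodes[node][key] = str(value)
    for u, v, attrs in graph.edges(data=True):
        for key, value in attrs.items():
            if isinstance(value, list):
                graph.edges[u, v][key] = str(value)
    return graph

g = nx.Graph()
g.add_node("a", tags=["x", "y"])
g.add_edge("a", "b", dates=[1999, 2003])
strip_list_attributes(g)
print(g.nodes["a"]["tags"])   # "['x', 'y']"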
diging/tethne
tethne/writers/collection.py
_safe_type
def _safe_type(value): """Converts Python type names to XGMML-safe type names.""" if type(value) is str: dtype = 'string' if type(value) is unicode: dtype = 'string' if type(value) is int: dtype = 'integer' if type(value) is float: dtype = 'real' return dtype
python
def _safe_type(value): """Converts Python type names to XGMML-safe type names.""" if type(value) is str: dtype = 'string' if type(value) is unicode: dtype = 'string' if type(value) is int: dtype = 'integer' if type(value) is float: dtype = 'real' return dtype
[ "def", "_safe_type", "(", "value", ")", ":", "if", "type", "(", "value", ")", "is", "str", ":", "dtype", "=", "'string'", "if", "type", "(", "value", ")", "is", "unicode", ":", "dtype", "=", "'string'", "if", "type", "(", "value", ")", "is", "int", ":", "dtype", "=", "'integer'", "if", "type", "(", "value", ")", "is", "float", ":", "dtype", "=", "'real'", "return", "dtype" ]
Converts Python type names to XGMML-safe type names.
[ "Converts", "Python", "type", "names", "to", "XGMML", "-", "safe", "type", "names", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/writers/collection.py#L202-L210
train
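Note that _safe_type above only assigns dtype inside its if branches, so a value of an unhandled type (a bool or a list, say) would leave dtype unbound. A small Python 3 sketch of the same mapping with an explicit fallback; the helper name and the bool/fallback handling are choices made here, not Tethne's:

def safe_type(value):
    # Map a Python value to an XGMML-safe type name; fall back to 'string'
    # instead of leaving the name undefined for unexpected types.
    if isinstance(value, bool):        # bool is a subclass of int, so check it first
        return 'string'
    if isinstance(value, int):
        return 'integer'
    if isinstance(value, float):
        return 'real'
    return 'string'

print(safe_type(3), safe_type(2.5), safe_type("x"))   # integer real string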
diging/tethne
tethne/readers/wos.py
read
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None, corpus_class=Corpus, **kwargs): """ Parse one or more WoS field-tagged data files. Examples -------- .. code-block:: python >>> from tethne.readers import wos >>> corpus = wos.read("/path/to/some/wos/data") >>> corpus <tethne.classes.corpus.Corpus object at 0x10057c2d0> Parameters ---------- path : str Path to WoS field-tagged data. Can be a path directly to a single data file, or to a directory containing several data files. corpus : bool If True (default), returns a :class:`.Corpus`\. If False, will return only a list of :class:`.Paper`\s. Returns ------- :class:`.Corpus` or :class:`.Paper` """ if not os.path.exists(path): raise ValueError('No such file or directory') # We need the primary index field in the parse results. if parse_only: parse_only.append(index_by) if streaming: return streaming_read(path, corpus=corpus, index_by=index_by, parse_only=parse_only, **kwargs) if os.path.isdir(path): # Directory containing 1+ WoS data files. papers = [] for sname in os.listdir(path): if sname.endswith('txt') and not sname.startswith('.'): papers += read(os.path.join(path, sname), corpus=False, parse_only=parse_only) else: # A single data file. papers = WoSParser(path).parse(parse_only=parse_only) if corpus: return corpus_class(papers, index_by=index_by, **kwargs) return papers
python
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None, corpus_class=Corpus, **kwargs): """ Parse one or more WoS field-tagged data files. Examples -------- .. code-block:: python >>> from tethne.readers import wos >>> corpus = wos.read("/path/to/some/wos/data") >>> corpus <tethne.classes.corpus.Corpus object at 0x10057c2d0> Parameters ---------- path : str Path to WoS field-tagged data. Can be a path directly to a single data file, or to a directory containing several data files. corpus : bool If True (default), returns a :class:`.Corpus`\. If False, will return only a list of :class:`.Paper`\s. Returns ------- :class:`.Corpus` or :class:`.Paper` """ if not os.path.exists(path): raise ValueError('No such file or directory') # We need the primary index field in the parse results. if parse_only: parse_only.append(index_by) if streaming: return streaming_read(path, corpus=corpus, index_by=index_by, parse_only=parse_only, **kwargs) if os.path.isdir(path): # Directory containing 1+ WoS data files. papers = [] for sname in os.listdir(path): if sname.endswith('txt') and not sname.startswith('.'): papers += read(os.path.join(path, sname), corpus=False, parse_only=parse_only) else: # A single data file. papers = WoSParser(path).parse(parse_only=parse_only) if corpus: return corpus_class(papers, index_by=index_by, **kwargs) return papers
[ "def", "read", "(", "path", ",", "corpus", "=", "True", ",", "index_by", "=", "'wosid'", ",", "streaming", "=", "False", ",", "parse_only", "=", "None", ",", "corpus_class", "=", "Corpus", ",", "*", "*", "kwargs", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "ValueError", "(", "'No such file or directory'", ")", "# We need the primary index field in the parse results.", "if", "parse_only", ":", "parse_only", ".", "append", "(", "index_by", ")", "if", "streaming", ":", "return", "streaming_read", "(", "path", ",", "corpus", "=", "corpus", ",", "index_by", "=", "index_by", ",", "parse_only", "=", "parse_only", ",", "*", "*", "kwargs", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "# Directory containing 1+ WoS data files.", "papers", "=", "[", "]", "for", "sname", "in", "os", ".", "listdir", "(", "path", ")", ":", "if", "sname", ".", "endswith", "(", "'txt'", ")", "and", "not", "sname", ".", "startswith", "(", "'.'", ")", ":", "papers", "+=", "read", "(", "os", ".", "path", ".", "join", "(", "path", ",", "sname", ")", ",", "corpus", "=", "False", ",", "parse_only", "=", "parse_only", ")", "else", ":", "# A single data file.", "papers", "=", "WoSParser", "(", "path", ")", ".", "parse", "(", "parse_only", "=", "parse_only", ")", "if", "corpus", ":", "return", "corpus_class", "(", "papers", ",", "index_by", "=", "index_by", ",", "*", "*", "kwargs", ")", "return", "papers" ]
Parse one or more WoS field-tagged data files. Examples -------- .. code-block:: python >>> from tethne.readers import wos >>> corpus = wos.read("/path/to/some/wos/data") >>> corpus <tethne.classes.corpus.Corpus object at 0x10057c2d0> Parameters ---------- path : str Path to WoS field-tagged data. Can be a path directly to a single data file, or to a directory containing several data files. corpus : bool If True (default), returns a :class:`.Corpus`\. If False, will return only a list of :class:`.Paper`\s. Returns ------- :class:`.Corpus` or :class:`.Paper`
[ "Parse", "one", "or", "more", "WoS", "field", "-", "tagged", "data", "files", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L350-L401
train
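One behaviour of read() worth noting: when parse_only is supplied, the index field is appended to the very list the caller passed in. A hedged usage sketch (the data path and field names are placeholders):

from tethne.readers import wos

fields = ['date', 'title']
# Pass a copy so read() does not grow the caller's list with the index field.
corpus = wos.read('/path/to/wos/data', parse_only=list(fields))
print(fields)   # still ['date', 'title']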
diging/tethne
tethne/readers/wos.py
WoSParser.parse_author
def parse_author(self, value): """ Attempts to split an author name into last and first parts. """ tokens = tuple([t.upper().strip() for t in value.split(',')]) if len(tokens) == 1: tokens = value.split(' ') if len(tokens) > 0: if len(tokens) > 1: aulast, auinit = tokens[0:2] # Ignore JR, II, III, etc. else: aulast = tokens[0] auinit = '' else: aulast, auinit = tokens[0], '' aulast = _strip_punctuation(aulast).upper() auinit = _strip_punctuation(auinit).upper() return aulast, auinit
python
def parse_author(self, value): """ Attempts to split an author name into last and first parts. """ tokens = tuple([t.upper().strip() for t in value.split(',')]) if len(tokens) == 1: tokens = value.split(' ') if len(tokens) > 0: if len(tokens) > 1: aulast, auinit = tokens[0:2] # Ignore JR, II, III, etc. else: aulast = tokens[0] auinit = '' else: aulast, auinit = tokens[0], '' aulast = _strip_punctuation(aulast).upper() auinit = _strip_punctuation(auinit).upper() return aulast, auinit
[ "def", "parse_author", "(", "self", ",", "value", ")", ":", "tokens", "=", "tuple", "(", "[", "t", ".", "upper", "(", ")", ".", "strip", "(", ")", "for", "t", "in", "value", ".", "split", "(", "','", ")", "]", ")", "if", "len", "(", "tokens", ")", "==", "1", ":", "tokens", "=", "value", ".", "split", "(", "' '", ")", "if", "len", "(", "tokens", ")", ">", "0", ":", "if", "len", "(", "tokens", ")", ">", "1", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", ":", "2", "]", "# Ignore JR, II, III, etc.", "else", ":", "aulast", "=", "tokens", "[", "0", "]", "auinit", "=", "''", "else", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", "]", ",", "''", "aulast", "=", "_strip_punctuation", "(", "aulast", ")", ".", "upper", "(", ")", "auinit", "=", "_strip_punctuation", "(", "auinit", ")", ".", "upper", "(", ")", "return", "aulast", ",", "auinit" ]
Attempts to split an author name into last and first parts.
[ "Attempts", "to", "split", "an", "author", "name", "into", "last", "and", "first", "parts", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L112-L129
train
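A small worked trace of the splitting step in parse_author, using a made-up name; the final cleanup is done by _strip_punctuation, whose exact output is not reproduced here:

value = "Kessler, M. M."
tokens = tuple(t.upper().strip() for t in value.split(','))
print(tokens)            # ('KESSLER', 'M. M.') -> aulast, auinit before punctuation stripping

value = "Kessler"        # no comma: the code falls back to whitespace splitting
tokens = value.split(' ')
print(tokens)            # ['Kessler'] -> aulast = 'Kessler', auinit = ''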
diging/tethne
tethne/readers/wos.py
WoSParser.handle_CR
def handle_CR(self, value): """ Parses cited references. """ citation = self.entry_class() value = strip_tags(value) # First-author name and publication date. ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)' ny_match = re.match(ptn, value, flags=re.U) nj_match = re.match('([\w\s\W]+),\s([\w\s]+)', value, flags=re.U) if ny_match is not None: name_raw, date, journal = ny_match.groups() elif nj_match is not None: name_raw, journal = nj_match.groups() date = None else: return datematch = re.match('([0-9]{4})', value) if datematch: date = datematch.group(1) name_raw = None if name_raw: name_tokens = [t.replace('.', '') for t in name_raw.split(' ')] if len(name_tokens) > 4 or value.startswith('*'): # Probably not a person. proc = lambda x: _strip_punctuation(x) aulast = ' '.join([proc(n) for n in name_tokens]).upper() auinit = '' elif len(name_tokens) > 0: aulast = name_tokens[0].upper() proc = lambda x: _space_sep(_strip_punctuation(x)) auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper() else: aulast = name_tokens[0].upper() auinit = '' setattr(citation, 'authors_init', [(aulast, auinit)]) if date: date = int(date) setattr(citation, 'date', date) setattr(citation, 'journal', journal) # Volume. v_match = re.search('\,\s+V([0-9A-Za-z]+)', value) if v_match is not None: volume = v_match.group(1) else: volume = None setattr(citation, 'volume', volume) # Start page. p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value) if p_match is not None: page = p_match.group(1) else: page = None setattr(citation, 'pageStart', page) # DOI. doi_match = re.search('DOI\s(.*)', value) if doi_match is not None: doi = doi_match.group(1) else: doi = None setattr(citation, 'doi', doi) return citation
python
def handle_CR(self, value): """ Parses cited references. """ citation = self.entry_class() value = strip_tags(value) # First-author name and publication date. ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)' ny_match = re.match(ptn, value, flags=re.U) nj_match = re.match('([\w\s\W]+),\s([\w\s]+)', value, flags=re.U) if ny_match is not None: name_raw, date, journal = ny_match.groups() elif nj_match is not None: name_raw, journal = nj_match.groups() date = None else: return datematch = re.match('([0-9]{4})', value) if datematch: date = datematch.group(1) name_raw = None if name_raw: name_tokens = [t.replace('.', '') for t in name_raw.split(' ')] if len(name_tokens) > 4 or value.startswith('*'): # Probably not a person. proc = lambda x: _strip_punctuation(x) aulast = ' '.join([proc(n) for n in name_tokens]).upper() auinit = '' elif len(name_tokens) > 0: aulast = name_tokens[0].upper() proc = lambda x: _space_sep(_strip_punctuation(x)) auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper() else: aulast = name_tokens[0].upper() auinit = '' setattr(citation, 'authors_init', [(aulast, auinit)]) if date: date = int(date) setattr(citation, 'date', date) setattr(citation, 'journal', journal) # Volume. v_match = re.search('\,\s+V([0-9A-Za-z]+)', value) if v_match is not None: volume = v_match.group(1) else: volume = None setattr(citation, 'volume', volume) # Start page. p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value) if p_match is not None: page = p_match.group(1) else: page = None setattr(citation, 'pageStart', page) # DOI. doi_match = re.search('DOI\s(.*)', value) if doi_match is not None: doi = doi_match.group(1) else: doi = None setattr(citation, 'doi', doi) return citation
[ "def", "handle_CR", "(", "self", ",", "value", ")", ":", "citation", "=", "self", ".", "entry_class", "(", ")", "value", "=", "strip_tags", "(", "value", ")", "# First-author name and publication date.", "ptn", "=", "'([\\w\\s\\W]+),\\s([0-9]{4}),\\s([\\w\\s]+)'", "ny_match", "=", "re", ".", "match", "(", "ptn", ",", "value", ",", "flags", "=", "re", ".", "U", ")", "nj_match", "=", "re", ".", "match", "(", "'([\\w\\s\\W]+),\\s([\\w\\s]+)'", ",", "value", ",", "flags", "=", "re", ".", "U", ")", "if", "ny_match", "is", "not", "None", ":", "name_raw", ",", "date", ",", "journal", "=", "ny_match", ".", "groups", "(", ")", "elif", "nj_match", "is", "not", "None", ":", "name_raw", ",", "journal", "=", "nj_match", ".", "groups", "(", ")", "date", "=", "None", "else", ":", "return", "datematch", "=", "re", ".", "match", "(", "'([0-9]{4})'", ",", "value", ")", "if", "datematch", ":", "date", "=", "datematch", ".", "group", "(", "1", ")", "name_raw", "=", "None", "if", "name_raw", ":", "name_tokens", "=", "[", "t", ".", "replace", "(", "'.'", ",", "''", ")", "for", "t", "in", "name_raw", ".", "split", "(", "' '", ")", "]", "if", "len", "(", "name_tokens", ")", ">", "4", "or", "value", ".", "startswith", "(", "'*'", ")", ":", "# Probably not a person.", "proc", "=", "lambda", "x", ":", "_strip_punctuation", "(", "x", ")", "aulast", "=", "' '", ".", "join", "(", "[", "proc", "(", "n", ")", "for", "n", "in", "name_tokens", "]", ")", ".", "upper", "(", ")", "auinit", "=", "''", "elif", "len", "(", "name_tokens", ")", ">", "0", ":", "aulast", "=", "name_tokens", "[", "0", "]", ".", "upper", "(", ")", "proc", "=", "lambda", "x", ":", "_space_sep", "(", "_strip_punctuation", "(", "x", ")", ")", "auinit", "=", "' '", ".", "join", "(", "[", "proc", "(", "n", ")", "for", "n", "in", "name_tokens", "[", "1", ":", "]", "]", ")", ".", "upper", "(", ")", "else", ":", "aulast", "=", "name_tokens", "[", "0", "]", ".", "upper", "(", ")", "auinit", "=", "''", "setattr", "(", "citation", ",", "'authors_init'", ",", "[", "(", "aulast", ",", "auinit", ")", "]", ")", "if", "date", ":", "date", "=", "int", "(", "date", ")", "setattr", "(", "citation", ",", "'date'", ",", "date", ")", "setattr", "(", "citation", ",", "'journal'", ",", "journal", ")", "# Volume.", "v_match", "=", "re", ".", "search", "(", "'\\,\\s+V([0-9A-Za-z]+)'", ",", "value", ")", "if", "v_match", "is", "not", "None", ":", "volume", "=", "v_match", ".", "group", "(", "1", ")", "else", ":", "volume", "=", "None", "setattr", "(", "citation", ",", "'volume'", ",", "volume", ")", "# Start page.", "p_match", "=", "re", ".", "search", "(", "'\\,\\s+[Pp]([0-9A-Za-z]+)'", ",", "value", ")", "if", "p_match", "is", "not", "None", ":", "page", "=", "p_match", ".", "group", "(", "1", ")", "else", ":", "page", "=", "None", "setattr", "(", "citation", ",", "'pageStart'", ",", "page", ")", "# DOI.", "doi_match", "=", "re", ".", "search", "(", "'DOI\\s(.*)'", ",", "value", ")", "if", "doi_match", "is", "not", "None", ":", "doi", "=", "doi_match", ".", "group", "(", "1", ")", "else", ":", "doi", "=", "None", "setattr", "(", "citation", ",", "'doi'", ",", "doi", ")", "return", "citation" ]
Parses cited references.
[ "Parses", "cited", "references", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L157-L227
train
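WoS cited-reference (CR) fields typically look like the illustrative string below; tracing the patterns from handle_CR against it shows what each capture group yields. The example string is constructed for illustration, not taken from a real export:

import re

# Illustrative WoS-style cited reference: author, year, journal, volume, start page, DOI.
value = 'KESSLER MM, 1963, AM DOC, V14, P10, DOI 10.1002/asi.5090140103'

ny = re.match(r'([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)', value, flags=re.U)
print(ny.groups())                                              # ('KESSLER MM', '1963', 'AM DOC')
print(re.search(r'\,\s+V([0-9A-Za-z]+)', value).group(1))       # '14'  (volume)
print(re.search(r'\,\s+[Pp]([0-9A-Za-z]+)', value).group(1))    # '10'  (start page)
print(re.search(r'DOI\s(.*)', value).group(1))                  # '10.1002/asi.5090140103'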
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_WC
def postprocess_WC(self, entry): """ Parse WC keywords. Subject keywords are usually semicolon-delimited. """ if type(entry.WC) not in [str, unicode]: WC= u' '.join([unicode(k) for k in entry.WC]) else: WC= entry.WC entry.WC= [k.strip().upper() for k in WC.split(';')]
python
def postprocess_WC(self, entry): """ Parse WC keywords. Subject keywords are usually semicolon-delimited. """ if type(entry.WC) not in [str, unicode]: WC= u' '.join([unicode(k) for k in entry.WC]) else: WC= entry.WC entry.WC= [k.strip().upper() for k in WC.split(';')]
[ "def", "postprocess_WC", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "WC", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "WC", "=", "u' '", ".", "join", "(", "[", "unicode", "(", "k", ")", "for", "k", "in", "entry", ".", "WC", "]", ")", "else", ":", "WC", "=", "entry", ".", "WC", "entry", ".", "WC", "=", "[", "k", ".", "strip", "(", ")", ".", "upper", "(", ")", "for", "k", "in", "WC", ".", "split", "(", "';'", ")", "]" ]
Parse WC keywords. Subject keywords are usually semicolon-delimited.
[ "Parse", "WC", "keywords", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L229-L240
train
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_subject
def postprocess_subject(self, entry): """ Parse subject keywords. Subject keywords are usually semicolon-delimited. """ if type(entry.subject) not in [str, unicode]: subject = u' '.join([unicode(k) for k in entry.subject]) else: subject = entry.subject entry.subject = [k.strip().upper() for k in subject.split(';')]
python
def postprocess_subject(self, entry): """ Parse subject keywords. Subject keywords are usually semicolon-delimited. """ if type(entry.subject) not in [str, unicode]: subject = u' '.join([unicode(k) for k in entry.subject]) else: subject = entry.subject entry.subject = [k.strip().upper() for k in subject.split(';')]
[ "def", "postprocess_subject", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "subject", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "subject", "=", "u' '", ".", "join", "(", "[", "unicode", "(", "k", ")", "for", "k", "in", "entry", ".", "subject", "]", ")", "else", ":", "subject", "=", "entry", ".", "subject", "entry", ".", "subject", "=", "[", "k", ".", "strip", "(", ")", ".", "upper", "(", ")", "for", "k", "in", "subject", ".", "split", "(", "';'", ")", "]" ]
Parse subject keywords. Subject keywords are usually semicolon-delimited.
[ "Parse", "subject", "keywords", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L242-L253
train
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_authorKeywords
def postprocess_authorKeywords(self, entry): """ Parse author keywords. Author keywords are usually semicolon-delimited. """ if type(entry.authorKeywords) not in [str, unicode]: aK = u' '.join([unicode(k) for k in entry.authorKeywords]) else: aK = entry.authorKeywords entry.authorKeywords = [k.strip().upper() for k in aK.split(';')]
python
def postprocess_authorKeywords(self, entry): """ Parse author keywords. Author keywords are usually semicolon-delimited. """ if type(entry.authorKeywords) not in [str, unicode]: aK = u' '.join([unicode(k) for k in entry.authorKeywords]) else: aK = entry.authorKeywords entry.authorKeywords = [k.strip().upper() for k in aK.split(';')]
[ "def", "postprocess_authorKeywords", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "authorKeywords", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "aK", "=", "u' '", ".", "join", "(", "[", "unicode", "(", "k", ")", "for", "k", "in", "entry", ".", "authorKeywords", "]", ")", "else", ":", "aK", "=", "entry", ".", "authorKeywords", "entry", ".", "authorKeywords", "=", "[", "k", ".", "strip", "(", ")", ".", "upper", "(", ")", "for", "k", "in", "aK", ".", "split", "(", "';'", ")", "]" ]
Parse author keywords. Author keywords are usually semicolon-delimited.
[ "Parse", "author", "keywords", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L255-L266
train
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_keywordsPlus
def postprocess_keywordsPlus(self, entry): """ Parse WoS "Keyword Plus" keywords. Keyword Plus keywords are usually semicolon-delimited. """ if type(entry.keywordsPlus) in [str, unicode]: entry.keywordsPlus = [k.strip().upper() for k in entry.keywordsPlus.split(';')]
python
def postprocess_keywordsPlus(self, entry): """ Parse WoS "Keyword Plus" keywords. Keyword Plus keywords are usually semicolon-delimited. """ if type(entry.keywordsPlus) in [str, unicode]: entry.keywordsPlus = [k.strip().upper() for k in entry.keywordsPlus.split(';')]
[ "def", "postprocess_keywordsPlus", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "keywordsPlus", ")", "in", "[", "str", ",", "unicode", "]", ":", "entry", ".", "keywordsPlus", "=", "[", "k", ".", "strip", "(", ")", ".", "upper", "(", ")", "for", "k", "in", "entry", ".", "keywordsPlus", ".", "split", "(", "';'", ")", "]" ]
Parse WoS "Keyword Plus" keywords. Keyword Plus keywords are usually semicolon-delimited.
[ "Parse", "WoS", "Keyword", "Plus", "keywords", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L268-L277
train
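The four keyword post-processors above (WC, subject, authorKeywords, keywordsPlus) all normalise a semicolon-delimited string the same way; a one-off Python 3 illustration of that step, with made-up keywords:

raw = 'Computer Science; information science ;  Library Science'
keywords = [k.strip().upper() for k in raw.split(';')]
print(keywords)   # ['COMPUTER SCIENCE', 'INFORMATION SCIENCE', 'LIBRARY SCIENCE']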
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_funding
def postprocess_funding(self, entry): """ Separates funding agency from grant numbers. """ if type(entry.funding) not in [str, unicode]: return sources = [fu.strip() for fu in entry.funding.split(';')] sources_processed = [] for source in sources: m = re.search('(.*)?\s+\[(.+)\]', source) if m: agency, grant = m.groups() else: agency, grant = source, None sources_processed.append((agency, grant)) entry.funding = sources_processed
python
def postprocess_funding(self, entry): """ Separates funding agency from grant numbers. """ if type(entry.funding) not in [str, unicode]: return sources = [fu.strip() for fu in entry.funding.split(';')] sources_processed = [] for source in sources: m = re.search('(.*)?\s+\[(.+)\]', source) if m: agency, grant = m.groups() else: agency, grant = source, None sources_processed.append((agency, grant)) entry.funding = sources_processed
[ "def", "postprocess_funding", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "funding", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "return", "sources", "=", "[", "fu", ".", "strip", "(", ")", "for", "fu", "in", "entry", ".", "funding", ".", "split", "(", "';'", ")", "]", "sources_processed", "=", "[", "]", "for", "source", "in", "sources", ":", "m", "=", "re", ".", "search", "(", "'(.*)?\\s+\\[(.+)\\]'", ",", "source", ")", "if", "m", ":", "agency", ",", "grant", "=", "m", ".", "groups", "(", ")", "else", ":", "agency", ",", "grant", "=", "source", ",", "None", "sources_processed", ".", "append", "(", "(", "agency", ",", "grant", ")", ")", "entry", ".", "funding", "=", "sources_processed" ]
Separates funding agency from grant numbers.
[ "Separates", "funding", "agency", "from", "grant", "numbers", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L279-L296
train
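An illustrative trace of the funding split above: agencies are separated by semicolons and grant numbers, when present, sit in square brackets. The funding text here is made up for the example:

import re

funding = 'National Science Foundation [SES-1056322]; Mellon Foundation'
sources = [s.strip() for s in funding.split(';')]
for source in sources:
    m = re.search(r'(.*)?\s+\[(.+)\]', source)
    print(m.groups() if m else (source, None))
# ('National Science Foundation', 'SES-1056322')
# ('Mellon Foundation', None)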
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_authors_full
def postprocess_authors_full(self, entry): """ If only a single author was found, ensure that ``authors_full`` is nonetheless a list. """ if type(entry.authors_full) is not list: entry.authors_full = [entry.authors_full]
python
def postprocess_authors_full(self, entry): """ If only a single author was found, ensure that ``authors_full`` is nonetheless a list. """ if type(entry.authors_full) is not list: entry.authors_full = [entry.authors_full]
[ "def", "postprocess_authors_full", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "authors_full", ")", "is", "not", "list", ":", "entry", ".", "authors_full", "=", "[", "entry", ".", "authors_full", "]" ]
If only a single author was found, ensure that ``authors_full`` is nonetheless a list.
[ "If", "only", "a", "single", "author", "was", "found", "ensure", "that", "authors_full", "is", "nonetheless", "a", "list", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L298-L304
train
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_authors_init
def postprocess_authors_init(self, entry): """ If only a single author was found, ensure that ``authors_init`` is nonetheless a list. """ if type(entry.authors_init) is not list: entry.authors_init = [entry.authors_init]
python
def postprocess_authors_init(self, entry): """ If only a single author was found, ensure that ``authors_init`` is nonetheless a list. """ if type(entry.authors_init) is not list: entry.authors_init = [entry.authors_init]
[ "def", "postprocess_authors_init", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "authors_init", ")", "is", "not", "list", ":", "entry", ".", "authors_init", "=", "[", "entry", ".", "authors_init", "]" ]
If only a single author was found, ensure that ``authors_init`` is nonetheless a list.
[ "If", "only", "a", "single", "author", "was", "found", "ensure", "that", "authors_init", "is", "nonetheless", "a", "list", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L306-L312
train
diging/tethne
tethne/readers/wos.py
WoSParser.postprocess_citedReferences
def postprocess_citedReferences(self, entry): """ If only a single cited reference was found, ensure that ``citedReferences`` is nonetheless a list. """ if type(entry.citedReferences) is not list: entry.citedReferences = [entry.citedReferences]
python
def postprocess_citedReferences(self, entry): """ If only a single cited reference was found, ensure that ``citedReferences`` is nonetheless a list. """ if type(entry.citedReferences) is not list: entry.citedReferences = [entry.citedReferences]
[ "def", "postprocess_citedReferences", "(", "self", ",", "entry", ")", ":", "if", "type", "(", "entry", ".", "citedReferences", ")", "is", "not", "list", ":", "entry", ".", "citedReferences", "=", "[", "entry", ".", "citedReferences", "]" ]
If only a single cited reference was found, ensure that ``citedReferences`` is nonetheless a list.
[ "If", "only", "a", "single", "cited", "reference", "was", "found", "ensure", "that", "citedReferences", "is", "nonetheless", "a", "list", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L314-L320
train
diging/tethne
tethne/plot/__init__.py
plot_burstness
def plot_burstness(corpus, B, **kwargs): """ Generate a figure depicting burstness profiles for ``feature``. Parameters ---------- B Returns ------- fig : :class:`matplotlib.figure.Figure` Examples -------- .. code-block:: python >>> from tethne.analyze.corpus import burstness >>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True) >>> fig.savefig('~/burstness.png') Years prior to the first occurrence of each feature are grayed out. Periods in which the feature was bursty are depicted by colored blocks, the opacity of which indicates burstness intensity. .. figure:: _static/images/burstness.png :width: 600 :align: center """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches except ImportError: raise RuntimeError('This method requires the package matplotlib.') color = kwargs.get('color', 'red') # Get width based on slices. years = sorted(corpus.indices['date'].keys()) width = years[1] - years[0] height = 1.0 fig = plt.figure(figsize=(10,len(B)/4.)) f = 1 axes = {} for key, value in B.iteritems(): x,y = value ax = fig.add_subplot(len(B),1,f) f+=1 ax.set_yticks([]) ax.set_xbound(min(years), max(years) + 1) if not f == len(B)+1: # Only show xticks on the bottom subplot. ax.set_xticklabels([]) # Block out years until first occurrence of feature. rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years), height, fill=True, linewidth=0.0) rect.set_facecolor('black') rect.set_alpha(0.3) ax.add_patch(rect) # Add a rectangle for each year, shaded according to burstness state. for d in xrange(min(x), max(x)): try: i = x.index(d) except ValueError: continue xy = (d, 0.) state = y[i] rect = mpatches.Rectangle(xy, width, height, fill=True, linewidth=0.0) rect.set_facecolor(color) rect.set_alpha(state) ax.add_patch(rect) ax.set_ylabel(key, rotation=0, horizontalalignment='right', verticalalignment='center') plt.subplots_adjust(left=0.5) fig.tight_layout(h_pad=0.25) plt.show()
python
def plot_burstness(corpus, B, **kwargs): """ Generate a figure depicting burstness profiles for ``feature``. Parameters ---------- B Returns ------- fig : :class:`matplotlib.figure.Figure` Examples -------- .. code-block:: python >>> from tethne.analyze.corpus import burstness >>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True) >>> fig.savefig('~/burstness.png') Years prior to the first occurrence of each feature are grayed out. Periods in which the feature was bursty are depicted by colored blocks, the opacity of which indicates burstness intensity. .. figure:: _static/images/burstness.png :width: 600 :align: center """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches except ImportError: raise RuntimeError('This method requires the package matplotlib.') color = kwargs.get('color', 'red') # Get width based on slices. years = sorted(corpus.indices['date'].keys()) width = years[1] - years[0] height = 1.0 fig = plt.figure(figsize=(10,len(B)/4.)) f = 1 axes = {} for key, value in B.iteritems(): x,y = value ax = fig.add_subplot(len(B),1,f) f+=1 ax.set_yticks([]) ax.set_xbound(min(years), max(years) + 1) if not f == len(B)+1: # Only show xticks on the bottom subplot. ax.set_xticklabels([]) # Block out years until first occurrence of feature. rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years), height, fill=True, linewidth=0.0) rect.set_facecolor('black') rect.set_alpha(0.3) ax.add_patch(rect) # Add a rectangle for each year, shaded according to burstness state. for d in xrange(min(x), max(x)): try: i = x.index(d) except ValueError: continue xy = (d, 0.) state = y[i] rect = mpatches.Rectangle(xy, width, height, fill=True, linewidth=0.0) rect.set_facecolor(color) rect.set_alpha(state) ax.add_patch(rect) ax.set_ylabel(key, rotation=0, horizontalalignment='right', verticalalignment='center') plt.subplots_adjust(left=0.5) fig.tight_layout(h_pad=0.25) plt.show()
[ "def", "plot_burstness", "(", "corpus", ",", "B", ",", "*", "*", "kwargs", ")", ":", "try", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "patches", "as", "mpatches", "except", "ImportError", ":", "raise", "RuntimeError", "(", "'This method requires the package matplotlib.'", ")", "color", "=", "kwargs", ".", "get", "(", "'color'", ",", "'red'", ")", "# Get width based on slices.", "years", "=", "sorted", "(", "corpus", ".", "indices", "[", "'date'", "]", ".", "keys", "(", ")", ")", "width", "=", "years", "[", "1", "]", "-", "years", "[", "0", "]", "height", "=", "1.0", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "len", "(", "B", ")", "/", "4.", ")", ")", "f", "=", "1", "axes", "=", "{", "}", "for", "key", ",", "value", "in", "B", ".", "iteritems", "(", ")", ":", "x", ",", "y", "=", "value", "ax", "=", "fig", ".", "add_subplot", "(", "len", "(", "B", ")", ",", "1", ",", "f", ")", "f", "+=", "1", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax", ".", "set_xbound", "(", "min", "(", "years", ")", ",", "max", "(", "years", ")", "+", "1", ")", "if", "not", "f", "==", "len", "(", "B", ")", "+", "1", ":", "# Only show xticks on the bottom subplot.", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "# Block out years until first occurrence of feature.", "rect", "=", "mpatches", ".", "Rectangle", "(", "(", "min", "(", "years", ")", ",", "0", ")", ",", "sorted", "(", "x", ")", "[", "0", "]", "-", "min", "(", "years", ")", ",", "height", ",", "fill", "=", "True", ",", "linewidth", "=", "0.0", ")", "rect", ".", "set_facecolor", "(", "'black'", ")", "rect", ".", "set_alpha", "(", "0.3", ")", "ax", ".", "add_patch", "(", "rect", ")", "# Add a rectangle for each year, shaded according to burstness state.", "for", "d", "in", "xrange", "(", "min", "(", "x", ")", ",", "max", "(", "x", ")", ")", ":", "try", ":", "i", "=", "x", ".", "index", "(", "d", ")", "except", "ValueError", ":", "continue", "xy", "=", "(", "d", ",", "0.", ")", "state", "=", "y", "[", "i", "]", "rect", "=", "mpatches", ".", "Rectangle", "(", "xy", ",", "width", ",", "height", ",", "fill", "=", "True", ",", "linewidth", "=", "0.0", ")", "rect", ".", "set_facecolor", "(", "color", ")", "rect", ".", "set_alpha", "(", "state", ")", "ax", ".", "add_patch", "(", "rect", ")", "ax", ".", "set_ylabel", "(", "key", ",", "rotation", "=", "0", ",", "horizontalalignment", "=", "'right'", ",", "verticalalignment", "=", "'center'", ")", "plt", ".", "subplots_adjust", "(", "left", "=", "0.5", ")", "fig", ".", "tight_layout", "(", "h_pad", "=", "0.25", ")", "plt", ".", "show", "(", ")" ]
Generate a figure depicting burstness profiles for ``feature``. Parameters ---------- B Returns ------- fig : :class:`matplotlib.figure.Figure` Examples -------- .. code-block:: python >>> from tethne.analyze.corpus import burstness >>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True) >>> fig.savefig('~/burstness.png') Years prior to the first occurrence of each feature are grayed out. Periods in which the feature was bursty are depicted by colored blocks, the opacity of which indicates burstness intensity. .. figure:: _static/images/burstness.png :width: 600 :align: center
[ "Generate", "a", "figure", "depicting", "burstness", "profiles", "for", "feature", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/plot/__init__.py#L11-L97
train
diging/tethne
tethne/networks/helpers.py
simplify_multigraph
def simplify_multigraph(multigraph, time=False): """ Simplifies a graph by condensing multiple edges between the same node pair into a single edge, with a weight attribute equal to the number of edges. Parameters ---------- graph : networkx.MultiGraph E.g. a coauthorship graph. time : bool If True, will generate 'start' and 'end' attributes for each edge, corresponding to the earliest and latest 'date' values for that edge. Returns ------- graph : networkx.Graph A NetworkX :class:`.graph` . """ graph = nx.Graph() for node in multigraph.nodes(data=True): u = node[0] node_attribs = node[1] graph.add_node(u, node_attribs) for v in multigraph[u]: edges = multigraph.get_edge_data(u, v) # Dict. edge_attribs = { 'weight': len(edges) } if time: # Look for a date in each edge. start = 3000 end = 0 found_date = False for edge in edges.values(): try: found_date = True if edge['date'] < start: start = edge['date'] if edge['date'] > end: end = edge['date'] except KeyError: # No date to be found. pass if found_date: # If no date found, don't add start/end atts. edge_attribs['start'] = start edge_attribs['end'] = end graph.add_edge(u, v, edge_attribs) return graph
python
def simplify_multigraph(multigraph, time=False): """ Simplifies a graph by condensing multiple edges between the same node pair into a single edge, with a weight attribute equal to the number of edges. Parameters ---------- graph : networkx.MultiGraph E.g. a coauthorship graph. time : bool If True, will generate 'start' and 'end' attributes for each edge, corresponding to the earliest and latest 'date' values for that edge. Returns ------- graph : networkx.Graph A NetworkX :class:`.graph` . """ graph = nx.Graph() for node in multigraph.nodes(data=True): u = node[0] node_attribs = node[1] graph.add_node(u, node_attribs) for v in multigraph[u]: edges = multigraph.get_edge_data(u, v) # Dict. edge_attribs = { 'weight': len(edges) } if time: # Look for a date in each edge. start = 3000 end = 0 found_date = False for edge in edges.values(): try: found_date = True if edge['date'] < start: start = edge['date'] if edge['date'] > end: end = edge['date'] except KeyError: # No date to be found. pass if found_date: # If no date found, don't add start/end atts. edge_attribs['start'] = start edge_attribs['end'] = end graph.add_edge(u, v, edge_attribs) return graph
[ "def", "simplify_multigraph", "(", "multigraph", ",", "time", "=", "False", ")", ":", "graph", "=", "nx", ".", "Graph", "(", ")", "for", "node", "in", "multigraph", ".", "nodes", "(", "data", "=", "True", ")", ":", "u", "=", "node", "[", "0", "]", "node_attribs", "=", "node", "[", "1", "]", "graph", ".", "add_node", "(", "u", ",", "node_attribs", ")", "for", "v", "in", "multigraph", "[", "u", "]", ":", "edges", "=", "multigraph", ".", "get_edge_data", "(", "u", ",", "v", ")", "# Dict.", "edge_attribs", "=", "{", "'weight'", ":", "len", "(", "edges", ")", "}", "if", "time", ":", "# Look for a date in each edge.", "start", "=", "3000", "end", "=", "0", "found_date", "=", "False", "for", "edge", "in", "edges", ".", "values", "(", ")", ":", "try", ":", "found_date", "=", "True", "if", "edge", "[", "'date'", "]", "<", "start", ":", "start", "=", "edge", "[", "'date'", "]", "if", "edge", "[", "'date'", "]", ">", "end", ":", "end", "=", "edge", "[", "'date'", "]", "except", "KeyError", ":", "# No date to be found.", "pass", "if", "found_date", ":", "# If no date found, don't add start/end atts.", "edge_attribs", "[", "'start'", "]", "=", "start", "edge_attribs", "[", "'end'", "]", "=", "end", "graph", ".", "add_edge", "(", "u", ",", "v", ",", "edge_attribs", ")", "return", "graph" ]
Simplifies a graph by condensing multiple edges between the same node pair into a single edge, with a weight attribute equal to the number of edges. Parameters ---------- graph : networkx.MultiGraph E.g. a coauthorship graph. time : bool If True, will generate 'start' and 'end' attributes for each edge, corresponding to the earliest and latest 'date' values for that edge. Returns ------- graph : networkx.Graph A NetworkX :class:`.graph` .
[ "Simplifies", "a", "graph", "by", "condensing", "multiple", "edges", "between", "the", "same", "node", "pair", "into", "a", "single", "edge", "with", "a", "weight", "attribute", "equal", "to", "the", "number", "of", "edges", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/helpers.py#L28-L81
train
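A compact Python 3 sketch of the same collapse on current NetworkX, omitting the optional start/end date handling; the helper name is hypothetical and this is not Tethne's implementation:

import networkx as nx

def simplify(multigraph):
    graph = nx.Graph()
    graph.add_nodes_from(multigraph.nodes(data=True))
    for u, v in multigraph.edges():
        # The number of parallel edges between u and v becomes the weight.
        graph.add_edge(u, v, weight=multigraph.number_of_edges(u, v))
    return graph

mg = nx.MultiGraph()
mg.add_edges_from([('a', 'b'), ('a', 'b'), ('b', 'c')])
print(simplify(mg).edges(data=True))
# [('a', 'b', {'weight': 2}), ('b', 'c', {'weight': 1})]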
diging/tethne
tethne/networks/helpers.py
citation_count
def citation_count(papers, key='ayjid', verbose=False): """ Generates citation counts for all of the papers cited by papers. Parameters ---------- papers : list A list of :class:`.Paper` instances. key : str Property to use as node key. Default is 'ayjid' (recommended). verbose : bool If True, prints status messages. Returns ------- counts : dict Citation counts for all papers cited by papers. """ if verbose: print "Generating citation counts for "+unicode(len(papers))+" papers..." counts = Counter() for P in papers: if P['citations'] is not None: for p in P['citations']: counts[p[key]] += 1 return counts
python
def citation_count(papers, key='ayjid', verbose=False): """ Generates citation counts for all of the papers cited by papers. Parameters ---------- papers : list A list of :class:`.Paper` instances. key : str Property to use as node key. Default is 'ayjid' (recommended). verbose : bool If True, prints status messages. Returns ------- counts : dict Citation counts for all papers cited by papers. """ if verbose: print "Generating citation counts for "+unicode(len(papers))+" papers..." counts = Counter() for P in papers: if P['citations'] is not None: for p in P['citations']: counts[p[key]] += 1 return counts
[ "def", "citation_count", "(", "papers", ",", "key", "=", "'ayjid'", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "print", "\"Generating citation counts for \"", "+", "unicode", "(", "len", "(", "papers", ")", ")", "+", "\" papers...\"", "counts", "=", "Counter", "(", ")", "for", "P", "in", "papers", ":", "if", "P", "[", "'citations'", "]", "is", "not", "None", ":", "for", "p", "in", "P", "[", "'citations'", "]", ":", "counts", "[", "p", "[", "key", "]", "]", "+=", "1", "return", "counts" ]
Generates citation counts for all of the papers cited by papers. Parameters ---------- papers : list A list of :class:`.Paper` instances. key : str Property to use as node key. Default is 'ayjid' (recommended). verbose : bool If True, prints status messages. Returns ------- counts : dict Citation counts for all papers cited by papers.
[ "Generates", "citation", "counts", "for", "all", "of", "the", "papers", "cited", "by", "papers", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/helpers.py#L83-L111
train
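citation_count above simply tallies how often each cited work appears across the input papers. A hedged stand-alone illustration using plain dicts in place of Paper instances (the records and 'ayjid' values are made up):

from collections import Counter

papers = [
    {'citations': [{'ayjid': 'KESSLER M 1963 AM DOC'}, {'ayjid': 'PRICE D 1965 SCIENCE'}]},
    {'citations': [{'ayjid': 'PRICE D 1965 SCIENCE'}]},
    {'citations': None},
]

counts = Counter()
for paper in papers:
    for cited in paper['citations'] or []:   # skip papers with no parsed citations
        counts[cited['ayjid']] += 1
print(counts.most_common())
# [('PRICE D 1965 SCIENCE', 2), ('KESSLER M 1963 AM DOC', 1)]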
diging/tethne
tethne/analyze/collection.py
connected
def connected(G, method_name, **kwargs): """ Performs analysis methods from networkx.connected on each graph in the collection. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method : string Name of method in networkx.connected. **kwargs : kwargs Keyword arguments, passed directly to method. Returns ------- results : dict Keys are graph indices, values are output of method for that graph. Raises ------ ValueError If name is not in networkx.connected, or if no such method exists. """ warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.", DeprecationWarning) return G.analyze(['connected', method_name], **kwargs)
python
def connected(G, method_name, **kwargs): """ Performs analysis methods from networkx.connected on each graph in the collection. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method : string Name of method in networkx.connected. **kwargs : kwargs Keyword arguments, passed directly to method. Returns ------- results : dict Keys are graph indices, values are output of method for that graph. Raises ------ ValueError If name is not in networkx.connected, or if no such method exists. """ warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.", DeprecationWarning) return G.analyze(['connected', method_name], **kwargs)
[ "def", "connected", "(", "G", ",", "method_name", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"To be removed in 0.8. Use GraphCollection.analyze instead.\"", ",", "DeprecationWarning", ")", "return", "G", ".", "analyze", "(", "[", "'connected'", ",", "method_name", "]", ",", "*", "*", "kwargs", ")" ]
Performs analysis methods from networkx.connected on each graph in the collection. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method : string Name of method in networkx.connected. **kwargs : kwargs Keyword arguments, passed directly to method. Returns ------- results : dict Keys are graph indices, values are output of method for that graph. Raises ------ ValueError If name is not in networkx.connected, or if no such method exists.
[ "Performs", "analysis", "methods", "from", "networkx", ".", "connected", "on", "each", "graph", "in", "the", "collection", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/collection.py#L72-L101
train
diging/tethne
tethne/analyze/collection.py
attachment_probability
def attachment_probability(G): """ Calculates the observed attachment probability for each node at each time-step. Attachment probability is calculated based on the observed new edges in the next time-step. So if a node acquires new edges at time t, this will accrue to the node's attachment probability at time t-1. Thus at a given time, one can ask whether degree and attachment probability are related. Parameters ---------- G : :class:`.GraphCollection` Must be sliced by 'date'. See :func:`.GraphCollection.slice`\. Returns ------- probs : dict Keyed by index in G.graphs, and then by node. """ warnings.warn("Removed in 0.8. Too domain-specific.") probs = {} G_ = None k_ = None for k,g in G.graphs.iteritems(): new_edges = {} if G_ is not None: for n in g.nodes(): try: old_neighbors = set(G_[n].keys()) if len(old_neighbors) > 0: new_neighbors = set(g[n].keys()) - old_neighbors new_edges[n] = float(len(new_neighbors)) else: new_edges[n] = 0. except KeyError: pass N = sum( new_edges.values() ) probs[k_] = { n:0. for n in G_.nodes() } if N > 0.: for n in G.nodes(): try: probs[k_][n] = new_edges[n]/N except KeyError: pass if probs[k_] is not None: networkx.set_node_attributes(G.graphs[k_], 'attachment_probability', probs[k_]) G_ = G k_ = k # Handle last graph (no values). key = G.graphs.keys()[-1] zprobs = { n:0. for n in G.graphs[key].nodes() } networkx.set_node_attributes(G.graphs[key], 'attachment_probability', zprobs) return probs
python
def attachment_probability(G): """ Calculates the observed attachment probability for each node at each time-step. Attachment probability is calculated based on the observed new edges in the next time-step. So if a node acquires new edges at time t, this will accrue to the node's attachment probability at time t-1. Thus at a given time, one can ask whether degree and attachment probability are related. Parameters ---------- G : :class:`.GraphCollection` Must be sliced by 'date'. See :func:`.GraphCollection.slice`\. Returns ------- probs : dict Keyed by index in G.graphs, and then by node. """ warnings.warn("Removed in 0.8. Too domain-specific.") probs = {} G_ = None k_ = None for k,g in G.graphs.iteritems(): new_edges = {} if G_ is not None: for n in g.nodes(): try: old_neighbors = set(G_[n].keys()) if len(old_neighbors) > 0: new_neighbors = set(g[n].keys()) - old_neighbors new_edges[n] = float(len(new_neighbors)) else: new_edges[n] = 0. except KeyError: pass N = sum( new_edges.values() ) probs[k_] = { n:0. for n in G_.nodes() } if N > 0.: for n in G.nodes(): try: probs[k_][n] = new_edges[n]/N except KeyError: pass if probs[k_] is not None: networkx.set_node_attributes(G.graphs[k_], 'attachment_probability', probs[k_]) G_ = G k_ = k # Handle last graph (no values). key = G.graphs.keys()[-1] zprobs = { n:0. for n in G.graphs[key].nodes() } networkx.set_node_attributes(G.graphs[key], 'attachment_probability', zprobs) return probs
[ "def", "attachment_probability", "(", "G", ")", ":", "warnings", ".", "warn", "(", "\"Removed in 0.8. Too domain-specific.\"", ")", "probs", "=", "{", "}", "G_", "=", "None", "k_", "=", "None", "for", "k", ",", "g", "in", "G", ".", "graphs", ".", "iteritems", "(", ")", ":", "new_edges", "=", "{", "}", "if", "G_", "is", "not", "None", ":", "for", "n", "in", "g", ".", "nodes", "(", ")", ":", "try", ":", "old_neighbors", "=", "set", "(", "G_", "[", "n", "]", ".", "keys", "(", ")", ")", "if", "len", "(", "old_neighbors", ")", ">", "0", ":", "new_neighbors", "=", "set", "(", "g", "[", "n", "]", ".", "keys", "(", ")", ")", "-", "old_neighbors", "new_edges", "[", "n", "]", "=", "float", "(", "len", "(", "new_neighbors", ")", ")", "else", ":", "new_edges", "[", "n", "]", "=", "0.", "except", "KeyError", ":", "pass", "N", "=", "sum", "(", "new_edges", ".", "values", "(", ")", ")", "probs", "[", "k_", "]", "=", "{", "n", ":", "0.", "for", "n", "in", "G_", ".", "nodes", "(", ")", "}", "if", "N", ">", "0.", ":", "for", "n", "in", "G", ".", "nodes", "(", ")", ":", "try", ":", "probs", "[", "k_", "]", "[", "n", "]", "=", "new_edges", "[", "n", "]", "/", "N", "except", "KeyError", ":", "pass", "if", "probs", "[", "k_", "]", "is", "not", "None", ":", "networkx", ".", "set_node_attributes", "(", "G", ".", "graphs", "[", "k_", "]", ",", "'attachment_probability'", ",", "probs", "[", "k_", "]", ")", "G_", "=", "G", "k_", "=", "k", "# Handle last graph (no values).", "key", "=", "G", ".", "graphs", ".", "keys", "(", ")", "[", "-", "1", "]", "zprobs", "=", "{", "n", ":", "0.", "for", "n", "in", "G", ".", "graphs", "[", "key", "]", ".", "nodes", "(", ")", "}", "networkx", ".", "set_node_attributes", "(", "G", ".", "graphs", "[", "key", "]", ",", "'attachment_probability'", ",", "zprobs", ")", "return", "probs" ]
Calculates the observed attachment probability for each node at each time-step. Attachment probability is calculated based on the observed new edges in the next time-step. So if a node acquires new edges at time t, this will accrue to the node's attachment probability at time t-1. Thus at a given time, one can ask whether degree and attachment probability are related. Parameters ---------- G : :class:`.GraphCollection` Must be sliced by 'date'. See :func:`.GraphCollection.slice`\. Returns ------- probs : dict Keyed by index in G.graphs, and then by node.
[ "Calculates", "the", "observed", "attachment", "probability", "for", "each", "node", "at", "each", "time", "-", "step", ".", "Attachment", "probability", "is", "calculated", "based", "on", "the", "observed", "new", "edges", "in", "the", "next", "time", "-", "step", ".", "So", "if", "a", "node", "acquires", "new", "edges", "at", "time", "t", "this", "will", "accrue", "to", "the", "node", "s", "attachment", "probability", "at", "time", "t", "-", "1", ".", "Thus", "at", "a", "given", "time", "one", "can", "ask", "whether", "degree", "and", "attachment", "probability", "are", "related", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/collection.py#L104-L166
train
diging/tethne
tethne/analyze/graph.py
global_closeness_centrality
def global_closeness_centrality(g, node=None, normalize=True): """ Calculates global closeness centrality for one or all nodes in the network. See :func:`.node_global_closeness_centrality` for more information. Parameters ---------- g : networkx.Graph normalize : boolean If True, normalizes centrality based on the average shortest path length. Default is True. Returns ------- C : dict Dictionary of results, with node identifiers as keys and gcc as values. """ if not node: C = {} for node in g.nodes(): C[node] = global_closeness_centrality(g, node, normalize=normalize) return C values = nx.shortest_path_length(g, node).values() c = sum([1./pl for pl in values if pl != 0.]) / len(g) if normalize: ac = 0 for sg in nx.connected_component_subgraphs(g): if len(sg.nodes()) > 1: aspl = nx.average_shortest_path_length(sg) ac += (1./aspl) * (float(len(sg)) / float(len(g))**2 ) c = c/ac return c
python
def global_closeness_centrality(g, node=None, normalize=True): """ Calculates global closeness centrality for one or all nodes in the network. See :func:`.node_global_closeness_centrality` for more information. Parameters ---------- g : networkx.Graph normalize : boolean If True, normalizes centrality based on the average shortest path length. Default is True. Returns ------- C : dict Dictionary of results, with node identifiers as keys and gcc as values. """ if not node: C = {} for node in g.nodes(): C[node] = global_closeness_centrality(g, node, normalize=normalize) return C values = nx.shortest_path_length(g, node).values() c = sum([1./pl for pl in values if pl != 0.]) / len(g) if normalize: ac = 0 for sg in nx.connected_component_subgraphs(g): if len(sg.nodes()) > 1: aspl = nx.average_shortest_path_length(sg) ac += (1./aspl) * (float(len(sg)) / float(len(g))**2 ) c = c/ac return c
[ "def", "global_closeness_centrality", "(", "g", ",", "node", "=", "None", ",", "normalize", "=", "True", ")", ":", "if", "not", "node", ":", "C", "=", "{", "}", "for", "node", "in", "g", ".", "nodes", "(", ")", ":", "C", "[", "node", "]", "=", "global_closeness_centrality", "(", "g", ",", "node", ",", "normalize", "=", "normalize", ")", "return", "C", "values", "=", "nx", ".", "shortest_path_length", "(", "g", ",", "node", ")", ".", "values", "(", ")", "c", "=", "sum", "(", "[", "1.", "/", "pl", "for", "pl", "in", "values", "if", "pl", "!=", "0.", "]", ")", "/", "len", "(", "g", ")", "if", "normalize", ":", "ac", "=", "0", "for", "sg", "in", "nx", ".", "connected_component_subgraphs", "(", "g", ")", ":", "if", "len", "(", "sg", ".", "nodes", "(", ")", ")", ">", "1", ":", "aspl", "=", "nx", ".", "average_shortest_path_length", "(", "sg", ")", "ac", "+=", "(", "1.", "/", "aspl", ")", "*", "(", "float", "(", "len", "(", "sg", ")", ")", "/", "float", "(", "len", "(", "g", ")", ")", "**", "2", ")", "c", "=", "c", "/", "ac", "return", "c" ]
Calculates global closeness centrality for one or all nodes in the network. See :func:`.node_global_closeness_centrality` for more information. Parameters ---------- g : networkx.Graph normalize : boolean If True, normalizes centrality based on the average shortest path length. Default is True. Returns ------- C : dict Dictionary of results, with node identifiers as keys and gcc as values.
[ "Calculates", "global", "closeness", "centrality", "for", "one", "or", "all", "nodes", "in", "the", "network", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/graph.py#L13-L49
train
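In formula terms, the unnormalised value computed above for a node v is the sum of 1/d(v, u) over all reachable nodes u != v, divided by the number of nodes in the graph. A small worked check on a four-node path graph (normalisation omitted):

import networkx as nx

g = nx.path_graph(4)                       # 0 - 1 - 2 - 3
lengths = nx.shortest_path_length(g, 1)    # {1: 0, 0: 1, 2: 1, 3: 2}
c = sum(1.0 / d for d in lengths.values() if d != 0) / len(g)
print(c)   # (1 + 1 + 0.5) / 4 = 0.625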
diging/tethne
tethne/readers/dfr.py
ngrams
def ngrams(path, elem, ignore_hash=True): """ Yields N-grams from a JSTOR DfR dataset. Parameters ---------- path : string Path to unzipped JSTOR DfR folder containing N-grams. elem : string Name of subdirectory containing N-grams. (e.g. 'bigrams'). ignore_hash : bool If True, will exclude all N-grams that contain the hash '#' character. Returns ------- ngrams : :class:`.FeatureSet` """ grams = GramGenerator(path, elem, ignore_hash=ignore_hash) return FeatureSet({k: Feature(f) for k, f in grams})
python
def ngrams(path, elem, ignore_hash=True): """ Yields N-grams from a JSTOR DfR dataset. Parameters ---------- path : string Path to unzipped JSTOR DfR folder containing N-grams. elem : string Name of subdirectory containing N-grams. (e.g. 'bigrams'). ignore_hash : bool If True, will exclude all N-grams that contain the hash '#' character. Returns ------- ngrams : :class:`.FeatureSet` """ grams = GramGenerator(path, elem, ignore_hash=ignore_hash) return FeatureSet({k: Feature(f) for k, f in grams})
[ "def", "ngrams", "(", "path", ",", "elem", ",", "ignore_hash", "=", "True", ")", ":", "grams", "=", "GramGenerator", "(", "path", ",", "elem", ",", "ignore_hash", "=", "ignore_hash", ")", "return", "FeatureSet", "(", "{", "k", ":", "Feature", "(", "f", ")", "for", "k", ",", "f", "in", "grams", "}", ")" ]
Yields N-grams from a JSTOR DfR dataset. Parameters ---------- path : string Path to unzipped JSTOR DfR folder containing N-grams. elem : string Name of subdirectory containing N-grams. (e.g. 'bigrams'). ignore_hash : bool If True, will exclude all N-grams that contain the hash '#' character. Returns ------- ngrams : :class:`.FeatureSet`
[ "Yields", "N", "-", "grams", "from", "a", "JSTOR", "DfR", "dataset", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L294-L314
train
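A hedged usage sketch for ``ngrams``; the directory below is a placeholder for an unzipped JSTOR DfR download, and the import path follows this record's ``tethne/readers/dfr.py``.

.. code-block:: python

    from tethne.readers.dfr import ngrams

    # Placeholder path to an unzipped DfR dataset that contains a
    # 'bigrams' subdirectory of per-document XML files.
    dfr_path = '/path/to/dfr-dataset'

    bigrams = ngrams(dfr_path, 'bigrams')   # -> FeatureSet keyed by DOI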
diging/tethne
tethne/readers/dfr.py
tokenize
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False): """ Builds a vocabulary, and replaces words with vocab indices. Parameters ---------- ngrams : dict Keys are paper DOIs, values are lists of (Ngram, frequency) tuples. apply_stoplist : bool If True, will exclude all N-grams that contain words in the NLTK stoplist. Returns ------- t_ngrams : dict Tokenized ngrams, as doi:{i:count}. vocab : dict Vocabulary as i:term. token_tf : :class:`.Counter` Term counts for corpus, as i:count. """ vocab = {} vocab_ = {} word_tf = Counter() word_df = Counter() token_tf = Counter() token_df = Counter() t_ngrams = {} # Get global word counts, first. for grams in ngrams.values(): for g,c in grams: word_tf[g] += c word_df[g] += 1 if apply_stoplist: stoplist = stopwords.words() # Now tokenize. for doi, grams in ngrams.iteritems(): t_ngrams[doi] = [] for g,c in grams: ignore = False # Ignore extremely rare words (probably garbage). if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len: ignore = True # Stoplist. elif apply_stoplist: for w in g.split(): if w in stoplist: ignore = True if not ignore: # Coerce unicode to string. if type(g) is str: g = unicode(g) g = unidecode(g) if g not in vocab.values(): i = len(vocab) vocab[i] = g vocab_[g] = i else: i = vocab_[g] token_tf[i] += c token_df[i] += 1 t_ngrams[doi].append( (i,c) ) return t_ngrams, vocab, token_tf
python
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False): """ Builds a vocabulary, and replaces words with vocab indices. Parameters ---------- ngrams : dict Keys are paper DOIs, values are lists of (Ngram, frequency) tuples. apply_stoplist : bool If True, will exclude all N-grams that contain words in the NLTK stoplist. Returns ------- t_ngrams : dict Tokenized ngrams, as doi:{i:count}. vocab : dict Vocabulary as i:term. token_tf : :class:`.Counter` Term counts for corpus, as i:count. """ vocab = {} vocab_ = {} word_tf = Counter() word_df = Counter() token_tf = Counter() token_df = Counter() t_ngrams = {} # Get global word counts, first. for grams in ngrams.values(): for g,c in grams: word_tf[g] += c word_df[g] += 1 if apply_stoplist: stoplist = stopwords.words() # Now tokenize. for doi, grams in ngrams.iteritems(): t_ngrams[doi] = [] for g,c in grams: ignore = False # Ignore extremely rare words (probably garbage). if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len: ignore = True # Stoplist. elif apply_stoplist: for w in g.split(): if w in stoplist: ignore = True if not ignore: # Coerce unicode to string. if type(g) is str: g = unicode(g) g = unidecode(g) if g not in vocab.values(): i = len(vocab) vocab[i] = g vocab_[g] = i else: i = vocab_[g] token_tf[i] += c token_df[i] += 1 t_ngrams[doi].append( (i,c) ) return t_ngrams, vocab, token_tf
[ "def", "tokenize", "(", "ngrams", ",", "min_tf", "=", "2", ",", "min_df", "=", "2", ",", "min_len", "=", "3", ",", "apply_stoplist", "=", "False", ")", ":", "vocab", "=", "{", "}", "vocab_", "=", "{", "}", "word_tf", "=", "Counter", "(", ")", "word_df", "=", "Counter", "(", ")", "token_tf", "=", "Counter", "(", ")", "token_df", "=", "Counter", "(", ")", "t_ngrams", "=", "{", "}", "# Get global word counts, first.", "for", "grams", "in", "ngrams", ".", "values", "(", ")", ":", "for", "g", ",", "c", "in", "grams", ":", "word_tf", "[", "g", "]", "+=", "c", "word_df", "[", "g", "]", "+=", "1", "if", "apply_stoplist", ":", "stoplist", "=", "stopwords", ".", "words", "(", ")", "# Now tokenize.", "for", "doi", ",", "grams", "in", "ngrams", ".", "iteritems", "(", ")", ":", "t_ngrams", "[", "doi", "]", "=", "[", "]", "for", "g", ",", "c", "in", "grams", ":", "ignore", "=", "False", "# Ignore extremely rare words (probably garbage).", "if", "word_tf", "[", "g", "]", "<", "min_tf", "or", "word_df", "[", "g", "]", "<", "min_df", "or", "len", "(", "g", ")", "<", "min_len", ":", "ignore", "=", "True", "# Stoplist.", "elif", "apply_stoplist", ":", "for", "w", "in", "g", ".", "split", "(", ")", ":", "if", "w", "in", "stoplist", ":", "ignore", "=", "True", "if", "not", "ignore", ":", "# Coerce unicode to string.", "if", "type", "(", "g", ")", "is", "str", ":", "g", "=", "unicode", "(", "g", ")", "g", "=", "unidecode", "(", "g", ")", "if", "g", "not", "in", "vocab", ".", "values", "(", ")", ":", "i", "=", "len", "(", "vocab", ")", "vocab", "[", "i", "]", "=", "g", "vocab_", "[", "g", "]", "=", "i", "else", ":", "i", "=", "vocab_", "[", "g", "]", "token_tf", "[", "i", "]", "+=", "c", "token_df", "[", "i", "]", "+=", "1", "t_ngrams", "[", "doi", "]", ".", "append", "(", "(", "i", ",", "c", ")", ")", "return", "t_ngrams", ",", "vocab", ",", "token_tf" ]
Builds a vocabulary, and replaces words with vocab indices. Parameters ---------- ngrams : dict Keys are paper DOIs, values are lists of (Ngram, frequency) tuples. apply_stoplist : bool If True, will exclude all N-grams that contain words in the NLTK stoplist. Returns ------- t_ngrams : dict Tokenized ngrams, as doi:{i:count}. vocab : dict Vocabulary as i:term. token_tf : :class:`.Counter` Term counts for corpus, as i:count.
[ "Builds", "a", "vocabulary", "and", "replaces", "words", "with", "vocab", "indices", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L317-L390
train
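A small, self-contained sketch of ``tokenize`` using an in-memory stand-in for DfR output; the DOIs and grams are invented for illustration, and real input would normally come from the N-gram reader above.

.. code-block:: python

    from tethne.readers.dfr import tokenize

    # {doi: [(gram, frequency), ...]} -- a toy stand-in for DfR output.
    raw = {
        'doi/1': [('natural selection', 3), ('drosophila', 2)],
        'doi/2': [('natural selection', 1), ('drosophila', 4)],
    }

    # Grams falling below min_tf/min_df/min_len would be dropped; none do here.
    t_ngrams, vocab, token_tf = tokenize(raw, min_tf=2, min_df=2, min_len=3)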
diging/tethne
tethne/readers/dfr.py
_handle_pagerange
def _handle_pagerange(pagerange): """ Yields start and end pages from DfR pagerange field. Parameters ---------- pagerange : str or unicode DfR-style pagerange, e.g. "pp. 435-444". Returns ------- start : str Start page. end : str End page. """ try: pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)") start, end = re.findall(pr, pagerange)[0] except IndexError: start = end = 0 return unicode(start), unicode(end)
python
def _handle_pagerange(pagerange): """ Yields start and end pages from DfR pagerange field. Parameters ---------- pagerange : str or unicode DfR-style pagerange, e.g. "pp. 435-444". Returns ------- start : str Start page. end : str End page. """ try: pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)") start, end = re.findall(pr, pagerange)[0] except IndexError: start = end = 0 return unicode(start), unicode(end)
[ "def", "_handle_pagerange", "(", "pagerange", ")", ":", "try", ":", "pr", "=", "re", ".", "compile", "(", "\"pp\\.\\s([0-9]+)\\-([0-9]+)\"", ")", "start", ",", "end", "=", "re", ".", "findall", "(", "pr", ",", "pagerange", ")", "[", "0", "]", "except", "IndexError", ":", "start", "=", "end", "=", "0", "return", "unicode", "(", "start", ")", ",", "unicode", "(", "end", ")" ]
Yields start and end pages from DfR pagerange field. Parameters ---------- pagerange : str or unicode DfR-style pagerange, e.g. "pp. 435-444". Returns ------- start : str Start page. end : str End page.
[ "Yields", "start", "and", "end", "pages", "from", "DfR", "pagerange", "field", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L430-L453
train
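The regular expression above lends itself to a doctest-style illustration; the fallback branch returns zeros (as unicode) when no page range is found.

.. code-block:: python

    >>> from tethne.readers.dfr import _handle_pagerange
    >>> _handle_pagerange(u'pp. 435-444')
    (u'435', u'444')
    >>> _handle_pagerange(u'n.p.')   # no match -> IndexError -> zeros
    (u'0', u'0')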
diging/tethne
tethne/readers/dfr.py
_handle_authors
def _handle_authors(authors): """ Yields aulast and auinit lists from value of authors node. Parameters ---------- authors : list, str, or unicode Value or values of 'author' element in DfR XML. Returns ------- aulast : list A list of author surnames (string). auinit : list A list of author first-initials (string). """ aulast = [] auinit = [] if type(authors) is list: for author in authors: if type(author) is str: author = unicode(author) author = unidecode(author) try: l,i = _handle_author(author) aulast.append(l) auinit.append(i) except ValueError: pass elif type(authors) is str or type(authors) is unicode: if type(authors) is str: authors = unicode(authors) author = unidecode(authors) try: l,i = _handle_author(author) aulast.append(l) auinit.append(i) except ValueError: pass else: raise ValueError("authors must be a list or a string") return aulast, auinit
python
def _handle_authors(authors): """ Yields aulast and auinit lists from value of authors node. Parameters ---------- authors : list, str, or unicode Value or values of 'author' element in DfR XML. Returns ------- aulast : list A list of author surnames (string). auinit : list A list of author first-initials (string). """ aulast = [] auinit = [] if type(authors) is list: for author in authors: if type(author) is str: author = unicode(author) author = unidecode(author) try: l,i = _handle_author(author) aulast.append(l) auinit.append(i) except ValueError: pass elif type(authors) is str or type(authors) is unicode: if type(authors) is str: authors = unicode(authors) author = unidecode(authors) try: l,i = _handle_author(author) aulast.append(l) auinit.append(i) except ValueError: pass else: raise ValueError("authors must be a list or a string") return aulast, auinit
[ "def", "_handle_authors", "(", "authors", ")", ":", "aulast", "=", "[", "]", "auinit", "=", "[", "]", "if", "type", "(", "authors", ")", "is", "list", ":", "for", "author", "in", "authors", ":", "if", "type", "(", "author", ")", "is", "str", ":", "author", "=", "unicode", "(", "author", ")", "author", "=", "unidecode", "(", "author", ")", "try", ":", "l", ",", "i", "=", "_handle_author", "(", "author", ")", "aulast", ".", "append", "(", "l", ")", "auinit", ".", "append", "(", "i", ")", "except", "ValueError", ":", "pass", "elif", "type", "(", "authors", ")", "is", "str", "or", "type", "(", "authors", ")", "is", "unicode", ":", "if", "type", "(", "authors", ")", "is", "str", ":", "authors", "=", "unicode", "(", "authors", ")", "author", "=", "unidecode", "(", "authors", ")", "try", ":", "l", ",", "i", "=", "_handle_author", "(", "author", ")", "aulast", ".", "append", "(", "l", ")", "auinit", ".", "append", "(", "i", ")", "except", "ValueError", ":", "pass", "else", ":", "raise", "ValueError", "(", "\"authors must be a list or a string\"", ")", "return", "aulast", ",", "auinit" ]
Yields aulast and auinit lists from value of authors node. Parameters ---------- authors : list, str, or unicode Value or values of 'author' element in DfR XML. Returns ------- aulast : list A list of author surnames (string). auinit : list A list of author first-initials (string).
[ "Yields", "aulast", "and", "auinit", "lists", "from", "value", "of", "authors", "node", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L462-L505
train
diging/tethne
tethne/readers/dfr.py
_handle_author
def _handle_author(author): """ Yields aulast and auinit from an author's full name. Parameters ---------- author : str or unicode Author fullname, e.g. "Richard L. Nixon". Returns ------- aulast : str Author surname. auinit : str Author first-initial. """ lname = author.split(' ') try: auinit = lname[0][0] final = lname[-1].upper() if final in ['JR.', 'III']: aulast = lname[-2].upper() + " " + final.strip(".") else: aulast = final except IndexError: raise ValueError("malformed author name") return aulast, auinit
python
def _handle_author(author): """ Yields aulast and auinit from an author's full name. Parameters ---------- author : str or unicode Author fullname, e.g. "Richard L. Nixon". Returns ------- aulast : str Author surname. auinit : str Author first-initial. """ lname = author.split(' ') try: auinit = lname[0][0] final = lname[-1].upper() if final in ['JR.', 'III']: aulast = lname[-2].upper() + " " + final.strip(".") else: aulast = final except IndexError: raise ValueError("malformed author name") return aulast, auinit
[ "def", "_handle_author", "(", "author", ")", ":", "lname", "=", "author", ".", "split", "(", "' '", ")", "try", ":", "auinit", "=", "lname", "[", "0", "]", "[", "0", "]", "final", "=", "lname", "[", "-", "1", "]", ".", "upper", "(", ")", "if", "final", "in", "[", "'JR.'", ",", "'III'", "]", ":", "aulast", "=", "lname", "[", "-", "2", "]", ".", "upper", "(", ")", "+", "\" \"", "+", "final", ".", "strip", "(", "\".\"", ")", "else", ":", "aulast", "=", "final", "except", "IndexError", ":", "raise", "ValueError", "(", "\"malformed author name\"", ")", "return", "aulast", ",", "auinit" ]
Yields aulast and auinit from an author's full name. Parameters ---------- author : str or unicode Author fullname, e.g. "Richard L. Nixon". Returns ------- aulast : str Author surname. auinit : str Author first-initial.
[ "Yields", "aulast", "and", "auinit", "from", "an", "author", "s", "full", "name", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L507-L536
train
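A doctest-style sketch of ``_handle_author``, using the docstring's own example plus an invented suffixed name to show the 'JR.'/'III' branch.

.. code-block:: python

    >>> from tethne.readers.dfr import _handle_author
    >>> _handle_author(u'Richard L. Nixon')
    (u'NIXON', u'R')
    >>> _handle_author(u'Sammy Davis Jr.')
    (u'DAVIS JR', u'S')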
diging/tethne
tethne/readers/dfr.py
GramGenerator._get
def _get(self, i): """ Retrieve data for the ith file in the dataset. """ with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f: # JSTOR hasn't always produced valid XML. contents = re.sub('(&)(?!amp;)', lambda match: '&amp;', f.read()) root = ET.fromstring(contents) doi = root.attrib['id'] if self.K: # Keys only. return doi grams = [] for gram in root.findall(self.elem_xml): text = unidecode(unicode(gram.text.strip())) if ( not self.ignore_hash or '#' not in list(text) ): c = ( text, number(gram.attrib['weight']) ) grams.append(c) if self.V: # Values only. return grams return doi, grams
python
def _get(self, i): """ Retrieve data for the ith file in the dataset. """ with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f: # JSTOR hasn't always produced valid XML. contents = re.sub('(&)(?!amp;)', lambda match: '&amp;', f.read()) root = ET.fromstring(contents) doi = root.attrib['id'] if self.K: # Keys only. return doi grams = [] for gram in root.findall(self.elem_xml): text = unidecode(unicode(gram.text.strip())) if ( not self.ignore_hash or '#' not in list(text) ): c = ( text, number(gram.attrib['weight']) ) grams.append(c) if self.V: # Values only. return grams return doi, grams
[ "def", "_get", "(", "self", ",", "i", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "self", ".", "elem", ",", "self", ".", "files", "[", "i", "]", ")", ",", "'r'", ")", "as", "f", ":", "# JSTOR hasn't always produced valid XML.", "contents", "=", "re", ".", "sub", "(", "'(&)(?!amp;)'", ",", "lambda", "match", ":", "'&amp;'", ",", "f", ".", "read", "(", ")", ")", "root", "=", "ET", ".", "fromstring", "(", "contents", ")", "doi", "=", "root", ".", "attrib", "[", "'id'", "]", "if", "self", ".", "K", ":", "# Keys only.", "return", "doi", "grams", "=", "[", "]", "for", "gram", "in", "root", ".", "findall", "(", "self", ".", "elem_xml", ")", ":", "text", "=", "unidecode", "(", "unicode", "(", "gram", ".", "text", ".", "strip", "(", ")", ")", ")", "if", "(", "not", "self", ".", "ignore_hash", "or", "'#'", "not", "in", "list", "(", "text", ")", ")", ":", "c", "=", "(", "text", ",", "number", "(", "gram", ".", "attrib", "[", "'weight'", "]", ")", ")", "grams", ".", "append", "(", "c", ")", "if", "self", ".", "V", ":", "# Values only.", "return", "grams", "return", "doi", ",", "grams" ]
Retrieve data for the ith file in the dataset.
[ "Retrieve", "data", "for", "the", "ith", "file", "in", "the", "dataset", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L175-L198
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel._generate_corpus
def _generate_corpus(self): """ Writes a corpus to disk amenable to MALLET topic modeling. """ target = self.temp + 'mallet' paths = write_documents(self.corpus, target, self.featureset_name, ['date', 'title']) self.corpus_path, self.metapath = paths self._export_corpus()
python
def _generate_corpus(self): """ Writes a corpus to disk amenable to MALLET topic modeling. """ target = self.temp + 'mallet' paths = write_documents(self.corpus, target, self.featureset_name, ['date', 'title']) self.corpus_path, self.metapath = paths self._export_corpus()
[ "def", "_generate_corpus", "(", "self", ")", ":", "target", "=", "self", ".", "temp", "+", "'mallet'", "paths", "=", "write_documents", "(", "self", ".", "corpus", ",", "target", ",", "self", ".", "featureset_name", ",", "[", "'date'", ",", "'title'", "]", ")", "self", ".", "corpus_path", ",", "self", ".", "metapath", "=", "paths", "self", ".", "_export_corpus", "(", ")" ]
Writes a corpus to disk amenable to MALLET topic modeling.
[ "Writes", "a", "corpus", "to", "disk", "amenable", "to", "MALLET", "topic", "modeling", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L151-L161
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel._export_corpus
def _export_corpus(self): """ Calls MALLET's `import-file` method. """ # bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt # --output mytopic-input.mallet --keep-sequence --remove-stopwords if not os.path.exists(self.mallet_bin): raise IOError("MALLET path invalid or non-existent.") self.input_path = os.path.join(self.temp, "input.mallet") exit = subprocess.call([ self.mallet_bin, 'import-file', '--input', self.corpus_path, '--output', self.input_path, '--keep-sequence', # Required for LDA. '--remove-stopwords']) # Probably redundant. if exit != 0: msg = "MALLET import-file failed with exit code {0}.".format(exit) raise RuntimeError(msg)
python
def _export_corpus(self): """ Calls MALLET's `import-file` method. """ # bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt # --output mytopic-input.mallet --keep-sequence --remove-stopwords if not os.path.exists(self.mallet_bin): raise IOError("MALLET path invalid or non-existent.") self.input_path = os.path.join(self.temp, "input.mallet") exit = subprocess.call([ self.mallet_bin, 'import-file', '--input', self.corpus_path, '--output', self.input_path, '--keep-sequence', # Required for LDA. '--remove-stopwords']) # Probably redundant. if exit != 0: msg = "MALLET import-file failed with exit code {0}.".format(exit) raise RuntimeError(msg)
[ "def", "_export_corpus", "(", "self", ")", ":", "# bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt", "# --output mytopic-input.mallet --keep-sequence --remove-stopwords", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "mallet_bin", ")", ":", "raise", "IOError", "(", "\"MALLET path invalid or non-existent.\"", ")", "self", ".", "input_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "temp", ",", "\"input.mallet\"", ")", "exit", "=", "subprocess", ".", "call", "(", "[", "self", ".", "mallet_bin", ",", "'import-file'", ",", "'--input'", ",", "self", ".", "corpus_path", ",", "'--output'", ",", "self", ".", "input_path", ",", "'--keep-sequence'", ",", "# Required for LDA.", "'--remove-stopwords'", "]", ")", "# Probably redundant.", "if", "exit", "!=", "0", ":", "msg", "=", "\"MALLET import-file failed with exit code {0}.\"", ".", "format", "(", "exit", ")", "raise", "RuntimeError", "(", "msg", ")" ]
Calls MALLET's `import-file` method.
[ "Calls", "MALLET", "s", "import", "-", "file", "method", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L163-L184
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.run
def run(self, **kwargs): """ Calls MALLET's `train-topic` method. """ #$ bin/mallet train-topics --input mytopic-input.mallet #> --num-topics 100 #> --output-doc-topics /Users/erickpeirson/doc_top #> --word-topic-counts-file /Users/erickpeirson/word_top #> --output-topic-keys /Users/erickpeirson/topic_keys if not os.path.exists(self.mallet_bin): raise IOError("MALLET path invalid or non-existent.") for attr in ['Z', 'max_iter']: if not hasattr(self, attr): raise AttributeError('Please set {0}'.format(attr)) self.ll = [] self.num_iters = 0 logger.debug('run() with k={0} for {1} iterations'.format(self.Z, self.max_iter)) prog = re.compile(u'\<([^\)]+)\>') ll_prog = re.compile(r'(\d+)') p = subprocess.Popen([ self.mallet_bin, 'train-topics', '--input', self.input_path, '--num-topics', unicode(self.Z), '--num-iterations', unicode(self.max_iter), '--output-doc-topics', self.dt, '--word-topic-counts-file', self.wt, '--output-model', self.om], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Handle output of MALLET in real time. while p.poll() is None: l = p.stderr.readline() # Keep track of LL/topic. try: this_ll = float(re.findall(u'([-+]\d+\.\d+)', l)[0]) self.ll.append(this_ll) except IndexError: # Not every line will match. pass # Keep track of modeling progress. try: this_iter = float(prog.match(l).groups()[0]) progress = int(100. * this_iter/self.max_iter) print 'Modeling progress: {0}%.\r'.format(progress), except AttributeError: # Not every line will match. pass self.num_iters += self.max_iter self.load()
python
def run(self, **kwargs): """ Calls MALLET's `train-topic` method. """ #$ bin/mallet train-topics --input mytopic-input.mallet #> --num-topics 100 #> --output-doc-topics /Users/erickpeirson/doc_top #> --word-topic-counts-file /Users/erickpeirson/word_top #> --output-topic-keys /Users/erickpeirson/topic_keys if not os.path.exists(self.mallet_bin): raise IOError("MALLET path invalid or non-existent.") for attr in ['Z', 'max_iter']: if not hasattr(self, attr): raise AttributeError('Please set {0}'.format(attr)) self.ll = [] self.num_iters = 0 logger.debug('run() with k={0} for {1} iterations'.format(self.Z, self.max_iter)) prog = re.compile(u'\<([^\)]+)\>') ll_prog = re.compile(r'(\d+)') p = subprocess.Popen([ self.mallet_bin, 'train-topics', '--input', self.input_path, '--num-topics', unicode(self.Z), '--num-iterations', unicode(self.max_iter), '--output-doc-topics', self.dt, '--word-topic-counts-file', self.wt, '--output-model', self.om], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Handle output of MALLET in real time. while p.poll() is None: l = p.stderr.readline() # Keep track of LL/topic. try: this_ll = float(re.findall(u'([-+]\d+\.\d+)', l)[0]) self.ll.append(this_ll) except IndexError: # Not every line will match. pass # Keep track of modeling progress. try: this_iter = float(prog.match(l).groups()[0]) progress = int(100. * this_iter/self.max_iter) print 'Modeling progress: {0}%.\r'.format(progress), except AttributeError: # Not every line will match. pass self.num_iters += self.max_iter self.load()
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "#$ bin/mallet train-topics --input mytopic-input.mallet", "#> --num-topics 100", "#> --output-doc-topics /Users/erickpeirson/doc_top", "#> --word-topic-counts-file /Users/erickpeirson/word_top", "#> --output-topic-keys /Users/erickpeirson/topic_keys", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "mallet_bin", ")", ":", "raise", "IOError", "(", "\"MALLET path invalid or non-existent.\"", ")", "for", "attr", "in", "[", "'Z'", ",", "'max_iter'", "]", ":", "if", "not", "hasattr", "(", "self", ",", "attr", ")", ":", "raise", "AttributeError", "(", "'Please set {0}'", ".", "format", "(", "attr", ")", ")", "self", ".", "ll", "=", "[", "]", "self", ".", "num_iters", "=", "0", "logger", ".", "debug", "(", "'run() with k={0} for {1} iterations'", ".", "format", "(", "self", ".", "Z", ",", "self", ".", "max_iter", ")", ")", "prog", "=", "re", ".", "compile", "(", "u'\\<([^\\)]+)\\>'", ")", "ll_prog", "=", "re", ".", "compile", "(", "r'(\\d+)'", ")", "p", "=", "subprocess", ".", "Popen", "(", "[", "self", ".", "mallet_bin", ",", "'train-topics'", ",", "'--input'", ",", "self", ".", "input_path", ",", "'--num-topics'", ",", "unicode", "(", "self", ".", "Z", ")", ",", "'--num-iterations'", ",", "unicode", "(", "self", ".", "max_iter", ")", ",", "'--output-doc-topics'", ",", "self", ".", "dt", ",", "'--word-topic-counts-file'", ",", "self", ".", "wt", ",", "'--output-model'", ",", "self", ".", "om", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "# Handle output of MALLET in real time.", "while", "p", ".", "poll", "(", ")", "is", "None", ":", "l", "=", "p", ".", "stderr", ".", "readline", "(", ")", "# Keep track of LL/topic.", "try", ":", "this_ll", "=", "float", "(", "re", ".", "findall", "(", "u'([-+]\\d+\\.\\d+)'", ",", "l", ")", "[", "0", "]", ")", "self", ".", "ll", ".", "append", "(", "this_ll", ")", "except", "IndexError", ":", "# Not every line will match.", "pass", "# Keep track of modeling progress.", "try", ":", "this_iter", "=", "float", "(", "prog", ".", "match", "(", "l", ")", ".", "groups", "(", ")", "[", "0", "]", ")", "progress", "=", "int", "(", "100.", "*", "this_iter", "/", "self", ".", "max_iter", ")", "print", "'Modeling progress: {0}%.\\r'", ".", "format", "(", "progress", ")", ",", "except", "AttributeError", ":", "# Not every line will match.", "pass", "self", ".", "num_iters", "+=", "self", ".", "max_iter", "self", ".", "load", "(", ")" ]
Calls MALLET's `train-topic` method.
[ "Calls", "MALLET", "s", "train", "-", "topic", "method", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L186-L241
train
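A heavily hedged sketch of driving ``run``: it assumes ``model`` is an ``LDAModel`` whose constructor (not shown in these records) has already set ``mallet_bin``, ``temp``, and the corpus paths; only ``Z`` and ``max_iter`` are set here because ``run`` checks for them explicitly.

.. code-block:: python

    # 'model' is assumed to be a configured LDAModel instance.
    model.Z = 50          # number of topics
    model.max_iter = 500  # MALLET train-topics iterations

    model.run()           # shells out to MALLET, then calls load()
    print(model.ll[-5:])  # tail of the per-iteration log-likelihood trace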
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.topics_in
def topics_in(self, d, topn=5): """ List the top ``topn`` topics in document ``d``. """ return self.theta.features[d].top(topn)
python
def topics_in(self, d, topn=5): """ List the top ``topn`` topics in document ``d``. """ return self.theta.features[d].top(topn)
[ "def", "topics_in", "(", "self", ",", "d", ",", "topn", "=", "5", ")", ":", "return", "self", ".", "theta", ".", "features", "[", "d", "]", ".", "top", "(", "topn", ")" ]
List the top ``topn`` topics in document ``d``.
[ "List", "the", "top", "topn", "topics", "in", "document", "d", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L307-L311
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.list_topic
def list_topic(self, k, Nwords=10): """ List the top ``topn`` words for topic ``k``. Examples -------- .. code-block:: python >>> model.list_topic(1, Nwords=5) [ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ] """ return [(self.vocabulary[w], p) for w, p in self.phi.features[k].top(Nwords)]
python
def list_topic(self, k, Nwords=10): """ List the top ``topn`` words for topic ``k``. Examples -------- .. code-block:: python >>> model.list_topic(1, Nwords=5) [ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ] """ return [(self.vocabulary[w], p) for w, p in self.phi.features[k].top(Nwords)]
[ "def", "list_topic", "(", "self", ",", "k", ",", "Nwords", "=", "10", ")", ":", "return", "[", "(", "self", ".", "vocabulary", "[", "w", "]", ",", "p", ")", "for", "w", ",", "p", "in", "self", ".", "phi", ".", "features", "[", "k", "]", ".", "top", "(", "Nwords", ")", "]" ]
List the top ``topn`` words for topic ``k``. Examples -------- .. code-block:: python >>> model.list_topic(1, Nwords=5) [ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ]
[ "List", "the", "top", "topn", "words", "for", "topic", "k", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L313-L329
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.list_topics
def list_topics(self, Nwords=10): """ List the top ``Nwords`` words for each topic. """ return [(k, self.list_topic(k, Nwords)) for k in xrange(len(self.phi))]
python
def list_topics(self, Nwords=10): """ List the top ``Nwords`` words for each topic. """ return [(k, self.list_topic(k, Nwords)) for k in xrange(len(self.phi))]
[ "def", "list_topics", "(", "self", ",", "Nwords", "=", "10", ")", ":", "return", "[", "(", "k", ",", "self", ".", "list_topic", "(", "k", ",", "Nwords", ")", ")", "for", "k", "in", "xrange", "(", "len", "(", "self", ".", "phi", ")", ")", "]" ]
List the top ``Nwords`` words for each topic.
[ "List", "the", "top", "Nwords", "words", "for", "each", "topic", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L331-L335
train
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.print_topics
def print_topics(self, Nwords=10): """ Print the top ``Nwords`` words for each topic. """ print('Topic\tTop %i words' % Nwords) for k, words in self.list_topics(Nwords): print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
python
def print_topics(self, Nwords=10): """ Print the top ``Nwords`` words for each topic. """ print('Topic\tTop %i words' % Nwords) for k, words in self.list_topics(Nwords): print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
[ "def", "print_topics", "(", "self", ",", "Nwords", "=", "10", ")", ":", "print", "(", "'Topic\\tTop %i words'", "%", "Nwords", ")", "for", "k", ",", "words", "in", "self", ".", "list_topics", "(", "Nwords", ")", ":", "print", "(", "unicode", "(", "k", ")", ".", "ljust", "(", "3", ")", "+", "'\\t'", "+", "' '", ".", "join", "(", "list", "(", "zip", "(", "*", "words", ")", ")", "[", "0", "]", ")", ")" ]
Print the top ``Nwords`` words for each topic.
[ "Print", "the", "top", "Nwords", "words", "for", "each", "topic", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L338-L344
train
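Assuming ``model`` is a fitted ``LDAModel`` (e.g. after ``run`` above), the inspection methods compose naturally; ``paper_id`` stands in for whatever identifier keys ``model.theta``.

.. code-block:: python

    model.print_topics(Nwords=8)                 # one line per topic

    top_words = model.list_topic(2, Nwords=5)    # [(word, probability), ...]

    # 'paper_id' is a placeholder for a document identifier in the corpus.
    doc_topics = model.topics_in(paper_id, topn=3)   # [(topic, proportion), ...]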
diging/tethne
tethne/model/corpus/mallet.py
LDAModel.topic_over_time
def topic_over_time(self, k, mode='counts', slice_kwargs={}): """ Calculate the representation of topic ``k`` in the corpus over time. """ return self.corpus.feature_distribution('topics', k, mode=mode, **slice_kwargs)
python
def topic_over_time(self, k, mode='counts', slice_kwargs={}): """ Calculate the representation of topic ``k`` in the corpus over time. """ return self.corpus.feature_distribution('topics', k, mode=mode, **slice_kwargs)
[ "def", "topic_over_time", "(", "self", ",", "k", ",", "mode", "=", "'counts'", ",", "slice_kwargs", "=", "{", "}", ")", ":", "return", "self", ".", "corpus", ".", "feature_distribution", "(", "'topics'", ",", "k", ",", "mode", "=", "mode", ",", "*", "*", "slice_kwargs", ")" ]
Calculate the representation of topic ``k`` in the corpus over time.
[ "Calculate", "the", "representation", "of", "topic", "k", "in", "the", "corpus", "over", "time", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L347-L353
train
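``topic_over_time`` simply delegates to ``Corpus.feature_distribution`` on the ``'topics'`` featureset, so it returns parallel lists of slice keys and values; a hedged sketch, with the slice keyword chosen for illustration.

.. code-block:: python

    # 'model' is assumed to be a fitted LDAModel with an attached corpus.
    keys, values = model.topic_over_time(2, mode='counts',
                                         slice_kwargs={'window_size': 2})
    for year, weight in zip(keys, values):
        print(year, weight)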
diging/tethne
tethne/classes/corpus.py
Corpus.distribution
def distribution(self, **slice_kwargs): """ Calculates the number of papers in each slice, as defined by ``slice_kwargs``. Examples -------- .. code-block:: python >>> corpus.distribution(step_size=1, window_size=1) [5, 5] Parameters ---------- slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list """ values = [] keys = [] for key, size in self.slice(count_only=True, **slice_kwargs): values.append(size) keys.append(key) return keys, values
python
def distribution(self, **slice_kwargs): """ Calculates the number of papers in each slice, as defined by ``slice_kwargs``. Examples -------- .. code-block:: python >>> corpus.distribution(step_size=1, window_size=1) [5, 5] Parameters ---------- slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list """ values = [] keys = [] for key, size in self.slice(count_only=True, **slice_kwargs): values.append(size) keys.append(key) return keys, values
[ "def", "distribution", "(", "self", ",", "*", "*", "slice_kwargs", ")", ":", "values", "=", "[", "]", "keys", "=", "[", "]", "for", "key", ",", "size", "in", "self", ".", "slice", "(", "count_only", "=", "True", ",", "*", "*", "slice_kwargs", ")", ":", "values", ".", "append", "(", "size", ")", "keys", ".", "append", "(", "key", ")", "return", "keys", ",", "values" ]
Calculates the number of papers in each slice, as defined by ``slice_kwargs``. Examples -------- .. code-block:: python >>> corpus.distribution(step_size=1, window_size=1) [5, 5] Parameters ---------- slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list
[ "Calculates", "the", "number", "of", "papers", "in", "each", "slice", "as", "defined", "by", "slice_kwargs", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L595-L622
train
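A short sketch of ``distribution``, assuming ``corpus`` is an existing ``Corpus``; the keyword arguments mirror the docstring's example, and the method returns parallel lists of slice keys and paper counts.

.. code-block:: python

    # 'corpus' is assumed to be an existing tethne Corpus.
    years, counts = corpus.distribution(step_size=1, window_size=1)
    for year, count in zip(years, counts):
        print(year, count)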
diging/tethne
tethne/classes/corpus.py
Corpus.feature_distribution
def feature_distribution(self, featureset_name, feature, mode='counts', **slice_kwargs): """ Calculates the distribution of a feature across slices of the corpus. Examples -------- .. code-block:: python >>> corpus.feature_distribution(featureset_name='citations', \ ... feature='DOLE RJ 1965 CELL', \ ... step_size=1, window_size=1) [2, 15, 25, 1] Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. feature : str Name of the specific feature of interest. E.g. if ``featureset_name='citations'``, then ``feature`` could be something like ``'DOLE RJ 1965 CELL'``. mode : str (default: ``'counts'``) If set to ``'counts'``, values will be the sum of all count values for the feature in each slice. If set to ``'documentCounts'``, values will be the number of papers in which the feature occurs in each slice. slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list """ values = [] keys = [] fset = self.features[featureset_name] for key, papers in self.slice(subcorpus=False, **slice_kwargs): allfeatures = [v for v in chain(*[fset.features[self._generate_index(p)] for p in papers if self._generate_index(p) in fset.features])] if len(allfeatures) < 1: keys.append(key) values.append(0.) continue count = 0. for elem, v in allfeatures: if elem != feature: continue if mode == 'counts': count += v else: count += 1. values.append(count) keys.append(key) return keys, values
python
def feature_distribution(self, featureset_name, feature, mode='counts', **slice_kwargs): """ Calculates the distribution of a feature across slices of the corpus. Examples -------- .. code-block:: python >>> corpus.feature_distribution(featureset_name='citations', \ ... feature='DOLE RJ 1965 CELL', \ ... step_size=1, window_size=1) [2, 15, 25, 1] Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. feature : str Name of the specific feature of interest. E.g. if ``featureset_name='citations'``, then ``feature`` could be something like ``'DOLE RJ 1965 CELL'``. mode : str (default: ``'counts'``) If set to ``'counts'``, values will be the sum of all count values for the feature in each slice. If set to ``'documentCounts'``, values will be the number of papers in which the feature occurs in each slice. slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list """ values = [] keys = [] fset = self.features[featureset_name] for key, papers in self.slice(subcorpus=False, **slice_kwargs): allfeatures = [v for v in chain(*[fset.features[self._generate_index(p)] for p in papers if self._generate_index(p) in fset.features])] if len(allfeatures) < 1: keys.append(key) values.append(0.) continue count = 0. for elem, v in allfeatures: if elem != feature: continue if mode == 'counts': count += v else: count += 1. values.append(count) keys.append(key) return keys, values
[ "def", "feature_distribution", "(", "self", ",", "featureset_name", ",", "feature", ",", "mode", "=", "'counts'", ",", "*", "*", "slice_kwargs", ")", ":", "values", "=", "[", "]", "keys", "=", "[", "]", "fset", "=", "self", ".", "features", "[", "featureset_name", "]", "for", "key", ",", "papers", "in", "self", ".", "slice", "(", "subcorpus", "=", "False", ",", "*", "*", "slice_kwargs", ")", ":", "allfeatures", "=", "[", "v", "for", "v", "in", "chain", "(", "*", "[", "fset", ".", "features", "[", "self", ".", "_generate_index", "(", "p", ")", "]", "for", "p", "in", "papers", "if", "self", ".", "_generate_index", "(", "p", ")", "in", "fset", ".", "features", "]", ")", "]", "if", "len", "(", "allfeatures", ")", "<", "1", ":", "keys", ".", "append", "(", "key", ")", "values", ".", "append", "(", "0.", ")", "continue", "count", "=", "0.", "for", "elem", ",", "v", "in", "allfeatures", ":", "if", "elem", "!=", "feature", ":", "continue", "if", "mode", "==", "'counts'", ":", "count", "+=", "v", "else", ":", "count", "+=", "1.", "values", ".", "append", "(", "count", ")", "keys", ".", "append", "(", "key", ")", "return", "keys", ",", "values" ]
Calculates the distribution of a feature across slices of the corpus. Examples -------- .. code-block:: python >>> corpus.feature_distribution(featureset_name='citations', \ ... feature='DOLE RJ 1965 CELL', \ ... step_size=1, window_size=1) [2, 15, 25, 1] Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. feature : str Name of the specific feature of interest. E.g. if ``featureset_name='citations'``, then ``feature`` could be something like ``'DOLE RJ 1965 CELL'``. mode : str (default: ``'counts'``) If set to ``'counts'``, values will be the sum of all count values for the feature in each slice. If set to ``'documentCounts'``, values will be the number of papers in which the feature occurs in each slice. slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list
[ "Calculates", "the", "distribution", "of", "a", "feature", "across", "slices", "of", "the", "corpus", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L624-L685
train
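``feature_distribution`` follows the same pattern; the citation string below is the docstring's own example, and ``mode='documentCounts'`` switches from summed counts to papers-per-slice.

.. code-block:: python

    # 'corpus' is assumed to carry a 'citations' featureset.
    years, counts = corpus.feature_distribution(
        featureset_name='citations',
        feature='DOLE RJ 1965 CELL',
        mode='documentCounts',
        step_size=1, window_size=1)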
diging/tethne
tethne/classes/corpus.py
Corpus.top_features
def top_features(self, featureset_name, topn=20, by='counts', perslice=False, slice_kwargs={}): """ Retrieves the top ``topn`` most numerous features in the corpus. Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. topn : int (default: ``20``) Number of features to return. by : str (default: ``'counts'``) If ``'counts'``, uses the sum of feature count values to rank features. If ``'documentCounts'``, uses the number of papers in which features occur. perslice : bool (default: False) If True, retrieves the top ``topn`` features in each slice. slice_kwargs : kwargs If ``perslice=True``, these keyword arguments are passed to :meth:`.Corpus.slice`\. """ if perslice: return [(k, subcorpus.features[featureset_name].top(topn, by=by)) for k, subcorpus in self.slice(**slice_kwargs)] return self.features[featureset_name].top(topn, by=by)
python
def top_features(self, featureset_name, topn=20, by='counts', perslice=False, slice_kwargs={}): """ Retrieves the top ``topn`` most numerous features in the corpus. Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. topn : int (default: ``20``) Number of features to return. by : str (default: ``'counts'``) If ``'counts'``, uses the sum of feature count values to rank features. If ``'documentCounts'``, uses the number of papers in which features occur. perslice : bool (default: False) If True, retrieves the top ``topn`` features in each slice. slice_kwargs : kwargs If ``perslice=True``, these keyword arguments are passed to :meth:`.Corpus.slice`\. """ if perslice: return [(k, subcorpus.features[featureset_name].top(topn, by=by)) for k, subcorpus in self.slice(**slice_kwargs)] return self.features[featureset_name].top(topn, by=by)
[ "def", "top_features", "(", "self", ",", "featureset_name", ",", "topn", "=", "20", ",", "by", "=", "'counts'", ",", "perslice", "=", "False", ",", "slice_kwargs", "=", "{", "}", ")", ":", "if", "perslice", ":", "return", "[", "(", "k", ",", "subcorpus", ".", "features", "[", "featureset_name", "]", ".", "top", "(", "topn", ",", "by", "=", "by", ")", ")", "for", "k", ",", "subcorpus", "in", "self", ".", "slice", "(", "*", "*", "slice_kwargs", ")", "]", "return", "self", ".", "features", "[", "featureset_name", "]", ".", "top", "(", "topn", ",", "by", "=", "by", ")" ]
Retrieves the top ``topn`` most numerous features in the corpus. Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. topn : int (default: ``20``) Number of features to return. by : str (default: ``'counts'``) If ``'counts'``, uses the sum of feature count values to rank features. If ``'documentCounts'``, uses the number of papers in which features occur. perslice : bool (default: False) If True, retrieves the top ``topn`` features in each slice. slice_kwargs : kwargs If ``perslice=True``, these keyword arguments are passed to :meth:`.Corpus.slice`\.
[ "Retrieves", "the", "top", "topn", "most", "numerous", "features", "in", "the", "corpus", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L687-L713
train
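A sketch of ``top_features``, again assuming a ``corpus`` with a ``'citations'`` featureset.

.. code-block:: python

    # Corpus-wide: the ten most frequently cited references.
    top = corpus.top_features('citations', topn=10, by='documentCounts')

    # Per-slice: a (key, [(feature, value), ...]) pair for each slice.
    per_slice = corpus.top_features('citations', topn=10, perslice=True,
                                    slice_kwargs={'window_size': 2})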
diging/tethne
tethne/analyze/corpus.py
feature_burstness
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True, s=1.1, gamma=1., **slice_kwargs): """ Estimate burstness profile for a feature over the ``'date'`` axis. Parameters ---------- corpus : :class:`.Corpus` feature : str Name of featureset in ``corpus``. E.g. ``'citations'``. findex : int Index of ``feature`` in ``corpus``. k : int (default: 5) Number of burst states. normalize : bool (default: True) If True, burstness is expressed relative to the highest possible state (``k-1``). Otherwise, states themselves are returned. kwargs : kwargs Parameters for burstness automaton HMM. """ if featureset_name not in corpus.features: corpus.index_feature(featureset_name) if 'date' not in corpus.indices: corpus.index('date') # Get time-intervals between occurrences. dates = [min(corpus.indices['date'].keys()) - 1] # Pad start. X_ = [1.] years, values = corpus.feature_distribution(featureset_name, feature) for year, N in izip(years, values): if N == 0: continue if N > 1: if year == dates[-1] + 1: for n in xrange(int(N)): X_.append(1./N) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) for n in xrange(int(N) - 1): X_.append(1./(N - 1)) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) # Get optimum state sequence. st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k) # Bin by date. A = defaultdict(list) for i in xrange(len(X_)): A[dates[i]].append(st[i]) # Normalize. if normalize: A = {key: mean(values)/k for key, values in A.items()} else: A = {key: mean(values) for key, values in A.items()} D = sorted(A.keys()) return D[1:], [A[d] for d in D[1:]]
python
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True, s=1.1, gamma=1., **slice_kwargs): """ Estimate burstness profile for a feature over the ``'date'`` axis. Parameters ---------- corpus : :class:`.Corpus` feature : str Name of featureset in ``corpus``. E.g. ``'citations'``. findex : int Index of ``feature`` in ``corpus``. k : int (default: 5) Number of burst states. normalize : bool (default: True) If True, burstness is expressed relative to the highest possible state (``k-1``). Otherwise, states themselves are returned. kwargs : kwargs Parameters for burstness automaton HMM. """ if featureset_name not in corpus.features: corpus.index_feature(featureset_name) if 'date' not in corpus.indices: corpus.index('date') # Get time-intervals between occurrences. dates = [min(corpus.indices['date'].keys()) - 1] # Pad start. X_ = [1.] years, values = corpus.feature_distribution(featureset_name, feature) for year, N in izip(years, values): if N == 0: continue if N > 1: if year == dates[-1] + 1: for n in xrange(int(N)): X_.append(1./N) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) for n in xrange(int(N) - 1): X_.append(1./(N - 1)) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) # Get optimum state sequence. st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k) # Bin by date. A = defaultdict(list) for i in xrange(len(X_)): A[dates[i]].append(st[i]) # Normalize. if normalize: A = {key: mean(values)/k for key, values in A.items()} else: A = {key: mean(values) for key, values in A.items()} D = sorted(A.keys()) return D[1:], [A[d] for d in D[1:]]
[ "def", "feature_burstness", "(", "corpus", ",", "featureset_name", ",", "feature", ",", "k", "=", "5", ",", "normalize", "=", "True", ",", "s", "=", "1.1", ",", "gamma", "=", "1.", ",", "*", "*", "slice_kwargs", ")", ":", "if", "featureset_name", "not", "in", "corpus", ".", "features", ":", "corpus", ".", "index_feature", "(", "featureset_name", ")", "if", "'date'", "not", "in", "corpus", ".", "indices", ":", "corpus", ".", "index", "(", "'date'", ")", "# Get time-intervals between occurrences.", "dates", "=", "[", "min", "(", "corpus", ".", "indices", "[", "'date'", "]", ".", "keys", "(", ")", ")", "-", "1", "]", "# Pad start.", "X_", "=", "[", "1.", "]", "years", ",", "values", "=", "corpus", ".", "feature_distribution", "(", "featureset_name", ",", "feature", ")", "for", "year", ",", "N", "in", "izip", "(", "years", ",", "values", ")", ":", "if", "N", "==", "0", ":", "continue", "if", "N", ">", "1", ":", "if", "year", "==", "dates", "[", "-", "1", "]", "+", "1", ":", "for", "n", "in", "xrange", "(", "int", "(", "N", ")", ")", ":", "X_", ".", "append", "(", "1.", "/", "N", ")", "dates", ".", "append", "(", "year", ")", "else", ":", "X_", ".", "append", "(", "float", "(", "year", "-", "dates", "[", "-", "1", "]", ")", ")", "dates", ".", "append", "(", "year", ")", "for", "n", "in", "xrange", "(", "int", "(", "N", ")", "-", "1", ")", ":", "X_", ".", "append", "(", "1.", "/", "(", "N", "-", "1", ")", ")", "dates", ".", "append", "(", "year", ")", "else", ":", "X_", ".", "append", "(", "float", "(", "year", "-", "dates", "[", "-", "1", "]", ")", ")", "dates", ".", "append", "(", "year", ")", "# Get optimum state sequence.", "st", "=", "_forward", "(", "map", "(", "lambda", "x", ":", "x", "*", "100", ",", "X_", ")", ",", "s", "=", "s", ",", "gamma", "=", "gamma", ",", "k", "=", "k", ")", "# Bin by date.", "A", "=", "defaultdict", "(", "list", ")", "for", "i", "in", "xrange", "(", "len", "(", "X_", ")", ")", ":", "A", "[", "dates", "[", "i", "]", "]", ".", "append", "(", "st", "[", "i", "]", ")", "# Normalize.", "if", "normalize", ":", "A", "=", "{", "key", ":", "mean", "(", "values", ")", "/", "k", "for", "key", ",", "values", "in", "A", ".", "items", "(", ")", "}", "else", ":", "A", "=", "{", "key", ":", "mean", "(", "values", ")", "for", "key", ",", "values", "in", "A", ".", "items", "(", ")", "}", "D", "=", "sorted", "(", "A", ".", "keys", "(", ")", ")", "return", "D", "[", "1", ":", "]", ",", "[", "A", "[", "d", "]", "for", "d", "in", "D", "[", "1", ":", "]", "]" ]
Estimate burstness profile for a feature over the ``'date'`` axis. Parameters ---------- corpus : :class:`.Corpus` feature : str Name of featureset in ``corpus``. E.g. ``'citations'``. findex : int Index of ``feature`` in ``corpus``. k : int (default: 5) Number of burst states. normalize : bool (default: True) If True, burstness is expressed relative to the highest possible state (``k-1``). Otherwise, states themselves are returned. kwargs : kwargs Parameters for burstness automaton HMM.
[ "Estimate", "burstness", "profile", "for", "a", "feature", "over", "the", "date", "axis", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/corpus.py#L157-L224
train
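A hedged sketch of ``feature_burstness``; ``corpus`` is assumed to carry a ``'citations'`` featureset, the cited reference is the illustrative string used elsewhere in these docstrings, and the import path follows this record's ``tethne/analyze/corpus.py``.

.. code-block:: python

    from tethne.analyze.corpus import feature_burstness

    # 'corpus' is assumed to be an existing Corpus with citations indexed.
    dates, burstness = feature_burstness(corpus, 'citations',
                                         'DOLE RJ 1965 CELL',
                                         k=5, normalize=True)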
diging/tethne
tethne/networks/papers.py
cocitation
def cocitation(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs): """ Generate a cocitation network. A **cocitation network** is a network in which vertices are papers, and edges indicate that two papers were cited by the same third paper. `CiteSpace <http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_ is a popular desktop application for co-citation analysis, and you can read about the theory behind it `here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_. """ return cooccurrence(corpus, 'citations', min_weight=min_weight, edge_attrs=edge_attrs, **kwargs)
python
def cocitation(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs): """ Generate a cocitation network. A **cocitation network** is a network in which vertices are papers, and edges indicate that two papers were cited by the same third paper. `CiteSpace <http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_ is a popular desktop application for co-citation analysis, and you can read about the theory behind it `here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_. """ return cooccurrence(corpus, 'citations', min_weight=min_weight, edge_attrs=edge_attrs, **kwargs)
[ "def", "cocitation", "(", "corpus", ",", "min_weight", "=", "1", ",", "edge_attrs", "=", "[", "'ayjid'", ",", "'date'", "]", ",", "*", "*", "kwargs", ")", ":", "return", "cooccurrence", "(", "corpus", ",", "'citations'", ",", "min_weight", "=", "min_weight", ",", "edge_attrs", "=", "edge_attrs", ",", "*", "*", "kwargs", ")" ]
Generate a cocitation network. A **cocitation network** is a network in which vertices are papers, and edges indicate that two papers were cited by the same third paper. `CiteSpace <http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_ is a popular desktop application for co-citation analysis, and you can read about the theory behind it `here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_.
[ "Generate", "a", "cocitation", "network", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/papers.py#L43-L56
train
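``cocitation`` is a thin wrapper around ``cooccurrence`` on the ``'citations'`` featureset; a minimal sketch, with ``min_weight`` raised so that only pairs co-cited at least twice are kept.

.. code-block:: python

    from tethne.networks.papers import cocitation

    # 'corpus' is assumed to be an existing Corpus with cited references.
    graph = cocitation(corpus, min_weight=2)
    print(graph.order(), graph.size())   # nodes, edges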
diging/tethne
tethne/classes/feature.py
StructuredFeature.context_chunk
def context_chunk(self, context, j): """ Retrieve the tokens in the ``j``th chunk of context ``context``. Parameters ---------- context : str Context name. j : int Index of a context chunk. Returns ------- chunk : list List of tokens in the selected chunk. """ N_chunks = len(self.contexts[context]) start = self.contexts[context][j] if j == N_chunks - 1: end = len(self) else: end = self.contexts[context][j+1] return [self[i] for i in xrange(start, end)]
python
def context_chunk(self, context, j): """ Retrieve the tokens in the ``j``th chunk of context ``context``. Parameters ---------- context : str Context name. j : int Index of a context chunk. Returns ------- chunk : list List of tokens in the selected chunk. """ N_chunks = len(self.contexts[context]) start = self.contexts[context][j] if j == N_chunks - 1: end = len(self) else: end = self.contexts[context][j+1] return [self[i] for i in xrange(start, end)]
[ "def", "context_chunk", "(", "self", ",", "context", ",", "j", ")", ":", "N_chunks", "=", "len", "(", "self", ".", "contexts", "[", "context", "]", ")", "start", "=", "self", ".", "contexts", "[", "context", "]", "[", "j", "]", "if", "j", "==", "N_chunks", "-", "1", ":", "end", "=", "len", "(", "self", ")", "else", ":", "end", "=", "self", ".", "contexts", "[", "context", "]", "[", "j", "+", "1", "]", "return", "[", "self", "[", "i", "]", "for", "i", "in", "xrange", "(", "start", ",", "end", ")", "]" ]
Retrieve the tokens in the ``j``th chunk of context ``context``. Parameters ---------- context : str Context name. j : int Index of a context chunk. Returns ------- chunk : list List of tokens in the selected chunk.
[ "Retrieve", "the", "tokens", "in", "the", "j", "th", "chunk", "of", "context", "context", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/feature.py#L108-L131
train
diging/tethne
tethne/classes/feature.py
StructuredFeature.add_context
def add_context(self, name, indices, level=None): """ Add a new context level to the hierarchy. By default, new contexts are added to the lowest level of the hierarchy. To insert the context elsewhere in the hierarchy, use the ``level`` argument. For example, ``level=0`` would insert the context at the highest level of the hierarchy. Parameters ---------- name : str indices : list Token indices at which each chunk in the context begins. level : int Level in the hierarchy at which to insert the context. By default, inserts context at the lowest level of the hierarchy """ self._validate_context((name, indices)) if level is None: level = len(self.contexts_ranked) self.contexts_ranked.insert(level, name) self.contexts[name] = indices
python
def add_context(self, name, indices, level=None): """ Add a new context level to the hierarchy. By default, new contexts are added to the lowest level of the hierarchy. To insert the context elsewhere in the hierarchy, use the ``level`` argument. For example, ``level=0`` would insert the context at the highest level of the hierarchy. Parameters ---------- name : str indices : list Token indices at which each chunk in the context begins. level : int Level in the hierarchy at which to insert the context. By default, inserts context at the lowest level of the hierarchy """ self._validate_context((name, indices)) if level is None: level = len(self.contexts_ranked) self.contexts_ranked.insert(level, name) self.contexts[name] = indices
[ "def", "add_context", "(", "self", ",", "name", ",", "indices", ",", "level", "=", "None", ")", ":", "self", ".", "_validate_context", "(", "(", "name", ",", "indices", ")", ")", "if", "level", "is", "None", ":", "level", "=", "len", "(", "self", ".", "contexts_ranked", ")", "self", ".", "contexts_ranked", ".", "insert", "(", "level", ",", "name", ")", "self", ".", "contexts", "[", "name", "]", "=", "indices" ]
Add a new context level to the hierarchy. By default, new contexts are added to the lowest level of the hierarchy. To insert the context elsewhere in the hierarchy, use the ``level`` argument. For example, ``level=0`` would insert the context at the highest level of the hierarchy. Parameters ---------- name : str indices : list Token indices at which each chunk in the context begins. level : int Level in the hierarchy at which to insert the context. By default, inserts context at the lowest level of the hierarchy
[ "Add", "a", "new", "context", "level", "to", "the", "hierarchy", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/feature.py#L170-L195
train
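A hedged sketch combining ``add_context`` with ``context_chunk`` above: ``feature`` is assumed to be an existing ``StructuredFeature`` long enough for the illustrative indices, which mark where each page begins.

.. code-block:: python

    # 'feature' is assumed to be an existing StructuredFeature; the
    # index values below are illustrative page-start token offsets.
    feature.add_context('pages', [0, 250, 504])

    # Tokens belonging to the second page (chunk index 1).
    second_page = feature.context_chunk('pages', 1)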
diging/tethne
tethne/classes/graphcollection.py
GraphCollection.index
def index(self, name, graph): """ Index any new nodes in `graph`, and relabel the nodes in `graph` using the index. Parameters ---------- name : hashable Unique name used to identify the `graph`. graph : networkx.Graph Returns ------- indexed_graph : networkx.Graph """ nodes = graph.nodes() # Index new nodes. new_nodes = list(set(nodes) - set(self.node_index.values())) start = max(len(self.node_index) - 1, max(self.node_index.keys())) for i in xrange(start, start + len(new_nodes)): n = new_nodes.pop() self.node_index[i], self.node_lookup[n] = n, i self.graphs_containing[n].append(name) # Relabel nodes in `graph`. new_labels = {n: self.node_lookup[n] for n in nodes} indexed_graph = nx.relabel.relabel_nodes(graph, new_labels, copy=True) return indexed_graph
python
def index(self, name, graph): """ Index any new nodes in `graph`, and relabel the nodes in `graph` using the index. Parameters ---------- name : hashable Unique name used to identify the `graph`. graph : networkx.Graph Returns ------- indexed_graph : networkx.Graph """ nodes = graph.nodes() # Index new nodes. new_nodes = list(set(nodes) - set(self.node_index.values())) start = max(len(self.node_index) - 1, max(self.node_index.keys())) for i in xrange(start, start + len(new_nodes)): n = new_nodes.pop() self.node_index[i], self.node_lookup[n] = n, i self.graphs_containing[n].append(name) # Relabel nodes in `graph`. new_labels = {n: self.node_lookup[n] for n in nodes} indexed_graph = nx.relabel.relabel_nodes(graph, new_labels, copy=True) return indexed_graph
[ "def", "index", "(", "self", ",", "name", ",", "graph", ")", ":", "nodes", "=", "graph", ".", "nodes", "(", ")", "# Index new nodes.", "new_nodes", "=", "list", "(", "set", "(", "nodes", ")", "-", "set", "(", "self", ".", "node_index", ".", "values", "(", ")", ")", ")", "start", "=", "max", "(", "len", "(", "self", ".", "node_index", ")", "-", "1", ",", "max", "(", "self", ".", "node_index", ".", "keys", "(", ")", ")", ")", "for", "i", "in", "xrange", "(", "start", ",", "start", "+", "len", "(", "new_nodes", ")", ")", ":", "n", "=", "new_nodes", ".", "pop", "(", ")", "self", ".", "node_index", "[", "i", "]", ",", "self", ".", "node_lookup", "[", "n", "]", "=", "n", ",", "i", "self", ".", "graphs_containing", "[", "n", "]", ".", "append", "(", "name", ")", "# Relabel nodes in `graph`.", "new_labels", "=", "{", "n", ":", "self", ".", "node_lookup", "[", "n", "]", "for", "n", "in", "nodes", "}", "indexed_graph", "=", "nx", ".", "relabel", ".", "relabel_nodes", "(", "graph", ",", "new_labels", ",", "copy", "=", "True", ")", "return", "indexed_graph" ]
Index any new nodes in `graph`, and relabel the nodes in `graph` using the index. Parameters ---------- name : hashable Unique name used to identify the `graph`. graph : networkx.Graph Returns ------- indexed_graph : networkx.Graph
[ "Index", "any", "new", "nodes", "in", "graph", "and", "relabel", "the", "nodes", "in", "graph", "using", "the", "index", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/graphcollection.py#L159-L188
train
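The indexing step above amounts to building a label-to-integer lookup and relabelling a copy of the graph; that move can be checked with networkx alone (the toy graph is invented, and this is not the GraphCollection API itself):

import networkx as nx

g = nx.Graph([('smith', 'jones'), ('jones', 'lee')])
node_lookup = {name: i for i, name in enumerate(sorted(g.nodes()))}   # label -> integer id
indexed = nx.relabel_nodes(g, node_lookup, copy=True)
print(node_lookup, sorted(indexed.edges()))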
diging/tethne
tethne/networks/topics.py
terms
def terms(model, threshold=0.01, **kwargs): """ Two terms are coupled if the posterior probability for both terms is greather than ``threshold`` for the same topic. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: 0.01 kwargs : kwargs Passed on to :func:`.cooccurrence`\. Returns ------- :ref:`networkx.Graph <networkx:graph>` """ select = lambda f, v, c, dc: v > threshold graph = cooccurrence(model.phi, filter=select, **kwargs) # Only include labels for terms that are actually in the graph. label_map = {k: v for k, v in model.vocabulary.items() if k in graph.nodes()} graph.name = '' return networkx.relabel_nodes(graph, label_map)
python
def terms(model, threshold=0.01, **kwargs): """ Two terms are coupled if the posterior probability for both terms is greather than ``threshold`` for the same topic. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: 0.01 kwargs : kwargs Passed on to :func:`.cooccurrence`\. Returns ------- :ref:`networkx.Graph <networkx:graph>` """ select = lambda f, v, c, dc: v > threshold graph = cooccurrence(model.phi, filter=select, **kwargs) # Only include labels for terms that are actually in the graph. label_map = {k: v for k, v in model.vocabulary.items() if k in graph.nodes()} graph.name = '' return networkx.relabel_nodes(graph, label_map)
[ "def", "terms", "(", "model", ",", "threshold", "=", "0.01", ",", "*", "*", "kwargs", ")", ":", "select", "=", "lambda", "f", ",", "v", ",", "c", ",", "dc", ":", "v", ">", "threshold", "graph", "=", "cooccurrence", "(", "model", ".", "phi", ",", "filter", "=", "select", ",", "*", "*", "kwargs", ")", "# Only include labels for terms that are actually in the graph.", "label_map", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "model", ".", "vocabulary", ".", "items", "(", ")", "if", "k", "in", "graph", ".", "nodes", "(", ")", "}", "graph", ".", "name", "=", "''", "return", "networkx", ".", "relabel_nodes", "(", "graph", ",", "label_map", ")" ]
Two terms are coupled if the posterior probability for both terms is greater than ``threshold`` for the same topic. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: 0.01 kwargs : kwargs Passed on to :func:`.cooccurrence`\. Returns ------- :ref:`networkx.Graph <networkx:graph>`
[ "Two", "terms", "are", "coupled", "if", "the", "posterior", "probability", "for", "both", "terms", "is", "greather", "than", "threshold", "for", "the", "same", "topic", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/topics.py#L24-L50
train
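The selection rule above, keep a term for a topic when its probability clears the threshold and then link the survivors pairwise, can be illustrated with a tiny hand-written topic-term table (probabilities invented):

from itertools import combinations

phi = {0: {'gene': 0.12, 'protein': 0.08, 'cell': 0.005},
       1: {'network': 0.20, 'graph': 0.15, 'gene': 0.004}}
threshold = 0.01
edges = set()
for topic, probs in phi.items():
    kept = [t for t, p in probs.items() if p > threshold]
    edges.update(combinations(sorted(kept), 2))
print(sorted(edges))   # [('gene', 'protein'), ('graph', 'network')]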
diging/tethne
tethne/networks/topics.py
topic_coupling
def topic_coupling(model, threshold=None, **kwargs): """ Two papers are coupled if they both contain a shared topic above a ``threshold``. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: ``3./model.Z`` kwargs : kwargs Passed on to :func:`.coupling`\. Returns ------- :ref:`networkx.Graph <networkx:graph>` """ if not threshold: threshold = 3./model.Z select = lambda f, v, c, dc: v > threshold graph = coupling(model.corpus, 'topics', filter=select, **kwargs) graph.name = '' return graph
python
def topic_coupling(model, threshold=None, **kwargs): """ Two papers are coupled if they both contain a shared topic above a ``threshold``. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: ``3./model.Z`` kwargs : kwargs Passed on to :func:`.coupling`\. Returns ------- :ref:`networkx.Graph <networkx:graph>` """ if not threshold: threshold = 3./model.Z select = lambda f, v, c, dc: v > threshold graph = coupling(model.corpus, 'topics', filter=select, **kwargs) graph.name = '' return graph
[ "def", "topic_coupling", "(", "model", ",", "threshold", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "threshold", ":", "threshold", "=", "3.", "/", "model", ".", "Z", "select", "=", "lambda", "f", ",", "v", ",", "c", ",", "dc", ":", "v", ">", "threshold", "graph", "=", "coupling", "(", "model", ".", "corpus", ",", "'topics'", ",", "filter", "=", "select", ",", "*", "*", "kwargs", ")", "graph", ".", "name", "=", "''", "return", "graph" ]
Two papers are coupled if they both contain a shared topic above a ``threshold``. Parameters ---------- model : :class:`.LDAModel` threshold : float Default: ``3./model.Z`` kwargs : kwargs Passed on to :func:`.coupling`\. Returns ------- :ref:`networkx.Graph <networkx:graph>`
[ "Two", "papers", "are", "coupled", "if", "they", "both", "contain", "a", "shared", "topic", "above", "a", "threshold", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/topics.py#L53-L77
train
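topic_coupling applies the same cut-off to paper-topic weights: with Z topics the default threshold is 3./Z, and two papers are linked when some topic clears it for both. A hand-computed illustration (weights invented; Z = 20, so the threshold is 0.15):

from itertools import combinations

Z = 20
threshold = 3.0 / Z
theta = {'p1': {3: 0.40, 7: 0.10},
         'p2': {3: 0.30, 5: 0.20},
         'p3': {5: 0.05}}
edges = {}
for a, b in combinations(sorted(theta), 2):
    shared = [z for z in theta[a]
              if z in theta[b] and theta[a][z] > threshold and theta[b][z] > threshold]
    if shared:
        edges[(a, b)] = shared
print(edges)   # {('p1', 'p2'): [3]}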
diging/tethne
tethne/analyze/features.py
kl_divergence
def kl_divergence(V_a, V_b): """ Calculate Kullback-Leibler distance. Uses the smoothing method described in `Bigi 2003 <http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_ to facilitate better comparisons between vectors describing wordcounts. Parameters ---------- V_a : list V_b : list Returns ------- divergence : float KL divergence. """ # Find shared features. Ndiff = _shared_features(V_a, V_b) # aprob and bprob should each sum to 1.0 aprob = map(lambda v: float(v)/sum(V_a), V_a) bprob = map(lambda v: float(v)/sum(V_b), V_b) # Smooth according to Bigi 2003. aprob, bprob = _smooth(aprob, bprob, Ndiff) return sum(map(lambda a, b: (a-b)*log(a/b), aprob, bprob))
python
def kl_divergence(V_a, V_b): """ Calculate Kullback-Leibler distance. Uses the smoothing method described in `Bigi 2003 <http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_ to facilitate better comparisons between vectors describing wordcounts. Parameters ---------- V_a : list V_b : list Returns ------- divergence : float KL divergence. """ # Find shared features. Ndiff = _shared_features(V_a, V_b) # aprob and bprob should each sum to 1.0 aprob = map(lambda v: float(v)/sum(V_a), V_a) bprob = map(lambda v: float(v)/sum(V_b), V_b) # Smooth according to Bigi 2003. aprob, bprob = _smooth(aprob, bprob, Ndiff) return sum(map(lambda a, b: (a-b)*log(a/b), aprob, bprob))
[ "def", "kl_divergence", "(", "V_a", ",", "V_b", ")", ":", "# Find shared features.", "Ndiff", "=", "_shared_features", "(", "V_a", ",", "V_b", ")", "# aprob and bprob should each sum to 1.0", "aprob", "=", "map", "(", "lambda", "v", ":", "float", "(", "v", ")", "/", "sum", "(", "V_a", ")", ",", "V_a", ")", "bprob", "=", "map", "(", "lambda", "v", ":", "float", "(", "v", ")", "/", "sum", "(", "V_b", ")", ",", "V_b", ")", "# Smooth according to Bigi 2003.", "aprob", ",", "bprob", "=", "_smooth", "(", "aprob", ",", "bprob", ",", "Ndiff", ")", "return", "sum", "(", "map", "(", "lambda", "a", ",", "b", ":", "(", "a", "-", "b", ")", "*", "log", "(", "a", "/", "b", ")", ",", "aprob", ",", "bprob", ")", ")" ]
Calculate Kullback-Leibler distance. Uses the smoothing method described in `Bigi 2003 <http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_ to facilitate better comparisons between vectors describing wordcounts. Parameters ---------- V_a : list V_b : list Returns ------- divergence : float KL divergence.
[ "Calculate", "Kullback", "-", "Leibler", "distance", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/features.py#L18-L47
train
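A self-contained numeric check of the divergence formula used above, sum((a - b) * log(a / b)) over the normalised vectors. The counts are invented and contain no zeros, so the Bigi-style smoothing step is not needed here:

from math import log

V_a = [3, 1, 2, 2]
V_b = [2, 2, 1, 1]
aprob = [v / float(sum(V_a)) for v in V_a]   # normalise to probabilities
bprob = [v / float(sum(V_b)) for v in V_b]
divergence = sum((a - b) * log(a / b) for a, b in zip(aprob, bprob))
print(round(divergence, 4))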
diging/tethne
tethne/analyze/features.py
_shared_features
def _shared_features(adense, bdense): """ Number of features in ``adense`` that are also in ``bdense``. """ a_indices = set(nonzero(adense)) b_indices = set(nonzero(bdense)) shared = list(a_indices & b_indices) diff = list(a_indices - b_indices) Ndiff = len(diff) return Ndiff
python
def _shared_features(adense, bdense): """ Number of features in ``adense`` that are also in ``bdense``. """ a_indices = set(nonzero(adense)) b_indices = set(nonzero(bdense)) shared = list(a_indices & b_indices) diff = list(a_indices - b_indices) Ndiff = len(diff) return Ndiff
[ "def", "_shared_features", "(", "adense", ",", "bdense", ")", ":", "a_indices", "=", "set", "(", "nonzero", "(", "adense", ")", ")", "b_indices", "=", "set", "(", "nonzero", "(", "bdense", ")", ")", "shared", "=", "list", "(", "a_indices", "&", "b_indices", ")", "diff", "=", "list", "(", "a_indices", "-", "b_indices", ")", "Ndiff", "=", "len", "(", "diff", ")", "return", "Ndiff" ]
Number of features in ``adense`` that are also in ``bdense``.
[ "Number", "of", "features", "in", "adense", "that", "are", "also", "in", "bdense", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/features.py#L100-L111
train
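The same count, written against plain lists so it runs on its own (nonzero positions stand in for features that are present; the numbers are made up):

adense = [0, 2, 0, 5, 1]
bdense = [1, 0, 0, 5, 3]
a_indices = {i for i, v in enumerate(adense) if v != 0}
b_indices = {i for i, v in enumerate(bdense) if v != 0}
print(len(a_indices - b_indices))   # features in a that b lacks -> 1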
diging/tethne
tethne/networks/base.py
cooccurrence
def cooccurrence(corpus_or_featureset, featureset_name=None, min_weight=1, edge_attrs=['ayjid', 'date'], filter=None): """ A network of feature elements linked by their joint occurrence in papers. """ if not filter: filter = lambda f, v, c, dc: dc >= min_weight featureset = _get_featureset(corpus_or_featureset, featureset_name) if type(corpus_or_featureset) in [Corpus, StreamingCorpus]: attributes = {i: {a: corpus_or_featureset.indices_lookup[i][a] for a in edge_attrs} for i in corpus_or_featureset.indexed_papers.keys()} c = lambda f: featureset.count(f) # Overall count. dc = lambda f: featureset.documentCount(f) # Document count. attributes = {} # select applies filter to the elements in a (Structured)Feature. The # iteration behavior of Feature and StructuredFeature are different, as is # the manner in which the count for an element in each (Structured)Feature. if type(featureset) is FeatureSet: select = lambda feature: [f for f, v in feature if filter(f, v, c(f), dc(f))] elif type(featureset) is StructuredFeatureSet: select = lambda feature: [f for f in feature if filter(f, feature.count(f), c(f), dc(f))] pairs = Counter() eattrs = defaultdict(dict) nattrs = defaultdict(dict) nset = set() for paper, feature in featureset.iteritems(): if len(feature) == 0: continue selected = select(feature) nset |= set(selected) for combo in combinations(selected, 2): combo = tuple(sorted(combo)) pairs[combo] += 1 if paper in attributes: eattrs[combo] = attributes[paper] # Generate node attributes. for n in list(nset): nattrs[n]['count'] = featureset.count(n) nattrs[n]['documentCount'] = featureset.documentCount(n) return _generate_graph(nx.Graph, pairs, edge_attrs=eattrs, node_attrs=nattrs, min_weight=min_weight)
python
def cooccurrence(corpus_or_featureset, featureset_name=None, min_weight=1, edge_attrs=['ayjid', 'date'], filter=None): """ A network of feature elements linked by their joint occurrence in papers. """ if not filter: filter = lambda f, v, c, dc: dc >= min_weight featureset = _get_featureset(corpus_or_featureset, featureset_name) if type(corpus_or_featureset) in [Corpus, StreamingCorpus]: attributes = {i: {a: corpus_or_featureset.indices_lookup[i][a] for a in edge_attrs} for i in corpus_or_featureset.indexed_papers.keys()} c = lambda f: featureset.count(f) # Overall count. dc = lambda f: featureset.documentCount(f) # Document count. attributes = {} # select applies filter to the elements in a (Structured)Feature. The # iteration behavior of Feature and StructuredFeature are different, as is # the manner in which the count for an element in each (Structured)Feature. if type(featureset) is FeatureSet: select = lambda feature: [f for f, v in feature if filter(f, v, c(f), dc(f))] elif type(featureset) is StructuredFeatureSet: select = lambda feature: [f for f in feature if filter(f, feature.count(f), c(f), dc(f))] pairs = Counter() eattrs = defaultdict(dict) nattrs = defaultdict(dict) nset = set() for paper, feature in featureset.iteritems(): if len(feature) == 0: continue selected = select(feature) nset |= set(selected) for combo in combinations(selected, 2): combo = tuple(sorted(combo)) pairs[combo] += 1 if paper in attributes: eattrs[combo] = attributes[paper] # Generate node attributes. for n in list(nset): nattrs[n]['count'] = featureset.count(n) nattrs[n]['documentCount'] = featureset.documentCount(n) return _generate_graph(nx.Graph, pairs, edge_attrs=eattrs, node_attrs=nattrs, min_weight=min_weight)
[ "def", "cooccurrence", "(", "corpus_or_featureset", ",", "featureset_name", "=", "None", ",", "min_weight", "=", "1", ",", "edge_attrs", "=", "[", "'ayjid'", ",", "'date'", "]", ",", "filter", "=", "None", ")", ":", "if", "not", "filter", ":", "filter", "=", "lambda", "f", ",", "v", ",", "c", ",", "dc", ":", "dc", ">=", "min_weight", "featureset", "=", "_get_featureset", "(", "corpus_or_featureset", ",", "featureset_name", ")", "if", "type", "(", "corpus_or_featureset", ")", "in", "[", "Corpus", ",", "StreamingCorpus", "]", ":", "attributes", "=", "{", "i", ":", "{", "a", ":", "corpus_or_featureset", ".", "indices_lookup", "[", "i", "]", "[", "a", "]", "for", "a", "in", "edge_attrs", "}", "for", "i", "in", "corpus_or_featureset", ".", "indexed_papers", ".", "keys", "(", ")", "}", "c", "=", "lambda", "f", ":", "featureset", ".", "count", "(", "f", ")", "# Overall count.", "dc", "=", "lambda", "f", ":", "featureset", ".", "documentCount", "(", "f", ")", "# Document count.", "attributes", "=", "{", "}", "# select applies filter to the elements in a (Structured)Feature. The", "# iteration behavior of Feature and StructuredFeature are different, as is", "# the manner in which the count for an element in each (Structured)Feature.", "if", "type", "(", "featureset", ")", "is", "FeatureSet", ":", "select", "=", "lambda", "feature", ":", "[", "f", "for", "f", ",", "v", "in", "feature", "if", "filter", "(", "f", ",", "v", ",", "c", "(", "f", ")", ",", "dc", "(", "f", ")", ")", "]", "elif", "type", "(", "featureset", ")", "is", "StructuredFeatureSet", ":", "select", "=", "lambda", "feature", ":", "[", "f", "for", "f", "in", "feature", "if", "filter", "(", "f", ",", "feature", ".", "count", "(", "f", ")", ",", "c", "(", "f", ")", ",", "dc", "(", "f", ")", ")", "]", "pairs", "=", "Counter", "(", ")", "eattrs", "=", "defaultdict", "(", "dict", ")", "nattrs", "=", "defaultdict", "(", "dict", ")", "nset", "=", "set", "(", ")", "for", "paper", ",", "feature", "in", "featureset", ".", "iteritems", "(", ")", ":", "if", "len", "(", "feature", ")", "==", "0", ":", "continue", "selected", "=", "select", "(", "feature", ")", "nset", "|=", "set", "(", "selected", ")", "for", "combo", "in", "combinations", "(", "selected", ",", "2", ")", ":", "combo", "=", "tuple", "(", "sorted", "(", "combo", ")", ")", "pairs", "[", "combo", "]", "+=", "1", "if", "paper", "in", "attributes", ":", "eattrs", "[", "combo", "]", "=", "attributes", "[", "paper", "]", "# Generate node attributes.", "for", "n", "in", "list", "(", "nset", ")", ":", "nattrs", "[", "n", "]", "[", "'count'", "]", "=", "featureset", ".", "count", "(", "n", ")", "nattrs", "[", "n", "]", "[", "'documentCount'", "]", "=", "featureset", ".", "documentCount", "(", "n", ")", "return", "_generate_graph", "(", "nx", ".", "Graph", ",", "pairs", ",", "edge_attrs", "=", "eattrs", ",", "node_attrs", "=", "nattrs", ",", "min_weight", "=", "min_weight", ")" ]
A network of feature elements linked by their joint occurrence in papers.
[ "A", "network", "of", "feature", "elements", "linked", "by", "their", "joint", "occurrence", "in", "papers", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L39-L93
train
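The pair-counting core of cooccurrence can be reproduced with the standard library only: each document contributes one count to every pair of feature elements that survive the filter. Everything below is invented illustration, not the tethne API:

from collections import Counter
from itertools import combinations

docs = {'p1': ['network', 'topic', 'model'],
        'p2': ['network', 'model'],
        'p3': ['topic', 'model']}
pairs = Counter()
for paper, selected in docs.items():
    for combo in combinations(sorted(selected), 2):
        pairs[combo] += 1
print(pairs.most_common(3))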
diging/tethne
tethne/networks/base.py
coupling
def coupling(corpus_or_featureset, featureset_name=None, min_weight=1, filter=lambda f, v, c, dc: True, node_attrs=[]): """ A network of papers linked by their joint posession of features. """ featureset = _get_featureset(corpus_or_featureset, featureset_name) c = lambda f: featureset.count(f) # Overall count. dc = lambda f: featureset.documentCount(f) # Document count. f = lambda elem: featureset.index[elem] v = lambda p, f: featureset.features[p].value(f) select = lambda p, elem: filter(f(elem), v(p, f(elem)), c(f(elem)), dc(f(elem))) pairs = defaultdict(list) for elem, papers in featureset.with_feature.iteritems(): selected = [p for p in papers if select(p, elem)] for combo in combinations(selected, 2): combo = tuple(sorted(combo)) pairs[combo].append(featureset.index[elem]) graph = nx.Graph() for combo, features in pairs.iteritems(): count = len(features) if count >= min_weight: graph.add_edge(combo[0], combo[1], features=features, weight=count) # Add node attributes. for attr in node_attrs: for node in graph.nodes(): value = '' if node in corpus_or_featureset: paper = corpus_or_featureset[node] if hasattr(paper, attr): value = getattr(paper, attr) if value is None: value = '' elif callable(value): value = value() graph.node[node][attr] = value return graph
python
def coupling(corpus_or_featureset, featureset_name=None, min_weight=1, filter=lambda f, v, c, dc: True, node_attrs=[]): """ A network of papers linked by their joint posession of features. """ featureset = _get_featureset(corpus_or_featureset, featureset_name) c = lambda f: featureset.count(f) # Overall count. dc = lambda f: featureset.documentCount(f) # Document count. f = lambda elem: featureset.index[elem] v = lambda p, f: featureset.features[p].value(f) select = lambda p, elem: filter(f(elem), v(p, f(elem)), c(f(elem)), dc(f(elem))) pairs = defaultdict(list) for elem, papers in featureset.with_feature.iteritems(): selected = [p for p in papers if select(p, elem)] for combo in combinations(selected, 2): combo = tuple(sorted(combo)) pairs[combo].append(featureset.index[elem]) graph = nx.Graph() for combo, features in pairs.iteritems(): count = len(features) if count >= min_weight: graph.add_edge(combo[0], combo[1], features=features, weight=count) # Add node attributes. for attr in node_attrs: for node in graph.nodes(): value = '' if node in corpus_or_featureset: paper = corpus_or_featureset[node] if hasattr(paper, attr): value = getattr(paper, attr) if value is None: value = '' elif callable(value): value = value() graph.node[node][attr] = value return graph
[ "def", "coupling", "(", "corpus_or_featureset", ",", "featureset_name", "=", "None", ",", "min_weight", "=", "1", ",", "filter", "=", "lambda", "f", ",", "v", ",", "c", ",", "dc", ":", "True", ",", "node_attrs", "=", "[", "]", ")", ":", "featureset", "=", "_get_featureset", "(", "corpus_or_featureset", ",", "featureset_name", ")", "c", "=", "lambda", "f", ":", "featureset", ".", "count", "(", "f", ")", "# Overall count.", "dc", "=", "lambda", "f", ":", "featureset", ".", "documentCount", "(", "f", ")", "# Document count.", "f", "=", "lambda", "elem", ":", "featureset", ".", "index", "[", "elem", "]", "v", "=", "lambda", "p", ",", "f", ":", "featureset", ".", "features", "[", "p", "]", ".", "value", "(", "f", ")", "select", "=", "lambda", "p", ",", "elem", ":", "filter", "(", "f", "(", "elem", ")", ",", "v", "(", "p", ",", "f", "(", "elem", ")", ")", ",", "c", "(", "f", "(", "elem", ")", ")", ",", "dc", "(", "f", "(", "elem", ")", ")", ")", "pairs", "=", "defaultdict", "(", "list", ")", "for", "elem", ",", "papers", "in", "featureset", ".", "with_feature", ".", "iteritems", "(", ")", ":", "selected", "=", "[", "p", "for", "p", "in", "papers", "if", "select", "(", "p", ",", "elem", ")", "]", "for", "combo", "in", "combinations", "(", "selected", ",", "2", ")", ":", "combo", "=", "tuple", "(", "sorted", "(", "combo", ")", ")", "pairs", "[", "combo", "]", ".", "append", "(", "featureset", ".", "index", "[", "elem", "]", ")", "graph", "=", "nx", ".", "Graph", "(", ")", "for", "combo", ",", "features", "in", "pairs", ".", "iteritems", "(", ")", ":", "count", "=", "len", "(", "features", ")", "if", "count", ">=", "min_weight", ":", "graph", ".", "add_edge", "(", "combo", "[", "0", "]", ",", "combo", "[", "1", "]", ",", "features", "=", "features", ",", "weight", "=", "count", ")", "# Add node attributes.", "for", "attr", "in", "node_attrs", ":", "for", "node", "in", "graph", ".", "nodes", "(", ")", ":", "value", "=", "''", "if", "node", "in", "corpus_or_featureset", ":", "paper", "=", "corpus_or_featureset", "[", "node", "]", "if", "hasattr", "(", "paper", ",", "attr", ")", ":", "value", "=", "getattr", "(", "paper", ",", "attr", ")", "if", "value", "is", "None", ":", "value", "=", "''", "elif", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "graph", ".", "node", "[", "node", "]", "[", "attr", "]", "=", "value", "return", "graph" ]
A network of papers linked by their joint possession of features.
[ "A", "network", "of", "papers", "linked", "by", "their", "joint", "posession", "of", "features", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L97-L140
train
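coupling is the mirror image: papers become the nodes and each shared feature adds one to the edge weight. A runnable sketch with invented data:

from collections import defaultdict
from itertools import combinations
import networkx as nx

with_feature = {'CITED_A_1990': ['p1', 'p2'],
                'CITED_B_2001': ['p1', 'p2', 'p3']}
pairs = defaultdict(list)
for feature, papers in with_feature.items():
    for combo in combinations(sorted(papers), 2):
        pairs[combo].append(feature)
graph = nx.Graph()
for (u, v), features in pairs.items():
    graph.add_edge(u, v, weight=len(features), features=features)
print(list(graph.edges(data='weight')))   # p1-p2 has weight 2, the other pairs weight 1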
diging/tethne
tethne/networks/base.py
multipartite
def multipartite(corpus, featureset_names, min_weight=1, filters={}): """ A network of papers and one or more featuresets. """ pairs = Counter() node_type = {corpus._generate_index(p): {'type': 'paper'} for p in corpus.papers} for featureset_name in featureset_names: ftypes = {} featureset = _get_featureset(corpus, featureset_name) for paper, feature in featureset.iteritems(): if featureset_name in filters: if not filters[featureset_name](featureset, feature): continue if len(feature) < 1: continue for f in list(zip(*feature))[0]: ftypes[f] = {'type': featureset_name} pairs[(paper, f)] += 1 node_type.update(ftypes) return _generate_graph(nx.DiGraph, pairs, node_attrs=node_type, min_weight=min_weight)
python
def multipartite(corpus, featureset_names, min_weight=1, filters={}): """ A network of papers and one or more featuresets. """ pairs = Counter() node_type = {corpus._generate_index(p): {'type': 'paper'} for p in corpus.papers} for featureset_name in featureset_names: ftypes = {} featureset = _get_featureset(corpus, featureset_name) for paper, feature in featureset.iteritems(): if featureset_name in filters: if not filters[featureset_name](featureset, feature): continue if len(feature) < 1: continue for f in list(zip(*feature))[0]: ftypes[f] = {'type': featureset_name} pairs[(paper, f)] += 1 node_type.update(ftypes) return _generate_graph(nx.DiGraph, pairs, node_attrs=node_type, min_weight=min_weight)
[ "def", "multipartite", "(", "corpus", ",", "featureset_names", ",", "min_weight", "=", "1", ",", "filters", "=", "{", "}", ")", ":", "pairs", "=", "Counter", "(", ")", "node_type", "=", "{", "corpus", ".", "_generate_index", "(", "p", ")", ":", "{", "'type'", ":", "'paper'", "}", "for", "p", "in", "corpus", ".", "papers", "}", "for", "featureset_name", "in", "featureset_names", ":", "ftypes", "=", "{", "}", "featureset", "=", "_get_featureset", "(", "corpus", ",", "featureset_name", ")", "for", "paper", ",", "feature", "in", "featureset", ".", "iteritems", "(", ")", ":", "if", "featureset_name", "in", "filters", ":", "if", "not", "filters", "[", "featureset_name", "]", "(", "featureset", ",", "feature", ")", ":", "continue", "if", "len", "(", "feature", ")", "<", "1", ":", "continue", "for", "f", "in", "list", "(", "zip", "(", "*", "feature", ")", ")", "[", "0", "]", ":", "ftypes", "[", "f", "]", "=", "{", "'type'", ":", "featureset_name", "}", "pairs", "[", "(", "paper", ",", "f", ")", "]", "+=", "1", "node_type", ".", "update", "(", "ftypes", ")", "return", "_generate_graph", "(", "nx", ".", "DiGraph", ",", "pairs", ",", "node_attrs", "=", "node_type", ",", "min_weight", "=", "min_weight", ")" ]
A network of papers and one or more featuresets.
[ "A", "network", "of", "papers", "and", "one", "or", "more", "featuresets", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L143-L167
train
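multipartite instead yields a directed paper -> feature graph with a 'type' attribute on every node; a minimal hand-built version (papers and keywords invented):

import networkx as nx

g = nx.DiGraph()
for paper, keywords in [('p1', ['network', 'topic']), ('p2', ['network'])]:
    g.add_node(paper, type='paper')
    for kw in keywords:
        g.add_node(kw, type='keywords')
        g.add_edge(paper, kw, weight=1)
print(g.number_of_nodes(), g.number_of_edges())   # 4 nodes, 3 edges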
diging/tethne
tethne/utilities.py
_strip_punctuation
def _strip_punctuation(s): """ Removes all punctuation characters from a string. """ if type(s) is str and not PYTHON_3: # Bytestring (default in Python 2.x). return s.translate(string.maketrans("",""), string.punctuation) else: # Unicode string (default in Python 3.x). translate_table = dict((ord(char), u'') for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~') return s.translate(translate_table)
python
def _strip_punctuation(s): """ Removes all punctuation characters from a string. """ if type(s) is str and not PYTHON_3: # Bytestring (default in Python 2.x). return s.translate(string.maketrans("",""), string.punctuation) else: # Unicode string (default in Python 3.x). translate_table = dict((ord(char), u'') for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~') return s.translate(translate_table)
[ "def", "_strip_punctuation", "(", "s", ")", ":", "if", "type", "(", "s", ")", "is", "str", "and", "not", "PYTHON_3", ":", "# Bytestring (default in Python 2.x).", "return", "s", ".", "translate", "(", "string", ".", "maketrans", "(", "\"\"", ",", "\"\"", ")", ",", "string", ".", "punctuation", ")", "else", ":", "# Unicode string (default in Python 3.x).", "translate_table", "=", "dict", "(", "(", "ord", "(", "char", ")", ",", "u''", ")", "for", "char", "in", "u'!\"#%\\'()*+,-./:;<=>?@[\\]^_`{|}~'", ")", "return", "s", ".", "translate", "(", "translate_table", ")" ]
Removes all punctuation characters from a string.
[ "Removes", "all", "punctuation", "characters", "from", "a", "string", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L115-L123
train
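The Python 3 branch above relies on str.translate with a code point to empty string mapping; a quick standalone check on an invented string:

translate_table = {ord(char): '' for char in '!"#%\'()*+,-./:;<=>?@[\\]^_`{|}~'}
print('co-word (network), 2014!'.translate(translate_table))   # coword network 2014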
diging/tethne
tethne/utilities.py
overlap
def overlap(listA, listB): """ Return list of objects shared by listA, listB. """ if (listA is None) or (listB is None): return [] else: return list(set(listA) & set(listB))
python
def overlap(listA, listB): """ Return list of objects shared by listA, listB. """ if (listA is None) or (listB is None): return [] else: return list(set(listA) & set(listB))
[ "def", "overlap", "(", "listA", ",", "listB", ")", ":", "if", "(", "listA", "is", "None", ")", "or", "(", "listB", "is", "None", ")", ":", "return", "[", "]", "else", ":", "return", "list", "(", "set", "(", "listA", ")", "&", "set", "(", "listB", ")", ")" ]
Return list of objects shared by listA, listB.
[ "Return", "list", "of", "objects", "shared", "by", "listA", "listB", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L174-L181
train
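overlap is set intersection with the input order discarded; one line shows the behaviour (values invented):

print(sorted(set(['a', 'b', 'c']) & set(['b', 'c', 'd'])))   # ['b', 'c']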
diging/tethne
tethne/utilities.py
subdict
def subdict(super_dict, keys): """ Returns a subset of the super_dict with the specified keys. """ sub_dict = {} valid_keys = super_dict.keys() for key in keys: if key in valid_keys: sub_dict[key] = super_dict[key] return sub_dict
python
def subdict(super_dict, keys): """ Returns a subset of the super_dict with the specified keys. """ sub_dict = {} valid_keys = super_dict.keys() for key in keys: if key in valid_keys: sub_dict[key] = super_dict[key] return sub_dict
[ "def", "subdict", "(", "super_dict", ",", "keys", ")", ":", "sub_dict", "=", "{", "}", "valid_keys", "=", "super_dict", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "if", "key", "in", "valid_keys", ":", "sub_dict", "[", "key", "]", "=", "super_dict", "[", "key", "]", "return", "sub_dict" ]
Returns a subset of the super_dict with the specified keys.
[ "Returns", "a", "subset", "of", "the", "super_dict", "with", "the", "specified", "keys", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L184-L194
train
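subdict silently skips requested keys that are missing instead of raising; a dict comprehension reproduces that behaviour on invented data:

super_dict = {'title': 'Coral reefs', 'date': 1990, 'journal': 'Ecology'}
keys = ['title', 'doi']                                      # 'doi' is absent and is dropped
print({k: super_dict[k] for k in keys if k in super_dict})   # {'title': 'Coral reefs'}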
diging/tethne
tethne/utilities.py
concat_list
def concat_list(listA, listB, delim=' '): """ Concatenate list elements pair-wise with the delim character Returns the concatenated list Raises index error if lists are not parallel """ # Lists must be of equal length. if len(listA) != len(listB): raise IndexError('Input lists are not parallel.') # Concatenate lists. listC = [] for i in xrange(len(listA)): app = listA[i] + delim + listB[i] listC.append(app) return listC
python
def concat_list(listA, listB, delim=' '): """ Concatenate list elements pair-wise with the delim character Returns the concatenated list Raises index error if lists are not parallel """ # Lists must be of equal length. if len(listA) != len(listB): raise IndexError('Input lists are not parallel.') # Concatenate lists. listC = [] for i in xrange(len(listA)): app = listA[i] + delim + listB[i] listC.append(app) return listC
[ "def", "concat_list", "(", "listA", ",", "listB", ",", "delim", "=", "' '", ")", ":", "# Lists must be of equal length.", "if", "len", "(", "listA", ")", "!=", "len", "(", "listB", ")", ":", "raise", "IndexError", "(", "'Input lists are not parallel.'", ")", "# Concatenate lists.", "listC", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "listA", ")", ")", ":", "app", "=", "listA", "[", "i", "]", "+", "delim", "+", "listB", "[", "i", "]", "listC", ".", "append", "(", "app", ")", "return", "listC" ]
Concatenate list elements pair-wise with the delim character Returns the concatenated list Raises index error if lists are not parallel
[ "Concatenate", "list", "elements", "pair", "-", "wise", "with", "the", "delim", "character", "Returns", "the", "concatenated", "list", "Raises", "index", "error", "if", "lists", "are", "not", "parallel" ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L212-L229
train
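concat_list joins two parallel lists element-wise and raises IndexError on a length mismatch; the zip version below shows the normal case (names invented):

listA = ['SMITH', 'JONES']
listB = ['J', 'A']
print([a + ' ' + b for a, b in zip(listA, listB)])   # ['SMITH J', 'JONES A']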
diging/tethne
tethne/utilities.py
strip_non_ascii
def strip_non_ascii(s): """ Returns the string without non-ASCII characters. Parameters ---------- string : string A string that may contain non-ASCII characters. Returns ------- clean_string : string A string that does not contain non-ASCII characters. """ stripped = (c for c in s if 0 < ord(c) < 127) clean_string = u''.join(stripped) return clean_string
python
def strip_non_ascii(s): """ Returns the string without non-ASCII characters. Parameters ---------- string : string A string that may contain non-ASCII characters. Returns ------- clean_string : string A string that does not contain non-ASCII characters. """ stripped = (c for c in s if 0 < ord(c) < 127) clean_string = u''.join(stripped) return clean_string
[ "def", "strip_non_ascii", "(", "s", ")", ":", "stripped", "=", "(", "c", "for", "c", "in", "s", "if", "0", "<", "ord", "(", "c", ")", "<", "127", ")", "clean_string", "=", "u''", ".", "join", "(", "stripped", ")", "return", "clean_string" ]
Returns the string without non-ASCII characters. Parameters ---------- string : string A string that may contain non-ASCII characters. Returns ------- clean_string : string A string that does not contain non-ASCII characters.
[ "Returns", "the", "string", "without", "non", "-", "ASCII", "characters", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L231-L248
train
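strip_non_ascii keeps characters with code points 1 through 126 and drops everything else; a quick check with an accented name (invented):

s = 'Mu\u00f1oz 2014'                              # 'Muñoz 2014'
print(''.join(c for c in s if 0 < ord(c) < 127))   # 'Muoz 2014'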
diging/tethne
tethne/utilities.py
dict_from_node
def dict_from_node(node, recursive=False): """ Converts ElementTree node to a dictionary. Parameters ---------- node : ElementTree node recursive : boolean If recursive=False, the value of any field with children will be the number of children. Returns ------- dict : nested dictionary. Tags as keys and values as values. Sub-elements that occur multiple times in an element are contained in a list. """ dict = {} for snode in node: if len(snode) > 0: if recursive: # Will drill down until len(snode) <= 0. value = dict_from_node(snode, True) else: value = len(snode) elif snode.text is not None: value = snode.text else: value = u'' if snode.tag in dict.keys(): # If there are multiple subelements # with the same tag, then the value # of the element should be a list # rather than a dict. if type(dict[snode.tag]) is list: # If a list has already been # started, just append to # it. dict[snode.tag].append(value) else: dict[snode.tag] = [ dict[snode.tag], value ] else: dict[snode.tag] = value # Default behavior. return dict
python
def dict_from_node(node, recursive=False): """ Converts ElementTree node to a dictionary. Parameters ---------- node : ElementTree node recursive : boolean If recursive=False, the value of any field with children will be the number of children. Returns ------- dict : nested dictionary. Tags as keys and values as values. Sub-elements that occur multiple times in an element are contained in a list. """ dict = {} for snode in node: if len(snode) > 0: if recursive: # Will drill down until len(snode) <= 0. value = dict_from_node(snode, True) else: value = len(snode) elif snode.text is not None: value = snode.text else: value = u'' if snode.tag in dict.keys(): # If there are multiple subelements # with the same tag, then the value # of the element should be a list # rather than a dict. if type(dict[snode.tag]) is list: # If a list has already been # started, just append to # it. dict[snode.tag].append(value) else: dict[snode.tag] = [ dict[snode.tag], value ] else: dict[snode.tag] = value # Default behavior. return dict
[ "def", "dict_from_node", "(", "node", ",", "recursive", "=", "False", ")", ":", "dict", "=", "{", "}", "for", "snode", "in", "node", ":", "if", "len", "(", "snode", ")", ">", "0", ":", "if", "recursive", ":", "# Will drill down until len(snode) <= 0.", "value", "=", "dict_from_node", "(", "snode", ",", "True", ")", "else", ":", "value", "=", "len", "(", "snode", ")", "elif", "snode", ".", "text", "is", "not", "None", ":", "value", "=", "snode", ".", "text", "else", ":", "value", "=", "u''", "if", "snode", ".", "tag", "in", "dict", ".", "keys", "(", ")", ":", "# If there are multiple subelements", "# with the same tag, then the value", "# of the element should be a list", "# rather than a dict.", "if", "type", "(", "dict", "[", "snode", ".", "tag", "]", ")", "is", "list", ":", "# If a list has already been", "# started, just append to", "# it.", "dict", "[", "snode", ".", "tag", "]", ".", "append", "(", "value", ")", "else", ":", "dict", "[", "snode", ".", "tag", "]", "=", "[", "dict", "[", "snode", ".", "tag", "]", ",", "value", "]", "else", ":", "dict", "[", "snode", ".", "tag", "]", "=", "value", "# Default behavior.", "return", "dict" ]
Converts ElementTree node to a dictionary. Parameters ---------- node : ElementTree node recursive : boolean If recursive=False, the value of any field with children will be the number of children. Returns ------- dict : nested dictionary. Tags as keys and values as values. Sub-elements that occur multiple times in an element are contained in a list.
[ "Converts", "ElementTree", "node", "to", "a", "dictionary", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L255-L298
train
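The flattening rule described above, where a repeated tag collapses its values into a list, can be re-created against xml.etree for one level of nesting. This is a standalone re-implementation for the example, not the tethne function itself:

import xml.etree.ElementTree as ET

node = ET.fromstring('<paper><title>T</title><author>A</author><author>B</author></paper>')
out = {}
for snode in node:
    value = snode.text if snode.text is not None else ''
    if snode.tag in out:                       # repeated tag: promote the value to a list
        if isinstance(out[snode.tag], list):
            out[snode.tag].append(value)
        else:
            out[snode.tag] = [out[snode.tag], value]
    else:
        out[snode.tag] = value
print(out)   # {'title': 'T', 'author': ['A', 'B']}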
diging/tethne
tethne/utilities.py
MLStripper.feed
def feed(self, data): """ added this check as sometimes we are getting the data in integer format instead of string """ try: self.rawdata = self.rawdata + data except TypeError: data = unicode(data) self.rawdata = self.rawdata + data self.goahead(0)
python
def feed(self, data): """ added this check as sometimes we are getting the data in integer format instead of string """ try: self.rawdata = self.rawdata + data except TypeError: data = unicode(data) self.rawdata = self.rawdata + data self.goahead(0)
[ "def", "feed", "(", "self", ",", "data", ")", ":", "try", ":", "self", ".", "rawdata", "=", "self", ".", "rawdata", "+", "data", "except", "TypeError", ":", "data", "=", "unicode", "(", "data", ")", "self", ".", "rawdata", "=", "self", ".", "rawdata", "+", "data", "self", ".", "goahead", "(", "0", ")" ]
added this check as sometimes we are getting the data in integer format instead of string
[ "added", "this", "check", "as", "sometimes", "we", "are", "getting", "the", "data", "in", "integer", "format", "instead", "of", "string" ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L50-L60
train
diging/tethne
tethne/serialize/paper.py
Serialize.serializePaper
def serializePaper(self): """ This method creates a fixture for the "django-tethne_paper" model. Returns ------- paper_details in JSON format, which can written to a file. """ pid = tethnedao.getMaxPaperID(); papers_details = [] for paper in self.corpus: pid = pid + 1 paper_key = getattr(paper, Serialize.paper_source_map[self.source]) self.paperIdMap[paper_key] = pid paper_data = { "model": "django-tethne.paper", "pk": self.paperIdMap[paper_key], "fields": { "paper_id": paper_key, "corpus":self.corpus_id, "pub_date": getattr(paper, 'date', ''), "volume": getattr(paper, 'volume', ''), "title": getattr(paper, 'title', ''), "abstract": getattr(paper, 'abstract', ''), } } papers_details.append(paper_data) return papers_details
python
def serializePaper(self): """ This method creates a fixture for the "django-tethne_paper" model. Returns ------- paper_details in JSON format, which can written to a file. """ pid = tethnedao.getMaxPaperID(); papers_details = [] for paper in self.corpus: pid = pid + 1 paper_key = getattr(paper, Serialize.paper_source_map[self.source]) self.paperIdMap[paper_key] = pid paper_data = { "model": "django-tethne.paper", "pk": self.paperIdMap[paper_key], "fields": { "paper_id": paper_key, "corpus":self.corpus_id, "pub_date": getattr(paper, 'date', ''), "volume": getattr(paper, 'volume', ''), "title": getattr(paper, 'title', ''), "abstract": getattr(paper, 'abstract', ''), } } papers_details.append(paper_data) return papers_details
[ "def", "serializePaper", "(", "self", ")", ":", "pid", "=", "tethnedao", ".", "getMaxPaperID", "(", ")", "papers_details", "=", "[", "]", "for", "paper", "in", "self", ".", "corpus", ":", "pid", "=", "pid", "+", "1", "paper_key", "=", "getattr", "(", "paper", ",", "Serialize", ".", "paper_source_map", "[", "self", ".", "source", "]", ")", "self", ".", "paperIdMap", "[", "paper_key", "]", "=", "pid", "paper_data", "=", "{", "\"model\"", ":", "\"django-tethne.paper\"", ",", "\"pk\"", ":", "self", ".", "paperIdMap", "[", "paper_key", "]", ",", "\"fields\"", ":", "{", "\"paper_id\"", ":", "paper_key", ",", "\"corpus\"", ":", "self", ".", "corpus_id", ",", "\"pub_date\"", ":", "getattr", "(", "paper", ",", "'date'", ",", "''", ")", ",", "\"volume\"", ":", "getattr", "(", "paper", ",", "'volume'", ",", "''", ")", ",", "\"title\"", ":", "getattr", "(", "paper", ",", "'title'", ",", "''", ")", ",", "\"abstract\"", ":", "getattr", "(", "paper", ",", "'abstract'", ",", "''", ")", ",", "}", "}", "papers_details", ".", "append", "(", "paper_data", ")", "return", "papers_details" ]
This method creates a fixture for the "django-tethne_paper" model. Returns ------- paper_details in JSON format, which can be written to a file.
[ "This", "method", "creates", "a", "fixture", "for", "the", "django", "-", "tethne_paper", "model", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L108-L137
train
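Each row appended above is a Django fixture entry; with invented values the shape serialises like this:

import json

row = {"model": "django-tethne.paper",
       "pk": 1,
       "fields": {"paper_id": "SMITH_J_2014_ECOLOGY", "corpus": 1, "pub_date": 2014,
                  "volume": "95", "title": "An example title", "abstract": ""}}
print(json.dumps([row], indent=2))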
diging/tethne
tethne/serialize/paper.py
Serialize.serializeCitation
def serializeCitation(self): """ This method creates a fixture for the "django-tethne_citation" model. Returns ------- citation details which can be written to a file """ citation_details = [] citation_id = tethnedao.getMaxCitationID() for citation in self.corpus.features['citations'].index.values(): date_match = re.search(r'(\d+)', citation) if date_match is not None: date = date_match.group(1) if date_match is None: date_match = re.search(r"NONE", citation) date = date_match.group() first_author = citation.replace('_', ' ').split(date)[0].rstrip() journal = citation.replace('_', ' ').split(date)[1].lstrip() citation_key = citation if citation_key not in self.citationIdMap: citation_id += 1 self.citationIdMap[citation_key] = citation_id citation_data = { "model": "django-tethne.citation", "pk": citation_id, "fields": { "literal": citation, "journal": journal, "first_author": first_author, "date": date } } citation_details.append(citation_data) return citation_details
python
def serializeCitation(self): """ This method creates a fixture for the "django-tethne_citation" model. Returns ------- citation details which can be written to a file """ citation_details = [] citation_id = tethnedao.getMaxCitationID() for citation in self.corpus.features['citations'].index.values(): date_match = re.search(r'(\d+)', citation) if date_match is not None: date = date_match.group(1) if date_match is None: date_match = re.search(r"NONE", citation) date = date_match.group() first_author = citation.replace('_', ' ').split(date)[0].rstrip() journal = citation.replace('_', ' ').split(date)[1].lstrip() citation_key = citation if citation_key not in self.citationIdMap: citation_id += 1 self.citationIdMap[citation_key] = citation_id citation_data = { "model": "django-tethne.citation", "pk": citation_id, "fields": { "literal": citation, "journal": journal, "first_author": first_author, "date": date } } citation_details.append(citation_data) return citation_details
[ "def", "serializeCitation", "(", "self", ")", ":", "citation_details", "=", "[", "]", "citation_id", "=", "tethnedao", ".", "getMaxCitationID", "(", ")", "for", "citation", "in", "self", ".", "corpus", ".", "features", "[", "'citations'", "]", ".", "index", ".", "values", "(", ")", ":", "date_match", "=", "re", ".", "search", "(", "r'(\\d+)'", ",", "citation", ")", "if", "date_match", "is", "not", "None", ":", "date", "=", "date_match", ".", "group", "(", "1", ")", "if", "date_match", "is", "None", ":", "date_match", "=", "re", ".", "search", "(", "r\"NONE\"", ",", "citation", ")", "date", "=", "date_match", ".", "group", "(", ")", "first_author", "=", "citation", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "split", "(", "date", ")", "[", "0", "]", ".", "rstrip", "(", ")", "journal", "=", "citation", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "split", "(", "date", ")", "[", "1", "]", ".", "lstrip", "(", ")", "citation_key", "=", "citation", "if", "citation_key", "not", "in", "self", ".", "citationIdMap", ":", "citation_id", "+=", "1", "self", ".", "citationIdMap", "[", "citation_key", "]", "=", "citation_id", "citation_data", "=", "{", "\"model\"", ":", "\"django-tethne.citation\"", ",", "\"pk\"", ":", "citation_id", ",", "\"fields\"", ":", "{", "\"literal\"", ":", "citation", ",", "\"journal\"", ":", "journal", ",", "\"first_author\"", ":", "first_author", ",", "\"date\"", ":", "date", "}", "}", "citation_details", ".", "append", "(", "citation_data", ")", "return", "citation_details" ]
This method creates a fixture for the "django-tethne_citation" model. Returns ------- citation details which can be written to a file
[ "This", "method", "creates", "a", "fixture", "for", "the", "django", "-", "tethne_citation", "model", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L210-L246
train
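The literal-splitting logic above, year found by regex, author before it, journal after it, can be exercised on a made-up underscore-delimited citation:

import re

citation = 'SMITH_J_2010_J_HIST_BIOL'
date = re.search(r'(\d+)', citation).group(1)
first_author = citation.replace('_', ' ').split(date)[0].rstrip()
journal = citation.replace('_', ' ').split(date)[1].lstrip()
print(date, first_author, journal, sep=' | ')   # 2010 | SMITH J | J HIST BIOL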
diging/tethne
tethne/serialize/paper.py
Serialize.serializeInstitution
def serializeInstitution(self): """ This method creates a fixture for the "django-tethne_citation_institution" model. Returns ------- institution details which can be written to a file """ institution_data = [] institution_instance_data = [] affiliation_data = [] affiliation_id = tethnedao.getMaxAffiliationID() institution_id = tethnedao.getMaxInstitutionID() institution_instance_id = tethnedao.getMaxInstitutionInstanceID() for paper in self.corpus: if hasattr(paper, 'authorAddress'): paper_key = getattr(paper, Serialize.paper_source_map[self.source]) if type(paper.authorAddress) is unicode: institution_id += 1 institution_instance_id += 1 institute_literal, authors = SerializeUtility.get_auth_inst(paper.authorAddress) institute_row, institute_instance_row = self.get_details_from_inst_literal(institute_literal, institution_id, institution_instance_id, paper_key) if institute_row: institution_data.append(institute_row) institution_instance_data.append(institute_instance_row) if authors: for author in authors: affiliation_id += 1 affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal) affiliation_data.append(affiliation_row) elif type(paper.authorAddress) is list: for address in paper.authorAddress: institution_id += 1 institution_instance_id += 1 institute_literal, authors = SerializeUtility.get_auth_inst(address) institute_row, institute_instance_row = self.get_details_from_inst_literal(institute_literal, institution_id, institution_instance_id, paper_key) if institute_row: institution_data.append(institute_row) institution_instance_data.append(institute_instance_row) if authors is None: authors = prevAuthors for author in authors: affiliation_id += 1 affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal) affiliation_data.append(affiliation_row) prevAuthors = authors return institution_data, institution_instance_data, affiliation_data
python
def serializeInstitution(self): """ This method creates a fixture for the "django-tethne_citation_institution" model. Returns ------- institution details which can be written to a file """ institution_data = [] institution_instance_data = [] affiliation_data = [] affiliation_id = tethnedao.getMaxAffiliationID() institution_id = tethnedao.getMaxInstitutionID() institution_instance_id = tethnedao.getMaxInstitutionInstanceID() for paper in self.corpus: if hasattr(paper, 'authorAddress'): paper_key = getattr(paper, Serialize.paper_source_map[self.source]) if type(paper.authorAddress) is unicode: institution_id += 1 institution_instance_id += 1 institute_literal, authors = SerializeUtility.get_auth_inst(paper.authorAddress) institute_row, institute_instance_row = self.get_details_from_inst_literal(institute_literal, institution_id, institution_instance_id, paper_key) if institute_row: institution_data.append(institute_row) institution_instance_data.append(institute_instance_row) if authors: for author in authors: affiliation_id += 1 affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal) affiliation_data.append(affiliation_row) elif type(paper.authorAddress) is list: for address in paper.authorAddress: institution_id += 1 institution_instance_id += 1 institute_literal, authors = SerializeUtility.get_auth_inst(address) institute_row, institute_instance_row = self.get_details_from_inst_literal(institute_literal, institution_id, institution_instance_id, paper_key) if institute_row: institution_data.append(institute_row) institution_instance_data.append(institute_instance_row) if authors is None: authors = prevAuthors for author in authors: affiliation_id += 1 affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal) affiliation_data.append(affiliation_row) prevAuthors = authors return institution_data, institution_instance_data, affiliation_data
[ "def", "serializeInstitution", "(", "self", ")", ":", "institution_data", "=", "[", "]", "institution_instance_data", "=", "[", "]", "affiliation_data", "=", "[", "]", "affiliation_id", "=", "tethnedao", ".", "getMaxAffiliationID", "(", ")", "institution_id", "=", "tethnedao", ".", "getMaxInstitutionID", "(", ")", "institution_instance_id", "=", "tethnedao", ".", "getMaxInstitutionInstanceID", "(", ")", "for", "paper", "in", "self", ".", "corpus", ":", "if", "hasattr", "(", "paper", ",", "'authorAddress'", ")", ":", "paper_key", "=", "getattr", "(", "paper", ",", "Serialize", ".", "paper_source_map", "[", "self", ".", "source", "]", ")", "if", "type", "(", "paper", ".", "authorAddress", ")", "is", "unicode", ":", "institution_id", "+=", "1", "institution_instance_id", "+=", "1", "institute_literal", ",", "authors", "=", "SerializeUtility", ".", "get_auth_inst", "(", "paper", ".", "authorAddress", ")", "institute_row", ",", "institute_instance_row", "=", "self", ".", "get_details_from_inst_literal", "(", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", "if", "institute_row", ":", "institution_data", ".", "append", "(", "institute_row", ")", "institution_instance_data", ".", "append", "(", "institute_instance_row", ")", "if", "authors", ":", "for", "author", "in", "authors", ":", "affiliation_id", "+=", "1", "affiliation_row", "=", "self", ".", "get_affiliation_details", "(", "author", ",", "affiliation_id", ",", "institute_literal", ")", "affiliation_data", ".", "append", "(", "affiliation_row", ")", "elif", "type", "(", "paper", ".", "authorAddress", ")", "is", "list", ":", "for", "address", "in", "paper", ".", "authorAddress", ":", "institution_id", "+=", "1", "institution_instance_id", "+=", "1", "institute_literal", ",", "authors", "=", "SerializeUtility", ".", "get_auth_inst", "(", "address", ")", "institute_row", ",", "institute_instance_row", "=", "self", ".", "get_details_from_inst_literal", "(", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", "if", "institute_row", ":", "institution_data", ".", "append", "(", "institute_row", ")", "institution_instance_data", ".", "append", "(", "institute_instance_row", ")", "if", "authors", "is", "None", ":", "authors", "=", "prevAuthors", "for", "author", "in", "authors", ":", "affiliation_id", "+=", "1", "affiliation_row", "=", "self", ".", "get_affiliation_details", "(", "author", ",", "affiliation_id", ",", "institute_literal", ")", "affiliation_data", ".", "append", "(", "affiliation_row", ")", "prevAuthors", "=", "authors", "return", "institution_data", ",", "institution_instance_data", ",", "affiliation_data" ]
This method creates a fixture for the "django-tethne_citation_institution" model. Returns ------- institution details which can be written to a file
[ "This", "method", "creates", "a", "fixture", "for", "the", "django", "-", "tethne_citation_institution", "model", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L289-L344
train
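Downstream of this method the address literals are split on commas, with the USA branch pulling state and ZIP out of the last field; a plain-Python sketch on an invented Web of Science style address:

literal = 'UNIV ARIZONA, DEPT ECOL & EVOLUTIONARY BIOL, TUCSON, AZ 85721 USA'
parts = literal.split(',')
institute_name = parts[0]
city = parts[-2].strip()
state, zipcode, country = parts[-1].split()    # three-token 'STATE ZIP USA' form
print(institute_name, city, state, zipcode, country, sep=' | ')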
diging/tethne
tethne/serialize/paper.py
Serialize.get_details_from_inst_literal
def get_details_from_inst_literal(self, institute_literal, institution_id, institution_instance_id, paper_key): """ This method parses the institute literal to get the following 1. Department naame 2. Country 3. University name 4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard may vary. So parsing these values becomes very difficult. However, the complete address can be found in the column "AddressLine1" Parameters ---------- institute_literal -> The literal value of the institute institution_id -> the Primary key value which is to be added in the fixture institution_instance_id -> Primary key value which is to be added in the fixture paper_key -> The Paper key which is used for the Institution Instance Returns ------- """ institute_details = institute_literal.split(',') institute_name = institute_details[0] country = institute_details[len(institute_details)-1].lstrip().replace('.', '') institute_row = None zipcode = "" state = "" city = "" if 'USA' in country: temp = country if(len(temp.split())) == 3: country = temp.split()[2] zipcode = temp.split()[1] state = temp.split()[0] elif(len(temp.split())) == 2: country = temp.split()[1] state = temp.split()[0] city = institute_details[len(institute_details)-2].lstrip() addressline1 = "" for i in range(1, len(institute_details)-1, 1): if i != len(institute_details)-2: addressline1 = addressline1 + institute_details[i]+',' else: addressline1 = addressline1 + institute_details[i] if institute_literal not in self.instituteIdMap: self.instituteIdMap[institute_literal] = institution_id institute_row = { "model": "django-tethne.institution", "pk": institution_id, "fields": { "institute_name": institute_name, "addressLine1": addressline1, "country": country, "zip": zipcode, "state": state, "city": city } } department = "" if re.search('Dept([^,]*),', institute_literal) is not None: department = re.search('Dept([^,]*),', institute_literal).group().replace(',', '') institute_instance_row = { "model": "django-tethne.institution_instance", "pk": institution_instance_id, "fields": { "institution": self.instituteIdMap[institute_literal], "literal": institute_literal, "institute_name": institute_name, "addressLine1": addressline1, "country": country, "paper": self.paperIdMap[paper_key], "department": department, "zip": zipcode, "state": state, "city": city } } return institute_row, institute_instance_row
python
def get_details_from_inst_literal(self, institute_literal, institution_id, institution_instance_id, paper_key): """ This method parses the institute literal to get the following 1. Department naame 2. Country 3. University name 4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard may vary. So parsing these values becomes very difficult. However, the complete address can be found in the column "AddressLine1" Parameters ---------- institute_literal -> The literal value of the institute institution_id -> the Primary key value which is to be added in the fixture institution_instance_id -> Primary key value which is to be added in the fixture paper_key -> The Paper key which is used for the Institution Instance Returns ------- """ institute_details = institute_literal.split(',') institute_name = institute_details[0] country = institute_details[len(institute_details)-1].lstrip().replace('.', '') institute_row = None zipcode = "" state = "" city = "" if 'USA' in country: temp = country if(len(temp.split())) == 3: country = temp.split()[2] zipcode = temp.split()[1] state = temp.split()[0] elif(len(temp.split())) == 2: country = temp.split()[1] state = temp.split()[0] city = institute_details[len(institute_details)-2].lstrip() addressline1 = "" for i in range(1, len(institute_details)-1, 1): if i != len(institute_details)-2: addressline1 = addressline1 + institute_details[i]+',' else: addressline1 = addressline1 + institute_details[i] if institute_literal not in self.instituteIdMap: self.instituteIdMap[institute_literal] = institution_id institute_row = { "model": "django-tethne.institution", "pk": institution_id, "fields": { "institute_name": institute_name, "addressLine1": addressline1, "country": country, "zip": zipcode, "state": state, "city": city } } department = "" if re.search('Dept([^,]*),', institute_literal) is not None: department = re.search('Dept([^,]*),', institute_literal).group().replace(',', '') institute_instance_row = { "model": "django-tethne.institution_instance", "pk": institution_instance_id, "fields": { "institution": self.instituteIdMap[institute_literal], "literal": institute_literal, "institute_name": institute_name, "addressLine1": addressline1, "country": country, "paper": self.paperIdMap[paper_key], "department": department, "zip": zipcode, "state": state, "city": city } } return institute_row, institute_instance_row
[ "def", "get_details_from_inst_literal", "(", "self", ",", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", ":", "institute_details", "=", "institute_literal", ".", "split", "(", "','", ")", "institute_name", "=", "institute_details", "[", "0", "]", "country", "=", "institute_details", "[", "len", "(", "institute_details", ")", "-", "1", "]", ".", "lstrip", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", "institute_row", "=", "None", "zipcode", "=", "\"\"", "state", "=", "\"\"", "city", "=", "\"\"", "if", "'USA'", "in", "country", ":", "temp", "=", "country", "if", "(", "len", "(", "temp", ".", "split", "(", ")", ")", ")", "==", "3", ":", "country", "=", "temp", ".", "split", "(", ")", "[", "2", "]", "zipcode", "=", "temp", ".", "split", "(", ")", "[", "1", "]", "state", "=", "temp", ".", "split", "(", ")", "[", "0", "]", "elif", "(", "len", "(", "temp", ".", "split", "(", ")", ")", ")", "==", "2", ":", "country", "=", "temp", ".", "split", "(", ")", "[", "1", "]", "state", "=", "temp", ".", "split", "(", ")", "[", "0", "]", "city", "=", "institute_details", "[", "len", "(", "institute_details", ")", "-", "2", "]", ".", "lstrip", "(", ")", "addressline1", "=", "\"\"", "for", "i", "in", "range", "(", "1", ",", "len", "(", "institute_details", ")", "-", "1", ",", "1", ")", ":", "if", "i", "!=", "len", "(", "institute_details", ")", "-", "2", ":", "addressline1", "=", "addressline1", "+", "institute_details", "[", "i", "]", "+", "','", "else", ":", "addressline1", "=", "addressline1", "+", "institute_details", "[", "i", "]", "if", "institute_literal", "not", "in", "self", ".", "instituteIdMap", ":", "self", ".", "instituteIdMap", "[", "institute_literal", "]", "=", "institution_id", "institute_row", "=", "{", "\"model\"", ":", "\"django-tethne.institution\"", ",", "\"pk\"", ":", "institution_id", ",", "\"fields\"", ":", "{", "\"institute_name\"", ":", "institute_name", ",", "\"addressLine1\"", ":", "addressline1", ",", "\"country\"", ":", "country", ",", "\"zip\"", ":", "zipcode", ",", "\"state\"", ":", "state", ",", "\"city\"", ":", "city", "}", "}", "department", "=", "\"\"", "if", "re", ".", "search", "(", "'Dept([^,]*),'", ",", "institute_literal", ")", "is", "not", "None", ":", "department", "=", "re", ".", "search", "(", "'Dept([^,]*),'", ",", "institute_literal", ")", ".", "group", "(", ")", ".", "replace", "(", "','", ",", "''", ")", "institute_instance_row", "=", "{", "\"model\"", ":", "\"django-tethne.institution_instance\"", ",", "\"pk\"", ":", "institution_instance_id", ",", "\"fields\"", ":", "{", "\"institution\"", ":", "self", ".", "instituteIdMap", "[", "institute_literal", "]", ",", "\"literal\"", ":", "institute_literal", ",", "\"institute_name\"", ":", "institute_name", ",", "\"addressLine1\"", ":", "addressline1", ",", "\"country\"", ":", "country", ",", "\"paper\"", ":", "self", ".", "paperIdMap", "[", "paper_key", "]", ",", "\"department\"", ":", "department", ",", "\"zip\"", ":", "zipcode", ",", "\"state\"", ":", "state", ",", "\"city\"", ":", "city", "}", "}", "return", "institute_row", ",", "institute_instance_row" ]
This method parses the institute literal to get the following
1. Department name
2. Country
3. University name
4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard
may vary, so parsing these values becomes very difficult. However, the complete address
can be found in the column "AddressLine1".)

Parameters
----------
institute_literal -> The literal value of the institute
institution_id -> The primary key value which is to be added in the fixture
institution_instance_id -> Primary key value which is to be added in the fixture
paper_key -> The Paper key which is used for the Institution Instance

Returns
-------
[ "This", "method", "parses", "the", "institute", "literal", "to", "get", "the", "following", "1", ".", "Department", "naame", "2", ".", "Country", "3", ".", "University", "name", "4", ".", "ZIP", "STATE", "AND", "CITY", "(", "Only", "if", "the", "country", "is", "USA", ".", "For", "other", "countries", "the", "standard", "may", "vary", ".", "So", "parsing", "these", "values", "becomes", "very", "difficult", ".", "However", "the", "complete", "address", "can", "be", "found", "in", "the", "column", "AddressLine1" ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L346-L424
train
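For illustration, a minimal standalone sketch of the comma-splitting logic that get_details_from_inst_literal applies, run against a hypothetical Web of Science style institute literal (the address below is invented, not taken from any record):

import re

# Hypothetical institute literal in the comma-separated form the method expects.
institute_literal = "Univ Arizona, Dept Geosci, Tucson, AZ 85721 USA"

parts = institute_literal.split(',')
institute_name = parts[0]
country = parts[-1].lstrip().replace('.', '')

zipcode = state = city = ""
if 'USA' in country:
    pieces = country.split()
    if len(pieces) == 3:           # e.g. "AZ 85721 USA" -> state, zip, country
        state, zipcode, country = pieces
    elif len(pieces) == 2:         # e.g. "AZ USA" -> state, country
        state, country = pieces
    city = parts[-2].lstrip()

# Everything between the institute name and the city/country segment
# becomes AddressLine1.
addressline1 = ','.join(parts[1:-1])

# The department is whatever "Dept..." fragment appears before a comma.
match = re.search('Dept([^,]*),', institute_literal)
department = match.group().replace(',', '') if match else ""

print(institute_name, department, city, state, zipcode, country)
# -> Univ Arizona Dept Geosci Tucson AZ 85721 USA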
diging/tethne
tethne/serialize/paper.py
Serialize.get_affiliation_details
def get_affiliation_details(self, value, affiliation_id, institute_literal): """ This method is used to map the Affiliation between an author and Institution. Parameters ---------- value - The author name affiliation_id - Primary key of the affiliation table institute_literal Returns ------- Affiliation details(JSON fixture) which can be written to a file """ tokens = tuple([t.upper().strip() for t in value.split(',')]) if len(tokens) == 1: tokens = value.split() if len(tokens) > 0: if len(tokens) > 1: aulast, auinit = tokens[0:2] else: aulast = tokens[0] auinit = '' else: aulast, auinit = tokens[0], '' aulast = _strip_punctuation(aulast).upper() auinit = _strip_punctuation(auinit).upper() author_key = auinit+aulast affiliation_row = { "model": "django-tethne.affiliation", "pk": affiliation_id, "fields": { "author": self.authorIdMap[author_key], "institution": self.instituteIdMap[institute_literal] } } return affiliation_row
python
def get_affiliation_details(self, value, affiliation_id, institute_literal): """ This method is used to map the Affiliation between an author and Institution. Parameters ---------- value - The author name affiliation_id - Primary key of the affiliation table institute_literal Returns ------- Affiliation details(JSON fixture) which can be written to a file """ tokens = tuple([t.upper().strip() for t in value.split(',')]) if len(tokens) == 1: tokens = value.split() if len(tokens) > 0: if len(tokens) > 1: aulast, auinit = tokens[0:2] else: aulast = tokens[0] auinit = '' else: aulast, auinit = tokens[0], '' aulast = _strip_punctuation(aulast).upper() auinit = _strip_punctuation(auinit).upper() author_key = auinit+aulast affiliation_row = { "model": "django-tethne.affiliation", "pk": affiliation_id, "fields": { "author": self.authorIdMap[author_key], "institution": self.instituteIdMap[institute_literal] } } return affiliation_row
[ "def", "get_affiliation_details", "(", "self", ",", "value", ",", "affiliation_id", ",", "institute_literal", ")", ":", "tokens", "=", "tuple", "(", "[", "t", ".", "upper", "(", ")", ".", "strip", "(", ")", "for", "t", "in", "value", ".", "split", "(", "','", ")", "]", ")", "if", "len", "(", "tokens", ")", "==", "1", ":", "tokens", "=", "value", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", ">", "0", ":", "if", "len", "(", "tokens", ")", ">", "1", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", ":", "2", "]", "else", ":", "aulast", "=", "tokens", "[", "0", "]", "auinit", "=", "''", "else", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", "]", ",", "''", "aulast", "=", "_strip_punctuation", "(", "aulast", ")", ".", "upper", "(", ")", "auinit", "=", "_strip_punctuation", "(", "auinit", ")", ".", "upper", "(", ")", "author_key", "=", "auinit", "+", "aulast", "affiliation_row", "=", "{", "\"model\"", ":", "\"django-tethne.affiliation\"", ",", "\"pk\"", ":", "affiliation_id", ",", "\"fields\"", ":", "{", "\"author\"", ":", "self", ".", "authorIdMap", "[", "author_key", "]", ",", "\"institution\"", ":", "self", ".", "instituteIdMap", "[", "institute_literal", "]", "}", "}", "return", "affiliation_row" ]
This method is used to map the Affiliation between an author and Institution. Parameters ---------- value - The author name affiliation_id - Primary key of the affiliation table institute_literal Returns ------- Affiliation details(JSON fixture) which can be written to a file
[ "This", "method", "is", "used", "to", "map", "the", "Affiliation", "between", "an", "author", "and", "Institution", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L426-L464
train
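A standalone sketch of just the author-key normalisation used above; _strip_punctuation is defined elsewhere in tethne, so a simple stand-in is assumed here, and the example name is hypothetical:

import string

def _strip_punctuation(s):
    # Stand-in for tethne's helper: drop punctuation characters.
    return ''.join(ch for ch in s if ch not in string.punctuation)

value = "Smith, J.A."   # hypothetical author name as it appears in the record
tokens = tuple(t.upper().strip() for t in value.split(','))
if len(tokens) == 1:
    tokens = value.split()

if len(tokens) > 1:
    aulast, auinit = tokens[0:2]
else:
    aulast, auinit = tokens[0], ''

# The key used to look up the author's primary key in authorIdMap.
author_key = _strip_punctuation(auinit).upper() + _strip_punctuation(aulast).upper()
print(author_key)   # -> JASMITH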
diging/tethne
tethne/readers/base.py
IterParser.start
def start(self): """ Find the first data entry and prepare to parse. """ while not self.is_start(self.current_tag): self.next() self.new_entry()
python
def start(self): """ Find the first data entry and prepare to parse. """ while not self.is_start(self.current_tag): self.next() self.new_entry()
[ "def", "start", "(", "self", ")", ":", "while", "not", "self", ".", "is_start", "(", "self", ".", "current_tag", ")", ":", "self", ".", "next", "(", ")", "self", ".", "new_entry", "(", ")" ]
Find the first data entry and prepare to parse.
[ "Find", "the", "first", "data", "entry", "and", "prepare", "to", "parse", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/base.py#L129-L136
train
diging/tethne
tethne/readers/base.py
IterParser.handle
def handle(self, tag, data): """ Process a single line of data, and store the result. Parameters ---------- tag : str data : """ if self.is_end(tag): self.postprocess_entry() if self.is_start(tag): self.new_entry() if not data or not tag: return if getattr(self, 'parse_only', None) and tag not in self.parse_only: return # TODO: revisit encoding here. if isinstance(data, unicode): data = unicodedata.normalize('NFKD', data)#.encode('utf-8','ignore') handler = self._get_handler(tag) if handler is not None: data = handler(data) if tag in self.tags: # Rename the field. tag = self.tags[tag] # Multiline fields are represented as lists of values. if hasattr(self.data[-1], tag): value = getattr(self.data[-1], tag) if tag in self.concat_fields: value = ' '.join([value, unicode(data)]) elif type(value) is list: value.append(data) elif value not in [None, '']: value = [value, data] else: value = data setattr(self.data[-1], tag, value) self.fields.add(tag)
python
def handle(self, tag, data): """ Process a single line of data, and store the result. Parameters ---------- tag : str data : """ if self.is_end(tag): self.postprocess_entry() if self.is_start(tag): self.new_entry() if not data or not tag: return if getattr(self, 'parse_only', None) and tag not in self.parse_only: return # TODO: revisit encoding here. if isinstance(data, unicode): data = unicodedata.normalize('NFKD', data)#.encode('utf-8','ignore') handler = self._get_handler(tag) if handler is not None: data = handler(data) if tag in self.tags: # Rename the field. tag = self.tags[tag] # Multiline fields are represented as lists of values. if hasattr(self.data[-1], tag): value = getattr(self.data[-1], tag) if tag in self.concat_fields: value = ' '.join([value, unicode(data)]) elif type(value) is list: value.append(data) elif value not in [None, '']: value = [value, data] else: value = data setattr(self.data[-1], tag, value) self.fields.add(tag)
[ "def", "handle", "(", "self", ",", "tag", ",", "data", ")", ":", "if", "self", ".", "is_end", "(", "tag", ")", ":", "self", ".", "postprocess_entry", "(", ")", "if", "self", ".", "is_start", "(", "tag", ")", ":", "self", ".", "new_entry", "(", ")", "if", "not", "data", "or", "not", "tag", ":", "return", "if", "getattr", "(", "self", ",", "'parse_only'", ",", "None", ")", "and", "tag", "not", "in", "self", ".", "parse_only", ":", "return", "# TODO: revisit encoding here.", "if", "isinstance", "(", "data", ",", "unicode", ")", ":", "data", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "data", ")", "#.encode('utf-8','ignore')", "handler", "=", "self", ".", "_get_handler", "(", "tag", ")", "if", "handler", "is", "not", "None", ":", "data", "=", "handler", "(", "data", ")", "if", "tag", "in", "self", ".", "tags", ":", "# Rename the field.", "tag", "=", "self", ".", "tags", "[", "tag", "]", "# Multiline fields are represented as lists of values.", "if", "hasattr", "(", "self", ".", "data", "[", "-", "1", "]", ",", "tag", ")", ":", "value", "=", "getattr", "(", "self", ".", "data", "[", "-", "1", "]", ",", "tag", ")", "if", "tag", "in", "self", ".", "concat_fields", ":", "value", "=", "' '", ".", "join", "(", "[", "value", ",", "unicode", "(", "data", ")", "]", ")", "elif", "type", "(", "value", ")", "is", "list", ":", "value", ".", "append", "(", "data", ")", "elif", "value", "not", "in", "[", "None", ",", "''", "]", ":", "value", "=", "[", "value", ",", "data", "]", "else", ":", "value", "=", "data", "setattr", "(", "self", ".", "data", "[", "-", "1", "]", ",", "tag", ",", "value", ")", "self", ".", "fields", ".", "add", "(", "tag", ")" ]
Process a single line of data, and store the result. Parameters ---------- tag : str data :
[ "Process", "a", "single", "line", "of", "data", "and", "store", "the", "result", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/base.py#L138-L183
train
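A toy illustration of the merge rules handle() applies when a tag repeats: fields listed in concat_fields grow into one space-joined string, while any other repeated tag turns into a list of values (the Entry class and sample tags here are hypothetical stand-ins for tethne's entry objects):

class Entry(object):
    pass

concat_fields = {'abstract'}

def merge(entry, tag, value_in):
    # Mirrors the multiline-field handling in IterParser.handle().
    if hasattr(entry, tag):
        value = getattr(entry, tag)
        if tag in concat_fields:
            value = ' '.join([value, value_in])
        elif type(value) is list:
            value.append(value_in)
        elif value not in [None, '']:
            value = [value, value_in]
        else:
            value = value_in
    else:
        value = value_in
    setattr(entry, tag, value)

entry = Entry()
merge(entry, 'abstract', 'First line of the abstract,')
merge(entry, 'abstract', 'continued on a second line.')
merge(entry, 'authors', 'SMITH J')
merge(entry, 'authors', 'DOE R')
print(entry.abstract)   # one space-joined string
print(entry.authors)    # ['SMITH J', 'DOE R']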
diging/tethne
tethne/readers/base.py
FTParser.open
def open(self): """ Open the data file. """ if not os.path.exists(self.path): raise IOError("No such path: {0}".format(self.path)) with open(self.path, "rb") as f: msg = f.read() result = chardet.detect(msg) self.buffer = codecs.open(self.path, "rb", encoding=result['encoding']) self.at_eof = False
python
def open(self): """ Open the data file. """ if not os.path.exists(self.path): raise IOError("No such path: {0}".format(self.path)) with open(self.path, "rb") as f: msg = f.read() result = chardet.detect(msg) self.buffer = codecs.open(self.path, "rb", encoding=result['encoding']) self.at_eof = False
[ "def", "open", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "raise", "IOError", "(", "\"No such path: {0}\"", ".", "format", "(", "self", ".", "path", ")", ")", "with", "open", "(", "self", ".", "path", ",", "\"rb\"", ")", "as", "f", ":", "msg", "=", "f", ".", "read", "(", ")", "result", "=", "chardet", ".", "detect", "(", "msg", ")", "self", ".", "buffer", "=", "codecs", ".", "open", "(", "self", ".", "path", ",", "\"rb\"", ",", "encoding", "=", "result", "[", "'encoding'", "]", ")", "self", ".", "at_eof", "=", "False" ]
Open the data file.
[ "Open", "the", "data", "file", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/base.py#L206-L221
train
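The open() above follows a common sniff-then-reopen pattern; a standalone sketch, assuming a hypothetical data file on disk:

import codecs
import chardet

path = '/path/to/records.txt'   # hypothetical field-tagged data file

# Detect the encoding from the raw bytes, then reopen the file as text
# with that encoding so every line the parser reads is already decoded.
with open(path, 'rb') as f:
    raw = f.read()
encoding = chardet.detect(raw)['encoding']

handle = codecs.open(path, 'rb', encoding=encoding)
first_line = handle.readline()
print(encoding, first_line)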
diging/tethne
tethne/readers/base.py
FTParser.next
def next(self): """ Get the next line of data. Returns ------- tag : str data : """ line = self.buffer.readline() while line == '\n': # Skip forward to the next line with content. line = self.buffer.readline() if line == '': # End of file. self.at_eof = True return None, None match = re.match('([A-Z]{2}|[C][1])\W(.*)', line) if match is not None: self.current_tag, data = match.groups() else: self.current_tag = self.last_tag data = line.strip() return self.current_tag, _cast(data)
python
def next(self): """ Get the next line of data. Returns ------- tag : str data : """ line = self.buffer.readline() while line == '\n': # Skip forward to the next line with content. line = self.buffer.readline() if line == '': # End of file. self.at_eof = True return None, None match = re.match('([A-Z]{2}|[C][1])\W(.*)', line) if match is not None: self.current_tag, data = match.groups() else: self.current_tag = self.last_tag data = line.strip() return self.current_tag, _cast(data)
[ "def", "next", "(", "self", ")", ":", "line", "=", "self", ".", "buffer", ".", "readline", "(", ")", "while", "line", "==", "'\\n'", ":", "# Skip forward to the next line with content.", "line", "=", "self", ".", "buffer", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "# End of file.", "self", ".", "at_eof", "=", "True", "return", "None", ",", "None", "match", "=", "re", ".", "match", "(", "'([A-Z]{2}|[C][1])\\W(.*)'", ",", "line", ")", "if", "match", "is", "not", "None", ":", "self", ".", "current_tag", ",", "data", "=", "match", ".", "groups", "(", ")", "else", ":", "self", ".", "current_tag", "=", "self", ".", "last_tag", "data", "=", "line", ".", "strip", "(", ")", "return", "self", ".", "current_tag", ",", "_cast", "(", "data", ")" ]
Get the next line of data. Returns ------- tag : str data :
[ "Get", "the", "next", "line", "of", "data", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/base.py#L223-L247
train
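The regular expression in next() decides whether a line opens a new field or continues the previous one; a small demonstration with invented Web of Science style lines:

import re

# Same pattern as next(): a two-letter tag (or "C1"), a non-word separator,
# then the field content.
pattern = re.compile(r'([A-Z]{2}|[C][1])\W(.*)')

lines = [
    'TI A study of something interesting',   # opens the TI field
    'AU Smith, J.',                          # opens the AU field
    '   continued over a second line',       # no tag -> continuation of AU
]

last_tag = None
for line in lines:
    match = pattern.match(line)
    if match is not None:
        last_tag, data = match.groups()
    else:
        data = line.strip()
    print(last_tag, '->', data)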
diging/tethne
tethne/networks/authors.py
coauthors
def coauthors(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs): """ A graph describing joint authorship in ``corpus``. """ return cooccurrence(corpus, 'authors', min_weight=min_weight, edge_attrs=edge_attrs, **kwargs)
python
def coauthors(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs): """ A graph describing joint authorship in ``corpus``. """ return cooccurrence(corpus, 'authors', min_weight=min_weight, edge_attrs=edge_attrs, **kwargs)
[ "def", "coauthors", "(", "corpus", ",", "min_weight", "=", "1", ",", "edge_attrs", "=", "[", "'ayjid'", ",", "'date'", "]", ",", "*", "*", "kwargs", ")", ":", "return", "cooccurrence", "(", "corpus", ",", "'authors'", ",", "min_weight", "=", "min_weight", ",", "edge_attrs", "=", "edge_attrs", ",", "*", "*", "kwargs", ")" ]
A graph describing joint authorship in ``corpus``.
[ "A", "graph", "describing", "joint", "authorship", "in", "corpus", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/authors.py#L22-L27
train
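A hedged usage example: build the coauthorship graph for a Web of Science corpus (the file path is a placeholder, and the returned object is a networkx graph produced by the generic cooccurrence builder):

from tethne.readers import wos
from tethne.networks.authors import coauthors

# Placeholder path to a Web of Science field-tagged export.
corpus = wos.read('/path/to/savedrecs.txt')

# Nodes are authors; an edge links two authors who share at least
# min_weight papers, and each edge carries the ayjid/date attributes.
graph = coauthors(corpus, min_weight=2)
print(graph.order(), graph.size())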
diging/tethne
tethne/readers/zotero.py
extract_text
def extract_text(fpath): """ Extracts structured text content from a plain-text file at ``fpath``. Parameters ---------- fpath : str Path to the text file.. Returns ------- :class:`.StructuredFeature` A :class:`.StructuredFeature` that contains sentence context. """ with codecs.open(fpath, 'r') as f: # Determine the encoding of the file. document = f.read() encoding = chardet.detect(document)['encoding'] document = document.decode(encoding) tokens = [] sentences = [] i = 0 for sentence in nltk.tokenize.sent_tokenize(document): sentences.append(i) for word in nltk.tokenize.word_tokenize(sentence): tokens.append(word) i += 1 contexts = [('sentence', sentences)] return StructuredFeature(tokens, contexts)
python
def extract_text(fpath): """ Extracts structured text content from a plain-text file at ``fpath``. Parameters ---------- fpath : str Path to the text file.. Returns ------- :class:`.StructuredFeature` A :class:`.StructuredFeature` that contains sentence context. """ with codecs.open(fpath, 'r') as f: # Determine the encoding of the file. document = f.read() encoding = chardet.detect(document)['encoding'] document = document.decode(encoding) tokens = [] sentences = [] i = 0 for sentence in nltk.tokenize.sent_tokenize(document): sentences.append(i) for word in nltk.tokenize.word_tokenize(sentence): tokens.append(word) i += 1 contexts = [('sentence', sentences)] return StructuredFeature(tokens, contexts)
[ "def", "extract_text", "(", "fpath", ")", ":", "with", "codecs", ".", "open", "(", "fpath", ",", "'r'", ")", "as", "f", ":", "# Determine the encoding of the file.", "document", "=", "f", ".", "read", "(", ")", "encoding", "=", "chardet", ".", "detect", "(", "document", ")", "[", "'encoding'", "]", "document", "=", "document", ".", "decode", "(", "encoding", ")", "tokens", "=", "[", "]", "sentences", "=", "[", "]", "i", "=", "0", "for", "sentence", "in", "nltk", ".", "tokenize", ".", "sent_tokenize", "(", "document", ")", ":", "sentences", ".", "append", "(", "i", ")", "for", "word", "in", "nltk", ".", "tokenize", ".", "word_tokenize", "(", "sentence", ")", ":", "tokens", ".", "append", "(", "word", ")", "i", "+=", "1", "contexts", "=", "[", "(", "'sentence'", ",", "sentences", ")", "]", "return", "StructuredFeature", "(", "tokens", ",", "contexts", ")" ]
Extracts structured text content from a plain-text file at ``fpath``.

Parameters
----------
fpath : str
    Path to the text file.

Returns
-------
:class:`.StructuredFeature`
    A :class:`.StructuredFeature` that contains sentence context.
[ "Extracts", "structured", "text", "content", "from", "a", "plain", "-", "text", "file", "at", "fpath", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L86-L117
train
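The sentence context produced above is just a list of token offsets. A standalone sketch of the same bookkeeping (NLTK's punkt models must be available; the sample text is invented):

import nltk

document = ("Tethne parses bibliographic data. "
            "It can also tokenize attached full text.")

tokens, sentences = [], []
i = 0
for sentence in nltk.tokenize.sent_tokenize(document):
    sentences.append(i)   # token index at which each sentence starts
    for word in nltk.tokenize.word_tokenize(sentence):
        tokens.append(word)
        i += 1

print(sentences)   # e.g. [0, 5]
print(tokens)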
diging/tethne
tethne/readers/zotero.py
extract_pdf
def extract_pdf(fpath): """ Extracts structured text content from a PDF at ``fpath``. Parameters ---------- fpath : str Path to the PDF. Returns ------- :class:`.StructuredFeature` A :class:`.StructuredFeature` that contains page and sentence contexts. """ with codecs.open(fpath, 'r') as f: # Determine the encoding of the file. document = slate.PDF(f) encoding = chardet.detect(document[0]) tokens = [] pages = [] sentences = [] tokenizer = nltk.tokenize.TextTilingTokenizer() i = 0 for page in document: pages.append(i) # Decode using the correct encoding. page = page.decode(encoding['encoding']) for sentence in nltk.tokenize.sent_tokenize(page): sentences.append(i) for word in nltk.tokenize.word_tokenize(sentence): if len(word) > 15: words = nltk.tokenize.word_tokenize(_infer_spaces(word)) if mean([len(w) for w in words]) > 2: for w in words: tokens.append(w) i += 1 continue tokens.append(word) i += 1 contexts = [('page', pages), ('sentence', sentences)] return StructuredFeature(tokens, contexts)
python
def extract_pdf(fpath): """ Extracts structured text content from a PDF at ``fpath``. Parameters ---------- fpath : str Path to the PDF. Returns ------- :class:`.StructuredFeature` A :class:`.StructuredFeature` that contains page and sentence contexts. """ with codecs.open(fpath, 'r') as f: # Determine the encoding of the file. document = slate.PDF(f) encoding = chardet.detect(document[0]) tokens = [] pages = [] sentences = [] tokenizer = nltk.tokenize.TextTilingTokenizer() i = 0 for page in document: pages.append(i) # Decode using the correct encoding. page = page.decode(encoding['encoding']) for sentence in nltk.tokenize.sent_tokenize(page): sentences.append(i) for word in nltk.tokenize.word_tokenize(sentence): if len(word) > 15: words = nltk.tokenize.word_tokenize(_infer_spaces(word)) if mean([len(w) for w in words]) > 2: for w in words: tokens.append(w) i += 1 continue tokens.append(word) i += 1 contexts = [('page', pages), ('sentence', sentences)] return StructuredFeature(tokens, contexts)
[ "def", "extract_pdf", "(", "fpath", ")", ":", "with", "codecs", ".", "open", "(", "fpath", ",", "'r'", ")", "as", "f", ":", "# Determine the encoding of the file.", "document", "=", "slate", ".", "PDF", "(", "f", ")", "encoding", "=", "chardet", ".", "detect", "(", "document", "[", "0", "]", ")", "tokens", "=", "[", "]", "pages", "=", "[", "]", "sentences", "=", "[", "]", "tokenizer", "=", "nltk", ".", "tokenize", ".", "TextTilingTokenizer", "(", ")", "i", "=", "0", "for", "page", "in", "document", ":", "pages", ".", "append", "(", "i", ")", "# Decode using the correct encoding.", "page", "=", "page", ".", "decode", "(", "encoding", "[", "'encoding'", "]", ")", "for", "sentence", "in", "nltk", ".", "tokenize", ".", "sent_tokenize", "(", "page", ")", ":", "sentences", ".", "append", "(", "i", ")", "for", "word", "in", "nltk", ".", "tokenize", ".", "word_tokenize", "(", "sentence", ")", ":", "if", "len", "(", "word", ")", ">", "15", ":", "words", "=", "nltk", ".", "tokenize", ".", "word_tokenize", "(", "_infer_spaces", "(", "word", ")", ")", "if", "mean", "(", "[", "len", "(", "w", ")", "for", "w", "in", "words", "]", ")", ">", "2", ":", "for", "w", "in", "words", ":", "tokens", ".", "append", "(", "w", ")", "i", "+=", "1", "continue", "tokens", ".", "append", "(", "word", ")", "i", "+=", "1", "contexts", "=", "[", "(", "'page'", ",", "pages", ")", ",", "(", "'sentence'", ",", "sentences", ")", "]", "return", "StructuredFeature", "(", "tokens", ",", "contexts", ")" ]
Extracts structured text content from a PDF at ``fpath``. Parameters ---------- fpath : str Path to the PDF. Returns ------- :class:`.StructuredFeature` A :class:`.StructuredFeature` that contains page and sentence contexts.
[ "Extracts", "structured", "text", "content", "from", "a", "PDF", "at", "fpath", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L120-L167
train
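The interesting wrinkle in extract_pdf is the repair of run-together tokens: anything longer than 15 characters is re-segmented with _infer_spaces, and the pieces are kept only when they look like real words on average. A toy illustration of that acceptance test (mean is a local stand-in for the module's import, and the token lists are invented):

def mean(values):
    # Stand-in for the mean() imported by the module.
    return float(sum(values)) / len(values)

# Plausible segmentation of a run-together PDF token: keep the pieces.
candidate = ['this', 'is', 'a', 'run', 'together', 'word']
print(mean([len(w) for w in candidate]) > 2)   # True  -> append the pieces

# Junk segmentation of single letters: fall back to the original token.
junk = ['t', 'h', 'i', 's', 'x', 'q']
print(mean([len(w) for w in junk]) > 2)        # False -> append the original token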
diging/tethne
tethne/readers/zotero.py
read
def read(path, corpus=True, index_by='uri', follow_links=False, **kwargs): """ Read bibliographic data from Zotero RDF. Examples -------- Assuming that the Zotero collection was exported to the directory ``/my/working/dir`` with the name ``myCollection``, a subdirectory should have been created at ``/my/working/dir/myCollection``, and an RDF file should exist at ``/my/working/dir/myCollection/myCollection.rdf``. .. code-block:: python >>> from tethne.readers.zotero import read >>> myCorpus = read('/my/working/dir/myCollection') >>> myCorpus <tethne.classes.corpus.Corpus object at 0x10047e350> Parameters ---------- path : str Path to the output directory created by Zotero. Expected to contain a file called ``[directory_name].rdf``. corpus : bool (default: True) If True, returns a :class:`.Corpus`\. Otherwise, returns a list of :class:`.Paper`\s. index_by : str (default: ``'identifier'``) :class:`.Paper` attribute name to use as the primary indexing field. If the field is missing on a :class:`.Paper`\, a unique identifier will be generated based on the title and author names. follow_links : bool If ``True``, attempts to load full-text content from attached files (e.g. PDFs with embedded text). Default: False. kwargs : kwargs Passed to the :class:`.Corpus` constructor. Returns ------- corpus : :class:`.Corpus` """ # TODO: is there a case where `from_dir` would make sense? parser = ZoteroParser(path, index_by=index_by, follow_links=follow_links) papers = parser.parse() if corpus: c = Corpus(papers, index_by=index_by, **kwargs) if c.duplicate_papers: warnings.warn("Duplicate papers detected. Use the 'duplicate_papers' attribute of the corpus to get the list", UserWarning) for fset_name, fset_values in parser.full_text.iteritems(): c.features[fset_name] = StructuredFeatureSet(fset_values) return c return papers
python
def read(path, corpus=True, index_by='uri', follow_links=False, **kwargs): """ Read bibliographic data from Zotero RDF. Examples -------- Assuming that the Zotero collection was exported to the directory ``/my/working/dir`` with the name ``myCollection``, a subdirectory should have been created at ``/my/working/dir/myCollection``, and an RDF file should exist at ``/my/working/dir/myCollection/myCollection.rdf``. .. code-block:: python >>> from tethne.readers.zotero import read >>> myCorpus = read('/my/working/dir/myCollection') >>> myCorpus <tethne.classes.corpus.Corpus object at 0x10047e350> Parameters ---------- path : str Path to the output directory created by Zotero. Expected to contain a file called ``[directory_name].rdf``. corpus : bool (default: True) If True, returns a :class:`.Corpus`\. Otherwise, returns a list of :class:`.Paper`\s. index_by : str (default: ``'identifier'``) :class:`.Paper` attribute name to use as the primary indexing field. If the field is missing on a :class:`.Paper`\, a unique identifier will be generated based on the title and author names. follow_links : bool If ``True``, attempts to load full-text content from attached files (e.g. PDFs with embedded text). Default: False. kwargs : kwargs Passed to the :class:`.Corpus` constructor. Returns ------- corpus : :class:`.Corpus` """ # TODO: is there a case where `from_dir` would make sense? parser = ZoteroParser(path, index_by=index_by, follow_links=follow_links) papers = parser.parse() if corpus: c = Corpus(papers, index_by=index_by, **kwargs) if c.duplicate_papers: warnings.warn("Duplicate papers detected. Use the 'duplicate_papers' attribute of the corpus to get the list", UserWarning) for fset_name, fset_values in parser.full_text.iteritems(): c.features[fset_name] = StructuredFeatureSet(fset_values) return c return papers
[ "def", "read", "(", "path", ",", "corpus", "=", "True", ",", "index_by", "=", "'uri'", ",", "follow_links", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# TODO: is there a case where `from_dir` would make sense?", "parser", "=", "ZoteroParser", "(", "path", ",", "index_by", "=", "index_by", ",", "follow_links", "=", "follow_links", ")", "papers", "=", "parser", ".", "parse", "(", ")", "if", "corpus", ":", "c", "=", "Corpus", "(", "papers", ",", "index_by", "=", "index_by", ",", "*", "*", "kwargs", ")", "if", "c", ".", "duplicate_papers", ":", "warnings", ".", "warn", "(", "\"Duplicate papers detected. Use the 'duplicate_papers' attribute of the corpus to get the list\"", ",", "UserWarning", ")", "for", "fset_name", ",", "fset_values", "in", "parser", ".", "full_text", ".", "iteritems", "(", ")", ":", "c", ".", "features", "[", "fset_name", "]", "=", "StructuredFeatureSet", "(", "fset_values", ")", "return", "c", "return", "papers" ]
Read bibliographic data from Zotero RDF.

Examples
--------
Assuming that the Zotero collection was exported to the directory
``/my/working/dir`` with the name ``myCollection``, a subdirectory should
have been created at ``/my/working/dir/myCollection``, and an RDF file
should exist at ``/my/working/dir/myCollection/myCollection.rdf``.

.. code-block:: python

   >>> from tethne.readers.zotero import read
   >>> myCorpus = read('/my/working/dir/myCollection')
   >>> myCorpus
   <tethne.classes.corpus.Corpus object at 0x10047e350>

Parameters
----------
path : str
    Path to the output directory created by Zotero. Expected to contain a
    file called ``[directory_name].rdf``.
corpus : bool (default: True)
    If True, returns a :class:`.Corpus`\. Otherwise, returns a list of
    :class:`.Paper`\s.
index_by : str (default: ``'uri'``)
    :class:`.Paper` attribute name to use as the primary indexing field. If
    the field is missing on a :class:`.Paper`\, a unique identifier will be
    generated based on the title and author names.
follow_links : bool
    If ``True``, attempts to load full-text content from attached files
    (e.g. PDFs with embedded text). Default: False.
kwargs : kwargs
    Passed to the :class:`.Corpus` constructor.

Returns
-------
corpus : :class:`.Corpus`
[ "Read", "bibliographic", "data", "from", "Zotero", "RDF", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L391-L446
train
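A hedged example of following attachments while reading: with follow_links=True, extracted full text is grouped by MIME subtype plus '_text' (so PDF attachments land under 'pdf_text' and plain-text ones under 'plain_text'); the path is a placeholder:

from tethne.readers.zotero import read

corpus = read('/my/working/dir/myCollection', follow_links=True)

# Feature-set names are derived from the attachment MIME type,
# e.g. 'pdf_text' for application/pdf and 'plain_text' for text/plain.
print(corpus.features.keys())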
diging/tethne
tethne/readers/zotero.py
ZoteroParser.handle_date
def handle_date(self, value): """ Attempt to coerced date to ISO8601. """ try: return iso8601.parse_date(unicode(value)).year except iso8601.ParseError: for datefmt in ("%B %d, %Y", "%Y-%m", "%Y-%m-%d", "%m/%d/%Y"): try: # TODO: remove str coercion. return datetime.strptime(unicode(value), datefmt).date().year except ValueError: pass
python
def handle_date(self, value): """ Attempt to coerced date to ISO8601. """ try: return iso8601.parse_date(unicode(value)).year except iso8601.ParseError: for datefmt in ("%B %d, %Y", "%Y-%m", "%Y-%m-%d", "%m/%d/%Y"): try: # TODO: remove str coercion. return datetime.strptime(unicode(value), datefmt).date().year except ValueError: pass
[ "def", "handle_date", "(", "self", ",", "value", ")", ":", "try", ":", "return", "iso8601", ".", "parse_date", "(", "unicode", "(", "value", ")", ")", ".", "year", "except", "iso8601", ".", "ParseError", ":", "for", "datefmt", "in", "(", "\"%B %d, %Y\"", ",", "\"%Y-%m\"", ",", "\"%Y-%m-%d\"", ",", "\"%m/%d/%Y\"", ")", ":", "try", ":", "# TODO: remove str coercion.", "return", "datetime", ".", "strptime", "(", "unicode", "(", "value", ")", ",", "datefmt", ")", ".", "date", "(", ")", ".", "year", "except", "ValueError", ":", "pass" ]
Attempt to coerce date to ISO8601.
[ "Attempt", "to", "coerced", "date", "to", "ISO8601", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L242-L254
train
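A standalone sketch of the same fallback chain, showing which kinds of date strings yield a year (the sample values are invented):

from datetime import datetime
import iso8601

def parse_year(value):
    # Try strict ISO 8601 first, then the looser formats the parser
    # falls back to; implicitly return None when nothing matches.
    try:
        return iso8601.parse_date(value).year
    except iso8601.ParseError:
        for datefmt in ("%B %d, %Y", "%Y-%m", "%Y-%m-%d", "%m/%d/%Y"):
            try:
                return datetime.strptime(value, datefmt).date().year
            except ValueError:
                pass

print(parse_year('2014-05-17T00:00:00Z'))   # ISO 8601      -> 2014
print(parse_year('May 17, 2014'))           # "%B %d, %Y"   -> 2014
print(parse_year('05/17/2014'))             # "%m/%d/%Y"    -> 2014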
diging/tethne
tethne/readers/zotero.py
ZoteroParser.postprocess_link
def postprocess_link(self, entry): """ Attempt to load full-text content from resource. """ if not self.follow_links: return if type(entry.link) is not list: entry.link = [entry.link] for link in list(entry.link): if not os.path.exists(link): continue mime_type = magic.from_file(link, mime=True) if mime_type == 'application/pdf': structuredfeature = extract_pdf(link) elif mime_type == 'text/plain': structuredfeature = extract_text(link) else: structuredfeature = None if not structuredfeature: continue fset_name = mime_type.split('/')[-1] + '_text' if not fset_name in self.full_text: self.full_text[fset_name] = {} if hasattr(self, 'index_by'): ident = getattr(entry, self.index_by) if type(ident) is list: ident = ident[0] else: # If `index_by` is not set, use `uri` by default. ident = entry.uri self.full_text[fset_name][ident] = structuredfeature
python
def postprocess_link(self, entry): """ Attempt to load full-text content from resource. """ if not self.follow_links: return if type(entry.link) is not list: entry.link = [entry.link] for link in list(entry.link): if not os.path.exists(link): continue mime_type = magic.from_file(link, mime=True) if mime_type == 'application/pdf': structuredfeature = extract_pdf(link) elif mime_type == 'text/plain': structuredfeature = extract_text(link) else: structuredfeature = None if not structuredfeature: continue fset_name = mime_type.split('/')[-1] + '_text' if not fset_name in self.full_text: self.full_text[fset_name] = {} if hasattr(self, 'index_by'): ident = getattr(entry, self.index_by) if type(ident) is list: ident = ident[0] else: # If `index_by` is not set, use `uri` by default. ident = entry.uri self.full_text[fset_name][ident] = structuredfeature
[ "def", "postprocess_link", "(", "self", ",", "entry", ")", ":", "if", "not", "self", ".", "follow_links", ":", "return", "if", "type", "(", "entry", ".", "link", ")", "is", "not", "list", ":", "entry", ".", "link", "=", "[", "entry", ".", "link", "]", "for", "link", "in", "list", "(", "entry", ".", "link", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "link", ")", ":", "continue", "mime_type", "=", "magic", ".", "from_file", "(", "link", ",", "mime", "=", "True", ")", "if", "mime_type", "==", "'application/pdf'", ":", "structuredfeature", "=", "extract_pdf", "(", "link", ")", "elif", "mime_type", "==", "'text/plain'", ":", "structuredfeature", "=", "extract_text", "(", "link", ")", "else", ":", "structuredfeature", "=", "None", "if", "not", "structuredfeature", ":", "continue", "fset_name", "=", "mime_type", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "+", "'_text'", "if", "not", "fset_name", "in", "self", ".", "full_text", ":", "self", ".", "full_text", "[", "fset_name", "]", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'index_by'", ")", ":", "ident", "=", "getattr", "(", "entry", ",", "self", ".", "index_by", ")", "if", "type", "(", "ident", ")", "is", "list", ":", "ident", "=", "ident", "[", "0", "]", "else", ":", "# If `index_by` is not set, use `uri` by default.", "ident", "=", "entry", ".", "uri", "self", ".", "full_text", "[", "fset_name", "]", "[", "ident", "]", "=", "structuredfeature" ]
Attempt to load full-text content from resource.
[ "Attempt", "to", "load", "full", "-", "text", "content", "from", "resource", "." ]
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L351-L388
train
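A minimal sketch of the MIME-type dispatch performed for each attachment, using python-magic; the path is a placeholder and the extract_* calls are only indicated:

import os
import magic

link = '/path/to/attachment.pdf'   # placeholder attachment path from the RDF

if os.path.exists(link):
    mime_type = magic.from_file(link, mime=True)
    if mime_type == 'application/pdf':
        print('would call extract_pdf(%s)' % link)
    elif mime_type == 'text/plain':
        print('would call extract_text(%s)' % link)
    else:
        print('unsupported attachment type: %s' % mime_type)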
web-push-libs/pywebpush
pywebpush/__init__.py
webpush
def webpush(subscription_info, data=None, vapid_private_key=None, vapid_claims=None, content_encoding="aes128gcm", curl=False, timeout=None, ttl=0): """ One call solution to endcode and send `data` to the endpoint contained in `subscription_info` using optional VAPID auth headers. in example: .. code-block:: python from pywebpush import python webpush( subscription_info={ "endpoint": "https://push.example.com/v1/abcd", "keys": {"p256dh": "0123abcd...", "auth": "001122..."} }, data="Mary had a little lamb, with a nice mint jelly", vapid_private_key="path/to/key.pem", vapid_claims={"sub": "[email protected]"} ) No additional method call is required. Any non-success will throw a `WebPushException`. :param subscription_info: Provided by the client call :type subscription_info: dict :param data: Serialized data to send :type data: str :param vapid_private_key: Vapid instance or path to vapid private key PEM \ or encoded str :type vapid_private_key: Union[Vapid, str] :param vapid_claims: Dictionary of claims ('sub' required) :type vapid_claims: dict :param content_encoding: Optional content type string :type content_encoding: str :param curl: Return as "curl" string instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple :param ttl: Time To Live :type ttl: int :return requests.Response or string """ vapid_headers = None if vapid_claims: if not vapid_claims.get('aud'): url = urlparse(subscription_info.get('endpoint')) aud = "{}://{}".format(url.scheme, url.netloc) vapid_claims['aud'] = aud if not vapid_claims.get('exp'): # encryption lives for 12 hours vapid_claims['exp'] = int(time.time()) + (12 * 60 * 60) if not vapid_private_key: raise WebPushException("VAPID dict missing 'private_key'") if isinstance(vapid_private_key, Vapid): vv = vapid_private_key elif os.path.isfile(vapid_private_key): # Presume that key from file is handled correctly by # py_vapid. vv = Vapid.from_file( private_key_file=vapid_private_key) # pragma no cover else: vv = Vapid.from_string(private_key=vapid_private_key) vapid_headers = vv.sign(vapid_claims) response = WebPusher(subscription_info).send( data, vapid_headers, ttl=ttl, content_encoding=content_encoding, curl=curl, timeout=timeout, ) if not curl and response.status_code > 202: raise WebPushException("Push failed: {} {}".format( response.status_code, response.reason), response=response) return response
python
def webpush(subscription_info, data=None, vapid_private_key=None, vapid_claims=None, content_encoding="aes128gcm", curl=False, timeout=None, ttl=0): """ One call solution to endcode and send `data` to the endpoint contained in `subscription_info` using optional VAPID auth headers. in example: .. code-block:: python from pywebpush import python webpush( subscription_info={ "endpoint": "https://push.example.com/v1/abcd", "keys": {"p256dh": "0123abcd...", "auth": "001122..."} }, data="Mary had a little lamb, with a nice mint jelly", vapid_private_key="path/to/key.pem", vapid_claims={"sub": "[email protected]"} ) No additional method call is required. Any non-success will throw a `WebPushException`. :param subscription_info: Provided by the client call :type subscription_info: dict :param data: Serialized data to send :type data: str :param vapid_private_key: Vapid instance or path to vapid private key PEM \ or encoded str :type vapid_private_key: Union[Vapid, str] :param vapid_claims: Dictionary of claims ('sub' required) :type vapid_claims: dict :param content_encoding: Optional content type string :type content_encoding: str :param curl: Return as "curl" string instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple :param ttl: Time To Live :type ttl: int :return requests.Response or string """ vapid_headers = None if vapid_claims: if not vapid_claims.get('aud'): url = urlparse(subscription_info.get('endpoint')) aud = "{}://{}".format(url.scheme, url.netloc) vapid_claims['aud'] = aud if not vapid_claims.get('exp'): # encryption lives for 12 hours vapid_claims['exp'] = int(time.time()) + (12 * 60 * 60) if not vapid_private_key: raise WebPushException("VAPID dict missing 'private_key'") if isinstance(vapid_private_key, Vapid): vv = vapid_private_key elif os.path.isfile(vapid_private_key): # Presume that key from file is handled correctly by # py_vapid. vv = Vapid.from_file( private_key_file=vapid_private_key) # pragma no cover else: vv = Vapid.from_string(private_key=vapid_private_key) vapid_headers = vv.sign(vapid_claims) response = WebPusher(subscription_info).send( data, vapid_headers, ttl=ttl, content_encoding=content_encoding, curl=curl, timeout=timeout, ) if not curl and response.status_code > 202: raise WebPushException("Push failed: {} {}".format( response.status_code, response.reason), response=response) return response
[ "def", "webpush", "(", "subscription_info", ",", "data", "=", "None", ",", "vapid_private_key", "=", "None", ",", "vapid_claims", "=", "None", ",", "content_encoding", "=", "\"aes128gcm\"", ",", "curl", "=", "False", ",", "timeout", "=", "None", ",", "ttl", "=", "0", ")", ":", "vapid_headers", "=", "None", "if", "vapid_claims", ":", "if", "not", "vapid_claims", ".", "get", "(", "'aud'", ")", ":", "url", "=", "urlparse", "(", "subscription_info", ".", "get", "(", "'endpoint'", ")", ")", "aud", "=", "\"{}://{}\"", ".", "format", "(", "url", ".", "scheme", ",", "url", ".", "netloc", ")", "vapid_claims", "[", "'aud'", "]", "=", "aud", "if", "not", "vapid_claims", ".", "get", "(", "'exp'", ")", ":", "# encryption lives for 12 hours", "vapid_claims", "[", "'exp'", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "(", "12", "*", "60", "*", "60", ")", "if", "not", "vapid_private_key", ":", "raise", "WebPushException", "(", "\"VAPID dict missing 'private_key'\"", ")", "if", "isinstance", "(", "vapid_private_key", ",", "Vapid", ")", ":", "vv", "=", "vapid_private_key", "elif", "os", ".", "path", ".", "isfile", "(", "vapid_private_key", ")", ":", "# Presume that key from file is handled correctly by", "# py_vapid.", "vv", "=", "Vapid", ".", "from_file", "(", "private_key_file", "=", "vapid_private_key", ")", "# pragma no cover", "else", ":", "vv", "=", "Vapid", ".", "from_string", "(", "private_key", "=", "vapid_private_key", ")", "vapid_headers", "=", "vv", ".", "sign", "(", "vapid_claims", ")", "response", "=", "WebPusher", "(", "subscription_info", ")", ".", "send", "(", "data", ",", "vapid_headers", ",", "ttl", "=", "ttl", ",", "content_encoding", "=", "content_encoding", ",", "curl", "=", "curl", ",", "timeout", "=", "timeout", ",", ")", "if", "not", "curl", "and", "response", ".", "status_code", ">", "202", ":", "raise", "WebPushException", "(", "\"Push failed: {} {}\"", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "reason", ")", ",", "response", "=", "response", ")", "return", "response" ]
One call solution to encode and send `data` to the endpoint
contained in `subscription_info` using optional VAPID auth headers.

For example:

.. code-block:: python

    from pywebpush import webpush

    webpush(
        subscription_info={
            "endpoint": "https://push.example.com/v1/abcd",
            "keys": {"p256dh": "0123abcd...",
                     "auth": "001122..."}
        },
        data="Mary had a little lamb, with a nice mint jelly",
        vapid_private_key="path/to/key.pem",
        vapid_claims={"sub": "[email protected]"})

No additional method call is required.
Any non-success will throw a `WebPushException`.

:param subscription_info: Provided by the client call
:type subscription_info: dict
:param data: Serialized data to send
:type data: str
:param vapid_private_key: Vapid instance or path to vapid private key PEM \
    or encoded str
:type vapid_private_key: Union[Vapid, str]
:param vapid_claims: Dictionary of claims ('sub' required)
:type vapid_claims: dict
:param content_encoding: Optional content type string
:type content_encoding: str
:param curl: Return as "curl" string instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple
:param ttl: Time To Live
:type ttl: int
:return requests.Response or string
[ "One", "call", "solution", "to", "endcode", "and", "send", "data", "to", "the", "endpoint", "contained", "in", "subscription_info", "using", "optional", "VAPID", "auth", "headers", "." ]
2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4
https://github.com/web-push-libs/pywebpush/blob/2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4/pywebpush/__init__.py#L350-L435
train
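A hedged usage example that also handles failure: the endpoint and keys are placeholders, and a push rejected by the service raises WebPushException carrying the underlying requests.Response on its response attribute, as in the code above:

from pywebpush import webpush, WebPushException

try:
    webpush(
        subscription_info={
            "endpoint": "https://push.example.com/v1/abcd",         # placeholder
            "keys": {"p256dh": "0123abcd...", "auth": "001122..."}  # placeholders
        },
        data="Mary had a little lamb, with a nice mint jelly",
        vapid_private_key="path/to/key.pem",
        vapid_claims={"sub": "mailto:[email protected]"},
        ttl=30,
    )
except WebPushException as exc:
    print(exc)
    # The push service's reply is attached for logging and diagnostics.
    if exc.response is not None:
        print(exc.response.status_code, exc.response.text)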
web-push-libs/pywebpush
pywebpush/__init__.py
WebPusher.encode
def encode(self, data, content_encoding="aes128gcm"): """Encrypt the data. :param data: A serialized block of byte data (String, JSON, bit array, etc.) Make sure that whatever you send, your client knows how to understand it. :type data: str :param content_encoding: The content_encoding type to use to encrypt the data. Defaults to RFC8188 "aes128gcm". The previous draft-01 is "aesgcm", however this format is now deprecated. :type content_encoding: enum("aesgcm", "aes128gcm") """ # Salt is a random 16 byte array. if not data: return if not self.auth_key or not self.receiver_key: raise WebPushException("No keys specified in subscription info") salt = None if content_encoding not in self.valid_encodings: raise WebPushException("Invalid content encoding specified. " "Select from " + json.dumps(self.valid_encodings)) if content_encoding == "aesgcm": salt = os.urandom(16) # The server key is an ephemeral ECDH key used only for this # transaction server_key = ec.generate_private_key(ec.SECP256R1, default_backend()) crypto_key = server_key.public_key().public_bytes( encoding=serialization.Encoding.X962, format=serialization.PublicFormat.UncompressedPoint ) if isinstance(data, six.string_types): data = bytes(data.encode('utf8')) if content_encoding == "aes128gcm": encrypted = http_ece.encrypt( data, salt=salt, private_key=server_key, dh=self.receiver_key, auth_secret=self.auth_key, version=content_encoding) reply = CaseInsensitiveDict({ 'body': encrypted }) else: crypto_key = base64.urlsafe_b64encode(crypto_key).strip(b'=') encrypted = http_ece.encrypt( data, salt=salt, private_key=server_key, keyid=crypto_key.decode(), dh=self.receiver_key, auth_secret=self.auth_key, version=content_encoding) reply = CaseInsensitiveDict({ 'crypto_key': crypto_key, 'body': encrypted, }) if salt: reply['salt'] = base64.urlsafe_b64encode(salt).strip(b'=') return reply
python
def encode(self, data, content_encoding="aes128gcm"): """Encrypt the data. :param data: A serialized block of byte data (String, JSON, bit array, etc.) Make sure that whatever you send, your client knows how to understand it. :type data: str :param content_encoding: The content_encoding type to use to encrypt the data. Defaults to RFC8188 "aes128gcm". The previous draft-01 is "aesgcm", however this format is now deprecated. :type content_encoding: enum("aesgcm", "aes128gcm") """ # Salt is a random 16 byte array. if not data: return if not self.auth_key or not self.receiver_key: raise WebPushException("No keys specified in subscription info") salt = None if content_encoding not in self.valid_encodings: raise WebPushException("Invalid content encoding specified. " "Select from " + json.dumps(self.valid_encodings)) if content_encoding == "aesgcm": salt = os.urandom(16) # The server key is an ephemeral ECDH key used only for this # transaction server_key = ec.generate_private_key(ec.SECP256R1, default_backend()) crypto_key = server_key.public_key().public_bytes( encoding=serialization.Encoding.X962, format=serialization.PublicFormat.UncompressedPoint ) if isinstance(data, six.string_types): data = bytes(data.encode('utf8')) if content_encoding == "aes128gcm": encrypted = http_ece.encrypt( data, salt=salt, private_key=server_key, dh=self.receiver_key, auth_secret=self.auth_key, version=content_encoding) reply = CaseInsensitiveDict({ 'body': encrypted }) else: crypto_key = base64.urlsafe_b64encode(crypto_key).strip(b'=') encrypted = http_ece.encrypt( data, salt=salt, private_key=server_key, keyid=crypto_key.decode(), dh=self.receiver_key, auth_secret=self.auth_key, version=content_encoding) reply = CaseInsensitiveDict({ 'crypto_key': crypto_key, 'body': encrypted, }) if salt: reply['salt'] = base64.urlsafe_b64encode(salt).strip(b'=') return reply
[ "def", "encode", "(", "self", ",", "data", ",", "content_encoding", "=", "\"aes128gcm\"", ")", ":", "# Salt is a random 16 byte array.", "if", "not", "data", ":", "return", "if", "not", "self", ".", "auth_key", "or", "not", "self", ".", "receiver_key", ":", "raise", "WebPushException", "(", "\"No keys specified in subscription info\"", ")", "salt", "=", "None", "if", "content_encoding", "not", "in", "self", ".", "valid_encodings", ":", "raise", "WebPushException", "(", "\"Invalid content encoding specified. \"", "\"Select from \"", "+", "json", ".", "dumps", "(", "self", ".", "valid_encodings", ")", ")", "if", "content_encoding", "==", "\"aesgcm\"", ":", "salt", "=", "os", ".", "urandom", "(", "16", ")", "# The server key is an ephemeral ECDH key used only for this", "# transaction", "server_key", "=", "ec", ".", "generate_private_key", "(", "ec", ".", "SECP256R1", ",", "default_backend", "(", ")", ")", "crypto_key", "=", "server_key", ".", "public_key", "(", ")", ".", "public_bytes", "(", "encoding", "=", "serialization", ".", "Encoding", ".", "X962", ",", "format", "=", "serialization", ".", "PublicFormat", ".", "UncompressedPoint", ")", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "data", "=", "bytes", "(", "data", ".", "encode", "(", "'utf8'", ")", ")", "if", "content_encoding", "==", "\"aes128gcm\"", ":", "encrypted", "=", "http_ece", ".", "encrypt", "(", "data", ",", "salt", "=", "salt", ",", "private_key", "=", "server_key", ",", "dh", "=", "self", ".", "receiver_key", ",", "auth_secret", "=", "self", ".", "auth_key", ",", "version", "=", "content_encoding", ")", "reply", "=", "CaseInsensitiveDict", "(", "{", "'body'", ":", "encrypted", "}", ")", "else", ":", "crypto_key", "=", "base64", ".", "urlsafe_b64encode", "(", "crypto_key", ")", ".", "strip", "(", "b'='", ")", "encrypted", "=", "http_ece", ".", "encrypt", "(", "data", ",", "salt", "=", "salt", ",", "private_key", "=", "server_key", ",", "keyid", "=", "crypto_key", ".", "decode", "(", ")", ",", "dh", "=", "self", ".", "receiver_key", ",", "auth_secret", "=", "self", ".", "auth_key", ",", "version", "=", "content_encoding", ")", "reply", "=", "CaseInsensitiveDict", "(", "{", "'crypto_key'", ":", "crypto_key", ",", "'body'", ":", "encrypted", ",", "}", ")", "if", "salt", ":", "reply", "[", "'salt'", "]", "=", "base64", ".", "urlsafe_b64encode", "(", "salt", ")", ".", "strip", "(", "b'='", ")", "return", "reply" ]
Encrypt the data. :param data: A serialized block of byte data (String, JSON, bit array, etc.) Make sure that whatever you send, your client knows how to understand it. :type data: str :param content_encoding: The content_encoding type to use to encrypt the data. Defaults to RFC8188 "aes128gcm". The previous draft-01 is "aesgcm", however this format is now deprecated. :type content_encoding: enum("aesgcm", "aes128gcm")
[ "Encrypt", "the", "data", "." ]
2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4
https://github.com/web-push-libs/pywebpush/blob/2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4/pywebpush/__init__.py#L162-L224
train
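A sketch that exercises encode() end to end by generating a throwaway receiver key pair; this assumes WebPusher accepts urlsafe-base64 p256dh/auth values in subscription_info (as browser subscriptions provide them), and the endpoint is a placeholder:

import base64
import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

from pywebpush import WebPusher

# Throwaway receiver keys: p256dh is the uncompressed P-256 public point,
# auth is a random 16-byte secret, both urlsafe-base64 encoded.
receiver_private = ec.generate_private_key(ec.SECP256R1(), default_backend())
p256dh = base64.urlsafe_b64encode(
    receiver_private.public_key().public_bytes(
        serialization.Encoding.X962,
        serialization.PublicFormat.UncompressedPoint)).strip(b'=')
auth = base64.urlsafe_b64encode(os.urandom(16)).strip(b'=')

subscription_info = {
    "endpoint": "https://push.example.com/v1/abcd",   # placeholder endpoint
    "keys": {"p256dh": p256dh.decode('utf8'), "auth": auth.decode('utf8')},
}

encrypted = WebPusher(subscription_info).encode("hello", content_encoding="aes128gcm")
print(len(encrypted['body']))   # ciphertext that would become the POST body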
web-push-libs/pywebpush
pywebpush/__init__.py
WebPusher.send
def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None, content_encoding="aes128gcm", curl=False, timeout=None): """Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple """ # Encode the data. if headers is None: headers = dict() encoded = {} headers = CaseInsensitiveDict(headers) if data: encoded = self.encode(data, content_encoding) if "crypto_key" in encoded: # Append the p256dh to the end of any existing crypto-key crypto_key = headers.get("crypto-key", "") if crypto_key: # due to some confusion by a push service provider, we # should use ';' instead of ',' to append the headers. # see # https://github.com/webpush-wg/webpush-encryption/issues/6 crypto_key += ';' crypto_key += ( "dh=" + encoded["crypto_key"].decode('utf8')) headers.update({ 'crypto-key': crypto_key }) if "salt" in encoded: headers.update({ 'encryption': "salt=" + encoded['salt'].decode('utf8') }) headers.update({ 'content-encoding': content_encoding, }) if gcm_key: # guess if it is a legacy GCM project key or actual FCM key # gcm keys are all about 40 chars (use 100 for confidence), # fcm keys are 153-175 chars if len(gcm_key) < 100: endpoint = 'https://android.googleapis.com/gcm/send' else: endpoint = 'https://fcm.googleapis.com/fcm/send' reg_ids = [] if not reg_id: reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1] reg_ids.append(reg_id) gcm_data = dict() gcm_data['registration_ids'] = reg_ids if data: gcm_data['raw_data'] = base64.b64encode( encoded.get('body')).decode('utf8') gcm_data['time_to_live'] = int( headers['ttl'] if 'ttl' in headers else ttl) encoded_data = json.dumps(gcm_data) headers.update({ 'Authorization': 'key='+gcm_key, 'Content-Type': 'application/json', }) else: encoded_data = encoded.get('body') endpoint = self.subscription_info['endpoint'] if 'ttl' not in headers or ttl: headers['ttl'] = str(ttl or 0) # Additionally useful headers: # Authorization / Crypto-Key (VAPID headers) if curl: return self.as_curl(endpoint, encoded_data, headers) return self.requests_method.post(endpoint, data=encoded_data, headers=headers, timeout=timeout)
python
def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None, content_encoding="aes128gcm", curl=False, timeout=None): """Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple """ # Encode the data. if headers is None: headers = dict() encoded = {} headers = CaseInsensitiveDict(headers) if data: encoded = self.encode(data, content_encoding) if "crypto_key" in encoded: # Append the p256dh to the end of any existing crypto-key crypto_key = headers.get("crypto-key", "") if crypto_key: # due to some confusion by a push service provider, we # should use ';' instead of ',' to append the headers. # see # https://github.com/webpush-wg/webpush-encryption/issues/6 crypto_key += ';' crypto_key += ( "dh=" + encoded["crypto_key"].decode('utf8')) headers.update({ 'crypto-key': crypto_key }) if "salt" in encoded: headers.update({ 'encryption': "salt=" + encoded['salt'].decode('utf8') }) headers.update({ 'content-encoding': content_encoding, }) if gcm_key: # guess if it is a legacy GCM project key or actual FCM key # gcm keys are all about 40 chars (use 100 for confidence), # fcm keys are 153-175 chars if len(gcm_key) < 100: endpoint = 'https://android.googleapis.com/gcm/send' else: endpoint = 'https://fcm.googleapis.com/fcm/send' reg_ids = [] if not reg_id: reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1] reg_ids.append(reg_id) gcm_data = dict() gcm_data['registration_ids'] = reg_ids if data: gcm_data['raw_data'] = base64.b64encode( encoded.get('body')).decode('utf8') gcm_data['time_to_live'] = int( headers['ttl'] if 'ttl' in headers else ttl) encoded_data = json.dumps(gcm_data) headers.update({ 'Authorization': 'key='+gcm_key, 'Content-Type': 'application/json', }) else: encoded_data = encoded.get('body') endpoint = self.subscription_info['endpoint'] if 'ttl' not in headers or ttl: headers['ttl'] = str(ttl or 0) # Additionally useful headers: # Authorization / Crypto-Key (VAPID headers) if curl: return self.as_curl(endpoint, encoded_data, headers) return self.requests_method.post(endpoint, data=encoded_data, headers=headers, timeout=timeout)
[ "def", "send", "(", "self", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "ttl", "=", "0", ",", "gcm_key", "=", "None", ",", "reg_id", "=", "None", ",", "content_encoding", "=", "\"aes128gcm\"", ",", "curl", "=", "False", ",", "timeout", "=", "None", ")", ":", "# Encode the data.", "if", "headers", "is", "None", ":", "headers", "=", "dict", "(", ")", "encoded", "=", "{", "}", "headers", "=", "CaseInsensitiveDict", "(", "headers", ")", "if", "data", ":", "encoded", "=", "self", ".", "encode", "(", "data", ",", "content_encoding", ")", "if", "\"crypto_key\"", "in", "encoded", ":", "# Append the p256dh to the end of any existing crypto-key", "crypto_key", "=", "headers", ".", "get", "(", "\"crypto-key\"", ",", "\"\"", ")", "if", "crypto_key", ":", "# due to some confusion by a push service provider, we", "# should use ';' instead of ',' to append the headers.", "# see", "# https://github.com/webpush-wg/webpush-encryption/issues/6", "crypto_key", "+=", "';'", "crypto_key", "+=", "(", "\"dh=\"", "+", "encoded", "[", "\"crypto_key\"", "]", ".", "decode", "(", "'utf8'", ")", ")", "headers", ".", "update", "(", "{", "'crypto-key'", ":", "crypto_key", "}", ")", "if", "\"salt\"", "in", "encoded", ":", "headers", ".", "update", "(", "{", "'encryption'", ":", "\"salt=\"", "+", "encoded", "[", "'salt'", "]", ".", "decode", "(", "'utf8'", ")", "}", ")", "headers", ".", "update", "(", "{", "'content-encoding'", ":", "content_encoding", ",", "}", ")", "if", "gcm_key", ":", "# guess if it is a legacy GCM project key or actual FCM key", "# gcm keys are all about 40 chars (use 100 for confidence),", "# fcm keys are 153-175 chars", "if", "len", "(", "gcm_key", ")", "<", "100", ":", "endpoint", "=", "'https://android.googleapis.com/gcm/send'", "else", ":", "endpoint", "=", "'https://fcm.googleapis.com/fcm/send'", "reg_ids", "=", "[", "]", "if", "not", "reg_id", ":", "reg_id", "=", "self", ".", "subscription_info", "[", "'endpoint'", "]", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", "reg_ids", ".", "append", "(", "reg_id", ")", "gcm_data", "=", "dict", "(", ")", "gcm_data", "[", "'registration_ids'", "]", "=", "reg_ids", "if", "data", ":", "gcm_data", "[", "'raw_data'", "]", "=", "base64", ".", "b64encode", "(", "encoded", ".", "get", "(", "'body'", ")", ")", ".", "decode", "(", "'utf8'", ")", "gcm_data", "[", "'time_to_live'", "]", "=", "int", "(", "headers", "[", "'ttl'", "]", "if", "'ttl'", "in", "headers", "else", "ttl", ")", "encoded_data", "=", "json", ".", "dumps", "(", "gcm_data", ")", "headers", ".", "update", "(", "{", "'Authorization'", ":", "'key='", "+", "gcm_key", ",", "'Content-Type'", ":", "'application/json'", ",", "}", ")", "else", ":", "encoded_data", "=", "encoded", ".", "get", "(", "'body'", ")", "endpoint", "=", "self", ".", "subscription_info", "[", "'endpoint'", "]", "if", "'ttl'", "not", "in", "headers", "or", "ttl", ":", "headers", "[", "'ttl'", "]", "=", "str", "(", "ttl", "or", "0", ")", "# Additionally useful headers:", "# Authorization / Crypto-Key (VAPID headers)", "if", "curl", ":", "return", "self", ".", "as_curl", "(", "endpoint", ",", "encoded_data", ",", "headers", ")", "return", "self", ".", "requests_method", ".", "post", "(", "endpoint", ",", "data", "=", "encoded_data", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")" ]
Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple
[ "Encode", "and", "send", "the", "data", "to", "the", "Push", "Service", "." ]
2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4
https://github.com/web-push-libs/pywebpush/blob/2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4/pywebpush/__init__.py#L256-L347
train
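A minimal usage sketch for the send() method recorded above (not part of the dataset entry). The endpoint and key values are placeholders standing in for a real browser Push API subscription, so the snippet illustrates the shape of the call rather than a working push; VAPID authorization headers are omitted.

from pywebpush import WebPusher

subscription_info = {
    "endpoint": "https://updates.push.services.mozilla.com/wpush/v2/EXAMPLE",
    "keys": {"p256dh": "REPLACE_WITH_CLIENT_PUBLIC_KEY",
             "auth": "REPLACE_WITH_CLIENT_AUTH_SECRET"},
}
pusher = WebPusher(subscription_info)
# ttl=60 keeps the message queued for a minute if the recipient is offline;
# passing curl=True instead would return an equivalent curl command string.
response = pusher.send(data="Hello from the server", ttl=60)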
martijnvermaat/calmap
calmap/__init__.py
calendarplot
def calendarplot(data, how='sum', yearlabels=True, yearascending=True, yearlabel_kws=None, subplot_kws=None, gridspec_kws=None, fig_kws=None, **kwargs): """ Plot a timeseries as a calendar heatmap. Parameters ---------- data : Series Data for the plot. Must be indexed by a DatetimeIndex. how : string Method for resampling data by day. If `None`, assume data is already sampled by day and don't resample. Otherwise, this is passed to Pandas `Series.resample`. yearlabels : bool Whether or not to draw the year for each subplot. yearascending : bool Sort the calendar in ascending or descending order. yearlabel_kws : dict Keyword arguments passed to the matplotlib `set_ylabel` call which is used to draw the year for each subplot. subplot_kws : dict Keyword arguments passed to the matplotlib `add_subplot` call used to create each subplot. gridspec_kws : dict Keyword arguments passed to the matplotlib `GridSpec` constructor used to create the grid the subplots are placed on. fig_kws : dict Keyword arguments passed to the matplotlib `figure` call. kwargs : other keyword arguments All other keyword arguments are passed to `yearplot`. Returns ------- fig, axes : matplotlib Figure and Axes Tuple where `fig` is the matplotlib Figure object `axes` is an array of matplotlib Axes objects with the calendar heatmaps, one per year. Examples -------- With `calendarplot` we can plot several years in one figure: .. plot:: :context: close-figs calmap.calendarplot(events) """ yearlabel_kws = yearlabel_kws or {} subplot_kws = subplot_kws or {} gridspec_kws = gridspec_kws or {} fig_kws = fig_kws or {} years = np.unique(data.index.year) if not yearascending: years = years[::-1] fig, axes = plt.subplots(nrows=len(years), ncols=1, squeeze=False, subplot_kw=subplot_kws, gridspec_kw=gridspec_kws, **fig_kws) axes = axes.T[0] # We explicitely resample by day only once. This is an optimization. if how is None: by_day = data else: if _pandas_18: by_day = data.resample('D').agg(how) else: by_day = data.resample('D', how=how) ylabel_kws = dict( fontsize=32, color=kwargs.get('fillcolor', 'whitesmoke'), fontweight='bold', fontname='Arial', ha='center') ylabel_kws.update(yearlabel_kws) max_weeks = 0 for year, ax in zip(years, axes): yearplot(by_day, year=year, how=None, ax=ax, **kwargs) max_weeks = max(max_weeks, ax.get_xlim()[1]) if yearlabels: ax.set_ylabel(str(year), **ylabel_kws) # In a leap year it might happen that we have 54 weeks (e.g., 2012). # Here we make sure the width is consistent over all years. for ax in axes: ax.set_xlim(0, max_weeks) # Make the axes look good. plt.tight_layout() return fig, axes
python
def calendarplot(data, how='sum', yearlabels=True, yearascending=True, yearlabel_kws=None, subplot_kws=None, gridspec_kws=None, fig_kws=None, **kwargs): """ Plot a timeseries as a calendar heatmap. Parameters ---------- data : Series Data for the plot. Must be indexed by a DatetimeIndex. how : string Method for resampling data by day. If `None`, assume data is already sampled by day and don't resample. Otherwise, this is passed to Pandas `Series.resample`. yearlabels : bool Whether or not to draw the year for each subplot. yearascending : bool Sort the calendar in ascending or descending order. yearlabel_kws : dict Keyword arguments passed to the matplotlib `set_ylabel` call which is used to draw the year for each subplot. subplot_kws : dict Keyword arguments passed to the matplotlib `add_subplot` call used to create each subplot. gridspec_kws : dict Keyword arguments passed to the matplotlib `GridSpec` constructor used to create the grid the subplots are placed on. fig_kws : dict Keyword arguments passed to the matplotlib `figure` call. kwargs : other keyword arguments All other keyword arguments are passed to `yearplot`. Returns ------- fig, axes : matplotlib Figure and Axes Tuple where `fig` is the matplotlib Figure object `axes` is an array of matplotlib Axes objects with the calendar heatmaps, one per year. Examples -------- With `calendarplot` we can plot several years in one figure: .. plot:: :context: close-figs calmap.calendarplot(events) """ yearlabel_kws = yearlabel_kws or {} subplot_kws = subplot_kws or {} gridspec_kws = gridspec_kws or {} fig_kws = fig_kws or {} years = np.unique(data.index.year) if not yearascending: years = years[::-1] fig, axes = plt.subplots(nrows=len(years), ncols=1, squeeze=False, subplot_kw=subplot_kws, gridspec_kw=gridspec_kws, **fig_kws) axes = axes.T[0] # We explicitely resample by day only once. This is an optimization. if how is None: by_day = data else: if _pandas_18: by_day = data.resample('D').agg(how) else: by_day = data.resample('D', how=how) ylabel_kws = dict( fontsize=32, color=kwargs.get('fillcolor', 'whitesmoke'), fontweight='bold', fontname='Arial', ha='center') ylabel_kws.update(yearlabel_kws) max_weeks = 0 for year, ax in zip(years, axes): yearplot(by_day, year=year, how=None, ax=ax, **kwargs) max_weeks = max(max_weeks, ax.get_xlim()[1]) if yearlabels: ax.set_ylabel(str(year), **ylabel_kws) # In a leap year it might happen that we have 54 weeks (e.g., 2012). # Here we make sure the width is consistent over all years. for ax in axes: ax.set_xlim(0, max_weeks) # Make the axes look good. plt.tight_layout() return fig, axes
[ "def", "calendarplot", "(", "data", ",", "how", "=", "'sum'", ",", "yearlabels", "=", "True", ",", "yearascending", "=", "True", ",", "yearlabel_kws", "=", "None", ",", "subplot_kws", "=", "None", ",", "gridspec_kws", "=", "None", ",", "fig_kws", "=", "None", ",", "*", "*", "kwargs", ")", ":", "yearlabel_kws", "=", "yearlabel_kws", "or", "{", "}", "subplot_kws", "=", "subplot_kws", "or", "{", "}", "gridspec_kws", "=", "gridspec_kws", "or", "{", "}", "fig_kws", "=", "fig_kws", "or", "{", "}", "years", "=", "np", ".", "unique", "(", "data", ".", "index", ".", "year", ")", "if", "not", "yearascending", ":", "years", "=", "years", "[", ":", ":", "-", "1", "]", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "len", "(", "years", ")", ",", "ncols", "=", "1", ",", "squeeze", "=", "False", ",", "subplot_kw", "=", "subplot_kws", ",", "gridspec_kw", "=", "gridspec_kws", ",", "*", "*", "fig_kws", ")", "axes", "=", "axes", ".", "T", "[", "0", "]", "# We explicitely resample by day only once. This is an optimization.", "if", "how", "is", "None", ":", "by_day", "=", "data", "else", ":", "if", "_pandas_18", ":", "by_day", "=", "data", ".", "resample", "(", "'D'", ")", ".", "agg", "(", "how", ")", "else", ":", "by_day", "=", "data", ".", "resample", "(", "'D'", ",", "how", "=", "how", ")", "ylabel_kws", "=", "dict", "(", "fontsize", "=", "32", ",", "color", "=", "kwargs", ".", "get", "(", "'fillcolor'", ",", "'whitesmoke'", ")", ",", "fontweight", "=", "'bold'", ",", "fontname", "=", "'Arial'", ",", "ha", "=", "'center'", ")", "ylabel_kws", ".", "update", "(", "yearlabel_kws", ")", "max_weeks", "=", "0", "for", "year", ",", "ax", "in", "zip", "(", "years", ",", "axes", ")", ":", "yearplot", "(", "by_day", ",", "year", "=", "year", ",", "how", "=", "None", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "max_weeks", "=", "max", "(", "max_weeks", ",", "ax", ".", "get_xlim", "(", ")", "[", "1", "]", ")", "if", "yearlabels", ":", "ax", ".", "set_ylabel", "(", "str", "(", "year", ")", ",", "*", "*", "ylabel_kws", ")", "# In a leap year it might happen that we have 54 weeks (e.g., 2012).", "# Here we make sure the width is consistent over all years.", "for", "ax", "in", "axes", ":", "ax", ".", "set_xlim", "(", "0", ",", "max_weeks", ")", "# Make the axes look good.", "plt", ".", "tight_layout", "(", ")", "return", "fig", ",", "axes" ]
Plot a timeseries as a calendar heatmap. Parameters ---------- data : Series Data for the plot. Must be indexed by a DatetimeIndex. how : string Method for resampling data by day. If `None`, assume data is already sampled by day and don't resample. Otherwise, this is passed to Pandas `Series.resample`. yearlabels : bool Whether or not to draw the year for each subplot. yearascending : bool Sort the calendar in ascending or descending order. yearlabel_kws : dict Keyword arguments passed to the matplotlib `set_ylabel` call which is used to draw the year for each subplot. subplot_kws : dict Keyword arguments passed to the matplotlib `add_subplot` call used to create each subplot. gridspec_kws : dict Keyword arguments passed to the matplotlib `GridSpec` constructor used to create the grid the subplots are placed on. fig_kws : dict Keyword arguments passed to the matplotlib `figure` call. kwargs : other keyword arguments All other keyword arguments are passed to `yearplot`. Returns ------- fig, axes : matplotlib Figure and Axes Tuple where `fig` is the matplotlib Figure object `axes` is an array of matplotlib Axes objects with the calendar heatmaps, one per year. Examples -------- With `calendarplot` we can plot several years in one figure: .. plot:: :context: close-figs calmap.calendarplot(events)
[ "Plot", "a", "timeseries", "as", "a", "calendar", "heatmap", "." ]
83e2a9a0bdc773c9e48e05772fb412ac8deb8bae
https://github.com/martijnvermaat/calmap/blob/83e2a9a0bdc773c9e48e05772fb412ac8deb8bae/calmap/__init__.py#L233-L329
train
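An illustrative sketch (not part of the record) of calling calendarplot with a synthetic daily series; the figure size is arbitrary and only shows how fig_kws is forwarded to matplotlib, while any extra keyword arguments would be passed through to yearplot as the docstring describes.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import calmap

days = pd.date_range('2015-01-01', '2016-12-31', freq='D')
events = pd.Series(np.random.randn(len(days)), index=days)

# One subplot per year, with consistent week widths across leap years.
fig, axes = calmap.calendarplot(events, how='sum', fig_kws={'figsize': (12, 6)})
plt.show()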
Frojd/wagtail-geo-widget
wagtailgeowidget/helpers.py
geosgeometry_str_to_struct
def geosgeometry_str_to_struct(value): ''' Parses a geosgeometry string into struct. Example: SRID=5432;POINT(12.0 13.0) Returns: >> [5432, 12.0, 13.0] ''' result = geos_ptrn.match(value) if not result: return None return { 'srid': result.group(1), 'x': result.group(2), 'y': result.group(3), }
python
def geosgeometry_str_to_struct(value): ''' Parses a geosgeometry string into struct. Example: SRID=5432;POINT(12.0 13.0) Returns: >> [5432, 12.0, 13.0] ''' result = geos_ptrn.match(value) if not result: return None return { 'srid': result.group(1), 'x': result.group(2), 'y': result.group(3), }
[ "def", "geosgeometry_str_to_struct", "(", "value", ")", ":", "result", "=", "geos_ptrn", ".", "match", "(", "value", ")", "if", "not", "result", ":", "return", "None", "return", "{", "'srid'", ":", "result", ".", "group", "(", "1", ")", ",", "'x'", ":", "result", ".", "group", "(", "2", ")", ",", "'y'", ":", "result", ".", "group", "(", "3", ")", ",", "}" ]
Parses a geosgeometry string into struct. Example: SRID=5432;POINT(12.0 13.0) Returns: >> [5432, 12.0, 13.0]
[ "Parses", "a", "geosgeometry", "string", "into", "struct", "." ]
3482891be8903293812738d9128b9bda03b9a2ba
https://github.com/Frojd/wagtail-geo-widget/blob/3482891be8903293812738d9128b9bda03b9a2ba/wagtailgeowidget/helpers.py#L8-L27
train
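A small sketch of the helper above in use. As written, the regex groups come back as strings rather than numbers, so a caller is expected to cast them; the SRID value here is just an example.

from wagtailgeowidget.helpers import geosgeometry_str_to_struct

struct = geosgeometry_str_to_struct('SRID=4326;POINT(12.0 13.0)')
# struct == {'srid': '4326', 'x': '12.0', 'y': '13.0'}
x, y = float(struct['x']), float(struct['y'])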
Frojd/wagtail-geo-widget
example/examplesite/settings/__init__.py
get_env
def get_env(name, default=None): """Get the environment variable or return exception""" if name in os.environ: return os.environ[name] if default is not None: return default error_msg = "Set the {} env variable".format(name) raise ImproperlyConfigured(error_msg)
python
def get_env(name, default=None): """Get the environment variable or return exception""" if name in os.environ: return os.environ[name] if default is not None: return default error_msg = "Set the {} env variable".format(name) raise ImproperlyConfigured(error_msg)
[ "def", "get_env", "(", "name", ",", "default", "=", "None", ")", ":", "if", "name", "in", "os", ".", "environ", ":", "return", "os", ".", "environ", "[", "name", "]", "if", "default", "is", "not", "None", ":", "return", "default", "error_msg", "=", "\"Set the {} env variable\"", ".", "format", "(", "name", ")", "raise", "ImproperlyConfigured", "(", "error_msg", ")" ]
Get the environment variable or return exception
[ "Get", "the", "environment", "variable", "or", "return", "exception" ]
3482891be8903293812738d9128b9bda03b9a2ba
https://github.com/Frojd/wagtail-geo-widget/blob/3482891be8903293812738d9128b9bda03b9a2ba/example/examplesite/settings/__init__.py#L6-L15
train
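A sketch of how get_env is typically used inside the settings module it lives in, assuming the function above is in scope; the variable names are made up for illustration. A name that is unset and has no default raises ImproperlyConfigured immediately instead of failing later at runtime.

import os
os.environ.setdefault('DATABASE_HOST', 'localhost')

DATABASE_HOST = get_env('DATABASE_HOST')                 # read from the environment
SECRET_KEY = get_env('SECRET_KEY', default='insecure')   # falls back to the default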
newville/asteval
asteval/asteval.py
Interpreter.user_defined_symbols
def user_defined_symbols(self): """Return a set of symbols that have been added to symtable after construction. I.e., the symbols from self.symtable that are not in self.no_deepcopy. Returns ------- unique_symbols : set symbols in symtable that are not in self.no_deepcopy """ sym_in_current = set(self.symtable.keys()) sym_from_construction = set(self.no_deepcopy) unique_symbols = sym_in_current.difference(sym_from_construction) return unique_symbols
python
def user_defined_symbols(self): """Return a set of symbols that have been added to symtable after construction. I.e., the symbols from self.symtable that are not in self.no_deepcopy. Returns ------- unique_symbols : set symbols in symtable that are not in self.no_deepcopy """ sym_in_current = set(self.symtable.keys()) sym_from_construction = set(self.no_deepcopy) unique_symbols = sym_in_current.difference(sym_from_construction) return unique_symbols
[ "def", "user_defined_symbols", "(", "self", ")", ":", "sym_in_current", "=", "set", "(", "self", ".", "symtable", ".", "keys", "(", ")", ")", "sym_from_construction", "=", "set", "(", "self", ".", "no_deepcopy", ")", "unique_symbols", "=", "sym_in_current", ".", "difference", "(", "sym_from_construction", ")", "return", "unique_symbols" ]
Return a set of symbols that have been added to symtable after construction. I.e., the symbols from self.symtable that are not in self.no_deepcopy. Returns ------- unique_symbols : set symbols in symtable that are not in self.no_deepcopy
[ "Return", "a", "set", "of", "symbols", "that", "have", "been", "added", "to", "symtable", "after", "construction", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L216-L232
train
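A brief sketch, assuming the usual `from asteval import Interpreter` entry point: names created by evaluated code are reported by user_defined_symbols(), while the symbols installed at construction are filtered out via no_deepcopy.

from asteval import Interpreter

aeval = Interpreter()
aeval('x = 3')
aeval('y = x ** 2')
print(aeval.user_defined_symbols())   # e.g. {'x', 'y'}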
newville/asteval
asteval/asteval.py
Interpreter.unimplemented
def unimplemented(self, node): """Unimplemented nodes.""" self.raise_exception(node, exc=NotImplementedError, msg="'%s' not supported" % (node.__class__.__name__))
python
def unimplemented(self, node): """Unimplemented nodes.""" self.raise_exception(node, exc=NotImplementedError, msg="'%s' not supported" % (node.__class__.__name__))
[ "def", "unimplemented", "(", "self", ",", "node", ")", ":", "self", ".", "raise_exception", "(", "node", ",", "exc", "=", "NotImplementedError", ",", "msg", "=", "\"'%s' not supported\"", "%", "(", "node", ".", "__class__", ".", "__name__", ")", ")" ]
Unimplemented nodes.
[ "Unimplemented", "nodes", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L234-L238
train
newville/asteval
asteval/asteval.py
Interpreter.raise_exception
def raise_exception(self, node, exc=None, msg='', expr=None, lineno=None): """Add an exception.""" if self.error is None: self.error = [] if expr is None: expr = self.expr if len(self.error) > 0 and not isinstance(node, ast.Module): msg = '%s' % msg err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno) self._interrupt = ast.Break() self.error.append(err) if self.error_msg is None: self.error_msg = "at expr='%s'" % (self.expr) elif len(msg) > 0: self.error_msg = msg if exc is None: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(self.error_msg)
python
def raise_exception(self, node, exc=None, msg='', expr=None, lineno=None): """Add an exception.""" if self.error is None: self.error = [] if expr is None: expr = self.expr if len(self.error) > 0 and not isinstance(node, ast.Module): msg = '%s' % msg err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno) self._interrupt = ast.Break() self.error.append(err) if self.error_msg is None: self.error_msg = "at expr='%s'" % (self.expr) elif len(msg) > 0: self.error_msg = msg if exc is None: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(self.error_msg)
[ "def", "raise_exception", "(", "self", ",", "node", ",", "exc", "=", "None", ",", "msg", "=", "''", ",", "expr", "=", "None", ",", "lineno", "=", "None", ")", ":", "if", "self", ".", "error", "is", "None", ":", "self", ".", "error", "=", "[", "]", "if", "expr", "is", "None", ":", "expr", "=", "self", ".", "expr", "if", "len", "(", "self", ".", "error", ")", ">", "0", "and", "not", "isinstance", "(", "node", ",", "ast", ".", "Module", ")", ":", "msg", "=", "'%s'", "%", "msg", "err", "=", "ExceptionHolder", "(", "node", ",", "exc", "=", "exc", ",", "msg", "=", "msg", ",", "expr", "=", "expr", ",", "lineno", "=", "lineno", ")", "self", ".", "_interrupt", "=", "ast", ".", "Break", "(", ")", "self", ".", "error", ".", "append", "(", "err", ")", "if", "self", ".", "error_msg", "is", "None", ":", "self", ".", "error_msg", "=", "\"at expr='%s'\"", "%", "(", "self", ".", "expr", ")", "elif", "len", "(", "msg", ")", ">", "0", ":", "self", ".", "error_msg", "=", "msg", "if", "exc", "is", "None", ":", "try", ":", "exc", "=", "self", ".", "error", "[", "0", "]", ".", "exc", "except", ":", "exc", "=", "RuntimeError", "raise", "exc", "(", "self", ".", "error_msg", ")" ]
Add an exception.
[ "Add", "an", "exception", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L240-L261
train
newville/asteval
asteval/asteval.py
Interpreter.run
def run(self, node, expr=None, lineno=None, with_raise=True): """Execute parsed Ast representation for an expression.""" # Note: keep the 'node is None' test: internal code here may run # run(None) and expect a None in return. if time.time() - self.start_time > self.max_time: raise RuntimeError(ERR_MAX_TIME.format(self.max_time)) out = None if len(self.error) > 0: return out if node is None: return out if isinstance(node, str): node = self.parse(node) if lineno is not None: self.lineno = lineno if expr is not None: self.expr = expr # get handler for this node: # on_xxx with handle nodes of type 'xxx', etc try: handler = self.node_handlers[node.__class__.__name__.lower()] except KeyError: return self.unimplemented(node) # run the handler: this will likely generate # recursive calls into this run method. try: ret = handler(node) if isinstance(ret, enumerate): ret = list(ret) return ret except: if with_raise: self.raise_exception(node, expr=expr)
python
def run(self, node, expr=None, lineno=None, with_raise=True): """Execute parsed Ast representation for an expression.""" # Note: keep the 'node is None' test: internal code here may run # run(None) and expect a None in return. if time.time() - self.start_time > self.max_time: raise RuntimeError(ERR_MAX_TIME.format(self.max_time)) out = None if len(self.error) > 0: return out if node is None: return out if isinstance(node, str): node = self.parse(node) if lineno is not None: self.lineno = lineno if expr is not None: self.expr = expr # get handler for this node: # on_xxx with handle nodes of type 'xxx', etc try: handler = self.node_handlers[node.__class__.__name__.lower()] except KeyError: return self.unimplemented(node) # run the handler: this will likely generate # recursive calls into this run method. try: ret = handler(node) if isinstance(ret, enumerate): ret = list(ret) return ret except: if with_raise: self.raise_exception(node, expr=expr)
[ "def", "run", "(", "self", ",", "node", ",", "expr", "=", "None", ",", "lineno", "=", "None", ",", "with_raise", "=", "True", ")", ":", "# Note: keep the 'node is None' test: internal code here may run", "# run(None) and expect a None in return.", "if", "time", ".", "time", "(", ")", "-", "self", ".", "start_time", ">", "self", ".", "max_time", ":", "raise", "RuntimeError", "(", "ERR_MAX_TIME", ".", "format", "(", "self", ".", "max_time", ")", ")", "out", "=", "None", "if", "len", "(", "self", ".", "error", ")", ">", "0", ":", "return", "out", "if", "node", "is", "None", ":", "return", "out", "if", "isinstance", "(", "node", ",", "str", ")", ":", "node", "=", "self", ".", "parse", "(", "node", ")", "if", "lineno", "is", "not", "None", ":", "self", ".", "lineno", "=", "lineno", "if", "expr", "is", "not", "None", ":", "self", ".", "expr", "=", "expr", "# get handler for this node:", "# on_xxx with handle nodes of type 'xxx', etc", "try", ":", "handler", "=", "self", ".", "node_handlers", "[", "node", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "return", "self", ".", "unimplemented", "(", "node", ")", "# run the handler: this will likely generate", "# recursive calls into this run method.", "try", ":", "ret", "=", "handler", "(", "node", ")", "if", "isinstance", "(", "ret", ",", "enumerate", ")", ":", "ret", "=", "list", "(", "ret", ")", "return", "ret", "except", ":", "if", "with_raise", ":", "self", ".", "raise_exception", "(", "node", ",", "expr", "=", "expr", ")" ]
Execute parsed Ast representation for an expression.
[ "Execute", "parsed", "Ast", "representation", "for", "an", "expression", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L279-L313
train
newville/asteval
asteval/asteval.py
Interpreter.eval
def eval(self, expr, lineno=0, show_errors=True): """Evaluate a single statement.""" self.lineno = lineno self.error = [] self.start_time = time.time() try: node = self.parse(expr) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return try: return self.run(node, expr=expr, lineno=lineno) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return
python
def eval(self, expr, lineno=0, show_errors=True): """Evaluate a single statement.""" self.lineno = lineno self.error = [] self.start_time = time.time() try: node = self.parse(expr) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return try: return self.run(node, expr=expr, lineno=lineno) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return
[ "def", "eval", "(", "self", ",", "expr", ",", "lineno", "=", "0", ",", "show_errors", "=", "True", ")", ":", "self", ".", "lineno", "=", "lineno", "self", ".", "error", "=", "[", "]", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "try", ":", "node", "=", "self", ".", "parse", "(", "expr", ")", "except", ":", "errmsg", "=", "exc_info", "(", ")", "[", "1", "]", "if", "len", "(", "self", ".", "error", ")", ">", "0", ":", "errmsg", "=", "\"\\n\"", ".", "join", "(", "self", ".", "error", "[", "0", "]", ".", "get_error", "(", ")", ")", "if", "not", "show_errors", ":", "try", ":", "exc", "=", "self", ".", "error", "[", "0", "]", ".", "exc", "except", ":", "exc", "=", "RuntimeError", "raise", "exc", "(", "errmsg", ")", "print", "(", "errmsg", ",", "file", "=", "self", ".", "err_writer", ")", "return", "try", ":", "return", "self", ".", "run", "(", "node", ",", "expr", "=", "expr", ",", "lineno", "=", "lineno", ")", "except", ":", "errmsg", "=", "exc_info", "(", ")", "[", "1", "]", "if", "len", "(", "self", ".", "error", ")", ">", "0", ":", "errmsg", "=", "\"\\n\"", ".", "join", "(", "self", ".", "error", "[", "0", "]", ".", "get_error", "(", ")", ")", "if", "not", "show_errors", ":", "try", ":", "exc", "=", "self", ".", "error", "[", "0", "]", ".", "exc", "except", ":", "exc", "=", "RuntimeError", "raise", "exc", "(", "errmsg", ")", "print", "(", "errmsg", ",", "file", "=", "self", ".", "err_writer", ")", "return" ]
Evaluate a single statement.
[ "Evaluate", "a", "single", "statement", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L319-L351
train
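A sketch of eval()'s two error modes, using the public Interpreter class: by default errors are reported to err_writer and None is returned, while show_errors=False re-raises the recorded exception type.

from asteval import Interpreter

aeval = Interpreter()
print(aeval.eval('sqrt(16.0)'))      # 4.0 -- sqrt is among the symbols loaded by default
aeval.eval('1/0')                    # prints a ZeroDivisionError report, returns None
try:
    aeval.eval('1/0', show_errors=False)
except ZeroDivisionError as exc:
    print('re-raised:', exc)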
newville/asteval
asteval/asteval.py
Interpreter.on_module
def on_module(self, node): # ():('body',) """Module def.""" out = None for tnode in node.body: out = self.run(tnode) return out
python
def on_module(self, node): # ():('body',) """Module def.""" out = None for tnode in node.body: out = self.run(tnode) return out
[ "def", "on_module", "(", "self", ",", "node", ")", ":", "# ():('body',)", "out", "=", "None", "for", "tnode", "in", "node", ".", "body", ":", "out", "=", "self", ".", "run", "(", "tnode", ")", "return", "out" ]
Module def.
[ "Module", "def", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L378-L383
train
newville/asteval
asteval/asteval.py
Interpreter.on_assert
def on_assert(self, node): # ('test', 'msg') """Assert statement.""" if not self.run(node.test): self.raise_exception(node, exc=AssertionError, msg=node.msg) return True
python
def on_assert(self, node): # ('test', 'msg') """Assert statement.""" if not self.run(node.test): self.raise_exception(node, exc=AssertionError, msg=node.msg) return True
[ "def", "on_assert", "(", "self", ",", "node", ")", ":", "# ('test', 'msg')", "if", "not", "self", ".", "run", "(", "node", ".", "test", ")", ":", "self", ".", "raise_exception", "(", "node", ",", "exc", "=", "AssertionError", ",", "msg", "=", "node", ".", "msg", ")", "return", "True" ]
Assert statement.
[ "Assert", "statement", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L411-L415
train
newville/asteval
asteval/asteval.py
Interpreter.on_name
def on_name(self, node): # ('id', 'ctx') """Name node.""" ctx = node.ctx.__class__ if ctx in (ast.Param, ast.Del): return str(node.id) else: if node.id in self.symtable: return self.symtable[node.id] else: msg = "name '%s' is not defined" % node.id self.raise_exception(node, exc=NameError, msg=msg)
python
def on_name(self, node): # ('id', 'ctx') """Name node.""" ctx = node.ctx.__class__ if ctx in (ast.Param, ast.Del): return str(node.id) else: if node.id in self.symtable: return self.symtable[node.id] else: msg = "name '%s' is not defined" % node.id self.raise_exception(node, exc=NameError, msg=msg)
[ "def", "on_name", "(", "self", ",", "node", ")", ":", "# ('id', 'ctx')", "ctx", "=", "node", ".", "ctx", ".", "__class__", "if", "ctx", "in", "(", "ast", ".", "Param", ",", "ast", ".", "Del", ")", ":", "return", "str", "(", "node", ".", "id", ")", "else", ":", "if", "node", ".", "id", "in", "self", ".", "symtable", ":", "return", "self", ".", "symtable", "[", "node", ".", "id", "]", "else", ":", "msg", "=", "\"name '%s' is not defined\"", "%", "node", ".", "id", "self", ".", "raise_exception", "(", "node", ",", "exc", "=", "NameError", ",", "msg", "=", "msg", ")" ]
Name node.
[ "Name", "node", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L442-L452
train
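A sketch of the name lookup above as seen from the outside: defined names resolve through symtable, and an unknown name surfaces as the NameError raised in the handler.

from asteval import Interpreter

aeval = Interpreter()
aeval('answer = 42')
print(aeval('answer'))                         # 42, resolved via aeval.symtable
try:
    aeval.eval('missing', show_errors=False)   # no such symbol
except NameError as exc:
    print(exc)                                 # "name 'missing' is not defined" plus context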
newville/asteval
asteval/asteval.py
Interpreter.on_attribute
def on_attribute(self, node): # ('value', 'attr', 'ctx') """Extract attribute.""" ctx = node.ctx.__class__ if ctx == ast.Store: msg = "attribute for storage: shouldn't be here!" self.raise_exception(node, exc=RuntimeError, msg=msg) sym = self.run(node.value) if ctx == ast.Del: return delattr(sym, node.attr) # ctx is ast.Load fmt = "cannnot access attribute '%s' for %s" if node.attr not in UNSAFE_ATTRS: fmt = "no attribute '%s' for %s" try: return getattr(sym, node.attr) except AttributeError: pass # AttributeError or accessed unsafe attribute obj = self.run(node.value) msg = fmt % (node.attr, obj) self.raise_exception(node, exc=AttributeError, msg=msg)
python
def on_attribute(self, node): # ('value', 'attr', 'ctx') """Extract attribute.""" ctx = node.ctx.__class__ if ctx == ast.Store: msg = "attribute for storage: shouldn't be here!" self.raise_exception(node, exc=RuntimeError, msg=msg) sym = self.run(node.value) if ctx == ast.Del: return delattr(sym, node.attr) # ctx is ast.Load fmt = "cannnot access attribute '%s' for %s" if node.attr not in UNSAFE_ATTRS: fmt = "no attribute '%s' for %s" try: return getattr(sym, node.attr) except AttributeError: pass # AttributeError or accessed unsafe attribute obj = self.run(node.value) msg = fmt % (node.attr, obj) self.raise_exception(node, exc=AttributeError, msg=msg)
[ "def", "on_attribute", "(", "self", ",", "node", ")", ":", "# ('value', 'attr', 'ctx')", "ctx", "=", "node", ".", "ctx", ".", "__class__", "if", "ctx", "==", "ast", ".", "Store", ":", "msg", "=", "\"attribute for storage: shouldn't be here!\"", "self", ".", "raise_exception", "(", "node", ",", "exc", "=", "RuntimeError", ",", "msg", "=", "msg", ")", "sym", "=", "self", ".", "run", "(", "node", ".", "value", ")", "if", "ctx", "==", "ast", ".", "Del", ":", "return", "delattr", "(", "sym", ",", "node", ".", "attr", ")", "# ctx is ast.Load", "fmt", "=", "\"cannnot access attribute '%s' for %s\"", "if", "node", ".", "attr", "not", "in", "UNSAFE_ATTRS", ":", "fmt", "=", "\"no attribute '%s' for %s\"", "try", ":", "return", "getattr", "(", "sym", ",", "node", ".", "attr", ")", "except", "AttributeError", ":", "pass", "# AttributeError or accessed unsafe attribute", "obj", "=", "self", ".", "run", "(", "node", ".", "value", ")", "msg", "=", "fmt", "%", "(", "node", ".", "attr", ",", "obj", ")", "self", ".", "raise_exception", "(", "node", ",", "exc", "=", "AttributeError", ",", "msg", "=", "msg", ")" ]
Extract attribute.
[ "Extract", "attribute", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L496-L519
train
newville/asteval
asteval/asteval.py
Interpreter.on_assign
def on_assign(self, node): # ('targets', 'value') """Simple assignment.""" val = self.run(node.value) for tnode in node.targets: self.node_assign(tnode, val) return
python
def on_assign(self, node): # ('targets', 'value') """Simple assignment.""" val = self.run(node.value) for tnode in node.targets: self.node_assign(tnode, val) return
[ "def", "on_assign", "(", "self", ",", "node", ")", ":", "# ('targets', 'value')", "val", "=", "self", ".", "run", "(", "node", ".", "value", ")", "for", "tnode", "in", "node", ".", "targets", ":", "self", ".", "node_assign", "(", "tnode", ",", "val", ")", "return" ]
Simple assignment.
[ "Simple", "assignment", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L521-L526
train
newville/asteval
asteval/asteval.py
Interpreter.on_augassign
def on_augassign(self, node): # ('target', 'op', 'value') """Augmented assign.""" return self.on_assign(ast.Assign(targets=[node.target], value=ast.BinOp(left=node.target, op=node.op, right=node.value)))
python
def on_augassign(self, node): # ('target', 'op', 'value') """Augmented assign.""" return self.on_assign(ast.Assign(targets=[node.target], value=ast.BinOp(left=node.target, op=node.op, right=node.value)))
[ "def", "on_augassign", "(", "self", ",", "node", ")", ":", "# ('target', 'op', 'value')", "return", "self", ".", "on_assign", "(", "ast", ".", "Assign", "(", "targets", "=", "[", "node", ".", "target", "]", ",", "value", "=", "ast", ".", "BinOp", "(", "left", "=", "node", ".", "target", ",", "op", "=", "node", ".", "op", ",", "right", "=", "node", ".", "value", ")", ")", ")" ]
Augmented assign.
[ "Augmented", "assign", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L528-L533
train
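The handler above rewrites an augmented assignment into an ordinary assignment of the equivalent binary operation; a quick sketch of the observable behaviour:

from asteval import Interpreter

aeval = Interpreter()
aeval('total = 5')
aeval('total += 2')              # handled as total = total + 2
print(aeval.symtable['total'])   # 7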
newville/asteval
asteval/asteval.py
Interpreter.on_slice
def on_slice(self, node): # ():('lower', 'upper', 'step') """Simple slice.""" return slice(self.run(node.lower), self.run(node.upper), self.run(node.step))
python
def on_slice(self, node): # ():('lower', 'upper', 'step') """Simple slice.""" return slice(self.run(node.lower), self.run(node.upper), self.run(node.step))
[ "def", "on_slice", "(", "self", ",", "node", ")", ":", "# ():('lower', 'upper', 'step')", "return", "slice", "(", "self", ".", "run", "(", "node", ".", "lower", ")", ",", "self", ".", "run", "(", "node", ".", "upper", ")", ",", "self", ".", "run", "(", "node", ".", "step", ")", ")" ]
Simple slice.
[ "Simple", "slice", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L535-L539
train
newville/asteval
asteval/asteval.py
Interpreter.on_extslice
def on_extslice(self, node): # ():('dims',) """Extended slice.""" return tuple([self.run(tnode) for tnode in node.dims])
python
def on_extslice(self, node): # ():('dims',) """Extended slice.""" return tuple([self.run(tnode) for tnode in node.dims])
[ "def", "on_extslice", "(", "self", ",", "node", ")", ":", "# ():('dims',)", "return", "tuple", "(", "[", "self", ".", "run", "(", "tnode", ")", "for", "tnode", "in", "node", ".", "dims", "]", ")" ]
Extended slice.
[ "Extended", "slice", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L541-L543
train
newville/asteval
asteval/asteval.py
Interpreter.on_delete
def on_delete(self, node): # ('targets',) """Delete statement.""" for tnode in node.targets: if tnode.ctx.__class__ != ast.Del: break children = [] while tnode.__class__ == ast.Attribute: children.append(tnode.attr) tnode = tnode.value if tnode.__class__ == ast.Name and tnode.id not in self.readonly_symbols: children.append(tnode.id) children.reverse() self.symtable.pop('.'.join(children)) else: msg = "could not delete symbol" self.raise_exception(node, msg=msg)
python
def on_delete(self, node): # ('targets',) """Delete statement.""" for tnode in node.targets: if tnode.ctx.__class__ != ast.Del: break children = [] while tnode.__class__ == ast.Attribute: children.append(tnode.attr) tnode = tnode.value if tnode.__class__ == ast.Name and tnode.id not in self.readonly_symbols: children.append(tnode.id) children.reverse() self.symtable.pop('.'.join(children)) else: msg = "could not delete symbol" self.raise_exception(node, msg=msg)
[ "def", "on_delete", "(", "self", ",", "node", ")", ":", "# ('targets',)", "for", "tnode", "in", "node", ".", "targets", ":", "if", "tnode", ".", "ctx", ".", "__class__", "!=", "ast", ".", "Del", ":", "break", "children", "=", "[", "]", "while", "tnode", ".", "__class__", "==", "ast", ".", "Attribute", ":", "children", ".", "append", "(", "tnode", ".", "attr", ")", "tnode", "=", "tnode", ".", "value", "if", "tnode", ".", "__class__", "==", "ast", ".", "Name", "and", "tnode", ".", "id", "not", "in", "self", ".", "readonly_symbols", ":", "children", ".", "append", "(", "tnode", ".", "id", ")", "children", ".", "reverse", "(", ")", "self", ".", "symtable", ".", "pop", "(", "'.'", ".", "join", "(", "children", ")", ")", "else", ":", "msg", "=", "\"could not delete symbol\"", "self", ".", "raise_exception", "(", "node", ",", "msg", "=", "msg", ")" ]
Delete statement.
[ "Delete", "statement", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L559-L575
train
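A sketch of the delete handler from the user's side: `del` simply removes the entry from symtable (read-only symbols are refused with "could not delete symbol").

from asteval import Interpreter

aeval = Interpreter()
aeval('x = 10')
aeval('del x')
print('x' in aeval.symtable)   # False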
newville/asteval
asteval/asteval.py
Interpreter.on_unaryop
def on_unaryop(self, node): # ('op', 'operand') """Unary operator.""" return op2func(node.op)(self.run(node.operand))
python
def on_unaryop(self, node): # ('op', 'operand') """Unary operator.""" return op2func(node.op)(self.run(node.operand))
[ "def", "on_unaryop", "(", "self", ",", "node", ")", ":", "# ('op', 'operand')", "return", "op2func", "(", "node", ".", "op", ")", "(", "self", ".", "run", "(", "node", ".", "operand", ")", ")" ]
Unary operator.
[ "Unary", "operator", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L577-L579
train
newville/asteval
asteval/asteval.py
Interpreter.on_binop
def on_binop(self, node): # ('left', 'op', 'right') """Binary operator.""" return op2func(node.op)(self.run(node.left), self.run(node.right))
python
def on_binop(self, node): # ('left', 'op', 'right') """Binary operator.""" return op2func(node.op)(self.run(node.left), self.run(node.right))
[ "def", "on_binop", "(", "self", ",", "node", ")", ":", "# ('left', 'op', 'right')", "return", "op2func", "(", "node", ".", "op", ")", "(", "self", ".", "run", "(", "node", ".", "left", ")", ",", "self", ".", "run", "(", "node", ".", "right", ")", ")" ]
Binary operator.
[ "Binary", "operator", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L581-L584
train
newville/asteval
asteval/asteval.py
Interpreter.on_boolop
def on_boolop(self, node): # ('op', 'values') """Boolean operator.""" val = self.run(node.values[0]) is_and = ast.And == node.op.__class__ if (is_and and val) or (not is_and and not val): for n in node.values[1:]: val = op2func(node.op)(val, self.run(n)) if (is_and and not val) or (not is_and and val): break return val
python
def on_boolop(self, node): # ('op', 'values') """Boolean operator.""" val = self.run(node.values[0]) is_and = ast.And == node.op.__class__ if (is_and and val) or (not is_and and not val): for n in node.values[1:]: val = op2func(node.op)(val, self.run(n)) if (is_and and not val) or (not is_and and val): break return val
[ "def", "on_boolop", "(", "self", ",", "node", ")", ":", "# ('op', 'values')", "val", "=", "self", ".", "run", "(", "node", ".", "values", "[", "0", "]", ")", "is_and", "=", "ast", ".", "And", "==", "node", ".", "op", ".", "__class__", "if", "(", "is_and", "and", "val", ")", "or", "(", "not", "is_and", "and", "not", "val", ")", ":", "for", "n", "in", "node", ".", "values", "[", "1", ":", "]", ":", "val", "=", "op2func", "(", "node", ".", "op", ")", "(", "val", ",", "self", ".", "run", "(", "n", ")", ")", "if", "(", "is_and", "and", "not", "val", ")", "or", "(", "not", "is_and", "and", "val", ")", ":", "break", "return", "val" ]
Boolean operator.
[ "Boolean", "operator", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L586-L595
train
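The early-exit test in on_boolop preserves Python's short-circuit semantics; a sketch of the consequence, assuming the public Interpreter class:

from asteval import Interpreter

aeval = Interpreter()
print(aeval('0 and not_defined_anywhere'))    # 0 -- right operand never evaluated
print(aeval('True or not_defined_anywhere'))  # True -- same short-circuit for `or`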
newville/asteval
asteval/asteval.py
Interpreter._printer
def _printer(self, *out, **kws): """Generic print function.""" flush = kws.pop('flush', True) fileh = kws.pop('file', self.writer) sep = kws.pop('sep', ' ') end = kws.pop('sep', '\n') print(*out, file=fileh, sep=sep, end=end) if flush: fileh.flush()
python
def _printer(self, *out, **kws): """Generic print function.""" flush = kws.pop('flush', True) fileh = kws.pop('file', self.writer) sep = kws.pop('sep', ' ') end = kws.pop('sep', '\n') print(*out, file=fileh, sep=sep, end=end) if flush: fileh.flush()
[ "def", "_printer", "(", "self", ",", "*", "out", ",", "*", "*", "kws", ")", ":", "flush", "=", "kws", ".", "pop", "(", "'flush'", ",", "True", ")", "fileh", "=", "kws", ".", "pop", "(", "'file'", ",", "self", ".", "writer", ")", "sep", "=", "kws", ".", "pop", "(", "'sep'", ",", "' '", ")", "end", "=", "kws", ".", "pop", "(", "'sep'", ",", "'\\n'", ")", "print", "(", "*", "out", ",", "file", "=", "fileh", ",", "sep", "=", "sep", ",", "end", "=", "end", ")", "if", "flush", ":", "fileh", ".", "flush", "(", ")" ]
Generic print function.
[ "Generic", "print", "function", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L626-L635
train
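_printer is what backs print() inside evaluated code; a sketch assuming the Interpreter constructor accepts a writer argument that populates the self.writer attribute used above.

from io import StringIO
from asteval import Interpreter

buf = StringIO()
aeval = Interpreter(writer=buf)        # assumed constructor kwarg behind self.writer
aeval("print('hello from asteval')")
print(buf.getvalue())                  # 'hello from asteval\n'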
newville/asteval
asteval/asteval.py
Interpreter.on_if
def on_if(self, node): # ('test', 'body', 'orelse') """Regular if-then-else statement.""" block = node.body if not self.run(node.test): block = node.orelse for tnode in block: self.run(tnode)
python
def on_if(self, node): # ('test', 'body', 'orelse') """Regular if-then-else statement.""" block = node.body if not self.run(node.test): block = node.orelse for tnode in block: self.run(tnode)
[ "def", "on_if", "(", "self", ",", "node", ")", ":", "# ('test', 'body', 'orelse')", "block", "=", "node", ".", "body", "if", "not", "self", ".", "run", "(", "node", ".", "test", ")", ":", "block", "=", "node", ".", "orelse", "for", "tnode", "in", "block", ":", "self", ".", "run", "(", "tnode", ")" ]
Regular if-then-else statement.
[ "Regular", "if", "-", "then", "-", "else", "statement", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L637-L643
train
newville/asteval
asteval/asteval.py
Interpreter.on_ifexp
def on_ifexp(self, node): # ('test', 'body', 'orelse') """If expressions.""" expr = node.orelse if self.run(node.test): expr = node.body return self.run(expr)
python
def on_ifexp(self, node): # ('test', 'body', 'orelse') """If expressions.""" expr = node.orelse if self.run(node.test): expr = node.body return self.run(expr)
[ "def", "on_ifexp", "(", "self", ",", "node", ")", ":", "# ('test', 'body', 'orelse')", "expr", "=", "node", ".", "orelse", "if", "self", ".", "run", "(", "node", ".", "test", ")", ":", "expr", "=", "node", ".", "body", "return", "self", ".", "run", "(", "expr", ")" ]
If expressions.
[ "If", "expressions", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L645-L650
train
newville/asteval
asteval/asteval.py
Interpreter.on_while
def on_while(self, node): # ('test', 'body', 'orelse') """While blocks.""" while self.run(node.test): self._interrupt = None for tnode in node.body: self.run(tnode) if self._interrupt is not None: break if isinstance(self._interrupt, ast.Break): break else: for tnode in node.orelse: self.run(tnode) self._interrupt = None
python
def on_while(self, node): # ('test', 'body', 'orelse') """While blocks.""" while self.run(node.test): self._interrupt = None for tnode in node.body: self.run(tnode) if self._interrupt is not None: break if isinstance(self._interrupt, ast.Break): break else: for tnode in node.orelse: self.run(tnode) self._interrupt = None
[ "def", "on_while", "(", "self", ",", "node", ")", ":", "# ('test', 'body', 'orelse')", "while", "self", ".", "run", "(", "node", ".", "test", ")", ":", "self", ".", "_interrupt", "=", "None", "for", "tnode", "in", "node", ".", "body", ":", "self", ".", "run", "(", "tnode", ")", "if", "self", ".", "_interrupt", "is", "not", "None", ":", "break", "if", "isinstance", "(", "self", ".", "_interrupt", ",", "ast", ".", "Break", ")", ":", "break", "else", ":", "for", "tnode", "in", "node", ".", "orelse", ":", "self", ".", "run", "(", "tnode", ")", "self", ".", "_interrupt", "=", "None" ]
While blocks.
[ "While", "blocks", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L652-L665
train
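A sketch showing the while/orelse handling above: the else block runs only when the loop exits without hitting a break.

from asteval import Interpreter

aeval = Interpreter()
aeval('''
n = 0
while n < 3:
    n = n + 1
else:
    n = n + 100
''')
print(aeval.symtable['n'])   # 103: the loop ran to completion, so the else block executed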
newville/asteval
asteval/asteval.py
Interpreter.on_for
def on_for(self, node): # ('target', 'iter', 'body', 'orelse') """For blocks.""" for val in self.run(node.iter): self.node_assign(node.target, val) self._interrupt = None for tnode in node.body: self.run(tnode) if self._interrupt is not None: break if isinstance(self._interrupt, ast.Break): break else: for tnode in node.orelse: self.run(tnode) self._interrupt = None
python
def on_for(self, node): # ('target', 'iter', 'body', 'orelse') """For blocks.""" for val in self.run(node.iter): self.node_assign(node.target, val) self._interrupt = None for tnode in node.body: self.run(tnode) if self._interrupt is not None: break if isinstance(self._interrupt, ast.Break): break else: for tnode in node.orelse: self.run(tnode) self._interrupt = None
[ "def", "on_for", "(", "self", ",", "node", ")", ":", "# ('target', 'iter', 'body', 'orelse')", "for", "val", "in", "self", ".", "run", "(", "node", ".", "iter", ")", ":", "self", ".", "node_assign", "(", "node", ".", "target", ",", "val", ")", "self", ".", "_interrupt", "=", "None", "for", "tnode", "in", "node", ".", "body", ":", "self", ".", "run", "(", "tnode", ")", "if", "self", ".", "_interrupt", "is", "not", "None", ":", "break", "if", "isinstance", "(", "self", ".", "_interrupt", ",", "ast", ".", "Break", ")", ":", "break", "else", ":", "for", "tnode", "in", "node", ".", "orelse", ":", "self", ".", "run", "(", "tnode", ")", "self", ".", "_interrupt", "=", "None" ]
For blocks.
[ "For", "blocks", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L667-L681
train
newville/asteval
asteval/asteval.py
Interpreter.on_listcomp
def on_listcomp(self, node): # ('elt', 'generators') """List comprehension.""" out = [] for tnode in node.generators: if tnode.__class__ == ast.comprehension: for val in self.run(tnode.iter): self.node_assign(tnode.target, val) add = True for cond in tnode.ifs: add = add and self.run(cond) if add: out.append(self.run(node.elt)) return out
python
def on_listcomp(self, node): # ('elt', 'generators') """List comprehension.""" out = [] for tnode in node.generators: if tnode.__class__ == ast.comprehension: for val in self.run(tnode.iter): self.node_assign(tnode.target, val) add = True for cond in tnode.ifs: add = add and self.run(cond) if add: out.append(self.run(node.elt)) return out
[ "def", "on_listcomp", "(", "self", ",", "node", ")", ":", "# ('elt', 'generators')", "out", "=", "[", "]", "for", "tnode", "in", "node", ".", "generators", ":", "if", "tnode", ".", "__class__", "==", "ast", ".", "comprehension", ":", "for", "val", "in", "self", ".", "run", "(", "tnode", ".", "iter", ")", ":", "self", ".", "node_assign", "(", "tnode", ".", "target", ",", "val", ")", "add", "=", "True", "for", "cond", "in", "tnode", ".", "ifs", ":", "add", "=", "add", "and", "self", ".", "run", "(", "cond", ")", "if", "add", ":", "out", ".", "append", "(", "self", ".", "run", "(", "node", ".", "elt", ")", ")", "return", "out" ]
List comprehension.
[ "List", "comprehension", "." ]
bb7d3a95079f96ead75ea55662014bbcc82f9b28
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L683-L695
train
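Finally, a sketch of the comprehension handler: a single generator with an if clause, matching the structure this implementation iterates over.

from asteval import Interpreter

aeval = Interpreter()
aeval('squares = [i * i for i in range(6) if i % 2 == 0]')
print(aeval.symtable['squares'])   # [0, 4, 16]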