repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
networks-lab/metaknowledge | metaknowledge/recordCollection.py | makeNodeTuple | def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR):
"""Makes a tuple of idVal and a dict of the selected attributes"""
d = {}
if nodeInfo:
if nodeType == 'full':
if coreValues:
if citation in coreCitesDict:
R = coreCitesDict[citation]
d['MK-ID'] = R.id
if not detailedValues:
infoVals = []
for tag in coreValues:
tagVal = R.get(tag)
if isinstance(tagVal, str):
infoVals.append(tagVal.replace(',',''))
elif isinstance(tagVal, list):
infoVals.append(tagVal[0].replace(',',''))
else:
pass
d['info'] = ', '.join(infoVals)
else:
for tag in coreValues:
v = R.get(tag, None)
if isinstance(v, list):
d[tag] = '|'.join(sorted(v))
else:
d[tag] = v
d['inCore'] = True
if addCR:
d['citations'] = '|'.join((str(c) for c in R.get('citations', [])))
else:
d['MK-ID'] = 'None'
d['info'] = citation.allButDOI()
d['inCore'] = False
if addCR:
d['citations'] = ''
else:
d['info'] = citation.allButDOI()
elif nodeType == 'journal':
if citation.isJournal():
d['info'] = str(citation.FullJournalName())
else:
d['info'] = "None"
elif nodeType == 'original':
d['info'] = str(citation)
else:
d['info'] = idVal
if fullInfo:
d['fullCite'] = str(citation)
if count:
d['count'] = 1
return (idVal, d) | python | def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR):
"""Makes a tuple of idVal and a dict of the selected attributes"""
d = {}
if nodeInfo:
if nodeType == 'full':
if coreValues:
if citation in coreCitesDict:
R = coreCitesDict[citation]
d['MK-ID'] = R.id
if not detailedValues:
infoVals = []
for tag in coreValues:
tagVal = R.get(tag)
if isinstance(tagVal, str):
infoVals.append(tagVal.replace(',',''))
elif isinstance(tagVal, list):
infoVals.append(tagVal[0].replace(',',''))
else:
pass
d['info'] = ', '.join(infoVals)
else:
for tag in coreValues:
v = R.get(tag, None)
if isinstance(v, list):
d[tag] = '|'.join(sorted(v))
else:
d[tag] = v
d['inCore'] = True
if addCR:
d['citations'] = '|'.join((str(c) for c in R.get('citations', [])))
else:
d['MK-ID'] = 'None'
d['info'] = citation.allButDOI()
d['inCore'] = False
if addCR:
d['citations'] = ''
else:
d['info'] = citation.allButDOI()
elif nodeType == 'journal':
if citation.isJournal():
d['info'] = str(citation.FullJournalName())
else:
d['info'] = "None"
elif nodeType == 'original':
d['info'] = str(citation)
else:
d['info'] = idVal
if fullInfo:
d['fullCite'] = str(citation)
if count:
d['count'] = 1
return (idVal, d) | [
"def",
"makeNodeTuple",
"(",
"citation",
",",
"idVal",
",",
"nodeInfo",
",",
"fullInfo",
",",
"nodeType",
",",
"count",
",",
"coreCitesDict",
",",
"coreValues",
",",
"detailedValues",
",",
"addCR",
")",
":",
"d",
"=",
"{",
"}",
"if",
"nodeInfo",
":",
"if",
"nodeType",
"==",
"'full'",
":",
"if",
"coreValues",
":",
"if",
"citation",
"in",
"coreCitesDict",
":",
"R",
"=",
"coreCitesDict",
"[",
"citation",
"]",
"d",
"[",
"'MK-ID'",
"]",
"=",
"R",
".",
"id",
"if",
"not",
"detailedValues",
":",
"infoVals",
"=",
"[",
"]",
"for",
"tag",
"in",
"coreValues",
":",
"tagVal",
"=",
"R",
".",
"get",
"(",
"tag",
")",
"if",
"isinstance",
"(",
"tagVal",
",",
"str",
")",
":",
"infoVals",
".",
"append",
"(",
"tagVal",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
"elif",
"isinstance",
"(",
"tagVal",
",",
"list",
")",
":",
"infoVals",
".",
"append",
"(",
"tagVal",
"[",
"0",
"]",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
"else",
":",
"pass",
"d",
"[",
"'info'",
"]",
"=",
"', '",
".",
"join",
"(",
"infoVals",
")",
"else",
":",
"for",
"tag",
"in",
"coreValues",
":",
"v",
"=",
"R",
".",
"get",
"(",
"tag",
",",
"None",
")",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"d",
"[",
"tag",
"]",
"=",
"'|'",
".",
"join",
"(",
"sorted",
"(",
"v",
")",
")",
"else",
":",
"d",
"[",
"tag",
"]",
"=",
"v",
"d",
"[",
"'inCore'",
"]",
"=",
"True",
"if",
"addCR",
":",
"d",
"[",
"'citations'",
"]",
"=",
"'|'",
".",
"join",
"(",
"(",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"R",
".",
"get",
"(",
"'citations'",
",",
"[",
"]",
")",
")",
")",
"else",
":",
"d",
"[",
"'MK-ID'",
"]",
"=",
"'None'",
"d",
"[",
"'info'",
"]",
"=",
"citation",
".",
"allButDOI",
"(",
")",
"d",
"[",
"'inCore'",
"]",
"=",
"False",
"if",
"addCR",
":",
"d",
"[",
"'citations'",
"]",
"=",
"''",
"else",
":",
"d",
"[",
"'info'",
"]",
"=",
"citation",
".",
"allButDOI",
"(",
")",
"elif",
"nodeType",
"==",
"'journal'",
":",
"if",
"citation",
".",
"isJournal",
"(",
")",
":",
"d",
"[",
"'info'",
"]",
"=",
"str",
"(",
"citation",
".",
"FullJournalName",
"(",
")",
")",
"else",
":",
"d",
"[",
"'info'",
"]",
"=",
"\"None\"",
"elif",
"nodeType",
"==",
"'original'",
":",
"d",
"[",
"'info'",
"]",
"=",
"str",
"(",
"citation",
")",
"else",
":",
"d",
"[",
"'info'",
"]",
"=",
"idVal",
"if",
"fullInfo",
":",
"d",
"[",
"'fullCite'",
"]",
"=",
"str",
"(",
"citation",
")",
"if",
"count",
":",
"d",
"[",
"'count'",
"]",
"=",
"1",
"return",
"(",
"idVal",
",",
"d",
")"
]
| Makes a tuple of idVal and a dict of the selected attributes | [
"Makes",
"a",
"tuple",
"of",
"idVal",
"and",
"a",
"dict",
"of",
"the",
"selected",
"attributes"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1709-L1760 | train |
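A minimal sketch of how `makeNodeTuple` packages a node for the citation networks built in this module. It assumes metaknowledge is installed, that the helper is importable from `metaknowledge.recordCollection` (it is defined at module level in this file), and that `mk.Citation` accepts a raw reference string; the citation text itself is made up.

```python
import metaknowledge as mk
from metaknowledge.recordCollection import makeNodeTuple  # module-level helper shown above

cite = mk.Citation("Hall M, 2009, J HYPOTHETICAL, V11, P10")  # hypothetical reference string
idVal, attrs = makeNodeTuple(
    cite, str(cite),
    nodeInfo=True,        # store an 'info' attribute on the node
    fullInfo=False,       # skip the 'fullCite' attribute
    nodeType='original',  # node value is the raw citation string
    count=True,           # start the occurrence count at 1
    coreCitesDict={},     # no core Records supplied here
    coreValues=[],        # so no per-tag core attributes are added
    detailedValues=False,
    addCR=False,
)
print(idVal, attrs)  # -> ('Hall M, 2009, ...', {'info': 'Hall M, 2009, ...', 'count': 1})
```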
networks-lab/metaknowledge | metaknowledge/recordCollection.py | expandRecs | def expandRecs(G, RecCollect, nodeType, weighted):
"""Expand all the citations from _RecCollect_"""
for Rec in RecCollect:
fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite = True)]
if len(fullCiteList) > 1:
for i, citeID1 in enumerate(fullCiteList):
if citeID1 in G:
for citeID2 in fullCiteList[i + 1:]:
if citeID2 not in G:
G.add_node(citeID2, **G.node[citeID1])
if weighted:
G.add_edge(citeID1, citeID2, weight = 1)
else:
G.add_edge(citeID1, citeID2)
elif weighted:
try:
G.edges[citeID1, citeID2]['weight'] += 1
except KeyError:
G.add_edge(citeID1, citeID2, weight = 1)
for e1, e2, data in G.edges(citeID1, data = True):
G.add_edge(citeID2, e2, **data) | python | def expandRecs(G, RecCollect, nodeType, weighted):
"""Expand all the citations from _RecCollect_"""
for Rec in RecCollect:
fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite = True)]
if len(fullCiteList) > 1:
for i, citeID1 in enumerate(fullCiteList):
if citeID1 in G:
for citeID2 in fullCiteList[i + 1:]:
if citeID2 not in G:
G.add_node(citeID2, **G.node[citeID1])
if weighted:
G.add_edge(citeID1, citeID2, weight = 1)
else:
G.add_edge(citeID1, citeID2)
elif weighted:
try:
G.edges[citeID1, citeID2]['weight'] += 1
except KeyError:
G.add_edge(citeID1, citeID2, weight = 1)
for e1, e2, data in G.edges(citeID1, data = True):
G.add_edge(citeID2, e2, **data) | [
"def",
"expandRecs",
"(",
"G",
",",
"RecCollect",
",",
"nodeType",
",",
"weighted",
")",
":",
"for",
"Rec",
"in",
"RecCollect",
":",
"fullCiteList",
"=",
"[",
"makeID",
"(",
"c",
",",
"nodeType",
")",
"for",
"c",
"in",
"Rec",
".",
"createCitation",
"(",
"multiCite",
"=",
"True",
")",
"]",
"if",
"len",
"(",
"fullCiteList",
")",
">",
"1",
":",
"for",
"i",
",",
"citeID1",
"in",
"enumerate",
"(",
"fullCiteList",
")",
":",
"if",
"citeID1",
"in",
"G",
":",
"for",
"citeID2",
"in",
"fullCiteList",
"[",
"i",
"+",
"1",
":",
"]",
":",
"if",
"citeID2",
"not",
"in",
"G",
":",
"G",
".",
"add_node",
"(",
"citeID2",
",",
"*",
"*",
"G",
".",
"node",
"[",
"citeID1",
"]",
")",
"if",
"weighted",
":",
"G",
".",
"add_edge",
"(",
"citeID1",
",",
"citeID2",
",",
"weight",
"=",
"1",
")",
"else",
":",
"G",
".",
"add_edge",
"(",
"citeID1",
",",
"citeID2",
")",
"elif",
"weighted",
":",
"try",
":",
"G",
".",
"edges",
"[",
"citeID1",
",",
"citeID2",
"]",
"[",
"'weight'",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"G",
".",
"add_edge",
"(",
"citeID1",
",",
"citeID2",
",",
"weight",
"=",
"1",
")",
"for",
"e1",
",",
"e2",
",",
"data",
"in",
"G",
".",
"edges",
"(",
"citeID1",
",",
"data",
"=",
"True",
")",
":",
"G",
".",
"add_edge",
"(",
"citeID2",
",",
"e2",
",",
"*",
"*",
"data",
")"
]
| Expand all the citations from _RecCollect_ | [
"Expand",
"all",
"the",
"citations",
"from",
"_RecCollect_"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1792-L1812 | train |
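`expandRecs` is normally reached indirectly, through the `expandedCore` flag of the network builders, rather than called by hand. A hedged sketch, assuming `savedrecs.txt` is a Web of Science export readable by metaknowledge:

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")  # hypothetical WOS export file
# expandedCore=True makes networkCoCitation call expandRecs on the core Records,
# duplicating each multi-cite node and copying its attributes and edges.
G = RC.networkCoCitation(nodeType="author", expandedCore=True)
print(G.number_of_nodes(), G.number_of_edges())
```

Note that the helper copies attributes through `G.node[...]`, an accessor removed in networkx 2.4 in favour of `G.nodes[...]`, so it ties this code path to older networkx releases.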
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.dropNonJournals | def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False):
"""Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
"""
if dropBad:
self.dropBadEntries()
if invert:
self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()}
else:
self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()} | python | def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False):
"""Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
"""
if dropBad:
self.dropBadEntries()
if invert:
self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()}
else:
self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()} | [
"def",
"dropNonJournals",
"(",
"self",
",",
"ptVal",
"=",
"'J'",
",",
"dropBad",
"=",
"True",
",",
"invert",
"=",
"False",
")",
":",
"if",
"dropBad",
":",
"self",
".",
"dropBadEntries",
"(",
")",
"if",
"invert",
":",
"self",
".",
"_collection",
"=",
"{",
"r",
"for",
"r",
"in",
"self",
".",
"_collection",
"if",
"r",
"[",
"'pubType'",
"]",
"!=",
"ptVal",
".",
"upper",
"(",
")",
"}",
"else",
":",
"self",
".",
"_collection",
"=",
"{",
"r",
"for",
"r",
"in",
"self",
".",
"_collection",
"if",
"r",
"[",
"'pubType'",
"]",
"==",
"ptVal",
".",
"upper",
"(",
")",
"}"
]
| Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True` | [
"Drops",
"the",
"non",
"journal",
"type",
"Records",
"from",
"the",
"collection",
"this",
"is",
"done",
"by",
"checking",
"_ptVal_",
"against",
"the",
"PT",
"tag"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L192-L214 | train |
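A short usage sketch for `dropNonJournals`, assuming a hypothetical WOS export file; `'J'` is the journal publication-type code, and `'B'` (books) is used only to illustrate `invert`.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # hypothetical WOS export file
before = len(RC)
RC.dropNonJournals()                         # keep only Records whose PT tag is 'J', dropping bad Records too
print(before, "->", len(RC))

RC2 = mk.RecordCollection("savedrecs.txt")
RC2.dropNonJournals(ptVal='b', invert=True)  # ptVal is upper-cased internally; drops books, keeps everything else
```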
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.writeFile | def writeFile(self, fname = None):
"""Writes the `RecordCollection` to a file, the written file's format is identical to those download from WOS. The order of `Records` written is random.
# Parameters
_fname_ : `optional [str]`
> Default `None`, if given the output file will written to _fanme_, if `None` the `RecordCollection`'s name's first 200 characters are used with the suffix .isi
"""
if len(self._collectedTypes) < 2:
recEncoding = self.peek().encoding()
else:
recEncoding = 'utf-8'
if fname:
f = open(fname, mode = 'w', encoding = recEncoding)
else:
f = open(self.name[:200] + '.txt', mode = 'w', encoding = recEncoding)
if self._collectedTypes == {'WOSRecord'}:
f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n")
f.write("VR 1.0\n")
elif self._collectedTypes == {'MedlineRecord'}:
f.write('\n')
elif self._collectedTypes == {'ScopusRecord'}:
f.write("\ufeff{}\n".format(','.join(scopusHeader)))
for R in self._collection:
R.writeRecord(f)
f.write('\n')
if self._collectedTypes == {'WOSRecord'}:
f.write('EF')
f.close() | python | def writeFile(self, fname = None):
"""Writes the `RecordCollection` to a file, the written file's format is identical to those download from WOS. The order of `Records` written is random.
# Parameters
_fname_ : `optional [str]`
> Default `None`, if given the output file will written to _fanme_, if `None` the `RecordCollection`'s name's first 200 characters are used with the suffix .isi
"""
if len(self._collectedTypes) < 2:
recEncoding = self.peek().encoding()
else:
recEncoding = 'utf-8'
if fname:
f = open(fname, mode = 'w', encoding = recEncoding)
else:
f = open(self.name[:200] + '.txt', mode = 'w', encoding = recEncoding)
if self._collectedTypes == {'WOSRecord'}:
f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n")
f.write("VR 1.0\n")
elif self._collectedTypes == {'MedlineRecord'}:
f.write('\n')
elif self._collectedTypes == {'ScopusRecord'}:
f.write("\ufeff{}\n".format(','.join(scopusHeader)))
for R in self._collection:
R.writeRecord(f)
f.write('\n')
if self._collectedTypes == {'WOSRecord'}:
f.write('EF')
f.close() | [
"def",
"writeFile",
"(",
"self",
",",
"fname",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"_collectedTypes",
")",
"<",
"2",
":",
"recEncoding",
"=",
"self",
".",
"peek",
"(",
")",
".",
"encoding",
"(",
")",
"else",
":",
"recEncoding",
"=",
"'utf-8'",
"if",
"fname",
":",
"f",
"=",
"open",
"(",
"fname",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"recEncoding",
")",
"else",
":",
"f",
"=",
"open",
"(",
"self",
".",
"name",
"[",
":",
"200",
"]",
"+",
"'.txt'",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"recEncoding",
")",
"if",
"self",
".",
"_collectedTypes",
"==",
"{",
"'WOSRecord'",
"}",
":",
"f",
".",
"write",
"(",
"\"\\ufeffFN Thomson Reuters Web of Science\\u2122\\n\"",
")",
"f",
".",
"write",
"(",
"\"VR 1.0\\n\"",
")",
"elif",
"self",
".",
"_collectedTypes",
"==",
"{",
"'MedlineRecord'",
"}",
":",
"f",
".",
"write",
"(",
"'\\n'",
")",
"elif",
"self",
".",
"_collectedTypes",
"==",
"{",
"'ScopusRecord'",
"}",
":",
"f",
".",
"write",
"(",
"\"\\ufeff{}\\n\"",
".",
"format",
"(",
"','",
".",
"join",
"(",
"scopusHeader",
")",
")",
")",
"for",
"R",
"in",
"self",
".",
"_collection",
":",
"R",
".",
"writeRecord",
"(",
"f",
")",
"f",
".",
"write",
"(",
"'\\n'",
")",
"if",
"self",
".",
"_collectedTypes",
"==",
"{",
"'WOSRecord'",
"}",
":",
"f",
".",
"write",
"(",
"'EF'",
")",
"f",
".",
"close",
"(",
")"
]
| Writes the `RecordCollection` to a file, the written file's format is identical to those download from WOS. The order of `Records` written is random.
# Parameters
_fname_ : `optional [str]`
> Default `None`, if given the output file will written to _fanme_, if `None` the `RecordCollection`'s name's first 200 characters are used with the suffix .isi | [
"Writes",
"the",
"RecordCollection",
"to",
"a",
"file",
"the",
"written",
"file",
"s",
"format",
"is",
"identical",
"to",
"those",
"download",
"from",
"WOS",
".",
"The",
"order",
"of",
"Records",
"written",
"is",
"random",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L216-L245 | train |
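Usage sketch for `writeFile`, again assuming a hypothetical collection loaded from a WOS export. Note that when no _fname_ is given the code above appends `'.txt'` to the collection name, even though the docstring mentions an `.isi` suffix.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")  # hypothetical WOS export file
RC.writeFile("filtered_records.txt")       # same field-tagged format as a WOS download
RC.writeFile()                             # writes '<collection name>.txt' in the current directory
```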
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.writeBib | def writeBib(self, fname = None, maxStringLength = 1000, wosMode = False, reducedOutput = False, niceIDs = True):
"""Writes a bibTex entry to _fname_ for each `Record` in the collection.
If the Record is of a journal article (PT J) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names.
**Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier only.
**Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands
# Parameters
_fname_ : `optional [str]`
> Default `None`, The name of the file to be written. If not given one will be derived from the collection and the file will be written to .
_maxStringLength_ : `optional [int]`
> Default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings
_WOSMode_ : `optional [bool]`
> Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly macthes that.
_restrictedOutput_ : `optional [bool]`
> Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'`
_niceID_ : `optional [bool]`
> Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag
"""
if fname:
f = open(fname, mode = 'w', encoding = 'utf-8')
else:
f = open(self.name[:200] + '.bib', mode = 'w', encoding = 'utf-8')
f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n")
#I figure this is worth mentioning, as someone will get annoyed at none of the special characters being escaped and how terrible some of the fields look to humans
for R in self:
try:
f.write('\n\n')
f.write(R.bibString(maxLength = maxStringLength, WOSMode = wosMode, restrictedOutput = reducedOutput, niceID = niceIDs))
except BadWOSRecord:
pass
except AttributeError:
raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id))
f.close() | python | def writeBib(self, fname = None, maxStringLength = 1000, wosMode = False, reducedOutput = False, niceIDs = True):
"""Writes a bibTex entry to _fname_ for each `Record` in the collection.
If the Record is of a journal article (PT J) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names.
**Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier only.
**Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands
# Parameters
_fname_ : `optional [str]`
> Default `None`, The name of the file to be written. If not given one will be derived from the collection and the file will be written to .
_maxStringLength_ : `optional [int]`
> Default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings
_WOSMode_ : `optional [bool]`
> Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly macthes that.
_restrictedOutput_ : `optional [bool]`
> Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'`
_niceID_ : `optional [bool]`
> Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag
"""
if fname:
f = open(fname, mode = 'w', encoding = 'utf-8')
else:
f = open(self.name[:200] + '.bib', mode = 'w', encoding = 'utf-8')
f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n")
#I figure this is worth mentioning, as someone will get annoyed at none of the special characters being escaped and how terrible some of the fields look to humans
for R in self:
try:
f.write('\n\n')
f.write(R.bibString(maxLength = maxStringLength, WOSMode = wosMode, restrictedOutput = reducedOutput, niceID = niceIDs))
except BadWOSRecord:
pass
except AttributeError:
raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id))
f.close() | [
"def",
"writeBib",
"(",
"self",
",",
"fname",
"=",
"None",
",",
"maxStringLength",
"=",
"1000",
",",
"wosMode",
"=",
"False",
",",
"reducedOutput",
"=",
"False",
",",
"niceIDs",
"=",
"True",
")",
":",
"if",
"fname",
":",
"f",
"=",
"open",
"(",
"fname",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"self",
".",
"name",
"[",
":",
"200",
"]",
"+",
"'.bib'",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"f",
".",
"write",
"(",
"\"%This file was generated by the metaknowledge Python package.\\n%The contents have been automatically generated and are likely to not work with\\n%LaTeX without some human intervention. This file is meant for other automatic\\n%systems and not to be used directly for making citations\\n\"",
")",
"#I figure this is worth mentioning, as someone will get annoyed at none of the special characters being escaped and how terrible some of the fields look to humans",
"for",
"R",
"in",
"self",
":",
"try",
":",
"f",
".",
"write",
"(",
"'\\n\\n'",
")",
"f",
".",
"write",
"(",
"R",
".",
"bibString",
"(",
"maxLength",
"=",
"maxStringLength",
",",
"WOSMode",
"=",
"wosMode",
",",
"restrictedOutput",
"=",
"reducedOutput",
",",
"niceID",
"=",
"niceIDs",
")",
")",
"except",
"BadWOSRecord",
":",
"pass",
"except",
"AttributeError",
":",
"raise",
"RecordsNotCompatible",
"(",
"\"The Record '{}', with ID '{}' does not support writing to bibtext files.\"",
".",
"format",
"(",
"R",
",",
"R",
".",
"id",
")",
")",
"f",
".",
"close",
"(",
")"
]
| Writes a bibTex entry to _fname_ for each `Record` in the collection.
If the Record is of a journal article (PT J) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names.
**Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier only.
**Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands
# Parameters
_fname_ : `optional [str]`
> Default `None`, The name of the file to be written. If not given one will be derived from the collection and the file will be written to .
_maxStringLength_ : `optional [int]`
> Default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings
_WOSMode_ : `optional [bool]`
> Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly macthes that.
_restrictedOutput_ : `optional [bool]`
> Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'`
_niceID_ : `optional [bool]`
> Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag | [
"Writes",
"a",
"bibTex",
"entry",
"to",
"_fname_",
"for",
"each",
"Record",
"in",
"the",
"collection",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L373-L418 | train |
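A sketch of `writeBib`; the output is intentionally raw (no LaTeX escaping), so treat the resulting file as input for other tools rather than for direct citation. The file name is hypothetical.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")  # hypothetical WOS export file
RC.writeBib(
    "records.bib",
    maxStringLength=500,  # split long strings and rejoin them with bibTeX '#' concatenation
    wosMode=False,        # process values instead of emitting raw double-brace WOS style
    reducedOutput=True,   # restrict output to the short list of core tags
    niceIDs=True,         # entry IDs built from author, year and title rather than the UT tag
)
```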
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.makeDict | def makeDict(self, onlyTheseTags = None, longNames = False, raw = False, numAuthors = True, genderCounts = True):
"""Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection, `None` is given when there is no value and they are in the same order across each tag.
When used with pandas: `pandas.DataFrame(RC.makeDict())` returns a data frame with each column a tag and each row a Record.
# Parameters
_onlyTheseTags_ : `optional [iterable]`
> Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given.
> If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc).
_longNames_ : `optional [bool]`
> Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used.
_cleanedVal_ : `optional [bool]`
> Default `True`, if `True` the processed values for each `Record`'s field will be provided, otherwise the raw values are given.
_numAuthors_ : `optional [bool]`
> Default `True`, if `True` adds the number of authors as the column `'numAuthors'`.
"""
if onlyTheseTags:
for i in range(len(onlyTheseTags)):
if onlyTheseTags[i] in fullToTagDict:
onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]]
retrievedFields = onlyTheseTags
else:
retrievedFields = []
for R in self:
tagsLst = [t for t in R.keys() if t not in retrievedFields]
retrievedFields += tagsLst
if longNames:
try:
retrievedFields = [tagToFullDict[t] for t in retrievedFields]
except KeyError:
raise KeyError("One of the tags could not be converted to a long name.")
retDict = {k : [] for k in retrievedFields}
if numAuthors:
retDict["num-Authors"] = []
if genderCounts:
retDict.update({'num-Male' : [], 'num-Female' : [], 'num-Unknown' : []})
for R in self:
if numAuthors:
retDict["num-Authors"].append(len(R.get('authorsShort', [])))
if genderCounts:
m, f, u = R.authGenders(_countsTuple = True)
retDict['num-Male'].append(m)
retDict['num-Female'].append(f)
retDict['num-Unknown'].append(u)
for k, v in R.subDict(retrievedFields, raw = raw).items():
retDict[k].append(v)
return retDict | python | def makeDict(self, onlyTheseTags = None, longNames = False, raw = False, numAuthors = True, genderCounts = True):
"""Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection, `None` is given when there is no value and they are in the same order across each tag.
When used with pandas: `pandas.DataFrame(RC.makeDict())` returns a data frame with each column a tag and each row a Record.
# Parameters
_onlyTheseTags_ : `optional [iterable]`
> Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given.
> If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc).
_longNames_ : `optional [bool]`
> Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used.
_cleanedVal_ : `optional [bool]`
> Default `True`, if `True` the processed values for each `Record`'s field will be provided, otherwise the raw values are given.
_numAuthors_ : `optional [bool]`
> Default `True`, if `True` adds the number of authors as the column `'numAuthors'`.
"""
if onlyTheseTags:
for i in range(len(onlyTheseTags)):
if onlyTheseTags[i] in fullToTagDict:
onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]]
retrievedFields = onlyTheseTags
else:
retrievedFields = []
for R in self:
tagsLst = [t for t in R.keys() if t not in retrievedFields]
retrievedFields += tagsLst
if longNames:
try:
retrievedFields = [tagToFullDict[t] for t in retrievedFields]
except KeyError:
raise KeyError("One of the tags could not be converted to a long name.")
retDict = {k : [] for k in retrievedFields}
if numAuthors:
retDict["num-Authors"] = []
if genderCounts:
retDict.update({'num-Male' : [], 'num-Female' : [], 'num-Unknown' : []})
for R in self:
if numAuthors:
retDict["num-Authors"].append(len(R.get('authorsShort', [])))
if genderCounts:
m, f, u = R.authGenders(_countsTuple = True)
retDict['num-Male'].append(m)
retDict['num-Female'].append(f)
retDict['num-Unknown'].append(u)
for k, v in R.subDict(retrievedFields, raw = raw).items():
retDict[k].append(v)
return retDict | [
"def",
"makeDict",
"(",
"self",
",",
"onlyTheseTags",
"=",
"None",
",",
"longNames",
"=",
"False",
",",
"raw",
"=",
"False",
",",
"numAuthors",
"=",
"True",
",",
"genderCounts",
"=",
"True",
")",
":",
"if",
"onlyTheseTags",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"onlyTheseTags",
")",
")",
":",
"if",
"onlyTheseTags",
"[",
"i",
"]",
"in",
"fullToTagDict",
":",
"onlyTheseTags",
"[",
"i",
"]",
"=",
"fullToTagDict",
"[",
"onlyTheseTags",
"[",
"i",
"]",
"]",
"retrievedFields",
"=",
"onlyTheseTags",
"else",
":",
"retrievedFields",
"=",
"[",
"]",
"for",
"R",
"in",
"self",
":",
"tagsLst",
"=",
"[",
"t",
"for",
"t",
"in",
"R",
".",
"keys",
"(",
")",
"if",
"t",
"not",
"in",
"retrievedFields",
"]",
"retrievedFields",
"+=",
"tagsLst",
"if",
"longNames",
":",
"try",
":",
"retrievedFields",
"=",
"[",
"tagToFullDict",
"[",
"t",
"]",
"for",
"t",
"in",
"retrievedFields",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"One of the tags could not be converted to a long name.\"",
")",
"retDict",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
"in",
"retrievedFields",
"}",
"if",
"numAuthors",
":",
"retDict",
"[",
"\"num-Authors\"",
"]",
"=",
"[",
"]",
"if",
"genderCounts",
":",
"retDict",
".",
"update",
"(",
"{",
"'num-Male'",
":",
"[",
"]",
",",
"'num-Female'",
":",
"[",
"]",
",",
"'num-Unknown'",
":",
"[",
"]",
"}",
")",
"for",
"R",
"in",
"self",
":",
"if",
"numAuthors",
":",
"retDict",
"[",
"\"num-Authors\"",
"]",
".",
"append",
"(",
"len",
"(",
"R",
".",
"get",
"(",
"'authorsShort'",
",",
"[",
"]",
")",
")",
")",
"if",
"genderCounts",
":",
"m",
",",
"f",
",",
"u",
"=",
"R",
".",
"authGenders",
"(",
"_countsTuple",
"=",
"True",
")",
"retDict",
"[",
"'num-Male'",
"]",
".",
"append",
"(",
"m",
")",
"retDict",
"[",
"'num-Female'",
"]",
".",
"append",
"(",
"f",
")",
"retDict",
"[",
"'num-Unknown'",
"]",
".",
"append",
"(",
"u",
")",
"for",
"k",
",",
"v",
"in",
"R",
".",
"subDict",
"(",
"retrievedFields",
",",
"raw",
"=",
"raw",
")",
".",
"items",
"(",
")",
":",
"retDict",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"retDict"
]
| Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection, `None` is given when there is no value and they are in the same order across each tag.
When used with pandas: `pandas.DataFrame(RC.makeDict())` returns a data frame with each column a tag and each row a Record.
# Parameters
_onlyTheseTags_ : `optional [iterable]`
> Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given.
> If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc).
_longNames_ : `optional [bool]`
> Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used.
_cleanedVal_ : `optional [bool]`
> Default `True`, if `True` the processed values for each `Record`'s field will be provided, otherwise the raw values are given.
_numAuthors_ : `optional [bool]`
> Default `True`, if `True` adds the number of authors as the column `'numAuthors'`. | [
"Returns",
"a",
"dict",
"with",
"each",
"key",
"a",
"tag",
"and",
"the",
"values",
"being",
"lists",
"of",
"the",
"values",
"for",
"each",
"of",
"the",
"Records",
"in",
"the",
"collection",
"None",
"is",
"given",
"when",
"there",
"is",
"no",
"value",
"and",
"they",
"are",
"in",
"the",
"same",
"order",
"across",
"each",
"tag",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L698-L753 | train |
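`makeDict` is the bridge to pandas. A sketch assuming the same hypothetical export; `'TI'`, `'PY'` and `'SO'` are the standard WOS tags for title, year and journal.

```python
import metaknowledge as mk
import pandas

RC = mk.RecordCollection("savedrecs.txt")  # hypothetical WOS export file
d = RC.makeDict(onlyTheseTags=['TI', 'PY', 'SO'], longNames=True)
df = pandas.DataFrame(d)  # one row per Record; extra columns: num-Authors, num-Male, num-Female, num-Unknown
print(df.shape, list(df.columns))
```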
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.getCitations | def getCitations(self, field = None, values = None, pandasFriendly = True, counts = True):
"""Creates a pandas ready dict with each row a different citation the contained Records and columns containing the original string, year, journal, author's name and the number of times it occured.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
_counts_ : `optional bool`
> Default `True`, if `False` the counts columns will be removed
# Returns
`dict`
> A pandas ready dict with all the Citations
"""
retCites = []
if values is not None:
if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container):
values = [values]
for R in self:
retCites += R.getCitations(field = field, values = values, pandasFriendly = False)
if pandasFriendly:
return _pandasPrep(retCites, counts)
else:
return list(set(retCites)) | python | def getCitations(self, field = None, values = None, pandasFriendly = True, counts = True):
"""Creates a pandas ready dict with each row a different citation the contained Records and columns containing the original string, year, journal, author's name and the number of times it occured.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
_counts_ : `optional bool`
> Default `True`, if `False` the counts columns will be removed
# Returns
`dict`
> A pandas ready dict with all the Citations
"""
retCites = []
if values is not None:
if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container):
values = [values]
for R in self:
retCites += R.getCitations(field = field, values = values, pandasFriendly = False)
if pandasFriendly:
return _pandasPrep(retCites, counts)
else:
return list(set(retCites)) | [
"def",
"getCitations",
"(",
"self",
",",
"field",
"=",
"None",
",",
"values",
"=",
"None",
",",
"pandasFriendly",
"=",
"True",
",",
"counts",
"=",
"True",
")",
":",
"retCites",
"=",
"[",
"]",
"if",
"values",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
"or",
"not",
"isinstance",
"(",
"values",
",",
"collections",
".",
"abc",
".",
"Container",
")",
":",
"values",
"=",
"[",
"values",
"]",
"for",
"R",
"in",
"self",
":",
"retCites",
"+=",
"R",
".",
"getCitations",
"(",
"field",
"=",
"field",
",",
"values",
"=",
"values",
",",
"pandasFriendly",
"=",
"False",
")",
"if",
"pandasFriendly",
":",
"return",
"_pandasPrep",
"(",
"retCites",
",",
"counts",
")",
"else",
":",
"return",
"list",
"(",
"set",
"(",
"retCites",
")",
")"
]
| Creates a pandas ready dict with each row a different citation the contained Records and columns containing the original string, year, journal, author's name and the number of times it occured.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
_counts_ : `optional bool`
> Default `True`, if `False` the counts columns will be removed
# Returns
`dict`
> A pandas ready dict with all the Citations | [
"Creates",
"a",
"pandas",
"ready",
"dict",
"with",
"each",
"row",
"a",
"different",
"citation",
"the",
"contained",
"Records",
"and",
"columns",
"containing",
"the",
"original",
"string",
"year",
"journal",
"author",
"s",
"name",
"and",
"the",
"number",
"of",
"times",
"it",
"occured",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L900-L940 | train |
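Usage sketch for `getCitations`, mirroring the year-filter example in the docstring; the file name is hypothetical.

```python
import metaknowledge as mk
import pandas

RC = mk.RecordCollection("savedrecs.txt")                   # hypothetical WOS export file
cites = RC.getCitations(field='year', values=[1990, 1991])  # keep only citations from 1990 or 1991
df = pandas.DataFrame(cites)                                # original string, year, journal, author, plus counts
print(df.head())
```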
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.networkCoCitation | def networkCoCitation(self, dropAnon = True, nodeType = "full", nodeInfo = True, fullInfo = False, weighted = True, dropNonJournals = False, count = True, keyWords = None, detailedCore = True, detailedCoreAttributes = False, coreOnly = False, expandedCore = False, addCR = False):
"""Creates a co-citation network for the RecordCollection.
# Parameters
_nodeType_ : `optional [str]`
> One of `"full"`, `"original"`, `"author"`, `"journal"` or `"year"`. Specifies the value of the nodes in the graph. The default `"full"` causes the citations to be compared holistically using the [metaknowledge.Citation](./Citation.html#metaknowledge.citation.Citation) builtin comparison operators. `"original"` uses the raw original strings of the citations. While `"author"`, `"journal"` and `"year"` each use the author, journal and year respectively.
_dropAnon_ : `optional [bool]`
> default `True`, if `True` citations labeled anonymous are removed from the network
_nodeInfo_ : `optional [bool]`
> default `True`, if `True` an extra piece of information is stored with each node. The extra inforamtion is detemined by _nodeType_.
_fullInfo_ : `optional [bool]`
> default `False`, if `True` the original citation string is added to the node as an extra value, the attribute is labeled as fullCite
_weighted_ : `optional [bool]`
> default `True`, wether the edges are weighted. If `True` the edges are weighted by the number of citations.
_dropNonJournals_ : `optional [bool]`
> default `False`, wether to drop citations of non-journals
_count_ : `optional [bool]`
> default `True`, causes the number of occurrences of a node to be counted
_keyWords_ : `optional [str] or [list[str]]`
> A string or list of strings that the citations are checked against, if they contain any of the strings they are removed from the network
_detailedCore_ : `optional [bool or iterable[WOS tag Strings]]`
> default `True`, if `True` all Citations from the core (those of records in the RecordCollection) and the _nodeType_ is `'full'` all nodes from the core will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['AF', 'PY', 'TI', 'SO', 'VL', 'BP']`.
> If _detailedCore_ is an iterable (That evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attribute. All
> The resultant string is the values of each tag, with commas removed, seperated by `', '`, just like the info given by non-core Citations. Note that for tags like `'AF'` that return lists only the first entry in the list will be used. Also a second attribute is created for all nodes called inCore wich is a boolean describing if the node is in the core or not.
> Note: _detailedCore_ is not identical to the _detailedInfo_ argument of [Recordcollection.networkCoAuthor()](#metaknowledge.RecordCollection.networkCoAuthor)
_coreOnly_ : `optional [bool]`
> default `False`, if `True` only Citations from the RecordCollection will be included in the network
_expandedCore_ : `optional [bool]`
> default `False`, if `True` all citations in the ouput graph that are records in the collection will be duplicated for each author. If the nodes are `"full"`, `"original"` or `"author"` this will result in new noded being created for the other options the results are **not** defined or tested. Edges will be created between each of the nodes for each record expanded, attributes will be copied from exiting nodes.
# Returns
`Networkx Graph`
> A networkx graph with hashes as ID and co-citation as edges
"""
allowedTypes = ["full", "original", "author", "journal", "year"]
if nodeType not in allowedTypes:
raise RCValueError("{} is not an allowed nodeType.".format(nodeType))
coreValues = []
if bool(detailedCore):
try:
for tag in detailedCore:
coreValues.append(normalizeToTag(tag))
except TypeError:
coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage']
tmpgrph = nx.Graph()
pcount = 0
progArgs = (0, "Starting to make a co-citation network")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if coreOnly or coreValues or expandedCore:
coreCitesDict = {R.createCitation() : R for R in self}
if coreOnly:
coreCites = coreCitesDict.keys()
else:
coreCites = None
else:
coreCitesDict = None
coreCites = None
for R in self:
if PBar:
pcount += 1
PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R))
Cites = R.get('citations')
if Cites:
filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites)
addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo , fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd = None)
if expandedCore:
if PBar:
PBar.updateVal(.98, "Expanding core Records")
expandRecs(tmpgrph, self, nodeType, weighted)
if PBar:
PBar.finish("Done making a co-citation network from {}".format(self))
return tmpgrph | python | def networkCoCitation(self, dropAnon = True, nodeType = "full", nodeInfo = True, fullInfo = False, weighted = True, dropNonJournals = False, count = True, keyWords = None, detailedCore = True, detailedCoreAttributes = False, coreOnly = False, expandedCore = False, addCR = False):
"""Creates a co-citation network for the RecordCollection.
# Parameters
_nodeType_ : `optional [str]`
> One of `"full"`, `"original"`, `"author"`, `"journal"` or `"year"`. Specifies the value of the nodes in the graph. The default `"full"` causes the citations to be compared holistically using the [metaknowledge.Citation](./Citation.html#metaknowledge.citation.Citation) builtin comparison operators. `"original"` uses the raw original strings of the citations. While `"author"`, `"journal"` and `"year"` each use the author, journal and year respectively.
_dropAnon_ : `optional [bool]`
> default `True`, if `True` citations labeled anonymous are removed from the network
_nodeInfo_ : `optional [bool]`
> default `True`, if `True` an extra piece of information is stored with each node. The extra inforamtion is detemined by _nodeType_.
_fullInfo_ : `optional [bool]`
> default `False`, if `True` the original citation string is added to the node as an extra value, the attribute is labeled as fullCite
_weighted_ : `optional [bool]`
> default `True`, wether the edges are weighted. If `True` the edges are weighted by the number of citations.
_dropNonJournals_ : `optional [bool]`
> default `False`, wether to drop citations of non-journals
_count_ : `optional [bool]`
> default `True`, causes the number of occurrences of a node to be counted
_keyWords_ : `optional [str] or [list[str]]`
> A string or list of strings that the citations are checked against, if they contain any of the strings they are removed from the network
_detailedCore_ : `optional [bool or iterable[WOS tag Strings]]`
> default `True`, if `True` all Citations from the core (those of records in the RecordCollection) and the _nodeType_ is `'full'` all nodes from the core will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['AF', 'PY', 'TI', 'SO', 'VL', 'BP']`.
> If _detailedCore_ is an iterable (That evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attribute. All
> The resultant string is the values of each tag, with commas removed, seperated by `', '`, just like the info given by non-core Citations. Note that for tags like `'AF'` that return lists only the first entry in the list will be used. Also a second attribute is created for all nodes called inCore wich is a boolean describing if the node is in the core or not.
> Note: _detailedCore_ is not identical to the _detailedInfo_ argument of [Recordcollection.networkCoAuthor()](#metaknowledge.RecordCollection.networkCoAuthor)
_coreOnly_ : `optional [bool]`
> default `False`, if `True` only Citations from the RecordCollection will be included in the network
_expandedCore_ : `optional [bool]`
> default `False`, if `True` all citations in the ouput graph that are records in the collection will be duplicated for each author. If the nodes are `"full"`, `"original"` or `"author"` this will result in new noded being created for the other options the results are **not** defined or tested. Edges will be created between each of the nodes for each record expanded, attributes will be copied from exiting nodes.
# Returns
`Networkx Graph`
> A networkx graph with hashes as ID and co-citation as edges
"""
allowedTypes = ["full", "original", "author", "journal", "year"]
if nodeType not in allowedTypes:
raise RCValueError("{} is not an allowed nodeType.".format(nodeType))
coreValues = []
if bool(detailedCore):
try:
for tag in detailedCore:
coreValues.append(normalizeToTag(tag))
except TypeError:
coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage']
tmpgrph = nx.Graph()
pcount = 0
progArgs = (0, "Starting to make a co-citation network")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if coreOnly or coreValues or expandedCore:
coreCitesDict = {R.createCitation() : R for R in self}
if coreOnly:
coreCites = coreCitesDict.keys()
else:
coreCites = None
else:
coreCitesDict = None
coreCites = None
for R in self:
if PBar:
pcount += 1
PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R))
Cites = R.get('citations')
if Cites:
filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites)
addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo , fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd = None)
if expandedCore:
if PBar:
PBar.updateVal(.98, "Expanding core Records")
expandRecs(tmpgrph, self, nodeType, weighted)
if PBar:
PBar.finish("Done making a co-citation network from {}".format(self))
return tmpgrph | [
"def",
"networkCoCitation",
"(",
"self",
",",
"dropAnon",
"=",
"True",
",",
"nodeType",
"=",
"\"full\"",
",",
"nodeInfo",
"=",
"True",
",",
"fullInfo",
"=",
"False",
",",
"weighted",
"=",
"True",
",",
"dropNonJournals",
"=",
"False",
",",
"count",
"=",
"True",
",",
"keyWords",
"=",
"None",
",",
"detailedCore",
"=",
"True",
",",
"detailedCoreAttributes",
"=",
"False",
",",
"coreOnly",
"=",
"False",
",",
"expandedCore",
"=",
"False",
",",
"addCR",
"=",
"False",
")",
":",
"allowedTypes",
"=",
"[",
"\"full\"",
",",
"\"original\"",
",",
"\"author\"",
",",
"\"journal\"",
",",
"\"year\"",
"]",
"if",
"nodeType",
"not",
"in",
"allowedTypes",
":",
"raise",
"RCValueError",
"(",
"\"{} is not an allowed nodeType.\"",
".",
"format",
"(",
"nodeType",
")",
")",
"coreValues",
"=",
"[",
"]",
"if",
"bool",
"(",
"detailedCore",
")",
":",
"try",
":",
"for",
"tag",
"in",
"detailedCore",
":",
"coreValues",
".",
"append",
"(",
"normalizeToTag",
"(",
"tag",
")",
")",
"except",
"TypeError",
":",
"coreValues",
"=",
"[",
"'id'",
",",
"'authorsFull'",
",",
"'year'",
",",
"'title'",
",",
"'journal'",
",",
"'volume'",
",",
"'beginningPage'",
"]",
"tmpgrph",
"=",
"nx",
".",
"Graph",
"(",
")",
"pcount",
"=",
"0",
"progArgs",
"=",
"(",
"0",
",",
"\"Starting to make a co-citation network\"",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"if",
"coreOnly",
"or",
"coreValues",
"or",
"expandedCore",
":",
"coreCitesDict",
"=",
"{",
"R",
".",
"createCitation",
"(",
")",
":",
"R",
"for",
"R",
"in",
"self",
"}",
"if",
"coreOnly",
":",
"coreCites",
"=",
"coreCitesDict",
".",
"keys",
"(",
")",
"else",
":",
"coreCites",
"=",
"None",
"else",
":",
"coreCitesDict",
"=",
"None",
"coreCites",
"=",
"None",
"for",
"R",
"in",
"self",
":",
"if",
"PBar",
":",
"pcount",
"+=",
"1",
"PBar",
".",
"updateVal",
"(",
"pcount",
"/",
"len",
"(",
"self",
")",
",",
"\"Analyzing: {}\"",
".",
"format",
"(",
"R",
")",
")",
"Cites",
"=",
"R",
".",
"get",
"(",
"'citations'",
")",
"if",
"Cites",
":",
"filteredCites",
"=",
"filterCites",
"(",
"Cites",
",",
"nodeType",
",",
"dropAnon",
",",
"dropNonJournals",
",",
"keyWords",
",",
"coreCites",
")",
"addToNetwork",
"(",
"tmpgrph",
",",
"filteredCites",
",",
"count",
",",
"weighted",
",",
"nodeType",
",",
"nodeInfo",
",",
"fullInfo",
",",
"coreCitesDict",
",",
"coreValues",
",",
"detailedCoreAttributes",
",",
"addCR",
",",
"headNd",
"=",
"None",
")",
"if",
"expandedCore",
":",
"if",
"PBar",
":",
"PBar",
".",
"updateVal",
"(",
".98",
",",
"\"Expanding core Records\"",
")",
"expandRecs",
"(",
"tmpgrph",
",",
"self",
",",
"nodeType",
",",
"weighted",
")",
"if",
"PBar",
":",
"PBar",
".",
"finish",
"(",
"\"Done making a co-citation network from {}\"",
".",
"format",
"(",
"self",
")",
")",
"return",
"tmpgrph"
]
| Creates a co-citation network for the RecordCollection.
# Parameters
_nodeType_ : `optional [str]`
> One of `"full"`, `"original"`, `"author"`, `"journal"` or `"year"`. Specifies the value of the nodes in the graph. The default `"full"` causes the citations to be compared holistically using the [metaknowledge.Citation](./Citation.html#metaknowledge.citation.Citation) builtin comparison operators. `"original"` uses the raw original strings of the citations. While `"author"`, `"journal"` and `"year"` each use the author, journal and year respectively.
_dropAnon_ : `optional [bool]`
> default `True`, if `True` citations labeled anonymous are removed from the network
_nodeInfo_ : `optional [bool]`
> default `True`, if `True` an extra piece of information is stored with each node. The extra inforamtion is detemined by _nodeType_.
_fullInfo_ : `optional [bool]`
> default `False`, if `True` the original citation string is added to the node as an extra value, the attribute is labeled as fullCite
_weighted_ : `optional [bool]`
> default `True`, wether the edges are weighted. If `True` the edges are weighted by the number of citations.
_dropNonJournals_ : `optional [bool]`
> default `False`, wether to drop citations of non-journals
_count_ : `optional [bool]`
> default `True`, causes the number of occurrences of a node to be counted
_keyWords_ : `optional [str] or [list[str]]`
> A string or list of strings that the citations are checked against, if they contain any of the strings they are removed from the network
_detailedCore_ : `optional [bool or iterable[WOS tag Strings]]`
> default `True`, if `True` all Citations from the core (those of records in the RecordCollection) and the _nodeType_ is `'full'` all nodes from the core will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['AF', 'PY', 'TI', 'SO', 'VL', 'BP']`.
> If _detailedCore_ is an iterable (That evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attribute. All
> The resultant string is the values of each tag, with commas removed, seperated by `', '`, just like the info given by non-core Citations. Note that for tags like `'AF'` that return lists only the first entry in the list will be used. Also a second attribute is created for all nodes called inCore wich is a boolean describing if the node is in the core or not.
> Note: _detailedCore_ is not identical to the _detailedInfo_ argument of [Recordcollection.networkCoAuthor()](#metaknowledge.RecordCollection.networkCoAuthor)
_coreOnly_ : `optional [bool]`
> default `False`, if `True` only Citations from the RecordCollection will be included in the network
_expandedCore_ : `optional [bool]`
> default `False`, if `True` all citations in the ouput graph that are records in the collection will be duplicated for each author. If the nodes are `"full"`, `"original"` or `"author"` this will result in new noded being created for the other options the results are **not** defined or tested. Edges will be created between each of the nodes for each record expanded, attributes will be copied from exiting nodes.
# Returns
`Networkx Graph`
> A networkx graph with hashes as ID and co-citation as edges | [
"Creates",
"a",
"co",
"-",
"citation",
"network",
"for",
"the",
"RecordCollection",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1075-L1177 | train |
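A sketch of building a co-citation network and reading the node attributes (`info` and `count`) that the helpers above attach; the file name is hypothetical.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")  # hypothetical WOS export file
G = RC.networkCoCitation(nodeType="journal", dropNonJournals=True)
print(G.number_of_nodes(), "nodes,", G.number_of_edges(), "edges")

# five most frequently cited nodes, using the 'count' attribute set when count=True
top = sorted(G.nodes(data=True), key=lambda nd: nd[1].get('count', 0), reverse=True)[:5]
for node, attrs in top:
    print(attrs.get('count'), node, attrs.get('info'))
```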
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.networkBibCoupling | def networkBibCoupling(self, weighted = True, fullInfo = False, addCR = False):
"""Creates a bibliographic coupling network based on citations for the RecordCollection.
# Parameters
_weighted_ : `optional bool`
> Default `True`, if `True` the weight of the edges will be added to the network
_fullInfo_ : `optional bool`
> Default `False`, if `True` the full citation string will be added to each of the nodes of the network.
# Returns
`Networkx Graph`
> A graph of the bibliographic coupling
"""
progArgs = (0, "Make a citation network for coupling")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
citeGrph = self.networkCitation(weighted = False, directed = True, detailedCore = True, fullInfo = fullInfo, count = False, nodeInfo = True, addCR = addCR, _quiet = True)
pcount = 0
pmax = len(citeGrph)
PBar.updateVal(.2, "Starting to classify nodes")
workingGrph = nx.Graph()
couplingSet = set()
for n, d in citeGrph.nodes(data = True):
pcount += 1
PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n))
if d['inCore']:
workingGrph.add_node(n, **d)
if citeGrph.in_degree(n) > 0:
couplingSet.add(n)
pcount = 0
pmax = len(couplingSet)
for n in couplingSet:
PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n))
citesLst = list(citeGrph.in_edges(n))
for i, edgeOuter in enumerate(citesLst):
outerNode = edgeOuter[0]
for edgeInner in citesLst[i + 1:]:
innerNode = edgeInner[0]
if weighted and workingGrph.has_edge(outerNode, innerNode):
workingGrph.edges[outerNode, innerNode]['weight'] += 1
elif weighted:
workingGrph.add_edge(outerNode, innerNode, weight = 1)
else:
workingGrph.add_edge(outerNode, innerNode)
PBar.finish("Done making a bib-coupling network from {}".format(self))
return workingGrph | python | def networkBibCoupling(self, weighted = True, fullInfo = False, addCR = False):
"""Creates a bibliographic coupling network based on citations for the RecordCollection.
# Parameters
_weighted_ : `optional bool`
> Default `True`, if `True` the weight of the edges will be added to the network
_fullInfo_ : `optional bool`
> Default `False`, if `True` the full citation string will be added to each of the nodes of the network.
# Returns
`Networkx Graph`
> A graph of the bibliographic coupling
"""
progArgs = (0, "Make a citation network for coupling")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
citeGrph = self.networkCitation(weighted = False, directed = True, detailedCore = True, fullInfo = fullInfo, count = False, nodeInfo = True, addCR = addCR, _quiet = True)
pcount = 0
pmax = len(citeGrph)
PBar.updateVal(.2, "Starting to classify nodes")
workingGrph = nx.Graph()
couplingSet = set()
for n, d in citeGrph.nodes(data = True):
pcount += 1
PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n))
if d['inCore']:
workingGrph.add_node(n, **d)
if citeGrph.in_degree(n) > 0:
couplingSet.add(n)
pcount = 0
pmax = len(couplingSet)
for n in couplingSet:
PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n))
citesLst = list(citeGrph.in_edges(n))
for i, edgeOuter in enumerate(citesLst):
outerNode = edgeOuter[0]
for edgeInner in citesLst[i + 1:]:
innerNode = edgeInner[0]
if weighted and workingGrph.has_edge(outerNode, innerNode):
workingGrph.edges[outerNode, innerNode]['weight'] += 1
elif weighted:
workingGrph.add_edge(outerNode, innerNode, weight = 1)
else:
workingGrph.add_edge(outerNode, innerNode)
PBar.finish("Done making a bib-coupling network from {}".format(self))
return workingGrph | [
"def",
"networkBibCoupling",
"(",
"self",
",",
"weighted",
"=",
"True",
",",
"fullInfo",
"=",
"False",
",",
"addCR",
"=",
"False",
")",
":",
"progArgs",
"=",
"(",
"0",
",",
"\"Make a citation network for coupling\"",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"citeGrph",
"=",
"self",
".",
"networkCitation",
"(",
"weighted",
"=",
"False",
",",
"directed",
"=",
"True",
",",
"detailedCore",
"=",
"True",
",",
"fullInfo",
"=",
"fullInfo",
",",
"count",
"=",
"False",
",",
"nodeInfo",
"=",
"True",
",",
"addCR",
"=",
"addCR",
",",
"_quiet",
"=",
"True",
")",
"pcount",
"=",
"0",
"pmax",
"=",
"len",
"(",
"citeGrph",
")",
"PBar",
".",
"updateVal",
"(",
".2",
",",
"\"Starting to classify nodes\"",
")",
"workingGrph",
"=",
"nx",
".",
"Graph",
"(",
")",
"couplingSet",
"=",
"set",
"(",
")",
"for",
"n",
",",
"d",
"in",
"citeGrph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"pcount",
"+=",
"1",
"PBar",
".",
"updateVal",
"(",
".2",
"+",
".4",
"*",
"(",
"pcount",
"/",
"pmax",
")",
",",
"\"Classifying: {}\"",
".",
"format",
"(",
"n",
")",
")",
"if",
"d",
"[",
"'inCore'",
"]",
":",
"workingGrph",
".",
"add_node",
"(",
"n",
",",
"*",
"*",
"d",
")",
"if",
"citeGrph",
".",
"in_degree",
"(",
"n",
")",
">",
"0",
":",
"couplingSet",
".",
"add",
"(",
"n",
")",
"pcount",
"=",
"0",
"pmax",
"=",
"len",
"(",
"couplingSet",
")",
"for",
"n",
"in",
"couplingSet",
":",
"PBar",
".",
"updateVal",
"(",
".6",
"+",
".4",
"*",
"(",
"pcount",
"/",
"pmax",
")",
",",
"\"Coupling: {}\"",
".",
"format",
"(",
"n",
")",
")",
"citesLst",
"=",
"list",
"(",
"citeGrph",
".",
"in_edges",
"(",
"n",
")",
")",
"for",
"i",
",",
"edgeOuter",
"in",
"enumerate",
"(",
"citesLst",
")",
":",
"outerNode",
"=",
"edgeOuter",
"[",
"0",
"]",
"for",
"edgeInner",
"in",
"citesLst",
"[",
"i",
"+",
"1",
":",
"]",
":",
"innerNode",
"=",
"edgeInner",
"[",
"0",
"]",
"if",
"weighted",
"and",
"workingGrph",
".",
"has_edge",
"(",
"outerNode",
",",
"innerNode",
")",
":",
"workingGrph",
".",
"edges",
"[",
"outerNode",
",",
"innerNode",
"]",
"[",
"'weight'",
"]",
"+=",
"1",
"elif",
"weighted",
":",
"workingGrph",
".",
"add_edge",
"(",
"outerNode",
",",
"innerNode",
",",
"weight",
"=",
"1",
")",
"else",
":",
"workingGrph",
".",
"add_edge",
"(",
"outerNode",
",",
"innerNode",
")",
"PBar",
".",
"finish",
"(",
"\"Done making a bib-coupling network from {}\"",
".",
"format",
"(",
"self",
")",
")",
"return",
"workingGrph"
]
| Creates a bibliographic coupling network based on citations for the RecordCollection.
# Parameters
_weighted_ : `optional bool`
> Default `True`, if `True` the weight of the edges will be added to the network
_fullInfo_ : `optional bool`
> Default `False`, if `True` the full citation string will be added to each of the nodes of the network.
# Returns
`Networkx Graph`
> A graph of the bibliographic coupling | [
"Creates",
"a",
"bibliographic",
"coupling",
"network",
"based",
"on",
"citations",
"for",
"the",
"RecordCollection",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1294-L1348 | train |
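A short, hedged sketch of how the bibliographic coupling network above might be used. Only `networkBibCoupling` and its `weighted` argument come from the record; the source path is illustrative, and the weight lookup relies on the `weight` attribute the method itself sets when `weighted=True`.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

coupling = RC.networkBibCoupling(weighted=True)

# Edge weights count shared references between two core records;
# list the five most strongly coupled pairs
topPairs = sorted(coupling.edges(data='weight'), key=lambda e: e[2], reverse=True)[:5]
for u, v, w in topPairs:
    print(w, u, v)
```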
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.yearSplit | def yearSplit(self, startYear, endYear, dropMissingYears = True):
"""Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive.
# Parameters
_startYear_ : `int`
> The smallest year to be included in the returned RecordCollection
_endYear_ : `int`
> The largest year to be included in the returned RecordCollection
_dropMissingYears_ : `optional [bool]`
> Default `True`, if `True` Records with missing years will be dropped. If `False` a `TypeError` exception will be raised
# Returns
`RecordCollection`
> A RecordCollection of Records from _startYear_ to _endYear_
"""
recordsInRange = set()
for R in self:
try:
if R.get('year') >= startYear and R.get('year') <= endYear:
recordsInRange.add(R)
except TypeError:
if dropMissingYears:
pass
else:
raise
RCret = RecordCollection(recordsInRange, name = "{}({}-{})".format(self.name, startYear, endYear), quietStart = True)
RCret._collectedTypes = self._collectedTypes.copy()
return RCret | python | def yearSplit(self, startYear, endYear, dropMissingYears = True):
"""Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive.
# Parameters
_startYear_ : `int`
> The smallest year to be included in the returned RecordCollection
_endYear_ : `int`
> The largest year to be included in the returned RecordCollection
_dropMissingYears_ : `optional [bool]`
> Default `True`, if `True` Records with missing years will be dropped. If `False` a `TypeError` exception will be raised
# Returns
`RecordCollection`
> A RecordCollection of Records from _startYear_ to _endYear_
"""
recordsInRange = set()
for R in self:
try:
if R.get('year') >= startYear and R.get('year') <= endYear:
recordsInRange.add(R)
except TypeError:
if dropMissingYears:
pass
else:
raise
RCret = RecordCollection(recordsInRange, name = "{}({}-{})".format(self.name, startYear, endYear), quietStart = True)
RCret._collectedTypes = self._collectedTypes.copy()
return RCret | [
"def",
"yearSplit",
"(",
"self",
",",
"startYear",
",",
"endYear",
",",
"dropMissingYears",
"=",
"True",
")",
":",
"recordsInRange",
"=",
"set",
"(",
")",
"for",
"R",
"in",
"self",
":",
"try",
":",
"if",
"R",
".",
"get",
"(",
"'year'",
")",
">=",
"startYear",
"and",
"R",
".",
"get",
"(",
"'year'",
")",
"<=",
"endYear",
":",
"recordsInRange",
".",
"add",
"(",
"R",
")",
"except",
"TypeError",
":",
"if",
"dropMissingYears",
":",
"pass",
"else",
":",
"raise",
"RCret",
"=",
"RecordCollection",
"(",
"recordsInRange",
",",
"name",
"=",
"\"{}({}-{})\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"startYear",
",",
"endYear",
")",
",",
"quietStart",
"=",
"True",
")",
"RCret",
".",
"_collectedTypes",
"=",
"self",
".",
"_collectedTypes",
".",
"copy",
"(",
")",
"return",
"RCret"
]
| Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive.
# Parameters
_startYear_ : `int`
> The smallest year to be included in the returned RecordCollection
_endYear_ : `int`
> The largest year to be included in the returned RecordCollection
_dropMissingYears_ : `optional [bool]`
> Default `True`, if `True` Records with missing years will be dropped. If `False` a `TypeError` exception will be raised
# Returns
`RecordCollection`
> A RecordCollection of Records from _startYear_ to _endYear_ | [
"Creates",
"a",
"RecordCollection",
"of",
"Records",
"from",
"the",
"years",
"between",
"_startYear_",
"and",
"_endYear_",
"inclusive",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1362-L1397 | train |
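A usage sketch for `yearSplit` as documented above; the source directory is an assumption.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

# Records published from 2005 to 2010 inclusive; Records with no year are
# silently dropped because dropMissingYears defaults to True
recent = RC.yearSplit(2005, 2010)
print("{} of {} records fall in 2005-2010".format(len(recent), len(RC)))
```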
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.localCiteStats | def localCiteStats(self, pandasFriendly = False, keyType = "citation"):
"""Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values
# Parameters
_pandasFriendly_ : `optional [bool]`
> default `False`, makes the output be a dict with two keys one `'Citations'` is the citations the other is their occurrence counts as `'Counts'`.
_keyType_ : `optional [str]`
> default `'citation'`, the type of key to use for the dictionary, the valid strings are `'citation'`, `'journal'`, `'year'` or `'author'`. If changed from `'citation'` all citations matching the requested option will be contracted and their counts added together.
# Returns
`dict[str, int or Citation : int]`
> A dictionary with keys as given by _keyType_ and integers giving their rates of occurrence in the collection
"""
count = 0
recCount = len(self)
progArgs = (0, "Starting to get the local stats on {}s.".format(keyType))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
keyTypesLst = ["citation", "journal", "year", "author"]
citesDict = {}
if keyType not in keyTypesLst:
raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1]))
for R in self:
rCites = R.get('citations')
if PBar:
count += 1
PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT))
if rCites:
for c in rCites:
if keyType == keyTypesLst[0]:
cVal = c
else:
cVal = getattr(c, keyType)
if cVal is None:
continue
if cVal in citesDict:
citesDict[cVal] += 1
else:
citesDict[cVal] = 1
if PBar:
PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType))
if pandasFriendly:
citeLst = []
countLst = []
for cite, occ in citesDict.items():
citeLst.append(cite)
countLst.append(occ)
return {"Citations" : citeLst, "Counts" : countLst}
else:
return citesDict | python | def localCiteStats(self, pandasFriendly = False, keyType = "citation"):
"""Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values
# Parameters
_pandasFriendly_ : `optional [bool]`
> default `False`, makes the output be a dict with two keys one `'Citations'` is the citations the other is their occurrence counts as `'Counts'`.
_keyType_ : `optional [str]`
> default `'citation'`, the type of key to use for the dictionary, the valid strings are `'citation'`, `'journal'`, `'year'` or `'author'`. If changed from `'citation'` all citations matching the requested option will be contracted and their counts added together.
# Returns
`dict[str, int or Citation : int]`
> A dictionary with keys as given by _keyType_ and integers giving their rates of occurrence in the collection
"""
count = 0
recCount = len(self)
progArgs = (0, "Starting to get the local stats on {}s.".format(keyType))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
keyTypesLst = ["citation", "journal", "year", "author"]
citesDict = {}
if keyType not in keyTypesLst:
raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1]))
for R in self:
rCites = R.get('citations')
if PBar:
count += 1
PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT))
if rCites:
for c in rCites:
if keyType == keyTypesLst[0]:
cVal = c
else:
cVal = getattr(c, keyType)
if cVal is None:
continue
if cVal in citesDict:
citesDict[cVal] += 1
else:
citesDict[cVal] = 1
if PBar:
PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType))
if pandasFriendly:
citeLst = []
countLst = []
for cite, occ in citesDict.items():
citeLst.append(cite)
countLst.append(occ)
return {"Citations" : citeLst, "Counts" : countLst}
else:
return citesDict | [
"def",
"localCiteStats",
"(",
"self",
",",
"pandasFriendly",
"=",
"False",
",",
"keyType",
"=",
"\"citation\"",
")",
":",
"count",
"=",
"0",
"recCount",
"=",
"len",
"(",
"self",
")",
"progArgs",
"=",
"(",
"0",
",",
"\"Starting to get the local stats on {}s.\"",
".",
"format",
"(",
"keyType",
")",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"keyTypesLst",
"=",
"[",
"\"citation\"",
",",
"\"journal\"",
",",
"\"year\"",
",",
"\"author\"",
"]",
"citesDict",
"=",
"{",
"}",
"if",
"keyType",
"not",
"in",
"keyTypesLst",
":",
"raise",
"TypeError",
"(",
"\"{} is not a valid key type, only '{}' or '{}' are.\"",
".",
"format",
"(",
"keyType",
",",
"\"', '\"",
".",
"join",
"(",
"keyTypesLst",
"[",
":",
"-",
"1",
"]",
")",
",",
"keyTypesLst",
"[",
"-",
"1",
"]",
")",
")",
"for",
"R",
"in",
"self",
":",
"rCites",
"=",
"R",
".",
"get",
"(",
"'citations'",
")",
"if",
"PBar",
":",
"count",
"+=",
"1",
"PBar",
".",
"updateVal",
"(",
"count",
"/",
"recCount",
",",
"\"Analysing: {}\"",
".",
"format",
"(",
"R",
".",
"UT",
")",
")",
"if",
"rCites",
":",
"for",
"c",
"in",
"rCites",
":",
"if",
"keyType",
"==",
"keyTypesLst",
"[",
"0",
"]",
":",
"cVal",
"=",
"c",
"else",
":",
"cVal",
"=",
"getattr",
"(",
"c",
",",
"keyType",
")",
"if",
"cVal",
"is",
"None",
":",
"continue",
"if",
"cVal",
"in",
"citesDict",
":",
"citesDict",
"[",
"cVal",
"]",
"+=",
"1",
"else",
":",
"citesDict",
"[",
"cVal",
"]",
"=",
"1",
"if",
"PBar",
":",
"PBar",
".",
"finish",
"(",
"\"Done, {} {} fields analysed\"",
".",
"format",
"(",
"len",
"(",
"citesDict",
")",
",",
"keyType",
")",
")",
"if",
"pandasFriendly",
":",
"citeLst",
"=",
"[",
"]",
"countLst",
"=",
"[",
"]",
"for",
"cite",
",",
"occ",
"in",
"citesDict",
".",
"items",
"(",
")",
":",
"citeLst",
".",
"append",
"(",
"cite",
")",
"countLst",
".",
"append",
"(",
"occ",
")",
"return",
"{",
"\"Citations\"",
":",
"citeLst",
",",
"\"Counts\"",
":",
"countLst",
"}",
"else",
":",
"return",
"citesDict"
]
| Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values
# Parameters
_pandasFriendly_ : `optional [bool]`
> default `False`, makes the output be a dict with two keys one `'Citations'` is the citations the other is their occurrence counts as `'Counts'`.
_keyType_ : `optional [str]`
> default `'citation'`, the type of key to use for the dictionary, the valid strings are `'citation'`, `'journal'`, `'year'` or `'author'`. If changed from `'citation'` all citations matching the requested option will be contracted and their counts added together.
# Returns
`dict[str, int or Citation : int]`
> A dictionary with keys as given by _keyType_ and integers giving their rates of occurrence in the collection | [
"Returns",
"a",
"dict",
"with",
"all",
"the",
"citations",
"in",
"the",
"CR",
"field",
"as",
"keys",
"and",
"the",
"number",
"of",
"times",
"they",
"occur",
"as",
"the",
"values"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1399-L1457 | train |
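A sketch of feeding the `pandasFriendly` output of `localCiteStats` into pandas. The source path is an assumption, and pandas is only used here because the record's own docstring says the dict is pandas-ready.

```python
import metaknowledge as mk
import pandas as pd

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

# Occurrence counts of cited journals across every CR field in the collection
stats = RC.localCiteStats(pandasFriendly=True, keyType='journal')
df = pd.DataFrame(stats).sort_values('Counts', ascending=False)
print(df.head(10))
```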
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.localCitesOf | def localCitesOf(self, rec):
"""Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it.
# Parameters
_rec_ : `Record, str or Citation`
> The object that is being cited
# Returns
`RecordCollection`
> A `RecordCollection` containing only those `Records` that cite _rec_
"""
localCites = []
if isinstance(rec, Record):
recCite = rec.createCitation()
if isinstance(rec, str):
try:
recCite = self.getID(rec)
except ValueError:
try:
recCite = Citation(rec)
except AttributeError:
raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite))
else:
if recCite is None:
return RecordCollection(inCollection = localCites, name = "Records_citing_{}".format(rec), quietStart = True)
else:
recCite = recCite.createCitation()
elif isinstance(rec, Citation):
recCite = rec
else:
raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec))
for R in self:
rCites = R.get('citations')
if rCites:
for cite in rCites:
if recCite == cite:
localCites.append(R)
break
return RecordCollection(inCollection = localCites, name = "Records_citing_'{}'".format(rec), quietStart = True) | python | def localCitesOf(self, rec):
"""Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it.
# Parameters
_rec_ : `Record, str or Citation`
> The object that is being cited
# Returns
`RecordCollection`
> A `RecordCollection` containing only those `Records` that cite _rec_
"""
localCites = []
if isinstance(rec, Record):
recCite = rec.createCitation()
if isinstance(rec, str):
try:
recCite = self.getID(rec)
except ValueError:
try:
recCite = Citation(rec)
except AttributeError:
raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite))
else:
if recCite is None:
return RecordCollection(inCollection = localCites, name = "Records_citing_{}".format(rec), quietStart = True)
else:
recCite = recCite.createCitation()
elif isinstance(rec, Citation):
recCite = rec
else:
raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec))
for R in self:
rCites = R.get('citations')
if rCites:
for cite in rCites:
if recCite == cite:
localCites.append(R)
break
return RecordCollection(inCollection = localCites, name = "Records_citing_'{}'".format(rec), quietStart = True) | [
"def",
"localCitesOf",
"(",
"self",
",",
"rec",
")",
":",
"localCites",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"rec",
",",
"Record",
")",
":",
"recCite",
"=",
"rec",
".",
"createCitation",
"(",
")",
"if",
"isinstance",
"(",
"rec",
",",
"str",
")",
":",
"try",
":",
"recCite",
"=",
"self",
".",
"getID",
"(",
"rec",
")",
"except",
"ValueError",
":",
"try",
":",
"recCite",
"=",
"Citation",
"(",
"rec",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid WOS string or a valid citation string\"",
".",
"format",
"(",
"recCite",
")",
")",
"else",
":",
"if",
"recCite",
"is",
"None",
":",
"return",
"RecordCollection",
"(",
"inCollection",
"=",
"localCites",
",",
"name",
"=",
"\"Records_citing_{}\"",
".",
"format",
"(",
"rec",
")",
",",
"quietStart",
"=",
"True",
")",
"else",
":",
"recCite",
"=",
"recCite",
".",
"createCitation",
"(",
")",
"elif",
"isinstance",
"(",
"rec",
",",
"Citation",
")",
":",
"recCite",
"=",
"rec",
"else",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid input, rec must be a Record, string or Citation object.\"",
".",
"format",
"(",
"rec",
")",
")",
"for",
"R",
"in",
"self",
":",
"rCites",
"=",
"R",
".",
"get",
"(",
"'citations'",
")",
"if",
"rCites",
":",
"for",
"cite",
"in",
"rCites",
":",
"if",
"recCite",
"==",
"cite",
":",
"localCites",
".",
"append",
"(",
"R",
")",
"break",
"return",
"RecordCollection",
"(",
"inCollection",
"=",
"localCites",
",",
"name",
"=",
"\"Records_citing_'{}'\"",
".",
"format",
"(",
"rec",
")",
",",
"quietStart",
"=",
"True",
")"
]
| Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it.
# Parameters
_rec_ : `Record, str or Citation`
> The object that is being cited
# Returns
`RecordCollection`
> A `RecordCollection` containing only those `Records` that cite _rec_ | [
"Takes",
"in",
"a",
"Record",
"WOS",
"string",
"citation",
"string",
"or",
"Citation",
"and",
"returns",
"a",
"RecordCollection",
"of",
"all",
"records",
"that",
"cite",
"it",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1459-L1501 | train |
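A small sketch of `localCitesOf`, assuming a non-empty collection built from a hypothetical `savedrecs/` directory; the record passed in can also be a WOS ID string or a Citation, as the docstring above notes.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

target = next(iter(RC))            # any Record already in the collection
citing = RC.localCitesOf(target)   # Records whose CR field cites it
print("{} records cite {}".format(len(citing), target))
```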
networks-lab/metaknowledge | metaknowledge/recordCollection.py | RecordCollection.citeFilter | def citeFilter(self, keyString = '', field = 'all', reverse = False, caseSensitive = False):
"""Filters `Records` by some string, _keyString_, in their citations and returns all `Records` with at least one citation possessing _keyString_ in the field given by _field_.
# Parameters
_keyString_ : `optional [str]`
> Default `''`, gives the string to be searched for, if it is blank then all citations with the specified field will be matched
_field_ : `optional [str]`
> Default `'all'`, gives the component of the citation to be looked at, it can be one of a few strings. The default is `'all'` which will cause the entire original `Citation` to be searched. It can be used to search across fields, e.g. `'1970, V2'` is a valid keystring
The other options are:
+ `'author'`, searches the author field
+ `'year'`, searches the year field
+ `'journal'`, searches the journal field
+ `'V'`, searches the volume field
+ `'P'`, searches the page field
+ `'misc'`, searches all the remaining uncategorized information
+ `'anonymous'`, searches for anonymous `Citations`, _keyString_ is not used
+ `'bad'`, searches for bad citations, keyString is not used
_reverse_ : `optional [bool]`
> Default `False`, being set to `True` causes all `Records` not matching the query to be returned
_caseSensitive_ : `optional [bool]`
> Default `False`, if `True` causes the search across the original to be case sensitive, **only** the `'all'` option can be case sensitive
"""
retRecs = []
keyString = str(keyString)
for R in self:
try:
if field == 'all':
for cite in R.get('citations'):
if caseSensitive:
if keyString in cite.original:
retRecs.append(R)
break
else:
if keyString.upper() in cite.original.upper():
retRecs.append(R)
break
elif field == 'author':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.author.upper():
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'journal':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.journal:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'year':
for cite in R.get('citations'):
try:
if int(keyString) == cite.year:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'V':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.V:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'P':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.P:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'misc':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.misc:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'anonymous':
for cite in R.get('citations'):
if cite.isAnonymous():
retRecs.append(R)
break
elif field == 'bad':
for cite in R.get('citations'):
if cite.bad:
retRecs.append(R)
break
except TypeError:
pass
if reverse:
excluded = []
for R in self:
if R not in retRecs:
excluded.append(R)
return RecordCollection(inCollection = excluded, name = self.name, quietStart = True)
else:
return RecordCollection(inCollection = retRecs, name = self.name, quietStart = True) | python | def citeFilter(self, keyString = '', field = 'all', reverse = False, caseSensitive = False):
"""Filters `Records` by some string, _keyString_, in their citations and returns all `Records` with at least one citation possessing _keyString_ in the field given by _field_.
# Parameters
_keyString_ : `optional [str]`
> Default `''`, gives the string to be searched for, if it is blank then all citations with the specified field will be matched
_field_ : `optional [str]`
> Default `'all'`, gives the component of the citation to be looked at, it can be one of a few strings. The default is `'all'` which will cause the entire original `Citation` to be searched. It can be used to search across fields, e.g. `'1970, V2'` is a valid keystring
The other options are:
+ `'author'`, searches the author field
+ `'year'`, searches the year field
+ `'journal'`, searches the journal field
+ `'V'`, searches the volume field
+ `'P'`, searches the page field
+ `'misc'`, searches all the remaining uncategorized information
+ `'anonymous'`, searches for anonymous `Citations`, _keyString_ is not used
+ `'bad'`, searches for bad citations, keyString is not used
_reverse_ : `optional [bool]`
> Default `False`, being set to `True` causes all `Records` not matching the query to be returned
_caseSensitive_ : `optional [bool]`
> Default `False`, if `True` causes the search across the original to be case sensitive, **only** the `'all'` option can be case sensitive
"""
retRecs = []
keyString = str(keyString)
for R in self:
try:
if field == 'all':
for cite in R.get('citations'):
if caseSensitive:
if keyString in cite.original:
retRecs.append(R)
break
else:
if keyString.upper() in cite.original.upper():
retRecs.append(R)
break
elif field == 'author':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.author.upper():
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'journal':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.journal:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'year':
for cite in R.get('citations'):
try:
if int(keyString) == cite.year:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'V':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.V:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'P':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.P:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'misc':
for cite in R.get('citations'):
try:
if keyString.upper() in cite.misc:
retRecs.append(R)
break
except AttributeError:
pass
elif field == 'anonymous':
for cite in R.get('citations'):
if cite.isAnonymous():
retRecs.append(R)
break
elif field == 'bad':
for cite in R.get('citations'):
if cite.bad:
retRecs.append(R)
break
except TypeError:
pass
if reverse:
excluded = []
for R in self:
if R not in retRecs:
excluded.append(R)
return RecordCollection(inCollection = excluded, name = self.name, quietStart = True)
else:
return RecordCollection(inCollection = retRecs, name = self.name, quietStart = True) | [
"def",
"citeFilter",
"(",
"self",
",",
"keyString",
"=",
"''",
",",
"field",
"=",
"'all'",
",",
"reverse",
"=",
"False",
",",
"caseSensitive",
"=",
"False",
")",
":",
"retRecs",
"=",
"[",
"]",
"keyString",
"=",
"str",
"(",
"keyString",
")",
"for",
"R",
"in",
"self",
":",
"try",
":",
"if",
"field",
"==",
"'all'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"if",
"caseSensitive",
":",
"if",
"keyString",
"in",
"cite",
".",
"original",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"else",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"original",
".",
"upper",
"(",
")",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"elif",
"field",
"==",
"'author'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"author",
".",
"upper",
"(",
")",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'journal'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"journal",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'year'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"int",
"(",
"keyString",
")",
"==",
"cite",
".",
"year",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'V'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"V",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'P'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"P",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'misc'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"try",
":",
"if",
"keyString",
".",
"upper",
"(",
")",
"in",
"cite",
".",
"misc",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"AttributeError",
":",
"pass",
"elif",
"field",
"==",
"'anonymous'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"if",
"cite",
".",
"isAnonymous",
"(",
")",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"elif",
"field",
"==",
"'bad'",
":",
"for",
"cite",
"in",
"R",
".",
"get",
"(",
"'citations'",
")",
":",
"if",
"cite",
".",
"bad",
":",
"retRecs",
".",
"append",
"(",
"R",
")",
"break",
"except",
"TypeError",
":",
"pass",
"if",
"reverse",
":",
"excluded",
"=",
"[",
"]",
"for",
"R",
"in",
"self",
":",
"if",
"R",
"not",
"in",
"retRecs",
":",
"excluded",
".",
"append",
"(",
"R",
")",
"return",
"RecordCollection",
"(",
"inCollection",
"=",
"excluded",
",",
"name",
"=",
"self",
".",
"name",
",",
"quietStart",
"=",
"True",
")",
"else",
":",
"return",
"RecordCollection",
"(",
"inCollection",
"=",
"retRecs",
",",
"name",
"=",
"self",
".",
"name",
",",
"quietStart",
"=",
"True",
")"
]
| Filters `Records` by some string, _keyString_, in their citations and returns all `Records` with at least one citation possessing _keyString_ in the field given by _field_.
# Parameters
_keyString_ : `optional [str]`
> Default `''`, gives the string to be searched for, if it is blank then all citations with the specified field will be matched
_field_ : `optional [str]`
> Default `'all'`, gives the component of the citation to be looked at, it can be one of a few strings. The default is `'all'` which will cause the entire original `Citation` to be searched. It can be used to search across fields, e.g. `'1970, V2'` is a valid keystring
The other options are:
+ `'author'`, searches the author field
+ `'year'`, searches the year field
+ `'journal'`, searches the journal field
+ `'V'`, searches the volume field
+ `'P'`, searches the page field
+ `'misc'`, searches all the remaining uncategorized information
+ `'anonymous'`, searches for anonymous `Citations`, _keyString_ is not used
+ `'bad'`, searches for bad citations, keyString is not used
_reverse_ : `optional [bool]`
> Default `False`, being set to `True` causes all `Records` not matching the query to be returned
_caseSensitive_ : `optional [bool]`
> Default `False`, if `True` causes the search across the original to be case sensitive, **only** the `'all'` option can be case sensitive | [
"Filters",
"Records",
"by",
"some",
"string",
"_keyString_",
"in",
"their",
"citations",
"and",
"returns",
"all",
"Records",
"with",
"at",
"least",
"one",
"citation",
"possessing",
"_keyString_",
"in",
"the",
"field",
"given",
"by",
"_field_",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1503-L1615 | train |
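A sketch of a few `citeFilter` queries using the fields documented above; the source path and the search strings are illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

# Records citing anything from a journal whose name contains 'PHYS REV'
physRev = RC.citeFilter('PHYS REV', field='journal')

# Records citing anything from 1990, and the complement of that set
from1990 = RC.citeFilter(1990, field='year')
not1990 = RC.citeFilter(1990, field='year', reverse=True)

print(len(physRev), len(from1990), len(not1990))
```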
networks-lab/metaknowledge | metaknowledge/citation.py | filterNonJournals | def filterNonJournals(citesLst, invert = False):
"""Removes the `Citations` from _citesLst_ that are not journals
# Parameters
_citesLst_ : `list [Citation]`
> A list of citations to be filtered
_invert_ : `optional [bool]`
> Default `False`, if `True` non-journals will be kept instead of journals
# Returns
`list [Citation]`
> A filtered list of Citations from _citesLst_
"""
retCites = []
for c in citesLst:
if c.isJournal():
if not invert:
retCites.append(c)
elif invert:
retCites.append(c)
return retCites | python | def filterNonJournals(citesLst, invert = False):
"""Removes the `Citations` from _citesLst_ that are not journals
# Parameters
_citesLst_ : `list [Citation]`
> A list of citations to be filtered
_invert_ : `optional [bool]`
> Default `False`, if `True` non-journals will be kept instead of journals
# Returns
`list [Citation]`
> A filtered list of Citations from _citesLst_
"""
retCites = []
for c in citesLst:
if c.isJournal():
if not invert:
retCites.append(c)
elif invert:
retCites.append(c)
return retCites | [
"def",
"filterNonJournals",
"(",
"citesLst",
",",
"invert",
"=",
"False",
")",
":",
"retCites",
"=",
"[",
"]",
"for",
"c",
"in",
"citesLst",
":",
"if",
"c",
".",
"isJournal",
"(",
")",
":",
"if",
"not",
"invert",
":",
"retCites",
".",
"append",
"(",
"c",
")",
"elif",
"invert",
":",
"retCites",
".",
"append",
"(",
"c",
")",
"return",
"retCites"
]
| Removes the `Citations` from _citesLst_ that are not journals
# Parameters
_citesLst_ : `list [Citation]`
> A list of citations to be filtered
_invert_ : `optional [bool]`
> Default `False`, if `True` non-journals will be kept instead of journals
# Returns
`list [Citation]`
> A filtered list of Citations from _citesLst_ | [
"Removes",
"the",
"Citations",
"from",
"_citesLst_",
"that",
"are",
"not",
"journals"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/citation.py#L364-L391 | train |
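A sketch of `filterNonJournals` applied to one Record's citation list. The import path follows the `metaknowledge/citation.py` path shown in this record; the source directory and the assumption that the collection is non-empty are illustrative.

```python
import metaknowledge as mk
from metaknowledge.citation import filterNonJournals

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory
R = next(iter(RC))                      # any one Record

cites = R.get('citations') or []
journals = filterNonJournals(cites)              # only journal Citations
others = filterNonJournals(cites, invert=True)   # everything else
print(len(journals), len(others))
```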
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.add | def add(self, elem):
""" Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added
"""
if isinstance(elem, self._allowedTypes):
self._collection.add(elem)
self._collectedTypes.add(type(elem).__name__)
else:
raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem)) | python | def add(self, elem):
""" Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added
"""
if isinstance(elem, self._allowedTypes):
self._collection.add(elem)
self._collectedTypes.add(type(elem).__name__)
else:
raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem)) | [
"def",
"add",
"(",
"self",
",",
"elem",
")",
":",
"if",
"isinstance",
"(",
"elem",
",",
"self",
".",
"_allowedTypes",
")",
":",
"self",
".",
"_collection",
".",
"add",
"(",
"elem",
")",
"self",
".",
"_collectedTypes",
".",
"add",
"(",
"type",
"(",
"elem",
")",
".",
"__name__",
")",
"else",
":",
"raise",
"CollectionTypeError",
"(",
"\"{} can only contain '{}', '{}' is not allowed.\"",
".",
"format",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"self",
".",
"_allowedTypes",
",",
"elem",
")",
")"
]
| Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added | [
"Adds",
"_elem_",
"to",
"the",
"collection",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L120-L133 | train |
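A sketch of the type checking that `Collection.add` enforces. Building an empty RecordCollection from a set with `name` and `quietStart` mirrors how `yearSplit` and `chunk` construct collections internally elsewhere in this dump; the names and the `savedrecs/` path are illustrative.

```python
import metaknowledge as mk

source = mk.RecordCollection("savedrecs/")                      # hypothetical WOS export directory
merged = mk.RecordCollection(set(), name="merged", quietStart=True)

for R in source:
    merged.add(R)          # Records are an allowed type, so this succeeds

try:
    merged.add("not a record")
except Exception as err:   # the CollectionTypeError described above
    print(err)
```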
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.remove | def remove(self, elem):
"""Removes _elem_ from the collection, will raise a KeyError if _elem_ is missing
# Parameters
_elem_ : `object`
> The object to be removed
"""
try:
return self._collection.remove(elem)
except KeyError:
raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None | python | def remove(self, elem):
"""Removes _elem_ from the collection, will raise a KeyError if _elem_ is missing
# Parameters
_elem_ : `object`
> The object to be removed
"""
try:
return self._collection.remove(elem)
except KeyError:
raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None | [
"def",
"remove",
"(",
"self",
",",
"elem",
")",
":",
"try",
":",
"return",
"self",
".",
"_collection",
".",
"remove",
"(",
"elem",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"'{}' was not found in the {}: '{}'.\"",
".",
"format",
"(",
"elem",
",",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"self",
")",
")",
"from",
"None"
]
| Removes _elem_ from the collection, will raise a KeyError if _elem_ is missing
# Parameters
_elem_ : `object`
> The object to be removed | [
"Removes",
"_elem_",
"from",
"the",
"collection",
"will",
"raise",
"a",
"KeyError",
"is",
"_elem_",
"is",
"missing"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L147-L159 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.clear | def clear(self):
"""Removes all elements from the collection and resets the error handling
"""
self.bad = False
self.errors = {}
self._collection.clear() | python | def clear(self):
"""Removes all elements from the collection and resets the error handling
"""
self.bad = False
self.errors = {}
self._collection.clear() | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"bad",
"=",
"False",
"self",
".",
"errors",
"=",
"{",
"}",
"self",
".",
"_collection",
".",
"clear",
"(",
")"
]
| Removes all elements from the collection and resets the error handling | [
"Removes",
"all",
"elements",
"from",
"the",
"collection",
"and",
"resets",
"the",
"error",
"handling"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L161-L166 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.pop | def pop(self):
"""Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection
"""
try:
return self._collection.pop()
except KeyError:
raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None | python | def pop(self):
"""Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection
"""
try:
return self._collection.pop()
except KeyError:
raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None | [
"def",
"pop",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_collection",
".",
"pop",
"(",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Nothing left in the {}: '{}'.\"",
".",
"format",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"self",
")",
")",
"from",
"None"
]
| Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection | [
"Removes",
"a",
"random",
"element",
"from",
"the",
"collection",
"and",
"returns",
"it"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L168-L180 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.copy | def copy(self):
"""Creates a shallow copy of the collection
# Returns
`Collection`
> A copy of the `Collection`
"""
collectedCopy = copy.copy(self)
collectedCopy._collection = copy.copy(collectedCopy._collection)
self._collectedTypes = copy.copy(self._collectedTypes)
self._allowedTypes = copy.copy(self._allowedTypes)
collectedCopy.errors = copy.copy(collectedCopy.errors)
return collectedCopy | python | def copy(self):
"""Creates a shallow copy of the collection
# Returns
`Collection`
> A copy of the `Collection`
"""
collectedCopy = copy.copy(self)
collectedCopy._collection = copy.copy(collectedCopy._collection)
self._collectedTypes = copy.copy(self._collectedTypes)
self._allowedTypes = copy.copy(self._allowedTypes)
collectedCopy.errors = copy.copy(collectedCopy.errors)
return collectedCopy | [
"def",
"copy",
"(",
"self",
")",
":",
"collectedCopy",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"collectedCopy",
".",
"_collection",
"=",
"copy",
".",
"copy",
"(",
"collectedCopy",
".",
"_collection",
")",
"self",
".",
"_collectedTypes",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_collectedTypes",
")",
"self",
".",
"_allowedTypes",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_allowedTypes",
")",
"collectedCopy",
".",
"errors",
"=",
"copy",
".",
"copy",
"(",
"collectedCopy",
".",
"errors",
")",
"return",
"collectedCopy"
]
| Creates a shallow copy of the collection
# Returns
`Collection`
> A copy of the `Collection` | [
"Creates",
"a",
"shallow",
"copy",
"of",
"the",
"collection"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L279-L293 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.chunk | def chunk(self, maxSize):
"""Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
for i in self:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(i)
currentSize += 1
return chunks | python | def chunk(self, maxSize):
"""Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
for i in self:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(i)
currentSize += 1
return chunks | [
"def",
"chunk",
"(",
"self",
",",
"maxSize",
")",
":",
"chunks",
"=",
"[",
"]",
"currentSize",
"=",
"maxSize",
"+",
"1",
"for",
"i",
"in",
"self",
":",
"if",
"currentSize",
">=",
"maxSize",
":",
"currentSize",
"=",
"0",
"chunks",
".",
"append",
"(",
"type",
"(",
"self",
")",
"(",
"{",
"i",
"}",
",",
"name",
"=",
"'Chunk-{}-of-{}'",
".",
"format",
"(",
"len",
"(",
"chunks",
")",
",",
"self",
".",
"name",
")",
",",
"quietStart",
"=",
"True",
")",
")",
"else",
":",
"chunks",
"[",
"-",
"1",
"]",
".",
"add",
"(",
"i",
")",
"currentSize",
"+=",
"1",
"return",
"chunks"
]
| Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original | [
"Splits",
"the",
"Collection",
"into",
"_maxSize_",
"size",
"or",
"smaller",
"Collections"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L309-L334 | train |
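A sketch of `chunk` and of recombining the pieces with the `|` operator mentioned in its docstring; the source path and chunk size are illustrative, and the final assertion simply expresses the documented expectation.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory

pieces = RC.chunk(500)                  # non-destructive: RC keeps its Records
print([len(p) for p in pieces])

# Recombine with the | operator mentioned in the docstring
whole = pieces[0]
for p in pieces[1:]:
    whole = whole | p
assert len(whole) == len(RC)
```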
networks-lab/metaknowledge | metaknowledge/mkCollection.py | Collection.split | def split(self, maxSize):
"""Destructively splits the `Collection` into _maxSize_ size or smaller `Collections`. The source `Collection` will be empty after this operation
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
try:
while True:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({self.pop()}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(self.pop())
currentSize += 1
except KeyError:
self.clear()
self.name = 'Emptied-{}'.format(self.name)
return chunks | python | def split(self, maxSize):
"""Destructively splits the `Collection` into _maxSize_ size or smaller `Collections`. The source `Collection` will be empty after this operation
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
try:
while True:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({self.pop()}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(self.pop())
currentSize += 1
except KeyError:
self.clear()
self.name = 'Emptied-{}'.format(self.name)
return chunks | [
"def",
"split",
"(",
"self",
",",
"maxSize",
")",
":",
"chunks",
"=",
"[",
"]",
"currentSize",
"=",
"maxSize",
"+",
"1",
"try",
":",
"while",
"True",
":",
"if",
"currentSize",
">=",
"maxSize",
":",
"currentSize",
"=",
"0",
"chunks",
".",
"append",
"(",
"type",
"(",
"self",
")",
"(",
"{",
"self",
".",
"pop",
"(",
")",
"}",
",",
"name",
"=",
"'Chunk-{}-of-{}'",
".",
"format",
"(",
"len",
"(",
"chunks",
")",
",",
"self",
".",
"name",
")",
",",
"quietStart",
"=",
"True",
")",
")",
"else",
":",
"chunks",
"[",
"-",
"1",
"]",
".",
"add",
"(",
"self",
".",
"pop",
"(",
")",
")",
"currentSize",
"+=",
"1",
"except",
"KeyError",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"name",
"=",
"'Emptied-{}'",
".",
"format",
"(",
"self",
".",
"name",
")",
"return",
"chunks"
]
| Destructively splits the `Collection` into _maxSize_ size or smaller `Collections`. The source `Collection` will be empty after this operation
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a returned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original | [
"Destructively",
"splits",
"the",
"Collection",
"into",
"_maxSize_",
"size",
"or",
"smaller",
"Collections",
".",
"The",
"source",
"Collection",
"will",
"be",
"empty",
"after",
"this",
"operation"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L336-L364 | train |
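The destructive counterpart of `chunk`, sketched under the same assumptions (illustrative path and part size); the prints show the emptied source collection described in the docstring above.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")  # hypothetical WOS export directory
total = len(RC)

parts = RC.split(200)                   # destructive: RC is emptied
print(len(RC), RC.name)                 # 0, and a name prefixed with 'Emptied-'
assert sum(len(p) for p in parts) == total
```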
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.containsID | def containsID(self, idVal):
"""Checks if the collected items contain the given _idVal_
# Parameters
_idVal_ : `str`
> The queried id string
# Returns
`bool`
> `True` if the item is in the collection
"""
for i in self:
if i.id == idVal:
return True
return False | python | def containsID(self, idVal):
"""Checks if the collected items contain the given _idVal_
# Parameters
_idVal_ : `str`
> The queried id string
# Returns
`bool`
> `True` if the item is in the collection
"""
for i in self:
if i.id == idVal:
return True
return False | [
"def",
"containsID",
"(",
"self",
",",
"idVal",
")",
":",
"for",
"i",
"in",
"self",
":",
"if",
"i",
".",
"id",
"==",
"idVal",
":",
"return",
"True",
"return",
"False"
]
| Checks if the collected items contain the given _idVal_
# Parameters
_idVal_ : `str`
> The queried id string
# Returns
`bool`
> `True` if the item is in the collection | [
"Checks",
"if",
"the",
"collected",
"items",
"contains",
"the",
"give",
"_idVal_"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L420-L438 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.discardID | def discardID(self, idVal):
"""Checks if the collected items contain the given _idVal_ and discards it if it is found, will not raise an exception if the item is not found
# Parameters
_idVal_ : `str`
> The discarded id string
"""
for i in self:
if i.id == idVal:
self._collection.discard(i)
return | python | def discardID(self, idVal):
"""Checks if the collected items contain the given _idVal_ and discards it if it is found, will not raise an exception if the item is not found
# Parameters
_idVal_ : `str`
> The discarded id string
"""
for i in self:
if i.id == idVal:
self._collection.discard(i)
return | [
"def",
"discardID",
"(",
"self",
",",
"idVal",
")",
":",
"for",
"i",
"in",
"self",
":",
"if",
"i",
".",
"id",
"==",
"idVal",
":",
"self",
".",
"_collection",
".",
"discard",
"(",
"i",
")",
"return"
]
| Checks if the collected items contain the given _idVal_ and discards it if it is found, will not raise an exception if the item is not found
# Parameters
_idVal_ : `str`
> The discarded id string | [
"Checks",
"if",
"the",
"collected",
"items",
"contains",
"the",
"give",
"_idVal_",
"and",
"discards",
"it",
"if",
"it",
"is",
"found",
"will",
"not",
"raise",
"an",
"exception",
"if",
"item",
"is",
"not",
"found"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L440-L452 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.removeID | def removeID(self, idVal):
"""Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found
# Parameters
_idVal_ : `str`
> The removed id string
"""
for i in self:
if i.id == idVal:
self._collection.remove(i)
return
raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self)) | python | def removeID(self, idVal):
"""Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found
# Parameters
_idVal_ : `str`
> The removed id string
"""
for i in self:
if i.id == idVal:
self._collection.remove(i)
return
raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self)) | [
"def",
"removeID",
"(",
"self",
",",
"idVal",
")",
":",
"for",
"i",
"in",
"self",
":",
"if",
"i",
".",
"id",
"==",
"idVal",
":",
"self",
".",
"_collection",
".",
"remove",
"(",
"i",
")",
"return",
"raise",
"KeyError",
"(",
"\"A Record with the ID '{}' was not found in the RecordCollection: '{}'.\"",
".",
"format",
"(",
"idVal",
",",
"self",
")",
")"
]
| Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found
# Parameters
_idVal_ : `str`
> The removed id string | [
"Checks",
"if",
"the",
"collected",
"items",
"contains",
"the",
"give",
"_idVal_",
"and",
"removes",
"it",
"if",
"it",
"is",
"found",
"will",
"raise",
"a",
"KeyError",
"if",
"item",
"is",
"not",
"found"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L454-L467 | train |
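A combined sketch of the three ID helpers documented in the last few records (`containsID`, `discardID`, `removeID`); the path is illustrative and the collection is assumed to be non-empty so an existing ID can be borrowed.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")   # hypothetical WOS export directory
wosID = next(iter(RC)).id                # borrow an ID known to be present

print(RC.containsID(wosID))              # True
RC.discardID(wosID)                      # removes the matching Record
RC.discardID(wosID)                      # second call silently does nothing
try:
    RC.removeID(wosID)                   # removeID raises once the ID is gone
except KeyError as err:
    print(err)
```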
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.badEntries | def badEntries(self):
"""Creates a new collection of the same type with only the bad entries
# Returns
`CollectionWithIDs`
> A collection of only the bad entries
"""
badEntries = set()
for i in self:
if i.bad:
badEntries.add(i)
return type(self)(badEntries, quietStart = True) | python | def badEntries(self):
"""Creates a new collection of the same type with only the bad entries
# Returns
`CollectionWithIDs`
> A collection of only the bad entries
"""
badEntries = set()
for i in self:
if i.bad:
badEntries.add(i)
return type(self)(badEntries, quietStart = True) | [
"def",
"badEntries",
"(",
"self",
")",
":",
"badEntries",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"self",
":",
"if",
"i",
".",
"bad",
":",
"badEntries",
".",
"add",
"(",
"i",
")",
"return",
"type",
"(",
"self",
")",
"(",
"badEntries",
",",
"quietStart",
"=",
"True",
")"
]
| Creates a new collection of the same type with only the bad entries
# Returns
`CollectionWithIDs`
> A collection of only the bad entries | [
"Creates",
"a",
"new",
"collection",
"of",
"the",
"same",
"type",
"with",
"only",
"the",
"bad",
"entries"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L489-L502 | train |
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.dropBadEntries | def dropBadEntries(self):
"""Removes all the bad entries from the collection
"""
self._collection = set((i for i in self if not i.bad))
self.bad = False
self.errors = {} | python | def dropBadEntries(self):
"""Removes all the bad entries from the collection
"""
self._collection = set((i for i in self if not i.bad))
self.bad = False
self.errors = {} | [
"def",
"dropBadEntries",
"(",
"self",
")",
":",
"self",
".",
"_collection",
"=",
"set",
"(",
"(",
"i",
"for",
"i",
"in",
"self",
"if",
"not",
"i",
".",
"bad",
")",
")",
"self",
".",
"bad",
"=",
"False",
"self",
".",
"errors",
"=",
"{",
"}"
]
| Removes all the bad entries from the collection | [
"Removes",
"all",
"the",
"bad",
"entries",
"from",
"the",
"collection"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L504-L509 | train |
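A sketch combining `badEntries` and `dropBadEntries` from the two records above; only the source path is assumed.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")   # hypothetical WOS export directory

badRC = RC.badEntries()                  # new collection holding only unparsable Records
print("{} of {} records are bad".format(len(badRC), len(RC)))

RC.dropBadEntries()                      # prune them in place; RC.bad is reset to False
```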
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.tags | def tags(self):
"""Creates a list of all the tags of the contained items
# Returns
`list [str]`
> A list of all the tags
"""
tags = set()
for i in self:
tags |= set(i.keys())
return tags | python | def tags(self):
"""Creates a list of all the tags of the contained items
# Returns
`list [str]`
> A list of all the tags
"""
tags = set()
for i in self:
tags |= set(i.keys())
return tags | [
"def",
"tags",
"(",
"self",
")",
":",
"tags",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"self",
":",
"tags",
"|=",
"set",
"(",
"i",
".",
"keys",
"(",
")",
")",
"return",
"tags"
]
| Creates a list of all the tags of the contained items
# Returns
`list [str]`
> A list of all the tags | [
"Creates",
"a",
"list",
"of",
"all",
"the",
"tags",
"of",
"the",
"contained",
"items"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L511-L523 | train |
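A short sketch of `tags`, again assuming a hypothetical source directory; it returns the union of every field tag present on any contained item.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs/")   # hypothetical WOS export directory

# Union of every field tag present on any Record in the collection
allTags = RC.tags()
print(sorted(allTags)[:10])
```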
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.rankedSeries | def rankedSeries(self, tag, outputFile = None, giveCounts = True, giveRanks = False, greatestFirst = True, pandasMode = True, limitTo = None):
"""Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by their number of occurrences. A list can also be returned with the counts or ranks added, or it can be written to a file.
# Parameters
_tag_ : `str`
> The tag to be ranked
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their counts
_giveCounts_ : `optional bool`
> Default `True`, if `True` the returned list will be composed of tuples, the first value being the tag value and the second their counts. This supersedes _giveRanks_.
_giveRanks_ : `optional bool`
> Default `False`, if `True` and _giveCounts_ is `False`, the retuned list will be composed of tuples the first values being the tag value and the second their ranks. This is superseded by _giveCounts_.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest ranked value first, otherwise the lowest ranked will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
if giveRanks and giveCounts:
raise mkException("rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.")
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
seriesDict[entry] += 1
else:
seriesDict[entry] = 1
seriesList = sorted(seriesDict.items(), key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'count'))
writer.writerows(seriesList)
if giveCounts and not pandasMode:
return seriesList
elif giveRanks or pandasMode:
if not greatestFirst:
seriesList.reverse()
currentRank = 1
retList = []
panDict = {'entry' : [], 'count' : [], 'rank' : []}
try:
currentCount = seriesList[0][1]
except IndexError:
#Empty series so no need to loop
pass
else:
for valString, count in seriesList:
if currentCount > count:
currentRank += 1
currentCount = count
if pandasMode:
panDict['entry'].append(valString)
panDict['count'].append(count)
panDict['rank'].append(currentRank)
else:
retList.append((valString, currentRank))
if not greatestFirst:
retList.reverse()
if pandasMode:
return panDict
else:
return retList
else:
return [e for e,c in seriesList] | python | def rankedSeries(self, tag, outputFile = None, giveCounts = True, giveRanks = False, greatestFirst = True, pandasMode = True, limitTo = None):
"""Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by their number of occurrences. A list can also be returned with the the counts or ranks added or it can be written to a file.
# Parameters
_tag_ : `str`
> The tag to be ranked
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their counts
_giveCounts_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their counts. This supersedes _giveRanks_.
_giveRanks_ : `optional bool`
> Default `False`, if `True` and _giveCounts_ is `False`, the retuned list will be composed of tuples the first values being the tag value and the second their ranks. This is superseded by _giveCounts_.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest ranked value first, otherwise the lowest ranked will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
if giveRanks and giveCounts:
raise mkException("rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.")
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
seriesDict[entry] += 1
else:
seriesDict[entry] = 1
seriesList = sorted(seriesDict.items(), key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'count'))
writer.writerows(seriesList)
if giveCounts and not pandasMode:
return seriesList
elif giveRanks or pandasMode:
if not greatestFirst:
seriesList.reverse()
currentRank = 1
retList = []
panDict = {'entry' : [], 'count' : [], 'rank' : []}
try:
currentCount = seriesList[0][1]
except IndexError:
#Empty series so no need to loop
pass
else:
for valString, count in seriesList:
if currentCount > count:
currentRank += 1
currentCount = count
if pandasMode:
panDict['entry'].append(valString)
panDict['count'].append(count)
panDict['rank'].append(currentRank)
else:
retList.append((valString, currentRank))
if not greatestFirst:
retList.reverse()
if pandasMode:
return panDict
else:
return retList
else:
return [e for e,c in seriesList] | [
"def",
"rankedSeries",
"(",
"self",
",",
"tag",
",",
"outputFile",
"=",
"None",
",",
"giveCounts",
"=",
"True",
",",
"giveRanks",
"=",
"False",
",",
"greatestFirst",
"=",
"True",
",",
"pandasMode",
"=",
"True",
",",
"limitTo",
"=",
"None",
")",
":",
"if",
"giveRanks",
"and",
"giveCounts",
":",
"raise",
"mkException",
"(",
"\"rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.\"",
")",
"seriesDict",
"=",
"{",
"}",
"for",
"R",
"in",
"self",
":",
"#This should be faster than using get, since get is a wrapper for __getitem__",
"try",
":",
"val",
"=",
"R",
"[",
"tag",
"]",
"except",
"KeyError",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"val",
"=",
"[",
"val",
"]",
"for",
"entry",
"in",
"val",
":",
"if",
"limitTo",
"and",
"entry",
"not",
"in",
"limitTo",
":",
"continue",
"if",
"entry",
"in",
"seriesDict",
":",
"seriesDict",
"[",
"entry",
"]",
"+=",
"1",
"else",
":",
"seriesDict",
"[",
"entry",
"]",
"=",
"1",
"seriesList",
"=",
"sorted",
"(",
"seriesDict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"greatestFirst",
")",
"if",
"outputFile",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"outputFile",
",",
"'w'",
")",
"as",
"f",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"dialect",
"=",
"'excel'",
")",
"writer",
".",
"writerow",
"(",
"(",
"str",
"(",
"tag",
")",
",",
"'count'",
")",
")",
"writer",
".",
"writerows",
"(",
"seriesList",
")",
"if",
"giveCounts",
"and",
"not",
"pandasMode",
":",
"return",
"seriesList",
"elif",
"giveRanks",
"or",
"pandasMode",
":",
"if",
"not",
"greatestFirst",
":",
"seriesList",
".",
"reverse",
"(",
")",
"currentRank",
"=",
"1",
"retList",
"=",
"[",
"]",
"panDict",
"=",
"{",
"'entry'",
":",
"[",
"]",
",",
"'count'",
":",
"[",
"]",
",",
"'rank'",
":",
"[",
"]",
"}",
"try",
":",
"currentCount",
"=",
"seriesList",
"[",
"0",
"]",
"[",
"1",
"]",
"except",
"IndexError",
":",
"#Empty series so no need to loop",
"pass",
"else",
":",
"for",
"valString",
",",
"count",
"in",
"seriesList",
":",
"if",
"currentCount",
">",
"count",
":",
"currentRank",
"+=",
"1",
"currentCount",
"=",
"count",
"if",
"pandasMode",
":",
"panDict",
"[",
"'entry'",
"]",
".",
"append",
"(",
"valString",
")",
"panDict",
"[",
"'count'",
"]",
".",
"append",
"(",
"count",
")",
"panDict",
"[",
"'rank'",
"]",
".",
"append",
"(",
"currentRank",
")",
"else",
":",
"retList",
".",
"append",
"(",
"(",
"valString",
",",
"currentRank",
")",
")",
"if",
"not",
"greatestFirst",
":",
"retList",
".",
"reverse",
"(",
")",
"if",
"pandasMode",
":",
"return",
"panDict",
"else",
":",
"return",
"retList",
"else",
":",
"return",
"[",
"e",
"for",
"e",
",",
"c",
"in",
"seriesList",
"]"
]
| Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by their number of occurrences. A list can also be returned with the the counts or ranks added or it can be written to a file.
# Parameters
_tag_ : `str`
> The tag to be ranked
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their counts
_giveCounts_ : `optional bool`
> Default `True`, if `True` the returned list will be composed of tuples the first values being the tag value and the second their counts. This supersedes _giveRanks_.
_giveRanks_ : `optional bool`
> Default `False`, if `True` and _giveCounts_ is `False`, the returned list will be composed of tuples the first values being the tag value and the second their ranks. This is superseded by _giveCounts_.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest ranked value first, otherwise the lowest ranked will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True` | [
"Creates",
"an",
"pandas",
"dict",
"of",
"the",
"ordered",
"list",
"of",
"all",
"the",
"values",
"of",
"_tag_",
"with",
"and",
"ranked",
"by",
"their",
"number",
"of",
"occurrences",
".",
"A",
"list",
"can",
"also",
"be",
"returned",
"with",
"the",
"the",
"counts",
"or",
"ranks",
"added",
"or",
"it",
"can",
"be",
"written",
"to",
"a",
"file",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L569-L663 | train |
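A sketch of how `rankedSeries` is typically consumed. The collection, the input file name and the `'authorsFull'` tag are illustrative assumptions; substitute whatever tag your records actually carry.

```python
import metaknowledge
import pandas

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input

# pandasMode=True (the default) returns a dict of parallel lists: entry, count, rank
df = pandas.DataFrame(RC.rankedSeries('authorsFull'))
print(df.sort_values('rank').head(10))

# pandasMode=False with giveCounts=True returns plain (value, count) tuples instead
pairs = RC.rankedSeries('authorsFull', pandasMode=False, giveCounts=True)
```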
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.timeSeries | def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
"""Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
year = R['year']
except KeyError:
continue
if tag is None:
seriesDict[R] = {year : 1}
else:
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
try:
seriesDict[entry][year] += 1
except KeyError:
seriesDict[entry][year] = 1
else:
seriesDict[entry] = {year : 1}
seriesList = []
for e, yd in seriesDict.items():
seriesList += [(e, y) for y in yd.keys()]
seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'years'))
writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items()))
if pandasMode:
panDict = {'entry' : [], 'count' : [], 'year' : []}
for entry, year in seriesList:
panDict['entry'].append(entry)
panDict['year'].append(year)
panDict['count'].append(seriesDict[entry][year])
return panDict
elif giveYears:
return seriesList
else:
return [e for e,c in seriesList] | python | def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
"""Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
year = R['year']
except KeyError:
continue
if tag is None:
seriesDict[R] = {year : 1}
else:
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
try:
seriesDict[entry][year] += 1
except KeyError:
seriesDict[entry][year] = 1
else:
seriesDict[entry] = {year : 1}
seriesList = []
for e, yd in seriesDict.items():
seriesList += [(e, y) for y in yd.keys()]
seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'years'))
writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items()))
if pandasMode:
panDict = {'entry' : [], 'count' : [], 'year' : []}
for entry, year in seriesList:
panDict['entry'].append(entry)
panDict['year'].append(year)
panDict['count'].append(seriesDict[entry][year])
return panDict
elif giveYears:
return seriesList
else:
return [e for e,c in seriesList] | [
"def",
"timeSeries",
"(",
"self",
",",
"tag",
"=",
"None",
",",
"outputFile",
"=",
"None",
",",
"giveYears",
"=",
"True",
",",
"greatestFirst",
"=",
"True",
",",
"limitTo",
"=",
"False",
",",
"pandasMode",
"=",
"True",
")",
":",
"seriesDict",
"=",
"{",
"}",
"for",
"R",
"in",
"self",
":",
"#This should be faster than using get, since get is a wrapper for __getitem__",
"try",
":",
"year",
"=",
"R",
"[",
"'year'",
"]",
"except",
"KeyError",
":",
"continue",
"if",
"tag",
"is",
"None",
":",
"seriesDict",
"[",
"R",
"]",
"=",
"{",
"year",
":",
"1",
"}",
"else",
":",
"try",
":",
"val",
"=",
"R",
"[",
"tag",
"]",
"except",
"KeyError",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"val",
"=",
"[",
"val",
"]",
"for",
"entry",
"in",
"val",
":",
"if",
"limitTo",
"and",
"entry",
"not",
"in",
"limitTo",
":",
"continue",
"if",
"entry",
"in",
"seriesDict",
":",
"try",
":",
"seriesDict",
"[",
"entry",
"]",
"[",
"year",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"seriesDict",
"[",
"entry",
"]",
"[",
"year",
"]",
"=",
"1",
"else",
":",
"seriesDict",
"[",
"entry",
"]",
"=",
"{",
"year",
":",
"1",
"}",
"seriesList",
"=",
"[",
"]",
"for",
"e",
",",
"yd",
"in",
"seriesDict",
".",
"items",
"(",
")",
":",
"seriesList",
"+=",
"[",
"(",
"e",
",",
"y",
")",
"for",
"y",
"in",
"yd",
".",
"keys",
"(",
")",
"]",
"seriesList",
"=",
"sorted",
"(",
"seriesList",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"greatestFirst",
")",
"if",
"outputFile",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"outputFile",
",",
"'w'",
")",
"as",
"f",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"dialect",
"=",
"'excel'",
")",
"writer",
".",
"writerow",
"(",
"(",
"str",
"(",
"tag",
")",
",",
"'years'",
")",
")",
"writer",
".",
"writerows",
"(",
"(",
"(",
"k",
",",
"'|'",
".",
"join",
"(",
"(",
"str",
"(",
"y",
")",
"for",
"y",
"in",
"v",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"seriesDict",
".",
"items",
"(",
")",
")",
")",
"if",
"pandasMode",
":",
"panDict",
"=",
"{",
"'entry'",
":",
"[",
"]",
",",
"'count'",
":",
"[",
"]",
",",
"'year'",
":",
"[",
"]",
"}",
"for",
"entry",
",",
"year",
"in",
"seriesList",
":",
"panDict",
"[",
"'entry'",
"]",
".",
"append",
"(",
"entry",
")",
"panDict",
"[",
"'year'",
"]",
".",
"append",
"(",
"year",
")",
"panDict",
"[",
"'count'",
"]",
".",
"append",
"(",
"seriesDict",
"[",
"entry",
"]",
"[",
"year",
"]",
")",
"return",
"panDict",
"elif",
"giveYears",
":",
"return",
"seriesList",
"else",
":",
"return",
"[",
"e",
"for",
"e",
",",
"c",
"in",
"seriesList",
"]"
]
| Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the returned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True` | [
"Creates",
"an",
"pandas",
"dict",
"of",
"the",
"ordered",
"list",
"of",
"all",
"the",
"values",
"of",
"_tag_",
"with",
"and",
"ranked",
"by",
"the",
"year",
"the",
"occurred",
"in",
"multiple",
"year",
"occurrences",
"will",
"create",
"multiple",
"entries",
".",
"A",
"list",
"can",
"also",
"be",
"returned",
"with",
"the",
"the",
"counts",
"or",
"years",
"added",
"or",
"it",
"can",
"be",
"written",
"to",
"a",
"file",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L665-L747 | train |
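A companion sketch for `timeSeries`, under the same assumptions as the `rankedSeries` example (hypothetical file, illustrative `'authorsFull'` tag).

```python
import metaknowledge
import pandas

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input

ts = pandas.DataFrame(RC.timeSeries('authorsFull'))    # columns: entry, year, count
perYear = ts.groupby('year')['count'].sum()            # total occurrences per year
print(perYear)

# With no tag the Records themselves are the entries, one (record, year) pair each
recYears = RC.timeSeries(pandasMode=False)
```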
networks-lab/metaknowledge | metaknowledge/mkCollection.py | CollectionWithIDs.cooccurrenceCounts | def cooccurrenceCounts(self, keyTag, *countedTags):
"""Counts the number of times values from any of the _countedTags_ occurs with _keyTag_. The counts are retuned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to thier counts.
# Parameters
_keyTag_ : `str`
> The tag used as the key for the returned dictionary
_*countedTags_ : `str, str, str, ...`
> The tags used as the key for the returned dictionary's values
# Returns
`dict[str:dict[str:int]]`
> The dictionary of counts
"""
if not isinstance(keyTag, str):
raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
if len(countedTags) < 1:
TagError("You need to provide atleast one tag")
for tag in countedTags:
if not isinstance(tag, str):
raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
occurenceDict = {}
progArgs = (0, "Starting to count the co-occurrences of '{}' and' {}'".format(keyTag, "','".join(countedTags)))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
for i, R in enumerate(self):
PBar.updateVal(i / len(self), "Analyzing {}".format(R))
keyVal = R.get(keyTag)
if keyVal is None:
continue
if not isinstance(keyVal, list):
keyVal = [keyVal]
for key in keyVal:
if key not in occurenceDict:
occurenceDict[key] = {}
for tag in countedTags:
tagval = R.get(tag)
if tagval is None:
continue
if not isinstance(tagval, list):
tagval = [tagval]
for val in tagval:
for key in keyVal:
try:
occurenceDict[key][val] += 1
except KeyError:
occurenceDict[key][val] = 1
PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
return occurenceDict | python | def cooccurrenceCounts(self, keyTag, *countedTags):
"""Counts the number of times values from any of the _countedTags_ occurs with _keyTag_. The counts are retuned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to thier counts.
# Parameters
_keyTag_ : `str`
> The tag used as the key for the returned dictionary
_*countedTags_ : `str, str, str, ...`
> The tags used as the key for the returned dictionary's values
# Returns
`dict[str:dict[str:int]]`
> The dictionary of counts
"""
if not isinstance(keyTag, str):
raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
if len(countedTags) < 1:
TagError("You need to provide atleast one tag")
for tag in countedTags:
if not isinstance(tag, str):
raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
occurenceDict = {}
progArgs = (0, "Starting to count the co-occurrences of '{}' and' {}'".format(keyTag, "','".join(countedTags)))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
for i, R in enumerate(self):
PBar.updateVal(i / len(self), "Analyzing {}".format(R))
keyVal = R.get(keyTag)
if keyVal is None:
continue
if not isinstance(keyVal, list):
keyVal = [keyVal]
for key in keyVal:
if key not in occurenceDict:
occurenceDict[key] = {}
for tag in countedTags:
tagval = R.get(tag)
if tagval is None:
continue
if not isinstance(tagval, list):
tagval = [tagval]
for val in tagval:
for key in keyVal:
try:
occurenceDict[key][val] += 1
except KeyError:
occurenceDict[key][val] = 1
PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
return occurenceDict | [
"def",
"cooccurrenceCounts",
"(",
"self",
",",
"keyTag",
",",
"*",
"countedTags",
")",
":",
"if",
"not",
"isinstance",
"(",
"keyTag",
",",
"str",
")",
":",
"raise",
"TagError",
"(",
"\"'{}' is not a string it cannot be used as a tag.\"",
".",
"format",
"(",
"keyTag",
")",
")",
"if",
"len",
"(",
"countedTags",
")",
"<",
"1",
":",
"TagError",
"(",
"\"You need to provide atleast one tag\"",
")",
"for",
"tag",
"in",
"countedTags",
":",
"if",
"not",
"isinstance",
"(",
"tag",
",",
"str",
")",
":",
"raise",
"TagError",
"(",
"\"'{}' is not a string it cannot be used as a tag.\"",
".",
"format",
"(",
"tag",
")",
")",
"occurenceDict",
"=",
"{",
"}",
"progArgs",
"=",
"(",
"0",
",",
"\"Starting to count the co-occurrences of '{}' and' {}'\"",
".",
"format",
"(",
"keyTag",
",",
"\"','\"",
".",
"join",
"(",
"countedTags",
")",
")",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"for",
"i",
",",
"R",
"in",
"enumerate",
"(",
"self",
")",
":",
"PBar",
".",
"updateVal",
"(",
"i",
"/",
"len",
"(",
"self",
")",
",",
"\"Analyzing {}\"",
".",
"format",
"(",
"R",
")",
")",
"keyVal",
"=",
"R",
".",
"get",
"(",
"keyTag",
")",
"if",
"keyVal",
"is",
"None",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"keyVal",
",",
"list",
")",
":",
"keyVal",
"=",
"[",
"keyVal",
"]",
"for",
"key",
"in",
"keyVal",
":",
"if",
"key",
"not",
"in",
"occurenceDict",
":",
"occurenceDict",
"[",
"key",
"]",
"=",
"{",
"}",
"for",
"tag",
"in",
"countedTags",
":",
"tagval",
"=",
"R",
".",
"get",
"(",
"tag",
")",
"if",
"tagval",
"is",
"None",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"tagval",
",",
"list",
")",
":",
"tagval",
"=",
"[",
"tagval",
"]",
"for",
"val",
"in",
"tagval",
":",
"for",
"key",
"in",
"keyVal",
":",
"try",
":",
"occurenceDict",
"[",
"key",
"]",
"[",
"val",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"occurenceDict",
"[",
"key",
"]",
"[",
"val",
"]",
"=",
"1",
"PBar",
".",
"finish",
"(",
"\"Done extracting the co-occurrences of '{}' and '{}'\"",
".",
"format",
"(",
"keyTag",
",",
"\"','\"",
".",
"join",
"(",
"countedTags",
")",
")",
")",
"return",
"occurenceDict"
]
| Counts the number of times values from any of the _countedTags_ occurs with _keyTag_. The counts are returned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to their counts.
# Parameters
_keyTag_ : `str`
> The tag used as the key for the returned dictionary
_*countedTags_ : `str, str, str, ...`
> The tags used as the key for the returned dictionary's values
# Returns
`dict[str:dict[str:int]]`
> The dictionary of counts | [
"Counts",
"the",
"number",
"of",
"times",
"values",
"from",
"any",
"of",
"the",
"_countedTags_",
"occurs",
"with",
"_keyTag_",
".",
"The",
"counts",
"are",
"retuned",
"as",
"a",
"dictionary",
"with",
"the",
"values",
"of",
"_keyTag_",
"mapping",
"to",
"dictionaries",
"with",
"each",
"of",
"the",
"_countedTags_",
"values",
"mapping",
"to",
"thier",
"counts",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L749-L806 | train |
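A usage sketch for `cooccurrenceCounts`. The tag names (`'authorsFull'`, `'journal'`, `'year'`) and the input file are assumptions chosen for illustration.

```python
import metaknowledge

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input

# For every author, count how often each journal and each year value co-occurs with them
co = RC.cooccurrenceCounts('authorsFull', 'journal', 'year')
for author, counts in list(co.items())[:3]:
    print(author, counts)
```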
networks-lab/metaknowledge | metaknowledge/diffusion.py | makeNodeID | def makeNodeID(Rec, ndType, extras = None):
"""Helper to make a node ID, extras is currently not used"""
if ndType == 'raw':
recID = Rec
else:
recID = Rec.get(ndType)
if recID is None:
pass
elif isinstance(recID, list):
recID = tuple(recID)
else:
recID = recID
extraDict = {}
if extras:
for tag in extras:
if tag == "raw":
extraDict['Tag'] = Rec
else:
extraDict['Tag'] = Rec.get(tag)
return recID, extraDict | python | def makeNodeID(Rec, ndType, extras = None):
"""Helper to make a node ID, extras is currently not used"""
if ndType == 'raw':
recID = Rec
else:
recID = Rec.get(ndType)
if recID is None:
pass
elif isinstance(recID, list):
recID = tuple(recID)
else:
recID = recID
extraDict = {}
if extras:
for tag in extras:
if tag == "raw":
extraDict['Tag'] = Rec
else:
extraDict['Tag'] = Rec.get(tag)
return recID, extraDict | [
"def",
"makeNodeID",
"(",
"Rec",
",",
"ndType",
",",
"extras",
"=",
"None",
")",
":",
"if",
"ndType",
"==",
"'raw'",
":",
"recID",
"=",
"Rec",
"else",
":",
"recID",
"=",
"Rec",
".",
"get",
"(",
"ndType",
")",
"if",
"recID",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"recID",
",",
"list",
")",
":",
"recID",
"=",
"tuple",
"(",
"recID",
")",
"else",
":",
"recID",
"=",
"recID",
"extraDict",
"=",
"{",
"}",
"if",
"extras",
":",
"for",
"tag",
"in",
"extras",
":",
"if",
"tag",
"==",
"\"raw\"",
":",
"extraDict",
"[",
"'Tag'",
"]",
"=",
"Rec",
"else",
":",
"extraDict",
"[",
"'Tag'",
"]",
"=",
"Rec",
".",
"get",
"(",
"tag",
")",
"return",
"recID",
",",
"extraDict"
]
| Helper to make a node ID, extras is currently not used | [
"Helper",
"to",
"make",
"a",
"node",
"ID",
"extras",
"is",
"currently",
"not",
"used"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/diffusion.py#L351-L370 | train |
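A small sketch of the `makeNodeID` helper above, imported per the file layout (`metaknowledge/diffusion.py`). Note that, as written, every entry in _extras_ overwrites the same `'Tag'` key, which is consistent with the docstring's remark that _extras_ is effectively unused. The collection, file name and tag names are illustrative assumptions.

```python
import metaknowledge
from metaknowledge.diffusion import makeNodeID   # module-level helper in diffusion.py

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input
R = next(iter(RC))

nodeID, attrs = makeNodeID(R, 'raw')                   # 'raw' keeps the Record itself as the node ID
titleID, extraAttrs = makeNodeID(R, 'title', extras=['year'])
```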
networks-lab/metaknowledge | docs/mkdsupport.py | pandoc_process | def pandoc_process(app, what, name, obj, options, lines):
""""Convert docstrings in Markdown into reStructureText using pandoc
"""
if not lines:
return None
input_format = app.config.mkdsupport_use_parser
output_format = 'rst'
# Since default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a
# unicode string, expects unicode or utf-8 encodes string, there is on need for dealing with coding
text = SEP.join(lines)
text = pypandoc.convert_text(text, output_format, format=input_format)
# The 'lines' in Sphinx is a list of strings and the value should be changed
del lines[:]
lines.extend(text.split(SEP)) | python | def pandoc_process(app, what, name, obj, options, lines):
""""Convert docstrings in Markdown into reStructureText using pandoc
"""
if not lines:
return None
input_format = app.config.mkdsupport_use_parser
output_format = 'rst'
# Since default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a
# unicode string, expects unicode or utf-8 encodes string, there is on need for dealing with coding
text = SEP.join(lines)
text = pypandoc.convert_text(text, output_format, format=input_format)
# The 'lines' in Sphinx is a list of strings and the value should be changed
del lines[:]
lines.extend(text.split(SEP)) | [
"def",
"pandoc_process",
"(",
"app",
",",
"what",
",",
"name",
",",
"obj",
",",
"options",
",",
"lines",
")",
":",
"if",
"not",
"lines",
":",
"return",
"None",
"input_format",
"=",
"app",
".",
"config",
".",
"mkdsupport_use_parser",
"output_format",
"=",
"'rst'",
"# Since default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a",
"# unicode string, expects unicode or utf-8 encodes string, there is on need for dealing with coding",
"text",
"=",
"SEP",
".",
"join",
"(",
"lines",
")",
"text",
"=",
"pypandoc",
".",
"convert_text",
"(",
"text",
",",
"output_format",
",",
"format",
"=",
"input_format",
")",
"# The 'lines' in Sphinx is a list of strings and the value should be changed",
"del",
"lines",
"[",
":",
"]",
"lines",
".",
"extend",
"(",
"text",
".",
"split",
"(",
"SEP",
")",
")"
]
| Convert docstrings in Markdown into reStructureText using pandoc | [
"Convert",
"docstrings",
"in",
"Markdown",
"into",
"reStructureText",
"using",
"pandoc"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/docs/mkdsupport.py#L26-L43 | train |
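The `setup()` half of the extension is not included in this entry, so the following is only an assumed sketch of how an `autodoc-process-docstring` hook like `pandoc_process` is usually registered in a Sphinx extension. The config-value name comes from the code above; everything else is standard Sphinx wiring and may differ from the real mkdsupport module.

```python
# Assumes this lives in the same module as pandoc_process defined above
SEP = "\n"   # the separator the processor joins and re-splits docstring lines on

def setup(app):
    # Default pandoc input format; any pandoc reader name (e.g. 'markdown') works
    app.add_config_value('mkdsupport_use_parser', 'markdown', 'env')
    # Run pandoc_process on every docstring autodoc collects
    app.connect('autodoc-process-docstring', pandoc_process)
    return {'parallel_read_safe': True}
```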
networks-lab/metaknowledge | metaknowledge/medline/tagProcessing/specialFunctions.py | beginningPage | def beginningPage(R):
"""As pages may not be given as numbers this is the most accurate this function can be"""
p = R['PG']
if p.startswith('suppl '):
p = p[6:]
return p.split(' ')[0].split('-')[0].replace(';', '') | python | def beginningPage(R):
"""As pages may not be given as numbers this is the most accurate this function can be"""
p = R['PG']
if p.startswith('suppl '):
p = p[6:]
return p.split(' ')[0].split('-')[0].replace(';', '') | [
"def",
"beginningPage",
"(",
"R",
")",
":",
"p",
"=",
"R",
"[",
"'PG'",
"]",
"if",
"p",
".",
"startswith",
"(",
"'suppl '",
")",
":",
"p",
"=",
"p",
"[",
"6",
":",
"]",
"return",
"p",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"';'",
",",
"''",
")"
]
| As pages may not be given as numbers this is the most accurate this function can be | [
"As",
"pages",
"may",
"not",
"be",
"given",
"as",
"numbers",
"this",
"is",
"the",
"most",
"accurate",
"this",
"function",
"can",
"be"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/specialFunctions.py#L27-L32 | train |
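Because `beginningPage` only ever touches `R['PG']`, a plain dict can stand in for a MEDLINE record when experimenting with it; the import path below simply mirrors the file location shown above.

```python
from metaknowledge.medline.tagProcessing.specialFunctions import beginningPage

print(beginningPage({'PG': '1157-63'}))               # -> '1157'
print(beginningPage({'PG': 'suppl 12-9; quiz 20'}))   # -> '12'
```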
networks-lab/metaknowledge | metaknowledge/mkRecord.py | Record.copy | def copy(self):
"""Correctly copies the `Record`
# Returns
`Record`
> A completely decoupled copy of the original
"""
c = copy.copy(self)
c._fieldDict = c._fieldDict.copy()
return c | python | def copy(self):
"""Correctly copies the `Record`
# Returns
`Record`
> A completely decoupled copy of the original
"""
c = copy.copy(self)
c._fieldDict = c._fieldDict.copy()
return c | [
"def",
"copy",
"(",
"self",
")",
":",
"c",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"c",
".",
"_fieldDict",
"=",
"c",
".",
"_fieldDict",
".",
"copy",
"(",
")",
"return",
"c"
]
| Correctly copies the `Record`
# Returns
`Record`
> A completely decoupled copy of the original | [
"Correctly",
"copies",
"the",
"Record"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L202-L213 | train |
networks-lab/metaknowledge | metaknowledge/mkRecord.py | ExtendedRecord.values | def values(self, raw = False):
"""Like `values` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `ValuesView` contains the raw values
# Returns
`ValuesView`
> The values of the record
"""
if raw:
return self._fieldDict.values()
else:
return collections.abc.Mapping.values(self) | python | def values(self, raw = False):
"""Like `values` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `ValuesView` contains the raw values
# Returns
`ValuesView`
> The values of the record
"""
if raw:
return self._fieldDict.values()
else:
return collections.abc.Mapping.values(self) | [
"def",
"values",
"(",
"self",
",",
"raw",
"=",
"False",
")",
":",
"if",
"raw",
":",
"return",
"self",
".",
"_fieldDict",
".",
"values",
"(",
")",
"else",
":",
"return",
"collections",
".",
"abc",
".",
"Mapping",
".",
"values",
"(",
"self",
")"
]
| Like `values` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `ValuesView` contains the raw values
# Returns
`ValuesView`
> The values of the record | [
"Like",
"values",
"for",
"dicts",
"but",
"with",
"a",
"raw",
"option"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L402-L420 | train |
networks-lab/metaknowledge | metaknowledge/mkRecord.py | ExtendedRecord.items | def items(self, raw = False):
"""Like `items` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `KeysView` contains the raw values as the values
# Returns
`KeysView`
> The key-value pairs of the record
"""
if raw:
return self._fieldDict.items()
else:
return collections.abc.Mapping.items(self) | python | def items(self, raw = False):
"""Like `items` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `KeysView` contains the raw values as the values
# Returns
`KeysView`
> The key-value pairs of the record
"""
if raw:
return self._fieldDict.items()
else:
return collections.abc.Mapping.items(self) | [
"def",
"items",
"(",
"self",
",",
"raw",
"=",
"False",
")",
":",
"if",
"raw",
":",
"return",
"self",
".",
"_fieldDict",
".",
"items",
"(",
")",
"else",
":",
"return",
"collections",
".",
"abc",
".",
"Mapping",
".",
"items",
"(",
"self",
")"
]
| Like `items` for dicts but with a `raw` option
# Parameters
_raw_ : `optional [bool]`
> Default `False`, if `True` the `KeysView` contains the raw values as the values
# Returns
`KeysView`
> The key-value pairs of the record | [
"Like",
"items",
"for",
"dicts",
"but",
"with",
"a",
"raw",
"option"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L424-L442 | train |
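`items` and the `values` method above follow the same pattern, so one sketch covers both: the _raw_ flag switches between processed field values and the untouched strings from the source file. The collection and file name are, as before, assumptions.

```python
import metaknowledge

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input
R = next(iter(RC))

for tag, value in R.items():             # processed values (lists, ints, Citations, ...)
    print(tag, value)

for tag, rawValue in R.items(raw=True):  # unprocessed values straight from the file
    print(tag, rawValue)
```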
networks-lab/metaknowledge | metaknowledge/mkRecord.py | ExtendedRecord.getCitations | def getCitations(self, field = None, values = None, pandasFriendly = True):
"""Creates a pandas ready dict with each row a different citation and columns containing the original string, year, journal and author's name.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
# Returns
`dict`
> A pandas ready dict with all the citations
"""
retCites = []
if values is not None:
if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container):
values = [values]
if field is not None:
for cite in self.get('citations', []):
try:
targetVal = getattr(cite, field)
if values is None or targetVal in values:
retCites.append(cite)
except AttributeError:
pass
else:
retCites = self.get('citations', [])
if pandasFriendly:
return _pandasPrep(retCites, False)
return retCites | python | def getCitations(self, field = None, values = None, pandasFriendly = True):
"""Creates a pandas ready dict with each row a different citation and columns containing the original string, year, journal and author's name.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
# Returns
`dict`
> A pandas ready dict with all the citations
"""
retCites = []
if values is not None:
if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container):
values = [values]
if field is not None:
for cite in self.get('citations', []):
try:
targetVal = getattr(cite, field)
if values is None or targetVal in values:
retCites.append(cite)
except AttributeError:
pass
else:
retCites = self.get('citations', [])
if pandasFriendly:
return _pandasPrep(retCites, False)
return retCites | [
"def",
"getCitations",
"(",
"self",
",",
"field",
"=",
"None",
",",
"values",
"=",
"None",
",",
"pandasFriendly",
"=",
"True",
")",
":",
"retCites",
"=",
"[",
"]",
"if",
"values",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
"or",
"not",
"isinstance",
"(",
"values",
",",
"collections",
".",
"abc",
".",
"Container",
")",
":",
"values",
"=",
"[",
"values",
"]",
"if",
"field",
"is",
"not",
"None",
":",
"for",
"cite",
"in",
"self",
".",
"get",
"(",
"'citations'",
",",
"[",
"]",
")",
":",
"try",
":",
"targetVal",
"=",
"getattr",
"(",
"cite",
",",
"field",
")",
"if",
"values",
"is",
"None",
"or",
"targetVal",
"in",
"values",
":",
"retCites",
".",
"append",
"(",
"cite",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"retCites",
"=",
"self",
".",
"get",
"(",
"'citations'",
",",
"[",
"]",
")",
"if",
"pandasFriendly",
":",
"return",
"_pandasPrep",
"(",
"retCites",
",",
"False",
")",
"return",
"retCites"
]
| Creates a pandas ready dict with each row a different citation and columns containing the original string, year, journal and author's name.
There are also options to filter the output citations with _field_ and _values_
# Parameters
_field_ : `optional str`
> Default `None`, if given all citations missing the named field will be dropped.
_values_ : `optional str or list[str]`
> Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included.
> e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]`
_pandasFriendly_ : `optional bool`
> Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict
# Returns
`dict`
> A pandas ready dict with all the citations | [
"Creates",
"a",
"pandas",
"ready",
"dict",
"with",
"each",
"row",
"a",
"different",
"citation",
"and",
"columns",
"containing",
"the",
"original",
"string",
"year",
"journal",
"and",
"author",
"s",
"name",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L546-L589 | train |
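A usage sketch for `getCitations`, using the year filter from the docstring's own example. The collection and file name are assumed.

```python
import metaknowledge
import pandas

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input
R = next(iter(RC))

# Only the citations from 1990 or 1991, as a pandas-ready dict (the default)
cites90s = pandas.DataFrame(R.getCitations(field='year', values=[1990, 1991]))

# The raw Citation objects instead of the pandas-ready dict
citeList = R.getCitations(pandasFriendly=False)
```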
networks-lab/metaknowledge | metaknowledge/mkRecord.py | ExtendedRecord.subDict | def subDict(self, tags, raw = False):
"""Creates a dict of values of _tags_ from the Record. The tags are the keys and the values are the values. If the tag is missing the value will be `None`.
# Parameters
_tags_ : `list[str]`
> The list of tags requested
_raw_ : `optional [bool]`
>default `False` if `True` the retuned values of the dict will be unprocessed
# Returns
`dict`
> A dictionary with the keys _tags_ and the values from the record
"""
retDict = {}
for tag in tags:
retDict[tag] = self.get(tag, raw = raw)
return retDict | python | def subDict(self, tags, raw = False):
"""Creates a dict of values of _tags_ from the Record. The tags are the keys and the values are the values. If the tag is missing the value will be `None`.
# Parameters
_tags_ : `list[str]`
> The list of tags requested
_raw_ : `optional [bool]`
>default `False` if `True` the retuned values of the dict will be unprocessed
# Returns
`dict`
> A dictionary with the keys _tags_ and the values from the record
"""
retDict = {}
for tag in tags:
retDict[tag] = self.get(tag, raw = raw)
return retDict | [
"def",
"subDict",
"(",
"self",
",",
"tags",
",",
"raw",
"=",
"False",
")",
":",
"retDict",
"=",
"{",
"}",
"for",
"tag",
"in",
"tags",
":",
"retDict",
"[",
"tag",
"]",
"=",
"self",
".",
"get",
"(",
"tag",
",",
"raw",
"=",
"raw",
")",
"return",
"retDict"
]
| Creates a dict of values of _tags_ from the Record. The tags are the keys and the values are the values. If the tag is missing the value will be `None`.
# Parameters
_tags_ : `list[str]`
> The list of tags requested
_raw_ : `optional [bool]`
>default `False` if `True` the returned values of the dict will be unprocessed
# Returns
`dict`
> A dictionary with the keys _tags_ and the values from the record | [
"Creates",
"a",
"dict",
"of",
"values",
"of",
"_tags_",
"from",
"the",
"Record",
".",
"The",
"tags",
"are",
"the",
"keys",
"and",
"the",
"values",
"are",
"the",
"values",
".",
"If",
"the",
"tag",
"is",
"missing",
"the",
"value",
"will",
"be",
"None",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L591-L613 | train |
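A one-line usage sketch for `subDict`; the tag names and the input file are illustrative assumptions.

```python
import metaknowledge

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input
R = next(iter(RC))

row = R.subDict(['title', 'year', 'journal'])   # any tag missing from R maps to None
print(row)
```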
networks-lab/metaknowledge | metaknowledge/mkRecord.py | ExtendedRecord.authGenders | def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False):
"""Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors.
# Parameters
_countsOnly_ : `optional bool`
> Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names
_fractionsMode_ : `optional bool`
> Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_
# Returns
`dict[str:str or int]`
> The mapping of genders to author's names or counts
"""
authDict = recordGenders(self)
if _countsTuple or countsOnly or fractionsMode:
rawList = list(authDict.values())
countsList = []
for k in ('Male','Female','Unknown'):
countsList.append(rawList.count(k))
if fractionsMode:
tot = sum(countsList)
for i in range(3):
countsList.append(countsList.pop(0) / tot)
if _countsTuple:
return tuple(countsList)
else:
return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]}
else:
return authDict | python | def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False):
"""Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors.
# Parameters
_countsOnly_ : `optional bool`
> Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names
_fractionsMode_ : `optional bool`
> Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_
# Returns
`dict[str:str or int]`
> The mapping of genders to author's names or counts
"""
authDict = recordGenders(self)
if _countsTuple or countsOnly or fractionsMode:
rawList = list(authDict.values())
countsList = []
for k in ('Male','Female','Unknown'):
countsList.append(rawList.count(k))
if fractionsMode:
tot = sum(countsList)
for i in range(3):
countsList.append(countsList.pop(0) / tot)
if _countsTuple:
return tuple(countsList)
else:
return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]}
else:
return authDict | [
"def",
"authGenders",
"(",
"self",
",",
"countsOnly",
"=",
"False",
",",
"fractionsMode",
"=",
"False",
",",
"_countsTuple",
"=",
"False",
")",
":",
"authDict",
"=",
"recordGenders",
"(",
"self",
")",
"if",
"_countsTuple",
"or",
"countsOnly",
"or",
"fractionsMode",
":",
"rawList",
"=",
"list",
"(",
"authDict",
".",
"values",
"(",
")",
")",
"countsList",
"=",
"[",
"]",
"for",
"k",
"in",
"(",
"'Male'",
",",
"'Female'",
",",
"'Unknown'",
")",
":",
"countsList",
".",
"append",
"(",
"rawList",
".",
"count",
"(",
"k",
")",
")",
"if",
"fractionsMode",
":",
"tot",
"=",
"sum",
"(",
"countsList",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"countsList",
".",
"append",
"(",
"countsList",
".",
"pop",
"(",
"0",
")",
"/",
"tot",
")",
"if",
"_countsTuple",
":",
"return",
"tuple",
"(",
"countsList",
")",
"else",
":",
"return",
"{",
"'Male'",
":",
"countsList",
"[",
"0",
"]",
",",
"'Female'",
":",
"countsList",
"[",
"1",
"]",
",",
"'Unknown'",
":",
"countsList",
"[",
"2",
"]",
"}",
"else",
":",
"return",
"authDict"
]
| Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors.
# Parameters
_countsOnly_ : `optional bool`
> Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names
_fractionsMode_ : `optional bool`
> Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_
# Returns
`dict[str:str or int]`
> The mapping of genders to author's names or counts | [
"Creates",
"a",
"dict",
"mapping",
"Male",
"Female",
"and",
"Unknown",
"to",
"lists",
"of",
"the",
"names",
"of",
"all",
"the",
"authors",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L660-L695 | train |
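A usage sketch for `authGenders`; the counts and fractions forms are the safest to illustrate. Note that the way the counts are computed above (counting `'Male'` etc. among `authDict.values()`) suggests the underlying dict maps author names to gender labels, i.e. the inverse of what the docstring describes. The collection and file name are assumptions, and the lookup relies on metaknowledge's bundled name data.

```python
import metaknowledge

RC = metaknowledge.RecordCollection("savedrecs.txt")   # hypothetical input
R = next(iter(RC))

counts = R.authGenders(countsOnly=True)     # {'Male': n, 'Female': n, 'Unknown': n}
fracs = R.authGenders(fractionsMode=True)   # same keys, values as fractions of all authors
print(counts, fracs)
```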
networks-lab/metaknowledge | metaknowledge/proquest/proQuestHandlers.py | proQuestParser | def proQuestParser(proFile):
"""Parses a ProQuest file, _proFile_, to extract the individual entries.
A ProQuest file has three sections, first a list of the contained entries, second the full metadata and finally a bibtex formatted entry for the record. This parser only uses the first two as the bibtex contains no information the second section does not. Also, the first section is only used to verify the second section. The returned [ProQuestRecord](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) contains the data from the second section, with the same key strings as ProQuest uses and the unlabeled sections are called in order, `'Name'`, `'Author'` and `'url'`.
# Parameters
_proFile_ : `str`
> A path to a valid ProQuest file, use [isProQuestFile](#metaknowledge.proquest.proQuestHandlers.isProQuestFile) to verify
# Returns
`set[ProQuestRecord]`
> Records for each of the entries
"""
#assumes the file is ProQuest
nameDict = {}
recSet = set()
error = None
lineNum = 0
try:
with open(proFile, 'r', encoding = 'utf-8') as openfile:
f = enumerate(openfile, start = 1)
for i in range(12):
lineNum, line = next(f)
# f is file so it *should* end, or at least cause a parser error eventually
while True:
lineNum, line = next(f)
lineNum, line = next(f)
if line == 'Bibliography\n':
for i in range(3):
lineNum, line = next(f)
break
else:
s = line.split('. ')
nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
while True:
#import pdb; pdb.set_trace()
lineNum, line = next(f)
if line == 'Bibliography\n':
break
elif line.startswith('Document '):
n = int(line[9:].split(' of ')[0])
R = ProQuestRecord(f, sFile = proFile, sLine = lineNum)
if R.get('Title') != nameDict[n]:
error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
raise StopIteration
recSet.add(R)
lineNum, line = next(f)
else:
#Parsing failed
error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the seperators between the records are incorrect".format(proFile, lineNum))
raise StopIteration
except (UnicodeDecodeError, StopIteration, ValueError) as e:
if error is None:
error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
return recSet, error | python | def proQuestParser(proFile):
"""Parses a ProQuest file, _proFile_, to extract the individual entries.
A ProQuest file has three sections, first a list of the contained entries, second the full metadata and finally a bibtex formatted entry for the record. This parser only uses the first two as the bibtex contains no information the second section does not. Also, the first section is only used to verify the second section. The returned [ProQuestRecord](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) contains the data from the second section, with the same key strings as ProQuest uses and the unlabeled sections are called in order, `'Name'`, `'Author'` and `'url'`.
# Parameters
_proFile_ : `str`
> A path to a valid ProQuest file, use [isProQuestFile](#metaknowledge.proquest.proQuestHandlers.isProQuestFile) to verify
# Returns
`set[ProQuestRecord]`
> Records for each of the entries
"""
#assumes the file is ProQuest
nameDict = {}
recSet = set()
error = None
lineNum = 0
try:
with open(proFile, 'r', encoding = 'utf-8') as openfile:
f = enumerate(openfile, start = 1)
for i in range(12):
lineNum, line = next(f)
# f is file so it *should* end, or at least cause a parser error eventually
while True:
lineNum, line = next(f)
lineNum, line = next(f)
if line == 'Bibliography\n':
for i in range(3):
lineNum, line = next(f)
break
else:
s = line.split('. ')
nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
while True:
#import pdb; pdb.set_trace()
lineNum, line = next(f)
if line == 'Bibliography\n':
break
elif line.startswith('Document '):
n = int(line[9:].split(' of ')[0])
R = ProQuestRecord(f, sFile = proFile, sLine = lineNum)
if R.get('Title') != nameDict[n]:
error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
raise StopIteration
recSet.add(R)
lineNum, line = next(f)
else:
#Parsing failed
error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the seperators between the records are incorrect".format(proFile, lineNum))
raise StopIteration
except (UnicodeDecodeError, StopIteration, ValueError) as e:
if error is None:
error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
return recSet, error | [
"def",
"proQuestParser",
"(",
"proFile",
")",
":",
"#assumes the file is ProQuest",
"nameDict",
"=",
"{",
"}",
"recSet",
"=",
"set",
"(",
")",
"error",
"=",
"None",
"lineNum",
"=",
"0",
"try",
":",
"with",
"open",
"(",
"proFile",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"openfile",
":",
"f",
"=",
"enumerate",
"(",
"openfile",
",",
"start",
"=",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"12",
")",
":",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"# f is file so it *should* end, or at least cause a parser error eventually",
"while",
"True",
":",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"if",
"line",
"==",
"'Bibliography\\n'",
":",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"break",
"else",
":",
"s",
"=",
"line",
".",
"split",
"(",
"'. '",
")",
"nameDict",
"[",
"int",
"(",
"s",
"[",
"0",
"]",
")",
"]",
"=",
"'. '",
".",
"join",
"(",
"s",
"[",
"1",
":",
"]",
")",
"[",
":",
"-",
"1",
"]",
"while",
"True",
":",
"#import pdb; pdb.set_trace()",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"if",
"line",
"==",
"'Bibliography\\n'",
":",
"break",
"elif",
"line",
".",
"startswith",
"(",
"'Document '",
")",
":",
"n",
"=",
"int",
"(",
"line",
"[",
"9",
":",
"]",
".",
"split",
"(",
"' of '",
")",
"[",
"0",
"]",
")",
"R",
"=",
"ProQuestRecord",
"(",
"f",
",",
"sFile",
"=",
"proFile",
",",
"sLine",
"=",
"lineNum",
")",
"if",
"R",
".",
"get",
"(",
"'Title'",
")",
"!=",
"nameDict",
"[",
"n",
"]",
":",
"error",
"=",
"BadProQuestFile",
"(",
"\"The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.\"",
".",
"format",
"(",
"lineNum",
",",
"R",
".",
"get",
"(",
"'Title'",
",",
"\"TITLE MISSING\"",
")",
",",
"n",
",",
"nameDict",
"[",
"n",
"]",
")",
")",
"raise",
"StopIteration",
"recSet",
".",
"add",
"(",
"R",
")",
"lineNum",
",",
"line",
"=",
"next",
"(",
"f",
")",
"else",
":",
"#Parsing failed",
"error",
"=",
"BadProQuestFile",
"(",
"\"The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the seperators between the records are incorrect\"",
".",
"format",
"(",
"proFile",
",",
"lineNum",
")",
")",
"raise",
"StopIteration",
"except",
"(",
"UnicodeDecodeError",
",",
"StopIteration",
",",
"ValueError",
")",
"as",
"e",
":",
"if",
"error",
"is",
"None",
":",
"error",
"=",
"BadProQuestFile",
"(",
"\"The file '{}' has parts of it that are unparsable starting at line: {}.\\nThe error was: '{}'\"",
".",
"format",
"(",
"proFile",
",",
"lineNum",
",",
"e",
")",
")",
"return",
"recSet",
",",
"error"
]
| Parses a ProQuest file, _proFile_, to extract the individual entries.
A ProQuest file has three sections, first a list of the contained entries, second the full metadata and finally a bibtex formatted entry for the record. This parser only uses the first two as the bibtex contains no information the second section does not. Also, the first section is only used to verify the second section. The returned [ProQuestRecord](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) contains the data from the second section, with the same key strings as ProQuest uses and the unlabeled sections are called in order, `'Name'`, `'Author'` and `'url'`.
# Parameters
_proFile_ : `str`
> A path to a valid ProQuest file, use [isProQuestFile](#metaknowledge.proquest.proQuestHandlers.isProQuestFile) to verify
# Returns
`set[ProQuestRecord]`
> Records for each of the entries | [
"Parses",
"a",
"ProQuest",
"file",
"_proFile_",
"to",
"extract",
"the",
"individual",
"entries",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/proquest/proQuestHandlers.py#L42-L100 | train |
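A sketch of calling `proQuestParser` directly. The import path mirrors the file layout above, the export file name is hypothetical, and it is worth noting that the parser returns a `(records, error)` pair rather than raising on a bad file.

```python
from metaknowledge.proquest.proQuestHandlers import proQuestParser  # path per the file above

recs, err = proQuestParser("proquest_export.txt")   # hypothetical ProQuest export file
if err is not None:
    raise err                                       # err is a BadProQuestFile instance
for R in recs:
    print(R.get('Title'))
```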
networks-lab/metaknowledge | metaknowledge/grants/nsfGrant.py | NSFGrant.getInvestigators | def getInvestigators(self, tags = None, seperator = ";", _getTag = False):
"""Returns a list of the names of investigators. The optional arguments are ignored.
# Returns
`list [str]`
> A list of all the found investigator's names
"""
if tags is None:
tags = ['Investigator']
elif isinstance(tags, str):
tags = ['Investigator', tags]
else:
tags.append('Investigator')
return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag) | python | def getInvestigators(self, tags = None, seperator = ";", _getTag = False):
"""Returns a list of the names of investigators. The optional arguments are ignored.
# Returns
`list [str]`
> A list of all the found investigator's names
"""
if tags is None:
tags = ['Investigator']
elif isinstance(tags, str):
tags = ['Investigator', tags]
else:
tags.append('Investigator')
return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag) | [
"def",
"getInvestigators",
"(",
"self",
",",
"tags",
"=",
"None",
",",
"seperator",
"=",
"\";\"",
",",
"_getTag",
"=",
"False",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"'Investigator'",
"]",
"elif",
"isinstance",
"(",
"tags",
",",
"str",
")",
":",
"tags",
"=",
"[",
"'Investigator'",
",",
"tags",
"]",
"else",
":",
"tags",
".",
"append",
"(",
"'Investigator'",
")",
"return",
"super",
"(",
")",
".",
"getInvestigators",
"(",
"tags",
"=",
"tags",
",",
"seperator",
"=",
"seperator",
",",
"_getTag",
"=",
"_getTag",
")"
]
| Returns a list of the names of investigators. The optional arguments are ignored.
# Returns
`list [str]`
> A list of all the found investigator's names | [
"Returns",
"a",
"list",
"of",
"the",
"names",
"of",
"investigators",
".",
"The",
"optional",
"arguments",
"are",
"ignored",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/nsfGrant.py#L22-L37 | train |
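A hedged sketch of calling `getInvestigators()` on NSF grants. `metaknowledge.GrantCollection` and the directory path are assumptions; the method behaviour (optional arguments ignored) is documented above.

import metaknowledge as mk

# Assumed: GrantCollection loads NSFGrant objects from a directory of grant files.
grants = mk.GrantCollection("nsf_grants/")
for grant in grants:
    # Per the docstring above, the optional arguments are ignored for NSF grants.
    for name in grant.getInvestigators():
        print(name)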
networks-lab/metaknowledge | metaknowledge/genders/nameGender.py | nameStringGender | def nameStringGender(s, noExcept = False):
"""Expects `first, last`"""
global mappingDict
try:
first = s.split(', ')[1].split(' ')[0].title()
except IndexError:
if noExcept:
return 'Unknown'
else:
return GenderException("The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.".format(s))
if mappingDict is None:
mappingDict = getMapping()
return mappingDict.get(first, 'Unknown') | python | def nameStringGender(s, noExcept = False):
"""Expects `first, last`"""
global mappingDict
try:
first = s.split(', ')[1].split(' ')[0].title()
except IndexError:
if noExcept:
return 'Unknown'
else:
return GenderException("The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.".format(s))
if mappingDict is None:
mappingDict = getMapping()
return mappingDict.get(first, 'Unknown') | [
"def",
"nameStringGender",
"(",
"s",
",",
"noExcept",
"=",
"False",
")",
":",
"global",
"mappingDict",
"try",
":",
"first",
"=",
"s",
".",
"split",
"(",
"', '",
")",
"[",
"1",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
".",
"title",
"(",
")",
"except",
"IndexError",
":",
"if",
"noExcept",
":",
"return",
"'Unknown'",
"else",
":",
"return",
"GenderException",
"(",
"\"The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.\"",
".",
"format",
"(",
"s",
")",
")",
"if",
"mappingDict",
"is",
"None",
":",
"mappingDict",
"=",
"getMapping",
"(",
")",
"return",
"mappingDict",
".",
"get",
"(",
"first",
",",
"'Unknown'",
")"
]
| Expects `first, last` | [
"Expects",
"first",
"last"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/genders/nameGender.py#L54-L66 | train |
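A small sketch of the helper above. It expects a `'Last, First'` style string; the concrete labels returned depend on the mapping file, so only the `'Unknown'` fallback is certain here.

from metaknowledge.genders.nameGender import nameStringGender

print(nameStringGender("Curie, Marie"))          # looks up 'Marie' in the mapping
print(nameStringGender("Plato", noExcept=True))  # no ', ' pair -> 'Unknown'
# Without noExcept=True a malformed name returns (not raises) a GenderException.
result = nameStringGender("Plato")
print(type(result).__name__)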
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | j9urlGenerator | def j9urlGenerator(nameDict = False):
"""How to get all the urls for the WOS Journal Title Abbreviations. Each is varies by only a few characters. These are the currently in use urls they may change.
They are of the form:
> "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"
> Where {VAL} is a capital letter or the string "0-9"
# Returns
`list[str]`
> A list of all the url's strings
"""
start = "https://images.webofknowledge.com/images/help/WOS/"
end = "_abrvjt.html"
if nameDict:
urls = {"0-9" : start + "0-9" + end}
for c in string.ascii_uppercase:
urls[c] = start + c + end
else:
urls = [start + "0-9" + end]
for c in string.ascii_uppercase:
urls.append(start + c + end)
return urls | python | def j9urlGenerator(nameDict = False):
"""How to get all the urls for the WOS Journal Title Abbreviations. Each is varies by only a few characters. These are the currently in use urls they may change.
They are of the form:
> "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"
> Where {VAL} is a capital letter or the string "0-9"
# Returns
`list[str]`
> A list of all the url's strings
"""
start = "https://images.webofknowledge.com/images/help/WOS/"
end = "_abrvjt.html"
if nameDict:
urls = {"0-9" : start + "0-9" + end}
for c in string.ascii_uppercase:
urls[c] = start + c + end
else:
urls = [start + "0-9" + end]
for c in string.ascii_uppercase:
urls.append(start + c + end)
return urls | [
"def",
"j9urlGenerator",
"(",
"nameDict",
"=",
"False",
")",
":",
"start",
"=",
"\"https://images.webofknowledge.com/images/help/WOS/\"",
"end",
"=",
"\"_abrvjt.html\"",
"if",
"nameDict",
":",
"urls",
"=",
"{",
"\"0-9\"",
":",
"start",
"+",
"\"0-9\"",
"+",
"end",
"}",
"for",
"c",
"in",
"string",
".",
"ascii_uppercase",
":",
"urls",
"[",
"c",
"]",
"=",
"start",
"+",
"c",
"+",
"end",
"else",
":",
"urls",
"=",
"[",
"start",
"+",
"\"0-9\"",
"+",
"end",
"]",
"for",
"c",
"in",
"string",
".",
"ascii_uppercase",
":",
"urls",
".",
"append",
"(",
"start",
"+",
"c",
"+",
"end",
")",
"return",
"urls"
]
| How to get all the urls for the WOS Journal Title Abbreviations. Each varies by only a few characters. These are the urls currently in use; they may change.

They are of the form:
> "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"
> Where {VAL} is a capital letter or the string "0-9"
# Returns
`list[str]`
> A list of all the url's strings | [
"How",
"to",
"get",
"all",
"the",
"urls",
"for",
"the",
"WOS",
"Journal",
"Title",
"Abbreviations",
".",
"Each",
"is",
"varies",
"by",
"only",
"a",
"few",
"characters",
".",
"These",
"are",
"the",
"currently",
"in",
"use",
"urls",
"they",
"may",
"change",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L14-L38 | train |
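A short sketch of both return shapes of `j9urlGenerator`; the import path follows this record.

from metaknowledge.journalAbbreviations.backend import j9urlGenerator

urls = j9urlGenerator()                  # list form: '0-9' first, then A-Z (27 urls)
print(len(urls), urls[0])

url_map = j9urlGenerator(nameDict=True)  # dict form, keyed by '0-9' and each letter
print(url_map['A'])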
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | _j9SaveCurrent | def _j9SaveCurrent(sDir = '.'):
"""Downloads and saves all the webpages
For Backend
"""
dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
if not os.path.isdir(dname):
os.mkdir(dname)
os.chdir(dname)
else:
os.chdir(dname)
for urlID, urlString in j9urlGenerator(nameDict = True).items():
fname = "{}_abrvjt.html".format(urlID)
f = open(fname, 'wb')
f.write(urllib.request.urlopen(urlString).read()) | python | def _j9SaveCurrent(sDir = '.'):
"""Downloads and saves all the webpages
For Backend
"""
dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
if not os.path.isdir(dname):
os.mkdir(dname)
os.chdir(dname)
else:
os.chdir(dname)
for urlID, urlString in j9urlGenerator(nameDict = True).items():
fname = "{}_abrvjt.html".format(urlID)
f = open(fname, 'wb')
f.write(urllib.request.urlopen(urlString).read()) | [
"def",
"_j9SaveCurrent",
"(",
"sDir",
"=",
"'.'",
")",
":",
"dname",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"sDir",
"+",
"'/'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d_J9_AbbreviationDocs\"",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dname",
")",
":",
"os",
".",
"mkdir",
"(",
"dname",
")",
"os",
".",
"chdir",
"(",
"dname",
")",
"else",
":",
"os",
".",
"chdir",
"(",
"dname",
")",
"for",
"urlID",
",",
"urlString",
"in",
"j9urlGenerator",
"(",
"nameDict",
"=",
"True",
")",
".",
"items",
"(",
")",
":",
"fname",
"=",
"\"{}_abrvjt.html\"",
".",
"format",
"(",
"urlID",
")",
"f",
"=",
"open",
"(",
"fname",
",",
"'wb'",
")",
"f",
".",
"write",
"(",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"urlString",
")",
".",
"read",
"(",
")",
")"
]
| Downloads and saves all the webpages
For Backend | [
"Downloads",
"and",
"saves",
"all",
"the",
"webpages"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L40-L54 | train |
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | _getDict | def _getDict(j9Page):
"""Parses a Journal Title Abbreviations page
Note the pages are not well formatted html as the <DT> tags are not closes so html parses (Beautiful Soup) do not work. This is a simple parser that only works on the webpages and may fail if they are changed
For Backend
"""
slines = j9Page.read().decode('utf-8').split('\n')
while slines.pop(0) != "<DL>":
pass
currentName = slines.pop(0).split('"></A><DT>')[1]
currentTag = slines.pop(0).split("<B><DD>\t")[1]
j9Dict = {}
while True:
try:
j9Dict[currentTag].append(currentName)
except KeyError:
j9Dict[currentTag] = [currentName]
try:
currentName = slines.pop(0).split('</B><DT>')[1]
currentTag = slines.pop(0).split("<B><DD>\t")[1]
except IndexError:
break
return j9Dict | python | def _getDict(j9Page):
"""Parses a Journal Title Abbreviations page
Note the pages are not well formatted html as the <DT> tags are not closes so html parses (Beautiful Soup) do not work. This is a simple parser that only works on the webpages and may fail if they are changed
For Backend
"""
slines = j9Page.read().decode('utf-8').split('\n')
while slines.pop(0) != "<DL>":
pass
currentName = slines.pop(0).split('"></A><DT>')[1]
currentTag = slines.pop(0).split("<B><DD>\t")[1]
j9Dict = {}
while True:
try:
j9Dict[currentTag].append(currentName)
except KeyError:
j9Dict[currentTag] = [currentName]
try:
currentName = slines.pop(0).split('</B><DT>')[1]
currentTag = slines.pop(0).split("<B><DD>\t")[1]
except IndexError:
break
return j9Dict | [
"def",
"_getDict",
"(",
"j9Page",
")",
":",
"slines",
"=",
"j9Page",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'\\n'",
")",
"while",
"slines",
".",
"pop",
"(",
"0",
")",
"!=",
"\"<DL>\"",
":",
"pass",
"currentName",
"=",
"slines",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
"'\"></A><DT>'",
")",
"[",
"1",
"]",
"currentTag",
"=",
"slines",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
"\"<B><DD>\\t\"",
")",
"[",
"1",
"]",
"j9Dict",
"=",
"{",
"}",
"while",
"True",
":",
"try",
":",
"j9Dict",
"[",
"currentTag",
"]",
".",
"append",
"(",
"currentName",
")",
"except",
"KeyError",
":",
"j9Dict",
"[",
"currentTag",
"]",
"=",
"[",
"currentName",
"]",
"try",
":",
"currentName",
"=",
"slines",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
"'</B><DT>'",
")",
"[",
"1",
"]",
"currentTag",
"=",
"slines",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
"\"<B><DD>\\t\"",
")",
"[",
"1",
"]",
"except",
"IndexError",
":",
"break",
"return",
"j9Dict"
]
| Parses a Journal Title Abbreviations page
Note the pages are not well formatted html as the <DT> tags are not closed, so html parsers (Beautiful Soup) do not work. This is a simple parser that only works on these webpages and may fail if they are changed
For Backend | [
"Parses",
"a",
"Journal",
"Title",
"Abbreviations",
"page"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L56-L79 | train |
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | _getCurrentj9Dict | def _getCurrentj9Dict():
"""Downloads and parses all the webpages
For Backend
"""
urls = j9urlGenerator()
j9Dict = {}
for url in urls:
d = _getDict(urllib.request.urlopen(url))
if len(d) == 0:
raise RuntimeError("Parsing failed, this is could require an update of the parser.")
j9Dict.update(d)
return j9Dict | python | def _getCurrentj9Dict():
"""Downloads and parses all the webpages
For Backend
"""
urls = j9urlGenerator()
j9Dict = {}
for url in urls:
d = _getDict(urllib.request.urlopen(url))
if len(d) == 0:
raise RuntimeError("Parsing failed, this is could require an update of the parser.")
j9Dict.update(d)
return j9Dict | [
"def",
"_getCurrentj9Dict",
"(",
")",
":",
"urls",
"=",
"j9urlGenerator",
"(",
")",
"j9Dict",
"=",
"{",
"}",
"for",
"url",
"in",
"urls",
":",
"d",
"=",
"_getDict",
"(",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
")",
"if",
"len",
"(",
"d",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Parsing failed, this is could require an update of the parser.\"",
")",
"j9Dict",
".",
"update",
"(",
"d",
")",
"return",
"j9Dict"
]
| Downloads and parses all the webpages
For Backend | [
"Downloads",
"and",
"parses",
"all",
"the",
"webpages"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L81-L93 | train |
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | updatej9DB | def updatej9DB(dbname = abrevDBname, saveRawHTML = False):
"""Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws begining with todays date.
"""
if saveRawHTML:
rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
if not os.path.isdir(rawDir):
os.mkdir(rawDir)
_j9SaveCurrent(sDir = rawDir)
dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
try:
with dbm.dumb.open(dbLoc, flag = 'c') as db:
try:
j9Dict = _getCurrentj9Dict()
except urllib.error.URLError:
raise urllib.error.URLError("Unable to access server, check your connection")
for k, v in j9Dict.items():
if k in db:
for jName in v:
if jName not in j9Dict[k]:
j9Dict[k] += '|' + jName
else:
db[k] = '|'.join(v)
except dbm.dumb.error as e:
raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e)) | python | def updatej9DB(dbname = abrevDBname, saveRawHTML = False):
"""Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws begining with todays date.
"""
if saveRawHTML:
rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
if not os.path.isdir(rawDir):
os.mkdir(rawDir)
_j9SaveCurrent(sDir = rawDir)
dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
try:
with dbm.dumb.open(dbLoc, flag = 'c') as db:
try:
j9Dict = _getCurrentj9Dict()
except urllib.error.URLError:
raise urllib.error.URLError("Unable to access server, check your connection")
for k, v in j9Dict.items():
if k in db:
for jName in v:
if jName not in j9Dict[k]:
j9Dict[k] += '|' + jName
else:
db[k] = '|'.join(v)
except dbm.dumb.error as e:
raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e)) | [
"def",
"updatej9DB",
"(",
"dbname",
"=",
"abrevDBname",
",",
"saveRawHTML",
"=",
"False",
")",
":",
"if",
"saveRawHTML",
":",
"rawDir",
"=",
"'{}/j9Raws'",
".",
"format",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"rawDir",
")",
":",
"os",
".",
"mkdir",
"(",
"rawDir",
")",
"_j9SaveCurrent",
"(",
"sDir",
"=",
"rawDir",
")",
"dbLoc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"dbname",
")",
"try",
":",
"with",
"dbm",
".",
"dumb",
".",
"open",
"(",
"dbLoc",
",",
"flag",
"=",
"'c'",
")",
"as",
"db",
":",
"try",
":",
"j9Dict",
"=",
"_getCurrentj9Dict",
"(",
")",
"except",
"urllib",
".",
"error",
".",
"URLError",
":",
"raise",
"urllib",
".",
"error",
".",
"URLError",
"(",
"\"Unable to access server, check your connection\"",
")",
"for",
"k",
",",
"v",
"in",
"j9Dict",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"db",
":",
"for",
"jName",
"in",
"v",
":",
"if",
"jName",
"not",
"in",
"j9Dict",
"[",
"k",
"]",
":",
"j9Dict",
"[",
"k",
"]",
"+=",
"'|'",
"+",
"jName",
"else",
":",
"db",
"[",
"k",
"]",
"=",
"'|'",
".",
"join",
"(",
"v",
")",
"except",
"dbm",
".",
"dumb",
".",
"error",
"as",
"e",
":",
"raise",
"JournalDataBaseError",
"(",
"\"Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\\nThe error was '{}'\"",
".",
"format",
"(",
"dbLoc",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"e",
")",
")"
]
| Updates the database of Journal Title Abbreviations. Requires an internet connection. The database is saved relative to the source file, not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws beginning with today's date. | [
"Updates",
"the",
"database",
"of",
"Journal",
"Title",
"Abbreviations",
".",
"Requires",
"an",
"internet",
"connection",
".",
"The",
"data",
"base",
"is",
"saved",
"relative",
"to",
"the",
"source",
"file",
"not",
"the",
"working",
"directory",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L95-L128 | train |
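A usage sketch for `updatej9DB`; it needs an internet connection and writes its dbm files next to the module source, as described above.

from metaknowledge.journalAbbreviations.backend import updatej9DB

updatej9DB()                  # refresh the local abbreviation database
updatej9DB(saveRawHTML=True)  # also keep the raw pages in a dated folder under j9Raws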
networks-lab/metaknowledge | metaknowledge/journalAbbreviations/backend.py | getj9dict | def getj9dict(dbname = abrevDBname, manualDB = manualDBname, returnDict ='both'):
"""Returns the dictionary of journal abbreviations mapping to a list of the associated journal names. By default the local database is used. The database is in the file _dbname_ in the same directory as this source file
# Parameters
_dbname_ : `optional [str]`
> The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.
_manualDB_ : `optional [str]`
> The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.
_returnDict_ : `optional [str]`
> default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.
"""
dbLoc = os.path.normpath(os.path.dirname(__file__))
retDict = {}
try:
if returnDict == 'both' or returnDict == 'WOS':
with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db:
if len(db) == 0:
raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().")
for k, v in db.items():
retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
except JournalDataBaseError:
updatej9DB()
return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
try:
if returnDict == 'both' or returnDict == 'manual':
if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)):
with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db:
for k, v in db.items():
retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
else:
if returnDict == 'manual':
raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB))
except JournalDataBaseError:
updatej9DB(dbname = manualDB)
return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
return retDict | python | def getj9dict(dbname = abrevDBname, manualDB = manualDBname, returnDict ='both'):
"""Returns the dictionary of journal abbreviations mapping to a list of the associated journal names. By default the local database is used. The database is in the file _dbname_ in the same directory as this source file
# Parameters
_dbname_ : `optional [str]`
> The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.
_manualDB_ : `optional [str]`
> The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.
_returnDict_ : `optional [str]`
> default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.
"""
dbLoc = os.path.normpath(os.path.dirname(__file__))
retDict = {}
try:
if returnDict == 'both' or returnDict == 'WOS':
with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db:
if len(db) == 0:
raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().")
for k, v in db.items():
retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
except JournalDataBaseError:
updatej9DB()
return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
try:
if returnDict == 'both' or returnDict == 'manual':
if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)):
with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db:
for k, v in db.items():
retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
else:
if returnDict == 'manual':
raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB))
except JournalDataBaseError:
updatej9DB(dbname = manualDB)
return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
return retDict | [
"def",
"getj9dict",
"(",
"dbname",
"=",
"abrevDBname",
",",
"manualDB",
"=",
"manualDBname",
",",
"returnDict",
"=",
"'both'",
")",
":",
"dbLoc",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"retDict",
"=",
"{",
"}",
"try",
":",
"if",
"returnDict",
"==",
"'both'",
"or",
"returnDict",
"==",
"'WOS'",
":",
"with",
"dbm",
".",
"dumb",
".",
"open",
"(",
"dbLoc",
"+",
"'/{}'",
".",
"format",
"(",
"dbname",
")",
")",
"as",
"db",
":",
"if",
"len",
"(",
"db",
")",
"==",
"0",
":",
"raise",
"JournalDataBaseError",
"(",
"\"J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().\"",
")",
"for",
"k",
",",
"v",
"in",
"db",
".",
"items",
"(",
")",
":",
"retDict",
"[",
"k",
".",
"decode",
"(",
"'utf-8'",
")",
"]",
"=",
"v",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'|'",
")",
"except",
"JournalDataBaseError",
":",
"updatej9DB",
"(",
")",
"return",
"getj9dict",
"(",
"dbname",
"=",
"dbname",
",",
"manualDB",
"=",
"manualDB",
",",
"returnDict",
"=",
"returnDict",
")",
"try",
":",
"if",
"returnDict",
"==",
"'both'",
"or",
"returnDict",
"==",
"'manual'",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"dbLoc",
"+",
"'/{}.dat'",
".",
"format",
"(",
"manualDB",
")",
")",
":",
"with",
"dbm",
".",
"dumb",
".",
"open",
"(",
"dbLoc",
"+",
"'/{}'",
".",
"format",
"(",
"manualDB",
")",
")",
"as",
"db",
":",
"for",
"k",
",",
"v",
"in",
"db",
".",
"items",
"(",
")",
":",
"retDict",
"[",
"k",
".",
"decode",
"(",
"'utf-8'",
")",
"]",
"=",
"v",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'|'",
")",
"else",
":",
"if",
"returnDict",
"==",
"'manual'",
":",
"raise",
"JournalDataBaseError",
"(",
"\"Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})\"",
".",
"format",
"(",
"manualDB",
")",
")",
"except",
"JournalDataBaseError",
":",
"updatej9DB",
"(",
"dbname",
"=",
"manualDB",
")",
"return",
"getj9dict",
"(",
"dbname",
"=",
"dbname",
",",
"manualDB",
"=",
"manualDB",
",",
"returnDict",
"=",
"returnDict",
")",
"return",
"retDict"
]
| Returns the dictionary of journal abbreviations mapping to a list of the associated journal names. By default the local database is used. The database is in the file _dbname_ in the same directory as this source file
# Parameters
_dbname_ : `optional [str]`
> The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.
_manualDB_ : `optional [str]`
> The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.
_returnDict_ : `optional [str]`
> default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`. | [
"Returns",
"the",
"dictionary",
"of",
"journal",
"abbreviations",
"mapping",
"to",
"a",
"list",
"of",
"the",
"associated",
"journal",
"names",
".",
"By",
"default",
"the",
"local",
"database",
"is",
"used",
".",
"The",
"database",
"is",
"in",
"the",
"file",
"_dbname_",
"in",
"the",
"same",
"directory",
"as",
"this",
"source",
"file"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L130-L172 | train |
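A sketch of reading the abbreviation database built above; the specific key shown is an assumption about the database contents.

from metaknowledge.journalAbbreviations.backend import getj9dict

abbrevs = getj9dict()                   # WOS database plus manual additions, if any
print(abbrevs.get('PHYS REV LETT'))     # -> list of full journal names, if the key exists
wos_only = getj9dict(returnDict='WOS')  # skip the manually curated database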
networks-lab/metaknowledge | metaknowledge/WOS/tagProcessing/funcDicts.py | normalizeToTag | def normalizeToTag(val):
"""Converts tags or full names to 2 character tags, case insensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The short name of _val_
"""
try:
val = val.upper()
except AttributeError:
raise KeyError("{} is not a tag or name string".format(val))
if val not in tagsAndNameSetUpper:
raise KeyError("{} is not a tag or name string".format(val))
else:
try:
return fullToTagDictUpper[val]
except KeyError:
return val | python | def normalizeToTag(val):
"""Converts tags or full names to 2 character tags, case insensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The short name of _val_
"""
try:
val = val.upper()
except AttributeError:
raise KeyError("{} is not a tag or name string".format(val))
if val not in tagsAndNameSetUpper:
raise KeyError("{} is not a tag or name string".format(val))
else:
try:
return fullToTagDictUpper[val]
except KeyError:
return val | [
"def",
"normalizeToTag",
"(",
"val",
")",
":",
"try",
":",
"val",
"=",
"val",
".",
"upper",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"KeyError",
"(",
"\"{} is not a tag or name string\"",
".",
"format",
"(",
"val",
")",
")",
"if",
"val",
"not",
"in",
"tagsAndNameSetUpper",
":",
"raise",
"KeyError",
"(",
"\"{} is not a tag or name string\"",
".",
"format",
"(",
"val",
")",
")",
"else",
":",
"try",
":",
"return",
"fullToTagDictUpper",
"[",
"val",
"]",
"except",
"KeyError",
":",
"return",
"val"
]
| Converts tags or full names to 2 character tags, case insensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The short name of _val_ | [
"Converts",
"tags",
"or",
"full",
"names",
"to",
"2",
"character",
"tags",
"case",
"insensitive"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/funcDicts.py#L41-L66 | train |
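A quick sketch of the lookup above; whether `'title'` is the registered full name for `'TI'` is an assumption, while the `KeyError` behaviour is documented in the code.

from metaknowledge.WOS.tagProcessing.funcDicts import normalizeToTag

print(normalizeToTag('ti'))     # tags are matched case-insensitively -> 'TI'
print(normalizeToTag('title'))  # a full name maps back to its two-character tag (assumed mapping)
try:
    normalizeToTag('not-a-field')
except KeyError as e:
    print(e)                    # "... is not a tag or name string"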
networks-lab/metaknowledge | metaknowledge/WOS/tagProcessing/funcDicts.py | normalizeToName | def normalizeToName(val):
"""Converts tags or full names to full names, case sensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The full name of _val_
"""
if val not in tagsAndNameSet:
raise KeyError("{} is not a tag or name string".format(val))
else:
try:
return tagToFullDict[val]
except KeyError:
return val | python | def normalizeToName(val):
"""Converts tags or full names to full names, case sensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The full name of _val_
"""
if val not in tagsAndNameSet:
raise KeyError("{} is not a tag or name string".format(val))
else:
try:
return tagToFullDict[val]
except KeyError:
return val | [
"def",
"normalizeToName",
"(",
"val",
")",
":",
"if",
"val",
"not",
"in",
"tagsAndNameSet",
":",
"raise",
"KeyError",
"(",
"\"{} is not a tag or name string\"",
".",
"format",
"(",
"val",
")",
")",
"else",
":",
"try",
":",
"return",
"tagToFullDict",
"[",
"val",
"]",
"except",
"KeyError",
":",
"return",
"val"
]
| Converts tags or full names to full names, case sensitive
# Parameters
_val_: `str`
> A two character string giving the tag or its full name
# Returns
`str`
> The full name of _val_ | [
"Converts",
"tags",
"or",
"full",
"names",
"to",
"full",
"names",
"case",
"sensitive"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/funcDicts.py#L68-L89 | train |
networks-lab/metaknowledge | metaknowledge/grants/baseGrant.py | Grant.update | def update(self, other):
"""Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence.
# Parameters
_other_ : `Grant`
> Another `Grant` of the same type as _self_
"""
if type(self) != type(other):
return NotImplemented
else:
if other.bad:
self.error = other.error
self.bad = True
self._fieldDict.update(other._fieldDict) | python | def update(self, other):
"""Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence.
# Parameters
_other_ : `Grant`
> Another `Grant` of the same type as _self_
"""
if type(self) != type(other):
return NotImplemented
else:
if other.bad:
self.error = other.error
self.bad = True
self._fieldDict.update(other._fieldDict) | [
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"if",
"type",
"(",
"self",
")",
"!=",
"type",
"(",
"other",
")",
":",
"return",
"NotImplemented",
"else",
":",
"if",
"other",
".",
"bad",
":",
"self",
".",
"error",
"=",
"other",
".",
"error",
"self",
".",
"bad",
"=",
"True",
"self",
".",
"_fieldDict",
".",
"update",
"(",
"other",
".",
"_fieldDict",
")"
]
| Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence.
# Parameters
_other_ : `Grant`
> Another `Grant` of the same type as _self_ | [
"Adds",
"all",
"the",
"tag",
"-",
"entry",
"pairs",
"from",
"_other_",
"to",
"the",
"Grant",
".",
"If",
"there",
"is",
"a",
"conflict",
"_other_",
"takes",
"precedence",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/baseGrant.py#L99-L114 | train |
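A hedged sketch of merging grants with `update()`. `metaknowledge.GrantCollection`, the directory path, and the use of an `id` attribute as the duplicate key are assumptions; the conflict rule (the argument wins) comes from the docstring above.

import metaknowledge as mk

# Assumed: GrantCollection loads Grant objects, each exposing an 'id' attribute.
grants = mk.GrantCollection("grants/")
merged = {}
for g in grants:
    if g.id in merged:
        merged[g.id].update(g)  # later grant's tag-entry pairs take precedence
    else:
        merged[g.id] = g
print(len(merged), "unique grants after merging duplicates")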
kxgames/glooey | glooey/widget.py | EventDispatcher.relay_events_from | def relay_events_from(self, originator, event_type, *more_event_types):
"""
Configure this handler to re-dispatch events from another handler.
This method configures this handler dispatch an event of type
*event_type* whenever *originator* dispatches events of the same type
or any of the types in *more_event_types*. Any arguments passed to the
original event are copied to the new event.
This method is mean to be useful for creating composite widgets that
want to present a simple API by making it seem like the events being
generated by their children are actually coming from them. See the
`/composing_widgets` tutorial for an example.
"""
handlers = {
event_type: lambda *args, **kwargs: \
self.dispatch_event(event_type, *args, **kwargs)
for event_type in (event_type,) + more_event_types
}
originator.set_handlers(**handlers) | python | def relay_events_from(self, originator, event_type, *more_event_types):
"""
Configure this handler to re-dispatch events from another handler.
This method configures this handler dispatch an event of type
*event_type* whenever *originator* dispatches events of the same type
or any of the types in *more_event_types*. Any arguments passed to the
original event are copied to the new event.
This method is mean to be useful for creating composite widgets that
want to present a simple API by making it seem like the events being
generated by their children are actually coming from them. See the
`/composing_widgets` tutorial for an example.
"""
handlers = {
event_type: lambda *args, **kwargs: \
self.dispatch_event(event_type, *args, **kwargs)
for event_type in (event_type,) + more_event_types
}
originator.set_handlers(**handlers) | [
"def",
"relay_events_from",
"(",
"self",
",",
"originator",
",",
"event_type",
",",
"*",
"more_event_types",
")",
":",
"handlers",
"=",
"{",
"event_type",
":",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"self",
".",
"dispatch_event",
"(",
"event_type",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"event_type",
"in",
"(",
"event_type",
",",
")",
"+",
"more_event_types",
"}",
"originator",
".",
"set_handlers",
"(",
"*",
"*",
"handlers",
")"
]
| Configure this handler to re-dispatch events from another handler.
This method configures this handler dispatch an event of type
*event_type* whenever *originator* dispatches events of the same type
or any of the types in *more_event_types*. Any arguments passed to the
original event are copied to the new event.
This method is meant to be useful for creating composite widgets that
want to present a simple API by making it seem like the events being
generated by their children are actually coming from them. See the
`/composing_widgets` tutorial for an example. | [
"Configure",
"this",
"handler",
"to",
"re",
"-",
"dispatch",
"events",
"from",
"another",
"handler",
"."
]
| f0125c1f218b05cfb2efb52a88d80f54eae007a0 | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L25-L44 | train |
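A minimal sketch of the relay, using bare `EventDispatcher` subclasses rather than full widgets so it can run without a window; the event name `on_poke` is invented for illustration.

from glooey.widget import EventDispatcher

class Child(EventDispatcher):
    def poke(self):
        self.dispatch_event('on_poke', 'hello')
Child.register_event_type('on_poke')

class Parent(EventDispatcher):
    def __init__(self, child):
        super().__init__()
        # Re-emit the child's 'on_poke' events as if they came from this object.
        self.relay_events_from(child, 'on_poke')
Parent.register_event_type('on_poke')

child = Child()
parent = Parent(child)
parent.set_handler('on_poke', lambda msg: print('parent saw:', msg))
child.poke()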
kxgames/glooey | glooey/widget.py | EventDispatcher.start_event | def start_event(self, event_type, *args, dt=1/60):
"""
Begin dispatching the given event at the given frequency.
Calling this method will cause an event of type *event_type* with
arguments *args* to be dispatched every *dt* seconds. This will
continue until `stop_event()` is called for the same event.
These continuously firing events are useful if, for example, you want
to make a button that scrolls for as long as it's being held.
"""
# Don't bother scheduling a timer if nobody's listening. This isn't
# great from a general-purpose perspective, because a long-lived event
# could have listeners attach and detach in the middle. But I don't
# like the idea of making a bunch of clocks to spit out a bunch of
# events that are never used, although to be fair I don't actually know
# how expensive that would be. If I want to make this implementation
# more general purpose, I could start and stop timers as necessary in
# the methods that add or remove handlers.
if not any(self.__yield_handlers(event_type)):
return
def on_time_interval(dt): #
self.dispatch_event(event_type, *args, dt)
pyglet.clock.schedule_interval(on_time_interval, dt)
self.__timers[event_type] = on_time_interval | python | def start_event(self, event_type, *args, dt=1/60):
"""
Begin dispatching the given event at the given frequency.
Calling this method will cause an event of type *event_type* with
arguments *args* to be dispatched every *dt* seconds. This will
continue until `stop_event()` is called for the same event.
These continuously firing events are useful if, for example, you want
to make a button that scrolls for as long as it's being held.
"""
# Don't bother scheduling a timer if nobody's listening. This isn't
# great from a general-purpose perspective, because a long-lived event
# could have listeners attach and detach in the middle. But I don't
# like the idea of making a bunch of clocks to spit out a bunch of
# events that are never used, although to be fair I don't actually know
# how expensive that would be. If I want to make this implementation
# more general purpose, I could start and stop timers as necessary in
# the methods that add or remove handlers.
if not any(self.__yield_handlers(event_type)):
return
def on_time_interval(dt): #
self.dispatch_event(event_type, *args, dt)
pyglet.clock.schedule_interval(on_time_interval, dt)
self.__timers[event_type] = on_time_interval | [
"def",
"start_event",
"(",
"self",
",",
"event_type",
",",
"*",
"args",
",",
"dt",
"=",
"1",
"/",
"60",
")",
":",
"# Don't bother scheduling a timer if nobody's listening. This isn't ",
"# great from a general-purpose perspective, because a long-lived event ",
"# could have listeners attach and detach in the middle. But I don't ",
"# like the idea of making a bunch of clocks to spit out a bunch of ",
"# events that are never used, although to be fair I don't actually know ",
"# how expensive that would be. If I want to make this implementation ",
"# more general purpose, I could start and stop timers as necessary in ",
"# the methods that add or remove handlers.",
"if",
"not",
"any",
"(",
"self",
".",
"__yield_handlers",
"(",
"event_type",
")",
")",
":",
"return",
"def",
"on_time_interval",
"(",
"dt",
")",
":",
"#",
"self",
".",
"dispatch_event",
"(",
"event_type",
",",
"*",
"args",
",",
"dt",
")",
"pyglet",
".",
"clock",
".",
"schedule_interval",
"(",
"on_time_interval",
",",
"dt",
")",
"self",
".",
"__timers",
"[",
"event_type",
"]",
"=",
"on_time_interval"
]
| Begin dispatching the given event at the given frequency.
Calling this method will cause an event of type *event_type* with
arguments *args* to be dispatched every *dt* seconds. This will
continue until `stop_event()` is called for the same event.
These continuously firing events are useful if, for example, you want
to make a button that scrolls for as long as it's being held. | [
"Begin",
"dispatching",
"the",
"given",
"event",
"at",
"the",
"given",
"frequency",
"."
]
| f0125c1f218b05cfb2efb52a88d80f54eae007a0 | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L46-L72 | train |
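A sketch of the repeating-event helpers above; the `on_tick` event name is invented, and in a real program pyglet's main loop would drive the scheduled clock callbacks.

from glooey.widget import EventDispatcher

class Ticker(EventDispatcher):
    pass
Ticker.register_event_type('on_tick')

ticker = Ticker()
# start_event() only schedules a timer if someone is listening, so attach a handler first.
ticker.set_handler('on_tick', lambda dt: print('tick after', dt, 'seconds'))
ticker.start_event('on_tick', dt=1/30)  # dispatches on_tick(dt) every 1/30 s
# ... later, e.g. from another handler:
ticker.stop_event('on_tick')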
kxgames/glooey | glooey/widget.py | EventDispatcher.stop_event | def stop_event(self, event_type):
"""
Stop dispatching the given event.
It is not an error to attempt to stop an event that was never started,
the request will just be silently ignored.
"""
if event_type in self.__timers:
pyglet.clock.unschedule(self.__timers[event_type]) | python | def stop_event(self, event_type):
"""
Stop dispatching the given event.
It is not an error to attempt to stop an event that was never started,
the request will just be silently ignored.
"""
if event_type in self.__timers:
pyglet.clock.unschedule(self.__timers[event_type]) | [
"def",
"stop_event",
"(",
"self",
",",
"event_type",
")",
":",
"if",
"event_type",
"in",
"self",
".",
"__timers",
":",
"pyglet",
".",
"clock",
".",
"unschedule",
"(",
"self",
".",
"__timers",
"[",
"event_type",
"]",
")"
]
| Stop dispatching the given event.
It is not an error to attempt to stop an event that was never started,
the request will just be silently ignored. | [
"Stop",
"dispatching",
"the",
"given",
"event",
"."
]
| f0125c1f218b05cfb2efb52a88d80f54eae007a0 | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L74-L82 | train |
kxgames/glooey | glooey/widget.py | EventDispatcher.__yield_handlers | def __yield_handlers(self, event_type):
"""
Yield all the handlers registered for the given event type.
"""
if event_type not in self.event_types:
raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
if event_type in frame:
yield frame[event_type]
# Check instance for an event handler
if hasattr(self, event_type):
yield getattr(self, event_type) | python | def __yield_handlers(self, event_type):
"""
Yield all the handlers registered for the given event type.
"""
if event_type not in self.event_types:
raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
if event_type in frame:
yield frame[event_type]
# Check instance for an event handler
if hasattr(self, event_type):
yield getattr(self, event_type) | [
"def",
"__yield_handlers",
"(",
"self",
",",
"event_type",
")",
":",
"if",
"event_type",
"not",
"in",
"self",
".",
"event_types",
":",
"raise",
"ValueError",
"(",
"\"%r not found in %r.event_types == %r\"",
"%",
"(",
"event_type",
",",
"self",
",",
"self",
".",
"event_types",
")",
")",
"# Search handler stack for matching event handlers",
"for",
"frame",
"in",
"list",
"(",
"self",
".",
"_event_stack",
")",
":",
"if",
"event_type",
"in",
"frame",
":",
"yield",
"frame",
"[",
"event_type",
"]",
"# Check instance for an event handler",
"if",
"hasattr",
"(",
"self",
",",
"event_type",
")",
":",
"yield",
"getattr",
"(",
"self",
",",
"event_type",
")"
]
| Yield all the handlers registered for the given event type. | [
"Yield",
"all",
"the",
"handlers",
"registered",
"for",
"the",
"given",
"event",
"type",
"."
]
| f0125c1f218b05cfb2efb52a88d80f54eae007a0 | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L84-L98 | train |
kxgames/glooey | glooey/helpers.py | HoldUpdatesMixin._filter_pending_updates | def _filter_pending_updates(self):
"""
Return all the updates that need to be applied, from a list of all the
updates that were called while the hold was active. This method is
meant to be overridden by subclasses that want to customize how held
updates are applied.
The `self._pending_updates` member variable is a list containing a
(method, args, kwargs) tuple for each update that was called while
updates were being held. This list is in the order that the updates
were actually called, and any updates that were called more than once
will appear in this list more than once.
This method should yield or return an list of the tuples in the same
format representing the updates that should be applied, in the order
they should be applied. The default implementation filters out
duplicate updates without changing their order. In cases where it
matters, the last call to each update is used to determine the order.
"""
from more_itertools import unique_everseen as unique
yield from reversed(list(unique(reversed(self._pending_updates)))) | python | def _filter_pending_updates(self):
"""
Return all the updates that need to be applied, from a list of all the
updates that were called while the hold was active. This method is
meant to be overridden by subclasses that want to customize how held
updates are applied.
The `self._pending_updates` member variable is a list containing a
(method, args, kwargs) tuple for each update that was called while
updates were being held. This list is in the order that the updates
were actually called, and any updates that were called more than once
will appear in this list more than once.
This method should yield or return an list of the tuples in the same
format representing the updates that should be applied, in the order
they should be applied. The default implementation filters out
duplicate updates without changing their order. In cases where it
matters, the last call to each update is used to determine the order.
"""
from more_itertools import unique_everseen as unique
yield from reversed(list(unique(reversed(self._pending_updates)))) | [
"def",
"_filter_pending_updates",
"(",
"self",
")",
":",
"from",
"more_itertools",
"import",
"unique_everseen",
"as",
"unique",
"yield",
"from",
"reversed",
"(",
"list",
"(",
"unique",
"(",
"reversed",
"(",
"self",
".",
"_pending_updates",
")",
")",
")",
")"
]
| Return all the updates that need to be applied, from a list of all the
updates that were called while the hold was active. This method is
meant to be overridden by subclasses that want to customize how held
updates are applied.
The `self._pending_updates` member variable is a list containing a
(method, args, kwargs) tuple for each update that was called while
updates were being held. This list is in the order that the updates
were actually called, and any updates that were called more than once
will appear in this list more than once.
This method should yield or return a list of the tuples in the same
format representing the updates that should be applied, in the order
they should be applied. The default implementation filters out
duplicate updates without changing their order. In cases where it
matters, the last call to each update is used to determine the order. | [
"Return",
"all",
"the",
"updates",
"that",
"need",
"to",
"be",
"applied",
"from",
"a",
"list",
"of",
"all",
"the",
"updates",
"that",
"were",
"called",
"while",
"the",
"hold",
"was",
"active",
".",
"This",
"method",
"is",
"meant",
"to",
"be",
"overridden",
"by",
"subclasses",
"that",
"want",
"to",
"customize",
"how",
"held",
"updates",
"are",
"applied",
"."
]
| f0125c1f218b05cfb2efb52a88d80f54eae007a0 | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/helpers.py#L59-L79 | train |
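A hedged sketch of overriding the hook above in a `HoldUpdatesMixin` subclass: this stricter policy keeps only the most recent call to each held method, ignoring argument differences (unlike the default, which deduplicates whole (method, args, kwargs) tuples).

from glooey.helpers import HoldUpdatesMixin

class LastCallWins(HoldUpdatesMixin):

    def _filter_pending_updates(self):
        # self._pending_updates holds (method, args, kwargs) tuples in call order.
        latest = {}
        for method, args, kwargs in self._pending_updates:
            latest[method] = (method, args, kwargs)  # later calls overwrite earlier ones
        # Yield in first-call order, but with the arguments from each method's last call.
        yield from latest.values()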
csurfer/gitsuggest | gitsuggest/utilities.py | ReposToHTML.get_html | def get_html(self):
"""Method to convert the repository list to a search results page."""
here = path.abspath(path.dirname(__file__))
env = Environment(loader=FileSystemLoader(path.join(here, "res/")))
suggest = env.get_template("suggest.htm.j2")
return suggest.render(
logo=path.join(here, "res/logo.png"),
user_login=self.user,
repos=self.repos,
) | python | def get_html(self):
"""Method to convert the repository list to a search results page."""
here = path.abspath(path.dirname(__file__))
env = Environment(loader=FileSystemLoader(path.join(here, "res/")))
suggest = env.get_template("suggest.htm.j2")
return suggest.render(
logo=path.join(here, "res/logo.png"),
user_login=self.user,
repos=self.repos,
) | [
"def",
"get_html",
"(",
"self",
")",
":",
"here",
"=",
"path",
".",
"abspath",
"(",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"env",
"=",
"Environment",
"(",
"loader",
"=",
"FileSystemLoader",
"(",
"path",
".",
"join",
"(",
"here",
",",
"\"res/\"",
")",
")",
")",
"suggest",
"=",
"env",
".",
"get_template",
"(",
"\"suggest.htm.j2\"",
")",
"return",
"suggest",
".",
"render",
"(",
"logo",
"=",
"path",
".",
"join",
"(",
"here",
",",
"\"res/logo.png\"",
")",
",",
"user_login",
"=",
"self",
".",
"user",
",",
"repos",
"=",
"self",
".",
"repos",
",",
")"
]
| Method to convert the repository list to a search results page. | [
"Method",
"to",
"convert",
"the",
"repository",
"list",
"to",
"a",
"search",
"results",
"page",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/utilities.py#L26-L37 | train |
csurfer/gitsuggest | gitsuggest/utilities.py | ReposToHTML.to_html | def to_html(self, write_to):
"""Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to.
"""
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | python | def to_html(self, write_to):
"""Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to.
"""
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | [
"def",
"to_html",
"(",
"self",
",",
"write_to",
")",
":",
"page_html",
"=",
"self",
".",
"get_html",
"(",
")",
"with",
"open",
"(",
"write_to",
",",
"\"wb\"",
")",
"as",
"writefile",
":",
"writefile",
".",
"write",
"(",
"page_html",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
]
| Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to. | [
"Method",
"to",
"convert",
"the",
"repository",
"list",
"to",
"a",
"search",
"results",
"page",
"and",
"write",
"it",
"to",
"a",
"HTML",
"file",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/utilities.py#L39-L48 | train |
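A hedged end-to-end sketch of the two methods above; the `GitSuggest` constructor arguments, `get_suggested_repositories()`, and the `ReposToHTML(user, repos)` constructor signature are assumptions, since only `get_html()` and `to_html()` appear in this record.

from gitsuggest import GitSuggest
from gitsuggest.utilities import ReposToHTML

suggest = GitSuggest(username="octocat")             # constructor args assumed
repos = list(suggest.get_suggested_repositories())   # assumed public API
page = ReposToHTML("octocat", repos)                 # (user, repos) signature assumed
html = page.get_html()                               # rendered search-results page
page.to_html("suggested.html")                       # or write it straight to disk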
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.get_unique_repositories | def get_unique_repositories(repo_list):
"""Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them.
"""
unique_list = list()
included = defaultdict(lambda: False)
for repo in repo_list:
if not included[repo.full_name]:
unique_list.append(repo)
included[repo.full_name] = True
return unique_list | python | def get_unique_repositories(repo_list):
"""Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them.
"""
unique_list = list()
included = defaultdict(lambda: False)
for repo in repo_list:
if not included[repo.full_name]:
unique_list.append(repo)
included[repo.full_name] = True
return unique_list | [
"def",
"get_unique_repositories",
"(",
"repo_list",
")",
":",
"unique_list",
"=",
"list",
"(",
")",
"included",
"=",
"defaultdict",
"(",
"lambda",
":",
"False",
")",
"for",
"repo",
"in",
"repo_list",
":",
"if",
"not",
"included",
"[",
"repo",
".",
"full_name",
"]",
":",
"unique_list",
".",
"append",
"(",
"repo",
")",
"included",
"[",
"repo",
".",
"full_name",
"]",
"=",
"True",
"return",
"unique_list"
]
| Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them. | [
"Method",
"to",
"create",
"unique",
"list",
"of",
"repositories",
"from",
"the",
"list",
"of",
"repositories",
"given",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L74-L87 | train |
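A self-contained sketch of the dedup helper above; stand-in objects are used since only the `full_name` attribute is consulted.

from gitsuggest import GitSuggest

class FakeRepo:
    def __init__(self, full_name):
        self.full_name = full_name

repos = [FakeRepo("a/x"), FakeRepo("b/y"), FakeRepo("a/x")]
unique = GitSuggest.get_unique_repositories(repos)
print([r.full_name for r in unique])  # ['a/x', 'b/y'] - first occurrence kept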
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.minus | def minus(repo_list_a, repo_list_b):
"""Method to create a list of repositories such that the repository
belongs to repo list a but not repo list b.
In an ideal scenario we should be able to do this by set(a) - set(b)
but as GithubRepositories have shown that set() on them is not reliable
resort to this until it is all sorted out.
:param repo_list_a: List of repositories.
:param repo_list_b: List of repositories.
"""
included = defaultdict(lambda: False)
for repo in repo_list_b:
included[repo.full_name] = True
a_minus_b = list()
for repo in repo_list_a:
if not included[repo.full_name]:
included[repo.full_name] = True
a_minus_b.append(repo)
return a_minus_b | python | def minus(repo_list_a, repo_list_b):
"""Method to create a list of repositories such that the repository
belongs to repo list a but not repo list b.
In an ideal scenario we should be able to do this by set(a) - set(b)
but as GithubRepositories have shown that set() on them is not reliable
resort to this until it is all sorted out.
:param repo_list_a: List of repositories.
:param repo_list_b: List of repositories.
"""
included = defaultdict(lambda: False)
for repo in repo_list_b:
included[repo.full_name] = True
a_minus_b = list()
for repo in repo_list_a:
if not included[repo.full_name]:
included[repo.full_name] = True
a_minus_b.append(repo)
return a_minus_b | [
"def",
"minus",
"(",
"repo_list_a",
",",
"repo_list_b",
")",
":",
"included",
"=",
"defaultdict",
"(",
"lambda",
":",
"False",
")",
"for",
"repo",
"in",
"repo_list_b",
":",
"included",
"[",
"repo",
".",
"full_name",
"]",
"=",
"True",
"a_minus_b",
"=",
"list",
"(",
")",
"for",
"repo",
"in",
"repo_list_a",
":",
"if",
"not",
"included",
"[",
"repo",
".",
"full_name",
"]",
":",
"included",
"[",
"repo",
".",
"full_name",
"]",
"=",
"True",
"a_minus_b",
".",
"append",
"(",
"repo",
")",
"return",
"a_minus_b"
]
| Method to create a list of repositories such that the repository
belongs to repo list a but not repo list b.
In an ideal scenario we should be able to do this by set(a) - set(b)
but as GithubRepositories have shown that set() on them is not reliable, we
resort to this until it is all sorted out.
:param repo_list_a: List of repositories.
:param repo_list_b: List of repositories. | [
"Method",
"to",
"create",
"a",
"list",
"of",
"repositories",
"such",
"that",
"the",
"repository",
"belongs",
"to",
"repo",
"list",
"a",
"but",
"not",
"repo",
"list",
"b",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L90-L112 | train |
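The same stand-in approach works for `minus`, which keeps repositories from the first list whose `full_name` does not appear in the second.

from gitsuggest import GitSuggest

class FakeRepo:
    def __init__(self, full_name):
        self.full_name = full_name

list_a = [FakeRepo("a/x"), FakeRepo("b/y")]
list_b = [FakeRepo("b/y")]
print([r.full_name for r in GitSuggest.minus(list_a, list_b)])  # ['a/x']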
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__populate_repositories_of_interest | def __populate_repositories_of_interest(self, username):
"""Method to populate repositories which will be used to suggest
repositories for the user. For this purpose we use two kinds of
repositories.
1. Repositories starred by user him/herself.
2. Repositories starred by the users followed by the user.
:param username: Username for the user for whom repositories are being
suggested for.
"""
# Handle to the user to whom repositories need to be suggested.
user = self.github.get_user(username)
# Procure repositories starred by the user.
self.user_starred_repositories.extend(user.get_starred())
# Repositories starred by users followed by the user.
if self.deep_dive:
for following_user in user.get_following():
self.user_following_starred_repositories.extend(
following_user.get_starred()
) | python | def __populate_repositories_of_interest(self, username):
"""Method to populate repositories which will be used to suggest
repositories for the user. For this purpose we use two kinds of
repositories.
1. Repositories starred by user him/herself.
2. Repositories starred by the users followed by the user.
:param username: Username for the user for whom repositories are being
suggested for.
"""
# Handle to the user to whom repositories need to be suggested.
user = self.github.get_user(username)
# Procure repositories starred by the user.
self.user_starred_repositories.extend(user.get_starred())
# Repositories starred by users followed by the user.
if self.deep_dive:
for following_user in user.get_following():
self.user_following_starred_repositories.extend(
following_user.get_starred()
) | [
"def",
"__populate_repositories_of_interest",
"(",
"self",
",",
"username",
")",
":",
"# Handle to the user to whom repositories need to be suggested.",
"user",
"=",
"self",
".",
"github",
".",
"get_user",
"(",
"username",
")",
"# Procure repositories starred by the user.",
"self",
".",
"user_starred_repositories",
".",
"extend",
"(",
"user",
".",
"get_starred",
"(",
")",
")",
"# Repositories starred by users followed by the user.",
"if",
"self",
".",
"deep_dive",
":",
"for",
"following_user",
"in",
"user",
".",
"get_following",
"(",
")",
":",
"self",
".",
"user_following_starred_repositories",
".",
"extend",
"(",
"following_user",
".",
"get_starred",
"(",
")",
")"
]
| Method to populate repositories which will be used to suggest
repositories for the user. For this purpose we use two kinds of
repositories.
1. Repositories starred by user him/herself.
2. Repositories starred by the users followed by the user.
:param username: Username for the user for whom repositories are being
suggested for. | [
"Method",
"to",
"populate",
"repositories",
"which",
"will",
"be",
"used",
"to",
"suggest",
"repositories",
"for",
"the",
"user",
".",
"For",
"this",
"purpose",
"we",
"use",
"two",
"kinds",
"of",
"repositories",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L114-L136 | train |
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__get_interests | def __get_interests(self):
"""Method to procure description of repositories the authenticated user
is interested in.
We currently attribute interest to:
1. The repositories the authenticated user has starred.
2. The repositories the users the authenticated user follows have
starred.
:return: List of repository descriptions.
"""
# All repositories of interest.
repos_of_interest = itertools.chain(
self.user_starred_repositories,
self.user_following_starred_repositories,
)
# Extract descriptions out of repositories of interest.
repo_descriptions = [repo.description for repo in repos_of_interest]
return list(set(repo_descriptions)) | python | def __get_interests(self):
"""Method to procure description of repositories the authenticated user
is interested in.
We currently attribute interest to:
1. The repositories the authenticated user has starred.
2. The repositories the users the authenticated user follows have
starred.
:return: List of repository descriptions.
"""
# All repositories of interest.
repos_of_interest = itertools.chain(
self.user_starred_repositories,
self.user_following_starred_repositories,
)
# Extract descriptions out of repositories of interest.
repo_descriptions = [repo.description for repo in repos_of_interest]
return list(set(repo_descriptions)) | [
"def",
"__get_interests",
"(",
"self",
")",
":",
"# All repositories of interest.",
"repos_of_interest",
"=",
"itertools",
".",
"chain",
"(",
"self",
".",
"user_starred_repositories",
",",
"self",
".",
"user_following_starred_repositories",
",",
")",
"# Extract descriptions out of repositories of interest.",
"repo_descriptions",
"=",
"[",
"repo",
".",
"description",
"for",
"repo",
"in",
"repos_of_interest",
"]",
"return",
"list",
"(",
"set",
"(",
"repo_descriptions",
")",
")"
]
| Method to procure description of repositories the authenticated user
is interested in.
We currently attribute interest to:
1. The repositories the authenticated user has starred.
2. The repositories the users the authenticated user follows have
starred.
:return: List of repository descriptions. | [
"Method",
"to",
"procure",
"description",
"of",
"repositories",
"the",
"authenticated",
"user",
"is",
"interested",
"in",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L138-L157 | train |
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__get_words_to_ignore | def __get_words_to_ignore(self):
"""Compiles list of all words to ignore.
:return: List of words to ignore.
"""
# Stop words in English.
english_stopwords = stopwords.words("english")
here = path.abspath(path.dirname(__file__))
# Languages in git repositories.
git_languages = []
with open(path.join(here, "gitlang/languages.txt"), "r") as languages:
git_languages = [line.strip() for line in languages]
# Other words to avoid in git repositories.
words_to_avoid = []
with open(path.join(here, "gitlang/others.txt"), "r") as languages:
words_to_avoid = [line.strip() for line in languages]
return set(
itertools.chain(english_stopwords, git_languages, words_to_avoid)
) | python | def __get_words_to_ignore(self):
"""Compiles list of all words to ignore.
:return: List of words to ignore.
"""
# Stop words in English.
english_stopwords = stopwords.words("english")
here = path.abspath(path.dirname(__file__))
# Languages in git repositories.
git_languages = []
with open(path.join(here, "gitlang/languages.txt"), "r") as languages:
git_languages = [line.strip() for line in languages]
# Other words to avoid in git repositories.
words_to_avoid = []
with open(path.join(here, "gitlang/others.txt"), "r") as languages:
words_to_avoid = [line.strip() for line in languages]
return set(
itertools.chain(english_stopwords, git_languages, words_to_avoid)
) | [
"def",
"__get_words_to_ignore",
"(",
"self",
")",
":",
"# Stop words in English.",
"english_stopwords",
"=",
"stopwords",
".",
"words",
"(",
"\"english\"",
")",
"here",
"=",
"path",
".",
"abspath",
"(",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"# Languages in git repositories.",
"git_languages",
"=",
"[",
"]",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"here",
",",
"\"gitlang/languages.txt\"",
")",
",",
"\"r\"",
")",
"as",
"langauges",
":",
"git_languages",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"langauges",
"]",
"# Other words to avoid in git repositories.",
"words_to_avoid",
"=",
"[",
"]",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"here",
",",
"\"gitlang/others.txt\"",
")",
",",
"\"r\"",
")",
"as",
"languages",
":",
"words_to_avoid",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"languages",
"]",
"return",
"set",
"(",
"itertools",
".",
"chain",
"(",
"english_stopwords",
",",
"git_languages",
",",
"words_to_avoid",
")",
")"
]
| Compiles list of all words to ignore.
:return: List of words to ignore. | [
"Compiles",
"list",
"of",
"all",
"words",
"to",
"ignore",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L159-L181 | train |
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__clean_and_tokenize | def __clean_and_tokenize(self, doc_list):
"""Method to clean and tokenize the document list.
:param doc_list: Document list to clean and tokenize.
:return: Cleaned and tokenized document list.
"""
# Some repositories fill entire documentation in description. We ignore
# such repositories for cleaner tokens.
doc_list = filter(
lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
doc_list,
)
cleaned_doc_list = list()
# Regular expression to remove out all punctuations, numbers and other
# un-necessary text substrings like emojis etc.
tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
# Get stop words.
stopwords = self.__get_words_to_ignore()
# Get english words.
dict_words = self.__get_words_to_consider()
for doc in doc_list:
# Lowercase doc.
lower = doc.lower()
# Tokenize removing numbers and punctuation.
tokens = tokenizer.tokenize(lower)
# Include meaningful words.
tokens = [tok for tok in tokens if tok in dict_words]
# Remove stopwords.
tokens = [tok for tok in tokens if tok not in stopwords]
# Filter Nones if any are introduced.
tokens = [tok for tok in tokens if tok is not None]
cleaned_doc_list.append(tokens)
return cleaned_doc_list | python | def __clean_and_tokenize(self, doc_list):
"""Method to clean and tokenize the document list.
:param doc_list: Document list to clean and tokenize.
:return: Cleaned and tokenized document list.
"""
# Some repositories fill entire documentation in description. We ignore
# such repositories for cleaner tokens.
doc_list = filter(
lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
doc_list,
)
cleaned_doc_list = list()
# Regular expression to remove out all punctuations, numbers and other
# un-necessary text substrings like emojis etc.
tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
# Get stop words.
stopwords = self.__get_words_to_ignore()
# Get english words.
dict_words = self.__get_words_to_consider()
for doc in doc_list:
# Lowercase doc.
lower = doc.lower()
# Tokenize removing numbers and punctuation.
tokens = tokenizer.tokenize(lower)
# Include meaningful words.
tokens = [tok for tok in tokens if tok in dict_words]
# Remove stopwords.
tokens = [tok for tok in tokens if tok not in stopwords]
# Filter Nones if any are introduced.
tokens = [tok for tok in tokens if tok is not None]
cleaned_doc_list.append(tokens)
return cleaned_doc_list | [
"def",
"__clean_and_tokenize",
"(",
"self",
",",
"doc_list",
")",
":",
"# Some repositories fill entire documentation in description. We ignore",
"# such repositories for cleaner tokens.",
"doc_list",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
"and",
"len",
"(",
"x",
")",
"<=",
"GitSuggest",
".",
"MAX_DESC_LEN",
",",
"doc_list",
",",
")",
"cleaned_doc_list",
"=",
"list",
"(",
")",
"# Regular expression to remove out all punctuations, numbers and other",
"# un-necessary text substrings like emojis etc.",
"tokenizer",
"=",
"RegexpTokenizer",
"(",
"r\"[a-zA-Z]+\"",
")",
"# Get stop words.",
"stopwords",
"=",
"self",
".",
"__get_words_to_ignore",
"(",
")",
"# Get english words.",
"dict_words",
"=",
"self",
".",
"__get_words_to_consider",
"(",
")",
"for",
"doc",
"in",
"doc_list",
":",
"# Lowercase doc.",
"lower",
"=",
"doc",
".",
"lower",
"(",
")",
"# Tokenize removing numbers and punctuation.",
"tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"lower",
")",
"# Include meaningful words.",
"tokens",
"=",
"[",
"tok",
"for",
"tok",
"in",
"tokens",
"if",
"tok",
"in",
"dict_words",
"]",
"# Remove stopwords.",
"tokens",
"=",
"[",
"tok",
"for",
"tok",
"in",
"tokens",
"if",
"tok",
"not",
"in",
"stopwords",
"]",
"# Filter Nones if any are introduced.",
"tokens",
"=",
"[",
"tok",
"for",
"tok",
"in",
"tokens",
"if",
"tok",
"is",
"not",
"None",
"]",
"cleaned_doc_list",
".",
"append",
"(",
"tokens",
")",
"return",
"cleaned_doc_list"
]
| Method to clean and tokenize the document list.
:param doc_list: Document list to clean and tokenize.
:return: Cleaned and tokenized document list. | [
"Method",
"to",
"clean",
"and",
"tokenize",
"the",
"document",
"list",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L190-L233 | train |
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__construct_lda_model | def __construct_lda_model(self):
"""Method to create LDA model to procure list of topics from.
We do that by first fetching the descriptions of repositories user has
shown interest in. We tokenize the hence fetched descriptions to
procure list of cleaned tokens by dropping all the stop words and
language names from it.
We use the cleaned and sanitized token list to train LDA model from
which we hope to procure topics of interests to the authenticated user.
"""
# Fetch descriptions of repos of interest to authenticated user.
repos_of_interest = self.__get_interests()
# Procure clean tokens from the descriptions.
cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
# If cleaned tokens are empty, it can cause an exception while
# generating LDA. But tokens shouldn't be something meaningful as that
# would mean we are suggesting repos without reason. Hence the random
# string to ensure that LDA doesn't cause exception but the token
# doesn't generate any suggestions either.
if not cleaned_tokens:
cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
# Setup LDA requisites.
dictionary = corpora.Dictionary(cleaned_tokens)
corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
# Generate LDA model
self.lda_model = models.ldamodel.LdaModel(
corpus, num_topics=1, id2word=dictionary, passes=10
) | python | def __construct_lda_model(self):
"""Method to create LDA model to procure list of topics from.
We do that by first fetching the descriptions of repositories user has
shown interest in. We tokenize the hence fetched descriptions to
procure list of cleaned tokens by dropping all the stop words and
language names from it.
We use the cleaned and sanitized token list to train LDA model from
which we hope to procure topics of interests to the authenticated user.
"""
# Fetch descriptions of repos of interest to authenticated user.
repos_of_interest = self.__get_interests()
# Procure clean tokens from the descriptions.
cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
# If cleaned tokens are empty, it can cause an exception while
# generating LDA. But tokens shouldn't be something meaningful as that
# would mean we are suggesting repos without reason. Hence the random
# string to ensure that LDA doesn't cause exception but the token
# doesn't generate any suggestions either.
if not cleaned_tokens:
cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
# Setup LDA requisites.
dictionary = corpora.Dictionary(cleaned_tokens)
corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
# Generate LDA model
self.lda_model = models.ldamodel.LdaModel(
corpus, num_topics=1, id2word=dictionary, passes=10
) | [
"def",
"__construct_lda_model",
"(",
"self",
")",
":",
"# Fetch descriptions of repos of interest to authenticated user.",
"repos_of_interest",
"=",
"self",
".",
"__get_interests",
"(",
")",
"# Procure clean tokens from the descriptions.",
"cleaned_tokens",
"=",
"self",
".",
"__clean_and_tokenize",
"(",
"repos_of_interest",
")",
"# If cleaned tokens are empty, it can cause an exception while",
"# generating LDA. But tokens shouldn't be something meaningful as that",
"# would mean we are suggesting repos without reason. Hence the random",
"# string to ensure that LDA doesn't cause exception but the token",
"# doesn't generate any suggestions either.",
"if",
"not",
"cleaned_tokens",
":",
"cleaned_tokens",
"=",
"[",
"[",
"\"zkfgzkfgzkfgzkfgzkfgzkfg\"",
"]",
"]",
"# Setup LDA requisites.",
"dictionary",
"=",
"corpora",
".",
"Dictionary",
"(",
"cleaned_tokens",
")",
"corpus",
"=",
"[",
"dictionary",
".",
"doc2bow",
"(",
"text",
")",
"for",
"text",
"in",
"cleaned_tokens",
"]",
"# Generate LDA model",
"self",
".",
"lda_model",
"=",
"models",
".",
"ldamodel",
".",
"LdaModel",
"(",
"corpus",
",",
"num_topics",
"=",
"1",
",",
"id2word",
"=",
"dictionary",
",",
"passes",
"=",
"10",
")"
]
| Method to create LDA model to procure list of topics from.
We do that by first fetching the descriptions of repositories user has
shown interest in. We tokenize the hence fetched descriptions to
procure list of cleaned tokens by dropping all the stop words and
language names from it.
We use the cleaned and sanitized token list to train LDA model from
which we hope to procure topics of interests to the authenticated user. | [
"Method",
"to",
"create",
"LDA",
"model",
"to",
"procure",
"list",
"of",
"topics",
"from",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L235-L267 | train |
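A tiny self-contained illustration of the same gensim pipeline the docstring above describes (dictionary, bag-of-words corpus, then LdaModel); the toy token lists are made up and stand in for the cleaned repository-description tokens.

from gensim import corpora, models

cleaned_tokens = [["python", "web", "framework"],
                  ["machine", "learning", "python", "library"]]
dictionary = corpora.Dictionary(cleaned_tokens)                  # token -> id mapping
corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]   # bag-of-words vectors
lda = models.ldamodel.LdaModel(corpus, num_topics=1, id2word=dictionary, passes=10)
print(lda.get_topic_terms(0, topn=3))                            # (token_id, weight) pairs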
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.__get_query_for_repos | def __get_query_for_repos(self, term_count=5):
"""Method to procure query based on topics authenticated user is
interested in.
:param term_count: Count of terms in query.
:return: Query string.
"""
repo_query_terms = list()
for term in self.lda_model.get_topic_terms(0, topn=term_count):
repo_query_terms.append(self.lda_model.id2word[term[0]])
return " ".join(repo_query_terms) | python | def __get_query_for_repos(self, term_count=5):
"""Method to procure query based on topics authenticated user is
interested in.
:param term_count: Count of terms in query.
:return: Query string.
"""
repo_query_terms = list()
for term in self.lda_model.get_topic_terms(0, topn=term_count):
repo_query_terms.append(self.lda_model.id2word[term[0]])
return " ".join(repo_query_terms) | [
"def",
"__get_query_for_repos",
"(",
"self",
",",
"term_count",
"=",
"5",
")",
":",
"repo_query_terms",
"=",
"list",
"(",
")",
"for",
"term",
"in",
"self",
".",
"lda_model",
".",
"get_topic_terms",
"(",
"0",
",",
"topn",
"=",
"term_count",
")",
":",
"repo_query_terms",
".",
"append",
"(",
"self",
".",
"lda_model",
".",
"id2word",
"[",
"term",
"[",
"0",
"]",
"]",
")",
"return",
"\" \"",
".",
"join",
"(",
"repo_query_terms",
")"
]
| Method to procure query based on topics authenticated user is
interested in.
:param term_count: Count of terms in query.
:return: Query string. | [
"Method",
"to",
"procure",
"query",
"based",
"on",
"topics",
"authenticated",
"user",
"is",
"interested",
"in",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L269-L279 | train |
csurfer/gitsuggest | gitsuggest/suggest.py | GitSuggest.get_suggested_repositories | def get_suggested_repositories(self):
"""Method to procure suggested repositories for the user.
:return: Iterator to procure suggested repositories for the user.
"""
if self.suggested_repositories is None:
# Procure repositories to suggest to user.
repository_set = list()
for term_count in range(5, 2, -1):
query = self.__get_query_for_repos(term_count=term_count)
repository_set.extend(self.__get_repos_for_query(query))
# Remove repositories authenticated user is already interested in.
catchy_repos = GitSuggest.minus(
repository_set, self.user_starred_repositories
)
# Filter out repositories with too long descriptions. This is a
# measure to weed out spammy repositories.
filtered_repos = []
if len(catchy_repos) > 0:
for repo in catchy_repos:
if (
repo is not None
and repo.description is not None
and len(repo.description) <= GitSuggest.MAX_DESC_LEN
):
filtered_repos.append(repo)
# Present the repositories, highly starred to not starred.
filtered_repos = sorted(
filtered_repos,
key=attrgetter("stargazers_count"),
reverse=True,
)
self.suggested_repositories = GitSuggest.get_unique_repositories(
filtered_repos
)
# Return an iterator to help user fetch the repository listing.
for repository in self.suggested_repositories:
yield repository | python | def get_suggested_repositories(self):
"""Method to procure suggested repositories for the user.
:return: Iterator to procure suggested repositories for the user.
"""
if self.suggested_repositories is None:
# Procure repositories to suggest to user.
repository_set = list()
for term_count in range(5, 2, -1):
query = self.__get_query_for_repos(term_count=term_count)
repository_set.extend(self.__get_repos_for_query(query))
# Remove repositories authenticated user is already interested in.
catchy_repos = GitSuggest.minus(
repository_set, self.user_starred_repositories
)
# Filter out repositories with too long descriptions. This is a
# measure to weed out spammy repositories.
filtered_repos = []
if len(catchy_repos) > 0:
for repo in catchy_repos:
if (
repo is not None
and repo.description is not None
and len(repo.description) <= GitSuggest.MAX_DESC_LEN
):
filtered_repos.append(repo)
# Present the repositories, highly starred to not starred.
filtered_repos = sorted(
filtered_repos,
key=attrgetter("stargazers_count"),
reverse=True,
)
self.suggested_repositories = GitSuggest.get_unique_repositories(
filtered_repos
)
# Return an iterator to help user fetch the repository listing.
for repository in self.suggested_repositories:
yield repository | [
"def",
"get_suggested_repositories",
"(",
"self",
")",
":",
"if",
"self",
".",
"suggested_repositories",
"is",
"None",
":",
"# Procure repositories to suggest to user.",
"repository_set",
"=",
"list",
"(",
")",
"for",
"term_count",
"in",
"range",
"(",
"5",
",",
"2",
",",
"-",
"1",
")",
":",
"query",
"=",
"self",
".",
"__get_query_for_repos",
"(",
"term_count",
"=",
"term_count",
")",
"repository_set",
".",
"extend",
"(",
"self",
".",
"__get_repos_for_query",
"(",
"query",
")",
")",
"# Remove repositories authenticated user is already interested in.",
"catchy_repos",
"=",
"GitSuggest",
".",
"minus",
"(",
"repository_set",
",",
"self",
".",
"user_starred_repositories",
")",
"# Filter out repositories with too long descriptions. This is a",
"# measure to weed out spammy repositories.",
"filtered_repos",
"=",
"[",
"]",
"if",
"len",
"(",
"catchy_repos",
")",
">",
"0",
":",
"for",
"repo",
"in",
"catchy_repos",
":",
"if",
"(",
"repo",
"is",
"not",
"None",
"and",
"repo",
".",
"description",
"is",
"not",
"None",
"and",
"len",
"(",
"repo",
".",
"description",
")",
"<=",
"GitSuggest",
".",
"MAX_DESC_LEN",
")",
":",
"filtered_repos",
".",
"append",
"(",
"repo",
")",
"# Present the repositories, highly starred to not starred.",
"filtered_repos",
"=",
"sorted",
"(",
"filtered_repos",
",",
"key",
"=",
"attrgetter",
"(",
"\"stargazers_count\"",
")",
",",
"reverse",
"=",
"True",
",",
")",
"self",
".",
"suggested_repositories",
"=",
"GitSuggest",
".",
"get_unique_repositories",
"(",
"filtered_repos",
")",
"# Return an iterator to help user fetch the repository listing.",
"for",
"repository",
"in",
"self",
".",
"suggested_repositories",
":",
"yield",
"repository"
]
| Method to procure suggested repositories for the user.
:return: Iterator to procure suggested repositories for the user. | [
"Method",
"to",
"procure",
"suggested",
"repositories",
"for",
"the",
"user",
"."
]
| 02efdbf50acb094e502aef9c139dde62676455ee | https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L296-L339 | train |
bcicen/wikitables | wikitables/util.py | guess_type | def guess_type(s):
""" attempt to convert string value into numeric type """
sc = s.replace(',', '') # remove comma from potential numbers
try:
return int(sc)
except ValueError:
pass
try:
return float(sc)
except ValueError:
pass
return s | python | def guess_type(s):
""" attempt to convert string value into numeric type """
sc = s.replace(',', '') # remove comma from potential numbers
try:
return int(sc)
except ValueError:
pass
try:
return float(sc)
except ValueError:
pass
return s | [
"def",
"guess_type",
"(",
"s",
")",
":",
"sc",
"=",
"s",
".",
"replace",
"(",
"','",
",",
"''",
")",
"# remove comma from potential numbers",
"try",
":",
"return",
"int",
"(",
"sc",
")",
"except",
"ValueError",
":",
"pass",
"try",
":",
"return",
"float",
"(",
"sc",
")",
"except",
"ValueError",
":",
"pass",
"return",
"s"
]
| attempt to convert string value into numeric type | [
"attempt",
"to",
"convert",
"string",
"value",
"into",
"numeric",
"type"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/util.py#L15-L29 | train |
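A brief usage sketch for guess_type; the expected values follow directly from the code above (comma stripped, int tried first, then float, otherwise the original string is returned).

from wikitables.util import guess_type

assert guess_type("1,234") == 1234     # comma removed, parsed as int
assert guess_type("3.14") == 3.14      # falls through int() and parses as float
assert guess_type("7th") == "7th"      # non-numeric strings are returned unchanged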
bcicen/wikitables | wikitables/readers.py | FieldReader.parse | def parse(self, node):
"""
Return generator yielding Field objects for a given node
"""
self._attrs = {}
vals = []
yielded = False
for x in self._read_parts(node):
if isinstance(x, Field):
yielded = True
x.attrs = self._attrs
yield x
else:
vals.append(ustr(x).strip(' \n\t'))
joined = ' '.join([ x for x in vals if x ])
if joined:
yielded = True
yield Field(node, guess_type(joined), self._attrs)
if not yielded:
yield Field(node, "", self._attrs) | python | def parse(self, node):
"""
Return generator yielding Field objects for a given node
"""
self._attrs = {}
vals = []
yielded = False
for x in self._read_parts(node):
if isinstance(x, Field):
yielded = True
x.attrs = self._attrs
yield x
else:
vals.append(ustr(x).strip(' \n\t'))
joined = ' '.join([ x for x in vals if x ])
if joined:
yielded = True
yield Field(node, guess_type(joined), self._attrs)
if not yielded:
yield Field(node, "", self._attrs) | [
"def",
"parse",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_attrs",
"=",
"{",
"}",
"vals",
"=",
"[",
"]",
"yielded",
"=",
"False",
"for",
"x",
"in",
"self",
".",
"_read_parts",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"Field",
")",
":",
"yielded",
"=",
"True",
"x",
".",
"attrs",
"=",
"self",
".",
"_attrs",
"yield",
"x",
"else",
":",
"vals",
".",
"append",
"(",
"ustr",
"(",
"x",
")",
".",
"strip",
"(",
"' \\n\\t'",
")",
")",
"joined",
"=",
"' '",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"vals",
"if",
"x",
"]",
")",
"if",
"joined",
":",
"yielded",
"=",
"True",
"yield",
"Field",
"(",
"node",
",",
"guess_type",
"(",
"joined",
")",
",",
"self",
".",
"_attrs",
")",
"if",
"not",
"yielded",
":",
"yield",
"Field",
"(",
"node",
",",
"\"\"",
",",
"self",
".",
"_attrs",
")"
]
| Return generator yielding Field objects for a given node | [
"Return",
"generator",
"yielding",
"Field",
"objects",
"for",
"a",
"given",
"node"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/readers.py#L21-L43 | train |
bcicen/wikitables | wikitables/readers.py | RowReader.parse | def parse(self, *nodes):
"""
Parse one or more `tr` nodes, yielding wikitables.Row objects
"""
for n in nodes:
if not n.contents:
continue
row = self._parse(n)
if not row.is_null:
yield row | python | def parse(self, *nodes):
"""
Parse one or more `tr` nodes, yielding wikitables.Row objects
"""
for n in nodes:
if not n.contents:
continue
row = self._parse(n)
if not row.is_null:
yield row | [
"def",
"parse",
"(",
"self",
",",
"*",
"nodes",
")",
":",
"for",
"n",
"in",
"nodes",
":",
"if",
"not",
"n",
".",
"contents",
":",
"continue",
"row",
"=",
"self",
".",
"_parse",
"(",
"n",
")",
"if",
"not",
"row",
".",
"is_null",
":",
"yield",
"row"
]
| Parse one or more `tr` nodes, yielding wikitables.Row objects | [
"Parse",
"one",
"or",
"more",
"tr",
"nodes",
"yielding",
"wikitables",
".",
"Row",
"objects"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/readers.py#L102-L111 | train |
bcicen/wikitables | wikitables/__init__.py | WikiTable._find_header_row | def _find_header_row(self):
"""
Evaluate all rows and determine header position, based on
greatest number of 'th' tagged elements
"""
th_max = 0
header_idx = 0
for idx, tr in enumerate(self._tr_nodes):
th_count = len(tr.contents.filter_tags(matches=ftag('th')))
if th_count > th_max:
th_max = th_count
header_idx = idx
if not th_max:
return
self._log('found header at row %d (%d <th> elements)' % \
(header_idx, th_max))
header_row = self._tr_nodes.pop(header_idx)
return header_row.contents.filter_tags(matches=ftag('th')) | python | def _find_header_row(self):
"""
Evaluate all rows and determine header position, based on
greatest number of 'th' tagged elements
"""
th_max = 0
header_idx = 0
for idx, tr in enumerate(self._tr_nodes):
th_count = len(tr.contents.filter_tags(matches=ftag('th')))
if th_count > th_max:
th_max = th_count
header_idx = idx
if not th_max:
return
self._log('found header at row %d (%d <th> elements)' % \
(header_idx, th_max))
header_row = self._tr_nodes.pop(header_idx)
return header_row.contents.filter_tags(matches=ftag('th')) | [
"def",
"_find_header_row",
"(",
"self",
")",
":",
"th_max",
"=",
"0",
"header_idx",
"=",
"0",
"for",
"idx",
",",
"tr",
"in",
"enumerate",
"(",
"self",
".",
"_tr_nodes",
")",
":",
"th_count",
"=",
"len",
"(",
"tr",
".",
"contents",
".",
"filter_tags",
"(",
"matches",
"=",
"ftag",
"(",
"'th'",
")",
")",
")",
"if",
"th_count",
">",
"th_max",
":",
"th_max",
"=",
"th_count",
"header_idx",
"=",
"idx",
"if",
"not",
"th_max",
":",
"return",
"self",
".",
"_log",
"(",
"'found header at row %d (%d <th> elements)'",
"%",
"(",
"header_idx",
",",
"th_max",
")",
")",
"header_row",
"=",
"self",
".",
"_tr_nodes",
".",
"pop",
"(",
"header_idx",
")",
"return",
"header_row",
".",
"contents",
".",
"filter_tags",
"(",
"matches",
"=",
"ftag",
"(",
"'th'",
")",
")"
]
| Evaluate all rows and determine header position, based on
greatest number of 'th' tagged elements | [
"Evaluate",
"all",
"rows",
"and",
"determine",
"header",
"position",
"based",
"on",
"greatest",
"number",
"of",
"th",
"tagged",
"elements"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/__init__.py#L92-L112 | train |
bcicen/wikitables | wikitables/__init__.py | WikiTable._make_default_header | def _make_default_header(self):
"""
Return a generic placeholder header based on the table's column count
"""
td_max = 0
for idx, tr in enumerate(self._tr_nodes):
td_count = len(tr.contents.filter_tags(matches=ftag('td')))
if td_count > td_max:
td_max = td_count
self._log('creating default header (%d columns)' % td_max)
return [ 'column%d' % n for n in range(0,td_max) ] | python | def _make_default_header(self):
"""
Return a generic placeholder header based on the table's column count
"""
td_max = 0
for idx, tr in enumerate(self._tr_nodes):
td_count = len(tr.contents.filter_tags(matches=ftag('td')))
if td_count > td_max:
td_max = td_count
self._log('creating default header (%d columns)' % td_max)
return [ 'column%d' % n for n in range(0,td_max) ] | [
"def",
"_make_default_header",
"(",
"self",
")",
":",
"td_max",
"=",
"0",
"for",
"idx",
",",
"tr",
"in",
"enumerate",
"(",
"self",
".",
"_tr_nodes",
")",
":",
"td_count",
"=",
"len",
"(",
"tr",
".",
"contents",
".",
"filter_tags",
"(",
"matches",
"=",
"ftag",
"(",
"'td'",
")",
")",
")",
"if",
"td_count",
">",
"td_max",
":",
"td_max",
"=",
"td_count",
"self",
".",
"_log",
"(",
"'creating default header (%d columns)'",
"%",
"td_max",
")",
"return",
"[",
"'column%d'",
"%",
"n",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"td_max",
")",
"]"
]
| Return a generic placeholder header based on the table's column count | [
"Return",
"a",
"generic",
"placeholder",
"header",
"based",
"on",
"the",
"tables",
"column",
"count"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/__init__.py#L114-L126 | train |
bcicen/wikitables | wikitables/client.py | Client.fetch_page | def fetch_page(self, title, method='GET'):
""" Query for page by title """
params = { 'prop': 'revisions',
'format': 'json',
'action': 'query',
'explaintext': '',
'titles': title,
'rvprop': 'content' }
r = self.request(method, self.base_url, params=params)
r.raise_for_status()
pages = r.json()["query"]["pages"]
# use key from first result in 'pages' array
pageid = list(pages.keys())[0]
if pageid == '-1':
raise ArticleNotFound('no matching articles returned')
return pages[pageid] | python | def fetch_page(self, title, method='GET'):
""" Query for page by title """
params = { 'prop': 'revisions',
'format': 'json',
'action': 'query',
'explaintext': '',
'titles': title,
'rvprop': 'content' }
r = self.request(method, self.base_url, params=params)
r.raise_for_status()
pages = r.json()["query"]["pages"]
# use key from first result in 'pages' array
pageid = list(pages.keys())[0]
if pageid == '-1':
raise ArticleNotFound('no matching articles returned')
return pages[pageid] | [
"def",
"fetch_page",
"(",
"self",
",",
"title",
",",
"method",
"=",
"'GET'",
")",
":",
"params",
"=",
"{",
"'prop'",
":",
"'revisions'",
",",
"'format'",
":",
"'json'",
",",
"'action'",
":",
"'query'",
",",
"'explaintext'",
":",
"''",
",",
"'titles'",
":",
"title",
",",
"'rvprop'",
":",
"'content'",
"}",
"r",
"=",
"self",
".",
"request",
"(",
"method",
",",
"self",
".",
"base_url",
",",
"params",
"=",
"params",
")",
"r",
".",
"raise_for_status",
"(",
")",
"pages",
"=",
"r",
".",
"json",
"(",
")",
"[",
"\"query\"",
"]",
"[",
"\"pages\"",
"]",
"# use key from first result in 'pages' array",
"pageid",
"=",
"list",
"(",
"pages",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"if",
"pageid",
"==",
"'-1'",
":",
"raise",
"ArticleNotFound",
"(",
"'no matching articles returned'",
")",
"return",
"pages",
"[",
"pageid",
"]"
]
| Query for page by title | [
"Query",
"for",
"page",
"by",
"title"
]
| 055cbabaa60762edbab78bf6a76ba19875f328f7 | https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/client.py#L16-L32 | train |
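A hedged usage sketch for fetch_page; constructing Client with no arguments is an assumption (its __init__ is not shown in this entry), and reading the revision text from the '*' key follows the legacy MediaWiki JSON format requested by the params above.

from wikitables.client import Client

client = Client()                                      # constructor arguments assumed
page = client.fetch_page('List of sovereign states')   # raises ArticleNotFound on a miss
wikitext = page['revisions'][0]['*']                   # raw wiki markup of the latest revision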
wooparadog/pystack | pystack.py | print_stack | def print_stack(pid, include_greenlet=False, debugger=None, verbose=False):
"""Executes a file in a running Python process."""
# TextIOWrapper of Python 3 is so strange.
sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout)
sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr)
make_args = make_gdb_args
environ = dict(os.environ)
if (
debugger == 'lldb' or
(debugger is None and platform.system().lower() == 'darwin')
):
make_args = make_lldb_args
# fix the PATH environment variable for using built-in Python with lldb
environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '')
tmp_fd, tmp_path = tempfile.mkstemp()
os.chmod(tmp_path, 0o777)
commands = []
commands.append(FILE_OPEN_COMMAND)
commands.extend(UTILITY_COMMANDS)
commands.extend(THREAD_STACK_COMMANDS)
if include_greenlet:
commands.extend(GREENLET_STACK_COMMANDS)
commands.append(FILE_CLOSE_COMMAND)
command = r';'.join(commands)
args = make_args(pid, command % tmp_path)
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if verbose:
sys_stderr.write(b'Standard Output:\n%s\n' % out)
sys_stderr.write(b'Standard Error:\n%s\n' % err)
sys_stderr.flush()
for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''):
sys_stdout.write(chunk)
sys_stdout.write(b'\n')
sys_stdout.flush() | python | def print_stack(pid, include_greenlet=False, debugger=None, verbose=False):
"""Executes a file in a running Python process."""
# TextIOWrapper of Python 3 is so strange.
sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout)
sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr)
make_args = make_gdb_args
environ = dict(os.environ)
if (
debugger == 'lldb' or
(debugger is None and platform.system().lower() == 'darwin')
):
make_args = make_lldb_args
# fix the PATH environment variable for using built-in Python with lldb
environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '')
tmp_fd, tmp_path = tempfile.mkstemp()
os.chmod(tmp_path, 0o777)
commands = []
commands.append(FILE_OPEN_COMMAND)
commands.extend(UTILITY_COMMANDS)
commands.extend(THREAD_STACK_COMMANDS)
if include_greenlet:
commands.extend(GREENLET_STACK_COMMANDS)
commands.append(FILE_CLOSE_COMMAND)
command = r';'.join(commands)
args = make_args(pid, command % tmp_path)
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if verbose:
sys_stderr.write(b'Standard Output:\n%s\n' % out)
sys_stderr.write(b'Standard Error:\n%s\n' % err)
sys_stderr.flush()
for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''):
sys_stdout.write(chunk)
sys_stdout.write(b'\n')
sys_stdout.flush() | [
"def",
"print_stack",
"(",
"pid",
",",
"include_greenlet",
"=",
"False",
",",
"debugger",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"# TextIOWrapper of Python 3 is so strange.",
"sys_stdout",
"=",
"getattr",
"(",
"sys",
".",
"stdout",
",",
"'buffer'",
",",
"sys",
".",
"stdout",
")",
"sys_stderr",
"=",
"getattr",
"(",
"sys",
".",
"stderr",
",",
"'buffer'",
",",
"sys",
".",
"stderr",
")",
"make_args",
"=",
"make_gdb_args",
"environ",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"if",
"(",
"debugger",
"==",
"'lldb'",
"or",
"(",
"debugger",
"is",
"None",
"and",
"platform",
".",
"system",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'darwin'",
")",
")",
":",
"make_args",
"=",
"make_lldb_args",
"# fix the PATH environment variable for using built-in Python with lldb",
"environ",
"[",
"'PATH'",
"]",
"=",
"'/usr/bin:%s'",
"%",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
"tmp_fd",
",",
"tmp_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"os",
".",
"chmod",
"(",
"tmp_path",
",",
"0o777",
")",
"commands",
"=",
"[",
"]",
"commands",
".",
"append",
"(",
"FILE_OPEN_COMMAND",
")",
"commands",
".",
"extend",
"(",
"UTILITY_COMMANDS",
")",
"commands",
".",
"extend",
"(",
"THREAD_STACK_COMMANDS",
")",
"if",
"include_greenlet",
":",
"commands",
".",
"extend",
"(",
"GREENLET_STACK_COMMANDS",
")",
"commands",
".",
"append",
"(",
"FILE_CLOSE_COMMAND",
")",
"command",
"=",
"r';'",
".",
"join",
"(",
"commands",
")",
"args",
"=",
"make_args",
"(",
"pid",
",",
"command",
"%",
"tmp_path",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"process",
".",
"communicate",
"(",
")",
"if",
"verbose",
":",
"sys_stderr",
".",
"write",
"(",
"b'Standard Output:\\n%s\\n'",
"%",
"out",
")",
"sys_stderr",
".",
"write",
"(",
"b'Standard Error:\\n%s\\n'",
"%",
"err",
")",
"sys_stderr",
".",
"flush",
"(",
")",
"for",
"chunk",
"in",
"iter",
"(",
"functools",
".",
"partial",
"(",
"os",
".",
"read",
",",
"tmp_fd",
",",
"1024",
")",
",",
"b''",
")",
":",
"sys_stdout",
".",
"write",
"(",
"chunk",
")",
"sys_stdout",
".",
"write",
"(",
"b'\\n'",
")",
"sys_stdout",
".",
"flush",
"(",
")"
]
| Print the thread stacks of a running Python process. | [
"Executes",
"a",
"file",
"in",
"a",
"running",
"Python",
"process",
"."
]
| 1ee5bb0ab516f60dd407d7b18d2faa752a8e289c | https://github.com/wooparadog/pystack/blob/1ee5bb0ab516f60dd407d7b18d2faa752a8e289c/pystack.py#L77-L116 | train |
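A minimal usage sketch based only on the signature shown above; the PID is a placeholder, and the caller needs permission to attach a debugger to that process.

from pystack import print_stack

# Dump the thread stacks of the Python process with PID 12345.
# debugger may be 'gdb', 'lldb', or None to pick one based on the platform.
print_stack(12345, include_greenlet=False, debugger='gdb', verbose=True)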
wooparadog/pystack | pystack.py | cli_main | def cli_main(pid, include_greenlet, debugger, verbose):
'''Print stack of python process.
$ pystack <pid>
'''
try:
print_stack(pid, include_greenlet, debugger, verbose)
except DebuggerNotFound as e:
click.echo('DebuggerNotFound: %s' % e.args[0], err=True)
click.get_current_context().exit(1) | python | def cli_main(pid, include_greenlet, debugger, verbose):
'''Print stack of python process.
$ pystack <pid>
'''
try:
print_stack(pid, include_greenlet, debugger, verbose)
except DebuggerNotFound as e:
click.echo('DebuggerNotFound: %s' % e.args[0], err=True)
click.get_current_context().exit(1) | [
"def",
"cli_main",
"(",
"pid",
",",
"include_greenlet",
",",
"debugger",
",",
"verbose",
")",
":",
"try",
":",
"print_stack",
"(",
"pid",
",",
"include_greenlet",
",",
"debugger",
",",
"verbose",
")",
"except",
"DebuggerNotFound",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"'DebuggerNotFound: %s'",
"%",
"e",
".",
"args",
"[",
"0",
"]",
",",
"err",
"=",
"True",
")",
"click",
".",
"get_current_context",
"(",
")",
".",
"exit",
"(",
"1",
")"
]
| Print stack of python process.
$ pystack <pid> | [
"Print",
"stack",
"of",
"python",
"process",
"."
]
| 1ee5bb0ab516f60dd407d7b18d2faa752a8e289c | https://github.com/wooparadog/pystack/blob/1ee5bb0ab516f60dd407d7b18d2faa752a8e289c/pystack.py#L131-L140 | train |
rahul13ramesh/hidden_markov | hidden_markov/hmm_class.py | hmm.forward_algo | def forward_algo(self,observations):
""" Finds the probability of an observation sequence for given model parameters
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: The probability of occurrence of the observation sequence
:rtype: float
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.forward_algo(observations))
.. note::
No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead.
"""
# Store total number of observations
total_stages = len(observations)
# Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number
# Initialize Alpha
ob_ind = self.obs_map[ observations[0] ]
alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Iteratively find alpha(using knowledge of alpha in the previous stage)
for curr_t in range(1,total_stages):
ob_ind = self.obs_map[observations[curr_t]]
alpha = np.dot( alpha , self.trans_prob)
alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] ))
# Sum the alpha's over the last stage
total_prob = alpha.sum()
return ( total_prob ) | python | def forward_algo(self,observations):
""" Finds the probability of an observation sequence for given model parameters
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: The probability of occurrence of the observation sequence
:rtype: float
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.forward_algo(observations))
.. note::
No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead.
"""
# Store total number of observations
total_stages = len(observations)
# Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number
# Initialize Alpha
ob_ind = self.obs_map[ observations[0] ]
alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Iteratively find alpha(using knowledge of alpha in the previous stage)
for curr_t in range(1,total_stages):
ob_ind = self.obs_map[observations[curr_t]]
alpha = np.dot( alpha , self.trans_prob)
alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] ))
# Sum the alpha's over the last stage
total_prob = alpha.sum()
return ( total_prob ) | [
"def",
"forward_algo",
"(",
"self",
",",
"observations",
")",
":",
"# Store total number of observations total_stages = len(observations) ",
"total_stages",
"=",
"len",
"(",
"observations",
")",
"# Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number",
"# Inittialize Alpha",
"ob_ind",
"=",
"self",
".",
"obs_map",
"[",
"observations",
"[",
"0",
"]",
"]",
"alpha",
"=",
"np",
".",
"multiply",
"(",
"np",
".",
"transpose",
"(",
"self",
".",
"em_prob",
"[",
":",
",",
"ob_ind",
"]",
")",
",",
"self",
".",
"start_prob",
")",
"# Iteratively find alpha(using knowledge of alpha in the previous stage)",
"for",
"curr_t",
"in",
"range",
"(",
"1",
",",
"total_stages",
")",
":",
"ob_ind",
"=",
"self",
".",
"obs_map",
"[",
"observations",
"[",
"curr_t",
"]",
"]",
"alpha",
"=",
"np",
".",
"dot",
"(",
"alpha",
",",
"self",
".",
"trans_prob",
")",
"alpha",
"=",
"np",
".",
"multiply",
"(",
"alpha",
",",
"np",
".",
"transpose",
"(",
"self",
".",
"em_prob",
"[",
":",
",",
"ob_ind",
"]",
")",
")",
"# Sum the alpha's over the last stage",
"total_prob",
"=",
"alpha",
".",
"sum",
"(",
")",
"return",
"(",
"total_prob",
")"
]
| Finds the probability of an observation sequence for given model parameters
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: The probability of occurrence of the observation sequence
:rtype: float
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.forward_algo(observations))
.. note::
No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead. | [
"Finds",
"the",
"probability",
"of",
"an",
"observation",
"sequence",
"for",
"given",
"model",
"parameters"
]
| 6ba6012665f9e09c980ff70901604d051ba57dcc | https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L144-L190 | train |
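The note above points out that the unscaled recursion underflows on long sequences. Below is a minimal standalone sketch of the usual scaling fix; it is not the library's own implementation (that is hmm.log_prob), only an illustration that reuses the same matrix conventions as the entry above.

import numpy as np

def scaled_forward_logprob(start_prob, trans_prob, em_prob, obs_map, observations):
    # Same recursion as forward_algo, but alpha is renormalised after every step
    # and the normalisation constants are accumulated in log space, so the result
    # is log P(observations) without underflow.
    ob_ind = obs_map[observations[0]]
    alpha = np.multiply(np.transpose(em_prob[:, ob_ind]), start_prob)
    log_prob = np.log(alpha.sum())
    alpha = alpha / alpha.sum()
    for curr_t in range(1, len(observations)):
        ob_ind = obs_map[observations[curr_t]]
        alpha = np.multiply(np.dot(alpha, trans_prob), np.transpose(em_prob[:, ob_ind]))
        log_prob += np.log(alpha.sum())
        alpha = alpha / alpha.sum()
    return log_prob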
rahul13ramesh/hidden_markov | hidden_markov/hmm_class.py | hmm.viterbi | def viterbi(self,observations):
""" The probability of occurence of the observation sequence
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: Returns a list of hidden states.
:rtype: list of states
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.viterbi(observations))
"""
# Find total states,observations
total_stages = len(observations)
num_states = len(self.states)
# initialize data
# Path stores the state sequence giving maximum probability
old_path = np.zeros( (total_stages, num_states) )
new_path = np.zeros( (total_stages, num_states) )
# Find initial delta
# Map observation to an index
# delta[s] stores the probability of most probable path ending in state 's'
ob_ind = self.obs_map[ observations[0] ]
delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Scale delta
delta = delta /np.sum(delta)
# initialize path
old_path[0,:] = [i for i in range(num_states) ]
# Find delta[t][x] for each state 'x' at the iteration 't'
# delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path
for curr_t in range(1,total_stages):
# Map observation to an index
ob_ind = self.obs_map[ observations[curr_t] ]
# Find temp and take max along each row to get delta
temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] )
# Update delta and scale it
delta = temp.max(axis = 1).transpose()
delta = delta /np.sum(delta)
# Find state which is most probable using argmax
# Convert to a list for easier processing
max_temp = temp.argmax(axis=1).transpose()
max_temp = np.ravel(max_temp).tolist()
# Update path
for s in range(num_states):
new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ]
new_path[curr_t,:] = [i for i in range(num_states) ]
old_path = new_path.copy()
# Find the state in last stage, giving maximum probability
final_max = np.argmax(np.ravel(delta))
best_path = old_path[:,final_max].tolist()
best_path_map = [ self.state_map[i] for i in best_path]
return best_path_map | python | def viterbi(self,observations):
""" The probability of occurence of the observation sequence
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: Returns a list of hidden states.
:rtype: list of states
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.viterbi(observations))
"""
# Find total states,observations
total_stages = len(observations)
num_states = len(self.states)
# initialize data
# Path stores the state sequence giving maximum probability
old_path = np.zeros( (total_stages, num_states) )
new_path = np.zeros( (total_stages, num_states) )
# Find initial delta
# Map observation to an index
# delta[s] stores the probability of most probable path ending in state 's'
ob_ind = self.obs_map[ observations[0] ]
delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Scale delta
delta = delta /np.sum(delta)
# initialize path
old_path[0,:] = [i for i in range(num_states) ]
# Find delta[t][x] for each state 'x' at the iteration 't'
# delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path
for curr_t in range(1,total_stages):
# Map observation to an index
ob_ind = self.obs_map[ observations[curr_t] ]
# Find temp and take max along each row to get delta
temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] )
# Update delta and scale it
delta = temp.max(axis = 1).transpose()
delta = delta /np.sum(delta)
# Find state which is most probable using argmax
# Convert to a list for easier processing
max_temp = temp.argmax(axis=1).transpose()
max_temp = np.ravel(max_temp).tolist()
# Update path
for s in range(num_states):
new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ]
new_path[curr_t,:] = [i for i in range(num_states) ]
old_path = new_path.copy()
# Find the state in last stage, giving maximum probability
final_max = np.argmax(np.ravel(delta))
best_path = old_path[:,final_max].tolist()
best_path_map = [ self.state_map[i] for i in best_path]
return best_path_map | [
"def",
"viterbi",
"(",
"self",
",",
"observations",
")",
":",
"# Find total states,observations",
"total_stages",
"=",
"len",
"(",
"observations",
")",
"num_states",
"=",
"len",
"(",
"self",
".",
"states",
")",
"# initialize data",
"# Path stores the state sequence giving maximum probability",
"old_path",
"=",
"np",
".",
"zeros",
"(",
"(",
"total_stages",
",",
"num_states",
")",
")",
"new_path",
"=",
"np",
".",
"zeros",
"(",
"(",
"total_stages",
",",
"num_states",
")",
")",
"# Find initial delta",
"# Map observation to an index",
"# delta[s] stores the probability of most probable path ending in state 's' ",
"ob_ind",
"=",
"self",
".",
"obs_map",
"[",
"observations",
"[",
"0",
"]",
"]",
"delta",
"=",
"np",
".",
"multiply",
"(",
"np",
".",
"transpose",
"(",
"self",
".",
"em_prob",
"[",
":",
",",
"ob_ind",
"]",
")",
",",
"self",
".",
"start_prob",
")",
"# Scale delta",
"delta",
"=",
"delta",
"/",
"np",
".",
"sum",
"(",
"delta",
")",
"# initialize path",
"old_path",
"[",
"0",
",",
":",
"]",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"num_states",
")",
"]",
"# Find delta[t][x] for each state 'x' at the iteration 't'",
"# delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path",
"for",
"curr_t",
"in",
"range",
"(",
"1",
",",
"total_stages",
")",
":",
"# Map observation to an index",
"ob_ind",
"=",
"self",
".",
"obs_map",
"[",
"observations",
"[",
"curr_t",
"]",
"]",
"# Find temp and take max along each row to get delta",
"temp",
"=",
"np",
".",
"multiply",
"(",
"np",
".",
"multiply",
"(",
"delta",
",",
"self",
".",
"trans_prob",
".",
"transpose",
"(",
")",
")",
",",
"self",
".",
"em_prob",
"[",
":",
",",
"ob_ind",
"]",
")",
"# Update delta and scale it",
"delta",
"=",
"temp",
".",
"max",
"(",
"axis",
"=",
"1",
")",
".",
"transpose",
"(",
")",
"delta",
"=",
"delta",
"/",
"np",
".",
"sum",
"(",
"delta",
")",
"# Find state which is most probable using argax",
"# Convert to a list for easier processing",
"max_temp",
"=",
"temp",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
".",
"transpose",
"(",
")",
"max_temp",
"=",
"np",
".",
"ravel",
"(",
"max_temp",
")",
".",
"tolist",
"(",
")",
"# Update path",
"for",
"s",
"in",
"range",
"(",
"num_states",
")",
":",
"new_path",
"[",
":",
"curr_t",
",",
"s",
"]",
"=",
"old_path",
"[",
"0",
":",
"curr_t",
",",
"max_temp",
"[",
"s",
"]",
"]",
"new_path",
"[",
"curr_t",
",",
":",
"]",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"num_states",
")",
"]",
"old_path",
"=",
"new_path",
".",
"copy",
"(",
")",
"# Find the state in last stage, giving maximum probability",
"final_max",
"=",
"np",
".",
"argmax",
"(",
"np",
".",
"ravel",
"(",
"delta",
")",
")",
"best_path",
"=",
"old_path",
"[",
":",
",",
"final_max",
"]",
".",
"tolist",
"(",
")",
"best_path_map",
"=",
"[",
"self",
".",
"state_map",
"[",
"i",
"]",
"for",
"i",
"in",
"best_path",
"]",
"return",
"best_path_map"
]
| Finds the most probable sequence of hidden states for the given observation sequence
**Arguments**:
:param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object.
:type observations: A list or tuple
:return: Returns a list of hidden states.
:rtype: list of states
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> print(test.viterbi(observations)) | [
"The",
"probability",
"of",
"occurence",
"of",
"the",
"observation",
"sequence"
]
| 6ba6012665f9e09c980ff70901604d051ba57dcc | https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L194-L277 | train |
rahul13ramesh/hidden_markov | hidden_markov/hmm_class.py | hmm.train_hmm | def train_hmm(self,observation_list, iterations, quantities):
""" Runs the Baum Welch Algorithm and finds the new model parameters
**Arguments**:
:param observation_list: A nested list, or a list of lists
:type observation_list: Contains a list of multiple observation sequences.
:param iterations: Maximum number of iterations for the algorithm
:type iterations: An integer
:param quantities: Number of times, each corresponding item in 'observation_list' occurs.
:type quantities: A list of integers
:return: Returns the emission, transition and start probabilities as numpy matrices
:rtype: Three numpy matrices
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>>
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>> num_iter=1000
>>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations)
>>> # e,t,s contain new emission transition and start probabilities
"""
obs_size = len(observation_list)
prob = float('inf')
q = quantities
# Train the model 'iteration' number of times
# store em_prob and trans_prob copies since you should use same values for one loop
for i in range(iterations):
emProbNew = np.asmatrix(np.zeros((self.em_prob.shape)))
transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape)))
startProbNew = np.asmatrix(np.zeros((self.start_prob.shape)))
for j in range(obs_size):
# re-assign values based on weight
emProbNew= emProbNew + q[j] * self._train_emission(observation_list[j])
transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j])
startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j])
# Normalizing
em_norm = emProbNew.sum(axis = 1)
trans_norm = transProbNew.sum(axis = 1)
start_norm = startProbNew.sum(axis = 1)
emProbNew = emProbNew/ em_norm.transpose()
startProbNew = startProbNew/ start_norm.transpose()
transProbNew = transProbNew/ trans_norm.transpose()
self.em_prob,self.trans_prob = emProbNew,transProbNew
self.start_prob = startProbNew
if prob - self.log_prob(observation_list,quantities)>0.0000001:
prob = self.log_prob(observation_list,quantities)
else:
return self.em_prob, self.trans_prob , self.start_prob
return self.em_prob, self.trans_prob , self.start_prob | python | def train_hmm(self,observation_list, iterations, quantities):
""" Runs the Baum Welch Algorithm and finds the new model parameters
**Arguments**:
:param observation_list: A nested list, or a list of lists
:type observation_list: Contains a list of multiple observation sequences.
:param iterations: Maximum number of iterations for the algorithm
:type iterations: An integer
:param quantities: Number of times, each corresponding item in 'observation_list' occurs.
:type quantities: A list of integers
:return: Returns the emission, transition and start probabilities as numpy matrices
:rtype: Three numpy matrices
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>>
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>> num_iter=1000
>>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations)
>>> # e,t,s contain new emission transition and start probabilities
"""
obs_size = len(observation_list)
prob = float('inf')
q = quantities
# Train the model 'iteration' number of times
# store em_prob and trans_prob copies since you should use same values for one loop
for i in range(iterations):
emProbNew = np.asmatrix(np.zeros((self.em_prob.shape)))
transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape)))
startProbNew = np.asmatrix(np.zeros((self.start_prob.shape)))
for j in range(obs_size):
                # re-assign values based on weight
emProbNew= emProbNew + q[j] * self._train_emission(observation_list[j])
transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j])
startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j])
# Normalizing
em_norm = emProbNew.sum(axis = 1)
trans_norm = transProbNew.sum(axis = 1)
start_norm = startProbNew.sum(axis = 1)
emProbNew = emProbNew/ em_norm.transpose()
startProbNew = startProbNew/ start_norm.transpose()
transProbNew = transProbNew/ trans_norm.transpose()
self.em_prob,self.trans_prob = emProbNew,transProbNew
self.start_prob = startProbNew
if prob - self.log_prob(observation_list,quantities)>0.0000001:
prob = self.log_prob(observation_list,quantities)
else:
return self.em_prob, self.trans_prob , self.start_prob
return self.em_prob, self.trans_prob , self.start_prob | [
"def",
"train_hmm",
"(",
"self",
",",
"observation_list",
",",
"iterations",
",",
"quantities",
")",
":",
"obs_size",
"=",
"len",
"(",
"observation_list",
")",
"prob",
"=",
"float",
"(",
"'inf'",
")",
"q",
"=",
"quantities",
"# Train the model 'iteration' number of times",
"# store em_prob and trans_prob copies since you should use same values for one loop",
"for",
"i",
"in",
"range",
"(",
"iterations",
")",
":",
"emProbNew",
"=",
"np",
".",
"asmatrix",
"(",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"em_prob",
".",
"shape",
")",
")",
")",
"transProbNew",
"=",
"np",
".",
"asmatrix",
"(",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"trans_prob",
".",
"shape",
")",
")",
")",
"startProbNew",
"=",
"np",
".",
"asmatrix",
"(",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"start_prob",
".",
"shape",
")",
")",
")",
"for",
"j",
"in",
"range",
"(",
"obs_size",
")",
":",
"# re-assing values based on weight",
"emProbNew",
"=",
"emProbNew",
"+",
"q",
"[",
"j",
"]",
"*",
"self",
".",
"_train_emission",
"(",
"observation_list",
"[",
"j",
"]",
")",
"transProbNew",
"=",
"transProbNew",
"+",
"q",
"[",
"j",
"]",
"*",
"self",
".",
"_train_transition",
"(",
"observation_list",
"[",
"j",
"]",
")",
"startProbNew",
"=",
"startProbNew",
"+",
"q",
"[",
"j",
"]",
"*",
"self",
".",
"_train_start_prob",
"(",
"observation_list",
"[",
"j",
"]",
")",
"# Normalizing",
"em_norm",
"=",
"emProbNew",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"trans_norm",
"=",
"transProbNew",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"start_norm",
"=",
"startProbNew",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"emProbNew",
"=",
"emProbNew",
"/",
"em_norm",
".",
"transpose",
"(",
")",
"startProbNew",
"=",
"startProbNew",
"/",
"start_norm",
".",
"transpose",
"(",
")",
"transProbNew",
"=",
"transProbNew",
"/",
"trans_norm",
".",
"transpose",
"(",
")",
"self",
".",
"em_prob",
",",
"self",
".",
"trans_prob",
"=",
"emProbNew",
",",
"transProbNew",
"self",
".",
"start_prob",
"=",
"startProbNew",
"if",
"prob",
"-",
"self",
".",
"log_prob",
"(",
"observation_list",
",",
"quantities",
")",
">",
"0.0000001",
":",
"prob",
"=",
"self",
".",
"log_prob",
"(",
"observation_list",
",",
"quantities",
")",
"else",
":",
"return",
"self",
".",
"em_prob",
",",
"self",
".",
"trans_prob",
",",
"self",
".",
"start_prob",
"return",
"self",
".",
"em_prob",
",",
"self",
".",
"trans_prob",
",",
"self",
".",
"start_prob"
]
| Runs the Baum Welch Algorithm and finds the new model parameters
**Arguments**:
:param observation_list: A nested list, or a list of lists
        :type observation_list: Contains a list of multiple observation sequences.
:param iterations: Maximum number of iterations for the algorithm
:type iterations: An integer
        :param quantities: Number of times each corresponding item in 'observation_list' occurs.
:type quantities: A list of integers
        :return: Returns the emission, transition and start probabilities as numpy matrices
        :rtype: Three numpy matrices
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>>
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>> num_iter=1000
>>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations)
>>> # e,t,s contain new emission transition and start probabilities | [
"Runs",
"the",
"Baum",
"Welch",
"Algorithm",
"and",
"finds",
"the",
"new",
"model",
"parameters"
]
| 6ba6012665f9e09c980ff70901604d051ba57dcc | https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L281-L363 | train |
rahul13ramesh/hidden_markov | hidden_markov/hmm_class.py | hmm.log_prob | def log_prob(self,observations_list, quantities):
""" Finds Weighted log probability of a list of observation sequences
**Arguments**:
        :param observations_list: A nested list, or a list of lists
        :type observations_list: Contains a list of multiple observation sequences.
        :param quantities: Number of times each corresponding item in 'observations_list' occurs.
:type quantities: A list of integers
:return: Weighted log probability of multiple observations.
:rtype: float
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>>
>>> prob = test.log_prob(observation_tuple, quantities_observations)
"""
prob = 0
for q,obs in enumerate(observations_list):
temp,c_scale = self._alpha_cal(obs)
prob = prob + -1 * quantities[q] * np.sum(np.log(c_scale))
return prob | python | def log_prob(self,observations_list, quantities):
""" Finds Weighted log probability of a list of observation sequences
**Arguments**:
        :param observations_list: A nested list, or a list of lists
        :type observations_list: Contains a list of multiple observation sequences.
        :param quantities: Number of times each corresponding item in 'observations_list' occurs.
:type quantities: A list of integers
:return: Weighted log probability of multiple observations.
:rtype: float
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>>
>>> prob = test.log_prob(observation_tuple, quantities_observations)
"""
prob = 0
for q,obs in enumerate(observations_list):
temp,c_scale = self._alpha_cal(obs)
prob = prob + -1 * quantities[q] * np.sum(np.log(c_scale))
return prob | [
"def",
"log_prob",
"(",
"self",
",",
"observations_list",
",",
"quantities",
")",
":",
"prob",
"=",
"0",
"for",
"q",
",",
"obs",
"in",
"enumerate",
"(",
"observations_list",
")",
":",
"temp",
",",
"c_scale",
"=",
"self",
".",
"_alpha_cal",
"(",
"obs",
")",
"prob",
"=",
"prob",
"+",
"-",
"1",
"*",
"quantities",
"[",
"q",
"]",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"log",
"(",
"c_scale",
")",
")",
"return",
"prob"
]
| Finds Weighted log probability of a list of observation sequences
**Arguments**:
        :param observations_list: A nested list, or a list of lists
        :type observations_list: Contains a list of multiple observation sequences.
        :param quantities: Number of times each corresponding item in 'observations_list' occurs.
:type quantities: A list of integers
:return: Weighted log probability of multiple observations.
:rtype: float
**Features**:
Scaling applied here. This ensures that no underflow error occurs.
**Example**:
>>> states = ('s', 't')
>>> possible_observation = ('A','B' )
>>> # Numpy arrays of the data
>>> start_probability = np.matrix( '0.5 0.5 ')
>>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
>>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
>>> # Initialize class object
>>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
>>> observations = ('A', 'B','B','A')
>>> obs4 = ('B', 'A','B')
>>> observation_tuple = []
>>> observation_tuple.extend( [observations,obs4] )
>>> quantities_observations = [10, 20]
>>>
>>> prob = test.log_prob(observation_tuple, quantities_observations) | [
"Finds",
"Weighted",
"log",
"probability",
"of",
"a",
"list",
"of",
"observation",
"sequences"
]
| 6ba6012665f9e09c980ff70901604d051ba57dcc | https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L513-L555 | train |
mortada/fredapi | fredapi/fred.py | Fred.__fetch_data | def __fetch_data(self, url):
"""
helper function for fetching data given a request URL
"""
url += '&api_key=' + self.api_key
try:
response = urlopen(url)
root = ET.fromstring(response.read())
except HTTPError as exc:
root = ET.fromstring(exc.read())
raise ValueError(root.get('message'))
return root | python | def __fetch_data(self, url):
"""
helper function for fetching data given a request URL
"""
url += '&api_key=' + self.api_key
try:
response = urlopen(url)
root = ET.fromstring(response.read())
except HTTPError as exc:
root = ET.fromstring(exc.read())
raise ValueError(root.get('message'))
return root | [
"def",
"__fetch_data",
"(",
"self",
",",
"url",
")",
":",
"url",
"+=",
"'&api_key='",
"+",
"self",
".",
"api_key",
"try",
":",
"response",
"=",
"urlopen",
"(",
"url",
")",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"response",
".",
"read",
"(",
")",
")",
"except",
"HTTPError",
"as",
"exc",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"exc",
".",
"read",
"(",
")",
")",
"raise",
"ValueError",
"(",
"root",
".",
"get",
"(",
"'message'",
")",
")",
"return",
"root"
]
| helper function for fetching data given a request URL | [
"helper",
"function",
"for",
"fetching",
"data",
"given",
"a",
"request",
"URL"
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L58-L69 | train |
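A brief sketch of how the error handling above surfaces to callers. The api_key constructor argument, the bogus series id, and the public get_series() call are assumptions for illustration, not taken from this record.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
try:
    fred.get_series('NOT_A_REAL_SERIES_ID')       # assumed public fetcher built on __fetch_data
except ValueError as exc:
    # __fetch_data re-raises FRED's own error message instead of a raw HTTPError
    print('FRED rejected the request:', exc)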
mortada/fredapi | fredapi/fred.py | Fred._parse | def _parse(self, date_str, format='%Y-%m-%d'):
"""
helper function for parsing FRED date string into datetime
"""
rv = pd.to_datetime(date_str, format=format)
if hasattr(rv, 'to_pydatetime'):
rv = rv.to_pydatetime()
return rv | python | def _parse(self, date_str, format='%Y-%m-%d'):
"""
helper function for parsing FRED date string into datetime
"""
rv = pd.to_datetime(date_str, format=format)
if hasattr(rv, 'to_pydatetime'):
rv = rv.to_pydatetime()
return rv | [
"def",
"_parse",
"(",
"self",
",",
"date_str",
",",
"format",
"=",
"'%Y-%m-%d'",
")",
":",
"rv",
"=",
"pd",
".",
"to_datetime",
"(",
"date_str",
",",
"format",
"=",
"format",
")",
"if",
"hasattr",
"(",
"rv",
",",
"'to_pydatetime'",
")",
":",
"rv",
"=",
"rv",
".",
"to_pydatetime",
"(",
")",
"return",
"rv"
]
| helper function for parsing FRED date string into datetime | [
"helper",
"function",
"for",
"parsing",
"FRED",
"date",
"string",
"into",
"datetime"
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L71-L78 | train |
mortada/fredapi | fredapi/fred.py | Fred.get_series_first_release | def get_series_first_release(self, series_id):
"""
Get first-release data for a Fred series id. This ignores any revision to the data series. For instance,
        the US GDP for Q1 2014 was first released as 17149.6 and later revised to 17101.3 and then to 17016.0.
This will ignore revisions after the first release.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
df = self.get_series_all_releases(series_id)
first_release = df.groupby('date').head(1)
data = first_release.set_index('date')['value']
return data | python | def get_series_first_release(self, series_id):
"""
Get first-release data for a Fred series id. This ignores any revision to the data series. For instance,
        the US GDP for Q1 2014 was first released as 17149.6 and later revised to 17101.3 and then to 17016.0.
This will ignore revisions after the first release.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
df = self.get_series_all_releases(series_id)
first_release = df.groupby('date').head(1)
data = first_release.set_index('date')['value']
return data | [
"def",
"get_series_first_release",
"(",
"self",
",",
"series_id",
")",
":",
"df",
"=",
"self",
".",
"get_series_all_releases",
"(",
"series_id",
")",
"first_release",
"=",
"df",
".",
"groupby",
"(",
"'date'",
")",
".",
"head",
"(",
"1",
")",
"data",
"=",
"first_release",
".",
"set_index",
"(",
"'date'",
")",
"[",
"'value'",
"]",
"return",
"data"
]
| Get first-release data for a Fred series id. This ignores any revision to the data series. For instance,
        the US GDP for Q1 2014 was first released as 17149.6 and later revised to 17101.3 and then to 17016.0.
This will ignore revisions after the first release.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series | [
"Get",
"first",
"-",
"release",
"data",
"for",
"a",
"Fred",
"series",
"id",
".",
"This",
"ignores",
"any",
"revision",
"to",
"the",
"data",
"series",
".",
"For",
"instance",
"The",
"US",
"GDP",
"for",
"Q1",
"2014",
"was",
"first",
"released",
"to",
"be",
"17149",
".",
"6",
"and",
"then",
"later",
"revised",
"to",
"17101",
".",
"3",
"and",
"17016",
".",
"0",
".",
"This",
"will",
"ignore",
"revisions",
"after",
"the",
"first",
"release",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L160-L179 | train |
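A minimal usage sketch of the method above, assuming a valid API key; the series id 'GDP' is taken from the docstring.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
gdp_first = fred.get_series_first_release('GDP')  # first-release values only; later revisions ignored
print(gdp_first.tail())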
mortada/fredapi | fredapi/fred.py | Fred.get_series_as_of_date | def get_series_as_of_date(self, series_id, as_of_date):
"""
Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series
before or on as_of_date, but ignores any revision on dates after as_of_date.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
as_of_date : datetime, or datetime-like str such as '10/25/2014'
Include data revisions on or before this date, and ignore revisions afterwards
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
as_of_date = pd.to_datetime(as_of_date)
df = self.get_series_all_releases(series_id)
data = df[df['realtime_start'] <= as_of_date]
return data | python | def get_series_as_of_date(self, series_id, as_of_date):
"""
Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series
before or on as_of_date, but ignores any revision on dates after as_of_date.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
as_of_date : datetime, or datetime-like str such as '10/25/2014'
Include data revisions on or before this date, and ignore revisions afterwards
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
as_of_date = pd.to_datetime(as_of_date)
df = self.get_series_all_releases(series_id)
data = df[df['realtime_start'] <= as_of_date]
return data | [
"def",
"get_series_as_of_date",
"(",
"self",
",",
"series_id",
",",
"as_of_date",
")",
":",
"as_of_date",
"=",
"pd",
".",
"to_datetime",
"(",
"as_of_date",
")",
"df",
"=",
"self",
".",
"get_series_all_releases",
"(",
"series_id",
")",
"data",
"=",
"df",
"[",
"df",
"[",
"'realtime_start'",
"]",
"<=",
"as_of_date",
"]",
"return",
"data"
]
| Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series
before or on as_of_date, but ignores any revision on dates after as_of_date.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
as_of_date : datetime, or datetime-like str such as '10/25/2014'
Include data revisions on or before this date, and ignore revisions afterwards
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series | [
"Get",
"latest",
"data",
"for",
"a",
"Fred",
"series",
"id",
"as",
"known",
"on",
"a",
"particular",
"date",
".",
"This",
"includes",
"any",
"revision",
"to",
"the",
"data",
"series",
"before",
"or",
"on",
"as_of_date",
"but",
"ignores",
"any",
"revision",
"on",
"dates",
"after",
"as_of_date",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L181-L201 | train |
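A minimal sketch of the as-of query above, assuming a valid API key; the cut-off date mirrors the docstring example.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
# GDP data as it was known on 2014-10-25; revisions published afterwards are excluded
gdp_asof = fred.get_series_as_of_date('GDP', '10/25/2014')
print(gdp_asof.head())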
mortada/fredapi | fredapi/fred.py | Fred.get_series_vintage_dates | def get_series_vintage_dates(self, series_id):
"""
Get a list of vintage dates for a series. Vintage dates are the dates in history when a
series' data values were revised or new data values were released.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
dates : list
list of vintage dates
"""
url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No vintage date exists for series id: ' + series_id)
dates = []
for child in root.getchildren():
dates.append(self._parse(child.text))
return dates | python | def get_series_vintage_dates(self, series_id):
"""
Get a list of vintage dates for a series. Vintage dates are the dates in history when a
series' data values were revised or new data values were released.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
dates : list
list of vintage dates
"""
url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No vintage date exists for series id: ' + series_id)
dates = []
for child in root.getchildren():
dates.append(self._parse(child.text))
return dates | [
"def",
"get_series_vintage_dates",
"(",
"self",
",",
"series_id",
")",
":",
"url",
"=",
"\"%s/series/vintagedates?series_id=%s\"",
"%",
"(",
"self",
".",
"root_url",
",",
"series_id",
")",
"root",
"=",
"self",
".",
"__fetch_data",
"(",
"url",
")",
"if",
"root",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No vintage date exists for series id: '",
"+",
"series_id",
")",
"dates",
"=",
"[",
"]",
"for",
"child",
"in",
"root",
".",
"getchildren",
"(",
")",
":",
"dates",
".",
"append",
"(",
"self",
".",
"_parse",
"(",
"child",
".",
"text",
")",
")",
"return",
"dates"
]
| Get a list of vintage dates for a series. Vintage dates are the dates in history when a
series' data values were revised or new data values were released.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
dates : list
list of vintage dates | [
"Get",
"a",
"list",
"of",
"vintage",
"dates",
"for",
"a",
"series",
".",
"Vintage",
"dates",
"are",
"the",
"dates",
"in",
"history",
"when",
"a",
"series",
"data",
"values",
"were",
"revised",
"or",
"new",
"data",
"values",
"were",
"released",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L250-L272 | train |
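A short sketch of listing revision dates with the method above, again assuming a valid API key; the series id comes from the docstring.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
vintages = fred.get_series_vintage_dates('CPIAUCSL')
print(len(vintages), 'vintage dates, first on', vintages[0])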
mortada/fredapi | fredapi/fred.py | Fred.__do_series_search | def __do_series_search(self, url):
"""
helper function for making one HTTP request for data, and parsing the returned results into a DataFrame
"""
root = self.__fetch_data(url)
series_ids = []
data = {}
num_results_returned = 0 # number of results returned in this HTTP request
num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned
for child in root.getchildren():
num_results_returned += 1
series_id = child.get('id')
series_ids.append(series_id)
data[series_id] = {"id": series_id}
fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
"frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
"seasonal_adjustment_short", "last_updated", "popularity", "notes"]
for field in fields:
data[series_id][field] = child.get(field)
if num_results_returned > 0:
data = pd.DataFrame(data, columns=series_ids).T
# parse datetime columns
for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
data[field] = data[field].apply(self._parse, format=None)
# set index name
data.index.name = 'series id'
else:
data = None
return data, num_results_total | python | def __do_series_search(self, url):
"""
helper function for making one HTTP request for data, and parsing the returned results into a DataFrame
"""
root = self.__fetch_data(url)
series_ids = []
data = {}
num_results_returned = 0 # number of results returned in this HTTP request
num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned
for child in root.getchildren():
num_results_returned += 1
series_id = child.get('id')
series_ids.append(series_id)
data[series_id] = {"id": series_id}
fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
"frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
"seasonal_adjustment_short", "last_updated", "popularity", "notes"]
for field in fields:
data[series_id][field] = child.get(field)
if num_results_returned > 0:
data = pd.DataFrame(data, columns=series_ids).T
# parse datetime columns
for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
data[field] = data[field].apply(self._parse, format=None)
# set index name
data.index.name = 'series id'
else:
data = None
return data, num_results_total | [
"def",
"__do_series_search",
"(",
"self",
",",
"url",
")",
":",
"root",
"=",
"self",
".",
"__fetch_data",
"(",
"url",
")",
"series_ids",
"=",
"[",
"]",
"data",
"=",
"{",
"}",
"num_results_returned",
"=",
"0",
"# number of results returned in this HTTP request",
"num_results_total",
"=",
"int",
"(",
"root",
".",
"get",
"(",
"'count'",
")",
")",
"# total number of results, this can be larger than number of results returned",
"for",
"child",
"in",
"root",
".",
"getchildren",
"(",
")",
":",
"num_results_returned",
"+=",
"1",
"series_id",
"=",
"child",
".",
"get",
"(",
"'id'",
")",
"series_ids",
".",
"append",
"(",
"series_id",
")",
"data",
"[",
"series_id",
"]",
"=",
"{",
"\"id\"",
":",
"series_id",
"}",
"fields",
"=",
"[",
"\"realtime_start\"",
",",
"\"realtime_end\"",
",",
"\"title\"",
",",
"\"observation_start\"",
",",
"\"observation_end\"",
",",
"\"frequency\"",
",",
"\"frequency_short\"",
",",
"\"units\"",
",",
"\"units_short\"",
",",
"\"seasonal_adjustment\"",
",",
"\"seasonal_adjustment_short\"",
",",
"\"last_updated\"",
",",
"\"popularity\"",
",",
"\"notes\"",
"]",
"for",
"field",
"in",
"fields",
":",
"data",
"[",
"series_id",
"]",
"[",
"field",
"]",
"=",
"child",
".",
"get",
"(",
"field",
")",
"if",
"num_results_returned",
">",
"0",
":",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"series_ids",
")",
".",
"T",
"# parse datetime columns",
"for",
"field",
"in",
"[",
"\"realtime_start\"",
",",
"\"realtime_end\"",
",",
"\"observation_start\"",
",",
"\"observation_end\"",
",",
"\"last_updated\"",
"]",
":",
"data",
"[",
"field",
"]",
"=",
"data",
"[",
"field",
"]",
".",
"apply",
"(",
"self",
".",
"_parse",
",",
"format",
"=",
"None",
")",
"# set index name",
"data",
".",
"index",
".",
"name",
"=",
"'series id'",
"else",
":",
"data",
"=",
"None",
"return",
"data",
",",
"num_results_total"
]
| helper function for making one HTTP request for data, and parsing the returned results into a DataFrame | [
"helper",
"function",
"for",
"making",
"one",
"HTTP",
"request",
"for",
"data",
"and",
"parsing",
"the",
"returned",
"results",
"into",
"a",
"DataFrame"
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L274-L305 | train |
mortada/fredapi | fredapi/fred.py | Fred.__get_search_results | def __get_search_results(self, url, limit, order_by, sort_order, filter):
"""
helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
"""
order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
'observation_start', 'observation_end', 'popularity']
if order_by is not None:
if order_by in order_by_options:
url = url + '&order_by=' + order_by
else:
raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
if filter is not None:
if len(filter) == 2:
url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
else:
raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')
sort_order_options = ['asc', 'desc']
if sort_order is not None:
if sort_order in sort_order_options:
url = url + '&sort_order=' + sort_order
else:
raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
data, num_results_total = self.__do_series_search(url)
if data is None:
return data
if limit == 0:
max_results_needed = num_results_total
else:
max_results_needed = limit
if max_results_needed > self.max_results_per_request:
for i in range(1, max_results_needed // self.max_results_per_request + 1):
offset = i * self.max_results_per_request
next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
data = data.append(next_data)
return data.head(max_results_needed) | python | def __get_search_results(self, url, limit, order_by, sort_order, filter):
"""
helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
"""
order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
'observation_start', 'observation_end', 'popularity']
if order_by is not None:
if order_by in order_by_options:
url = url + '&order_by=' + order_by
else:
raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
if filter is not None:
if len(filter) == 2:
url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
else:
raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')
sort_order_options = ['asc', 'desc']
if sort_order is not None:
if sort_order in sort_order_options:
url = url + '&sort_order=' + sort_order
else:
raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
data, num_results_total = self.__do_series_search(url)
if data is None:
return data
if limit == 0:
max_results_needed = num_results_total
else:
max_results_needed = limit
if max_results_needed > self.max_results_per_request:
for i in range(1, max_results_needed // self.max_results_per_request + 1):
offset = i * self.max_results_per_request
next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
data = data.append(next_data)
return data.head(max_results_needed) | [
"def",
"__get_search_results",
"(",
"self",
",",
"url",
",",
"limit",
",",
"order_by",
",",
"sort_order",
",",
"filter",
")",
":",
"order_by_options",
"=",
"[",
"'search_rank'",
",",
"'series_id'",
",",
"'title'",
",",
"'units'",
",",
"'frequency'",
",",
"'seasonal_adjustment'",
",",
"'realtime_start'",
",",
"'realtime_end'",
",",
"'last_updated'",
",",
"'observation_start'",
",",
"'observation_end'",
",",
"'popularity'",
"]",
"if",
"order_by",
"is",
"not",
"None",
":",
"if",
"order_by",
"in",
"order_by_options",
":",
"url",
"=",
"url",
"+",
"'&order_by='",
"+",
"order_by",
"else",
":",
"raise",
"ValueError",
"(",
"'%s is not in the valid list of order_by options: %s'",
"%",
"(",
"order_by",
",",
"str",
"(",
"order_by_options",
")",
")",
")",
"if",
"filter",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"filter",
")",
"==",
"2",
":",
"url",
"=",
"url",
"+",
"'&filter_variable=%s&filter_value=%s'",
"%",
"(",
"filter",
"[",
"0",
"]",
",",
"filter",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Filter should be a 2 item tuple like (filter_variable, filter_value)'",
")",
"sort_order_options",
"=",
"[",
"'asc'",
",",
"'desc'",
"]",
"if",
"sort_order",
"is",
"not",
"None",
":",
"if",
"sort_order",
"in",
"sort_order_options",
":",
"url",
"=",
"url",
"+",
"'&sort_order='",
"+",
"sort_order",
"else",
":",
"raise",
"ValueError",
"(",
"'%s is not in the valid list of sort_order options: %s'",
"%",
"(",
"sort_order",
",",
"str",
"(",
"sort_order_options",
")",
")",
")",
"data",
",",
"num_results_total",
"=",
"self",
".",
"__do_series_search",
"(",
"url",
")",
"if",
"data",
"is",
"None",
":",
"return",
"data",
"if",
"limit",
"==",
"0",
":",
"max_results_needed",
"=",
"num_results_total",
"else",
":",
"max_results_needed",
"=",
"limit",
"if",
"max_results_needed",
">",
"self",
".",
"max_results_per_request",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"max_results_needed",
"//",
"self",
".",
"max_results_per_request",
"+",
"1",
")",
":",
"offset",
"=",
"i",
"*",
"self",
".",
"max_results_per_request",
"next_data",
",",
"_",
"=",
"self",
".",
"__do_series_search",
"(",
"url",
"+",
"'&offset='",
"+",
"str",
"(",
"offset",
")",
")",
"data",
"=",
"data",
".",
"append",
"(",
"next_data",
")",
"return",
"data",
".",
"head",
"(",
"max_results_needed",
")"
]
| helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data. | [
"helper",
"function",
"for",
"getting",
"search",
"results",
"up",
"to",
"specified",
"limit",
"on",
"the",
"number",
"of",
"results",
".",
"The",
"Fred",
"HTTP",
"API",
"truncates",
"to",
"1000",
"results",
"per",
"request",
"so",
"this",
"may",
"issue",
"multiple",
"HTTP",
"requests",
"to",
"obtain",
"more",
"available",
"data",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L307-L349 | train |
mortada/fredapi | fredapi/fred.py | Fred.search | def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
"""
Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.
Parameters
----------
text : str
text to do fulltext search on, e.g., 'Real GDP'
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/series/search?search_text=%s&" % (self.root_url,
quote_plus(text))
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
return info | python | def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
"""
Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.
Parameters
----------
text : str
text to do fulltext search on, e.g., 'Real GDP'
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/series/search?search_text=%s&" % (self.root_url,
quote_plus(text))
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
return info | [
"def",
"search",
"(",
"self",
",",
"text",
",",
"limit",
"=",
"1000",
",",
"order_by",
"=",
"None",
",",
"sort_order",
"=",
"None",
",",
"filter",
"=",
"None",
")",
":",
"url",
"=",
"\"%s/series/search?search_text=%s&\"",
"%",
"(",
"self",
".",
"root_url",
",",
"quote_plus",
"(",
"text",
")",
")",
"info",
"=",
"self",
".",
"__get_search_results",
"(",
"url",
",",
"limit",
",",
"order_by",
",",
"sort_order",
",",
"filter",
")",
"return",
"info"
]
| Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.
Parameters
----------
text : str
text to do fulltext search on, e.g., 'Real GDP'
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series | [
"Do",
"a",
"fulltext",
"search",
"for",
"series",
"in",
"the",
"Fred",
"dataset",
".",
"Returns",
"information",
"about",
"matching",
"series",
"in",
"a",
"DataFrame",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L351-L379 | train |
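A hedged sketch of the fulltext search above, assuming a valid API key; the order_by and sort_order values and the column names come from the __do_series_search and __get_search_results records earlier in this file.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
results = fred.search('Real GDP', limit=10, order_by='popularity', sort_order='desc')
print(results[['title', 'frequency', 'units']])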
mortada/fredapi | fredapi/fred.py | Fred.search_by_release | def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a release id. Returns information about matching series in a DataFrame.
Parameters
----------
release_id : int
release id, e.g., 151
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for release id: ' + str(release_id))
return info | python | def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a release id. Returns information about matching series in a DataFrame.
Parameters
----------
release_id : int
release id, e.g., 151
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for release id: ' + str(release_id))
return info | [
"def",
"search_by_release",
"(",
"self",
",",
"release_id",
",",
"limit",
"=",
"0",
",",
"order_by",
"=",
"None",
",",
"sort_order",
"=",
"None",
",",
"filter",
"=",
"None",
")",
":",
"url",
"=",
"\"%s/release/series?release_id=%d\"",
"%",
"(",
"self",
".",
"root_url",
",",
"release_id",
")",
"info",
"=",
"self",
".",
"__get_search_results",
"(",
"url",
",",
"limit",
",",
"order_by",
",",
"sort_order",
",",
"filter",
")",
"if",
"info",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No series exists for release id: '",
"+",
"str",
"(",
"release_id",
")",
")",
"return",
"info"
]
| Search for series that belongs to a release id. Returns information about matching series in a DataFrame.
Parameters
----------
release_id : int
release id, e.g., 151
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series | [
"Search",
"for",
"series",
"that",
"belongs",
"to",
"a",
"release",
"id",
".",
"Returns",
"information",
"about",
"matching",
"series",
"in",
"a",
"DataFrame",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L381-L410 | train |
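A one-call sketch of the release lookup above; release id 151 is the docstring's example, everything else is assumed.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
release_series = fred.search_by_release(151)      # raises ValueError if the release has no series
print(release_series.shape)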
mortada/fredapi | fredapi/fred.py | Fred.search_by_category | def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info | python | def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info | [
"def",
"search_by_category",
"(",
"self",
",",
"category_id",
",",
"limit",
"=",
"0",
",",
"order_by",
"=",
"None",
",",
"sort_order",
"=",
"None",
",",
"filter",
"=",
"None",
")",
":",
"url",
"=",
"\"%s/category/series?category_id=%d&\"",
"%",
"(",
"self",
".",
"root_url",
",",
"category_id",
")",
"info",
"=",
"self",
".",
"__get_search_results",
"(",
"url",
",",
"limit",
",",
"order_by",
",",
"sort_order",
",",
"filter",
")",
"if",
"info",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No series exists for category id: '",
"+",
"str",
"(",
"category_id",
")",
")",
"return",
"info"
]
| Search for series that belongs to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series | [
"Search",
"for",
"series",
"that",
"belongs",
"to",
"a",
"category",
"id",
".",
"Returns",
"information",
"about",
"matching",
"series",
"in",
"a",
"DataFrame",
"."
]
| d3ca79efccb9525f2752a0d6da90e793e87c3fd8 | https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L412-L442 | train |
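A matching sketch for the category lookup above; category id 32145 is the docstring's example, and the key is again a placeholder.
from fredapi import Fred

fred = Fred(api_key='your-api-key-here')          # hypothetical key placeholder
category_series = fred.search_by_category(32145, limit=20, order_by='popularity', sort_order='desc')
print(category_series['title'].head())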
mathiasertl/django-ca | ca/django_ca/managers.py | CertificateManager.init | def init(self, ca, csr, **kwargs):
"""Create a signed certificate from a CSR and store it to the database.
All parameters are passed on to :py:func:`Certificate.objects.sign_cert()
<django_ca.managers.CertificateManager.sign_cert>`.
"""
c = self.model(ca=ca)
c.x509, csr = self.sign_cert(ca, csr, **kwargs)
c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8')
c.save()
post_issue_cert.send(sender=self.model, cert=c)
return c | python | def init(self, ca, csr, **kwargs):
"""Create a signed certificate from a CSR and store it to the database.
All parameters are passed on to :py:func:`Certificate.objects.sign_cert()
<django_ca.managers.CertificateManager.sign_cert>`.
"""
c = self.model(ca=ca)
c.x509, csr = self.sign_cert(ca, csr, **kwargs)
c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8')
c.save()
post_issue_cert.send(sender=self.model, cert=c)
return c | [
"def",
"init",
"(",
"self",
",",
"ca",
",",
"csr",
",",
"*",
"*",
"kwargs",
")",
":",
"c",
"=",
"self",
".",
"model",
"(",
"ca",
"=",
"ca",
")",
"c",
".",
"x509",
",",
"csr",
"=",
"self",
".",
"sign_cert",
"(",
"ca",
",",
"csr",
",",
"*",
"*",
"kwargs",
")",
"c",
".",
"csr",
"=",
"csr",
".",
"public_bytes",
"(",
"Encoding",
".",
"PEM",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"c",
".",
"save",
"(",
")",
"post_issue_cert",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"model",
",",
"cert",
"=",
"c",
")",
"return",
"c"
]
 | Create a signed certificate from a CSR and store it in the database.
All parameters are passed on to :py:func:`Certificate.objects.sign_cert()
<django_ca.managers.CertificateManager.sign_cert>`. | [
"Create",
"a",
"signed",
"certificate",
"from",
"a",
"CSR",
"and",
"store",
"it",
"to",
"the",
"database",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/managers.py#L442-L455 | train |
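A hedged shell sketch of the manager method above. The CA lookup and the PEM CSR file are assumptions, and whether extra keyword arguments (subject, expires, and so on) are required depends on sign_cert(), which this record does not show.
from django_ca.models import Certificate, CertificateAuthority

ca = CertificateAuthority.objects.first()         # assumes at least one CA is configured
with open('client.csr') as handle:                # hypothetical PEM-encoded CSR
    csr_pem = handle.read()
cert = Certificate.objects.init(ca, csr_pem)      # signs, stores the cert, sends post_issue_cert
print(cert.pk)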
mathiasertl/django-ca | ca/django_ca/admin.py | CertificateMixin.download_bundle_view | def download_bundle_view(self, request, pk):
"""A view that allows the user to download a certificate bundle in PEM format."""
return self._download_response(request, pk, bundle=True) | python | def download_bundle_view(self, request, pk):
"""A view that allows the user to download a certificate bundle in PEM format."""
return self._download_response(request, pk, bundle=True) | [
"def",
"download_bundle_view",
"(",
"self",
",",
"request",
",",
"pk",
")",
":",
"return",
"self",
".",
"_download_response",
"(",
"request",
",",
"pk",
",",
"bundle",
"=",
"True",
")"
]
| A view that allows the user to download a certificate bundle in PEM format. | [
"A",
"view",
"that",
"allows",
"the",
"user",
"to",
"download",
"a",
"certificate",
"bundle",
"in",
"PEM",
"format",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/admin.py#L118-L121 | train |
mathiasertl/django-ca | ca/django_ca/admin.py | CertificateMixin.get_actions | def get_actions(self, request):
"""Disable the "delete selected" admin action.
        Otherwise the action is present even though has_delete_permission is False; it just doesn't
work.
"""
actions = super(CertificateMixin, self).get_actions(request)
actions.pop('delete_selected', '')
return actions | python | def get_actions(self, request):
"""Disable the "delete selected" admin action.
        Otherwise the action is present even though has_delete_permission is False; it just doesn't
work.
"""
actions = super(CertificateMixin, self).get_actions(request)
actions.pop('delete_selected', '')
return actions | [
"def",
"get_actions",
"(",
"self",
",",
"request",
")",
":",
"actions",
"=",
"super",
"(",
"CertificateMixin",
",",
"self",
")",
".",
"get_actions",
"(",
"request",
")",
"actions",
".",
"pop",
"(",
"'delete_selected'",
",",
"''",
")",
"return",
"actions"
]
| Disable the "delete selected" admin action.
        Otherwise the action is present even though has_delete_permission is False; it just doesn't
work. | [
"Disable",
"the",
"delete",
"selected",
"admin",
"action",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/admin.py#L126-L134 | train |
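The same pattern in a generic Django ModelAdmin, shown as a sketch of why the action is popped; the app and model names are made up and not part of django-ca.
from django.contrib import admin
from myapp.models import Thing                    # hypothetical app and model

@admin.register(Thing)
class ThingAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return False                              # hides the per-object delete button ...

    def get_actions(self, request):
        actions = super().get_actions(request)
        actions.pop('delete_selected', None)      # ... and this drops the bulk-delete action too
        return actions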
mathiasertl/django-ca | ca/django_ca/profiles.py | get_cert_profile_kwargs | def get_cert_profile_kwargs(name=None):
"""Get kwargs suitable for get_cert X509 keyword arguments from the given profile."""
if name is None:
name = ca_settings.CA_DEFAULT_PROFILE
profile = deepcopy(ca_settings.CA_PROFILES[name])
kwargs = {
'cn_in_san': profile['cn_in_san'],
'subject': get_default_subject(name=name),
}
key_usage = profile.get('keyUsage')
if key_usage and key_usage.get('value'):
kwargs['key_usage'] = KeyUsage(key_usage)
ext_key_usage = profile.get('extendedKeyUsage')
if ext_key_usage and ext_key_usage.get('value'):
kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage)
tls_feature = profile.get('TLSFeature')
if tls_feature and tls_feature.get('value'):
kwargs['tls_feature'] = TLSFeature(tls_feature)
if profile.get('ocsp_no_check'):
kwargs['ocsp_no_check'] = profile['ocsp_no_check']
return kwargs | python | def get_cert_profile_kwargs(name=None):
"""Get kwargs suitable for get_cert X509 keyword arguments from the given profile."""
if name is None:
name = ca_settings.CA_DEFAULT_PROFILE
profile = deepcopy(ca_settings.CA_PROFILES[name])
kwargs = {
'cn_in_san': profile['cn_in_san'],
'subject': get_default_subject(name=name),
}
key_usage = profile.get('keyUsage')
if key_usage and key_usage.get('value'):
kwargs['key_usage'] = KeyUsage(key_usage)
ext_key_usage = profile.get('extendedKeyUsage')
if ext_key_usage and ext_key_usage.get('value'):
kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage)
tls_feature = profile.get('TLSFeature')
if tls_feature and tls_feature.get('value'):
kwargs['tls_feature'] = TLSFeature(tls_feature)
if profile.get('ocsp_no_check'):
kwargs['ocsp_no_check'] = profile['ocsp_no_check']
return kwargs | [
"def",
"get_cert_profile_kwargs",
"(",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"ca_settings",
".",
"CA_DEFAULT_PROFILE",
"profile",
"=",
"deepcopy",
"(",
"ca_settings",
".",
"CA_PROFILES",
"[",
"name",
"]",
")",
"kwargs",
"=",
"{",
"'cn_in_san'",
":",
"profile",
"[",
"'cn_in_san'",
"]",
",",
"'subject'",
":",
"get_default_subject",
"(",
"name",
"=",
"name",
")",
",",
"}",
"key_usage",
"=",
"profile",
".",
"get",
"(",
"'keyUsage'",
")",
"if",
"key_usage",
"and",
"key_usage",
".",
"get",
"(",
"'value'",
")",
":",
"kwargs",
"[",
"'key_usage'",
"]",
"=",
"KeyUsage",
"(",
"key_usage",
")",
"ext_key_usage",
"=",
"profile",
".",
"get",
"(",
"'extendedKeyUsage'",
")",
"if",
"ext_key_usage",
"and",
"ext_key_usage",
".",
"get",
"(",
"'value'",
")",
":",
"kwargs",
"[",
"'extended_key_usage'",
"]",
"=",
"ExtendedKeyUsage",
"(",
"ext_key_usage",
")",
"tls_feature",
"=",
"profile",
".",
"get",
"(",
"'TLSFeature'",
")",
"if",
"tls_feature",
"and",
"tls_feature",
".",
"get",
"(",
"'value'",
")",
":",
"kwargs",
"[",
"'tls_feature'",
"]",
"=",
"TLSFeature",
"(",
"tls_feature",
")",
"if",
"profile",
".",
"get",
"(",
"'ocsp_no_check'",
")",
":",
"kwargs",
"[",
"'ocsp_no_check'",
"]",
"=",
"profile",
"[",
"'ocsp_no_check'",
"]",
"return",
"kwargs"
]
| Get kwargs suitable for get_cert X509 keyword arguments from the given profile. | [
"Get",
"kwargs",
"suitable",
"for",
"get_cert",
"X509",
"keyword",
"arguments",
"from",
"the",
"given",
"profile",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/profiles.py#L25-L49 | train |
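A quick inspection sketch for the profile helper above; it requires configured Django settings, and the 'webserver' profile name is an assumption about CA_PROFILES rather than something taken from this record.
from django_ca.profiles import get_cert_profile_kwargs

defaults = get_cert_profile_kwargs()              # falls back to CA_DEFAULT_PROFILE
web = get_cert_profile_kwargs('webserver')        # assumed profile name in CA_PROFILES
print(sorted(web))                                # e.g. ['cn_in_san', 'key_usage', 'subject', ...]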
mathiasertl/django-ca | ca/django_ca/utils.py | format_name | def format_name(subject):
"""Convert a subject into the canonical form for distinguished names.
This function does not take care of sorting the subject in any meaningful order.
Examples::
>>> format_name([('CN', 'example.com'), ])
'/CN=example.com'
>>> format_name([('CN', 'example.com'), ('O', "My Organization"), ])
'/CN=example.com/O=My Organization'
"""
if isinstance(subject, x509.Name):
subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject]
return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject])) | python | def format_name(subject):
"""Convert a subject into the canonical form for distinguished names.
This function does not take care of sorting the subject in any meaningful order.
Examples::
>>> format_name([('CN', 'example.com'), ])
'/CN=example.com'
>>> format_name([('CN', 'example.com'), ('O', "My Organization"), ])
'/CN=example.com/O=My Organization'
"""
if isinstance(subject, x509.Name):
subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject]
return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject])) | [
"def",
"format_name",
"(",
"subject",
")",
":",
"if",
"isinstance",
"(",
"subject",
",",
"x509",
".",
"Name",
")",
":",
"subject",
"=",
"[",
"(",
"OID_NAME_MAPPINGS",
"[",
"s",
".",
"oid",
"]",
",",
"s",
".",
"value",
")",
"for",
"s",
"in",
"subject",
"]",
"return",
"'/%s'",
"%",
"(",
"'/'",
".",
"join",
"(",
"[",
"'%s=%s'",
"%",
"(",
"force_text",
"(",
"k",
")",
",",
"force_text",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"subject",
"]",
")",
")"
]
| Convert a subject into the canonical form for distinguished names.
This function does not take care of sorting the subject in any meaningful order.
Examples::
>>> format_name([('CN', 'example.com'), ])
'/CN=example.com'
>>> format_name([('CN', 'example.com'), ('O', "My Organization"), ])
'/CN=example.com/O=My Organization' | [
"Convert",
"a",
"subject",
"into",
"the",
"canonical",
"form",
"for",
"distinguished",
"names",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L125-L140 | train |
mathiasertl/django-ca | ca/django_ca/utils.py | format_general_name | def format_general_name(name):
"""Format a single general name.
>>> import ipaddress
>>> format_general_name(x509.DNSName('example.com'))
'DNS:example.com'
>>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1')))
'IP:127.0.0.1'
"""
if isinstance(name, x509.DirectoryName):
value = format_name(name.value)
else:
value = name.value
return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], value) | python | def format_general_name(name):
"""Format a single general name.
>>> import ipaddress
>>> format_general_name(x509.DNSName('example.com'))
'DNS:example.com'
>>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1')))
'IP:127.0.0.1'
"""
if isinstance(name, x509.DirectoryName):
value = format_name(name.value)
else:
value = name.value
return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], value) | [
"def",
"format_general_name",
"(",
"name",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"x509",
".",
"DirectoryName",
")",
":",
"value",
"=",
"format_name",
"(",
"name",
".",
"value",
")",
"else",
":",
"value",
"=",
"name",
".",
"value",
"return",
"'%s:%s'",
"%",
"(",
"SAN_NAME_MAPPINGS",
"[",
"type",
"(",
"name",
")",
"]",
",",
"value",
")"
]
| Format a single general name.
>>> import ipaddress
>>> format_general_name(x509.DNSName('example.com'))
'DNS:example.com'
>>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1')))
'IP:127.0.0.1' | [
"Format",
"a",
"single",
"general",
"name",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L143-L157 | train |
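A short hedged sketch for the format_general_name() record above: the DNS call restates the doctest, and the DirectoryName call shows the branch that delegates to format_name(). The 'dirname:' prefix is an assumption about SAN_NAME_MAPPINGS, which is not included in this record.

from cryptography import x509
from cryptography.x509.oid import NameOID
from django_ca.utils import format_general_name

print(format_general_name(x509.DNSName('example.com')))  # DNS:example.com

# DirectoryName values are rendered through format_name() before the type prefix is added.
dn = x509.DirectoryName(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'example.com')]))
print(format_general_name(dn))  # presumably dirname:/CN=example.com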
mathiasertl/django-ca | ca/django_ca/utils.py | add_colons | def add_colons(s):
"""Add colons after every second digit.
This function is used in functions to prettify serials.
>>> add_colons('teststring')
'te:st:st:ri:ng'
"""
return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)]) | python | def add_colons(s):
"""Add colons after every second digit.
This function is used in functions to prettify serials.
>>> add_colons('teststring')
'te:st:st:ri:ng'
"""
return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)]) | [
"def",
"add_colons",
"(",
"s",
")",
":",
"return",
"':'",
".",
"join",
"(",
"[",
"s",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"s",
")",
",",
"2",
")",
"]",
")"
]
| Add colons after every second digit.
This function is used in functions to prettify serials.
>>> add_colons('teststring')
'te:st:st:ri:ng' | [
"Add",
"colons",
"after",
"every",
"second",
"digit",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L200-L208 | train |
mathiasertl/django-ca | ca/django_ca/utils.py | int_to_hex | def int_to_hex(i):
"""Create a hex-representation of the given serial.
>>> int_to_hex(12345678)
'BC:61:4E'
"""
s = hex(i)[2:].upper()
if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA
# Strip the "L" suffix, since hex(1L) -> 0x1L.
# NOTE: Do not convert to int earlier. int(<very-large-long>) is still long
s = s[:-1]
return add_colons(s) | python | def int_to_hex(i):
"""Create a hex-representation of the given serial.
>>> int_to_hex(12345678)
'BC:61:4E'
"""
s = hex(i)[2:].upper()
if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA
# Strip the "L" suffix, since hex(1L) -> 0x1L.
# NOTE: Do not convert to int earlier. int(<very-large-long>) is still long
s = s[:-1]
return add_colons(s) | [
"def",
"int_to_hex",
"(",
"i",
")",
":",
"s",
"=",
"hex",
"(",
"i",
")",
"[",
"2",
":",
"]",
".",
"upper",
"(",
")",
"if",
"six",
".",
"PY2",
"is",
"True",
"and",
"isinstance",
"(",
"i",
",",
"long",
")",
":",
"# pragma: only py2 # NOQA",
"# Strip the \"L\" suffix, since hex(1L) -> 0x1L.",
"# NOTE: Do not convert to int earlier. int(<very-large-long>) is still long",
"s",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"return",
"add_colons",
"(",
"s",
")"
]
| Create a hex-representation of the given serial.
>>> int_to_hex(12345678)
'BC:61:4E' | [
"Create",
"a",
"hex",
"-",
"representation",
"of",
"the",
"given",
"serial",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L211-L222 | train |
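A small sketch tying the add_colons() and int_to_hex() records together: int_to_hex() upper-cases the hex digits of a serial and hands them to add_colons(). It assumes django-ca is importable; the values mirror the doctests.

from django_ca.utils import add_colons, int_to_hex

serial = 12345678
print(hex(serial))           # 0xbc614e
print(add_colons('BC614E'))  # BC:61:4E
print(int_to_hex(serial))    # BC:61:4E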
mathiasertl/django-ca | ca/django_ca/utils.py | parse_name | def parse_name(name):
"""Parses a subject string as used in OpenSSLs command line utilities.
The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example
``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient
on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``,
whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``).
>>> parse_name('/CN=example.com')
[('CN', 'example.com')]
>>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com')
[('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]
Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted
based on x509 name specifications regardless of the given order:
>>> parse_name('L="Vienna / District"/[email protected]')
[('L', 'Vienna / District'), ('emailAddress', '[email protected]')]
>>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')
True
Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes,
so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected.
>>> parse_name('L="Vienna / District"/CN=example.com')
[('L', 'Vienna / District'), ('CN', 'example.com')]
But note that it's still easy to trick this function, if you really want to. The following example is
*not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's
certainly different:
>>> parse_name('L="Vienna " District"/CN=example.com')
[('L', 'Vienna'), ('CN', 'example.com')]
Examples of where this string is used are:
.. code-block:: console
# openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'
# openssl x509 -in cert.pem -noout -subject -nameopt compat
/C=AT/L=Vienna/CN=example.com
"""
name = name.strip()
if not name: # empty subjects are ok
return []
try:
items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]
except KeyError as e:
raise ValueError('Unknown x509 name field: %s' % e.args[0])
    # Check that no OIDs not in MULTIPLE_OIDS occur more than once
for key, oid in NAME_OID_MAPPINGS.items():
if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:
raise ValueError('Subject contains multiple "%s" fields' % key)
return sort_name(items) | python | def parse_name(name):
"""Parses a subject string as used in OpenSSLs command line utilities.
The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example
``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient
on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``,
whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``).
>>> parse_name('/CN=example.com')
[('CN', 'example.com')]
>>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com')
[('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]
Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted
based on x509 name specifications regardless of the given order:
>>> parse_name('L="Vienna / District"/[email protected]')
[('L', 'Vienna / District'), ('emailAddress', '[email protected]')]
>>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')
True
Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes,
so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected.
>>> parse_name('L="Vienna / District"/CN=example.com')
[('L', 'Vienna / District'), ('CN', 'example.com')]
But note that it's still easy to trick this function, if you really want to. The following example is
*not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's
certainly different:
>>> parse_name('L="Vienna " District"/CN=example.com')
[('L', 'Vienna'), ('CN', 'example.com')]
Examples of where this string is used are:
.. code-block:: console
# openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'
# openssl x509 -in cert.pem -noout -subject -nameopt compat
/C=AT/L=Vienna/CN=example.com
"""
name = name.strip()
if not name: # empty subjects are ok
return []
try:
items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]
except KeyError as e:
raise ValueError('Unknown x509 name field: %s' % e.args[0])
    # Check that no OIDs not in MULTIPLE_OIDS occur more than once
for key, oid in NAME_OID_MAPPINGS.items():
if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:
raise ValueError('Subject contains multiple "%s" fields' % key)
return sort_name(items) | [
"def",
"parse_name",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"strip",
"(",
")",
"if",
"not",
"name",
":",
"# empty subjects are ok",
"return",
"[",
"]",
"try",
":",
"items",
"=",
"[",
"(",
"NAME_CASE_MAPPINGS",
"[",
"t",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"]",
",",
"force_text",
"(",
"t",
"[",
"2",
"]",
")",
")",
"for",
"t",
"in",
"NAME_RE",
".",
"findall",
"(",
"name",
")",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Unknown x509 name field: %s'",
"%",
"e",
".",
"args",
"[",
"0",
"]",
")",
"# Check that no OIDs not in MULTIPLE_OIDS occur more then once",
"for",
"key",
",",
"oid",
"in",
"NAME_OID_MAPPINGS",
".",
"items",
"(",
")",
":",
"if",
"sum",
"(",
"1",
"for",
"t",
"in",
"items",
"if",
"t",
"[",
"0",
"]",
"==",
"key",
")",
">",
"1",
"and",
"oid",
"not",
"in",
"MULTIPLE_OIDS",
":",
"raise",
"ValueError",
"(",
"'Subject contains multiple \"%s\" fields'",
"%",
"key",
")",
"return",
"sort_name",
"(",
"items",
")"
]
| Parses a subject string as used in OpenSSLs command line utilities.
The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example
``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient
on deviations from the format: object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``),
whitespace at the start and end is stripped, and the subject does not have to start with a slash (``/``).
>>> parse_name('/CN=example.com')
[('CN', 'example.com')]
>>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com')
[('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]
Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted
based on x509 name specifications regardless of the given order:
>>> parse_name('L="Vienna / District"/[email protected]')
[('L', 'Vienna / District'), ('emailAddress', '[email protected]')]
>>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')
True
Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes,
so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected.
>>> parse_name('L="Vienna / District"/CN=example.com')
[('L', 'Vienna / District'), ('CN', 'example.com')]
But note that it's still easy to trick this function, if you really want to. The following example is
*not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's
certainly different:
>>> parse_name('L="Vienna " District"/CN=example.com')
[('L', 'Vienna'), ('CN', 'example.com')]
Examples of where this string is used are:
.. code-block:: console
# openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'
# openssl x509 -in cert.pem -noout -subject -nameopt compat
/C=AT/L=Vienna/CN=example.com | [
"Parses",
"a",
"subject",
"string",
"as",
"used",
"in",
"OpenSSLs",
"command",
"line",
"utilities",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L245-L301 | train |
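A round-trip sketch for the parse_name() record above, combined with format_name(): keys are normalized and sorted, then rendered back into a canonical subject string. The expected output in the comments follows the doctests; it assumes django-ca is importable.

from django_ca.utils import format_name, parse_name

parsed = parse_name('cn=example.com/C=AT/o="Example Org"')
print(parsed)               # [('C', 'AT'), ('O', 'Example Org'), ('CN', 'example.com')]
print(format_name(parsed))  # /C=AT/O=Example Org/CN=example.com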
mathiasertl/django-ca | ca/django_ca/utils.py | parse_general_name | def parse_general_name(name):
"""Parse a general name from user input.
This function will do its best to detect the intended type of any value passed to it:
>>> parse_general_name('example.com')
<DNSName(value='example.com')>
>>> parse_general_name('*.example.com')
<DNSName(value='*.example.com')>
>>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains
<DNSName(value='.example.com')>
>>> parse_general_name('[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('1.2.3.4')
<IPAddress(value=1.2.3.4)>
>>> parse_general_name('fd00::1')
<IPAddress(value=fd00::1)>
>>> parse_general_name('/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't
work, an exception will be raised:
>>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
idna.core.IDNAError: ...
If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`:
>>> parse_general_name('email:[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('URI:https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('dirname:/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
Some more exotic values can only be generated by using this prefix:
>>> parse_general_name('rid:2.5.4.3')
<RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)>
>>> parse_general_name('otherName:2.5.4.3;UTF8:example.com')
<OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')>
If you give a prefixed value, this function is less forgiving of any typos and does not catch any
exceptions:
>>> parse_general_name('email:foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com
"""
name = force_text(name)
typ = None
match = GENERAL_NAME_RE.match(name)
if match is not None:
typ, name = match.groups()
typ = typ.lower()
if typ is None:
if re.match('[a-z0-9]{2,}://', name): # Looks like a URI
try:
return x509.UniformResourceIdentifier(name)
except Exception: # pragma: no cover - this really accepts anything
pass
if '@' in name: # Looks like an Email address
try:
return x509.RFC822Name(validate_email(name))
except Exception:
pass
if name.strip().startswith('/'): # maybe it's a dirname?
return x509.DirectoryName(x509_name(name))
# Try to parse this as IPAddress/Network
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
# Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
# Almost anything passes as DNS name, so this is our default fallback
return x509.DNSName(name)
if typ == 'uri':
return x509.UniformResourceIdentifier(name)
elif typ == 'email':
return x509.RFC822Name(validate_email(name))
elif typ == 'ip':
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
raise ValueError('Could not parse IP address.')
elif typ == 'rid':
return x509.RegisteredID(x509.ObjectIdentifier(name))
elif typ == 'othername':
regex = "(.*);(.*):(.*)"
if re.match(regex, name) is not None:
oid, asn_typ, val = re.match(regex, name).groups()
oid = x509.ObjectIdentifier(oid)
if asn_typ == 'UTF8':
val = val.encode('utf-8')
elif asn_typ == 'OctetString':
val = bytes(bytearray.fromhex(val))
val = OctetString(val).dump()
else:
raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ)
val = force_bytes(val)
return x509.OtherName(oid, val)
else:
raise ValueError('Incorrect otherName format: %s' % name)
elif typ == 'dirname':
return x509.DirectoryName(x509_name(name))
else:
# Try to encode the domain name. DNSName() does not validate the domain name, but this
# check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
return x509.DNSName(name) | python | def parse_general_name(name):
"""Parse a general name from user input.
This function will do its best to detect the intended type of any value passed to it:
>>> parse_general_name('example.com')
<DNSName(value='example.com')>
>>> parse_general_name('*.example.com')
<DNSName(value='*.example.com')>
>>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains
<DNSName(value='.example.com')>
>>> parse_general_name('[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('1.2.3.4')
<IPAddress(value=1.2.3.4)>
>>> parse_general_name('fd00::1')
<IPAddress(value=fd00::1)>
>>> parse_general_name('/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't
work, an exception will be raised:
>>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
idna.core.IDNAError: ...
If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`:
>>> parse_general_name('email:[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('URI:https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('dirname:/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
Some more exotic values can only be generated by using this prefix:
>>> parse_general_name('rid:2.5.4.3')
<RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)>
>>> parse_general_name('otherName:2.5.4.3;UTF8:example.com')
<OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')>
If you give a prefixed value, this function is less forgiving of any typos and does not catch any
exceptions:
>>> parse_general_name('email:foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com
"""
name = force_text(name)
typ = None
match = GENERAL_NAME_RE.match(name)
if match is not None:
typ, name = match.groups()
typ = typ.lower()
if typ is None:
if re.match('[a-z0-9]{2,}://', name): # Looks like a URI
try:
return x509.UniformResourceIdentifier(name)
except Exception: # pragma: no cover - this really accepts anything
pass
if '@' in name: # Looks like an Email address
try:
return x509.RFC822Name(validate_email(name))
except Exception:
pass
if name.strip().startswith('/'): # maybe it's a dirname?
return x509.DirectoryName(x509_name(name))
# Try to parse this as IPAddress/Network
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
# Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
# Almost anything passes as DNS name, so this is our default fallback
return x509.DNSName(name)
if typ == 'uri':
return x509.UniformResourceIdentifier(name)
elif typ == 'email':
return x509.RFC822Name(validate_email(name))
elif typ == 'ip':
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
raise ValueError('Could not parse IP address.')
elif typ == 'rid':
return x509.RegisteredID(x509.ObjectIdentifier(name))
elif typ == 'othername':
regex = "(.*);(.*):(.*)"
if re.match(regex, name) is not None:
oid, asn_typ, val = re.match(regex, name).groups()
oid = x509.ObjectIdentifier(oid)
if asn_typ == 'UTF8':
val = val.encode('utf-8')
elif asn_typ == 'OctetString':
val = bytes(bytearray.fromhex(val))
val = OctetString(val).dump()
else:
raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ)
val = force_bytes(val)
return x509.OtherName(oid, val)
else:
raise ValueError('Incorrect otherName format: %s' % name)
elif typ == 'dirname':
return x509.DirectoryName(x509_name(name))
else:
# Try to encode the domain name. DNSName() does not validate the domain name, but this
# check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
return x509.DNSName(name) | [
"def",
"parse_general_name",
"(",
"name",
")",
":",
"name",
"=",
"force_text",
"(",
"name",
")",
"typ",
"=",
"None",
"match",
"=",
"GENERAL_NAME_RE",
".",
"match",
"(",
"name",
")",
"if",
"match",
"is",
"not",
"None",
":",
"typ",
",",
"name",
"=",
"match",
".",
"groups",
"(",
")",
"typ",
"=",
"typ",
".",
"lower",
"(",
")",
"if",
"typ",
"is",
"None",
":",
"if",
"re",
".",
"match",
"(",
"'[a-z0-9]{2,}://'",
",",
"name",
")",
":",
"# Looks like a URI",
"try",
":",
"return",
"x509",
".",
"UniformResourceIdentifier",
"(",
"name",
")",
"except",
"Exception",
":",
"# pragma: no cover - this really accepts anything",
"pass",
"if",
"'@'",
"in",
"name",
":",
"# Looks like an Email address",
"try",
":",
"return",
"x509",
".",
"RFC822Name",
"(",
"validate_email",
"(",
"name",
")",
")",
"except",
"Exception",
":",
"pass",
"if",
"name",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'/'",
")",
":",
"# maybe it's a dirname?",
"return",
"x509",
".",
"DirectoryName",
"(",
"x509_name",
"(",
"name",
")",
")",
"# Try to parse this as IPAddress/Network",
"try",
":",
"return",
"x509",
".",
"IPAddress",
"(",
"ip_address",
"(",
"name",
")",
")",
"except",
"ValueError",
":",
"pass",
"try",
":",
"return",
"x509",
".",
"IPAddress",
"(",
"ip_network",
"(",
"name",
")",
")",
"except",
"ValueError",
":",
"pass",
"# Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail.",
"if",
"name",
".",
"startswith",
"(",
"'*.'",
")",
":",
"idna",
".",
"encode",
"(",
"name",
"[",
"2",
":",
"]",
")",
"elif",
"name",
".",
"startswith",
"(",
"'.'",
")",
":",
"idna",
".",
"encode",
"(",
"name",
"[",
"1",
":",
"]",
")",
"else",
":",
"idna",
".",
"encode",
"(",
"name",
")",
"# Almost anything passes as DNS name, so this is our default fallback",
"return",
"x509",
".",
"DNSName",
"(",
"name",
")",
"if",
"typ",
"==",
"'uri'",
":",
"return",
"x509",
".",
"UniformResourceIdentifier",
"(",
"name",
")",
"elif",
"typ",
"==",
"'email'",
":",
"return",
"x509",
".",
"RFC822Name",
"(",
"validate_email",
"(",
"name",
")",
")",
"elif",
"typ",
"==",
"'ip'",
":",
"try",
":",
"return",
"x509",
".",
"IPAddress",
"(",
"ip_address",
"(",
"name",
")",
")",
"except",
"ValueError",
":",
"pass",
"try",
":",
"return",
"x509",
".",
"IPAddress",
"(",
"ip_network",
"(",
"name",
")",
")",
"except",
"ValueError",
":",
"pass",
"raise",
"ValueError",
"(",
"'Could not parse IP address.'",
")",
"elif",
"typ",
"==",
"'rid'",
":",
"return",
"x509",
".",
"RegisteredID",
"(",
"x509",
".",
"ObjectIdentifier",
"(",
"name",
")",
")",
"elif",
"typ",
"==",
"'othername'",
":",
"regex",
"=",
"\"(.*);(.*):(.*)\"",
"if",
"re",
".",
"match",
"(",
"regex",
",",
"name",
")",
"is",
"not",
"None",
":",
"oid",
",",
"asn_typ",
",",
"val",
"=",
"re",
".",
"match",
"(",
"regex",
",",
"name",
")",
".",
"groups",
"(",
")",
"oid",
"=",
"x509",
".",
"ObjectIdentifier",
"(",
"oid",
")",
"if",
"asn_typ",
"==",
"'UTF8'",
":",
"val",
"=",
"val",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"asn_typ",
"==",
"'OctetString'",
":",
"val",
"=",
"bytes",
"(",
"bytearray",
".",
"fromhex",
"(",
"val",
")",
")",
"val",
"=",
"OctetString",
"(",
"val",
")",
".",
"dump",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported ASN type in otherName: %s'",
"%",
"asn_typ",
")",
"val",
"=",
"force_bytes",
"(",
"val",
")",
"return",
"x509",
".",
"OtherName",
"(",
"oid",
",",
"val",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Incorrect otherName format: %s'",
"%",
"name",
")",
"elif",
"typ",
"==",
"'dirname'",
":",
"return",
"x509",
".",
"DirectoryName",
"(",
"x509_name",
"(",
"name",
")",
")",
"else",
":",
"# Try to encode the domain name. DNSName() does not validate the domain name, but this",
"# check will fail.",
"if",
"name",
".",
"startswith",
"(",
"'*.'",
")",
":",
"idna",
".",
"encode",
"(",
"name",
"[",
"2",
":",
"]",
")",
"elif",
"name",
".",
"startswith",
"(",
"'.'",
")",
":",
"idna",
".",
"encode",
"(",
"name",
"[",
"1",
":",
"]",
")",
"else",
":",
"idna",
".",
"encode",
"(",
"name",
")",
"return",
"x509",
".",
"DNSName",
"(",
"name",
")"
]
| Parse a general name from user input.
This function will do its best to detect the intended type of any value passed to it:
>>> parse_general_name('example.com')
<DNSName(value='example.com')>
>>> parse_general_name('*.example.com')
<DNSName(value='*.example.com')>
>>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains
<DNSName(value='.example.com')>
>>> parse_general_name('[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('1.2.3.4')
<IPAddress(value=1.2.3.4)>
>>> parse_general_name('fd00::1')
<IPAddress(value=fd00::1)>
>>> parse_general_name('/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't
work, an exception will be raised:
>>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
idna.core.IDNAError: ...
If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`:
>>> parse_general_name('email:[email protected]')
<RFC822Name(value='[email protected]')>
>>> parse_general_name('URI:https://example.com')
<UniformResourceIdentifier(value='https://example.com')>
>>> parse_general_name('dirname:/CN=example.com')
<DirectoryName(value=<Name(CN=example.com)>)>
Some more exotic values can only be generated by using this prefix:
>>> parse_general_name('rid:2.5.4.3')
<RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)>
>>> parse_general_name('otherName:2.5.4.3;UTF8:example.com')
<OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')>
If you give a prefixed value, this function is less forgiving of any typos and does not catch any
exceptions:
>>> parse_general_name('email:foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com | [
"Parse",
"a",
"general",
"name",
"from",
"user",
"input",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L345-L490 | train |
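A condensed sketch for the parse_general_name() record above, covering both the type-detection path and the explicit-prefix path. The reprs in the comments follow the doctests; it assumes django-ca is importable.

from django_ca.utils import parse_general_name

print(parse_general_name('www.example.com'))          # <DNSName(value='www.example.com')>
print(parse_general_name('[email protected]'))          # <RFC822Name(value='[email protected]')>
print(parse_general_name('IP:127.0.0.1'))             # <IPAddress(value=127.0.0.1)>
print(parse_general_name('dirname:/CN=example.com'))  # <DirectoryName(value=<Name(CN=example.com)>)>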
mathiasertl/django-ca | ca/django_ca/utils.py | parse_hash_algorithm | def parse_hash_algorithm(value=None):
"""Parse a hash algorithm value.
The most common use case is to pass a str naming a class in
:py:mod:`~cg:cryptography.hazmat.primitives.hashes`.
For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM
<settings-ca-digest-algorithm>`, and passing an
:py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that
instance unchanged.
Example usage::
>>> parse_hash_algorithm() # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown hash algorithm: Wrong
>>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown type passed: object
Parameters
----------
value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional
    The value to parse; see the function description on how possible values are used.
Returns
-------
algorithm
A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance.
Raises
------
ValueError
If an unknown object is passed or if ``value`` does not name a known algorithm.
"""
if value is None:
return ca_settings.CA_DIGEST_ALGORITHM
elif isinstance(value, type) and issubclass(value, hashes.HashAlgorithm):
return value()
elif isinstance(value, hashes.HashAlgorithm):
return value
elif isinstance(value, six.string_types):
try:
return getattr(hashes, value.strip())()
except AttributeError:
raise ValueError('Unknown hash algorithm: %s' % value)
else:
raise ValueError('Unknown type passed: %s' % type(value).__name__) | python | def parse_hash_algorithm(value=None):
"""Parse a hash algorithm value.
The most common use case is to pass a str naming a class in
:py:mod:`~cg:cryptography.hazmat.primitives.hashes`.
For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM
<settings-ca-digest-algorithm>`, and passing an
:py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that
instance unchanged.
Example usage::
>>> parse_hash_algorithm() # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown hash algorithm: Wrong
>>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown type passed: object
Parameters
----------
value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional
    The value to parse; see the function description on how possible values are used.
Returns
-------
algorithm
A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance.
Raises
------
ValueError
If an unknown object is passed or if ``value`` does not name a known algorithm.
"""
if value is None:
return ca_settings.CA_DIGEST_ALGORITHM
elif isinstance(value, type) and issubclass(value, hashes.HashAlgorithm):
return value()
elif isinstance(value, hashes.HashAlgorithm):
return value
elif isinstance(value, six.string_types):
try:
return getattr(hashes, value.strip())()
except AttributeError:
raise ValueError('Unknown hash algorithm: %s' % value)
else:
raise ValueError('Unknown type passed: %s' % type(value).__name__) | [
"def",
"parse_hash_algorithm",
"(",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"ca_settings",
".",
"CA_DIGEST_ALGORITHM",
"elif",
"isinstance",
"(",
"value",
",",
"type",
")",
"and",
"issubclass",
"(",
"value",
",",
"hashes",
".",
"HashAlgorithm",
")",
":",
"return",
"value",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"hashes",
".",
"HashAlgorithm",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"hashes",
",",
"value",
".",
"strip",
"(",
")",
")",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"'Unknown hash algorithm: %s'",
"%",
"value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown type passed: %s'",
"%",
"type",
"(",
"value",
")",
".",
"__name__",
")"
]
| Parse a hash algorithm value.
The most common use case is to pass a str naming a class in
:py:mod:`~cg:cryptography.hazmat.primitives.hashes`.
For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM
<settings-ca-digest-algorithm>`, and passing an
:py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that
instance unchanged.
Example usage::
>>> parse_hash_algorithm() # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.hashes.SHA512 object at ...>
>>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown hash algorithm: Wrong
>>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Unknown type passed: object
Parameters
----------
value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional
    The value to parse; see the function description on how possible values are used.
Returns
-------
algorithm
A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance.
Raises
------
ValueError
If an unknown object is passed or if ``value`` does not name a known algorithm. | [
"Parse",
"a",
"hash",
"algorithm",
"value",
"."
]
| 976d7ea05276320f20daed2a6d59c8f5660fe976 | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L493-L555 | train |
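A final sketch for the parse_hash_algorithm() record above. Calling it with no argument needs a configured Django settings module (the default comes from ca_settings.CA_DIGEST_ALGORITHM), so this sketch sticks to explicit values; 'SHA256' is assumed to exist on the hashes module, analogous to the 'SHA512' doctest.

from cryptography.hazmat.primitives import hashes
from django_ca.utils import parse_hash_algorithm

algo = parse_hash_algorithm('SHA256')  # a str is looked up on the hashes module and instantiated
assert isinstance(algo, hashes.SHA256)

same = parse_hash_algorithm(hashes.SHA256())  # instances are returned unchanged
assert isinstance(same, hashes.SHA256)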