Code | Summary
---|---
Please provide a description of the function:def wosParser(isifile):
plst = set()
error = None
try:
with open(isifile, 'r', encoding='utf-8-sig') as openfile:
f = enumerate(openfile, start = 0)
while "VR 1.0" not in f.__next__()[1]:
pass
notEnd = True
while notEnd:
line = f.__next__()
if line[1] == '':
error = BadWOSFile("'{}' does not have an 'EF', lines 1 to {} were checked".format(isifile, line[0] + 1))
elif line[1].isspace():
continue
elif 'EF' in line[1][:2]:
notEnd = False
continue
else:
try:
plst.add(WOSRecord(itertools.chain([line], f), sFile = isifile, sLine = line[0]))
except BadWOSFile as e:
try:
s = f.__next__()[1]
while s[:2] != 'ER':
s = f.__next__()[1]
except:
error = BadWOSFile("The file {} was not terminated corrrectly caused the following error:\n{}".format(isifile, str(e)))
try:
f.__next__()
except StopIteration:
pass
else:
error = BadWOSFile("EF not at end of " + isifile)
except UnicodeDecodeError:
try:
error = BadWOSFile("'{}' has a unicode issue on line: {}.".format(isifile, f.__next__()[0]))
except:
#Fallback needed in case f.__next__() causes issues
error = BadWOSFile("'{}' has a unicode issue. Probably when being opened or possibly on the first line".format(isifile))
except StopIteration:
error = BadWOSFile("The file '{}' ends before EF was found".format(isifile))
except KeyboardInterrupt as e:
error = e
finally:
if isinstance(error, KeyboardInterrupt):
raise error
return plst, error | [
"This is a function that is used to create [RecordCollections](../classes/RecordCollection.html#metaknowledge.RecordCollection) from files.\n\n **wosParser**() reads the file given by the path isifile, checks that the header is correct then reads until it reaches EF. All WOS records it encounters are parsed with [recordParser()](#metaknowledge.WOS.recordWOS.recordParser) and converted into [Records](../classes/Record.html#metaknowledge.Record). A list of these `Records` is returned.\n\n `BadWOSFile` is raised if an issue is found with the file.\n\n # Parameters\n\n _isifile_ : `str`\n\n > The path to the target file\n\n # Returns\n\n `List[Record]`\n\n > All the `Records` found in _isifile_\n "
]
|
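A minimal usage sketch for wosParser, assuming the import location in metaknowledge's WOS module (the import path and file name are hypothetical):

# Hypothetical usage; wosParser returns the parsed records plus any
# non-fatal error it hit along the way.
from metaknowledge.WOS.wosHandlers import wosParser  # assumed import location

records, error = wosParser('savedrecs.txt')  # hypothetical WOS export
if error is not None:
    raise error  # or log it and keep the partially parsed record set
print(len(records), 'records parsed')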
Please provide a description of the function:def isScopusFile(infile, checkedLines = 2, maxHeaderDiff = 3):
try:
with open(infile, 'r', encoding='utf-8') as openfile:
if openfile.read(1) != "\ufeff":
return False
for i in range(checkedLines):
if len(set(openfile.readline()[:-1].split(',')) ^ set(scopusHeader)) < maxHeaderDiff:
return True
except (StopIteration, UnicodeDecodeError):
return False
else:
return False | [
"Determines if _infile_ is the path to a Scopus csv file. A file is considerd to be a Scopus file if it has the correct encoding (`utf-8` with BOM (Byte Order Mark)) and within the first _checkedLines_ a line contains the complete header, the list of all header entries in order is found in [`scopus.scopusHeader`](#metaknowledge.scopus).\n\n **Note** this is for csv files _not_ plain text files from scopus, plain text files are not complete.\n\n # Parameters\n\n _infile_ : `str`\n\n > The path to the targets file\n\n _checkedLines_ : `optional [int]`\n\n > default 2, the number of lines to check for the header\n\n _maxHeaderDiff_ : `optional [int]`\n\n > default 3, maximum number of different entries in the potetial file from the current known header `metaknowledge.scopus.scopusHeader`, if exceeded an `False` will be returned\n\n # Returns\n\n `bool`\n\n > `True` if the file is a Scopus csv file\n "
]
|
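The two checks isScopusFile relies on, sketched standalone with made-up header values (the real comparison uses the full metaknowledge.scopus.scopusHeader list):

# A symmetric difference counts entries present in exactly one of the two
# sets, so a small count means the line is close enough to the known header.
expected = {'Authors', 'Title', 'Year'}  # stand-in for scopusHeader
candidate = 'Authors,Title,Year,Cited by\n'
print(len(set(candidate[:-1].split(',')) ^ expected))  # 1, under a maxHeaderDiff of 3

# The BOM check used above: a Scopus csv export starts with U+FEFF.
with open('records.csv', 'r', encoding='utf-8') as f:  # hypothetical path
    has_bom = f.read(1) == '\ufeff'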
Please provide a description of the function:def scopusParser(scopusFile):
#assumes the file is Scopus
recSet = set()
error = None
lineNum = 0
try:
with open(scopusFile, 'r', encoding = 'utf-8') as openfile:
#Get rid of the BOM
openfile.read(1)
header = openfile.readline()[:-1].split(',')
if len(set(header) ^ set(scopusHeader)) == 0:
header = None
lineNum = 0
try:
for line, row in enumerate(openfile, start = 2):
lineNum = line
recSet.add(ScopusRecord(row, header = header, sFile = scopusFile, sLine = line))
except BadScopusFile as e:
if error is None:
error = BadScopusFile("The file '{}' becomes unparsable after line: {}, due to the error: {} ".format(scopusFile, lineNum, e))
except (csv.Error, UnicodeDecodeError):
if error is None:
error = BadScopusFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(scopusFile, lineNum))
return recSet, error | [
"Parses a scopus file, _scopusFile_, to extract the individual lines as [ScopusRecords](../classes/ScopusRecord.html#metaknowledge.scopus.ScopusRecord).\n\n A Scopus file is a csv (Comma-separated values) with a complete header, see [`scopus.scopusHeader`](#metaknowledge.scopus) for the entries, and each line after it containing a record's entry. The string valued entries are quoted with double quotes which means double quotes inside them can cause issues, see [scopusRecordParser()](#metaknowledge.scopus.recordScopus.scopusRecordParser) for more information.\n\n # Parameters\n\n _scopusFile_ : `str`\n\n > A path to a valid scopus file, use [isScopusFile()](#metaknowledge.scopus.scopusHandlers.isScopusFile) to verify\n\n # Returns\n\n `set[ScopusRecord]`\n\n > Records for each of the entries\n "
]
|
Please provide a description of the function:def make_grid(rect, cells={}, num_rows=0, num_cols=0, padding=None,
inner_padding=None, outer_padding=None, row_heights={}, col_widths={},
default_row_height='expand', default_col_width='expand'):
grid = Grid(
bounding_rect=rect,
min_cell_rects=cells,
num_rows=num_rows,
num_cols=num_cols,
padding=padding,
inner_padding=inner_padding,
outer_padding=outer_padding,
row_heights=row_heights,
col_widths=col_widths,
default_row_height=default_row_height,
default_col_width=default_col_width,
)
return grid.make_cells() | [
"\n Return rectangles for each cell in the specified grid. The rectangles are \n returned in a dictionary where the keys are (row, col) tuples.\n "
]
|
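A hypothetical usage of make_grid; the Rect constructor is an assumption, so substitute whatever rectangle type the Grid class accepts:

# Carve a 100x100 rectangle into a 2x2 grid and read back the per-cell
# rectangles keyed by (row, col), as the summary above describes.
rect = Rect.from_size(100, 100)  # assumed rectangle constructor
cells = make_grid(rect, num_rows=2, num_cols=2, padding=5)
for (row, col), cell in sorted(cells.items()):
    print(row, col, cell)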
Please provide a description of the function:def lorem_ipsum(num_sentences=None, num_paragraphs=None):
paragraphs = [
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam justo sem, malesuada ut ultricies ac, bibendum eu neque. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean at tellus ut velit dignissim tincidunt. Curabitur euismod laoreet orci semper dignissim. Suspendisse potenti. Vivamus sed enim quis dui pulvinar pharetra. Duis condimentum ultricies ipsum, sed ornare leo vestibulum vitae. Sed ut justo massa, varius molestie diam. Sed lacus quam, tempor in dictum sed, posuere et diam. Maecenas tincidunt enim elementum turpis blandit tempus. Nam lectus justo, adipiscing vitae ultricies egestas, porta nec diam. Aenean ac neque tortor. Cras tempus lacus nec leo ultrices suscipit. Etiam sed aliquam tortor. Duis lacus metus, euismod ut viverra sit amet, pulvinar sed urna.',
'Aenean ut metus in arcu mattis iaculis quis eu nisl. Donec ornare, massa ut vestibulum vestibulum, metus sapien pretium ante, eu vulputate lorem augue vestibulum orci. Donec consequat aliquam sagittis. Sed in tellus pretium tortor hendrerit cursus congue sit amet turpis. Sed neque lacus, lacinia ut consectetur eget, faucibus vitae lacus. Integer eu purus ac purus tempus mollis non sed dui. Vestibulum volutpat erat magna. Etiam nisl eros, eleifend a viverra sed, interdum sollicitudin erat. Integer a orci in dolor suscipit cursus. Maecenas hendrerit neque odio. Nulla orci orci, varius id viverra in, molestie vel lacus. Donec at odio quis augue bibendum lobortis nec ac urna. Ut lacinia hendrerit tortor mattis rhoncus. Proin nunc tortor, congue ac adipiscing sit amet, aliquet in lorem. Nulla blandit tempor arcu, ut tempus quam posuere eu. In magna neque, venenatis nec tincidunt vitae, lobortis eget nulla.',
'Praesent sit amet nibh turpis, vitae lacinia metus. Ut nisi lacus, feugiat quis feugiat nec, pretium a diam. Aenean bibendum sem eget lorem ullamcorper mattis. Donec elementum purus vel felis vulputate pretium. Duis in ipsum est. Nulla consequat tempor sodales. Donec scelerisque enim eu tellus eleifend imperdiet. Quisque ullamcorper bibendum justo sit amet tincidunt. Donec tempus lacus quis diam varius placerat. Cras metus magna, congue sit amet pulvinar viverra, laoreet vel felis. Praesent sit amet consequat enim. Phasellus arcu nisl, volutpat et molestie a, sagittis a est. Maecenas tincidunt, sem non pharetra mollis, diam nisl ornare tellus, at euismod libero arcu ornare risus. Vestibulum laoreet sollicitudin purus in pharetra. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.',
'Nullam pellentesque tempor bibendum. Praesent dictum turpis nec quam consectetur aliquam. Aliquam id turpis nunc. Pellentesque fermentum lacus at tortor auctor venenatis. Maecenas blandit dui lectus. Nunc pellentesque pharetra suscipit. Nullam et metus diam, a congue leo. Curabitur convallis augue in lectus scelerisque non rhoncus lorem molestie. Curabitur in mi a erat dictum pharetra iaculis eu diam.',
'Nunc lorem magna, rhoncus sodales mattis quis, tincidunt eu mi. In ultrices, lectus ac porttitor tempor, odio nibh facilisis tortor, ac aliquet nisi ante non felis. Praesent ligula nisl, hendrerit ac volutpat non, varius quis tellus. Sed ornare faucibus elit eget faucibus. Nullam sem tellus, commodo id ullamcorper ut, imperdiet ac eros. Sed quis lorem id urna cursus laoreet et eget lacus. Nullam tristique semper sem, eget tempus sem pellentesque sit amet. Donec sed orci augue, convallis tempor tellus. Sed consequat commodo ante a pretium. Nulla et est mauris. Nullam at massa justo. Proin tempor arcu ac eros suscipit varius. Fusce vestibulum quam placerat tellus imperdiet et venenatis diam tristique. Sed pretium tempor tellus, consequat pulvinar massa pellentesque a.',
'Nulla et lorem vel urna fringilla malesuada ut sit amet tortor. Donec id leo mi. Proin sagittis blandit lacus, placerat imperdiet justo pellentesque ac. Cras iaculis aliquam faucibus. Aenean urna nisi, laoreet ac fringilla dignissim, lacinia eget orci. Vivamus porta lacinia dapibus. Aenean molestie, augue sit amet blandit suscipit, tellus turpis ullamcorper purus, ut pretium turpis lorem quis neque. Pellentesque porta dui at arcu mollis tristique. Suspendisse feugiat felis quis felis sollicitudin porttitor.',
'Morbi vestibulum, massa quis posuere facilisis, quam lacus porttitor tortor, id fringilla elit velit ac felis. Fusce at luctus risus. Mauris bibendum diam quis odio auctor quis porta massa pellentesque. Proin congue, nisl eu feugiat faucibus, justo orci congue neque, a porta tellus ipsum accumsan turpis. Ut neque enim, dignissim nec fermentum sed, laoreet id orci. Duis fringilla, elit vel tempus porttitor, purus tellus dapibus nisl, eu scelerisque diam lorem vel ante. Ut tempor, urna nec bibendum facilisis, sapien dui ornare lectus, at tempor ligula diam sit amet ligula. Sed a dui in ipsum eleifend egestas.',
'Quisque ornare fringilla velit, et tincidunt purus convallis vel. Sed venenatis, risus vitae volutpat rhoncus, sapien lorem lacinia elit, id dictum sapien dui vitae lorem. Praesent aliquet accumsan eros quis tempor. Suspendisse eget justo quis arcu bibendum adipiscing. Phasellus quis erat nec massa elementum porta. Nam venenatis elementum mi vel porta. Nunc vel augue non tellus euismod convallis. Curabitur commodo augue vel augue ultrices in fringilla nunc cursus. Mauris auctor laoreet neque, id gravida velit suscipit eget. Maecenas eget libero in lacus auctor feugiat. Pellentesque in lectus felis, eu dictum tortor. Aenean sagittis, massa malesuada dapibus tincidunt, leo massa imperdiet ante, nec mollis nisl turpis in orci. Proin ut purus et eros sagittis volutpat.',
'Donec molestie sem et metus bibendum convallis semper arcu imperdiet. Curabitur quam libero, fermentum vel adipiscing a, cursus at neque. Maecenas cursus risus vestibulum diam ultricies rutrum. Nullam in enim vel lorem accumsan pulvinar. Cras eget viverra turpis. Sed eget lectus urna, eget venenatis libero. Donec porta libero eu est pulvinar pretium. Ut lectus arcu, aliquam et vestibulum euismod, mattis at orci. Fusce dolor lorem, bibendum a dignissim ut, facilisis eu enim. Morbi erat nibh, interdum non ultricies non, porta ac lacus. Curabitur et nunc nec turpis convallis ullamcorper eget vitae mi.',
'Curabitur porta molestie sapien, non rhoncus turpis gravida vel. Ut est lacus, elementum eu pretium sit amet, tristique vel orci. Praesent quis suscipit urna. Donec pellentesque molestie tellus sit amet fringilla. Etiam tempus viverra ipsum et tempus. Nunc ut odio imperdiet lorem malesuada bibendum. In aliquam ligula eu sem ullamcorper pulvinar. Quisque sollicitudin placerat dolor et porttitor. Nulla adipiscing lorem id libero aliquet interdum. Suspendisse vehicula fermentum congue. Cras fringilla nisl vitae lectus mollis viverra. Aliquam pharetra lobortis risus, a elementum elit condimentum in. Aenean tincidunt varius faucibus. Nulla non nisi lorem. Suspendisse id sapien a enim lobortis aliquam.',
'Aliquam erat volutpat. Maecenas neque leo, mattis eu pretium vel, mattis in ante. Nullam sagittis leo diam. Quisque tempor magna in justo vestibulum eget egestas nibh pellentesque. Pellentesque in enim vitae velit pellentesque hendrerit. Cras ultricies, dui et imperdiet gravida, nunc nisl cursus tortor, sit amet porttitor dolor nibh a justo. Praesent ut mauris vitae turpis lobortis scelerisque a nec ligula. Donec turpis erat, iaculis vel dapibus vel, varius id lorem. Integer et enim erat, at eleifend libero.',
'Phasellus id mi ut nunc cursus pellentesque. Aliquam erat volutpat. Vivamus pretium posuere tellus, ac aliquet metus iaculis eget. Curabitur in mi enim. Duis pretium pretium dui, ut iaculis ipsum scelerisque ut. Proin quam dolor, eleifend et porta vitae, cursus molestie lectus. Aenean dignissim laoreet consectetur. Cras iaculis, lectus imperdiet condimentum suscipit, metus nisi egestas arcu, in tempus sem ipsum eu eros. Vestibulum a orci in elit congue euismod quis quis nisi.',
'In quis urna leo, at malesuada ipsum. Vestibulum sollicitudin ullamcorper hendrerit. Vestibulum vestibulum mi sodales nulla sagittis commodo. Maecenas nisi lorem, placerat vel aliquet quis, dictum ac ligula. Vestibulum egestas accumsan accumsan. Aenean lobortis pharetra erat convallis pretium. Aliquam consequat facilisis porta. Cras hendrerit nunc et mauris egestas hendrerit. Proin rhoncus, mi id ullamcorper pharetra, ipsum sapien blandit turpis, et ultricies purus neque eget justo. Quisque sodales, nisi in cursus rutrum, elit nibh volutpat lacus, nec sollicitudin erat leo at lectus. Morbi ac dolor mi, vel ultricies quam.',
'Sed hendrerit nisl id lectus cursus in adipiscing lorem rutrum. Morbi nisl justo, egestas ac aliquet at, scelerisque luctus sapien. Donec sollicitudin elementum mattis. Praesent semper, ante euismod accumsan gravida, ante neque convallis augue, quis vulputate erat nunc vitae tellus. Duis ac lectus ullamcorper purus commodo luctus. Etiam quis augue in purus molestie imperdiet. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam posuere commodo turpis, at pulvinar tortor scelerisque et. Nam vulputate dui sed magna interdum sollicitudin. Nam pulvinar euismod enim vitae malesuada. Aenean non molestie leo. Pellentesque quis lacus mi, et ornare nibh. Etiam pharetra, odio vitae euismod faucibus, nunc urna pulvinar felis, eget molestie est enim sit amet sapien. Vivamus eu neque nulla.',
'Mauris eget nibh ut augue malesuada tristique nec quis urna. Vestibulum faucibus, mauris sed posuere volutpat, felis lacus vulputate felis, eget luctus lorem nulla sed velit. Proin et purus nec quam tristique cursus. Nullam adipiscing tortor imperdiet purus facilisis eu luctus nulla vestibulum. Sed pulvinar risus sollicitudin risus fringilla et hendrerit lorem accumsan. Vestibulum venenatis est sit amet nunc gravida nec aliquam arcu adipiscing. Nam quis aliquet mauris. Cras nec neque vitae tellus posuere posuere.',
'Nulla facilisi. Vestibulum sit amet dui turpis. Aliquam erat volutpat. In hac habitasse platea dictumst. Morbi in enim nec massa semper tincidunt. Ut fermentum iaculis dui, sed adipiscing dolor porta at. Nam hendrerit libero non nisi ornare eu cursus mauris accumsan. Ut ullamcorper, odio vel ultrices suscipit, metus libero ornare dui, non dapibus est dui vehicula ipsum.',
'Nam diam sapien, lacinia vel sollicitudin interdum, faucibus aliquam enim. Mauris tristique iaculis purus eu lacinia. Suspendisse condimentum, dolor a euismod lacinia, leo orci pellentesque orci, non rhoncus turpis lorem sed lacus. Integer velit nisl, rutrum sit amet posuere at, vulputate ultrices tortor. Nullam pharetra, orci tempor dapibus elementum, felis nulla lacinia nunc, quis ultricies dui lectus dictum diam. Praesent eu velit magna, eu lacinia leo. Duis sit amet bibendum dui. Duis tincidunt vulputate dolor eu euismod. Pellentesque nisl sem, mollis ac venenatis a, facilisis vitae ligula. Vivamus sem leo, vestibulum tincidunt iaculis nec, tristique tincidunt mi. Suspendisse imperdiet elit vitae turpis ullamcorper luctus. Aenean in augue mauris. Vivamus nisi libero, dignissim non consectetur sodales, fermentum at sem. Nulla tincidunt fringilla justo quis pulvinar. Nam ac sem sed diam pellentesque egestas vitae ac nisi. Praesent scelerisque dapibus mi vitae tempor.',
'Donec tempor, massa non pulvinar suscipit, justo dolor pharetra nisl, ut semper libero lorem non tortor. Integer dapibus arcu viverra nisi hendrerit mattis et ut mauris. Maecenas pulvinar, orci vitae ultricies egestas, orci nisi rutrum justo, eu volutpat nibh odio ac purus. Nulla pellentesque sem eget arcu imperdiet ullamcorper. Curabitur nec magna massa. Morbi lobortis urna sed ligula commodo viverra. Pellentesque molestie, ipsum nec faucibus mollis, neque purus sodales sapien, in convallis nisi libero et lorem. Ut sed rutrum leo. Aliquam eleifend, felis quis ullamcorper consequat, dolor mi vulputate ipsum, lobortis ultricies felis nulla at augue.',
'Ut gravida porttitor arcu, malesuada mollis urna vehicula nec. Suspendisse sagittis nulla condimentum libero lacinia sed dapibus dui egestas. Etiam convallis congue ipsum, eu fermentum turpis rutrum id. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Ut nunc eros, sagittis a venenatis et, interdum in leo. Curabitur urna magna, volutpat in mattis ut, adipiscing et ligula. Nam dignissim mattis accumsan. Nulla vehicula felis vel turpis tempus hendrerit. Phasellus rhoncus vulputate massa, tincidunt euismod dui porttitor ac. Sed ut sapien quam, ac egestas odio. Pellentesque at aliquet ante. Donec rhoncus ornare lacus eu ullamcorper. Vestibulum sit amet hendrerit magna. Nulla sed diam nulla.',
'Nulla vestibulum sagittis arcu in egestas. Aliquam sed ante justo. Quisque nec dolor nibh, sed feugiat mi. Etiam lorem elit, interdum eu tempor nec, tincidunt eu risus. Fusce id libero augue. Curabitur ultrices, lorem eget mollis fringilla, dolor leo euismod tellus, congue luctus nisi purus vitae urna. Suspendisse tempor orci accumsan sem pretium at accumsan augue tristique. Proin sed turpis at mi feugiat lacinia a nec sem. Suspendisse vel facilisis leo. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Duis ornare enim nec ante adipiscing tincidunt. Maecenas ut justo iaculis leo vestibulum blandit quis vitae mauris. Proin in vestibulum massa.',
]
if num_paragraphs:
paragraphs = paragraphs[:num_paragraphs]
text = '\n\n'.join(paragraphs)
sentences = text.split('.')
if num_sentences:
sentences = sentences[:num_sentences]
lorem = '.'.join(sentences).strip()
if not lorem.endswith('.'):
lorem += '.'
return lorem | [
"\n Return the given amount of \"Lorem ipsum...\" text.\n "
]
|
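Quick checks of the truncation behavior described above:

print(lorem_ipsum(num_sentences=2))                      # first two sentences, '.'-terminated
print(len(lorem_ipsum(num_paragraphs=1).split('\n\n')))  # 1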
Please provide a description of the function:def relay_events_from(self, originator, event_type, *more_event_types):
handlers = {
# Bind each loop value via a default argument; a bare closure over
# 'event_type' would leave every handler dispatching the last event type.
event_type: lambda *args, _event_type=event_type, **kwargs: \
self.dispatch_event(_event_type, *args, **kwargs)
for event_type in (event_type,) + more_event_types
}
originator.set_handlers(**handlers) | [
"\n Configure this handler to re-dispatch events from another handler.\n\n This method configures this handler dispatch an event of type \n *event_type* whenever *originator* dispatches events of the same type \n or any of the types in *more_event_types*. Any arguments passed to the \n original event are copied to the new event.\n\n This method is mean to be useful for creating composite widgets that \n want to present a simple API by making it seem like the events being \n generated by their children are actually coming from them. See the \n `/composing_widgets` tutorial for an example.\n "
]
|
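Why the default-argument capture in relay_events_from matters, demonstrated on plain lambdas:

# Lambdas built in a comprehension otherwise all see the *last* loop value
# at call time; a default argument captures each value per iteration.
late = {t: (lambda: t) for t in ('a', 'b')}
bound = {t: (lambda t=t: t) for t in ('a', 'b')}
print(late['a']())   # 'b'  (late binding)
print(bound['a']())  # 'a'  (value captured)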
Please provide a description of the function:def start_event(self, event_type, *args, dt=1/60):
# Don't bother scheduling a timer if nobody's listening. This isn't
# great from a general-purpose perspective, because a long-lived event
# could have listeners attach and detach in the middle. But I don't
# like the idea of making a bunch of clocks to spit out a bunch of
# events that are never used, although to be fair I don't actually know
# how expensive that would be. If I want to make this implementation
# more general purpose, I could start and stop timers as necessary in
# the methods that add or remove handlers.
if not any(self.__yield_handlers(event_type)):
return
def on_time_interval(dt):
self.dispatch_event(event_type, *args, dt)
pyglet.clock.schedule_interval(on_time_interval, dt)
self.__timers[event_type] = on_time_interval | [
"\n Begin dispatching the given event at the given frequency.\n\n Calling this method will cause an event of type *event_type* with \n arguments *args* to be dispatched every *dt* seconds. This will \n continue until `stop_event()` is called for the same event.\n\n These continuously firing events are useful if, for example, you want \n to make a button that scrolls for as long as it's being held.\n "
]
|
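The underlying pyglet idiom that start_event and stop_event wrap, assuming a standard pyglet install:

import pyglet

def tick(dt):
    print('tick,', dt, 'seconds since last call')

pyglet.clock.schedule_interval(tick, 1/60)  # what start_event schedules
pyglet.clock.unschedule(tick)               # what stop_event undoes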
Please provide a description of the function:def stop_event(self, event_type):
if event_type in self.__timers:
pyglet.clock.unschedule(self.__timers[event_type]) | [
"\n Stop dispatching the given event.\n\n It is not an error to attempt to stop an event that was never started, \n the request will just be silently ignored.\n "
]
|
Please provide a description of the function:def __yield_handlers(self, event_type):
if event_type not in self.event_types:
raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
if event_type in frame:
yield frame[event_type]
# Check instance for an event handler
if hasattr(self, event_type):
yield getattr(self, event_type) | [
"\n Yield all the handlers registered for the given event type.\n "
]
|
Please provide a description of the function:def _filter_pending_updates(self):
from more_itertools import unique_everseen as unique
yield from reversed(list(unique(reversed(self._pending_updates)))) | [
"\n Return all the updates that need to be applied, from a list of all the \n updates that were called while the hold was active. This method is \n meant to be overridden by subclasses that want to customize how held \n updates are applied.\n\n The `self._pending_updates` member variable is a list containing a \n (method, args, kwargs) tuple for each update that was called while \n updates were being held. This list is in the order that the updates \n were actually called, and any updates that were called more than once \n will appear in this list more than once.\n \n This method should yield or return an list of the tuples in the same \n format representing the updates that should be applied, in the order \n they should be applied. The default implementation filters out \n duplicate updates without changing their order. In cases where it \n matters, the last call to each update is used to determine the order.\n "
]
|
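The reverse/dedupe/reverse trick in _filter_pending_updates, demonstrated on plain strings:

from more_itertools import unique_everseen

pending = ['draw', 'layout', 'draw']
# Reverse so the *last* call to each update is the occurrence kept, dedupe,
# then restore the original direction.
print(list(reversed(list(unique_everseen(reversed(pending))))))
# ['layout', 'draw']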
Please provide a description of the function:def main():
# Create command line parser.
parser = argparse.ArgumentParser()
# Adding command line arguments.
parser.add_argument("username", help="Github Username", default=None)
parser.add_argument(
"--deep_dive",
help=" ".join(
[
"If added considers repositories starred by users",
"you follow along with repositories you have",
"starred. Is significantly slower.",
]
),
action="store_true",
default=False,
)
# Parse command line arguments.
arguments = parser.parse_args()
if arguments.username is None:
parser.print_help()
return
print("")
print(
crayons.white(
"Authentication (with password) have higher rate limits."
)
)
print(
crayons.white(
"Skipping password might cause failure due to rate limit."
)
)
print("")
password = getpass.getpass(
crayons.blue(
"Enter password (to skip press enter without entering anything): ",
bold=True,
)
)
try:
gs = GitSuggest(
username=arguments.username,
password=password,
token=None,
deep_dive=arguments.deep_dive,
)
except BadCredentialsException:
print("")
print(
crayons.red(
"Incorrect password provided, to skip password enter nothing.",
bold=True,
)
)
exit()
except TwoFactorException:
print("")
print(
crayons.red(
"\n".join(
[
"You have 2FA set up, please enter a personal access token.",
"You can generate one on https://github.com/settings/tokens",
]
),
bold=True,
)
)
exit()
print("")
print(crayons.green("Suggestions generated!"))
file_name = "/tmp/gitresults.html"
repos = list(gs.get_suggested_repositories())
r2h = ReposToHTML(arguments.username, repos)
r2h.to_html(file_name)
webbrowser.open_new("file://" + file_name) | [
"Starting point for the program execution."
]
|
Please provide a description of the function:def get_html(self):
here = path.abspath(path.dirname(__file__))
env = Environment(loader=FileSystemLoader(path.join(here, "res/")))
suggest = env.get_template("suggest.htm.j2")
return suggest.render(
logo=path.join(here, "res/logo.png"),
user_login=self.user,
repos=self.repos,
) | [
"Method to convert the repository list to a search results page."
]
|
Please provide a description of the function:def to_html(self, write_to):
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | [
"Method to convert the repository list to a search results page and\n write it to a HTML file.\n\n :param write_to: File/Path to write the html file to.\n "
]
|
Please provide a description of the function:def get_unique_repositories(repo_list):
unique_list = list()
included = defaultdict(lambda: False)
for repo in repo_list:
if not included[repo.full_name]:
unique_list.append(repo)
included[repo.full_name] = True
return unique_list | [
"Method to create unique list of repositories from the list of\n repositories given.\n\n :param repo_list: List of repositories which might contain duplicates.\n :return: List of repositories with no duplicate in them.\n "
]
|
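The seen-flag idiom used by get_unique_repositories (and by minus below), with plain strings standing in for repo.full_name:

from collections import defaultdict

names = ['a/x', 'b/y', 'a/x']  # stand-ins for repo.full_name values
seen = defaultdict(lambda: False)
unique = []
for name in names:
    if not seen[name]:
        unique.append(name)
        seen[name] = True
print(unique)  # ['a/x', 'b/y']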
Please provide a description of the function:def minus(repo_list_a, repo_list_b):
included = defaultdict(lambda: False)
for repo in repo_list_b:
included[repo.full_name] = True
a_minus_b = list()
for repo in repo_list_a:
if not included[repo.full_name]:
included[repo.full_name] = True
a_minus_b.append(repo)
return a_minus_b | [
"Method to create a list of repositories such that the repository\n belongs to repo list a but not repo list b.\n\n In an ideal scenario we should be able to do this by set(a) - set(b)\n but as GithubRepositories have shown that set() on them is not reliable\n resort to this until it is all sorted out.\n\n :param repo_list_a: List of repositories.\n :param repo_list_b: List of repositories.\n "
]
|
Please provide a description of the function:def __populate_repositories_of_interest(self, username):
# Handle to the user to whom repositories need to be suggested.
user = self.github.get_user(username)
# Procure repositories starred by the user.
self.user_starred_repositories.extend(user.get_starred())
# Repositories starred by users followed by the user.
if self.deep_dive:
for following_user in user.get_following():
self.user_following_starred_repositories.extend(
following_user.get_starred()
) | [
"Method to populate repositories which will be used to suggest\n repositories for the user. For this purpose we use two kinds of\n repositories.\n\n 1. Repositories starred by user him/herself.\n 2. Repositories starred by the users followed by the user.\n\n :param username: Username for the user for whom repositories are being\n suggested for.\n "
]
|
Please provide a description of the function:def __get_interests(self):
# All repositories of interest.
repos_of_interest = itertools.chain(
self.user_starred_repositories,
self.user_following_starred_repositories,
)
# Extract descriptions out of repositories of interest.
repo_descriptions = [repo.description for repo in repos_of_interest]
return list(set(repo_descriptions)) | [
"Method to procure description of repositories the authenticated user\n is interested in.\n\n We currently attribute interest to:\n 1. The repositories the authenticated user has starred.\n 2. The repositories the users the authenticated user follows have\n starred.\n\n :return: List of repository descriptions.\n "
]
|
Please provide a description of the function:def __get_words_to_ignore(self):
# Stop words in English.
english_stopwords = stopwords.words("english")
here = path.abspath(path.dirname(__file__))
# Languages in git repositories.
git_languages = []
with open(path.join(here, "gitlang/languages.txt"), "r") as langauges:
git_languages = [line.strip() for line in langauges]
# Other words to avoid in git repositories.
words_to_avoid = []
with open(path.join(here, "gitlang/others.txt"), "r") as languages:
words_to_avoid = [line.strip() for line in languages]
return set(
itertools.chain(english_stopwords, git_languages, words_to_avoid)
) | [
"Compiles list of all words to ignore.\n\n :return: List of words to ignore.\n "
]
|
Please provide a description of the function:def __clean_and_tokenize(self, doc_list):
# Some repositories fill entire documentation in description. We ignore
# such repositories for cleaner tokens.
doc_list = filter(
lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
doc_list,
)
cleaned_doc_list = list()
# Regular expression to strip out all punctuation, numbers and other
# unnecessary text substrings like emojis etc.
tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
# Get stop words.
stopwords = self.__get_words_to_ignore()
# Get english words.
dict_words = self.__get_words_to_consider()
for doc in doc_list:
# Lowercase doc.
lower = doc.lower()
# Tokenize removing numbers and punctuation.
tokens = tokenizer.tokenize(lower)
# Include meaningful words.
tokens = [tok for tok in tokens if tok in dict_words]
# Remove stopwords.
tokens = [tok for tok in tokens if tok not in stopwords]
# Filter Nones if any are introduced.
tokens = [tok for tok in tokens if tok is not None]
cleaned_doc_list.append(tokens)
return cleaned_doc_list | [
"Method to clean and tokenize the document list.\n\n :param doc_list: Document list to clean and tokenize.\n :return: Cleaned and tokenized document list.\n "
]
|
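What the tokenizer regex above keeps, assuming NLTK is installed: alphabetic runs only, so numbers and punctuation are dropped.

from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
print(tokenizer.tokenize("Fast JSON parser v2.0 -- 100% pure!"))
# ['Fast', 'JSON', 'parser', 'v', 'pure']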
Please provide a description of the function:def __construct_lda_model(self):
# Fetch descriptions of repos of interest to authenticated user.
repos_of_interest = self.__get_interests()
# Procure clean tokens from the descriptions.
cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
# If the cleaned tokens are empty, generating the LDA model would raise an
# exception. The placeholder token mustn't be meaningful, though, as that
# would mean suggesting repos without reason. Hence the random string: it
# keeps LDA from raising an exception without generating any suggestions.
if not cleaned_tokens:
cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
# Setup LDA requisites.
dictionary = corpora.Dictionary(cleaned_tokens)
corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
# Generate LDA model
self.lda_model = models.ldamodel.LdaModel(
corpus, num_topics=1, id2word=dictionary, passes=10
) | [
"Method to create LDA model to procure list of topics from.\n\n We do that by first fetching the descriptions of repositories user has\n shown interest in. We tokenize the hence fetched descriptions to\n procure list of cleaned tokens by dropping all the stop words and\n language names from it.\n\n We use the cleaned and sanitized token list to train LDA model from\n which we hope to procure topics of interests to the authenticated user.\n "
]
|
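A minimal end-to-end run of the gensim calls used above, on toy token lists:

from gensim import corpora, models

docs = [['parser', 'json', 'fast'], ['json', 'schema', 'validator']]
dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(doc) for doc in docs]
lda = models.ldamodel.LdaModel(corpus, num_topics=1, id2word=dictionary, passes=10)
for term_id, weight in lda.get_topic_terms(0, topn=3):
    print(lda.id2word[term_id], round(float(weight), 3))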
Please provide a description of the function:def __get_query_for_repos(self, term_count=5):
repo_query_terms = list()
for term in self.lda_model.get_topic_terms(0, topn=term_count):
repo_query_terms.append(self.lda_model.id2word[term[0]])
return " ".join(repo_query_terms) | [
"Method to procure query based on topics authenticated user is\n interested in.\n\n :param term_count: Count of terms in query.\n :return: Query string.\n "
]
|
Please provide a description of the function:def get_suggested_repositories(self):
if self.suggested_repositories is None:
# Procure repositories to suggest to user.
repository_set = list()
for term_count in range(5, 2, -1):
query = self.__get_query_for_repos(term_count=term_count)
repository_set.extend(self.__get_repos_for_query(query))
# Remove repositories authenticated user is already interested in.
catchy_repos = GitSuggest.minus(
repository_set, self.user_starred_repositories
)
# Filter out repositories with too long descriptions. This is a
# measure to weed out spammy repositories.
filtered_repos = []
if len(catchy_repos) > 0:
for repo in catchy_repos:
if (
repo is not None
and repo.description is not None
and len(repo.description) <= GitSuggest.MAX_DESC_LEN
):
filtered_repos.append(repo)
# Present the repositories, highly starred to not starred.
filtered_repos = sorted(
filtered_repos,
key=attrgetter("stargazers_count"),
reverse=True,
)
self.suggested_repositories = GitSuggest.get_unique_repositories(
filtered_repos
)
# Return an iterator to help user fetch the repository listing.
for repository in self.suggested_repositories:
yield repository | [
"Method to procure suggested repositories for the user.\n\n :return: Iterator to procure suggested repositories for the user.\n "
]
|
Please provide a description of the function:def guess_type(s):
sc = s.replace(',', '') # remove comma from potential numbers
try:
return int(sc)
except ValueError:
pass
try:
return float(sc)
except ValueError:
pass
return s | [
" attempt to convert string value into numeric type "
]
|
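Expected conversions from guess_type, including the comma stripping for formatted numbers:

print(guess_type('1,234'))  # 1234 (int)
print(guess_type('3.14'))   # 3.14 (float)
print(guess_type('n/a'))    # 'n/a' (left as str)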
Please provide a description of the function:def parse(self, node):
self._attrs = {}
vals = []
yielded = False
for x in self._read_parts(node):
if isinstance(x, Field):
yielded = True
x.attrs = self._attrs
yield x
else:
vals.append(ustr(x).strip(' \n\t'))
joined = ' '.join([ x for x in vals if x ])
if joined:
yielded = True
yield Field(node, guess_type(joined), self._attrs)
if not yielded:
yield Field(node, "", self._attrs) | [
"\n Return generator yielding Field objects for a given node\n "
]
|
Please provide a description of the function:def parse(self, *nodes):
for n in nodes:
if not n.contents:
continue
row = self._parse(n)
if not row.is_null:
yield row | [
"\n Parse one or more `tr` nodes, yielding wikitables.Row objects\n "
]
|
Please provide a description of the function:def _find_header_flat(self):
nodes = self._node.contents.filter_tags(
matches=ftag('th'), recursive=False)
if not nodes:
return
self._log('found header outside rows (%d <th> elements)' % len(nodes))
return nodes | [
"\n Find header elements in a table, if possible. This case handles\n situations where '<th>' elements are not within a row('<tr>')\n "
]
|
Please provide a description of the function:def _find_header_row(self):
th_max = 0
header_idx = 0
for idx, tr in enumerate(self._tr_nodes):
th_count = len(tr.contents.filter_tags(matches=ftag('th')))
if th_count > th_max:
th_max = th_count
header_idx = idx
if not th_max:
return
self._log('found header at row %d (%d <th> elements)' % \
(header_idx, th_max))
header_row = self._tr_nodes.pop(header_idx)
return header_row.contents.filter_tags(matches=ftag('th')) | [
"\n Evaluate all rows and determine header position, based on\n greatest number of 'th' tagged elements\n "
]
|
Please provide a description of the function:def _make_default_header(self):
td_max = 0
for idx, tr in enumerate(self._tr_nodes):
td_count = len(tr.contents.filter_tags(matches=ftag('td')))
if td_count > td_max:
td_max = td_count
self._log('creating default header (%d columns)' % td_max)
return [ 'column%d' % n for n in range(0,td_max) ] | [
"\n Return a generic placeholder header based on the tables column count\n "
]
|
Please provide a description of the function:def fetch_page(self, title, method='GET'):
params = { 'prop': 'revisions',
'format': 'json',
'action': 'query',
'explaintext': '',
'titles': title,
'rvprop': 'content' }
r = self.request(method, self.base_url, params=params)
r.raise_for_status()
pages = r.json()["query"]["pages"]
# use key from first result in 'pages' array
pageid = list(pages.keys())[0]
if pageid == '-1':
raise ArticleNotFound('no matching articles returned')
return pages[pageid] | [
" Query for page by title "
]
|
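The same MediaWiki revisions query as fetch_page, spelled out with plain requests; the endpoint and title are example values:

import requests

params = {'prop': 'revisions', 'format': 'json', 'action': 'query',
          'explaintext': '', 'titles': 'Python (programming language)',
          'rvprop': 'content'}
r = requests.get('https://en.wikipedia.org/w/api.php', params=params)
pages = r.json()['query']['pages']
page = pages[list(pages.keys())[0]]  # a key of '-1' means no matching article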
Please provide a description of the function:def print_stack(pid, include_greenlet=False, debugger=None, verbose=False):
# TextIOWrapper of Python 3 is so strange.
sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout)
sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr)
make_args = make_gdb_args
environ = dict(os.environ)
if (
debugger == 'lldb' or
(debugger is None and platform.system().lower() == 'darwin')
):
make_args = make_lldb_args
# fix the PATH environment variable for using built-in Python with lldb
environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '')
tmp_fd, tmp_path = tempfile.mkstemp()
os.chmod(tmp_path, 0o777)
commands = []
commands.append(FILE_OPEN_COMMAND)
commands.extend(UTILITY_COMMANDS)
commands.extend(THREAD_STACK_COMMANDS)
if include_greenlet:
commands.extend(GREENLET_STACK_COMMANDS)
commands.append(FILE_CLOSE_COMMAND)
command = r';'.join(commands)
args = make_args(pid, command % tmp_path)
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if verbose:
sys_stderr.write(b'Standard Output:\n%s\n' % out)
sys_stderr.write(b'Standard Error:\n%s\n' % err)
sys_stderr.flush()
for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''):
sys_stdout.write(chunk)
sys_stdout.write(b'\n')
sys_stdout.flush() | [
"Executes a file in a running Python process."
]
|
Please provide a description of the function:def cli_main(pid, include_greenlet, debugger, verbose):
'''Print stack of python process.
$ pystack <pid>
'''
try:
print_stack(pid, include_greenlet, debugger, verbose)
except DebuggerNotFound as e:
click.echo('DebuggerNotFound: %s' % e.args[0], err=True)
click.get_current_context().exit(1) | []
|
Please provide a description of the function:def forward_algo(self,observations):
# Store total number of observations
total_stages = len(observations)
# Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number
# Initialize alpha
ob_ind = self.obs_map[ observations[0] ]
alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Iteratively find alpha(using knowledge of alpha in the previous stage)
for curr_t in range(1,total_stages):
ob_ind = self.obs_map[observations[curr_t]]
alpha = np.dot( alpha , self.trans_prob)
alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] ))
# Sum the alpha's over the last stage
total_prob = alpha.sum()
return ( total_prob ) | [
" Finds the probability of an observation sequence for given model parameters\n\n **Arguments**:\n\n :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. \n :type observations: A list or tuple\n\n :return: The probability of occurence of the observation sequence\n :rtype: float \n\n **Example**:\n\n >>> states = ('s', 't')\n >>> possible_observation = ('A','B' )\n >>> # Numpy arrays of the data\n >>> start_probability = np.matrix( '0.5 0.5 ')\n >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')\n >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )\n >>> # Initialize class object\n >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)\n >>> observations = ('A', 'B','B','A')\n >>> print(test.forward_algo(observations))\n\n .. note::\n No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead.\n\n "
]
|
Please provide a description of the function:def viterbi(self,observations):
# Find total states,observations
total_stages = len(observations)
num_states = len(self.states)
# initialize data
# Path stores the state sequence giving maximum probability
old_path = np.zeros( (total_stages, num_states) )
new_path = np.zeros( (total_stages, num_states) )
# Find initial delta
# Map observation to an index
# delta[s] stores the probability of most probable path ending in state 's'
ob_ind = self.obs_map[ observations[0] ]
delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
# Scale delta
delta = delta /np.sum(delta)
# initialize path
old_path[0,:] = [i for i in range(num_states) ]
# Find delta[t][x] for each state 'x' at the iteration 't'
# delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path
for curr_t in range(1,total_stages):
# Map observation to an index
ob_ind = self.obs_map[ observations[curr_t] ]
# Find temp and take max along each row to get delta
temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] )
# Update delta and scale it
delta = temp.max(axis = 1).transpose()
delta = delta /np.sum(delta)
# Find state which is most probable using argmax
# Convert to a list for easier processing
max_temp = temp.argmax(axis=1).transpose()
max_temp = np.ravel(max_temp).tolist()
# Update path
for s in range(num_states):
new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ]
new_path[curr_t,:] = [i for i in range(num_states) ]
old_path = new_path.copy()
# Find the state in last stage, giving maximum probability
final_max = np.argmax(np.ravel(delta))
best_path = old_path[:,final_max].tolist()
best_path_map = [ self.state_map[i] for i in best_path]
return best_path_map | [
" The probability of occurence of the observation sequence\n\n **Arguments**:\n\n :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. \n :type observations: A list or tuple\n\n :return: Returns a list of hidden states. \n :rtype: list of states \n \n **Features**:\n\n Scaling applied here. This ensures that no underflow error occurs.\n\n\n **Example**:\n\n >>> states = ('s', 't')\n >>> possible_observation = ('A','B' )\n >>> # Numpy arrays of the data\n >>> start_probability = np.matrix( '0.5 0.5 ')\n >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')\n >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )\n >>> # Initialize class object\n >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)\n >>> observations = ('A', 'B','B','A')\n >>> print(test.viterbi(observations))\n\n "
]
|
Please provide a description of the function:def train_hmm(self,observation_list, iterations, quantities):
obs_size = len(observation_list)
prob = float('inf')
q = quantities
# Train the model 'iteration' number of times
# store em_prob and trans_prob copies since you should use same values for one loop
for i in range(iterations):
emProbNew = np.asmatrix(np.zeros((self.em_prob.shape)))
transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape)))
startProbNew = np.asmatrix(np.zeros((self.start_prob.shape)))
for j in range(obs_size):
# re-assign values based on weight
emProbNew= emProbNew + q[j] * self._train_emission(observation_list[j])
transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j])
startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j])
# Normalizing
em_norm = emProbNew.sum(axis = 1)
trans_norm = transProbNew.sum(axis = 1)
start_norm = startProbNew.sum(axis = 1)
emProbNew = emProbNew/ em_norm.transpose()
startProbNew = startProbNew/ start_norm.transpose()
transProbNew = transProbNew/ trans_norm.transpose()
self.em_prob,self.trans_prob = emProbNew,transProbNew
self.start_prob = startProbNew
if prob - self.log_prob(observation_list,quantities)>0.0000001:
prob = self.log_prob(observation_list,quantities)
else:
return self.em_prob, self.trans_prob , self.start_prob
return self.em_prob, self.trans_prob , self.start_prob | [
" Runs the Baum Welch Algorithm and finds the new model parameters\n\n **Arguments**:\n\n :param observation_list: A nested list, or a list of lists \n :type observation_list: Contains a list multiple observation sequences.\n\n :param iterations: Maximum number of iterations for the algorithm \n :type iterations: An integer \n\n :param quantities: Number of times, each corresponding item in 'observation_list' occurs.\n :type quantities: A list of integers\n\n :return: Returns the emission, transition and start probabilites as numpy matrices\n :rtype: Three numpy matices \n \n **Features**:\n\n Scaling applied here. This ensures that no underflow error occurs.\n \n **Example**:\n\n >>> states = ('s', 't')\n >>> possible_observation = ('A','B' )\n >>> # Numpy arrays of the data\n >>> start_probability = np.matrix( '0.5 0.5 ')\n >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')\n >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )\n >>> # Initialize class object\n >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)\n >>> \n >>> observations = ('A', 'B','B','A')\n >>> obs4 = ('B', 'A','B')\n >>> observation_tuple = []\n >>> observation_tuple.extend( [observations,obs4] )\n >>> quantities_observations = [10, 20]\n >>> num_iter=1000\n >>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations)\n >>> # e,t,s contain new emission transition and start probabilities\n\n "
]
|
Please provide a description of the function:def log_prob(self,observations_list, quantities):
prob = 0
for q,obs in enumerate(observations_list):
temp,c_scale = self._alpha_cal(obs)
prob = prob + -1 * quantities[q] * np.sum(np.log(c_scale))
return prob | [
" Finds Weighted log probability of a list of observation sequences\n\n **Arguments**:\n\n :param observation_list: A nested list, or a list of lists \n :type observation_list: Contains a list multiple observation sequences.\n\n :param quantities: Number of times, each corresponding item in 'observation_list' occurs.\n :type quantities: A list of integers\n\n :return: Weighted log probability of multiple observations. \n :rtype: float\n \n **Features**:\n\n Scaling applied here. This ensures that no underflow error occurs.\n\n **Example**:\n\n >>> states = ('s', 't')\n >>> possible_observation = ('A','B' )\n >>> # Numpy arrays of the data\n >>> start_probability = np.matrix( '0.5 0.5 ')\n >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')\n >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )\n >>> # Initialize class object\n >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)\n >>> observations = ('A', 'B','B','A')\n >>> obs4 = ('B', 'A','B')\n >>> observation_tuple = []\n >>> observation_tuple.extend( [observations,obs4] )\n >>> quantities_observations = [10, 20]\n >>>\n >>> prob = test.log_prob(observation_tuple, quantities_observations)\n\n "
]
|
Please provide a description of the function:def __fetch_data(self, url):
url += '&api_key=' + self.api_key
try:
response = urlopen(url)
root = ET.fromstring(response.read())
except HTTPError as exc:
root = ET.fromstring(exc.read())
raise ValueError(root.get('message'))
return root | [
"\n helper function for fetching data given a request URL\n "
]
|
Please provide a description of the function:def _parse(self, date_str, format='%Y-%m-%d'):
rv = pd.to_datetime(date_str, format=format)
if hasattr(rv, 'to_pydatetime'):
rv = rv.to_pydatetime()
return rv | [
"\n helper function for parsing FRED date string into datetime\n "
]
|
Please provide a description of the function:def get_series_info(self, series_id):
url = "%s/series?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None or not len(root):
raise ValueError('No info exists for series id: ' + series_id)
info = pd.Series(root.getchildren()[0].attrib)
return info | [
"\n Get information about a series such as its title, frequency, observation start/end dates, units, notes, etc.\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'CPIAUCSL'\n\n Returns\n -------\n info : Series\n a pandas Series containing information about the Fred series\n "
]
|
Please provide a description of the function:def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):
url = "%s/series/observations?series_id=%s" % (self.root_url, series_id)
if observation_start is not None:
observation_start = pd.to_datetime(observation_start,
errors='raise')
url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')
if observation_end is not None:
observation_end = pd.to_datetime(observation_end, errors='raise')
url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')
if kwargs.keys():
url += '&' + urlencode(kwargs)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
for child in root.getchildren():
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
data[self._parse(child.get('date'))] = val
return pd.Series(data) | [
"\n Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series_latest_release()\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'CPIAUCSL'\n observation_start : datetime or datetime-like str such as '7/1/2014', optional\n earliest observation date\n observation_end : datetime or datetime-like str such as '7/1/2014', optional\n latest observation date\n kwargs : additional parameters\n Any additional parameters supported by FRED. You can see https://api.stlouisfed.org/docs/fred/series_observations.html for the full list\n\n Returns\n -------\n data : Series\n a Series where each index is the observation date and the value is the data for the Fred series\n "
]
|
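A typical call pattern for the client above, assuming it is the fredapi-style Fred class; the API key is a placeholder:

fred = Fred(api_key='YOUR_API_KEY')  # placeholder key from the St. Louis Fed
cpi = fred.get_series('CPIAUCSL', observation_start='1/1/2010',
                      observation_end='12/31/2010')
print(cpi.head())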
Please provide a description of the function:def get_series_first_release(self, series_id):
df = self.get_series_all_releases(series_id)
first_release = df.groupby('date').head(1)
data = first_release.set_index('date')['value']
return data | [
"\n Get first-release data for a Fred series id. This ignores any revision to the data series. For instance,\n The US GDP for Q1 2014 was first released to be 17149.6, and then later revised to 17101.3, and 17016.0.\n This will ignore revisions after the first release.\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'GDP'\n\n Returns\n -------\n data : Series\n a Series where each index is the observation date and the value is the data for the Fred series\n "
]
|
Please provide a description of the function:def get_series_as_of_date(self, series_id, as_of_date):
as_of_date = pd.to_datetime(as_of_date)
df = self.get_series_all_releases(series_id)
data = df[df['realtime_start'] <= as_of_date]
return data | [
"\n Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series\n before or on as_of_date, but ignores any revision on dates after as_of_date.\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'GDP'\n as_of_date : datetime, or datetime-like str such as '10/25/2014'\n Include data revisions on or before this date, and ignore revisions afterwards\n\n Returns\n -------\n data : Series\n a Series where each index is the observation date and the value is the data for the Fred series\n "
]
|
Please provide a description of the function:def get_series_all_releases(self, series_id):
url = "%s/series/observations?series_id=%s&realtime_start=%s&realtime_end=%s" % (self.root_url,
series_id,
self.earliest_realtime_start,
self.latest_realtime_end)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
i = 0
for child in root.getchildren():
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
realtime_start = self._parse(child.get('realtime_start'))
# realtime_end = self._parse(child.get('realtime_end'))
date = self._parse(child.get('date'))
data[i] = {'realtime_start': realtime_start,
# 'realtime_end': realtime_end,
'date': date,
'value': val}
i += 1
data = pd.DataFrame(data).T
return data | [
"\n Get all data for a Fred series id including first releases and all revisions. This returns a DataFrame\n with three columns: 'date', 'realtime_start', and 'value'. For instance, the US GDP for Q4 2013 was first released\n to be 17102.5 on 2014-01-30, and then revised to 17080.7 on 2014-02-28, and then revised to 17089.6 on\n 2014-03-27. You will therefore get three rows with the same 'date' (observation date) of 2013-10-01 but three\n different 'realtime_start' of 2014-01-30, 2014-02-28, and 2014-03-27 with corresponding 'value' of 17102.5, 17080.7\n and 17089.6\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'GDP'\n\n Returns\n -------\n data : DataFrame\n a DataFrame with columns 'date', 'realtime_start' and 'value' where 'date' is the observation period and 'realtime_start'\n is when the corresponding value (either first release or revision) is reported.\n "
]
|
Please provide a description of the function:def get_series_vintage_dates(self, series_id):
url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No vintage date exists for series id: ' + series_id)
dates = []
for child in root.getchildren():
dates.append(self._parse(child.text))
return dates | [
"\n Get a list of vintage dates for a series. Vintage dates are the dates in history when a\n series' data values were revised or new data values were released.\n\n Parameters\n ----------\n series_id : str\n Fred series id such as 'CPIAUCSL'\n\n Returns\n -------\n dates : list\n list of vintage dates\n "
]
|
Please provide a description of the function:def __do_series_search(self, url):
root = self.__fetch_data(url)
series_ids = []
data = {}
num_results_returned = 0 # number of results returned in this HTTP request
num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned
for child in root.getchildren():
num_results_returned += 1
series_id = child.get('id')
series_ids.append(series_id)
data[series_id] = {"id": series_id}
fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
"frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
"seasonal_adjustment_short", "last_updated", "popularity", "notes"]
for field in fields:
data[series_id][field] = child.get(field)
if num_results_returned > 0:
data = pd.DataFrame(data, columns=series_ids).T
# parse datetime columns
for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
data[field] = data[field].apply(self._parse, format=None)
# set index name
data.index.name = 'series id'
else:
data = None
return data, num_results_total | [
"\n helper function for making one HTTP request for data, and parsing the returned results into a DataFrame\n "
]
|
Please provide a description of the function:def __get_search_results(self, url, limit, order_by, sort_order, filter):
order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
'observation_start', 'observation_end', 'popularity']
if order_by is not None:
if order_by in order_by_options:
url = url + '&order_by=' + order_by
else:
raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
if filter is not None:
if len(filter) == 2:
url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
else:
raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')
sort_order_options = ['asc', 'desc']
if sort_order is not None:
if sort_order in sort_order_options:
url = url + '&sort_order=' + sort_order
else:
raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
data, num_results_total = self.__do_series_search(url)
if data is None:
return data
if limit == 0:
max_results_needed = num_results_total
else:
max_results_needed = limit
if max_results_needed > self.max_results_per_request:
for i in range(1, max_results_needed // self.max_results_per_request + 1):
offset = i * self.max_results_per_request
next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
data = data.append(next_data)
return data.head(max_results_needed) | [
"\n helper function for getting search results up to specified limit on the number of results. The Fred HTTP API\n truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.\n "
]
|
Please provide a description of the function:def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
url = "%s/series/search?search_text=%s&" % (self.root_url,
quote_plus(text))
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
return info | [
"\n Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.\n\n Parameters\n ----------\n text : str\n text to do fulltext search on, e.g., 'Real GDP'\n limit : int, optional\n limit the number of results to this value. If limit is 0, it means fetching all results without limit.\n order_by : str, optional\n order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',\n 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',\n 'popularity'\n sort_order : str, optional\n sort the results by ascending or descending order. Valid options are 'asc' or 'desc'\n filter : tuple, optional\n filters the results. Expects a tuple like (filter_variable, filter_value).\n Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'\n\n Returns\n -------\n info : DataFrame\n a DataFrame containing information about the matching Fred series\n "
]
|
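A hedged usage sketch for search(), again assuming the fredapi package and a valid API key::

    from fredapi import Fred

    fred = Fred(api_key='insert-your-api-key-here')  # placeholder key
    # Full-text search, most recently updated first, monthly series only
    results = fred.search('potential gdp', order_by='last_updated',
                          sort_order='desc', filter=('frequency', 'Monthly'))
    print(results[['title', 'frequency']].head())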
Please provide a description of the function:def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for release id: ' + str(release_id))
return info | [
"\n Search for series that belongs to a release id. Returns information about matching series in a DataFrame.\n\n Parameters\n ----------\n release_id : int\n release id, e.g., 151\n limit : int, optional\n limit the number of results to this value. If limit is 0, it means fetching all results without limit.\n order_by : str, optional\n order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',\n 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',\n 'popularity'\n sort_order : str, optional\n sort the results by ascending or descending order. Valid options are 'asc' or 'desc'\n filter : tuple, optional\n filters the results. Expects a tuple like (filter_variable, filter_value).\n Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'\n\n Returns\n -------\n info : DataFrame\n a DataFrame containing information about the matching Fred series\n "
]
|
Please provide a description of the function:def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info | [
"\n Search for series that belongs to a category id. Returns information about matching series in a DataFrame.\n\n Parameters\n ----------\n category_id : int\n category id, e.g., 32145\n limit : int, optional\n limit the number of results to this value. If limit is 0, it means fetching all results without limit.\n order_by : str, optional\n order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',\n 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',\n 'popularity'\n sort_order : str, optional\n sort the results by ascending or descending order. Valid options are 'asc' or 'desc'\n filter : tuple, optional\n filters the results. Expects a tuple like (filter_variable, filter_value).\n Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'\n\n Returns\n -------\n info : DataFrame\n a DataFrame containing information about the matching Fred series\n "
]
|
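The release and category variants follow the same pattern; continuing the sketch above, with the ids taken from the docstring examples::

    releases = fred.search_by_release(151)       # all series in release 151
    categories = fred.search_by_category(32145)  # all series in category 32145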
Please provide a description of the function:def init(self, name, subject, expires=None, algorithm=None, parent=None, pathlen=None,
issuer_url=None, issuer_alt_name='', crl_url=None, ocsp_url=None,
ca_issuer_url=None, ca_crl_url=None, ca_ocsp_url=None, name_constraints=None,
password=None, parent_password=None, ecc_curve=None, key_type='RSA', key_size=None,
extra_extensions=None):
# NOTE: Already verified by KeySizeAction, so these checks are only for when the Python API is used
# directly.
if key_type != 'ECC':
if key_size is None:
key_size = ca_settings.CA_DEFAULT_KEY_SIZE
if not is_power2(key_size):
raise ValueError("%s: Key size must be a power of two" % key_size)
elif key_size < ca_settings.CA_MIN_KEY_SIZE:
raise ValueError("%s: Key size must be least %s bits" % (
key_size, ca_settings.CA_MIN_KEY_SIZE))
algorithm = parse_hash_algorithm(algorithm)
# Normalize extensions to django_ca.extensions.Extension subclasses
if not isinstance(subject, Subject):
subject = Subject(subject)
if not isinstance(issuer_alt_name, IssuerAlternativeName):
issuer_alt_name = IssuerAlternativeName(issuer_alt_name)
pre_create_ca.send(
sender=self.model, name=name, key_size=key_size, key_type=key_type, algorithm=algorithm,
expires=expires, parent=parent, subject=subject, pathlen=pathlen, issuer_url=issuer_url,
issuer_alt_name=issuer_alt_name, crl_url=crl_url, ocsp_url=ocsp_url, ca_issuer_url=ca_issuer_url,
ca_crl_url=ca_crl_url, ca_ocsp_url=ca_ocsp_url, name_constraints=name_constraints,
password=password, parent_password=parent_password, extra_extensions=extra_extensions)
if key_type == 'DSA':
private_key = dsa.generate_private_key(key_size=key_size, backend=default_backend())
elif key_type == 'ECC':
ecc_curve = parse_key_curve(ecc_curve)
private_key = ec.generate_private_key(ecc_curve, default_backend())
else:
private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_size,
backend=default_backend())
public_key = private_key.public_key()
builder = get_cert_builder(expires)
builder = builder.public_key(public_key)
subject = subject.name
builder = builder.subject_name(subject)
builder = builder.add_extension(x509.BasicConstraints(ca=True, path_length=pathlen), critical=True)
builder = builder.add_extension(x509.KeyUsage(
key_cert_sign=True, crl_sign=True, digital_signature=False, content_commitment=False,
key_encipherment=False, data_encipherment=False, key_agreement=False, encipher_only=False,
decipher_only=False), critical=True)
subject_key_id = x509.SubjectKeyIdentifier.from_public_key(public_key)
builder = builder.add_extension(subject_key_id, critical=False)
if parent is None:
builder = builder.issuer_name(subject)
private_sign_key = private_key
aki = x509.AuthorityKeyIdentifier.from_issuer_public_key(public_key)
else:
builder = builder.issuer_name(parent.x509.subject)
private_sign_key = parent.key(parent_password)
aki = parent.get_authority_key_identifier()
builder = builder.add_extension(aki, critical=False)
for critical, ext in self.get_common_extensions(ca_issuer_url, ca_crl_url, ca_ocsp_url):
builder = builder.add_extension(ext, critical=critical)
if name_constraints:
if not isinstance(name_constraints, NameConstraints):
name_constraints = NameConstraints(name_constraints)
builder = builder.add_extension(**name_constraints.for_builder())
if extra_extensions:
builder = self._extra_extensions(builder, extra_extensions)
certificate = builder.sign(private_key=private_sign_key, algorithm=algorithm,
backend=default_backend())
# Normalize extensions for create()
if crl_url is not None:
crl_url = '\n'.join(crl_url)
issuer_alt_name = issuer_alt_name.serialize()
ca = self.model(name=name, issuer_url=issuer_url, issuer_alt_name=issuer_alt_name,
ocsp_url=ocsp_url, crl_url=crl_url, parent=parent)
ca.x509 = certificate
ca.private_key_path = ca_storage.generate_filename('%s.key' % ca.serial.replace(':', ''))
ca.save()
if password is None:
encryption = serialization.NoEncryption()
else:
encryption = serialization.BestAvailableEncryption(password)
pem = private_key.private_bytes(encoding=Encoding.PEM,
format=PrivateFormat.PKCS8,
encryption_algorithm=encryption)
# write private key to file
ca_storage.save(ca.private_key_path, ContentFile(pem))
post_create_ca.send(sender=self.model, ca=ca)
return ca | [
"Create a new certificate authority.\n\n Parameters\n ----------\n\n name : str\n The name of the CA. This is a human-readable string and is used for administrative purposes only.\n subject : dict or str or :py:class:`~django_ca.subject.Subject`\n Subject string, e.g. ``\"/CN=example.com\"`` or ``Subject(\"/CN=example.com\")``. The value is\n actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an instance of that\n class.\n expires : datetime, optional\n Datetime for when this certificate authority will expire, defaults to\n :ref:`CA_DEFAULT_EXPIRES <settings-ca-default-expires>`.\n algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional\n Hash algorithm used when signing the certificate, passed to\n :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the\n :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting.\n parent : :py:class:`~django_ca.models.CertificateAuthority`, optional\n Parent certificate authority for the new CA. Passing this value makes the CA an intermediate\n authority.\n pathlen : int, optional\n Value of the path length attribute for the :py:class:`~django_ca.extensions.BasicConstraints`\n extension.\n issuer_url : str\n URL for the DER/ASN1 formatted certificate that is signing certificates.\n issuer_alt_name : :py:class:`~django_ca.extensions.IssuerAlternativeName` or str, optional\n IssuerAlternativeName used when signing certificates. If the value is not an instance of\n :py:class:`~django_ca.extensions.IssuerAlternativeName`, it will be passed as argument to\n the constructor of the class.\n crl_url : list of str, optional\n CRL URLs used for certificates signed by this CA.\n ocsp_url : str, optional\n OCSP URL used for certificates signed by this CA.\n ca_issuer_url : str, optional\n URL for the DER/ASN1 formatted certificate that is signing this CA. For intermediate CAs, this\n would usually be the ``issuer_url`` of the parent CA.\n ca_crl_url : list of str, optional\n CRL URLs used for this CA. This value is only meaningful for intermediate CAs.\n ca_ocsp_url : str, optional\n OCSP URL used for this CA. This value is only meaningful for intermediate CAs.\n name_constraints : list of lists or :py:class:`~django_ca.extensions.NameConstraints`\n List of names that this CA can sign and/or cannot sign. The value is passed to\n :py:class:`~django_ca.extensions.NameConstraints` if the value is not already an instance of that\n class.\n password : bytes, optional\n Password to encrypt the private key with.\n parent_password : bytes, optional\n Password that the private key of the parent CA is encrypted with.\n ecc_curve : str or EllipticCurve, optional\n The elliptic curve to use for ECC type keys, passed verbatim to\n :py:func:`~django_ca.utils.parse_key_curve`.\n key_type: str, optional\n The type of private key to generate, must be one of ``\"RSA\"``, ``\"DSA\"`` or ``\"ECC\"``, with\n ``\"RSA\"`` being the default.\n key_size : int, optional\n Integer specifying the key size, must be a power of two (e.g. 2048, 4096, ...). Defaults to\n the :ref:`CA_DEFAULT_KEY_SIZE <settings-ca-default-key-size>`, unused if ``key_type=\"ECC\"``.\n extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \\\n :py:class:`django_ca.extensions.Extension`, optional\n An optional list of additional extensions to add to the certificate.\n\n Raises\n ------\n\n ValueError\n For various cases of wrong input data (e.g. 
``key_size`` not being the power of two).\n PermissionError\n If the private key file cannot be written to disk.\n "
]
|
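A sketch of how init() might be called from a Django shell, assuming django-ca is installed and configured; the CA names and subjects are illustrative::

    from django_ca.models import CertificateAuthority

    root = CertificateAuthority.objects.init(
        name='Root CA',
        subject='/C=AT/O=Example Org/CN=Example Root CA',
        pathlen=1,        # the root may sign one level of intermediate CAs
    )
    intermediate = CertificateAuthority.objects.init(
        name='Intermediate CA',
        subject='/CN=Example Intermediate CA',
        parent=root,      # signed by the root, making this an intermediate CA
    )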
Please provide a description of the function:def sign_cert(self, ca, csr, expires=None, algorithm=None, subject=None, cn_in_san=True,
csr_format=Encoding.PEM, subject_alternative_name=None, key_usage=None,
extended_key_usage=None, tls_feature=None, ocsp_no_check=False, extra_extensions=None,
password=None):
########################
# Normalize parameters #
########################
if subject is None:
subject = Subject() # we need a subject instance so we can possibly add the CN
elif not isinstance(subject, Subject):
subject = Subject(subject)
if 'CN' not in subject and not subject_alternative_name:
raise ValueError("Must name at least a CN or a subjectAlternativeName.")
algorithm = parse_hash_algorithm(algorithm)
# Normalize extensions to django_ca.extensions.Extension subclasses
if key_usage and not isinstance(key_usage, KeyUsage):
key_usage = KeyUsage(key_usage)
if extended_key_usage and not isinstance(extended_key_usage, ExtendedKeyUsage):
extended_key_usage = ExtendedKeyUsage(extended_key_usage)
if tls_feature and not isinstance(tls_feature, TLSFeature):
tls_feature = TLSFeature(tls_feature)
if not subject_alternative_name:
subject_alternative_name = SubjectAlternativeName([])
elif not isinstance(subject_alternative_name, SubjectAlternativeName):
subject_alternative_name = SubjectAlternativeName(subject_alternative_name)
# use first SAN as CN if CN is not set
if 'CN' not in subject:
subject['CN'] = subject_alternative_name.value[0].value
elif cn_in_san and 'CN' in subject: # add CN to SAN if cn_in_san is True (default)
try:
cn_name = parse_general_name(subject['CN'])
except idna.IDNAError:
raise ValueError('%s: Could not parse CommonName as subjectAlternativeName.' % subject['CN'])
else:
if cn_name not in subject_alternative_name:
subject_alternative_name.insert(0, cn_name)
################
# Read the CSR #
################
if csr_format == Encoding.PEM:
req = x509.load_pem_x509_csr(force_bytes(csr), default_backend())
elif csr_format == Encoding.DER:
req = x509.load_der_x509_csr(force_bytes(csr), default_backend())
else:
raise ValueError('Unknown CSR format passed: %s' % csr_format)
#########################
# Send pre-issue signal #
#########################
pre_issue_cert.send(sender=self.model, ca=ca, csr=csr, expires=expires, algorithm=algorithm,
subject=subject, cn_in_san=cn_in_san, csr_format=csr_format,
subject_alternative_name=subject_alternative_name, key_usage=key_usage,
extended_key_usage=extended_key_usage, tls_feature=tls_feature,
extra_extensions=extra_extensions, password=password)
#######################
# Generate public key #
#######################
public_key = req.public_key()
builder = get_cert_builder(expires)
builder = builder.public_key(public_key)
builder = builder.issuer_name(ca.x509.subject)
builder = builder.subject_name(subject.name)
# Add extensions
builder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True)
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(public_key), critical=False)
# Get authorityKeyIdentifier from subjectKeyIdentifier from signing CA
builder = builder.add_extension(ca.get_authority_key_identifier(), critical=False)
for critical, ext in self.get_common_extensions(ca.issuer_url, ca.crl_url, ca.ocsp_url):
builder = builder.add_extension(ext, critical=critical)
if subject_alternative_name:
builder = builder.add_extension(**subject_alternative_name.for_builder())
if key_usage:
builder = builder.add_extension(**key_usage.for_builder())
if extended_key_usage:
builder = builder.add_extension(**extended_key_usage.for_builder())
if tls_feature:
builder = builder.add_extension(**tls_feature.for_builder())
if ca.issuer_alt_name:
issuer_alt_name = IssuerAlternativeName(ca.issuer_alt_name)
builder = builder.add_extension(**issuer_alt_name.for_builder())
if ocsp_no_check:
builder = builder.add_extension(**OCSPNoCheck().for_builder())
if extra_extensions:
builder = self._extra_extensions(builder, extra_extensions)
###################
# Sign public key #
###################
cert = builder.sign(private_key=ca.key(password), algorithm=algorithm, backend=default_backend())
return cert, req | [
"Create a signed certificate from a CSR.\n\n **PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is\n called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which\n passes along all parameters unchanged and saves the raw certificate to the database.\n\n Parameters\n ----------\n\n ca : :py:class:`~django_ca.models.CertificateAuthority`\n The certificate authority to sign the certificate with.\n csr : str\n A valid CSR. The format is given by the ``csr_format`` parameter.\n expires : datetime, optional\n Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES`` setting.\n algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional\n Hash algorithm used when signing the certificate, passed to\n :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the\n :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting.\n subject : dict or str or :py:class:`~django_ca.subject.Subject`\n Subject string, e.g. ``\"/CN=example.com\"`` or ``Subject(\"/CN=example.com\")``.\n The value is actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an\n instance of that class. If this value is not passed or if the value does not contain a CommonName,\n the first value of the ``subject_alternative_name`` parameter is used as CommonName.\n cn_in_san : bool, optional\n Wether the CommonName should also be included as subjectAlternativeName. The default is\n ``True``, but the parameter is ignored if no CommonName is given. This is typically set\n to ``False`` when creating a client certificate, where the subjects CommonName has no\n meaningful value as subjectAlternativeName.\n csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional\n The format of the CSR. The default is ``PEM``.\n subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`,\n optional A list of alternative names for the certificate. The value is passed to\n :py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that class.\n key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional\n Value for the ``keyUsage`` X509 extension. The value is passed to\n :py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class.\n extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional\n Value for the ``extendedKeyUsage`` X509 extension. The value is passed to\n :py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class.\n tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional\n Value for the ``TLSFeature`` X509 extension. The value is passed to\n :py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class.\n ocsp_no_check : bool, optional\n Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for it's\n lifetime. This value only makes sense if you intend to use the certificate for an OCSP responder,\n the default is ``False``. 
See `RFC 6990, section 4.2.2.2.1\n <https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information.\n extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \\\n :py:class:`django_ca.extensions.Extension`, optional\n An optional list of additional extensions to add to the certificate.\n password : bytes, optional\n Password used to load the private key of the certificate authority. If not passed, the private key\n is assumed to be unencrypted.\n\n Returns\n -------\n\n cryptography.x509.Certificate\n The signed certificate.\n "
]
|
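As the note in the description says, sign_cert() is normally reached through Certificate.objects.init(), which takes the same keyword arguments and also saves the certificate. A direct call could look roughly like this; the CSR file and CA name are hypothetical::

    from django_ca.models import Certificate, CertificateAuthority

    ca = CertificateAuthority.objects.get(name='Root CA')  # assumed to exist
    with open('client.csr') as stream:                     # hypothetical CSR
        csr = stream.read()
    cert, req = Certificate.objects.sign_cert(
        ca, csr, subject='/CN=www.example.com')
    print(cert.not_valid_after)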
Please provide a description of the function:def init(self, ca, csr, **kwargs):
c = self.model(ca=ca)
c.x509, csr = self.sign_cert(ca, csr, **kwargs)
c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8')
c.save()
post_issue_cert.send(sender=self.model, cert=c)
return c | [
"Create a signed certificate from a CSR and store it to the database.\n\n All parameters are passed on to :py:func:`Certificate.objects.sign_cert()\n <django_ca.managers.CertificateManager.sign_cert>`.\n "
]
|
Please provide a description of the function:def download_bundle_view(self, request, pk):
return self._download_response(request, pk, bundle=True) | [
"A view that allows the user to download a certificate bundle in PEM format."
]
|
Please provide a description of the function:def get_actions(self, request):
actions = super(CertificateMixin, self).get_actions(request)
actions.pop('delete_selected', '')
return actions | [
"Disable the \"delete selected\" admin action.\n\n Otherwise the action is present even though has_delete_permission is False, it just doesn't\n work.\n "
]
|
Please provide a description of the function:def get_cert_profile_kwargs(name=None):
if name is None:
name = ca_settings.CA_DEFAULT_PROFILE
profile = deepcopy(ca_settings.CA_PROFILES[name])
kwargs = {
'cn_in_san': profile['cn_in_san'],
'subject': get_default_subject(name=name),
}
key_usage = profile.get('keyUsage')
if key_usage and key_usage.get('value'):
kwargs['key_usage'] = KeyUsage(key_usage)
ext_key_usage = profile.get('extendedKeyUsage')
if ext_key_usage and ext_key_usage.get('value'):
kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage)
tls_feature = profile.get('TLSFeature')
if tls_feature and tls_feature.get('value'):
kwargs['tls_feature'] = TLSFeature(tls_feature)
if profile.get('ocsp_no_check'):
kwargs['ocsp_no_check'] = profile['ocsp_no_check']
return kwargs | [
"Get kwargs suitable for get_cert X509 keyword arguments from the given profile."
]
|
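An illustrative sketch of combining a profile with certificate signing; it assumes get_cert_profile_kwargs, Certificate, ca and csr are in scope, and 'server' is assumed to be a configured profile name::

    kwargs = get_cert_profile_kwargs('server')   # assumed profile name
    kwargs['subject']['CN'] = 'www.example.com'  # override the profile subject
    cert, req = Certificate.objects.sign_cert(ca, csr, **kwargs)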
Please provide a description of the function:def format_name(subject):
if isinstance(subject, x509.Name):
subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject]
return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject])) | [
"Convert a subject into the canonical form for distinguished names.\n\n This function does not take care of sorting the subject in any meaningful order.\n\n Examples::\n\n >>> format_name([('CN', 'example.com'), ])\n '/CN=example.com'\n >>> format_name([('CN', 'example.com'), ('O', \"My Organization\"), ])\n '/CN=example.com/O=My Organization'\n "
]
|
Please provide a description of the function:def format_general_name(name):
if isinstance(name, x509.DirectoryName):
value = format_name(name.value)
else:
value = name.value
return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], value) | [
"Format a single general name.\n\n >>> import ipaddress\n >>> format_general_name(x509.DNSName('example.com'))\n 'DNS:example.com'\n >>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1')))\n 'IP:127.0.0.1'\n "
]
|
Please provide a description of the function:def add_colons(s):
return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)]) | [
"Add colons after every second digit.\n\n This function is used in functions to prettify serials.\n\n >>> add_colons('teststring')\n 'te:st:st:ri:ng'\n "
]
|
Please provide a description of the function:def int_to_hex(i):
s = hex(i)[2:].upper()
if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA
# Strip the "L" suffix, since hex(1L) -> 0x1L.
# NOTE: Do not convert to int earlier. int(<very-large-long>) is still long
s = s[:-1]
return add_colons(s) | [
"Create a hex-representation of the given serial.\n\n >>> int_to_hex(12345678)\n 'BC:61:4E'\n "
]
|
Please provide a description of the function:def parse_name(name):
name = name.strip()
if not name: # empty subjects are ok
return []
try:
items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]
except KeyError as e:
raise ValueError('Unknown x509 name field: %s' % e.args[0])
# Check that no OIDs not in MULTIPLE_OIDS occur more than once
for key, oid in NAME_OID_MAPPINGS.items():
if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:
raise ValueError('Subject contains multiple "%s" fields' % key)
return sort_name(items) | [
"Parses a subject string as used in OpenSSLs command line utilities.\n\n The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example\n ``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient\n on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``,\n whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``).\n\n >>> parse_name('/CN=example.com')\n [('CN', 'example.com')]\n >>> parse_name('c=AT/l= Vienna/o=\"ex org\"/CN=example.com')\n [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]\n\n Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted\n based on x509 name specifications regardless of the given order:\n\n >>> parse_name('L=\"Vienna / District\"/[email protected]')\n [('L', 'Vienna / District'), ('emailAddress', '[email protected]')]\n >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')\n True\n\n Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes,\n so strings like ``/OU=\"Org / Org Unit\"/CN=example.com`` will work as expected.\n\n >>> parse_name('L=\"Vienna / District\"/CN=example.com')\n [('L', 'Vienna / District'), ('CN', 'example.com')]\n\n But note that it's still easy to trick this function, if you really want to. The following example is\n *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's\n certainly different:\n\n >>> parse_name('L=\"Vienna \" District\"/CN=example.com')\n [('L', 'Vienna'), ('CN', 'example.com')]\n\n Examples of where this string is used are:\n\n .. code-block:: console\n\n # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'\n # openssl x509 -in cert.pem -noout -subject -nameopt compat\n /C=AT/L=Vienna/CN=example.com\n "
]
|
Please provide a description of the function:def x509_name(name):
if isinstance(name, six.string_types):
name = parse_name(name)
return x509.Name([x509.NameAttribute(NAME_OID_MAPPINGS[typ], force_text(value)) for typ, value in name]) | [
"Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`.\n\n If ``name`` is a string, :py:func:`parse_name` is used to parse it.\n\n >>> x509_name('/C=AT/CN=example.com')\n <Name(C=AT,CN=example.com)>\n >>> x509_name([('C', 'AT'), ('CN', 'example.com')])\n <Name(C=AT,CN=example.com)>\n "
]
|
Please provide a description of the function:def validate_email(addr):
if '@' not in addr:
raise ValueError('Invalid email address: %s' % addr)
node, domain = addr.split('@', 1)
try:
domain = idna.encode(force_text(domain))
except idna.core.IDNAError:
raise ValueError('Invalid domain: %s' % domain)
return '%s@%s' % (node, force_text(domain)) | [
"Validate an email address.\n\n This function raises ``ValueError`` if the email address is not valid.\n\n >>> validate_email('[email protected]')\n '[email protected]'\n >>> validate_email('foo@bar com')\n Traceback (most recent call last):\n ...\n ValueError: Invalid domain: bar com\n\n "
]
|
Please provide a description of the function:def parse_general_name(name):
name = force_text(name)
typ = None
match = GENERAL_NAME_RE.match(name)
if match is not None:
typ, name = match.groups()
typ = typ.lower()
if typ is None:
if re.match('[a-z0-9]{2,}://', name): # Looks like a URI
try:
return x509.UniformResourceIdentifier(name)
except Exception: # pragma: no cover - this really accepts anything
pass
if '@' in name: # Looks like an Email address
try:
return x509.RFC822Name(validate_email(name))
except Exception:
pass
if name.strip().startswith('/'): # maybe it's a dirname?
return x509.DirectoryName(x509_name(name))
# Try to parse this as IPAddress/Network
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
# Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
# Almost anything passes as DNS name, so this is our default fallback
return x509.DNSName(name)
if typ == 'uri':
return x509.UniformResourceIdentifier(name)
elif typ == 'email':
return x509.RFC822Name(validate_email(name))
elif typ == 'ip':
try:
return x509.IPAddress(ip_address(name))
except ValueError:
pass
try:
return x509.IPAddress(ip_network(name))
except ValueError:
pass
raise ValueError('Could not parse IP address.')
elif typ == 'rid':
return x509.RegisteredID(x509.ObjectIdentifier(name))
elif typ == 'othername':
regex = "(.*);(.*):(.*)"
if re.match(regex, name) is not None:
oid, asn_typ, val = re.match(regex, name).groups()
oid = x509.ObjectIdentifier(oid)
if asn_typ == 'UTF8':
val = val.encode('utf-8')
elif asn_typ == 'OctetString':
val = bytes(bytearray.fromhex(val))
val = OctetString(val).dump()
else:
raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ)
val = force_bytes(val)
return x509.OtherName(oid, val)
else:
raise ValueError('Incorrect otherName format: %s' % name)
elif typ == 'dirname':
return x509.DirectoryName(x509_name(name))
else:
# Try to encode the domain name. DNSName() does not validate the domain name, but this
# check will fail.
if name.startswith('*.'):
idna.encode(name[2:])
elif name.startswith('.'):
idna.encode(name[1:])
else:
idna.encode(name)
return x509.DNSName(name) | [
"Parse a general name from user input.\n\n This function will do its best to detect the intended type of any value passed to it:\n\n >>> parse_general_name('example.com')\n <DNSName(value='example.com')>\n >>> parse_general_name('*.example.com')\n <DNSName(value='*.example.com')>\n >>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains\n <DNSName(value='.example.com')>\n >>> parse_general_name('[email protected]')\n <RFC822Name(value='[email protected]')>\n >>> parse_general_name('https://example.com')\n <UniformResourceIdentifier(value='https://example.com')>\n >>> parse_general_name('1.2.3.4')\n <IPAddress(value=1.2.3.4)>\n >>> parse_general_name('fd00::1')\n <IPAddress(value=fd00::1)>\n >>> parse_general_name('/CN=example.com')\n <DirectoryName(value=<Name(CN=example.com)>)>\n\n The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't\n work, an exception will be raised:\n\n >>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n idna.core.IDNAError: ...\n\n If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`:\n\n >>> parse_general_name('email:[email protected]')\n <RFC822Name(value='[email protected]')>\n >>> parse_general_name('URI:https://example.com')\n <UniformResourceIdentifier(value='https://example.com')>\n >>> parse_general_name('dirname:/CN=example.com')\n <DirectoryName(value=<Name(CN=example.com)>)>\n\n Some more exotic values can only be generated by using this prefix:\n\n >>> parse_general_name('rid:2.5.4.3')\n <RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)>\n >>> parse_general_name('otherName:2.5.4.3;UTF8:example.com')\n <OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')>\n\n If you give a prefixed value, this function is less forgiving of any typos and does not catch any\n exceptions:\n\n >>> parse_general_name('email:foo@bar com')\n Traceback (most recent call last):\n ...\n ValueError: Invalid domain: bar com\n\n "
]
|
Please provide a description of the function:def parse_hash_algorithm(value=None):
if value is None:
return ca_settings.CA_DIGEST_ALGORITHM
elif isinstance(value, type) and issubclass(value, hashes.HashAlgorithm):
return value()
elif isinstance(value, hashes.HashAlgorithm):
return value
elif isinstance(value, six.string_types):
try:
return getattr(hashes, value.strip())()
except AttributeError:
raise ValueError('Unknown hash algorithm: %s' % value)
else:
raise ValueError('Unknown type passed: %s' % type(value).__name__) | [
"Parse a hash algorithm value.\n\n The most common use case is to pass a str naming a class in\n :py:mod:`~cg:cryptography.hazmat.primitives.hashes`.\n\n For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM\n <settings-ca-digest-algorithm>`, and passing an\n :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that\n instance unchanged.\n\n Example usage::\n\n >>> parse_hash_algorithm() # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.hashes.SHA512 object at ...>\n >>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.hashes.SHA512 object at ...>\n >>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.hashes.SHA512 object at ...>\n >>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.hashes.SHA512 object at ...>\n >>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.hashes.SHA512 object at ...>\n >>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n ValueError: Unknown hash algorithm: Wrong\n >>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n ValueError: Unknown type passed: object\n\n Parameters\n ----------\n\n value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional\n The value to parse, the function description on how possible values are used.\n\n Returns\n -------\n\n algorithm\n A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance.\n\n Raises\n ------\n\n ValueError\n If an unknown object is passed or if ``value`` does not name a known algorithm.\n "
]
|
Please provide a description of the function:def parse_encoding(value=None):
if value is None:
return ca_settings.CA_DEFAULT_ENCODING
elif isinstance(value, Encoding):
return value
elif isinstance(value, six.string_types):
if value == 'ASN1':
value = 'DER'
try:
return getattr(Encoding, value)
except AttributeError:
raise ValueError('Unknown encoding: %s' % value)
else:
raise ValueError('Unknown type passed: %s' % type(value).__name__) | [
"Parse a value to a valid encoding.\n\n This function accepts either a member of\n :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or a string describing a member. If\n no value is passed, it will assume ``PEM`` as a default value. Note that ``\"ASN1\"`` is treated as an alias\n for ``\"DER\"``.\n\n >>> parse_encoding()\n <Encoding.PEM: 'PEM'>\n >>> parse_encoding('DER')\n <Encoding.DER: 'DER'>\n >>> parse_encoding(Encoding.PEM)\n <Encoding.PEM: 'PEM'>\n "
]
|
Please provide a description of the function:def parse_key_curve(value=None):
if isinstance(value, ec.EllipticCurve):
return value # name was already parsed
if value is None:
return ca_settings.CA_DEFAULT_ECC_CURVE
curve = getattr(ec, value.strip(), type)
if not issubclass(curve, ec.EllipticCurve):
raise ValueError('%s: Not a known Elliptic Curve' % value)
return curve() | [
"Parse an elliptic curve value.\n\n This function uses a value identifying an elliptic curve to return an\n :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. The name must match a\n class name of one of the classes named under \"Elliptic Curves\" in\n :any:`cg:hazmat/primitives/asymmetric/ec`.\n\n For convenience, passing ``None`` will return the value of :ref:`CA_DEFAULT_ECC_CURVE\n <settings-ca-default-ecc-curve>`, and passing an\n :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` will return that instance\n unchanged.\n\n Example usage::\n\n >>> parse_key_curve('SECP256R1') # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>\n >>> parse_key_curve('SECP384R1') # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.asymmetric.ec.SECP384R1 object at ...>\n >>> parse_key_curve(ec.SECP256R1()) # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>\n >>> parse_key_curve() # doctest: +ELLIPSIS\n <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>\n\n Parameters\n ----------\n\n value : str, otional\n The name of the curve or ``None`` to return the default curve.\n\n Returns\n -------\n\n curve\n An :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance.\n\n Raises\n ------\n\n ValueError\n If the named curve is not supported.\n "
]
|
Please provide a description of the function:def get_cert_builder(expires):
now = datetime.utcnow().replace(second=0, microsecond=0)
if expires is None:
expires = get_expires(expires, now=now)
expires = expires.replace(second=0, microsecond=0)
builder = x509.CertificateBuilder()
builder = builder.not_valid_before(now)
builder = builder.not_valid_after(expires)
builder = builder.serial_number(x509.random_serial_number())
return builder | [
"Get a basic X509 cert builder object.\n\n Parameters\n ----------\n\n expires : datetime\n When this certificate will expire.\n "
]
|
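A minimal sketch of using the returned builder; the expiry value is illustrative::

    from datetime import datetime, timedelta

    builder = get_cert_builder(datetime.utcnow() + timedelta(days=365))
    # not_valid_before/not_valid_after and a random serial number are already
    # set; subject, issuer, public key and extensions still have to be added
    # before calling builder.sign().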
Please provide a description of the function:def wrap_file_exceptions():
try:
yield
except (PermissionError, FileNotFoundError): # pragma: only py3
# In py3, we want to raise Exception unchanged, so there would be no need for this block.
# BUT (IOError, OSError) - see below - also matches, so we capture it here
raise
except (IOError, OSError) as e: # pragma: only py2
if e.errno == errno.EACCES:
raise PermissionError(str(e))
elif e.errno == errno.ENOENT:
raise FileNotFoundError(str(e))
raise | [
"Contextmanager to wrap file exceptions into identicaly exceptions in py2 and py3.\n\n This should be removed once py2 support is dropped.\n "
]
|
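A usage sketch showing the intended py2/py3 normalization; the path is only an example of a file that is typically unreadable::

    try:
        with wrap_file_exceptions():
            with open('/etc/shadow', 'rb') as stream:  # usually unreadable
                stream.read()
    except PermissionError as exc:
        print('cannot read file: %s' % exc)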
Please provide a description of the function:def read_file(path):
if os.path.isabs(path):
with wrap_file_exceptions():
with open(path, 'rb') as stream:
return stream.read()
with wrap_file_exceptions():
stream = ca_storage.open(path)
try:
return stream.read()
finally:
stream.close() | [
"Read the file from the given path.\n\n If ``path`` is an absolute path, reads a file from the local filesystem. For relative paths, read the file\n using the storage backend configured using :ref:`CA_FILE_STORAGE <settings-ca-file-storage>`.\n "
]
|
Please provide a description of the function:def get_extension_name(ext):
# In cryptography 2.2, SCTs return "Unknown OID"
if ext.oid == ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS:
return 'SignedCertificateTimestampList'
# Until at least cryptography 2.6.1, PrecertPoison has no name
# https://github.com/pyca/cryptography/issues/4817
elif ca_settings.CRYPTOGRAPHY_HAS_PRECERT_POISON: # pragma: no branch, pragma: only cryptography>=2.4
if ext.oid == ExtensionOID.PRECERT_POISON:
return 'PrecertPoison'
# uppercase the FIRST letter only ("keyUsage" -> "KeyUsage")
return re.sub('^([a-z])', lambda x: x.groups()[0].upper(), ext.oid._name) | [
"Function to get the name of an extension."
]
|
Please provide a description of the function:def shlex_split(s, sep):
lex = shlex.shlex(s, posix=True)
lex.whitespace = sep
lex.whitespace_split = True
return [l for l in lex] | [
"Split a character on the given set of characters.\n\n Example::\n\n >>> shlex_split('foo,bar', ', ')\n ['foo', 'bar']\n >>> shlex_split('foo\\\\\\\\,bar1', ',') # escape a separator\n ['foo,bar1']\n >>> shlex_split('\"foo,bar\", bla', ', ')\n ['foo,bar', 'bla']\n >>> shlex_split('foo,\"bar,bla\"', ',')\n ['foo', 'bar,bla']\n "
]
|
Please provide a description of the function:def get_revocation_reason(self):
if self.revoked is False:
return
if self.revoked_reason == '' or self.revoked_reason is None:
return x509.ReasonFlags.unspecified
else:
return getattr(x509.ReasonFlags, self.revoked_reason) | [
"Get the revocation reason of this certificate."
]
|
Please provide a description of the function:def get_revocation_time(self):
if self.revoked is False:
return
if timezone.is_aware(self.revoked_date):
# convert datetime object to UTC and make it naive
return timezone.make_naive(self.revoked_date, pytz.utc)
return self.revoked_date | [
"Get the revocation time as naive datetime.\n\n Note that this method is only used by cryptography>=2.4.\n "
]
|
Please provide a description of the function:def x509(self):
if self._x509 is None:
backend = default_backend()
self._x509 = x509.load_pem_x509_certificate(force_bytes(self.pub), backend)
return self._x509 | [
"The underlying :py:class:`cg:cryptography.x509.Certificate`."
]
|
Please provide a description of the function:def issuer(self):
return Subject([(s.oid, s.value) for s in self.x509.issuer]) | [
"The certificate issuer field as :py:class:`~django_ca.subject.Subject`."
]
|
Please provide a description of the function:def subject(self):
return Subject([(s.oid, s.value) for s in self.x509.subject]) | [
"The certificates subject as :py:class:`~django_ca.subject.Subject`."
]
|
Please provide a description of the function:def authority_key_identifier(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_KEY_IDENTIFIER)
except x509.ExtensionNotFound:
return None
return AuthorityKeyIdentifier(ext) | [
"The :py:class:`~django_ca.extensions.AuthorityKeyIdentifier` extension, or ``None`` if it doesn't\n exist."
]
|
Please provide a description of the function:def key_usage(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.KEY_USAGE)
except x509.ExtensionNotFound:
return None
return KeyUsage(ext) | [
"The :py:class:`~django_ca.extensions.KeyUsage` extension, or ``None`` if it doesn't exist."
]
|
Please provide a description of the function:def extended_key_usage(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.EXTENDED_KEY_USAGE)
except x509.ExtensionNotFound:
return None
return ExtendedKeyUsage(ext) | [
"The :py:class:`~django_ca.extensions.ExtendedKeyUsage` extension, or ``None`` if it doesn't\n exist."
]
|
Please provide a description of the function:def subject_key_identifier(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_KEY_IDENTIFIER)
except x509.ExtensionNotFound:
return None
return SubjectKeyIdentifier(ext) | [
"The :py:class:`~django_ca.extensions.SubjectKeyIdentifier` extension, or ``None`` if it doesn't\n exist."
]
|
Please provide a description of the function:def tls_feature(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE)
except x509.ExtensionNotFound:
return None
return TLSFeature(ext) | [
"The :py:class:`~django_ca.extensions.TLSFeature` extension, or ``None`` if it doesn't exist."
]
|
Please provide a description of the function:def get_authority_key_identifier(self):
try:
ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except x509.ExtensionNotFound:
return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key())
else:
return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski) | [
"Return the AuthorityKeyIdentifier extension used in certificates signed by this CA."
]
|
Please provide a description of the function:def get_crl(self, expires=86400, encoding=None, algorithm=None, password=None, scope=None, **kwargs):
if scope is not None and scope not in ['ca', 'user', 'attribute']:
raise ValueError('Scope must be either None, "ca", "user" or "attribute"')
encoding = parse_encoding(encoding)
now = now_builder = timezone.now()
algorithm = parse_hash_algorithm(algorithm)
if timezone.is_aware(now_builder):
now_builder = timezone.make_naive(now, pytz.utc)
builder = x509.CertificateRevocationListBuilder()
builder = builder.issuer_name(self.x509.subject)
builder = builder.last_update(now_builder)
builder = builder.next_update(now_builder + timedelta(seconds=expires))
if 'full_name' in kwargs:
full_name = kwargs['full_name']
full_name = [parse_general_name(n) for n in full_name]
elif self.crl_url:
crl_url = [url.strip() for url in self.crl_url.split()]
full_name = [x509.UniformResourceIdentifier(c) for c in crl_url]
else:
full_name = None
# Keyword arguments for the IssuingDistributionPoint extension
idp_kwargs = {
'only_contains_ca_certs': False,
'only_contains_user_certs': False,
'indirect_crl': False,
'only_contains_attribute_certs': False,
'only_some_reasons': None,
'full_name': full_name,
'relative_name': kwargs.get('relative_name'),
}
ca_qs = self.children.filter(expires__gt=now).revoked()
cert_qs = self.certificate_set.filter(expires__gt=now).revoked()
if scope == 'ca':
certs = ca_qs
idp_kwargs['only_contains_ca_certs'] = True
elif scope == 'user':
certs = cert_qs
idp_kwargs['only_contains_user_certs'] = True
elif scope == 'attribute':
# sorry, nothing we support right now
certs = []
idp_kwargs['only_contains_attribute_certs'] = True
else:
certs = itertools.chain(ca_qs, cert_qs)
for cert in certs:
builder = builder.add_revoked_certificate(cert.get_revocation())
if ca_settings.CRYPTOGRAPHY_HAS_IDP: # pragma: no branch, pragma: only cryptography>=2.5
builder = builder.add_extension(x509.IssuingDistributionPoint(**idp_kwargs), critical=True)
# TODO: Add CRLNumber extension
# https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CRLNumber
crl = builder.sign(private_key=self.key(password), algorithm=algorithm, backend=default_backend())
return crl.public_bytes(encoding) | [
"Generate a Certificate Revocation List (CRL).\n\n The ``full_name`` and ``relative_name`` parameters describe how to retrieve the CRL and are used in\n the `Issuing Distribution Point extension <https://tools.ietf.org/html/rfc5280.html#section-5.2.5>`_.\n The former defaults to the ``crl_url`` field, pass ``None`` to not include the value. At most one of\n the two may be set.\n\n Parameters\n ----------\n\n expires : int\n The time in seconds when this CRL expires. Note that you should generate a new CRL until then.\n encoding : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or str, optional\n The encoding format for the CRL, passed to :py:func:`~django_ca.utils.parse_encoding`. The default\n value is ``\"PEM\"``.\n algorithm : :py:class:`~cg:cryptography.hazmat.primitives.hashes.Hash` or str, optional\n The hash algorithm to use, passed to :py:func:`~django_ca.utils.parse_hash_algorithm`. The default\n is to use :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>`.\n password : bytes, optional\n Password used to load the private key of the certificate authority. If not passed, the private key\n is assumed to be unencrypted.\n scope : {None, 'ca', 'user', 'attribute'}, optional\n What to include in the CRL: Use ``\"ca\"`` to include only revoked certificate authorities and\n ``\"user\"`` to include only certificates or ``None`` (the default) to include both.\n ``\"attribute\"`` is reserved for future use and always produces an empty CRL.\n full_name : list of str or :py:class:`~cg:cryptography.x509.GeneralName`, optional\n List of general names to use in the Issuing Distribution Point extension. If not passed, use\n ``crl_url`` if set.\n relative_name : :py:class:`~cg:cryptography.x509.RelativeDistinguishedName`, optional\n Used in Issuing Distribution Point extension, retrieve the CRL relative to the issuer.\n\n Returns\n -------\n\n bytes\n The CRL in the requested format.\n "
]
|
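A hypothetical call writing a DER-encoded CRL that covers only end-entity certificates; the CA name is assumed to exist and the password is only needed for an encrypted key::

    ca = CertificateAuthority.objects.get(name='Root CA')  # assumed to exist
    crl = ca.get_crl(expires=3600, encoding='DER', scope='user',
                     password=b'secret')  # only for an encrypted private key
    with open('ca.crl', 'wb') as stream:
        stream.write(crl)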
Please provide a description of the function:def pathlen(self):
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.BASIC_CONSTRAINTS)
except x509.ExtensionNotFound: # pragma: no cover - extension should always be present
return None
return ext.value.path_length | [
"The ``pathlen`` attribute of the ``BasicConstraints`` extension (either an ``int`` or ``None``)."
]
|
Please provide a description of the function:def max_pathlen(self):
pathlen = self.pathlen
if self.parent is None:
return pathlen
max_parent = self.parent.max_pathlen
if max_parent is None:
return pathlen
elif pathlen is None:
return max_parent - 1
else:
return min(self.pathlen, max_parent - 1) | [
"The maximum pathlen for any intermediate CAs signed by this CA.\n\n This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an\n ``int`` if any parent CA has the attribute.\n "
]
|
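A worked example of the recursion, with illustrative values::

    # root.pathlen == 2 and intermediate.pathlen is None:
    #   root.max_pathlen         -> 2           (no parent, own pathlen)
    #   intermediate.max_pathlen -> 2 - 1 == 1  (parent's maximum minus one)
    # so the intermediate may sign at most one further level of CAs.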
Please provide a description of the function:def bundle(self):
ca = self
bundle = [ca]
while ca.parent is not None:
bundle.append(ca.parent)
ca = ca.parent
return bundle | [
"A list of any parent CAs, including this CA.\n\n The list is ordered so the Root CA will be the first.\n "
]
|
Please provide a description of the function:def valid(self):
now = timezone.now()
return self.filter(revoked=False, expires__gt=now, valid_from__lt=now) | [
"Return valid certificates."
]
|
Please provide a description of the function:def as_extension(self):
return x509.extensions.Extension(oid=self.oid, critical=self.critical, value=self.extension_type) | [
"This extension as :py:class:`~cg:cryptography.x509.ExtensionType`."
]
|
Please provide a description of the function:def add_algorithm(self, parser):
help = 'The HashAlgorithm that will be used to generate the signature (default: %(default)s).' % {
'default': ca_settings.CA_DIGEST_ALGORITHM.name, }
parser.add_argument(
'--algorithm', metavar='{sha512,sha256,...}', default=ca_settings.CA_DIGEST_ALGORITHM,
action=AlgorithmAction, help=help) | [
"Add the --algorithm option."
]
|
Please provide a description of the function:def add_format(self, parser, default=Encoding.PEM, help_text=None, opts=None):
if opts is None:
opts = ['-f', '--format']
if help_text is None:
help_text = 'The format to use ("ASN1" is an alias for "DER", default: %(default)s).'
help_text = help_text % {'default': default.name}
parser.add_argument(*opts, metavar='{PEM,ASN1,DER}', default=default,
action=FormatAction, help=help_text) | [
"Add the --format option."
]
|
Please provide a description of the function:def process_module(self, node):
'''
process a module
'''
for listing in self.config.fileperms_ignore_paths:
if node.file.split('{0}/'.format(os.getcwd()))[-1] in glob.glob(listing):
# File is ignored, no checking should be done
return
desired_perm = self.config.fileperms_default
if '-' in desired_perm:
desired_perm = desired_perm.split('-')
else:
desired_perm = [desired_perm]
if len(desired_perm) > 2:
raise RuntimeError('Permission ranges should be like XXXX-YYYY')
for idx, perm in enumerate(desired_perm):
desired_perm[idx] = desired_perm[idx].strip('"').strip('\'').lstrip('0').zfill(4)
if desired_perm[idx][0] != '0':
# Always include a leading zero
desired_perm[idx] = '0{0}'.format(desired_perm[idx])
if sys.version_info > (3,):
# The octal representation in python 3 has changed to 0o644 instead of 0644
if desired_perm[idx][1] != 'o':
desired_perm[idx] = '0o' + desired_perm[idx][1:]
if sys.platform.startswith('win'):
# Windows does not distinguish between user/group/other.
# They must all be the same. Also, Windows will automatically
# set the execution bit on files with a known extension
# (eg .exe, .bat, .com). So we cannot reliably test the
# execution bit on other files such as .py files.
user_perm_noexec = int(desired_perm[idx][-3])
if user_perm_noexec % 2 == 1:
user_perm_noexec -= 1
desired_perm[idx] = desired_perm[idx][:-3] + (str(user_perm_noexec) * 3)
module_perms = oct(stat.S_IMODE(os.stat(node.file).st_mode))
if sys.version_info < (3,):
module_perms = str(module_perms)
if len(desired_perm) == 1:
if module_perms != desired_perm[0]:
if sys.platform.startswith('win'):
# Check the variant with execution bit set due to the
# unreliability of checking the execution bit on Windows.
user_perm_noexec = int(desired_perm[0][-3])
desired_perm_exec = desired_perm[0][:-3] + (str(user_perm_noexec + 1) * 3)
if module_perms == desired_perm_exec:
return
self.add_message('E0599', line=1, args=(desired_perm[0], module_perms))
else:
if module_perms < desired_perm[0] or module_perms > desired_perm[1]:
if sys.platform.startswith('win'):
# Check the variant with execution bit set due to the
# unreliability of checking the execution bit on Windows.
user_perm_noexec0 = int(desired_perm[0][-3])
desired_perm_exec0 = desired_perm[0][:-3] + (str(user_perm_noexec0 + 1) * 3)
user_perm_noexec1 = int(desired_perm[1][-3])
desired_perm_exec1 = desired_perm[1][:-3] + (str(user_perm_noexec1 + 1) * 3)
if desired_perm_exec0 <= module_perms <= desired_perm_exec1:
return
desired_perm = '>= {0} OR <= {1}'.format(*desired_perm)
self.add_message('E0599', line=1, args=(desired_perm, module_perms)) | []
|
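The permission normalization applied above can be illustrated in isolation; this is assumed behaviour, shown outside the checker::

    import sys

    perm = '644'.strip('"').strip("'").lstrip('0').zfill(4)  # -> '0644'
    if perm[0] != '0':
        perm = '0{0}'.format(perm)
    if sys.version_info > (3,):
        if perm[1] != 'o':
            perm = '0o' + perm[1:]                           # -> '0o644'
    # '0o644' now matches oct(stat.S_IMODE(os.stat(path).st_mode)) on py3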
Please provide a description of the function:def _parse_requirements_file(requirements_file):
'''
Parse requirements.txt and return list suitable for
passing to ``install_requires`` parameter in ``setup()``.
'''
parsed_requirements = []
with open(requirements_file) as rfh:
for line in rfh.readlines():
line = line.strip()
if not line or line.startswith(('#', '-r')):
continue
parsed_requirements.append(line)
return parsed_requirements | []
|
Please provide a description of the function:def _release_version():
'''
Returns release version
'''
with io.open(os.path.join(SETUP_DIRNAME, 'saltpylint', 'version.py'), encoding='utf-8') as fh_:
exec_locals = {}
exec_globals = {}
contents = fh_.read()
if not isinstance(contents, str):
contents = contents.encode('utf-8')
exec(contents, exec_globals, exec_locals) # pylint: disable=exec-used
return exec_locals['__version__'] | []
|
Please provide a description of the function:def process_module(self, node):
'''
process a module
'''
# Patch lib2to3.fixer_util.touch_import!
fixer_util.touch_import = salt_lib2to3_touch_import
flags = {}
if self.config.modernize_print_function:
flags['print_function'] = True
salt_avail_fixes = set(
refactor.get_fixers_from_package(
'saltpylint.py3modernize.fixes'
)
)
avail_fixes = set(refactor.get_fixers_from_package('libmodernize.fixes'))
avail_fixes.update(lib2to3_fix_names)
avail_fixes.update(salt_avail_fixes)
default_fixes = avail_fixes.difference(opt_in_fix_names)
unwanted_fixes = set(self.config.modernize_nofix)
# Explicitly disable libmodernize.fixes.fix_dict_six since we have our own implementation
# which only fixes `dict.iter<items|keys|values>()` calls
unwanted_fixes.add('libmodernize.fixes.fix_dict_six')
if self.config.modernize_six_unicode:
unwanted_fixes.add('libmodernize.fixes.fix_unicode_future')
elif self.config.modernize_future_unicode:
unwanted_fixes.add('libmodernize.fixes.fix_unicode')
else:
unwanted_fixes.add('libmodernize.fixes.fix_unicode_future')
unwanted_fixes.add('libmodernize.fixes.fix_unicode')
if self.config.modernize_no_six:
unwanted_fixes.update(six_fix_names)
unwanted_fixes.update(salt_avail_fixes)
else:
# We explicitly will remove fix_imports_six from libmodernize and will add
# our own fix_imports_six
unwanted_fixes.add('libmodernize.fixes.fix_imports_six')
# Remove a bunch of libmodernize.fixes since we need to properly skip them
# and we provide the proper skip rule
unwanted_fixes.add('libmodernize.fixes.fix_input_six')
unwanted_fixes.add('libmodernize.fixes.fix_filter')
unwanted_fixes.add('libmodernize.fixes.fix_map')
unwanted_fixes.add('libmodernize.fixes.fix_xrange_six')
unwanted_fixes.add('libmodernize.fixes.fix_zip')
explicit = set()
if self.config.modernize_fix:
default_present = False
for fix in self.config.modernize_fix:
if fix == 'default':
default_present = True
else:
explicit.add(fix)
requested = default_fixes.union(explicit) if default_present else explicit
else:
requested = default_fixes
fixer_names = requested.difference(unwanted_fixes)
rft = PyLintRefactoringTool(sorted(fixer_names), flags, sorted(explicit))
try:
rft.refactor_file(node.file,
write=False,
doctests_only=self.config.modernize_doctests_only)
except ParseError as exc:
# Unable to refactor, let's not make PyLint crash
try:
lineno = exc.context[1][0]
line_contents = node.file_stream.readlines()[lineno-1].rstrip()
self.add_message('W1698', line=lineno, args=line_contents)
except Exception: # pylint: disable=broad-except
self.add_message('W1698', line=1, args=exc)
return
except AssertionError as exc:
self.add_message('W1698', line=1, args=exc)
return
except (IOError, OSError) as exc:
logging.getLogger(__name__).warning('Error while processing {0}: {1}'.format(node.file, exc))
return
for lineno, diff in rft.diff:
# Since PyLint's python3 checker uses <Type>16<int><int>, we'll also use that range
self.add_message('W1699', line=lineno, args=diff)
# Restore lib2to3.fixer_util.touch_import!
fixer_util.touch_import = FIXER_UTIL_TOUCH_IMPORT | []
|
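The fixer selection above is plain set algebra over fixer names; a minimal sketch using only lib2to3 (the saltpylint and libmodernize fixer packages are omitted, and note that lib2to3 is deprecated in recent Pythons):

from lib2to3 import refactor

# All fixer names shipped with lib2to3, e.g. 'lib2to3.fixes.fix_print'
avail_fixes = set(refactor.get_fixers_from_package('lib2to3.fixes'))

# Same pattern as the checker: subtract unwanted fixers, then refactor
# in memory without writing anything back
unwanted_fixes = {'lib2to3.fixes.fix_print'}
fixer_names = avail_fixes.difference(unwanted_fixes)

rft = refactor.RefactoringTool(sorted(fixer_names))
tree = rft.refactor_string('for x in xrange(3): pass\n', '<example>')
print(tree)  # xrange() has been rewritten by the remaining fixers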
Please provide a description of the function:def visit_functiondef(self, node):
'''
Verifies no logger statements inside __virtual__
'''
if (not isinstance(node, astroid.FunctionDef)
        or node.is_method()
        or node.type != 'function'
        or not node.body):
# only process functions
return
try:
if node.name != '__virtual__':
# only need to process the __virtual__ function
return
except AttributeError:
return
# walk contents of __virtual__ function
for child in node.get_children():
for functions in child.get_children():
if isinstance(functions, astroid.Call):
if isinstance(functions.func, astroid.Attribute):
try:
# Inspect each statement for an instance of 'logging'
for inferred in functions.func.expr.infer():
try:
instance_type = inferred.pytype().split('.')[0]
except TypeError:
continue
if instance_type == 'logging':
self.add_message(
self.VIRT_LOG, node=functions
)
# Found logger, don't need to keep processing this line
break
except AttributeError:
# Not a log function
return | []
|
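For reference, a hedged example of the pattern this visitor reports; the module content is invented:

import logging

log = logging.getLogger(__name__)

def __virtual__():
    # A logging call inside __virtual__ is exactly what the visitor
    # infers an instance of 'logging' for and reports
    log.debug('checking whether dependencies are importable')
    return True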
Please provide a description of the function:def process_module(self, node):
'''
process a module
the module's content is accessible via the node.file_stream object
'''
pep263 = re.compile(six.b(self.RE_PEP263))
try:
file_stream = node.file_stream
except AttributeError:
# Pylint >= 1.8.1
file_stream = node.stream()
# Store a reference to the node's file stream position
current_stream_position = file_stream.tell()
# Go to the start of stream to achieve our logic
file_stream.seek(0)
# Grab the first two lines
twolines = list(itertools.islice(file_stream, 2))
pep263_encoding = [m.group(1).lower() for l in twolines for m in [pep263.search(l)] if m]
multiple_encodings = len(pep263_encoding) > 1
file_empty = len(twolines) == 0
# Reset the node's file stream position
file_stream.seek(current_stream_position)
# - If the file has an UTF-8 BOM and yet uses any other
# encoding, it will be caught by F0002
# - If the file has a PEP263 UTF-8 encoding and yet uses any
# other encoding, it will be caught by W0512
# - If there are non-ASCII characters and no PEP263, or UTF-8
# BOM, it will be caught by W0512
# - If there are ambiguous PEP263 encodings it will be caught
# by E0001; we still test for this
if multiple_encodings:
self.add_message('W9901', line=1)
if node.file_encoding:
pylint_encoding = node.file_encoding.lower()
if six.PY3:
pylint_encoding = pylint_encoding.encode('utf-8')
if pep263_encoding and pylint_encoding not in pep263_encoding:
self.add_message('W9902', line=1)
if not pep263_encoding:
if file_empty:
self.add_message('W9905', line=1)
else:
self.add_message('W9903', line=1)
elif self.REQ_ENCOD not in pep263_encoding:
self.add_message('W9904', line=1) | []
|
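A minimal sketch of the PEP 263 match performed above, assuming RE_PEP263 is close to the standard coding-declaration pattern (its exact value is not shown in this excerpt):

import re

# Simplified PEP 263 pattern; the checker's RE_PEP263 is assumed similar
RE_PEP263 = r'coding[:=]\s*([-\w.]+)'
pep263 = re.compile(RE_PEP263.encode('utf-8'))

twolines = [b'#!/usr/bin/env python\n', b'# -*- coding: utf-8 -*-\n']
encodings = [m.group(1).lower() for l in twolines
             for m in [pep263.search(l)] if m]
print(encodings)  # [b'utf-8']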
Please provide a description of the function:def get_versions(source):
tree = compiler.parse(source)
checker = compiler.walk(tree, NodeChecker())
return checker.vers | [
"Return information about the Python versions required for specific features.\n\n The return value is a dictionary with keys as a version number as a tuple\n (for example Python 2.6 is (2,6)) and the value are a list of features that\n require the indicated Python version.\n "
]
|
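Note that the compiler module used here is Python 2 only (it was removed in Python 3), so this helper runs only under Python 2. A hedged usage sketch; the exact feature strings depend on NodeChecker, which is not shown in this excerpt:

source = (
    "with open('f') as fh:\n"
    "    pass\n"
)
for version, features in sorted(get_versions(source).items()):
    print(version, features)
# Rough expected shape: (2, 5) ['with statement']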
Please provide a description of the function:def register(linter):
'''required method to auto register this checker '''
linter.register_checker(StringCurlyBracesFormatIndexChecker(linter))
linter.register_checker(StringLiteralChecker(linter)) | []
|
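A hedged sketch of how a register() hook like this gets exercised; the plugin module path is assumed for illustration:

from pylint.lint import PyLinter

import saltpylint.strings  # assumed module containing this register()

linter = PyLinter()
saltpylint.strings.register(linter)

# PyLint does the same itself when invoked with, e.g.:
#     pylint --load-plugins=saltpylint.strings yourmodule.py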
Please provide a description of the function:def process_non_raw_string_token(self, prefix, string_body, start_row):
'''
check for bad escapes in a non-raw string.
prefix: lowercase string of eg 'ur' string prefix markers.
string_body: the un-parsed body of the string, not including the quote
marks.
start_row: integer line number in the source.
'''
if 'u' in prefix:
if string_body.find('\\0') != -1:
self.add_message('null-byte-unicode-literal', line=start_row) | []
|
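A short, invented example of the literal this method targets:

# Fires on unicode literals whose raw body contains an escaped null byte
flagged = u'before\0after'  # prefix has 'u', raw body has '\\0' -> reported
plain = 'before\0after'     # no 'u' prefix -> not inspected by this method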
Please provide a description of the function:def register(linter):
'''
Required method to auto register this checker
'''
linter.register_checker(ResourceLeakageChecker(linter))
linter.register_checker(BlacklistedImportsChecker(linter))
linter.register_checker(MovedTestCaseClassChecker(linter))
linter.register_checker(BlacklistedLoaderModulesUsageChecker(linter))
linter.register_checker(BlacklistedFunctionsChecker(linter)) | []
|
Please provide a description of the function:def visit_import(self, node):
'''triggered when an import statement is seen'''
module_filename = node.root().file
if fnmatch.fnmatch(module_filename, '__init__.py*') and \
not fnmatch.fnmatch(module_filename, 'test_*.py*'):
return
names = [name for name, _ in node.names]
for name in names:
self._check_blacklisted_module(node, name) | []
|
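A minimal sketch of the filename gate above, using fnmatch directly (the blacklist lookup itself lives in _check_blacklisted_module, which is not shown here):

import fnmatch

# Mirror the early-return condition: skip package __init__ files unless
# they look like test modules; everything else proceeds to the check
for module_filename in ('__init__.py', 'test_mod.py', 'pkg/util.py'):
    skipped = (fnmatch.fnmatch(module_filename, '__init__.py*') and
               not fnmatch.fnmatch(module_filename, 'test_*.py*'))
    print(module_filename, 'skipped' if skipped else 'checked')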