Columns: desc (strings, 3–26.7k chars), decl (strings, 11–7.89k chars), bodies (strings, 8–553k chars).
'If the output file exists, delete it and start over.'
def setup(self):
if os.path.exists(__OUTPUT__): os.remove(__OUTPUT__)
'The open_pep_index function opens the byte stream of the PEP 0 HTML document found in the download/ directory.'
def open_pep_index(self):
    with open('download/pep0.html', 'rb') as f:
        content = f.read()
    return content
'This function parses PEP 0 and retrieves all the relevant URLs containing the various PEPs. It adds all the links with a PEP identifier to a set called links. A set is used to filter out any duplicates and for read efficiency. A conditional at the end of this function checks that this was successful; if it wasn\'t, the program exits. Consider checking your internet connection if this fails.'
def get_pep_urls(self):
    soup = BeautifulSoup(self.open_pep_index(), 'html.parser', from_encoding='UTF-8')
    for link in soup.findAll('a', href=True, text=re.compile('^[0-9]*$')):
        self.links.add(link)
'Fetches the contents of the PEP page from its URL.'
def get_pep_contents(self, link):
    self.url = (__PYBASE__ + link['href'])
    html = request.urlopen(self.url)
    self.set_pep_document(html.read().decode('utf-8'))
'Builds the BeautifulSoup document from the raw HTML.'
def set_pep_document(self, doc):
self.document = BeautifulSoup(doc, 'html.parser')
'Sets the title for the PEP'
def set_pep_title(self):
    peptitle = self.document.find('h1', {'class': 'page-title'}).get_text()
    self.long_title = re.sub('--', '-', peptitle)
    self.title = ('PEP ' + self.pep)
    self.pep_db.add(self.title)
'Sets the PEP number. For example, if the PEP is identified as PEP 0008, it will be stored as 8. If the PEP is identified as PEP 0143 it will be stored as 143.'
def set_pep_number(self):
    pep = re.findall('\\d+', self.url)[0]
    self.pep = pep.lstrip('0')
'Parses the raw document and cleans the text'
def set_pep_body(self):
try: html = self.document.find('div', {'id': ['abstract', 'introduction', 'rationale', 'motivation', 'what-is-a-pep', 'overview', 'improving-python-zip-application-support', 'scope', 'abstract-and-rationale', 'rationale-and-goals', 'specification']}) if (html is None): raise AttributeError html = FHTEMPLATE.format(information=str(html), title=self.long_title) html = html.rstrip().replace(' DCTB ', ' ') html = re.sub(re.compile('\\[\\d+\\]'), '', html) html = re.sub(re.compile('<h1>((.|\n)+?)</h1>'), '', html) html = html.replace('<pre class="literal-block">', '<pre><code>') html = html.replace('</pre>', '</code></pre>') html = html.replace('<tt class="docutils literal">', '<code>') html = html.replace('</tt>', '</code>') soup = BeautifulSoup(html, 'html.parser') for match in soup.findAll(['div']): match.unwrap() for match in soup.findAll(['a']): match.unwrap() html = soup.contents[0] html = str(html) html = re.sub(re.compile('\n'), '\\n', html) html = re.sub('<p>[\\n\\s]+<p>[\\n\\s]*|<(\\/)p>[\\n\\s]+<\\/p>[\\n\\s]*', '<\\1p>', html) self.body = repr(html).replace("'", '') print ('>>' * 20) print self.body print ('<<' * 20) except AttributeError: self.body = '' except: print ('Parse Error: Investigate ' + self.pep)
'Sets the structure for the output.txt file'
def set_structure(self):
    if ((int(self.pep) not in [205, 210, 308]) and (self.body != '')):
        entry = OUTPUT_TEMPLATE.format(title=self.title, entry_type='A', redirect_title='', empty_field='', categories='', related_topics='', external_links='', disambiguation='', image='', abstract=self.body, url=self.url)
        self.collection.add(entry)
'Initialize the HtmlFileData object and load data from the downloaded docs'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open HTML File and load data'
def load_data(self):
    with open(self.FILE, 'r') as html_file:
        document = html_file.read()
        self.HTML = document
'Returns the Plain HTML'
def get_raw_data(self):
return self.HTML
'Return the file path'
def get_file(self):
    return self.FILE
'Initialize APIDocsParser object with API Reference HTML'
def __init__(self, data):
    self.data = self.get_api_reference_html(data)
    self.parsed_data = []
'Parses the HTML File Text and returns the API Reference soup'
def get_api_reference_html(self, data):
    soup = BeautifulSoup(data.get_raw_data(), 'html.parser')
    reference = soup.find('a', attrs={'name': 'reference'})
    if reference:
        reference_soup = reference.find_parent().find_parent()
        return reference_soup
'Parses an individual API entry and extracts its title, link, content and example code'
def parse_data(self, file_data):
    if self.data:
        all_api_reference = self.data.findAll(['h4', 'h3'])
        for api in all_api_reference:
            title = api.text.replace(' #', '')
            href = self.parse_link(file_data, api)
            content = self.parse_content(api)
            example = self.parse_example(api)
            section = {'title': title, 'href': href, 'content': content, 'example': example}
            self.parsed_data.append(section)
'Extract the Example Code Snippet'
def parse_example(self, api):
example_code = '' for tag in api.next_siblings: if (not isinstance(tag, element.Tag)): continue if (tag.name == 'div'): code = str(tag.find('pre')) if (code != 'None'): example_code = tag.text.strip() example_code += '\n' return example_code.replace('\n', '\\n')
'Extracts the Abstract from API Docs'
def parse_content(self, api):
abstract = '' for tag in api.next_siblings: if (not isinstance(tag, element.Tag)): continue if ((tag.name == 'hr') or (tag.name == 'blockquote')): break elif (tag.name == 'div'): continue else: abstract += str(tag) abstract = self.remove_anchor(abstract) abstract += '\n' abstract = abstract.replace('\n\n', '\n') return abstract.replace('\n', '\\n')
'Forms the API Docs Link'
def parse_link(self, data, api):
return ((REACT_API_DOCS_URL + data.FILE.split('/')[1]) + api.find('a', attrs={'class': 'hash-link'}).attrs['href'])
'Returns the API List'
def get_data(self):
return self.parsed_data
'Initialize with parsed api data list and name of the output file'
def __init__(self, api_data, output_file):
    self.data = api_data
    self.output_file = output_file
'Create the output file using the parsed data'
def create_file(self):
    for data_element in self.data:
        title = data_element['title']
        anchor = data_element['href']
        example = data_element['example']
        content = data_element['content']
        if example:
            example = ('<pre><code>%s</code></pre>' % example)
        abstract = '<section class="prog__container">{}{}</section>'.format(content, example)
        list_of_data = [title, 'A', '', '', '', '', '', '', '', '', '', abstract, anchor]
        self.output_file.write('{}\n'.format('\t'.join(list_of_data)))
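For reference, a minimal sketch of the tab-separated line create_file emits per API entry, assuming the 13-column Fathead output.txt layout implied above (the title, abstract and URL values here are made up, not taken from the source):

entry_title = 'useState'
entry_abstract = '<section class="prog__container">State hook docs<pre><code>const [count, setCount] = useState(0);</code></pre></section>'
entry_anchor = 'https://example.com/docs/hooks-reference.html#usestate'  # hypothetical URL
# Column order: title, type ('A' = article), nine unused fields, abstract, source URL.
list_of_data = [entry_title, 'A', '', '', '', '', '', '', '', '', '', entry_abstract, entry_anchor]
print('\t'.join(list_of_data))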
'Parse the packages table into Package objects, dropping consecutive rows that share a name'
def get_packages(self):
    self.packages = []
    table = self.soup.find('table')
    for row in table.find_all('tr')[1:]:
        data = row.find_all('td')
        name = data[2].a.getText()
        reference = (self.ARCHLINUX_URL + data[2].a['href'])
        info = data[4].getText()
        arch = data[0].getText()
        package = Package(name, info, reference, arch)
        self.packages.append(package)
        if (len(self.packages) >= 2):
            if (self.packages[(-1)].name == self.packages[(-2)].name):
                del self.packages[(-1)]
'Update line with last point and current coordinates.'
def mouseMoveEvent(self, ev):
pos = self.transformPos(ev.pos()) self.restoreCursor() if self.drawing(): self.overrideCursor(CURSOR_DRAW) if self.current: color = self.lineColor if self.outOfPixmap(pos): pos = self.intersectionPoint(self.current[(-1)], pos) elif ((len(self.current) > 1) and self.closeEnough(pos, self.current[0])): pos = self.current[0] color = self.current.line_color self.overrideCursor(CURSOR_POINT) self.current.highlightVertex(0, Shape.NEAR_VERTEX) self.line[1] = pos self.line.line_color = color self.repaint() self.current.highlightClear() return if (Qt.RightButton & ev.buttons()): if (self.selectedShapeCopy and self.prevPoint): self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShapeCopy, pos) self.repaint() elif self.selectedShape: self.selectedShapeCopy = self.selectedShape.copy() self.repaint() return if (Qt.LeftButton & ev.buttons()): if self.selectedVertex(): self.boundedMoveVertex(pos) self.shapeMoved.emit() self.repaint() elif (self.selectedShape and self.prevPoint): self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShape, pos) self.shapeMoved.emit() self.repaint() return self.setToolTip('Image') for shape in reversed([s for s in self.shapes if self.isVisible(s)]): index = shape.nearestVertex(pos, self.epsilon) if (index is not None): if self.selectedVertex(): self.hShape.highlightClear() (self.hVertex, self.hShape) = (index, shape) shape.highlightVertex(index, shape.MOVE_VERTEX) self.overrideCursor(CURSOR_POINT) self.setToolTip('Click & drag to move point') self.setStatusTip(self.toolTip()) self.update() break elif shape.containsPoint(pos): if self.selectedVertex(): self.hShape.highlightClear() (self.hVertex, self.hShape) = (None, shape) self.setToolTip(("Click & drag to move shape '%s'" % shape.label)) self.setStatusTip(self.toolTip()) self.overrideCursor(CURSOR_GRAB) self.update() break else: if self.hShape: self.hShape.highlightClear() self.update() (self.hVertex, self.hShape) = (None, None)
'Select the first shape created which contains this point.'
def selectShapePoint(self, point):
self.deSelectShape() if self.selectedVertex(): (index, shape) = (self.hVertex, self.hShape) shape.highlightVertex(index, shape.MOVE_VERTEX) return for shape in reversed(self.shapes): if (self.isVisible(shape) and shape.containsPoint(point)): shape.selected = True self.selectedShape = shape self.calculateOffsets(shape, point) self.setHiding() self.selectionChanged.emit(True) return
'Convert from widget-logical coordinates to painter-logical coordinates.'
def transformPos(self, point):
return ((point / self.scale) - self.offsetToCenter())
'For each edge formed by `points`, yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)` to the middle of the edge along with its index, so that the closest one can be chosen.'
def intersectingEdges(self, x1y1, x2y2, points):
    (x1, y1) = x1y1
    (x2, y2) = x2y2
    for i in range(4):
        (x3, y3) = points[i]
        (x4, y4) = points[((i + 1) % 4)]
        denom = (((y4 - y3) * (x2 - x1)) - ((x4 - x3) * (y2 - y1)))
        nua = (((x4 - x3) * (y1 - y3)) - ((y4 - y3) * (x1 - x3)))
        nub = (((x2 - x1) * (y1 - y3)) - ((y2 - y1) * (x1 - x3)))
        if (denom == 0):
            continue
        (ua, ub) = ((nua / denom), (nub / denom))
        if ((0 <= ua <= 1) and (0 <= ub <= 1)):
            x = (x1 + (ua * (x2 - x1)))
            y = (y1 + (ua * (y2 - y1)))
            m = QPointF(((x3 + x4) / 2), ((y3 + y4) / 2))
            d = distance((m - QPointF(x2, y2)))
            (yield (d, i, (x, y)))
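The denom/ua/ub expressions above are the standard parametric segment-intersection test; a self-contained sketch of the same math (Python 3, illustrative coordinates) is:

def segment_intersection(p1, p2, p3, p4):
    # Segments p1-p2 and p3-p4 intersect where both parameters ua and ub lie in [0, 1].
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = p1, p2, p3, p4
    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if denom == 0:
        return None  # parallel (or coincident) segments
    ua = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denom
    ub = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denom
    if 0 <= ua <= 1 and 0 <= ub <= 1:
        return (x1 + ua * (x2 - x1), y1 + ua * (y2 - y1))
    return None

print(segment_intersection((0.0, 0.0), (2.0, 2.0), (0.0, 2.0), (2.0, 0.0)))  # (1.0, 1.0)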
'Return a pretty-printed XML string for the Element.'
def prettify(self, elem):
    rough_string = ElementTree.tostring(elem, 'utf8')
    root = etree.fromstring(rough_string)
    return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), '\t'.encode())
    # reparsed = minidom.parseString(rough_string)
    # return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)
'Return XML root'
def genXML(self):
if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)): return None top = Element('annotation') if self.verified: top.set('verified', 'yes') folder = SubElement(top, 'folder') folder.text = self.foldername filename = SubElement(top, 'filename') filename.text = self.filename if (self.localImgPath is not None): localImgPath = SubElement(top, 'path') localImgPath.text = self.localImgPath source = SubElement(top, 'source') database = SubElement(source, 'database') database.text = self.databaseSrc size_part = SubElement(top, 'size') width = SubElement(size_part, 'width') height = SubElement(size_part, 'height') depth = SubElement(size_part, 'depth') width.text = str(self.imgSize[1]) height.text = str(self.imgSize[0]) if (len(self.imgSize) == 3): depth.text = str(self.imgSize[2]) else: depth.text = '1' segmented = SubElement(top, 'segmented') segmented.text = '0' return top
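To make the resulting structure concrete, here is a small sketch of the Pascal VOC skeleton genXML assembles (the folder, filename and size values are placeholders, not taken from the source):

from xml.etree.ElementTree import Element, SubElement, tostring

top = Element('annotation')
SubElement(top, 'folder').text = 'images'      # self.foldername
SubElement(top, 'filename').text = 'dog.jpg'   # self.filename
size_part = SubElement(top, 'size')
SubElement(size_part, 'width').text = '640'    # imgSize[1]
SubElement(size_part, 'height').text = '480'   # imgSize[0]
SubElement(size_part, 'depth').text = '3'      # imgSize[2] if present, else '1'
SubElement(top, 'segmented').text = '0'
print(tostring(top).decode())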
'Enable/Disable widgets which depend on an opened image.'
def toggleActions(self, value=True):
    for z in self.actions.zoomActions:
        z.setEnabled(value)
    for action in self.actions.onLoadActive:
        action.setEnabled(value)
'In the middle of drawing, toggling between modes should be disabled.'
def toggleDrawingSensitive(self, drawing=True):
self.actions.editMode.setEnabled((not drawing)) if ((not drawing) and self.beginner()): print 'Cancel creation.' self.canvas.setEditing(True) self.canvas.restoreCursor() self.actions.create.setEnabled(True)
'Function to handle difficult examples; updates the flag on each object.'
def btnstate(self, item=None):
if (not self.canvas.editing()): return item = self.currentItem() if (not item): item = self.labelList.item((self.labelList.count() - 1)) difficult = self.diffcButton.isChecked() try: shape = self.itemsToShapes[item] except: pass try: if (difficult != shape.difficult): shape.difficult = difficult self.setDirty() else: self.canvas.setShapeVisible(shape, (item.checkState() == Qt.Checked)) except: pass
'Pop-up and give focus to the label editor. position MUST be in global coordinates.'
def newShape(self):
if ((not self.useDefaultLabelCheckbox.isChecked()) or (not self.defaultLabelTextLine.text())): if (len(self.labelHist) > 0): self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist) if (self.singleClassMode.isChecked() and self.lastLabel): text = self.lastLabel else: text = self.labelDialog.popUp(text=self.prevLabelText) self.lastLabel = text else: text = self.defaultLabelTextLine.text() self.diffcButton.setChecked(False) if (text is not None): self.prevLabelText = text self.addLabel(self.canvas.setLastLabel(text)) if self.beginner(): self.canvas.setEditing(True) self.actions.create.setEnabled(True) else: self.actions.editMode.setEnabled(True) self.setDirty() if (text not in self.labelHist): self.labelHist.append(text) else: self.canvas.resetAllLines()
'Load the specified file, or the last opened file if None.'
def loadFile(self, filePath=None):
self.resetState() self.canvas.setEnabled(False) if (filePath is None): filePath = self.settings.get(SETTING_FILENAME) unicodeFilePath = ustr(filePath) if (unicodeFilePath and (self.fileListWidget.count() > 0)): index = self.mImgList.index(unicodeFilePath) fileWidgetItem = self.fileListWidget.item(index) fileWidgetItem.setSelected(True) if (unicodeFilePath and os.path.exists(unicodeFilePath)): if LabelFile.isLabelFile(unicodeFilePath): try: self.labelFile = LabelFile(unicodeFilePath) except LabelFileError as e: self.errorMessage(u'Error opening file', (u'<p><b>%s</b></p><p>Make sure <i>%s</i> is a valid label file.' % (e, unicodeFilePath))) self.status(('Error reading %s' % unicodeFilePath)) return False self.imageData = self.labelFile.imageData self.lineColor = QColor(*self.labelFile.lineColor) self.fillColor = QColor(*self.labelFile.fillColor) else: self.imageData = read(unicodeFilePath, None) self.labelFile = None image = QImage.fromData(self.imageData) if image.isNull(): self.errorMessage(u'Error opening file', (u'<p>Make sure <i>%s</i> is a valid image file.' % unicodeFilePath)) self.status(('Error reading %s' % unicodeFilePath)) return False self.status(('Loaded %s' % os.path.basename(unicodeFilePath))) self.image = image self.filePath = unicodeFilePath self.canvas.loadPixmap(QPixmap.fromImage(image)) if self.labelFile: self.loadLabels(self.labelFile.shapes) self.setClean() self.canvas.setEnabled(True) self.adjustScale(initial=True) self.paintCanvas() self.addRecentFile(self.filePath) self.toggleActions(True) if (self.usingPascalVocFormat is True): if (self.defaultSaveDir is not None): basename = (os.path.basename(os.path.splitext(self.filePath)[0]) + XML_EXT) xmlPath = os.path.join(self.defaultSaveDir, basename) self.loadPascalXMLByFilename(xmlPath) else: xmlPath = (os.path.splitext(filePath)[0] + XML_EXT) if os.path.isfile(xmlPath): self.loadPascalXMLByFilename(xmlPath) self.setWindowTitle(((__appname__ + ' ') + filePath)) if self.labelList.count(): self.labelList.setCurrentItem(self.labelList.item((self.labelList.count() - 1))) self.labelList.item((self.labelList.count() - 1)).setSelected(True) self.canvas.setFocus(True) return True return False
'Figure out the size of the pixmap in order to fit the main widget.'
def scaleFitWindow(self):
e = 2.0 w1 = (self.centralWidget().width() - e) h1 = (self.centralWidget().height() - e) a1 = (w1 / h1) w2 = (self.canvas.pixmap.width() - 0.0) h2 = (self.canvas.pixmap.height() - 0.0) a2 = (w2 / h2) return ((w1 / w2) if (a2 >= a1) else (h1 / h2))
'Create a statefile, with the offset of the end of the log file. Override if your tailer implementation can do this more efficiently'
def create_statefile(self):
for _ in self.ireadlines(): pass
'Return a generator over lines in the logfile, updating the statefile when the generator is exhausted'
def ireadlines(self):
raise NotImplementedError()
'Specify Amazon CloudWatch params'
def __init__(self, key, secret_key, metric):
self.base_url = 'monitoring.ap-northeast-1.amazonaws.com' self.key = key self.secret_key = secret_key self.metric = metric
'Get the instance id from the Amazon metadata server'
def get_instance_id(self, instance_id=None):
self.instance_id = instance_id if (self.instance_id is None): try: conn = HTTPConnection('169.254.169.254') conn.request('GET', '/latest/meta-data/instance-id') except Exception: raise CloudWatchException("Can't connect Amazon meta data server to get InstanceID : (%s)") self.instance_id = conn.getresponse().read() return self
'build signed parameters following http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html'
def get_signed_url(self):
    keys = sorted(self.url_params)
    values = map(self.url_params.get, keys)
    url_string = urlencode(list(zip(keys, values)))
    string_to_sign = ('GET\n%s\n/\n%s' % (self.base_url, url_string))
    try:
        if (sys.version_info[:2] == (2, 5)):
            signature = hmac.new(key=self.secret_key, msg=string_to_sign, digestmod=hashlib.sha256).digest()
        else:
            signature = hmac.new(key=bytes(self.secret_key), msg=bytes(string_to_sign), digestmod=hashlib.sha256).digest()
    except TypeError:
        signature = hmac.new(key=self.secret_key.encode('utf-8'), msg=string_to_sign.encode('utf-8'), digestmod=hashlib.sha256).digest()
    signature = base64.encodestring(signature).strip()
    urlencoded_signature = quote_plus(signature)
    url_string += ('&Signature=%s' % urlencoded_signature)
    return ('/?' + url_string)
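The body above follows the AWS Signature Version 2 scheme: sort the query parameters, build the canonical GET / host / path / query string, sign it with HMAC-SHA256, then base64-encode and URL-encode the result. A Python 3 sketch with a dummy host, parameters and key (no real credentials or request):

import base64, hashlib, hmac
from urllib.parse import urlencode, quote_plus

host = 'monitoring.ap-northeast-1.amazonaws.com'
params = {'Action': 'PutMetricData', 'Version': '2010-08-01', 'Namespace': 'logster'}  # illustrative only
query = urlencode(sorted(params.items()))
string_to_sign = 'GET\n%s\n/\n%s' % (host, query)
digest = hmac.new(b'dummy-secret-key', string_to_sign.encode('utf-8'), hashlib.sha256).digest()
signature = quote_plus(base64.b64encode(digest).decode('ascii'))
print('/?%s&Signature=%s' % (query, signature))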
'Take a line and do any parsing we need to do. Required for parsers'
def parse_line(self, line):
raise RuntimeError('Implement me!')
'Run any calculations needed and return list of metric objects'
def get_state(self, duration):
raise RuntimeError('Implement me!')
'Convenience method for constructing metric names. Takes into account any supplied prefix/suffix options.'
def get_metric_name(self, metric, separator='.'):
    metric_name = metric.name
    if self.options.metric_prefix:
        metric_name = ((self.options.metric_prefix + separator) + metric_name)
    if self.options.metric_suffix:
        metric_name = ((metric_name + separator) + self.options.metric_suffix)
    return metric_name
'Send metrics to the specific output'
def submit(self, metrics):
raise RuntimeError('Implement me!')
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.notice = 0 self.warn = 0 self.error = 0 self.crit = 0 self.other = 0 self.reg = re.compile('^\\[[^]]+\\] \\[(?P<loglevel>\\w+)\\] .*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() level = linebits['loglevel'] if (level == 'notice'): self.notice += 1 elif (level == 'warn'): self.warn += 1 elif (level == 'error'): self.error += 1 elif (level == 'crit'): self.crit += 1 else: self.other += 1 else: raise LogsterParsingException('regmatch failed to match') except Exception as e: raise LogsterParsingException(('regmatch or contents failed with %s' % e))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = (duration / 10.0) return [MetricObject('notice', (self.notice / self.duration), 'Logs per 10 sec'), MetricObject('warn', (self.warn / self.duration), 'Logs per 10 sec'), MetricObject('error', (self.error / self.duration), 'Logs per 10 sec'), MetricObject('crit', (self.crit / self.duration), 'Logs per 10 sec'), MetricObject('other', (self.other / self.duration), 'Logs per 10 sec')]
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.numSent = 0 self.numDeferred = 0 self.numBounced = 0 self.totalDelay = 0 self.numRbl = 0 self.reg = re.compile('.*delay=(?P<send_delay>[^,]+),.*status=(?P<status>(sent|deferred|bounced))')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() if (linebits['status'] == 'sent'): self.totalDelay += float(linebits['send_delay']) self.numSent += 1 elif (linebits['status'] == 'deferred'): self.numDeferred += 1 elif (linebits['status'] == 'bounced'): self.numBounced += 1 except Exception as e: raise LogsterParsingException(('regmatch or contents failed with %s' % e))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) totalTxns = ((self.numSent + self.numBounced) + self.numDeferred) pctDeferred = 0.0 pctSent = 0.0 pctBounced = 0.0 avgDelay = 0 mailTxnsSec = 0 mailSentSec = 0 if (totalTxns > 0): pctDeferred = ((float(self.numDeferred) / totalTxns) * 100) pctSent = ((float(self.numSent) / totalTxns) * 100) pctBounced = ((float(self.numBounced) / totalTxns) * 100) if (self.numSent > 0): avgDelay = (self.totalDelay / self.numSent) if (self.duration > 0): mailTxnsSec = (totalTxns / self.duration) mailSentSec = (self.numSent / self.duration) return [MetricObject('numSent', self.numSent, 'Total Sent'), MetricObject('pctSent', pctSent, 'Percentage Sent'), MetricObject('numDeferred', self.numDeferred, 'Total Deferred'), MetricObject('pctDeferred', pctDeferred, 'Percentage Deferred'), MetricObject('numBounced', self.numBounced, 'Total Bounced'), MetricObject('pctBounced', pctBounced, 'Percentage Bounced'), MetricObject('mailTxnsSec', mailTxnsSec, 'Transactions per sec'), MetricObject('mailSentSec', mailSentSec, 'Sends per sec'), MetricObject('avgDelay', avgDelay, 'Average Sending Delay')]
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.metrics = {} if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--key-separator', '-k', dest='key_separator', default='.', help="Key separator for flattened json object key name. Default: '.'") (opts, args) = optparser.parse_args(args=options) self.key_separator = opts.key_separator
'Default key_filter method. Override and implement this method if you want to do any filtering or transforming on specific keys in your JSON object.'
def key_filter(self, key):
return key
'Recurses through dicts and/or lists and flattens them into a single level dict of key: value pairs. Each key consists of all of the recursed keys joined by separator. If key_filter_callback is callable, it will be called with each key. It should return either a new key which will be used in the final full key string, or False, which will indicate that this key and its value should be skipped.'
def flatten_object(self, node, separator='.', key_filter_callback=None, parent_keys=[]):
items = {} try: if (sys.version_info >= (3, 0)): iterator = iter(node.items()) else: iterator = node.iteritems() except AttributeError: iterator = enumerate(node) for (key, item) in iterator: if callable(key_filter_callback): key = key_filter_callback(key) if (key is False): continue if (type(item) in (list, dict)): items.update(self.flatten_object(item, separator, key_filter_callback, (parent_keys + [str(key)]))) else: final_key = separator.join((parent_keys + [str(key)])) items[final_key] = item return items
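A standalone sketch of the flattening described above (Python 3, independent of the parser class; the sample payload is made up): nested dicts and lists collapse to one level, with list indexes becoming key components.

def flatten(node, separator='.', parents=()):
    items = {}
    iterator = node.items() if isinstance(node, dict) else enumerate(node)
    for key, value in iterator:
        keys = parents + (str(key),)
        if isinstance(value, (dict, list)):
            items.update(flatten(value, separator, keys))
        else:
            items[separator.join(keys)] = value
    return items

print(flatten({'rusage': {'maxrss': 1024, 'cpu': [0.1, 0.2]}, 'status': 'ok'}))
# {'rusage.maxrss': 1024, 'rusage.cpu.0': 0.1, 'rusage.cpu.1': 0.2, 'status': 'ok'}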
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: json_data = json.loads(line) except Exception as e: raise LogsterParsingException('{0} - {1}'.format(type(e), e)) self.metrics = self.flatten_object(json.loads(line), self.key_separator, self.key_filter)
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
    self.duration = duration
    metric_objects = []
    for (metric_name, metric_value) in self.metrics.items():
        if (type(metric_value) == float):
            metric_type = 'float'
        elif ((type(metric_value) == int) or (type(metric_value) == long)):
            metric_type = 'int32'
        else:
            metric_type = 'string'
            metric_value = str(metric_value)
        metric_objects.append(MetricObject(metric_name, metric_value, type=metric_type))
    return metric_objects
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--log-levels', '-l', dest='levels', default='WARN,ERROR,FATAL', help='Comma-separated list of log levels to track: (default: "WARN,ERROR,FATAL")') (opts, args) = optparser.parse_args(args=options) self.levels = opts.levels.split(',') for level in self.levels: setattr(self, level, 0) self.reg = re.compile(('[0-9-_:\\.]+ (?P<log_level>%s)' % '|'.join(self.levels)))
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() log_level = linebits['log_level'] if (log_level in self.levels): current_val = getattr(self, log_level) setattr(self, log_level, (current_val + 1)) else: raise LogsterParsingException('regmatch failed to match') except Exception as e: raise LogsterParsingException(('regmatch or contents failed with %s' % e))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) metrics = [MetricObject(level, (getattr(self, level) / self.duration)) for level in self.levels] return metrics
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.size_transferred = 0 self.squid_codes = {'TCP_MISS': 0, 'TCP_DENIED': 0, 'TCP_HIT': 0, 'TCP_MEM_HIT': 0, 'OTHER': 0} self.http_1xx = 0 self.http_2xx = 0 self.http_3xx = 0 self.http_4xx = 0 self.http_5xx = 0 self.reg = re.compile('^[0-9.]+ +(?P<size>[0-9]+) .*(?P<squid_code>(TCP|UDP|NONE)_[A-Z_]+)/(?P<http_status_code>\\d{3}) .*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() status = int(linebits['http_status_code']) squid_code = linebits['squid_code'] size = int(linebits['size']) if (status < 200): self.http_1xx += 1 elif (status < 300): self.http_2xx += 1 elif (status < 400): self.http_3xx += 1 elif (status < 500): self.http_4xx += 1 else: self.http_5xx += 1 if (squid_code in self.squid_codes): self.squid_codes[squid_code] += 1 else: self.squid_codes['OTHER'] += 1 self.size_transferred += size else: raise LogsterParsingException('regmatch failed to match') except Exception as e: raise LogsterParsingException(('regmatch or contents failed with %s' % e))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) return_array = [MetricObject('http_1xx', (self.http_1xx / self.duration), 'Responses per sec'), MetricObject('http_2xx', (self.http_2xx / self.duration), 'Responses per sec'), MetricObject('http_3xx', (self.http_3xx / self.duration), 'Responses per sec'), MetricObject('http_4xx', (self.http_4xx / self.duration), 'Responses per sec'), MetricObject('http_5xx', (self.http_5xx / self.duration), 'Responses per sec'), MetricObject('size', (self.size_transferred / self.duration), 'Size per sec')] for squid_code in self.squid_codes: return_array.append(MetricObject(('squid_' + squid_code), (self.squid_codes[squid_code] / self.duration), 'Squid code per sec')) return return_array
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.http_1xx = 0 self.http_2xx = 0 self.http_3xx = 0 self.http_4xx = 0 self.http_5xx = 0 self.reg = re.compile('.*HTTP/1.\\d" (?P<http_status_code>\\d{3}) .*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() status = int(linebits['http_status_code']) if (status < 200): self.http_1xx += 1 elif (status < 300): self.http_2xx += 1 elif (status < 400): self.http_3xx += 1 elif (status < 500): self.http_4xx += 1 else: self.http_5xx += 1 else: raise LogsterParsingException('regmatch failed to match') except Exception as e: raise LogsterParsingException(('regmatch or contents failed with %s' % e))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) return [MetricObject('http_1xx', (self.http_1xx / self.duration), 'Responses per sec'), MetricObject('http_2xx', (self.http_2xx / self.duration), 'Responses per sec'), MetricObject('http_3xx', (self.http_3xx / self.duration), 'Responses per sec'), MetricObject('http_4xx', (self.http_4xx / self.duration), 'Responses per sec'), MetricObject('http_5xx', (self.http_5xx / self.duration), 'Responses per sec')]
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.counts = {} self.times = {} if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--percentiles', '-p', dest='percentiles', default='90', help='Comma-separated list of integer percentiles to track: (default: "90")') (opts, args) = optparser.parse_args(args=options) self.percentiles = opts.percentiles.split(',') self.count_reg = re.compile('.*METRIC_COUNT\\smetric=(?P<count_name>[^\\s]+)\\s+value=(?P<count_value>[0-9.]+)[^0-9.].*') self.time_reg = re.compile('.*METRIC_TIME\\smetric=(?P<time_name>[^\\s]+)\\s+value=(?P<time_value>[0-9.]+)\\s*(?P<time_unit>[^\\s$]*).*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
count_match = self.count_reg.match(line) if count_match: countbits = count_match.groupdict() count_name = countbits['count_name'] if (count_name not in self.counts): self.counts[count_name] = 0.0 self.counts[count_name] += float(countbits['count_value']) time_match = self.time_reg.match(line) if time_match: time_name = time_match.groupdict()['time_name'] if (time_name not in self.times): unit = time_match.groupdict()['time_unit'] self.times[time_name] = {'unit': unit, 'values': []} self.times[time_name]['values'].append(float(time_match.groupdict()['time_value']))
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
duration = float(duration) metrics = [] if (duration > 0): metrics += [MetricObject(counter, (self.counts[counter] / duration)) for counter in self.counts] for time_name in self.times: values = self.times[time_name]['values'] unit = self.times[time_name]['unit'] metrics.append(MetricObject((time_name + '.mean'), stats_helper.find_mean(values), unit)) metrics.append(MetricObject((time_name + '.median'), stats_helper.find_median(values), unit)) metrics += [MetricObject(('%s.%sth_percentile' % (time_name, percentile)), stats_helper.find_percentile(values, int(percentile)), unit) for percentile in self.percentiles] return metrics
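stats_helper belongs to the surrounding project and is not shown here; as a rough sketch, a linear-interpolation percentile over the collected values could look like the following (this may differ from stats_helper's exact method):

def percentile(values, pct):
    ordered = sorted(values)
    k = (len(ordered) - 1) * (pct / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    return ordered[lower] + (ordered[upper] - ordered[lower]) * (k - lower)

print(percentile([12.0, 5.0, 7.0, 30.0], 90))  # ~24.6 for the 90th percentile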
'Mostly ones (1), with a final value of 1000'
def data(self, ts):
timeseries = map(list, zip(map(float, range((int(ts) - 86400), (int(ts) + 1))), ([1] * 86401))) timeseries[(-1)][1] = 1000 timeseries[(-2)][1] = 1 timeseries[(-3)][1] = 1 return (ts, timeseries)
'Assert that a user can add their own custom algorithm. This mocks out settings.ALGORITHMS and settings.CONSENSUS to use only a single custom-defined function (alwaysTrue)'
@unittest.skip('Fails inexplicable in certain environments.') @patch.object(algorithms, 'CONSENSUS') @patch.object(algorithms, 'ALGORITHMS') @patch.object(algorithms, 'time') def test_run_selected_algorithm_runs_novel_algorithm(self, timeMock, algorithmsListMock, consensusMock):
algorithmsListMock.__iter__.return_value = ['alwaysTrue'] consensusMock = 1 (timeMock.return_value, timeseries) = self.data(time()) alwaysTrue = Mock(return_value=True) with patch.dict(algorithms.__dict__, {'alwaysTrue': alwaysTrue}): (result, ensemble, tail_avg) = algorithms.run_selected_algorithm(timeseries) alwaysTrue.assert_called_with(timeseries) self.assertTrue(result) self.assertEqual(ensemble, [True]) self.assertEqual(tail_avg, 334)
'Unpickle objects from a stream'
def gen_unpickle(self, infile):
try: bunch = self.unpickler.loads(infile) (yield bunch) except EOFError: return
'Read n bytes from a stream'
def read_all(self, sock, n):
data = '' while (n > 0): buf = sock.recv(n) n -= len(buf) data += buf return data
'Exit if this process or its parent has died'
def check_if_parent_is_alive(self):
try: kill(self.current_pid, 0) kill(self.parent_pid, 0) except: exit(0)
'Listen for pickles over tcp'
def listen_pickle(self):
while 1: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((self.ip, self.port)) s.setblocking(1) s.listen(5) logger.info(('listening over tcp for pickles on %s' % self.port)) (conn, address) = s.accept() logger.info(('connection from %s:%s' % (address[0], self.port))) chunk = [] while 1: self.check_if_parent_is_alive() try: length = Struct('!I').unpack(self.read_all(conn, 4)) body = self.read_all(conn, length[0]) for bunch in self.gen_unpickle(body): for metric in bunch: chunk.append(metric) if (len(chunk) > settings.CHUNK_SIZE): try: self.q.put(list(chunk), block=False) chunk[:] = [] except Full: logger.info('queue is full, dropping datapoints') chunk[:] = [] except Exception as e: logger.info(e) logger.info('incoming connection dropped, attempting to reconnect') break except Exception as e: logger.info(("can't connect to socket: " + str(e))) break
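The frames read above are the Graphite pickle protocol: a 4-byte big-endian length ('!I') followed by a pickled list of (metric, (timestamp, value)) tuples. A small round-trip sketch with a made-up metric name:

import pickle, struct, time

metrics = [('horizon.test.metric', (int(time.time()), 1.0))]  # made-up datapoint
payload = pickle.dumps(metrics, protocol=2)
frame = struct.pack('!I', len(payload)) + payload
# Receiver side, as in listen_pickle: read 4 bytes for the length, then that many bytes of body.
(length,) = struct.unpack('!I', frame[:4])
assert pickle.loads(frame[4:4 + length]) == metrics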
'Listen over udp for MessagePack strings'
def listen_udp(self):
while 1: try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind((self.ip, self.port)) logger.info(('listening over udp for messagepack on %s' % self.port)) chunk = [] while 1: self.check_if_parent_is_alive() (data, addr) = s.recvfrom(1024) metric = unpackb(data) chunk.append(metric) if (len(chunk) > settings.CHUNK_SIZE): try: self.q.put(list(chunk), block=False) chunk[:] = [] except Full: logger.info('queue is full, dropping datapoints') chunk[:] = [] except Exception as e: logger.info(("can't connect to socket: " + str(e))) break
'Called when the process initializes.'
def run(self):
logger.info('started listener') if (self.type == 'pickle'): self.listen_pickle() elif (self.type == 'udp'): self.listen_udp() else: logging.error('unknown listener format')
'Exit if the parent process has died.'
def check_if_parent_is_alive(self):
try: kill(self.parent_pid, 0) except: exit(0)
'Check if the metric is in SKIP_LIST.'
def in_skip_list(self, metric_name):
for to_skip in settings.SKIP_LIST: if (to_skip in metric_name): return True return False
'Called when the process initializes.'
def run(self):
logger.info('started worker') FULL_NAMESPACE = settings.FULL_NAMESPACE MINI_NAMESPACE = settings.MINI_NAMESPACE MAX_RESOLUTION = settings.MAX_RESOLUTION full_uniques = (FULL_NAMESPACE + 'unique_metrics') mini_uniques = (MINI_NAMESPACE + 'unique_metrics') pipe = self.redis_conn.pipeline() while 1: try: self.redis_conn.ping() except: logger.error(("worker can't connect to redis at socket path %s" % settings.REDIS_SOCKET_PATH)) sleep(10) self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) pipe = self.redis_conn.pipeline() continue try: chunk = self.q.get(True, 15) now = time() for metric in chunk: if self.in_skip_list(metric[0]): continue if (metric[1][0] < (now - MAX_RESOLUTION)): continue key = ''.join((FULL_NAMESPACE, metric[0])) pipe.append(key, packb(metric[1])) pipe.sadd(full_uniques, key) if (not self.skip_mini): mini_key = ''.join((MINI_NAMESPACE, metric[0])) pipe.append(mini_key, packb(metric[1])) pipe.sadd(mini_uniques, mini_key) pipe.execute() if self.canary: logger.info(('queue size at %d' % self.q.qsize())) self.send_graphite_metric('skyline.horizon.queue_size', self.q.qsize()) except Empty: logger.info('worker queue is empty and timed out') except WatchError: logger.error(key) except NotImplementedError: pass except Exception as e: logger.error(('worker error: ' + str(e)))
'Exit if the parent process has died.'
def check_if_parent_is_alive(self):
try: kill(self.parent_pid, 0) except: exit(0)
'Trim metrics that are older than settings.FULL_DURATION and purge old metrics.'
def vacuum(self, i, namespace, duration):
begin = time() unique_metrics = list(self.redis_conn.smembers((namespace + 'unique_metrics'))) keys_per_processor = (len(unique_metrics) / settings.ROOMBA_PROCESSES) assigned_max = (i * keys_per_processor) assigned_min = (assigned_max - keys_per_processor) assigned_keys = range(assigned_min, assigned_max) assigned_metrics = [unique_metrics[index] for index in assigned_keys] euthanized = 0 blocked = 0 for i in xrange(len(assigned_metrics)): self.check_if_parent_is_alive() pipe = self.redis_conn.pipeline() now = time() key = assigned_metrics[i] try: pipe.watch(key) raw_series = pipe.get(key) unpacker = Unpacker(use_list=False) unpacker.feed(raw_series) timeseries = sorted([unpacked for unpacked in unpacker]) pipe.multi() try: if (not isinstance(timeseries[0], TupleType)): if (timeseries[0] < (now - duration)): pipe.delete(key) pipe.srem((namespace + 'unique_metrics'), key) pipe.execute() euthanized += 1 continue except IndexError: continue if (timeseries[(-1)][0] < (now - duration)): pipe.delete(key) pipe.srem((namespace + 'unique_metrics'), key) pipe.execute() euthanized += 1 continue temp = set() temp_add = temp.add delta = (now - duration) trimmed = [tuple for tuple in timeseries if ((tuple[0] > delta) and (tuple[0] not in temp) and (not temp_add(tuple[0])))] if (len(trimmed) > 0): btrimmed = packb(trimmed) if (len(trimmed) <= 15): value = btrimmed[1:] elif (len(trimmed) <= 65535): value = btrimmed[3:] else: value = btrimmed[5:] pipe.set(key, value) else: pipe.delete(key) pipe.srem((namespace + 'unique_metrics'), key) euthanized += 1 pipe.execute() except WatchError: blocked += 1 assigned_metrics.append(key) except Exception as e: pipe.delete(key) pipe.srem((namespace + 'unique_metrics'), key) pipe.execute() euthanized += 1 logger.info(e) logger.info(('Euthanizing ' + key)) finally: pipe.reset() logger.info(('operated on %s in %f seconds' % (namespace, (time() - begin)))) logger.info(('%s keyspace is %d' % (namespace, (len(assigned_metrics) - euthanized)))) logger.info(('blocked %d times' % blocked)) logger.info(('euthanized %d geriatric keys' % euthanized)) if ((time() - begin) < 30): logger.info('sleeping due to low run time...') sleep(10)
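The 1/3/5-byte slicing above strips the msgpack array header (fixarray for up to 15 items, array16 for up to 65535, array32 otherwise) so that only the bare elements are stored; Redis APPEND then keeps a continuous element stream that Unpacker can replay. A small round-trip sketch with made-up datapoints:

from msgpack import Unpacker, packb

trimmed = [(1500000000, 1.0), (1500000010, 2.0)]  # made-up datapoints
btrimmed = packb(trimmed)
stream = btrimmed[1:]  # two items -> fixarray, so the header is a single byte

unpacker = Unpacker(use_list=False)
unpacker.feed(stream)
print(list(unpacker))  # [(1500000000, 1.0), (1500000010, 2.0)]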
'Called when process initializes.'
def run(self):
logger.info('started roomba') while 1: now = time() try: self.redis_conn.ping() except: logger.error(("roomba can't connect to redis at socket path %s" % settings.REDIS_SOCKET_PATH)) sleep(10) self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) continue pids = [] for i in range(1, (settings.ROOMBA_PROCESSES + 1)): if (not self.skip_mini): p = Process(target=self.vacuum, args=(i, settings.MINI_NAMESPACE, (settings.MINI_DURATION + settings.ROOMBA_GRACE_TIME))) pids.append(p) p.start() p = Process(target=self.vacuum, args=(i, settings.FULL_NAMESPACE, (settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME))) pids.append(p) p.start() for p in pids: p.join()
'Initialize the Analyzer'
def __init__(self, parent_pid):
super(Analyzer, self).__init__() self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) self.daemon = True self.parent_pid = parent_pid self.current_pid = getpid() self.anomalous_metrics = Manager().list() self.exceptions_q = Queue() self.anomaly_breakdown_q = Queue()
'Exit if this process or its parent has died'
def check_if_parent_is_alive(self):
try: kill(self.current_pid, 0) kill(self.parent_pid, 0) except: exit(0)
'Assign a bunch of metrics for a process to analyze.'
def spin_process(self, i, unique_metrics):
keys_per_processor = int(ceil((float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))) if (i == settings.ANALYZER_PROCESSES): assigned_max = len(unique_metrics) else: assigned_max = (i * keys_per_processor) assigned_min = (assigned_max - keys_per_processor) assigned_keys = range(assigned_min, assigned_max) assigned_metrics = [unique_metrics[index] for index in assigned_keys] if (len(assigned_metrics) == 0): return raw_assigned = self.redis_conn.mget(assigned_metrics) exceptions = defaultdict(int) anomaly_breakdown = defaultdict(int) for (i, metric_name) in enumerate(assigned_metrics): self.check_if_parent_is_alive() try: raw_series = raw_assigned[i] unpacker = Unpacker(use_list=False) unpacker.feed(raw_series) timeseries = list(unpacker) (anomalous, ensemble, datapoint) = run_selected_algorithm(timeseries, metric_name) if anomalous: base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1) metric = [datapoint, base_name] self.anomalous_metrics.append(metric) for (index, value) in enumerate(ensemble): if value: algorithm = settings.ALGORITHMS[index] anomaly_breakdown[algorithm] += 1 except TypeError: exceptions['DeletedByRoomba'] += 1 except TooShort: exceptions['TooShort'] += 1 except Stale: exceptions['Stale'] += 1 except Boring: exceptions['Boring'] += 1 except: exceptions['Other'] += 1 logger.info(traceback.format_exc()) for (key, value) in anomaly_breakdown.items(): self.anomaly_breakdown_q.put((key, value)) for (key, value) in exceptions.items(): self.exceptions_q.put((key, value))
'Called when the process initializes.'
def run(self):
while 1: now = time() try: self.redis_conn.ping() except: logger.error(("skyline can't connect to redis at socket path %s" % settings.REDIS_SOCKET_PATH)) sleep(10) self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) continue unique_metrics = list(self.redis_conn.smembers((settings.FULL_NAMESPACE + 'unique_metrics'))) if (len(unique_metrics) == 0): logger.info('no metrics in redis. try adding some - see README') sleep(10) continue pids = [] for i in range(1, (settings.ANALYZER_PROCESSES + 1)): if (i > len(unique_metrics)): logger.info('WARNING: skyline is set for more cores than needed.') break p = Process(target=self.spin_process, args=(i, unique_metrics)) pids.append(p) p.start() for p in pids: p.join() exceptions = dict() anomaly_breakdown = dict() while 1: try: (key, value) = self.anomaly_breakdown_q.get_nowait() if (key not in anomaly_breakdown.keys()): anomaly_breakdown[key] = value else: anomaly_breakdown[key] += value except Empty: break while 1: try: (key, value) = self.exceptions_q.get_nowait() if (key not in exceptions.keys()): exceptions[key] = value else: exceptions[key] += value except Empty: break if settings.ENABLE_ALERTS: for alert in settings.ALERTS: for metric in self.anomalous_metrics: if (alert[0] in metric[1]): cache_key = ('last_alert.%s.%s' % (alert[1], metric[1])) try: last_alert = self.redis_conn.get(cache_key) if (not last_alert): self.redis_conn.setex(cache_key, alert[2], packb(metric[0])) trigger_alert(alert, metric) except Exception as e: logger.error(("couldn't send alert: %s" % e)) filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP)) with open(filename, 'w') as fh: anomalous_metrics = list(self.anomalous_metrics) anomalous_metrics.sort(key=operator.itemgetter(1)) fh.write(('handle_data(%s)' % anomalous_metrics)) logger.info(('seconds to run :: %.2f' % (time() - now))) logger.info(('total metrics :: %d' % len(unique_metrics))) logger.info(('total analyzed :: %d' % (len(unique_metrics) - sum(exceptions.values())))) logger.info(('total anomalies :: %d' % len(self.anomalous_metrics))) logger.info(('exception stats :: %s' % exceptions)) logger.info(('anomaly breakdown :: %s' % anomaly_breakdown)) self.send_graphite_metric('skyline.analyzer.run_time', ('%.2f' % (time() - now))) self.send_graphite_metric('skyline.analyzer.total_analyzed', ('%.2f' % (len(unique_metrics) - sum(exceptions.values())))) raw_series = self.redis_conn.get((settings.FULL_NAMESPACE + settings.CANARY_METRIC)) if (raw_series is not None): unpacker = Unpacker(use_list=False) unpacker.feed(raw_series) timeseries = list(unpacker) time_human = ((timeseries[(-1)][0] - timeseries[0][0]) / 3600) projected = ((24 * (time() - now)) / time_human) logger.info(('canary duration :: %.2f' % time_human)) self.send_graphite_metric('skyline.analyzer.duration', ('%.2f' % time_human)) self.send_graphite_metric('skyline.analyzer.projected', ('%.2f' % projected)) self.anomalous_metrics[:] = [] if ((time() - now) < 5): logger.info('sleeping due to low run time...') sleep(10)
'Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of :class:`~pymongo.database.Database`. :Parameters: - `database`: database to use - `collection` (optional): root collection to use .. versionchanged:: 3.1 Indexes are only ensured on the first write to the DB. .. versionchanged:: 3.0 `database` must use an acknowledged :attr:`~pymongo.database.Database.write_concern` .. mongodoc:: gridfs'
def __init__(self, database, collection='fs'):
if (not isinstance(database, Database)): raise TypeError('database must be an instance of Database') if (not database.write_concern.acknowledged): raise ConfigurationError('database must use acknowledged write_concern') self.__database = database self.__collection = database[collection] self.__files = self.__collection.files self.__chunks = self.__collection.chunks
'Create a new file in GridFS. Returns a new :class:`~gridfs.grid_file.GridIn` instance to which data can be written. Any keyword arguments will be passed through to :meth:`~gridfs.grid_file.GridIn`. If the ``"_id"`` of the file is manually specified, it must not already exist in GridFS. Otherwise :class:`~gridfs.errors.FileExists` is raised. :Parameters: - `**kwargs` (optional): keyword arguments for file creation'
def new_file(self, **kwargs):
return GridIn(self.__collection, **kwargs)
'Put data in GridFS as a new file. Equivalent to doing:: try: f = new_file(**kwargs) f.write(data) finally: f.close() `data` can be either an instance of :class:`str` (:class:`bytes` in python 3) or a file-like object providing a :meth:`read` method. If an `encoding` keyword argument is passed, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as `encoding` before being written. Any keyword arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file. If the ``"_id"`` of the file is manually specified, it must not already exist in GridFS. Otherwise :class:`~gridfs.errors.FileExists` is raised. :Parameters: - `data`: data to be written as a file. - `**kwargs` (optional): keyword arguments for file creation .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited.'
def put(self, data, **kwargs):
grid_file = GridIn(self.__collection, **kwargs) try: grid_file.write(data) finally: grid_file.close() return grid_file._id
'Get a file from GridFS by ``"_id"``. Returns an instance of :class:`~gridfs.grid_file.GridOut`, which provides a file-like interface for reading. :Parameters: - `file_id`: ``"_id"`` of the file to get'
def get(self, file_id):
gout = GridOut(self.__collection, file_id) gout._ensure_file() return gout
'Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches `filename` and whose metadata fields match the supplied keyword arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. Version numbering is a convenience atop the GridFS API provided by MongoDB. If more than one file matches the query (either by `filename` alone, by metadata fields, or by a combination of both), then version ``-1`` will be the most recently uploaded matching file, ``-2`` the second most recently uploaded, etc. Version ``0`` will be the first version uploaded, ``1`` the second version, etc. So if three versions have been uploaded, then version ``0`` is the same as version ``-3``, version ``1`` is the same as version ``-2``, and version ``2`` is the same as version ``-1``. Raises :class:`~gridfs.errors.NoFile` if no such version of that file exists. :Parameters: - `filename`: ``"filename"`` of the file to get, or `None` - `version` (optional): version of the file to get (defaults to -1, the most recent version uploaded) - `**kwargs` (optional): find files by custom metadata. .. versionchanged:: 3.1 ``get_version`` no longer ensures indexes.'
def get_version(self, filename=None, version=(-1), **kwargs):
query = kwargs if (filename is not None): query['filename'] = filename cursor = self.__files.find(query) if (version < 0): skip = (abs(version) - 1) cursor.limit((-1)).skip(skip).sort('uploadDate', DESCENDING) else: cursor.limit((-1)).skip(version).sort('uploadDate', ASCENDING) try: grid_file = next(cursor) return GridOut(self.__collection, file_document=grid_file) except StopIteration: raise NoFile(('no version %d for filename %r' % (version, filename)))
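A short usage sketch of the version semantics described above (a local MongoDB instance and the filename are assumptions for illustration):

from pymongo import MongoClient
import gridfs

fs = gridfs.GridFS(MongoClient().test)
fs.put(b'version one', filename='lisa.txt')
fs.put(b'version two', filename='lisa.txt')

assert fs.get_version('lisa.txt', version=0).read() == b'version one'   # first upload
assert fs.get_version('lisa.txt', version=-1).read() == b'version two'  # most recent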
'Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. Equivalent to calling :meth:`get_version` with the default `version` (``-1``). :Parameters: - `filename`: ``"filename"`` of the file to get, or `None` - `**kwargs` (optional): find files by custom metadata.'
def get_last_version(self, filename=None, **kwargs):
return self.get_version(filename=filename, **kwargs)
'Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: `file_id`. .. warning:: Any processes/threads reading from the file while this method is executing will likely see an invalid/corrupt file. Care should be taken to avoid concurrent reads to a file while it is being deleted. .. note:: Deletes of non-existent files are considered successful since the end result is the same: no file with that _id remains. :Parameters: - `file_id`: ``"_id"`` of the file to delete .. versionchanged:: 3.1 ``delete`` no longer ensures indexes.'
def delete(self, file_id):
self.__files.delete_one({'_id': file_id}) self.__chunks.delete_many({'files_id': file_id})
'List the names of all files stored in this instance of :class:`GridFS`. .. versionchanged:: 3.1 ``list`` no longer ensures indexes.'
def list(self):
return [name for name in self.__files.distinct('filename') if (name is not None)]
'Get a single file from gridfs. All arguments to :meth:`find` are also valid arguments for :meth:`find_one`, although any `limit` argument will be ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, or ``None`` if no matching file is found. For example:: file = fs.find_one({"filename": "lisa.txt"}) :Parameters: - `filter` (optional): a dictionary specifying the query to be performing OR any other type to be used as the value for a query for ``"_id"`` in the file collection. - `*args` (optional): any additional positional arguments are the same as the arguments to :meth:`find`. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`find`.'
def find_one(self, filter=None, *args, **kwargs):
if ((filter is not None) and (not isinstance(filter, Mapping))): filter = {'_id': filter} for f in self.find(filter, *args, **kwargs): return f return None
'Query GridFS for files. Returns a cursor that iterates across files matching arbitrary queries on the files collection. Can be combined with other modifiers for additional control. For example:: for grid_out in fs.find({"filename": "lisa.txt"}, no_cursor_timeout=True): data = grid_out.read() would iterate through all versions of "lisa.txt" stored in GridFS. Note that setting no_cursor_timeout to True may be important to prevent the cursor from timing out during long multi-file processing work. As another example, the call:: most_recent_three = fs.find().sort("uploadDate", -1).limit(3) would return a cursor to the three most recently uploaded files in GridFS. Follows a similar interface to :meth:`~pymongo.collection.Collection.find` in :class:`~pymongo.collection.Collection`. :Parameters: - `filter` (optional): a SON object specifying elements which must be present for a document to be included in the result set - `skip` (optional): the number of files to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to return - `no_cursor_timeout` (optional): if False (the default), any returned cursor is closed by the server after 10 minutes of inactivity. If set to True, the returned cursor will never time out on the server. Care should be taken to ensure that cursors with no_cursor_timeout turned on are properly closed. - `sort` (optional): a list of (key, direction) pairs specifying the sort order for this query. See :meth:`~pymongo.cursor.Cursor.sort` for details. Raises :class:`TypeError` if any of the arguments are of improper type. Returns an instance of :class:`~gridfs.grid_file.GridOutCursor` corresponding to this query. .. versionchanged:: 3.0 Removed the read_preference, tag_sets, and secondary_acceptable_latency_ms options. .. versionadded:: 2.7 .. mongodoc:: find'
def find(self, *args, **kwargs):
return GridOutCursor(self.__collection, *args, **kwargs)
'Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its ``_id`` key, or by passing in a query document. A query document can be passed in as dictionary, or by using keyword arguments. Thus, the following three calls are equivalent: >>> fs.exists(file_id) >>> fs.exists({"_id": file_id}) >>> fs.exists(_id=file_id) As are the following two calls: >>> fs.exists({"filename": "mike.txt"}) >>> fs.exists(filename="mike.txt") And the following two: >>> fs.exists({"foo": {"$gt": 12}}) >>> fs.exists(foo={"$gt": 12}) Returns ``True`` if a matching file exists, ``False`` otherwise. Calls to :meth:`exists` will not automatically create appropriate indexes; application developers should be sure to create indexes if needed and as appropriate. :Parameters: - `document_or_id` (optional): query document, or _id of the document to check for - `**kwargs` (optional): keyword arguments are used as a query document, if they\'re present.'
def exists(self, document_or_id=None, **kwargs):
if kwargs: return (self.__files.find_one(kwargs, ['_id']) is not None) return (self.__files.find_one(document_or_id, ['_id']) is not None)
'Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of :class:`~pymongo.database.Database`. Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` is not acknowledged. :Parameters: - `database`: database to use. - `bucket_name` (optional): The name of the bucket. Defaults to \'fs\'. - `chunk_size_bytes` (optional): The chunk size in bytes. Defaults to 255KB. - `write_concern` (optional): The :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` (the default) db.write_concern is used. - `read_preference` (optional): The read preference to use. If ``None`` (the default) db.read_preference is used. .. versionadded:: 3.1 .. mongodoc:: gridfs'
def __init__(self, db, bucket_name='fs', chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, read_preference=None):
if (not isinstance(db, Database)): raise TypeError('database must be an instance of Database') wtc = (write_concern if (write_concern is not None) else db.write_concern) if (not wtc.acknowledged): raise ConfigurationError('write concern must be acknowledged') self._db = db self._bucket_name = bucket_name self._collection = db[bucket_name] self._chunks = self._collection.chunks.with_options(write_concern=write_concern, read_preference=read_preference) self._files = self._collection.files.with_options(write_concern=write_concern, read_preference=read_preference) self._chunk_size_bytes = chunk_size_bytes
'Opens a Stream that the application can write the contents of the file to. The user must specify the filename, and can choose to add any additional information in the metadata field of the file document or modify the chunk size. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) grid_in, file_id = fs.open_upload_stream( "test_file", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) grid_in.write("data I want to store!") grid_in.close() # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `filename`: The name of the file to upload. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - `metadata` (optional): User data for the \'metadata\' field of the files collection document. If not provided the metadata field will be omitted from the files collection document.'
def open_upload_stream(self, filename, chunk_size_bytes=None, metadata=None):
validate_string('filename', filename) opts = {'filename': filename, 'chunk_size': (chunk_size_bytes if (chunk_size_bytes is not None) else self._chunk_size_bytes)} if (metadata is not None): opts['metadata'] = metadata return GridIn(self._collection, **opts)
'Opens a Stream that the application can write the contents of the file to. The user must specify the file id and filename, and can choose to add any additional information in the metadata field of the file document or modify the chunk size. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) grid_in, file_id = fs.open_upload_stream( ObjectId(), "test_file", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) grid_in.write("data I want to store!") grid_in.close() # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `file_id`: The id to use for this file. The id must not have already been used for another file. - `filename`: The name of the file to upload. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - `metadata` (optional): User data for the \'metadata\' field of the files collection document. If not provided the metadata field will be omitted from the files collection document.'
def open_upload_stream_with_id(self, file_id, filename, chunk_size_bytes=None, metadata=None):
validate_string('filename', filename) opts = {'_id': file_id, 'filename': filename, 'chunk_size': (chunk_size_bytes if (chunk_size_bytes is not None) else self._chunk_size_bytes)} if (metadata is not None): opts['metadata'] = metadata return GridIn(self._collection, **opts)
'Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads it to the file `filename`. Source can be a string or file-like object. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) file_id = fs.upload_from_stream( "test_file", "data I want to store!", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) Returns the _id of the uploaded file. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `filename`: The name of the file to upload. - `source`: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - `metadata` (optional): User data for the \'metadata\' field of the files collection document. If not provided the metadata field will be omitted from the files collection document.'
def upload_from_stream(self, filename, source, chunk_size_bytes=None, metadata=None):
with self.open_upload_stream(filename, chunk_size_bytes, metadata) as gin: gin.write(source) return gin._id