'Set variable.'
def let(self, name, value):
    cmd = 'let %s = %s' % (name, self.prepare_value(value))
    self.debug(cmd)
    vim.command(cmd)
'Decode bstr to vim encoding. :return unicode string:'
def prepare_value(self, value, dumps=True):
    if dumps:
        value = json.dumps(value)
    if PY2:
        value = value.decode('utf-8').encode(self.options.get('encoding'))
    return value
'Calculate current offset. :return tuple: (source, offset)'
def get_offset_params(self, cursor=None, base=''):
    row, col = cursor or env.cursor
    source = ''
    offset = 0
    for i, line in enumerate(self.lines, 1):
        if i == row:
            source += line[:col] + base
            offset = len(source)
            source += line[col:]
        else:
            source += line
        source += '\n'
    env.debug('Get offset', base or None, row, col, offset)
    return source, offset
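
# A minimal standalone sketch of the same offset computation (hypothetical
# helper; no vim/env objects involved): given buffer lines and a 1-indexed
# (row, col) cursor, build the joined source and the cursor's offset in it.
def offset_params(lines, row, col, base=''):
    source, offset = '', 0
    for i, line in enumerate(lines, 1):
        if i == row:
            source += line[:col] + base
            offset = len(source)
            source += line[col:]
        else:
            source += line
        source += '\n'
    return source, offset

src, off = offset_params(['abc', 'def'], row=2, col=1)
assert (src, off) == ('abc\ndef\n', 5)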
'Go to line.'
@staticmethod
def goto_line(line):
    vim.command('normal %sggzz' % line)
'Open file by path.'
def goto_file(self, path, cmd='e', force=False):
    if force or os.path.abspath(path) != self.curbuf.name:
        self.debug('read', path)
        if ' ' in path and os.name == 'posix':
            path = path.replace(' ', '\\ ')
        vim.command('%s %s' % (cmd, path))
'Open buffer.'
@staticmethod
def goto_buffer(bufnr):
    if str(bufnr) != '-1':
        vim.command('buffer %s' % bufnr)
'Return a version of the source code with PEP 8 violations fixed.'
def fix(self):
    pep8_options = {u'ignore': self.options.ignore,
                    u'select': self.options.select,
                    u'max_line_length': self.options.max_line_length}
    results = _execute_pep8(pep8_options, self.source)

    if self.options.verbose:
        progress = {}
        for r in results:
            if r[u'id'] not in progress:
                progress[r[u'id']] = set()
            progress[r[u'id']].add(r[u'line'])
        print(u'---> {n} issue(s) to fix {progress}'.format(
            n=len(results), progress=progress), file=sys.stderr)

    if self.options.line_range:
        start, end = self.options.line_range
        results = [r for r in results if start <= r[u'line'] <= end]

    self._fix_source(filter_results(source=u''.join(self.source),
                                    results=results,
                                    aggressive=self.options.aggressive))

    if self.options.line_range:
        # If the number of lines changed, adjust the end of line_range.
        count = sum(sline.count(u'\n')
                    for sline in self.source[start - 1:end])
        self.options.line_range[1] = start + count - 1

    return u''.join(self.source)
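
# The fixer methods below all index into pycodestyle result dicts. Based on
# the keys used throughout this section ('line', 'column', 'id', 'info'), a
# representative (assumed) entry looks like this; 'line' and 'column' are
# 1-indexed:
result = {
    u'id': u'E225',
    u'line': 3,
    u'column': 7,
    u'info': u'E225 missing whitespace around operator',
}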
'Fix a badly indented line. This is done by adding or removing from its initial indent only.'
def _fix_reindent(self, result):
    num_indent_spaces = int(result[u'info'].split()[1])
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    self.source[line_index] = (u' ' * num_indent_spaces) + target.lstrip()
'Fix under-indented comments.'
def fix_e112(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    if not target.lstrip().startswith(u'#'):
        return []
    self.source[line_index] = self.indent_word + target
'Fix over-indented comments.'
def fix_e113(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    indent = _get_indentation(target)
    stripped = target.lstrip()
    if not stripped.startswith(u'#'):
        return []
    self.source[line_index] = indent[1:] + stripped
'Fix indentation that is indistinguishable from the next logical line.'
def fix_e125(self, result):
    num_indent_spaces = int(result[u'info'].split()[1])
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    spaces_to_add = num_indent_spaces - len(_get_indentation(target))
    indent = len(_get_indentation(target))
    modified_lines = []

    while len(_get_indentation(self.source[line_index])) >= indent:
        self.source[line_index] = (u' ' * spaces_to_add) + self.source[line_index]
        modified_lines.append(1 + line_index)  # Line indexed at 1.
        line_index -= 1

    return modified_lines
'Fix indentation that is indistinguishable from the next logical line.'
def fix_e131(self, result):
    num_indent_spaces = int(result[u'info'].split()[1])
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    spaces_to_add = num_indent_spaces - len(_get_indentation(target))

    if spaces_to_add >= 0:
        self.source[line_index] = (u' ' * spaces_to_add) + self.source[line_index]
    else:
        offset = abs(spaces_to_add)
        self.source[line_index] = self.source[line_index][offset:]
'Remove extraneous whitespace.'
def fix_e201(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    offset = result[u'column'] - 1
    fixed = fix_whitespace(target, offset=offset, replacement=u'')
    self.source[line_index] = fixed
'Remove extraneous whitespace around operator.'
def fix_e224(self, result):
    target = self.source[result[u'line'] - 1]
    offset = result[u'column'] - 1
    fixed = target[:offset] + target[offset:].replace(u'\t', u' ')
    self.source[result[u'line'] - 1] = fixed
'Fix missing whitespace around operator.'
def fix_e225(self, result):
    target = self.source[result[u'line'] - 1]
    offset = result[u'column'] - 1
    fixed = target[:offset] + u' ' + target[offset:]

    # Only proceed if the non-whitespace characters and the indentation
    # are unchanged.
    if (fixed.replace(u' ', u'') == target.replace(u' ', u'') and
            _get_indentation(fixed) == _get_indentation(target)):
        self.source[result[u'line'] - 1] = fixed
        error_code = result.get(u'id', 0)
        try:
            ts = generate_tokens(fixed)
        except tokenize.TokenError:
            return
        if not check_syntax(fixed.lstrip()):
            return
        errors = list(pycodestyle.missing_whitespace_around_operator(fixed, ts))
        for e in reversed(errors):
            if error_code != e[1].split()[0]:
                continue
            offset = e[0][1]
            fixed = fixed[:offset] + u' ' + fixed[offset:]
        self.source[result[u'line'] - 1] = fixed
    else:
        return []
'Add missing whitespace.'
def fix_e231(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    offset = result[u'column']
    fixed = target[:offset].rstrip() + u' ' + target[offset:].lstrip()
    self.source[line_index] = fixed
'Remove whitespace around parameter \'=\' sign.'
def fix_e251(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    c = min(result[u'column'] - 1, len(target) - 1)

    if target[c].strip():
        fixed = target
    else:
        fixed = target[:c].rstrip() + target[c:].lstrip()

    # Handle a line that ends with an escaped newline after '='.
    if fixed.endswith((u'=\\\n', u'=\\\r\n', u'=\\\r')):
        self.source[line_index] = fixed.rstrip(u'\n\r \t\\')
        self.source[line_index + 1] = self.source[line_index + 1].lstrip()
        return [line_index + 1, line_index + 2]  # Line indexed at 1.

    self.source[result[u'line'] - 1] = fixed
'Fix spacing after comment hash.'
def fix_e262(self, result):
    target = self.source[result[u'line'] - 1]
    offset = result[u'column']

    code = target[:offset].rstrip(u' \t#')
    comment = target[offset:].lstrip(u' \t#')

    fixed = code + (u'  # ' + comment if comment.strip() else u'\n')
    self.source[result[u'line'] - 1] = fixed
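
# Illustration of the string surgery above on a sample line (offset is the
# E262 column, pointing at the comment hash):
target = u'x = 1  ## comment\n'
offset = 7
code = target[:offset].rstrip(u' \t#')
comment = target[offset:].lstrip(u' \t#')
assert code + u'  # ' + comment == u'x = 1  # comment\n'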
'Fix extraneous whitespace around keywords.'
def fix_e271(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    offset = result[u'column'] - 1
    fixed = fix_whitespace(target, offset=offset, replacement=u' ')
    if fixed == target:
        return []
    else:
        self.source[line_index] = fixed
'Add missing blank line.'
def fix_e301(self, result):
    cr = u'\n'
    self.source[result[u'line'] - 1] = cr + self.source[result[u'line'] - 1]
'Add missing 2 blank lines.'
def fix_e302(self, result):
    add_linenum = 2 - int(result[u'info'].split()[-1])
    cr = u'\n' * add_linenum
    self.source[result[u'line'] - 1] = cr + self.source[result[u'line'] - 1]
'Remove extra blank lines.'
def fix_e303(self, result):
    delete_linenum = int(result[u'info'].split(u'(')[1].split(u')')[0]) - 2
    delete_linenum = max(1, delete_linenum)

    cnt = 0
    line = result[u'line'] - 2
    modified_lines = []
    while cnt < delete_linenum and line >= 0:
        if not self.source[line].strip():
            self.source[line] = u''
            modified_lines.append(1 + line)  # Line indexed at 1.
            cnt += 1
        line -= 1

    return modified_lines
'Remove blank line following function decorator.'
def fix_e304(self, result):
    line = result[u'line'] - 2
    if not self.source[line].strip():
        self.source[line] = u''
'Add missing 2 blank lines after end of function or class.'
def fix_e305(self, result):
    cr = u'\n'
    # Walk back over any comment lines directly above the reported line.
    offset = result[u'line'] - 2
    while True:
        if offset < 0:
            break
        line = self.source[offset].lstrip()
        if not line:
            break
        if line[0] != u'#':
            break
        offset -= 1
    offset += 1
    self.source[offset] = cr + self.source[offset]
'Put imports on separate lines.'
def fix_e401(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    offset = result[u'column'] - 1

    if not target.lstrip().startswith(u'import'):
        return []

    indentation = re.split(pattern=u'\\bimport\\b', string=target, maxsplit=1)[0]
    fixed = (target[:offset].rstrip(u' \t,') + u'\n' + indentation +
             u'import ' + target[offset:].lstrip(u' \t,'))
    self.source[line_index] = fixed
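
# Worked example of the transformation above, with the E401 column pointing
# at the second import (0-based offset 11 in this sample line):
target = u'import os, sys\n'
offset = 11
indentation = u''
fixed = (target[:offset].rstrip(u' \t,') + u'\n' + indentation +
         u'import ' + target[offset:].lstrip(u' \t,'))
assert fixed == u'import os\nimport sys\n'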
'Try to make lines fit within --max-line-length characters.'
def fix_long_line_logically(self, result, logical):
    if (not logical or len(logical[2]) == 1 or
            self.source[result[u'line'] - 1].lstrip().startswith(u'#')):
        return self.fix_long_line_physically(result)

    start_line_index = logical[0][0]
    end_line_index = logical[1][0]
    logical_lines = logical[2]

    previous_line = get_item(self.source, start_line_index - 1, default=u'')
    next_line = get_item(self.source, end_line_index + 1, default=u'')

    single_line = join_logical_line(u''.join(logical_lines))

    try:
        fixed = self.fix_long_line(
            target=single_line,
            previous_line=previous_line,
            next_line=next_line,
            original=u''.join(logical_lines))
    except (SyntaxError, tokenize.TokenError):
        return self.fix_long_line_physically(result)

    if fixed:
        for line_index in range(start_line_index, end_line_index + 1):
            self.source[line_index] = u''
        self.source[start_line_index] = fixed
        return range(start_line_index + 1, end_line_index + 1)

    return []
'Try to make lines fit within --max-line-length characters.'
def fix_long_line_physically(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]

    previous_line = get_item(self.source, line_index - 1, default=u'')
    next_line = get_item(self.source, line_index + 1, default=u'')

    try:
        fixed = self.fix_long_line(
            target=target,
            previous_line=previous_line,
            next_line=next_line,
            original=target)
    except (SyntaxError, tokenize.TokenError):
        return []

    if fixed:
        self.source[line_index] = fixed
        return [line_index + 1]

    return []
'Remove extraneous escape of newline.'
def fix_e502(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)
    self.source[line_index] = target.rstrip(u'\n\r \t\\') + u'\n'
'Put colon-separated compound statement on separate lines.'
def fix_e701(self, result):
    line_index = result[u'line'] - 1
    target = self.source[line_index]
    c = result[u'column']

    fixed_source = (target[:c] + u'\n' +
                    _get_indentation(target) + self.indent_word +
                    target[c:].lstrip(u'\n\r \t\\'))
    self.source[result[u'line'] - 1] = fixed_source
    return [result[u'line'], result[u'line'] + 1]
'Put semicolon-separated compound statement on separate lines.'
def fix_e702(self, result, logical):
    if not logical:
        return []
    logical_lines = logical[2]

    # Avoid applying this to lines that contain a colon.
    for line in logical_lines:
        if u':' in line:
            return []

    line_index = result[u'line'] - 1
    target = self.source[line_index]

    if target.rstrip().endswith(u'\\'):
        # Join a statement split by an escaped newline.
        self.source[line_index] = target.rstrip(u'\n \r\t\\')
        self.source[line_index + 1] = self.source[line_index + 1].lstrip()
        return [line_index + 1, line_index + 2]

    if target.rstrip().endswith(u';'):
        self.source[line_index] = target.rstrip(u'\n \r\t;') + u'\n'
        return [line_index + 1]

    offset = result[u'column'] - 1
    first = target[:offset].rstrip(u';').rstrip()
    second = (_get_indentation(logical_lines[0]) +
              target[offset:].lstrip(u';').lstrip())

    # Find inline comment.
    inline_comment = None
    if target[offset:].lstrip(u';').lstrip()[:2] == u'# ':
        inline_comment = target[offset:].lstrip(u';')

    if inline_comment:
        self.source[line_index] = first + inline_comment
    else:
        self.source[line_index] = first + u'\n' + second
    return [line_index + 1]
'Fix multiple statements on one line (def).'
def fix_e704(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)
    match = STARTSWITH_DEF_REGEX.match(target)
    if match:
        self.source[line_index] = u'{0}\n{1}{2}'.format(
            match.group(0),
            _get_indentation(target) + self.indent_word,
            target[match.end(0):].lstrip())
'Fix comparison with None.'
def fix_e711(self, result):
    line_index, offset, target = get_index_offset_contents(result, self.source)

    right_offset = offset + 2
    if right_offset >= len(target):
        return []

    left = target[:offset].rstrip()
    center = target[offset:right_offset]
    right = target[right_offset:].lstrip()

    if not right.startswith(u'None'):
        return []

    if center.strip() == u'==':
        new_center = u'is'
    elif center.strip() == u'!=':
        new_center = u'is not'
    else:
        return []

    self.source[line_index] = u' '.join([left, new_center, right])
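
# Worked example of the E711 rewrite, with offset at the '==' (0-based):
target = u'if x == None:\n'
offset = 5
left = target[:offset].rstrip()
center = target[offset:offset + 2]
right = target[offset + 2:].lstrip()
assert center == u'=='
assert u' '.join([left, u'is', right]) == u'if x is None:\n'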
'Fix (trivial case of) comparison with boolean.'
def fix_e712(self, result):
    line_index, offset, target = get_index_offset_contents(result, self.source)

    # Handle the very easy "not" special cases first.
    if re.match(u'^\\s*if [\\w.]+ == False:$', target):
        self.source[line_index] = re.sub(u'if ([\\w.]+) == False:',
                                         u'if not \\1:', target, count=1)
    elif re.match(u'^\\s*if [\\w.]+ != True:$', target):
        self.source[line_index] = re.sub(u'if ([\\w.]+) != True:',
                                         u'if not \\1:', target, count=1)
    else:
        right_offset = offset + 2
        if right_offset >= len(target):
            return []

        left = target[:offset].rstrip()
        center = target[offset:right_offset]
        right = target[right_offset:].lstrip()

        new_right = None
        if center.strip() == u'==':
            if re.match(u'\\bTrue\\b', right):
                new_right = re.sub(u'\\bTrue\\b *', u'', right, count=1)
        elif center.strip() == u'!=':
            if re.match(u'\\bFalse\\b', right):
                new_right = re.sub(u'\\bFalse\\b *', u'', right, count=1)

        if new_right is None:
            return []

        if new_right[0].isalnum():
            new_right = u' ' + new_right

        self.source[line_index] = left + new_right
'Fix (trivial case of) non-membership check.'
def fix_e713(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)

    match = COMPARE_NEGATIVE_REGEX.search(target)
    if match:
        if match.group(3) == u'in':
            pos_start = match.start(1)
            self.source[line_index] = u'{0}{1} {2} {3} {4}'.format(
                target[:pos_start], match.group(2), match.group(1),
                match.group(3), target[match.end():])
'Fix object identity should be \'is not\' case.'
def fix_e714(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)

    match = COMPARE_NEGATIVE_REGEX.search(target)
    if match:
        if match.group(3) == u'is':
            pos_start = match.start(1)
            self.source[line_index] = u'{0}{1} {2} {3} {4}'.format(
                target[:pos_start], match.group(2), match.group(3),
                match.group(1), target[match.end():])
'Fix bare except.'
def fix_e722(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)
    match = BARE_EXCEPT_REGEX.search(target)
    if match:
        self.source[line_index] = u'{0}{1}{2}'.format(
            target[:result[u'column'] - 1], u'except BaseException:',
            target[match.end():])
'Fix do not assign a lambda expression check.'
def fix_e731(self, result):
    line_index, _, target = get_index_offset_contents(result, self.source)
    match = LAMBDA_REGEX.search(target)
    if match:
        end = match.end()
        self.source[line_index] = u'{0}def {1}({2}): return {3}'.format(
            target[:match.start(0)], match.group(1), match.group(2),
            target[end:].lstrip())
'Remove trailing whitespace.'
def fix_w291(self, result):
    fixed_line = self.source[result[u'line'] - 1].rstrip()
    self.source[result[u'line'] - 1] = fixed_line + u'\n'
'Remove trailing blank lines.'
def fix_w391(self, _):
    blank_count = 0
    for line in reversed(self.source):
        line = line.rstrip()
        if line:
            break
        else:
            blank_count += 1

    original_length = len(self.source)
    self.source = self.source[:original_length - blank_count]
    return range(1, 1 + original_length)
'Return the previous non-whitespace item.'
def previous_item(self):
    return self._prev_item
'The size of the current line minus the indentation.'
def current_size(self):
    size = 0
    for item in reversed(self._lines):
        size += item.size
        if isinstance(item, self._LineBreak):
            break
    return size
'Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not.'
def _add_item(self, item, indent_amt):
    if self._prev_item and self._prev_item.is_string and item.is_string:
        # Place consecutive string literals on separate lines.
        self._lines.append(self._LineBreak())
        self._lines.append(self._Indent(indent_amt))

    item_text = unicode(item)
    if self._lines and self._bracket_depth:
        # Adding the item into a container.
        self._prevent_default_initializer_splitting(item, indent_amt)
        if item_text in u'.,)]}':
            self._split_after_delimiter(item, indent_amt)
    elif self._lines and not self.line_empty():
        # Adding the item outside of a container.
        if self.fits_on_current_line(len(item_text)):
            self._enforce_space(item)
        else:
            # Line break for the new item.
            self._lines.append(self._LineBreak())
            self._lines.append(self._Indent(indent_amt))

    self._lines.append(item)
    self._prev_item, self._prev_prev_item = item, self._prev_item

    if item_text in u'([{':
        self._bracket_depth += 1
    elif item_text in u'}])':
        self._bracket_depth -= 1
        assert self._bracket_depth >= 0
'Prevent splitting between a default initializer. When there is a default initializer, it\'s best to keep it all on the same line. It\'s nicer and more readable, even if it goes over the maximum allowable line length. This goes back along the current line to determine if we have a default initializer, and, if so, to remove extraneous whitespaces and add a line break/indent before it if needed.'
def _prevent_default_initializer_splitting(self, item, indent_amt):
    if unicode(item) == u'=':
        # This is the assignment in the initializer. Just remove whitespace.
        self._delete_whitespace()
        return

    if (not self._prev_item or not self._prev_prev_item or
            unicode(self._prev_item) != u'='):
        return

    self._delete_whitespace()
    prev_prev_index = self._lines.index(self._prev_prev_item)

    if (isinstance(self._lines[prev_prev_index - 1], self._Indent) or
            self.fits_on_current_line(item.size + 1)):
        # The default initializer is already the only item on this line.
        return

    if isinstance(self._lines[prev_prev_index - 1], self._Space):
        del self._lines[prev_prev_index - 1]

    self.add_line_break_at(self._lines.index(self._prev_prev_item), indent_amt)
'Split the line only after a delimiter.'
def _split_after_delimiter(self, item, indent_amt):
    self._delete_whitespace()

    if self.fits_on_current_line(item.size):
        return

    last_space = None
    for current_item in reversed(self._lines):
        if (last_space and (not isinstance(current_item, Atom) or
                            not current_item.is_colon)):
            break
        else:
            last_space = None
        if isinstance(current_item, self._Space):
            last_space = current_item
        if isinstance(current_item, (self._LineBreak, self._Indent)):
            return

    if not last_space:
        return

    self.add_line_break_at(self._lines.index(last_space), indent_amt)
'Enforce a space in certain situations. There are cases where we will want a space where normally we wouldn\'t put one. This just enforces the addition of a space.'
def _enforce_space(self, item):
    if isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)):
        return

    if not self._prev_item:
        return

    item_text = unicode(item)
    prev_text = unicode(self._prev_item)

    # Enforce a space within an import statement.
    if ((item_text == u'.' and prev_text == u'from') or
            (item_text == u'import' and prev_text == u'.') or
            (item_text == u'(' and prev_text == u'import')):
        self._lines.append(self._Space())
'Delete all whitespace from the end of the line.'
def _delete_whitespace(self):
    while isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)):
        del self._lines[-1]
'The extent of the full element. E.g., the length of a function call or keyword.'
def _get_extent(self, index):
    extent = 0
    prev_item = get_item(self._items, index - 1)
    seen_dot = prev_item and unicode(prev_item) == u'.'
    while index < len(self._items):
        item = get_item(self._items, index)
        index += 1

        if isinstance(item, (ListComprehension, IfExpression)):
            break

        if isinstance(item, Container):
            if prev_item and prev_item.is_name:
                if seen_dot:
                    extent += 1
                else:
                    extent += item.size
                prev_item = item
                continue
        elif (unicode(item) not in [u'.', u'=', u':', u'not'] and
                not item.is_name and not item.is_string):
            break

        if unicode(item) == u'.':
            seen_dot = True

        extent += item.size
        prev_item = item

    return extent
'Fix indentation and return modified line numbers. Line numbers are indexed at 1.'
def run(self, indent_size=DEFAULT_INDENT_SIZE):
    if indent_size < 1:
        return self.input_text

    try:
        stats = _reindent_stats(tokenize.generate_tokens(self.getline))
    except (SyntaxError, tokenize.TokenError):
        return self.input_text

    lines = self.lines
    # Sentinel.
    stats.append((len(lines), 0))
    # Map count of leading spaces to the count we want.
    have2want = {}
    # Program after transformation.
    after = []
    # Copy over initial empty lines -- there's nothing to do until we see a
    # line with *something* on it.
    i = stats[0][0]
    after.extend(lines[1:i])
    for i in range(len(stats) - 1):
        thisstmt, thislevel = stats[i]
        nextstmt = stats[i + 1][0]
        have = _leading_space_count(lines[thisstmt])
        want = thislevel * indent_size
        if want < 0:
            # A comment line.
            if have:
                # An indented comment line. If we saw the same indentation
                # before, reuse what it most recently mapped to.
                want = have2want.get(have, -1)
                if want < 0:
                    # Then it probably belongs to the next real statement.
                    for j in range(i + 1, len(stats) - 1):
                        jline, jlevel = stats[j]
                        if jlevel >= 0:
                            if have == _leading_space_count(lines[jline]):
                                want = jlevel * indent_size
                            break
                if want < 0:
                    # Maybe it's a hanging comment; shift it like its base
                    # line got shifted.
                    for j in range(i - 1, -1, -1):
                        jline, jlevel = stats[j]
                        if jlevel >= 0:
                            want = (have +
                                    _leading_space_count(after[jline - 1]) -
                                    _leading_space_count(lines[jline]))
                            break
                if want < 0:
                    # Still no luck -- leave it alone.
                    want = have
            else:
                want = 0
        assert want >= 0
        have2want[have] = want
        diff = want - have
        if diff == 0 or have == 0:
            after.extend(lines[thisstmt:nextstmt])
        else:
            for line_number, line in enumerate(lines[thisstmt:nextstmt],
                                               start=thisstmt):
                if line_number in self.string_content_line_numbers:
                    after.append(line)
                elif diff > 0:
                    if line == u'\n':
                        after.append(line)
                    else:
                        after.append(u' ' * diff + line)
                else:
                    remove = min(_leading_space_count(line), -diff)
                    after.append(line[remove:])
    return u''.join(after)
'Line-getter for tokenize.'
def getline(self):
    if self.index >= len(self.lines):
        line = u''
    else:
        line = self.lines[self.index]
        self.index += 1
    return line
'A stand-in for tokenize.generate_tokens().'
def generate_tokens(self, text):
    if text != self.last_text:
        string_io = io.StringIO(text)
        self.last_tokens = list(tokenize.generate_tokens(string_io.readline))
        self.last_text = text
    return self.last_tokens
'Returns list of people blocking user.'
def get_blockers_for_user(self, user, flat=False):
    user_list = self.filter(to_user=user, is_blocked=True)
    return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['BLOCKERS'],
                           flat=flat, flat_attr='from_user')
'Returns people user is following sans people blocking user.'
def get_friends_for_user(self, user, flat=False):
    blocked_id_list = self.get_blockers_for_user(user, flat=True)
    user_list = self.filter(from_user=user, is_blocked=False).exclude(
        to_user__in=blocked_id_list)
    return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FRIENDS'],
                           flat=flat)
'Returns people following user.'
def get_followers_for_user(self, user, flat=False):
    user_list = self.filter(to_user=user, is_blocked=False)
    return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FOLLOWERS'],
                           flat=flat, flat_attr='from_user')
'Returns people following user but user isn\'t following.'
def get_fans_for_user(self, user, flat=False):
    friend_id_list = self.get_friends_for_user(user, flat=True)
    user_list = self.filter(to_user=user, is_blocked=False).exclude(
        from_user__in=friend_id_list)
    return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FANS'],
                           flat=flat, flat_attr='from_user')
'Returns True if from_user is blocking to_user.'
def blocking(self, from_user, to_user):
    try:
        relationship = self.get(from_user=from_user, to_user=to_user)
        if relationship.is_blocked:
            return True
    except:
        return False
    return False
':type root: TreeNode :type key: int :rtype: TreeNode'
def deleteNode(self, root, key):
    if not root:
        return None
    if root.val == key:
        if root.left:
            # Splice the right subtree onto the rightmost node of the left
            # subtree, then promote the left subtree.
            left_right_most = root.left
            while left_right_most.right:
                left_right_most = left_right_most.right
            left_right_most.right = root.right
            return root.left
        else:
            return root.right
    elif root.val > key:
        root.left = self.deleteNode(root.left, key)
    else:
        root.right = self.deleteNode(root.right, key)
    return root
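
# Usage sketch, assuming the usual LeetCode TreeNode definition and that
# deleteNode lives on a class named Solution (both are assumptions here):
class TreeNode(object):
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

# Deleting 3:    5            5
#               / \          / \
#              3   6   ->   2   6
#             / \             \
#            2   4             4
root = TreeNode(5)
root.left, root.right = TreeNode(3), TreeNode(6)
root.left.left, root.left.right = TreeNode(2), TreeNode(4)
root = Solution().deleteNode(root, 3)
assert root.left.val == 2 and root.left.right.val == 4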
':type root: TreeNode :type k: int :rtype: int'
def kthSmallest(self, root, k):
    count = []
    self.helper(root, count)
    return count[k - 1]
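
# The helper() used above is not shown here; a plausible sketch (assumption)
# is an in-order traversal, which visits BST values in ascending order:
def helper(self, node, count):
    if not node:
        return
    self.helper(node.left, count)
    count.append(node.val)
    self.helper(node.right, count)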
'Initialize your data structure here. :type size: int'
def __init__(self, size):
    self.queue = deque(maxlen=size)
':type val: int :rtype: float'
def next(self, val):
    self.queue.append(val)
    return sum(self.queue) / len(self.queue)
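
# Usage sketch, assuming the class is named MovingAverage (LeetCode 346)
# and Python 3 division semantics:
m = MovingAverage(3)
assert m.next(1) == 1.0      # window: [1]
assert m.next(10) == 5.5     # window: [1, 10]
assert m.next(3) == 14 / 3   # window: [1, 10, 3]
assert m.next(5) == 6.0      # window: [10, 3, 5]; 1 fell out of the window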
'Initialize a Python list with a size of 10 or a user-given input. Python\'s list type is a dynamic array, so we have to restrict its dynamic nature to make it work like a static array.'
def __init__(self, size=10):
    AbstractStack.__init__(self)
    self.array = [None] * size
    self.front = 0
    self.rear = 0
'Expand the size of the array. Time complexity: O(n).'
def expand(self):
    new_array = [None] * len(self.array) * 2  # double the capacity
    for i, element in enumerate(self.array):
        new_array[i] = element
    self.array = new_array
'Initialize your data structure here. :type v1: List[int] :type v2: List[int]'
def __init__(self, v1, v2):
    self.queue = [_ for _ in (v1, v2) if _]
    print(self.queue)
':rtype: int'
def next(self):
    v = self.queue.pop(0)
    ret = v.pop(0)
    if v:
        self.queue.append(v)
    return ret
':rtype: bool'
def has_next(self):
    if self.queue:
        return True
    return False
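
# Usage sketch, assuming the surrounding class is named ZigzagIterator
# (LeetCode 281): values are drawn alternately from the two lists.
it = ZigzagIterator([1, 2], [3, 4, 5])
out = []
while it.has_next():
    out.append(it.next())
assert out == [1, 3, 2, 4, 5]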
'Linear probing.'
def _rehash(self, old_hash):
    return (old_hash + 1) % self.size
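
# The modulo makes the probe sequence wrap around the table; e.g. with
# self.size == 8, successive rehashes starting at slot 5 visit:
size = 8
seq = [5]
for _ in range(3):
    seq.append((seq[-1] + 1) % size)
assert seq == [5, 6, 7, 0]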
'Initialize a Python list with a size of 10 or a user-given input. Python\'s list type is a dynamic array, so we have to restrict its dynamic nature to make it work like a static array.'
def __init__(self, size=10):
    AbstractStack.__init__(self)
    self.array = [None] * size
'Expand the size of the array. Time complexity: O(n).'
def expand(self):
    newArray = [None] * len(self.array) * 2  # double the capacity
    for i, element in enumerate(self.array):
        newArray[i] = element
    self.array = newArray
'Init session.'
def __init__(self):
    self.session = requests.sessions.Session()
'Setting up authentication headers.'
def set_auth_headers(self, access_token, client_id):
    self.headers[u'X-Udemy-Bearer-Token'] = access_token
    self.headers[u'X-Udemy-Client-Id'] = client_id
    self.headers[u'Authorization'] = u'Bearer ' + access_token
    self.headers[u'X-Udemy-Authorization'] = u'Bearer ' + access_token
'Retrieving content of a given url.'
def get(self, url):
    return self.session.get(url, headers=self.headers)
'HTTP post given data with requests object.'
def post(self, url, data):
    return self.session.post(url, data, headers=self.headers)
'Init RequestProcessingException with data used to build a reply to the client.'
def __init__(self, code, reason, explanation=''):
    self.code = code
    self.reason = reason
    self.explanation = explanation
'Override logging settings of the base class. This method reformats standard request logging provided by the base class BaseHTTPRequestHandler and sends it to the logger/formatter configured by the user during logging module initialization. Args: fmt, args: just like the base class.'
def log_message(self, fmt, *args):
    logging.info('REQ: {0} {1}'.format(self.address_string(), fmt % args))
'Send reply to client in JSON format. Send a successful reply to the client, with reply data/body formatted/serialized as JSON. It also makes sure that headers are set right and that the JSON is formatted in human-readable form. Args: data: free-form data that should be serialized to JSON.'
def _send_reply(self, data):
    self.send_response(200)
    self.send_header('Content-type', 'application/json')
    self.end_headers()

    body_str = json.dumps(data, sort_keys=True, indent=4,
                          separators=(',', ': '))
    bytes_arr = bytes(body_str, 'utf-8')
    self.wfile.write(bytes_arr)
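
# What the dumps() settings above produce, e.g. for the /ping reply below:
import json
body = json.dumps({'pong': True}, sort_keys=True, indent=4,
                  separators=(',', ': '))
assert body == '{\n    "pong": true\n}'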
'Respond to a DNS resolution request with the DNS results for a name.'
def _handle_path_dns_search(self):
    def get_hostbyname_json(hostname):
        try:
            return socket.gethostbyname(hostname)
        except socket.gaierror as ex:
            return {'error': str(ex)}

    data = {'search_hit_leader': get_hostbyname_json('leader'),
            'always_miss': get_hostbyname_json('notasubdomainofmesos'),
            'always_hit_leader': get_hostbyname_json('leader.mesos'),
            'test_uuid': os.environ[TEST_UUID_VARNAME]}
    self._send_reply(data)
'Respond to PING request with PONG reply'
def _handle_path_ping(self):
    data = {'pong': True}
    self._send_reply(data)
'Respond to request for UUID with server\'s test-session UUID.'
def _handle_path_uuid(self):
    data = {'test_uuid': os.environ[TEST_UUID_VARNAME]}
    self._send_reply(data)
'Respond to request for client\'s IP with its IP as seen by the server.'
def _handle_path_reflect(self):
    data = {'test_uuid': os.environ[TEST_UUID_VARNAME],
            'request_ip': self.address_string()}
    self._send_reply(data)
'Use the server to cache results from application runs.'
def _handle_path_signal_test_cache(self, set_data):
    global TEST_DATA_CACHE

    if set_data:
        TEST_DATA_CACHE = self.rfile.read(
            int(self.headers['Content-Length'])).decode()

    self._send_reply(TEST_DATA_CACHE)
'Parse request\'s POST headers in a utf-8 aware way. Returns: A dictionary with POST arguments mapped to their keys/values.'
def parse_POST_headers(self):
    length = int(self.headers['Content-Length'])
    field_data = self.rfile.read(length).decode('utf-8')
    fields = urllib.parse.parse_qs(field_data)
    logging.debug("Request's POST arguments: {}".format(fields))
    return fields
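
# parse_qs maps every key to a list of values, which is why callers index
# with [0] (see _verify_path_your_ip_args below):
import urllib.parse
fields = urllib.parse.parse_qs('reflector_ip=127.0.0.1&reflector_port=16001')
assert fields == {'reflector_ip': ['127.0.0.1'],
                  'reflector_port': ['16001']}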
'Verify /your_ip request\'s arguments. Make sure that the POST arguments sent by the client while requesting the /your_ip path are valid. Args: fields: decoded POST arguments sent by the client in the form of a dictionary. Raises: RequestProcessingException: some/all arguments are missing and/or malformed. Request should be aborted.'
def _verify_path_your_ip_args(self, fields):
    if 'reflector_ip' not in fields or 'reflector_port' not in fields:
        raise RequestProcessingException(
            400,
            'Reflector data missing',
            'Reflector IP and/or port has not been provided, '
            'so the request cannot be processed.')

    fields['reflector_ip'] = fields['reflector_ip'][0]
    fields['reflector_port'] = fields['reflector_port'][0]

    try:
        fields['reflector_port'] = int(fields['reflector_port'])
    except ValueError:
        msg = 'Reflector port "{}" is not an integer'
        raise RequestProcessingException(
            400, msg.format(fields['reflector_port']))

    try:
        socket.inet_aton(fields['reflector_ip'])
    except socket.error:
        msg = 'Reflector IP "{}" is invalid/not a proper ipv4'
        raise RequestProcessingException(
            400, msg.format(fields['reflector_ip']))
'Ask the reflector to report server\'s IP address. This method queries an external reflector for the server\'s IP address. It\'s done by sending a \'GET /reflect\' request to a test_server running on some other mesos slave. Please see the description of the \'_handle_path_reflect\' method for more details. Args: reflector_ip: IP where the test_server used as a reflector can be found. reflector_port: TCP port on which the reflector listens for incoming connections. Raises: RequestProcessingException: server failed to communicate with the reflector, and the request that initiated the query should be aborted.'
def _query_reflector_for_ip(self, reflector_ip, reflector_port):
    uri = 'http://{}:{}/reflect'.format(reflector_ip, reflector_port)

    try:
        r = requests.get(uri, timeout=1.0)
    except requests.Timeout as e:
        raise RequestProcessingException(
            500,
            'Reflector timed out',
            'Reflector was unable to respond in timely manner: {}'.format(e))
    except requests.RequestException as e:
        raise RequestProcessingException(
            500,
            'Reflector connection error',
            'Unable to connect to reflector: {}'.format(e))

    if r.status_code != 200:
        msg_short = 'Data fetch from reflector failed.'
        msg_detailed = 'Reflector responded with code: {}, response body: {}'
        reply_body = r.text.replace('\n', ' ')
        raise RequestProcessingException(
            500, msg_short, msg_detailed.format(r.status_code, reply_body))

    try:
        return r.json()
    except ValueError as e:
        raise RequestProcessingException(
            500,
            'Malformed reflector response',
            'Reflectors response is not a valid JSON: {}'.format(e))
'Responds to requests for the server\'s IP address as seen by other cluster members. Determine the server\'s address by querying an external reflector (basically the same test_server, but a different service endpoint), and respond to the client with a JSON hash containing the test UUIDs of the server and the reflector, and the IP address as reported by the reflector.'
def _handle_path_your_ip(self):
    form_data = self.parse_POST_headers()
    self._verify_path_your_ip_args(form_data)

    reflector_data = self._query_reflector_for_ip(form_data['reflector_ip'],
                                                  form_data['reflector_port'])

    data = {'reflector_uuid': reflector_data['test_uuid'],
            'test_uuid': os.environ[TEST_UUID_VARNAME],
            'my_ip': reflector_data['request_ip']}
    self._send_reply(data)
'Runs an arbitrary command, and returns the output along with the return code. Sometimes there isn\'t enough time to write code.'
def _handle_path_run_cmd(self):
    length = int(self.headers['Content-Length'])
    cmd = self.rfile.read(length).decode('utf-8')
    status, output = subprocess.getstatusoutput(cmd)
    data = {'status': status, 'output': output}
    self._send_reply(data)
'Gets basic operating environment info (such as running user)'
def _handle_operating_environment(self):
    self._send_reply({'uid': os.getuid()})
'Mini service router handling GET requests'
def do_GET(self):
    if self.path == '/dns_search':
        self._handle_path_dns_search()
    elif self.path == '/operating_environment':
        self._handle_operating_environment()
    elif self.path == '/ping':
        self._handle_path_ping()
    elif self.path == '/reflect':
        self._handle_path_reflect()
    elif self.path == '/signal_test_cache':
        self._handle_path_signal_test_cache(False)
    elif self.path == '/test_uuid':
        self._handle_path_uuid()
    else:
        self.send_error(404, 'Not found', 'Endpoint is not supported')
'Mini service router handling POST requests'
def do_POST(self):
    if self.path == '/your_ip':
        try:
            self._handle_path_your_ip()
        except RequestProcessingException as e:
            logging.error(
                "Request processing exception occured: "
                "code: {}, reason: '{}', explanation: '{}'".format(
                    e.code, e.reason, e.explanation))
            self.send_error(e.code, e.reason, e.explanation)
    elif self.path == '/signal_test_cache':
        self._handle_path_signal_test_cache(True)
    elif self.path == '/run_cmd':
        self._handle_path_run_cmd()
    else:
        self.send_error(404, 'Not found', 'Endpoint is not supported')
'...right after Nginx has started.'
def test_if_cache_refresh_is_triggered_by_request(self, nginx_class, mocker, valid_user_header):
    filter_regexp = {
        'Executing cache refresh triggered by request': SearchCriteria(1, True),
        'Cache `[\\s\\w]+` empty. Fetching.': SearchCriteria(3, True),
        'Mesos state cache has been successfully updated': SearchCriteria(1, True),
        'Marathon apps cache has been successfully updated': SearchCriteria(1, True),
        'Marathon leader cache has been successfully updated': SearchCriteria(1, True),
    }

    mocker.send_command(endpoint_id='http://127.0.0.1:8080',
                        func_name='record_requests')
    mocker.send_command(endpoint_id='http://127.0.0.2:5050',
                        func_name='record_requests')

    ar = nginx_class(cache_first_poll_delay=120,
                     cache_poll_period=120,
                     cache_expiration=115)

    with GuardedSubprocess(ar):
        lbf = LineBufferFilter(filter_regexp,
                               timeout=5,
                               line_buffer=ar.stderr_line_buffer)
        ping_mesos_agent(ar, valid_user_header)
        lbf.scan_log_buffer()

        # An extra request that should be served entirely from the cache:
        ping_mesos_agent(ar, valid_user_header)

    mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
                                         func_name='get_recorded_requests')
    marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
                                            func_name='get_recorded_requests')

    assert lbf.extra_matches == {}
    assert len(mesos_requests) == 1
    assert len(marathon_requests) == 2
'Test that an app that, instead of specifying \'ipAddress: null\', does not specify \'ipAddress\' at all, is successfully cached.'
def test_ip_per_task_app_with_unspecified_ip_address_DCOS_OSS_1366(self, nginx_class, mocker, valid_user_header):
    app = self._scheduler_alwaysthere_app()
    del app['ipAddress']

    ar = nginx_class()
    mocker.send_command(endpoint_id='http://127.0.0.1:8080',
                        func_name='set_apps_response',
                        aux_data={'apps': [app]})

    url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
    with GuardedSubprocess(ar):
        resp = requests.get(url,
                            allow_redirects=False,
                            headers=valid_user_header)

        assert resp.status_code == 200
        req_data = resp.json()
        assert req_data['endpoint_id'] == 'http://127.0.0.1:16000'
'Helper method that will assert if the provided regexp filter is found in nginx logs for a given apps response from the Marathon upstream endpoint. Arguments: filter_regexp (dict): Filter definition where the key is the message looked up in logs and the value is a SearchCriteria definition. app (dict): App that the upstream endpoint should respond with. nginx_class (Nginx): Nginx process fixture. mocker (Mocker): Mocker fixture. auth_headers (dict): Headers that should be passed to Nginx in the request.'
def _assert_filter_regexp_for_invalid_app(self, filter_regexp, app, nginx_class, mocker, auth_headers):
    ar = nginx_class()
    mocker.send_command(endpoint_id='http://127.0.0.1:8080',
                        func_name='set_apps_response',
                        aux_data={'apps': [app]})
    mocker.send_command(endpoint_id='http://127.0.0.2:5050',
                        func_name='set_frameworks_response',
                        aux_data=[])
    mocker.send_command(endpoint_id='http://127.0.0.1:8123',
                        func_name='set_srv_response',
                        aux_data=[])

    url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
    with GuardedSubprocess(ar):
        lbf = LineBufferFilter(filter_regexp,
                               timeout=5,
                               line_buffer=ar.stderr_line_buffer)
        resp = requests.get(url,
                            allow_redirects=False,
                            headers=auth_headers)
        assert resp.status_code == 404
        lbf.scan_log_buffer()

    assert lbf.extra_matches == {}
'Returns a valid Marathon app with the \'/scheduler-alwaysthere\' id'
def _scheduler_alwaysthere_app(self):
    return copy.deepcopy(SCHEDULER_APP_ALWAYSTHERE)
'Initialize new SearchCriteria object. Attributes: occurrences (int): number of occurrences of the particular regexp in the buffer. exact (bool): should the `occurrences` attribute be treated as `exact number of occurrences` (True), or `at least that many occurrences` (False).'
def __init__(self, occurrences, exact):
    self.occurrences = occurrences
    self.exact = exact
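
# Example: require exactly one match of the first regexp and at least two
# of the second (mirrors the usage in the tests above):
filter_regexp = {
    'Executing cache refresh triggered by request': SearchCriteria(1, True),
    'Cache `[\\s\\w]+` empty. Fetching.': SearchCriteria(2, False),
}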
'Initialize new LineBufferFilter object. Create a new LineBufferFilter object configured to search for string `filter_regexp` in line buffer `line_buffer` for up to `timeout` seconds. Args: line_buffer (list()): an array of log lines, as presented by the `.*_line_buffer()` method of the object we want to scan lines for. timeout (int): how long before LineBufferFilter gives up on searching for filter_regexp in line_buffer. filter_regexp: see below. The `filter_regexp` argument can have 3 forms: * a regexp that the instance should look for in the logs. It has to be matched at least once. * a list of regexpes that the instance should look for in the logs. Each one of them has to be matched at least once. * a dictionary with a regexp as a key and a SearchCriteria object as the value. The SearchCriteria object determines how exactly the given regexp is going to be matched.'
def __init__(self, filter_regexpes, line_buffer, timeout=3):
    assert isinstance(timeout, int)
    assert timeout >= LOG_LINE_SEARCH_INTERVAL
    assert isinstance(line_buffer, list)

    self._line_buffer = line_buffer
    self._timeout = timeout
    self._filter_regexpes = filter_regexpes
'Scan for `filter_regexp` since the beginning of the given instance\'s log. This is a convenience function that forces a search for the `filter_regexp` since the beginning of the log buffer. It does so by simply fixing the start position and calling the __exit__() method of the context manager.'
def scan_log_buffer(self):
    self._line_buffer_start = 0
    self.__exit__()
'Helper method that abstracts matching of the line against multiple regexpes. Each match is registered, so that it\'s possible to determine if search criteria were met. Arguments: line (str): a line to match'
def _match_line_against_filter_regexpes(self, line):
    for filter_regexp in self._filter_regexpes:
        if re.search(filter_regexp, line, flags=0):
            sc = self._filter_regexpes[filter_regexp]
            if sc.exact and sc.occurrences <= 0:
                log.warning('filter string `%s` matched more times than requested',
                            filter_regexp)
            sc.occurrences -= 1
'Context manager __exit__ method for filter string search This is the heart of the LineBufferFilter - the whole matching happens here.'
def __exit__(self, *unused):
    msg_fmt = 'Beginning to scan for line `%s` in logline buffer'
    log.debug(msg_fmt, list(self._filter_regexpes.keys()))

    deadline = time.time() + self._timeout

    while time.time() < deadline:
        lines_scanned = 0
        for log_line in self._line_buffer[self._line_buffer_start:]:
            self._match_line_against_filter_regexpes(log_line)
            if self._all_found:
                return
            lines_scanned += 1
        self._line_buffer_start = self._line_buffer_start + lines_scanned

        msg_fmt = 'waiting for strings `%s` to appear in logline buffer'
        log.debug(msg_fmt, self._regexpes_still_not_matched)
        time.sleep(LOG_LINE_SEARCH_INTERVAL)

    msg_fmt = 'Timed out while waiting for strings `%s` to appear in logline buffer'
    log.debug(msg_fmt, self._regexpes_still_not_matched)
'Helper function that returns a list of regexpes that still have not met their search criteria.'
@property
def _regexpes_still_not_matched(self):
    return [x for x in self._filter_regexpes
            if self._filter_regexpes[x].occurrences > 0]
'Helper - check if all search criteria have been met.'
@property
def _all_found(self):
    return all(sc.occurrences <= 0
               for sc in self._filter_regexpes.values())
'Detailed information about regexpes that have and/or have not been matched. This property can be useful if, e.g., there were mixed search criteria - some of the regexpes had to be strictly matched, some not. Return: It returns a dictionary with regexpes from the `filter_regexpes` argument of `__init__()` as keys and the number of matches as values. This number can have 3 different values: * if the regexp was matched exactly the number of times specified (once for a regexp and a list of regexpes in the `filter_regexpes` argument), it has a value of zero and the key is not present in the resulting dictionary * if the input has not been matched at all in case of a regexp and a list of regexpes in the `filter_regexpes` argument, or fewer than the requested number of times in case of the detailed `filter_regexpes` form, it\'s a positive number * if the input has been matched more times than anticipated - a negative number. Usually it\'s used in the `assert lbf.extra_matches == {}` form in tests.'
@property
def extra_matches(self):
    left = {}
    for filter_regexp in self._filter_regexpes:
        search_criteria = self._filter_regexpes[filter_regexp]
        if (search_criteria.occurrences > 0 or
                (search_criteria.exact and search_criteria.occurrences < 0)):
            search_criteria.occurrences = -search_criteria.occurrences
            left[filter_regexp] = search_criteria.occurrences
    return left
'Bind to the socket specified by self.SOCKET_PATH. Worth noting is that Nginx setuids to user nobody, thus it is necessary to give very open permissions on the socket so that it can be accessed by the AR instance.'
def _bind_socket(self):
    self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    self._socket.bind(self.SOCKET_PATH)
    os.chmod(self.SOCKET_PATH, 0o666)  # world-readable/writable (438 decimal)