repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars; same text as original_string) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 class: train)
---|---|---|---|---|---|---|---|---|---|---|---
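
The records below follow the CodeSearchNet-style schema above. As a minimal, hedged sketch (assuming the Hugging Face `datasets` library, and assuming this dump is the `code_search_net` Python split, which the dump itself does not confirm), rows with these columns can be iterated like this:

```python
# Sketch only: the dataset name and split are assumptions, not confirmed by the dump.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")
for row in ds.select(range(3)):
    # 'code' duplicates 'original_string'; the '*_tokens' columns are tokenized copies
    print(row["repo"], row["func_name"], row["url"])
    print(row["docstring"])
```
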
wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._get_char_x | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L408-L412 | train

def _get_char_x(self, charno):
    """
    Get the X coordinate of a character position.
    """
    return charno * self.fontw + self.image_pad + self.line_number_width

wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._get_text_pos | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L414-L418 | train

def _get_text_pos(self, charno, lineno):
    """
    Get the actual position for a character and line position.
    """
    return self._get_char_x(charno), self._get_line_y(lineno)

wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._get_image_size | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L442-L447 | train

def _get_image_size(self, maxcharno, maxlineno):
    """
    Get the required image size.
    """
    return (self._get_char_x(maxcharno) + self.image_pad,
            self._get_line_y(maxlineno + 0) + self.image_pad)

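A worked example of the coordinate arithmetic in the three img.py helpers above, with illustrative font metrics. The per-character cell sizes, pads, and the exact shape of `_get_line_y` are assumptions here, not values taken from the source:

```python
# Illustrative numbers only; _get_line_y's exact formula is assumed.
fontw, fonth = 8, 15             # assumed per-character cell size in pixels
image_pad = 10
line_number_width = 40

def get_char_x(charno):
    # mirrors _get_char_x above
    return charno * fontw + image_pad + line_number_width

def get_line_y(lineno):
    # assumption: linear in lineno, analogous to get_char_x
    return lineno * fonth + image_pad

print(get_char_x(12), get_line_y(3))   # -> 146 55
```
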
wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._draw_linenumber | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L449-L459 | train

def _draw_linenumber(self, posno, lineno):
    """
    Remember a line number drawable to paint later.
    """
    self._draw_text(
        self._get_linenumber_pos(posno),
        str(lineno).rjust(self.line_number_chars),
        font=self.fonts.get_font(self.line_number_bold,
                                 self.line_number_italic),
        fill=self.line_number_fg,
    )

wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._draw_text | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L461-L465 | train

def _draw_text(self, pos, text, font, **kw):
    """
    Remember a single drawable tuple to paint later.
    """
    self.drawables.append((pos, text, font, kw))

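The two `_draw_*` helpers above only record work; painting happens in a later pass over `self.drawables`. A standalone sketch of that record-now, paint-later pattern (names here are illustrative, not Pygments' API):

```python
drawables = []

def draw_text(pos, text, font=None, **kw):
    # record the drawable; a real formatter paints it in a later pass
    drawables.append((pos, text, font, kw))

draw_text((0, 0), "hello", fill="#333333")
draw_text((0, 17), "world", fill="#999999")

for pos, text, font, kw in drawables:
    print(pos, text, kw)  # Pygments would call ImageDraw.text(pos, text, font=font, **kw)
```
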
wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._create_drawables | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L467-L498 | train

def _create_drawables(self, tokensource):
    """
    Create drawables for the token content.
    """
    lineno = charno = maxcharno = 0
    for ttype, value in tokensource:
        while ttype not in self.styles:
            ttype = ttype.parent
        style = self.styles[ttype]
        # TODO: make sure tab expansion happens earlier in the chain.  It
        # really ought to be done on the input, as to do it right here is
        # quite complex.
        value = value.expandtabs(4)
        lines = value.splitlines(True)
        # print lines
        for i, line in enumerate(lines):
            temp = line.rstrip('\n')
            if temp:
                self._draw_text(
                    self._get_text_pos(charno, lineno),
                    temp,
                    font = self._get_style_font(style),
                    fill = self._get_text_color(style)
                )
                charno += len(temp)
                maxcharno = max(maxcharno, charno)
            if line.endswith('\n'):
                # add a line for each extra line in the value
                charno = 0
                lineno += 1
    self.maxcharno = maxcharno
    self.maxlineno = lineno

wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._draw_line_numbers | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L500-L509 | train

def _draw_line_numbers(self):
    """
    Create drawables for the line numbers.
    """
    if not self.line_numbers:
        return
    for p in xrange(self.maxlineno):
        n = p + self.line_number_start
        if (n % self.line_number_step) == 0:
            self._draw_linenumber(p, n)

wakatime/wakatime | wakatime/packages/pygments/formatters/img.py | ImageFormatter._paint_line_number_bg | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L511-L525 | train

def _paint_line_number_bg(self, im):
    """
    Paint the line number background on the image.
    """
    if not self.line_numbers:
        return
    if self.line_number_fg is None:
        return
    draw = ImageDraw.Draw(im)
    recth = im.size[-1]
    rectw = self.image_pad + self.line_number_width - self.line_number_pad
    draw.rectangle([(0, 0), (rectw, recth)],
                   fill=self.line_number_bg)
    draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
    del draw

wakatime/wakatime | wakatime/heartbeat.py | Heartbeat.update | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L130-L136 | train

def update(self, attrs):
    """Return a copy of the current Heartbeat with updated attributes."""
    data = self.dict()
    data.update(attrs)
    heartbeat = Heartbeat(data, self.args, self.configs, _clone=True)

    return heartbeat

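`Heartbeat.update` above is a copy-then-update constructor: it mutates a copied dict and builds a fresh object, never touching the original. The idiom in isolation:

```python
base = {'entity': 'main.py', 'type': 'file'}

updated = dict(base)             # shallow copy, like self.dict() above
updated.update({'type': 'app'})

assert base['type'] == 'file'    # the original is untouched
assert updated['type'] == 'app'
```
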
wakatime/wakatime | wakatime/heartbeat.py | Heartbeat.sanitize | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L138-L160 | train

def sanitize(self):
    """Removes sensitive data including file names and dependencies.

    Returns a Heartbeat.
    """
    if not self.args.hide_file_names:
        return self

    if self.entity is None:
        return self

    if self.type != 'file':
        return self

    if self.should_obfuscate_filename():
        self._sanitize_metadata()
        extension = u(os.path.splitext(self.entity)[1])
        self.entity = u('HIDDEN{0}').format(extension)
    elif self.should_obfuscate_project():
        self._sanitize_metadata()

    return self

wakatime/wakatime | wakatime/heartbeat.py | Heartbeat.should_obfuscate_filename | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L196-L210 | train

def should_obfuscate_filename(self):
    """Returns True if hide_file_names is true or the entity file path
    matches one in the list of obfuscated file paths."""
    for pattern in self.args.hide_file_names:
        try:
            compiled = re.compile(pattern, re.IGNORECASE)
            if compiled.search(self.entity):
                return True
        except re.error as ex:
            log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
                msg=u(ex),
                pattern=u(pattern),
            ))
    return False

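The loop in `should_obfuscate_filename` compiles each user-supplied pattern defensively, so one bad regex cannot crash the whole check. A self-contained sketch of the same guard (the function name here is illustrative, not WakaTime's API):

```python
import re

def matches_any(patterns, path):
    """Return True if any regex in patterns matches path, case-insensitively."""
    for pattern in patterns:
        try:
            if re.search(pattern, path, re.IGNORECASE):
                return True
        except re.error:
            pass  # skip invalid user-supplied patterns instead of crashing
    return False

assert matches_any([r'secret', r'\.env$'], '/home/user/.env')
assert not matches_any([r'('], 'an unbalanced pattern is skipped, not fatal')
```
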
wakatime/wakatime | wakatime/heartbeat.py | Heartbeat._format_local_file | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L242-L261 | train

def _format_local_file(self):
    """When args.local_file is empty on Windows, tries to map args.entity to a
    unc path.

    Updates args.local_file in-place without returning anything.
    """

    if self.type != 'file':
        return

    if not self.entity:
        return

    if not is_win:
        return

    if self._file_exists():
        return

    self.args.local_file = self._to_unc_path(self.entity)

wakatime/wakatime | wakatime/packages/ntlm_auth/ntlm.py | Ntlm.create_negotiate_message | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L89-L99 | train

def create_negotiate_message(self, domain_name=None, workstation=None):
    """
    Create an NTLM NEGOTIATE_MESSAGE

    :param domain_name: The domain name of the user account we are authenticating with, default is None
    :param workstation: The workstation we are using to authenticate with, default is None
    :return: A base64 encoded string of the NEGOTIATE_MESSAGE
    """
    self.negotiate_message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)

    return base64.b64encode(self.negotiate_message.get_data())

wakatime/wakatime | wakatime/packages/ntlm_auth/ntlm.py | Ntlm.parse_challenge_message | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L101-L108 | train

def parse_challenge_message(self, msg2):
    """
    Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields

    :param msg2: A base64 encoded string of the CHALLENGE_MESSAGE
    """
    msg2 = base64.b64decode(msg2)
    self.challenge_message = ChallengeMessage(msg2)

wakatime/wakatime | wakatime/packages/ntlm_auth/ntlm.py | Ntlm.create_authenticate_message | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L110-L133 | train

def create_authenticate_message(self, user_name, password, domain_name=None, workstation=None, server_certificate_hash=None):
    """
    Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received

    :param user_name: The user name of the user we are trying to authenticate with
    :param password: The password of the user we are trying to authenticate with
    :param domain_name: The domain name of the user account we are authenticated with, default is None
    :param workstation: The workstation we are using to authenticate with, default is None
    :param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel
                                    Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage
                                    for more details
    :return: A base64 encoded string of the AUTHENTICATE_MESSAGE
    """
    self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation,
                                                    self.challenge_message, self.ntlm_compatibility,
                                                    server_certificate_hash)
    self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message)

    # Sets up the session_security context used to sign and seal messages if wanted
    if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
        self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0],
                                                self.authenticate_message.exported_session_key)

    return base64.b64encode(self.authenticate_message.get_data())

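The three `Ntlm` methods above implement the standard three-message NTLM handshake: negotiate, challenge, authenticate. A hedged sketch of the calling order, assuming `ntlm_auth.ntlm.Ntlm` is importable as the file path suggests; transport and the server's base64 challenge are omitted:

```python
from ntlm_auth.ntlm import Ntlm  # assumed import path, mirroring the file path above

context = Ntlm()
msg1 = context.create_negotiate_message(domain_name='DOMAIN', workstation='WS01')
# send msg1 in an 'Authorization: NTLM <msg1>' header, then read the server's
# 'WWW-Authenticate: NTLM <challenge_b64>' response header:
# context.parse_challenge_message(challenge_b64)
# msg3 = context.create_authenticate_message('user', 'password', domain_name='DOMAIN')
# send msg3 in a final 'Authorization: NTLM <msg3>' header
```
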
wakatime/wakatime | wakatime/packages/pygments/__init__.py | lex | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L38-L50 | train

def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError as err:
        if (isinstance(err.args[0], str) and
            ('unbound method get_tokens' in err.args[0] or
             'missing 1 required positional argument' in err.args[0])):
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise

wakatime/wakatime | wakatime/packages/pygments/__init__.py | format | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L53-L74 | train

def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    try:
        if not outfile:
            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError as err:
        if (isinstance(err.args[0], str) and
            ('unbound method format' in err.args[0] or
             'missing 1 required positional argument' in err.args[0])):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise

wakatime/wakatime | wakatime/packages/pygments/__init__.py | highlight | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L77-L85 | train

def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    return format(lex(code, lexer), formatter, outfile)

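The three functions above compose: `highlight` is just `format(lex(code, lexer), formatter, outfile)`. A quick usage example against upstream Pygments, including the instance-not-class guard those `TypeError` branches enforce:

```python
from pygments import highlight, lex
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight('print("hi")', PythonLexer(), HtmlFormatter())
print(html)                       # returned as a string when no outfile is given

tokens = list(lex('print("hi")', PythonLexer()))

try:
    highlight('print("hi")', PythonLexer, HtmlFormatter())  # class, not instance
except TypeError as err:
    print(err)                    # lex() argument must be a lexer instance, not a class
```
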
wakatime/wakatime | wakatime/configs.py | getConfigFile | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/configs.py#L28-L41 | train

def getConfigFile():
    """Returns the config file location.

    If $WAKATIME_HOME env variable is defined, returns
    $WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
    """

    fileName = '.wakatime.cfg'

    home = os.environ.get('WAKATIME_HOME')
    if home:
        return os.path.join(os.path.expanduser(home), fileName)

    return os.path.join(os.path.expanduser('~'), fileName)

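A runnable check of `getConfigFile`'s lookup order, re-implemented locally so the example does not depend on installing wakatime:

```python
import os

def get_config_file():
    # local copy of getConfigFile() above, for demonstration only
    file_name = '.wakatime.cfg'
    home = os.environ.get('WAKATIME_HOME')
    if home:
        return os.path.join(os.path.expanduser(home), file_name)
    return os.path.join(os.path.expanduser('~'), file_name)

os.environ['WAKATIME_HOME'] = '/opt/waka'
assert get_config_file() == '/opt/waka/.wakatime.cfg'

del os.environ['WAKATIME_HOME']
assert get_config_file() == os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
```
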
wakatime/wakatime | wakatime/packages/pygments/filters/__init__.py | find_filter_class | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filters/__init__.py#L23-L30 | train

def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None

wakatime/wakatime | wakatime/packages/pygments/filters/__init__.py | get_filter_by_name | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filters/__init__.py#L33-L43 | train

def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Options are passed to the filter initializer if wanted.
    Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls:
        return cls(**options)
    else:
        raise ClassNotFound('filter %r not found' % filtername)

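Usage of the two filter lookups above against upstream Pygments. The filter name and option below match Pygments' built-in registry as far as I know; treat them as assumptions if your version differs:

```python
from pygments.filters import get_filter_by_name
from pygments.util import ClassNotFound

filt = get_filter_by_name('keywordcase', case='upper')  # instantiated, options passed through

try:
    get_filter_by_name('no-such-filter')
except ClassNotFound as err:
    print(err)  # filter 'no-such-filter' not found
```
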
wakatime/wakatime | wakatime/packages/pygments/lexers/templates.py | ErbLexer.get_tokens_unprocessed | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/templates.py#L72-L138 | train

def get_tokens_unprocessed(self, text):
    """
    Since ERB doesn't allow "<%" and other tags inside of ruby
    blocks we have to use a split approach here that fails for
    that too.
    """
    tokens = self._block_re.split(text)
    tokens.reverse()
    state = idx = 0
    try:
        while True:
            # text
            if state == 0:
                val = tokens.pop()
                yield idx, Other, val
                idx += len(val)
                state = 1
            # block starts
            elif state == 1:
                tag = tokens.pop()
                # literals
                if tag in ('<%%', '%%>'):
                    yield idx, Other, tag
                    idx += 3
                    state = 0
                # comment
                elif tag == '<%#':
                    yield idx, Comment.Preproc, tag
                    val = tokens.pop()
                    yield idx + 3, Comment, val
                    idx += 3 + len(val)
                    state = 2
                # blocks or output
                elif tag in ('<%', '<%=', '<%-'):
                    yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    data = tokens.pop()
                    r_idx = 0
                    for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(data):
                        yield r_idx + idx, r_token, r_value
                    idx += len(data)
                    state = 2
                elif tag in ('%>', '-%>'):
                    yield idx, Error, tag
                    idx += len(tag)
                    state = 0
                # % raw ruby statements
                else:
                    yield idx, Comment.Preproc, tag[0]
                    r_idx = 0
                    for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                        yield idx + 1 + r_idx, r_token, r_value
                    idx += len(tag)
                    state = 0
            # block ends
            elif state == 2:
                tag = tokens.pop()
                if tag not in ('%>', '-%>'):
                    yield idx, Other, tag
                else:
                    yield idx, Comment.Preproc, tag
                idx += len(tag)
                state = 0
    except IndexError:
        return

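A small driver for the split-based ERB lexer above, run against upstream Pygments:

```python
from pygments.lexers.templates import ErbLexer

for tokentype, value in ErbLexer().get_tokens('Hello <%= user.name %>!\n'):
    print(tokentype, repr(value))
# the '<%=' and '%>' delimiters come out as Comment.Preproc;
# the Ruby between them is sub-lexed by ruby_lexer, as in the state machine above
```
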
wakatime/wakatime | wakatime/utils.py | format_file_path | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/utils.py#L77-L93 | train

def format_file_path(filepath):
    """Formats a path as absolute and with the correct platform separator."""
    try:
        is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
        filepath = os.path.realpath(os.path.abspath(filepath))
        filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
        is_windows_drive = WINDOWS_DRIVE_PATTERN.match(filepath)
        if is_windows_drive:
            filepath = filepath.capitalize()
        if is_windows_network_mount:
            # Add back a / to the front, since the previous modifications
            # will have replaced any double slashes with single
            filepath = '/' + filepath
    except:
        pass
    return filepath

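The separator rewrite in `format_file_path` hinges on a backslash-replacing regex. A sketch with an assumed pattern; the real `BACKSLASH_REPLACE_PATTERN` is defined elsewhere in utils.py and may differ:

```python
import re

BACKSLASH_REPLACE_PATTERN = re.compile(r'[\\/]+')  # assumption: collapse runs of \ and /

print(re.sub(BACKSLASH_REPLACE_PATTERN, '/', 'C:\\Users\\me\\project\\main.py'))
# -> C:/Users/me/project/main.py
```
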
wakatime/wakatime | wakatime/packages/urllib3/connectionpool.py | HTTPConnectionPool.close | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/connectionpool.py#L410-L424 | train

def close(self):
    """
    Close all pooled connections and disable the pool.
    """
    # Disable access to the pool
    old_pool, self.pool = self.pool, None

    try:
        while True:
            conn = old_pool.get(block=False)
            if conn:
                conn.close()
    except queue.Empty:
        pass

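The swap-then-drain idiom in `close` above works on any `queue.Queue`: swapping `self.pool` to None first makes new checkouts fail fast while the old handle is drained. In isolation:

```python
import queue

pool = queue.LifoQueue()
for conn in ('conn-1', 'conn-2'):
    pool.put(conn)

old_pool, pool = pool, None   # disable access, keep a handle for draining
try:
    while True:
        conn = old_pool.get(block=False)
        if conn:
            print('closing', conn)
except queue.Empty:
    pass
```
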
wakatime/wakatime | wakatime/packages/urllib3/connectionpool.py | HTTPConnectionPool.is_same_host | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/connectionpool.py#L426-L445 | train

def is_same_host(self, url):
    """
    Check if the given ``url`` is a member of the same host as this
    connection pool.
    """
    if url.startswith('/'):
        return True

    # TODO: Add optional support for socket.gethostbyname checking.
    scheme, host, port = get_host(url)

    host = _ipv6_host(host).lower()

    # Use explicit default port for comparison when none is given
    if self.port and not port:
        port = port_by_scheme.get(scheme)
    elif not self.port and port == port_by_scheme.get(scheme):
        port = None

    return (scheme, host, port) == (self.scheme, self.host, self.port)

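A standalone illustration of the default-port normalization `is_same_host` performs, so that `http://example.com/x` and `http://example.com:80/y` compare as the same host:

```python
from urllib.parse import urlsplit

DEFAULT_PORTS = {'http': 80, 'https': 443}  # urllib3 keeps these in port_by_scheme

def same_host(a, b):
    def norm(url):
        p = urlsplit(url)
        # a missing port is treated as the scheme default
        return (p.scheme, p.hostname, p.port or DEFAULT_PORTS.get(p.scheme))
    return norm(a) == norm(b)

assert same_host('http://example.com/x', 'http://example.com:80/y')
assert not same_host('http://example.com/', 'https://example.com/')
```
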
wakatime/wakatime | wakatime/packages/configparser/__init__.py | ParsingError.filename | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L315-L322 | train

def filename(self, value):
    """Deprecated, use `source'."""
    warnings.warn(
        "The 'filename' attribute will be removed in future versions.  "
        "Use 'source' instead.",
        DeprecationWarning, stacklevel=2
    )
    self.source = value

wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser.options | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L666-L673 | train

def options(self, section):
    """Return a list of option names for the given section name."""
    try:
        opts = self._sections[section].copy()
    except KeyError:
        raise from_none(NoSectionError(section))
    opts.update(self._defaults)
    return list(opts.keys())

wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser.read_string | python | 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L726-L729 | train

def read_string(self, string, source='<string>'):
    """Read configuration from a given string."""
    sfile = io.StringIO(string)
    self.read_file(sfile, source)

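`read_string` is the quickest way to exercise this parser; the stdlib `configparser` mirrors this backport's API:

```python
from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("[settings]\ndebug = true\nretries = 3\n")

assert cp.getboolean('settings', 'debug') is True
assert cp.getint('settings', 'retries') == 3
```
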
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser.read_dict | def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value) | python | def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value) | [
"def",
"read_dict",
"(",
"self",
",",
"dictionary",
",",
"source",
"=",
"'<dict>'",
")",
":",
"elements_added",
"=",
"set",
"(",
")",
"for",
"section",
",",
"keys",
"in",
"dictionary",
".",
"items",
"(",
")",
":",
"section",
"=",
"str",
"(",
"section",
")",
"try",
":",
"self",
".",
"add_section",
"(",
"section",
")",
"except",
"(",
"DuplicateSectionError",
",",
"ValueError",
")",
":",
"if",
"self",
".",
"_strict",
"and",
"section",
"in",
"elements_added",
":",
"raise",
"elements_added",
".",
"add",
"(",
"section",
")",
"for",
"key",
",",
"value",
"in",
"keys",
".",
"items",
"(",
")",
":",
"key",
"=",
"self",
".",
"optionxform",
"(",
"str",
"(",
"key",
")",
")",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"self",
".",
"_strict",
"and",
"(",
"section",
",",
"key",
")",
"in",
"elements_added",
":",
"raise",
"DuplicateOptionError",
"(",
"section",
",",
"key",
",",
"source",
")",
"elements_added",
".",
"add",
"(",
"(",
"section",
",",
"key",
")",
")",
"self",
".",
"set",
"(",
"section",
",",
"key",
",",
"value",
")"
]
| Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read. | [
"Read",
"configuration",
"from",
"a",
"dictionary",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L731-L760 | train |
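A sketch of read_dict under the same stdlib-compatibility assumption; note the string coercion the docstring describes:

import configparser

parser = configparser.ConfigParser()
parser.read_dict({'server': {'host': 'localhost', 'port': 8080}},
                 source='<example-dict>')
print(parser.get('server', 'port'))  # -> '8080', coerced to a string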
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser.readfp | def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename) | python | def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename) | [
"def",
"readfp",
"(",
"self",
",",
"fp",
",",
"filename",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"This method will be removed in future versions. \"",
"\"Use 'parser.read_file()' instead.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"self",
".",
"read_file",
"(",
"fp",
",",
"source",
"=",
"filename",
")"
]
| Deprecated, use read_file instead. | [
"Deprecated",
"use",
"read_file",
"instead",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L762-L769 | train |
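A sketch of the replacement call the deprecation warning points to, assuming stdlib-compatible behavior:

import io
import configparser

parser = configparser.ConfigParser()
with io.StringIO(u"[a]\nkey = value\n") as fh:
    parser.read_file(fh, source='<string>')  # instead of the deprecated readfp()
print(parser.get('a', 'key'))  # -> 'value'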
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser.has_option | def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults) | python | def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults) | [
"def",
"has_option",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"if",
"not",
"section",
"or",
"section",
"==",
"self",
".",
"default_section",
":",
"option",
"=",
"self",
".",
"optionxform",
"(",
"option",
")",
"return",
"option",
"in",
"self",
".",
"_defaults",
"elif",
"section",
"not",
"in",
"self",
".",
"_sections",
":",
"return",
"False",
"else",
":",
"option",
"=",
"self",
".",
"optionxform",
"(",
"option",
")",
"return",
"(",
"option",
"in",
"self",
".",
"_sections",
"[",
"section",
"]",
"or",
"option",
"in",
"self",
".",
"_defaults",
")"
]
| Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False. | [
"Check",
"for",
"the",
"existence",
"of",
"a",
"given",
"option",
"in",
"a",
"given",
"section",
".",
"If",
"the",
"specified",
"section",
"is",
"None",
"or",
"an",
"empty",
"string",
"DEFAULT",
"is",
"assumed",
".",
"If",
"the",
"specified",
"section",
"does",
"not",
"exist",
"returns",
"False",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L896-L908 | train |
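A sketch of the three lookup paths (explicit section, DEFAULT fallthrough, missing section), assuming stdlib-compatible behavior:

import configparser

parser = configparser.ConfigParser()
parser.read_string(u"[DEFAULT]\ntimeout = 30\n\n[server]\nhost = example\n")
print(parser.has_option('server', 'host'))     # True
print(parser.has_option('server', 'timeout'))  # True, inherited from DEFAULT
print(parser.has_option(None, 'timeout'))      # True, None/'' means DEFAULT
print(parser.has_option('missing', 'host'))    # False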
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser._write_section | def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{0}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{0}{1}\n".format(key, value))
fp.write("\n") | python | def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{0}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{0}{1}\n".format(key, value))
fp.write("\n") | [
"def",
"_write_section",
"(",
"self",
",",
"fp",
",",
"section_name",
",",
"section_items",
",",
"delimiter",
")",
":",
"fp",
".",
"write",
"(",
"\"[{0}]\\n\"",
".",
"format",
"(",
"section_name",
")",
")",
"for",
"key",
",",
"value",
"in",
"section_items",
":",
"value",
"=",
"self",
".",
"_interpolation",
".",
"before_write",
"(",
"self",
",",
"section_name",
",",
"key",
",",
"value",
")",
"if",
"value",
"is",
"not",
"None",
"or",
"not",
"self",
".",
"_allow_no_value",
":",
"value",
"=",
"delimiter",
"+",
"str",
"(",
"value",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n\\t'",
")",
"else",
":",
"value",
"=",
"\"\"",
"fp",
".",
"write",
"(",
"\"{0}{1}\\n\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"fp",
".",
"write",
"(",
"\"\\n\"",
")"
]
| Write a single section to the specified `fp'. | [
"Write",
"a",
"single",
"section",
"to",
"the",
"specified",
"fp",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L941-L952 | train |
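_write_section is internal, but its output format is visible through write(); a sketch assuming stdlib-compatible behavior, with an invented section:

import io
import configparser

parser = configparser.ConfigParser()
parser['paths'] = {'cache': '/tmp/cache'}
out = io.StringIO()
parser.write(out)  # invokes _write_section() once per section
print(out.getvalue())  # "[paths]\ncache = /tmp/cache\n\n"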
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser._unify_values | def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults) | python | def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults) | [
"def",
"_unify_values",
"(",
"self",
",",
"section",
",",
"vars",
")",
":",
"sectiondict",
"=",
"{",
"}",
"try",
":",
"sectiondict",
"=",
"self",
".",
"_sections",
"[",
"section",
"]",
"except",
"KeyError",
":",
"if",
"section",
"!=",
"self",
".",
"default_section",
":",
"raise",
"NoSectionError",
"(",
"section",
")",
"# Update with the entry specific variables",
"vardict",
"=",
"{",
"}",
"if",
"vars",
":",
"for",
"key",
",",
"value",
"in",
"vars",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"str",
"(",
"value",
")",
"vardict",
"[",
"self",
".",
"optionxform",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"_ChainMap",
"(",
"vardict",
",",
"sectiondict",
",",
"self",
".",
"_defaults",
")"
]
| Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT. | [
"Create",
"a",
"sequence",
"of",
"lookups",
"with",
"vars",
"taking",
"priority",
"over",
"the",
"section",
"which",
"takes",
"priority",
"over",
"the",
"DEFAULTSECT",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1152-L1170 | train |
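The vars -> section -> DEFAULT precedence that _unify_values builds is observable through get(); a sketch assuming stdlib-compatible behavior:

import configparser

parser = configparser.ConfigParser()
parser.read_string(u"[DEFAULT]\ncolor = red\n\n[widget]\nsize = small\n")
print(parser.get('widget', 'color'))                          # 'red', from DEFAULT
print(parser.get('widget', 'color', vars={'color': 'blue'}))  # 'blue', vars wins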
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser._convert_to_boolean | def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()] | python | def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()] | [
"def",
"_convert_to_boolean",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
".",
"lower",
"(",
")",
"not",
"in",
"self",
".",
"BOOLEAN_STATES",
":",
"raise",
"ValueError",
"(",
"'Not a boolean: %s'",
"%",
"value",
")",
"return",
"self",
".",
"BOOLEAN_STATES",
"[",
"value",
".",
"lower",
"(",
")",
"]"
]
| Return a boolean value translating from other types if necessary. | [
"Return",
"a",
"boolean",
"value",
"translating",
"from",
"other",
"types",
"if",
"necessary",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1172-L1177 | train |
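_convert_to_boolean backs getboolean(); a sketch assuming the usual BOOLEAN_STATES mapping ('1'/'yes'/'true'/'on' and '0'/'no'/'false'/'off'):

import configparser

parser = configparser.ConfigParser()
parser.read_string(u"[flags]\ndebug = yes\nverbose = off\nbad = maybe\n")
print(parser.getboolean('flags', 'debug'))    # True
print(parser.getboolean('flags', 'verbose'))  # False
try:
    parser.getboolean('flags', 'bad')
except ValueError as exc:
    print(exc)  # Not a boolean: maybe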
wakatime/wakatime | wakatime/packages/configparser/__init__.py | RawConfigParser._validate_value_types | def _validate_value_types(self, **kwargs):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
# keyword-only arguments
section = kwargs.get('section', "")
option = kwargs.get('option', "")
value = kwargs.get('value', "")
if PY2 and bytes in (type(section), type(option), type(value)):
# we allow for a little unholy magic for Python 2 so that
# people not using unicode_literals can still use the library
# conveniently
warnings.warn(
"You passed a bytestring. Implicitly decoding as UTF-8 string."
" This will not work on Python 3. Please switch to using"
" Unicode strings across the board.",
DeprecationWarning,
stacklevel=2,
)
if isinstance(section, bytes):
section = section.decode('utf8')
if isinstance(option, bytes):
option = option.decode('utf8')
if isinstance(value, bytes):
value = value.decode('utf8')
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
return section, option, value | python | def _validate_value_types(self, **kwargs):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
# keyword-only arguments
section = kwargs.get('section', "")
option = kwargs.get('option', "")
value = kwargs.get('value', "")
if PY2 and bytes in (type(section), type(option), type(value)):
# we allow for a little unholy magic for Python 2 so that
# people not using unicode_literals can still use the library
# conveniently
warnings.warn(
"You passed a bytestring. Implicitly decoding as UTF-8 string."
" This will not work on Python 3. Please switch to using"
" Unicode strings across the board.",
DeprecationWarning,
stacklevel=2,
)
if isinstance(section, bytes):
section = section.decode('utf8')
if isinstance(option, bytes):
option = option.decode('utf8')
if isinstance(value, bytes):
value = value.decode('utf8')
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
return section, option, value | [
"def",
"_validate_value_types",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# keyword-only arguments",
"section",
"=",
"kwargs",
".",
"get",
"(",
"'section'",
",",
"\"\"",
")",
"option",
"=",
"kwargs",
".",
"get",
"(",
"'option'",
",",
"\"\"",
")",
"value",
"=",
"kwargs",
".",
"get",
"(",
"'value'",
",",
"\"\"",
")",
"if",
"PY2",
"and",
"bytes",
"in",
"(",
"type",
"(",
"section",
")",
",",
"type",
"(",
"option",
")",
",",
"type",
"(",
"value",
")",
")",
":",
"# we allow for a little unholy magic for Python 2 so that",
"# people not using unicode_literals can still use the library",
"# conveniently",
"warnings",
".",
"warn",
"(",
"\"You passed a bytestring. Implicitly decoding as UTF-8 string.\"",
"\" This will not work on Python 3. Please switch to using\"",
"\" Unicode strings across the board.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"if",
"isinstance",
"(",
"section",
",",
"bytes",
")",
":",
"section",
"=",
"section",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"option",
",",
"bytes",
")",
":",
"option",
"=",
"option",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"not",
"isinstance",
"(",
"section",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"section names must be strings\"",
")",
"if",
"not",
"isinstance",
"(",
"option",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"option keys must be strings\"",
")",
"if",
"not",
"self",
".",
"_allow_no_value",
"or",
"value",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"option values must be strings\"",
")",
"return",
"section",
",",
"option",
",",
"value"
]
| Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set(). | [
"Raises",
"a",
"TypeError",
"for",
"non",
"-",
"string",
"values",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1179-L1223 | train |
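A sketch of the mapping-protocol path that triggers this validation, assuming stdlib-compatible behavior:

import configparser

parser = configparser.ConfigParser()
parser.add_section('numbers')
try:
    parser['numbers']['port'] = 8080  # not a string
except TypeError as exc:
    print(exc)  # option values must be strings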
wakatime/wakatime | wakatime/packages/configparser/__init__.py | ConfigParser.set | def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
_, option, value = self._validate_value_types(option=option, value=value)
super(ConfigParser, self).set(section, option, value) | python | def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
_, option, value = self._validate_value_types(option=option, value=value)
super(ConfigParser, self).set(section, option, value) | [
"def",
"set",
"(",
"self",
",",
"section",
",",
"option",
",",
"value",
"=",
"None",
")",
":",
"_",
",",
"option",
",",
"value",
"=",
"self",
".",
"_validate_value_types",
"(",
"option",
"=",
"option",
",",
"value",
"=",
"value",
")",
"super",
"(",
"ConfigParser",
",",
"self",
")",
".",
"set",
"(",
"section",
",",
"option",
",",
"value",
")"
]
| Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value. | [
"Set",
"an",
"option",
".",
"Extends",
"RawConfigParser",
".",
"set",
"by",
"validating",
"type",
"and",
"interpolation",
"syntax",
"on",
"the",
"value",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1235-L1239 | train |
wakatime/wakatime | wakatime/packages/configparser/__init__.py | ConfigParser.add_section | def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
section, _, _ = self._validate_value_types(section=section)
super(ConfigParser, self).add_section(section) | python | def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
section, _, _ = self._validate_value_types(section=section)
super(ConfigParser, self).add_section(section) | [
"def",
"add_section",
"(",
"self",
",",
"section",
")",
":",
"section",
",",
"_",
",",
"_",
"=",
"self",
".",
"_validate_value_types",
"(",
"section",
"=",
"section",
")",
"super",
"(",
"ConfigParser",
",",
"self",
")",
".",
"add_section",
"(",
"section",
")"
]
| Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string. | [
"Create",
"a",
"new",
"section",
"in",
"the",
"configuration",
".",
"Extends",
"RawConfigParser",
".",
"add_section",
"by",
"validating",
"if",
"the",
"section",
"name",
"is",
"a",
"string",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1241-L1246 | train |
wakatime/wakatime | wakatime/packages/configparser/__init__.py | SectionProxy.get | def get(self, option, fallback=None, **kwargs):
"""Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
"""
# keyword-only arguments
kwargs.setdefault('raw', False)
kwargs.setdefault('vars', None)
_impl = kwargs.pop('_impl', None)
# If `_impl` is provided, it should be a getter method on the parser
# object that provides the desired type conversion.
if not _impl:
_impl = self._parser.get
return _impl(self._name, option, fallback=fallback, **kwargs) | python | def get(self, option, fallback=None, **kwargs):
"""Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
"""
# keyword-only arguments
kwargs.setdefault('raw', False)
kwargs.setdefault('vars', None)
_impl = kwargs.pop('_impl', None)
# If `_impl` is provided, it should be a getter method on the parser
# object that provides the desired type conversion.
if not _impl:
_impl = self._parser.get
return _impl(self._name, option, fallback=fallback, **kwargs) | [
"def",
"get",
"(",
"self",
",",
"option",
",",
"fallback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# keyword-only arguments",
"kwargs",
".",
"setdefault",
"(",
"'raw'",
",",
"False",
")",
"kwargs",
".",
"setdefault",
"(",
"'vars'",
",",
"None",
")",
"_impl",
"=",
"kwargs",
".",
"pop",
"(",
"'_impl'",
",",
"None",
")",
"# If `_impl` is provided, it should be a getter method on the parser",
"# object that provides the desired type conversion.",
"if",
"not",
"_impl",
":",
"_impl",
"=",
"self",
".",
"_parser",
".",
"get",
"return",
"_impl",
"(",
"self",
".",
"_name",
",",
"option",
",",
"fallback",
"=",
"fallback",
",",
"*",
"*",
"kwargs",
")"
]
| Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found. | [
"Get",
"an",
"option",
"value",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1316-L1331 | train |
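A sketch of the SectionProxy behavior described above, assuming stdlib-compatible behavior; note the None default instead of NoOptionError:

import configparser

parser = configparser.ConfigParser()
parser.read_string(u"[server]\nhost = localhost\n")
section = parser['server']            # a SectionProxy
print(section.get('host'))            # 'localhost'
print(section.get('missing'))         # None, where parser.get() would raise
print(section.get('missing', 'n/a'))  # 'n/a' fallback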
wakatime/wakatime | wakatime/packages/pygments/lexers/rebol.py | RebolLexer.analyse_text | def analyse_text(text):
"""
Check if code contains REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5 | python | def analyse_text(text):
"""
Check if code contains REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5 | [
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^\\s*REBOL\\s*\\['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code starts with REBOL header",
"return",
"1.0",
"elif",
"re",
".",
"search",
"(",
"r'\\s*REBOL\\s*['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code contains REBOL header but also some text before it",
"return",
"0.5"
]
| Check if code contains REBOL header and so it is probably not R code | [
"Check",
"if",
"code",
"contains",
"REBOL",
"header",
"and",
"so",
"it",
"probably",
"not",
"R",
"code"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/rebol.py#L235-L244 | train |
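A sketch of the heuristic in use, assuming pygments is importable; lexer classes wrap analyse_text with make_analysator (see the pygments.util record below), so falsy returns become 0.0:

from pygments.lexers.rebol import RebolLexer

print(RebolLexer.analyse_text(u'REBOL [ Title: "demo" ]'))  # 1.0, header first
print(RebolLexer.analyse_text(u'x <- c(1, 2, 3)'))          # 0.0, no header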
wakatime/wakatime | wakatime/packages/ntlm_auth/compute_keys.py | get_sign_key | def get_sign_key(exported_session_key, magic_constant):
"""
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
"""
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
return sign_key | python | def get_sign_key(exported_session_key, magic_constant):
"""
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
"""
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
return sign_key | [
"def",
"get_sign_key",
"(",
"exported_session_key",
",",
"magic_constant",
")",
":",
"sign_key",
"=",
"hashlib",
".",
"md5",
"(",
"exported_session_key",
"+",
"magic_constant",
")",
".",
"digest",
"(",
")",
"return",
"sign_key"
]
| 3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages | [
"3",
".",
"4",
".",
"5",
".",
"2",
"SIGNKEY"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/compute_keys.py#L63-L74 | train |
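A sketch of the MD5 derivation with a dummy session key; the magic-constant byte string is my rendering of the MS-NLMP client-to-server signing constant and should be treated as illustrative:

import hashlib

exported_session_key = b"\x55" * 16  # dummy 128-bit session key
client_signing = (b"session key to client-to-server signing key"
                  b" magic constant\x00")
sign_key = hashlib.md5(exported_session_key + client_signing).digest()
print(len(sign_key))  # 16 bytes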
wakatime/wakatime | wakatime/packages/urllib3/util/wait.py | _wait_for_io_events | def _wait_for_io_events(socks, events, timeout=None):
""" Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. """
if not HAS_SELECT:
raise ValueError('Platform does not have a selector')
if not isinstance(socks, list):
# Probably just a single socket.
if hasattr(socks, "fileno"):
socks = [socks]
# Otherwise it might be a non-list iterable.
else:
socks = list(socks)
with DefaultSelector() as selector:
for sock in socks:
selector.register(sock, events)
return [key[0].fileobj for key in
selector.select(timeout) if key[1] & events] | python | def _wait_for_io_events(socks, events, timeout=None):
""" Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. """
if not HAS_SELECT:
raise ValueError('Platform does not have a selector')
if not isinstance(socks, list):
# Probably just a single socket.
if hasattr(socks, "fileno"):
socks = [socks]
# Otherwise it might be a non-list iterable.
else:
socks = list(socks)
with DefaultSelector() as selector:
for sock in socks:
selector.register(sock, events)
return [key[0].fileobj for key in
selector.select(timeout) if key[1] & events] | [
"def",
"_wait_for_io_events",
"(",
"socks",
",",
"events",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"HAS_SELECT",
":",
"raise",
"ValueError",
"(",
"'Platform does not have a selector'",
")",
"if",
"not",
"isinstance",
"(",
"socks",
",",
"list",
")",
":",
"# Probably just a single socket.",
"if",
"hasattr",
"(",
"socks",
",",
"\"fileno\"",
")",
":",
"socks",
"=",
"[",
"socks",
"]",
"# Otherwise it might be a non-list iterable.",
"else",
":",
"socks",
"=",
"list",
"(",
"socks",
")",
"with",
"DefaultSelector",
"(",
")",
"as",
"selector",
":",
"for",
"sock",
"in",
"socks",
":",
"selector",
".",
"register",
"(",
"sock",
",",
"events",
")",
"return",
"[",
"key",
"[",
"0",
"]",
".",
"fileobj",
"for",
"key",
"in",
"selector",
".",
"select",
"(",
"timeout",
")",
"if",
"key",
"[",
"1",
"]",
"&",
"events",
"]"
]
| Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. | [
"Waits",
"for",
"IO",
"events",
"to",
"be",
"available",
"from",
"a",
"list",
"of",
"sockets",
"or",
"optionally",
"a",
"single",
"socket",
"if",
"passed",
"in",
".",
"Returns",
"a",
"list",
"of",
"sockets",
"that",
"can",
"be",
"interacted",
"with",
"immediately",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/util/wait.py#L9-L26 | train |
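A sketch of the same readiness pattern using the stdlib selectors module, whose interface matches the DefaultSelector used above; socketpair keeps the example self-contained:

import selectors
import socket

a, b = socket.socketpair()
b.sendall(b"ping")
with selectors.DefaultSelector() as sel:
    sel.register(a, selectors.EVENT_READ)
    ready = [key.fileobj for key, events in sel.select(timeout=1.0)
             if events & selectors.EVENT_READ]
print(ready == [a])  # True: 'a' has readable data
a.close()
b.close()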
wakatime/wakatime | wakatime/packages/pygments/util.py | make_analysator | def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse) | python | def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse) | [
"def",
"make_analysator",
"(",
"f",
")",
":",
"def",
"text_analyse",
"(",
"text",
")",
":",
"try",
":",
"rv",
"=",
"f",
"(",
"text",
")",
"except",
"Exception",
":",
"return",
"0.0",
"if",
"not",
"rv",
":",
"return",
"0.0",
"try",
":",
"return",
"min",
"(",
"1.0",
",",
"max",
"(",
"0.0",
",",
"float",
"(",
"rv",
")",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"0.0",
"text_analyse",
".",
"__doc__",
"=",
"f",
".",
"__doc__",
"return",
"staticmethod",
"(",
"text_analyse",
")"
]
| Return a static text analyser function that returns float values. | [
"Return",
"a",
"static",
"text",
"analyser",
"function",
"that",
"returns",
"float",
"values",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L108-L122 | train |
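A sketch of the clamping and error-swallowing behavior, with an invented scoring function; assumes pygments is importable:

from pygments.util import make_analysator

def analyse(text):
    """Score shebang lines highly."""
    return 2.0 if text.startswith('#!') else None

class FakeLexer(object):
    analyse_text = make_analysator(analyse)

print(FakeLexer.analyse_text('#!/bin/sh'))  # 1.0, clamped into [0.0, 1.0]
print(FakeLexer.analyse_text('plain'))      # 0.0 for a falsy return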
wakatime/wakatime | wakatime/packages/pygments/util.py | shebang_matches | def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False | python | def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False | [
"def",
"shebang_matches",
"(",
"text",
",",
"regex",
")",
":",
"index",
"=",
"text",
".",
"find",
"(",
"'\\n'",
")",
"if",
"index",
">=",
"0",
":",
"first_line",
"=",
"text",
"[",
":",
"index",
"]",
".",
"lower",
"(",
")",
"else",
":",
"first_line",
"=",
"text",
".",
"lower",
"(",
")",
"if",
"first_line",
".",
"startswith",
"(",
"'#!'",
")",
":",
"try",
":",
"found",
"=",
"[",
"x",
"for",
"x",
"in",
"split_path_re",
".",
"split",
"(",
"first_line",
"[",
"2",
":",
"]",
".",
"strip",
"(",
")",
")",
"if",
"x",
"and",
"not",
"x",
".",
"startswith",
"(",
"'-'",
")",
"]",
"[",
"-",
"1",
"]",
"except",
"IndexError",
":",
"return",
"False",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'^%s(\\.(exe|cmd|bat|bin))?$'",
"%",
"regex",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"regex",
".",
"search",
"(",
"found",
")",
"is",
"not",
"None",
":",
"return",
"True",
"return",
"False"
]
| r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``) | [
"r",
"Check",
"if",
"the",
"given",
"regular",
"expression",
"matches",
"the",
"last",
"part",
"of",
"the",
"shebang",
"if",
"one",
"exists",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L125-L167 | train |
wakatime/wakatime | wakatime/packages/pygments/util.py | looks_like_xml | def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv | python | def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv | [
"def",
"looks_like_xml",
"(",
"text",
")",
":",
"if",
"xml_decl_re",
".",
"match",
"(",
"text",
")",
":",
"return",
"True",
"key",
"=",
"hash",
"(",
"text",
")",
"try",
":",
"return",
"_looks_like_xml_cache",
"[",
"key",
"]",
"except",
"KeyError",
":",
"m",
"=",
"doctype_lookup_re",
".",
"match",
"(",
"text",
")",
"if",
"m",
"is",
"not",
"None",
":",
"return",
"True",
"rv",
"=",
"tag_re",
".",
"search",
"(",
"text",
"[",
":",
"1000",
"]",
")",
"is",
"not",
"None",
"_looks_like_xml_cache",
"[",
"key",
"]",
"=",
"rv",
"return",
"rv"
]
| Check if a doctype exists or if we have some tags. | [
"Check",
"if",
"a",
"doctype",
"exists",
"or",
"if",
"we",
"have",
"some",
"tags",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L191-L204 | train |
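A quick sketch, assuming pygments is importable:

from pygments.util import looks_like_xml

print(looks_like_xml(u'<?xml version="1.0"?><root/>'))  # True, XML declaration
print(looks_like_xml(u'just some plain text'))          # False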
wakatime/wakatime | wakatime/packages/pygments/util.py | unirange | def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if bh - ah > 1:
buf.append(u'[%s-%s][%s-%s]' %
(unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')' | python | def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if bh - ah > 1:
buf.append(u'[%s-%s][%s-%s]' %
(unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')' | [
"def",
"unirange",
"(",
"a",
",",
"b",
")",
":",
"if",
"b",
"<",
"a",
":",
"raise",
"ValueError",
"(",
"\"Bad character range\"",
")",
"if",
"a",
"<",
"0x10000",
"or",
"b",
"<",
"0x10000",
":",
"raise",
"ValueError",
"(",
"\"unirange is only defined for non-BMP ranges\"",
")",
"if",
"sys",
".",
"maxunicode",
">",
"0xffff",
":",
"# wide build",
"return",
"u'[%s-%s]'",
"%",
"(",
"unichr",
"(",
"a",
")",
",",
"unichr",
"(",
"b",
")",
")",
"else",
":",
"# narrow build stores surrogates, and the 're' module handles them",
"# (incorrectly) as characters. Since there is still ordering among",
"# these characters, expand the range to one that it understands. Some",
"# background in http://bugs.python.org/issue3665 and",
"# http://bugs.python.org/issue12749",
"#",
"# Additionally, the lower constants are using unichr rather than",
"# literals because jython [which uses the wide path] can't load this",
"# file if they are literals.",
"ah",
",",
"al",
"=",
"_surrogatepair",
"(",
"a",
")",
"bh",
",",
"bl",
"=",
"_surrogatepair",
"(",
"b",
")",
"if",
"ah",
"==",
"bh",
":",
"return",
"u'(?:%s[%s-%s])'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"unichr",
"(",
"bl",
")",
")",
"else",
":",
"buf",
"=",
"[",
"]",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"ah",
"==",
"bh",
"and",
"unichr",
"(",
"bl",
")",
"or",
"unichr",
"(",
"0xdfff",
")",
")",
")",
"if",
"ah",
"-",
"bh",
">",
"1",
":",
"buf",
".",
"append",
"(",
"u'[%s-%s][%s-%s]'",
"%",
"unichr",
"(",
"ah",
"+",
"1",
")",
",",
"unichr",
"(",
"bh",
"-",
"1",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"0xdfff",
")",
")",
"if",
"ah",
"!=",
"bh",
":",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"bh",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"bl",
")",
")",
")",
"return",
"u'(?:'",
"+",
"u'|'",
".",
"join",
"(",
"buf",
")",
"+",
"u')'"
]
| Returns a regular expression string to match the given non-BMP range. | [
"Returns",
"a",
"regular",
"expression",
"string",
"to",
"match",
"the",
"given",
"non",
"-",
"BMP",
"range",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L218-L254 | train |
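A sketch using the returned pattern fragment; U+1F600..U+1F64F is an arbitrary astral range chosen for illustration (on narrow builds this relies on the corrected surrogate branch above):

import re
from pygments.util import unirange

pattern = re.compile(u'^' + unirange(0x1F600, 0x1F64F) + u'$')
print(bool(pattern.match(u'\U0001F600')))  # True
print(bool(pattern.match(u'A')))           # False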
wakatime/wakatime | wakatime/packages/pygments/util.py | format_lines | def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines) | python | def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines) | [
"def",
"format_lines",
"(",
"var_name",
",",
"seq",
",",
"raw",
"=",
"False",
",",
"indent_level",
"=",
"0",
")",
":",
"lines",
"=",
"[",
"]",
"base_indent",
"=",
"' '",
"*",
"indent_level",
"*",
"4",
"inner_indent",
"=",
"' '",
"*",
"(",
"indent_level",
"+",
"1",
")",
"*",
"4",
"lines",
".",
"append",
"(",
"base_indent",
"+",
"var_name",
"+",
"' = ('",
")",
"if",
"raw",
":",
"# These should be preformatted reprs of, say, tuples.",
"for",
"i",
"in",
"seq",
":",
"lines",
".",
"append",
"(",
"inner_indent",
"+",
"i",
"+",
"','",
")",
"else",
":",
"for",
"i",
"in",
"seq",
":",
"# Force use of single quotes",
"r",
"=",
"repr",
"(",
"i",
"+",
"'\"'",
")",
"lines",
".",
"append",
"(",
"inner_indent",
"+",
"r",
"[",
":",
"-",
"2",
"]",
"+",
"r",
"[",
"-",
"1",
"]",
"+",
"','",
")",
"lines",
".",
"append",
"(",
"base_indent",
"+",
"')'",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
]
| Formats a sequence of strings for output. | [
"Formats",
"a",
"sequence",
"of",
"strings",
"for",
"output",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L257-L273 | train |
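A sketch of the generated layout, assuming pygments is importable:

from pygments.util import format_lines

print(format_lines('filenames', ['*.py', '*.pyw']))
# filenames = (
#     '*.py',
#     '*.pyw',
# )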
wakatime/wakatime | wakatime/packages/pygments/util.py | duplicates_removed | def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst | python | def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst | [
"def",
"duplicates_removed",
"(",
"it",
",",
"already_seen",
"=",
"(",
")",
")",
":",
"lst",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"it",
":",
"if",
"i",
"in",
"seen",
"or",
"i",
"in",
"already_seen",
":",
"continue",
"lst",
".",
"append",
"(",
"i",
")",
"seen",
".",
"add",
"(",
"i",
")",
"return",
"lst"
]
| Returns a list with duplicates removed from the iterable `it`.
Order is preserved. | [
"Returns",
"a",
"list",
"with",
"duplicates",
"removed",
"from",
"the",
"iterable",
"it",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L276-L289 | train |
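A short sketch of the order-preserving dedup, assuming pygments is importable:

from pygments.util import duplicates_removed

print(duplicates_removed([3, 1, 3, 2, 1]))               # [3, 1, 2]
print(duplicates_removed([3, 1, 2], already_seen=(2,)))  # [3, 1]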
wakatime/wakatime | wakatime/packages/pygments/lexers/fortran.py | FortranFixedLexer._lex_fortran | def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value | python | def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value | [
"def",
"_lex_fortran",
"(",
"self",
",",
"match",
",",
"ctx",
"=",
"None",
")",
":",
"lexer",
"=",
"FortranLexer",
"(",
")",
"text",
"=",
"match",
".",
"group",
"(",
"0",
")",
"+",
"\"\\n\"",
"for",
"index",
",",
"token",
",",
"value",
"in",
"lexer",
".",
"get_tokens_unprocessed",
"(",
"text",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"if",
"value",
"!=",
"''",
":",
"yield",
"index",
",",
"token",
",",
"value"
]
| Lex a line just as free form fortran without line break. | [
"Lex",
"a",
"line",
"just",
"as",
"free",
"form",
"fortran",
"without",
"line",
"break",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/fortran.py#L177-L184 | train |
wakatime/wakatime | wakatime/project.py | get_project_info | def get_project_info(configs, heartbeat, data):
"""Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
"""
project_name, branch_name = heartbeat.project, heartbeat.branch
if heartbeat.type != 'file':
project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
return project_name, branch_name
if project_name is None or branch_name is None:
for plugin_cls in CONFIG_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = project.branch()
break
if project_name is None:
project_name = data.get('project') or heartbeat.args.project
hide_project = heartbeat.should_obfuscate_project()
if hide_project and project_name is not None:
return project_name, None
if project_name is None or branch_name is None:
for plugin_cls in REV_CONTROL_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = branch_name or project.branch()
if hide_project:
branch_name = None
project_name = generate_project_name()
project_file = os.path.join(project.folder(), '.wakatime-project')
try:
with open(project_file, 'w') as fh:
fh.write(project_name)
except IOError:
project_name = None
break
if project_name is None and not hide_project:
project_name = data.get('alternate_project') or heartbeat.args.alternate_project
return project_name, branch_name | python | def get_project_info(configs, heartbeat, data):
"""Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
"""
project_name, branch_name = heartbeat.project, heartbeat.branch
if heartbeat.type != 'file':
project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
return project_name, branch_name
if project_name is None or branch_name is None:
for plugin_cls in CONFIG_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = project.branch()
break
if project_name is None:
project_name = data.get('project') or heartbeat.args.project
hide_project = heartbeat.should_obfuscate_project()
if hide_project and project_name is not None:
return project_name, None
if project_name is None or branch_name is None:
for plugin_cls in REV_CONTROL_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = branch_name or project.branch()
if hide_project:
branch_name = None
project_name = generate_project_name()
project_file = os.path.join(project.folder(), '.wakatime-project')
try:
with open(project_file, 'w') as fh:
fh.write(project_name)
except IOError:
project_name = None
break
if project_name is None and not hide_project:
project_name = data.get('alternate_project') or heartbeat.args.alternate_project
return project_name, branch_name | [
"def",
"get_project_info",
"(",
"configs",
",",
"heartbeat",
",",
"data",
")",
":",
"project_name",
",",
"branch_name",
"=",
"heartbeat",
".",
"project",
",",
"heartbeat",
".",
"branch",
"if",
"heartbeat",
".",
"type",
"!=",
"'file'",
":",
"project_name",
"=",
"project_name",
"or",
"heartbeat",
".",
"args",
".",
"project",
"or",
"heartbeat",
".",
"args",
".",
"alternate_project",
"return",
"project_name",
",",
"branch_name",
"if",
"project_name",
"is",
"None",
"or",
"branch_name",
"is",
"None",
":",
"for",
"plugin_cls",
"in",
"CONFIG_PLUGINS",
":",
"plugin_name",
"=",
"plugin_cls",
".",
"__name__",
".",
"lower",
"(",
")",
"plugin_configs",
"=",
"get_configs_for_plugin",
"(",
"plugin_name",
",",
"configs",
")",
"project",
"=",
"plugin_cls",
"(",
"heartbeat",
".",
"entity",
",",
"configs",
"=",
"plugin_configs",
")",
"if",
"project",
".",
"process",
"(",
")",
":",
"project_name",
"=",
"project_name",
"or",
"project",
".",
"name",
"(",
")",
"branch_name",
"=",
"project",
".",
"branch",
"(",
")",
"break",
"if",
"project_name",
"is",
"None",
":",
"project_name",
"=",
"data",
".",
"get",
"(",
"'project'",
")",
"or",
"heartbeat",
".",
"args",
".",
"project",
"hide_project",
"=",
"heartbeat",
".",
"should_obfuscate_project",
"(",
")",
"if",
"hide_project",
"and",
"project_name",
"is",
"not",
"None",
":",
"return",
"project_name",
",",
"None",
"if",
"project_name",
"is",
"None",
"or",
"branch_name",
"is",
"None",
":",
"for",
"plugin_cls",
"in",
"REV_CONTROL_PLUGINS",
":",
"plugin_name",
"=",
"plugin_cls",
".",
"__name__",
".",
"lower",
"(",
")",
"plugin_configs",
"=",
"get_configs_for_plugin",
"(",
"plugin_name",
",",
"configs",
")",
"project",
"=",
"plugin_cls",
"(",
"heartbeat",
".",
"entity",
",",
"configs",
"=",
"plugin_configs",
")",
"if",
"project",
".",
"process",
"(",
")",
":",
"project_name",
"=",
"project_name",
"or",
"project",
".",
"name",
"(",
")",
"branch_name",
"=",
"branch_name",
"or",
"project",
".",
"branch",
"(",
")",
"if",
"hide_project",
":",
"branch_name",
"=",
"None",
"project_name",
"=",
"generate_project_name",
"(",
")",
"project_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project",
".",
"folder",
"(",
")",
",",
"'.wakatime-project'",
")",
"try",
":",
"with",
"open",
"(",
"project_file",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"project_name",
")",
"except",
"IOError",
":",
"project_name",
"=",
"None",
"break",
"if",
"project_name",
"is",
"None",
"and",
"not",
"hide_project",
":",
"project_name",
"=",
"data",
".",
"get",
"(",
"'alternate_project'",
")",
"or",
"heartbeat",
".",
"args",
".",
"alternate_project",
"return",
"project_name",
",",
"branch_name"
]
| Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple. | [
"Find",
"the",
"current",
"project",
"and",
"branch",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/project.py#L39-L100 | train |
wakatime/wakatime | wakatime/project.py | generate_project_name | def generate_project_name():
"""Generates a random project name."""
adjectives = [
'aged', 'ancient', 'autumn', 'billowing', 'bitter', 'black', 'blue', 'bold',
'broad', 'broken', 'calm', 'cold', 'cool', 'crimson', 'curly', 'damp',
'dark', 'dawn', 'delicate', 'divine', 'dry', 'empty', 'falling', 'fancy',
'flat', 'floral', 'fragrant', 'frosty', 'gentle', 'green', 'hidden', 'holy',
'icy', 'jolly', 'late', 'lingering', 'little', 'lively', 'long', 'lucky',
'misty', 'morning', 'muddy', 'mute', 'nameless', 'noisy', 'odd', 'old',
'orange', 'patient', 'plain', 'polished', 'proud', 'purple', 'quiet', 'rapid',
'raspy', 'red', 'restless', 'rough', 'round', 'royal', 'shiny', 'shrill',
'shy', 'silent', 'small', 'snowy', 'soft', 'solitary', 'sparkling', 'spring',
'square', 'steep', 'still', 'summer', 'super', 'sweet', 'throbbing', 'tight',
'tiny', 'twilight', 'wandering', 'weathered', 'white', 'wild', 'winter', 'wispy',
'withered', 'yellow', 'young'
]
nouns = [
'art', 'band', 'bar', 'base', 'bird', 'block', 'boat', 'bonus',
'bread', 'breeze', 'brook', 'bush', 'butterfly', 'cake', 'cell', 'cherry',
'cloud', 'credit', 'darkness', 'dawn', 'dew', 'disk', 'dream', 'dust',
'feather', 'field', 'fire', 'firefly', 'flower', 'fog', 'forest', 'frog',
'frost', 'glade', 'glitter', 'grass', 'hall', 'hat', 'haze', 'heart',
'hill', 'king', 'lab', 'lake', 'leaf', 'limit', 'math', 'meadow',
'mode', 'moon', 'morning', 'mountain', 'mouse', 'mud', 'night', 'paper',
'pine', 'poetry', 'pond', 'queen', 'rain', 'recipe', 'resonance', 'rice',
'river', 'salad', 'scene', 'sea', 'shadow', 'shape', 'silence', 'sky',
'smoke', 'snow', 'snowflake', 'sound', 'star', 'sun', 'sun', 'sunset',
'surf', 'term', 'thunder', 'tooth', 'tree', 'truth', 'union', 'unit',
'violet', 'voice', 'water', 'waterfall', 'wave', 'wildflower', 'wind', 'wood'
]
numbers = [str(x) for x in range(10)]
return ' '.join([
random.choice(adjectives).capitalize(),
random.choice(nouns).capitalize(),
random.choice(numbers) + random.choice(numbers),
]) | python | def generate_project_name():
"""Generates a random project name."""
adjectives = [
'aged', 'ancient', 'autumn', 'billowing', 'bitter', 'black', 'blue', 'bold',
'broad', 'broken', 'calm', 'cold', 'cool', 'crimson', 'curly', 'damp',
'dark', 'dawn', 'delicate', 'divine', 'dry', 'empty', 'falling', 'fancy',
'flat', 'floral', 'fragrant', 'frosty', 'gentle', 'green', 'hidden', 'holy',
'icy', 'jolly', 'late', 'lingering', 'little', 'lively', 'long', 'lucky',
'misty', 'morning', 'muddy', 'mute', 'nameless', 'noisy', 'odd', 'old',
'orange', 'patient', 'plain', 'polished', 'proud', 'purple', 'quiet', 'rapid',
'raspy', 'red', 'restless', 'rough', 'round', 'royal', 'shiny', 'shrill',
'shy', 'silent', 'small', 'snowy', 'soft', 'solitary', 'sparkling', 'spring',
'square', 'steep', 'still', 'summer', 'super', 'sweet', 'throbbing', 'tight',
'tiny', 'twilight', 'wandering', 'weathered', 'white', 'wild', 'winter', 'wispy',
'withered', 'yellow', 'young'
]
nouns = [
'art', 'band', 'bar', 'base', 'bird', 'block', 'boat', 'bonus',
'bread', 'breeze', 'brook', 'bush', 'butterfly', 'cake', 'cell', 'cherry',
'cloud', 'credit', 'darkness', 'dawn', 'dew', 'disk', 'dream', 'dust',
'feather', 'field', 'fire', 'firefly', 'flower', 'fog', 'forest', 'frog',
'frost', 'glade', 'glitter', 'grass', 'hall', 'hat', 'haze', 'heart',
'hill', 'king', 'lab', 'lake', 'leaf', 'limit', 'math', 'meadow',
'mode', 'moon', 'morning', 'mountain', 'mouse', 'mud', 'night', 'paper',
'pine', 'poetry', 'pond', 'queen', 'rain', 'recipe', 'resonance', 'rice',
'river', 'salad', 'scene', 'sea', 'shadow', 'shape', 'silence', 'sky',
'smoke', 'snow', 'snowflake', 'sound', 'star', 'sun', 'sun', 'sunset',
'surf', 'term', 'thunder', 'tooth', 'tree', 'truth', 'union', 'unit',
'violet', 'voice', 'water', 'waterfall', 'wave', 'wildflower', 'wind', 'wood'
]
numbers = [str(x) for x in range(10)]
return ' '.join([
random.choice(adjectives).capitalize(),
random.choice(nouns).capitalize(),
random.choice(numbers) + random.choice(numbers),
]) | [
"def",
"generate_project_name",
"(",
")",
":",
"adjectives",
"=",
"[",
"'aged'",
",",
"'ancient'",
",",
"'autumn'",
",",
"'billowing'",
",",
"'bitter'",
",",
"'black'",
",",
"'blue'",
",",
"'bold'",
",",
"'broad'",
",",
"'broken'",
",",
"'calm'",
",",
"'cold'",
",",
"'cool'",
",",
"'crimson'",
",",
"'curly'",
",",
"'damp'",
",",
"'dark'",
",",
"'dawn'",
",",
"'delicate'",
",",
"'divine'",
",",
"'dry'",
",",
"'empty'",
",",
"'falling'",
",",
"'fancy'",
",",
"'flat'",
",",
"'floral'",
",",
"'fragrant'",
",",
"'frosty'",
",",
"'gentle'",
",",
"'green'",
",",
"'hidden'",
",",
"'holy'",
",",
"'icy'",
",",
"'jolly'",
",",
"'late'",
",",
"'lingering'",
",",
"'little'",
",",
"'lively'",
",",
"'long'",
",",
"'lucky'",
",",
"'misty'",
",",
"'morning'",
",",
"'muddy'",
",",
"'mute'",
",",
"'nameless'",
",",
"'noisy'",
",",
"'odd'",
",",
"'old'",
",",
"'orange'",
",",
"'patient'",
",",
"'plain'",
",",
"'polished'",
",",
"'proud'",
",",
"'purple'",
",",
"'quiet'",
",",
"'rapid'",
",",
"'raspy'",
",",
"'red'",
",",
"'restless'",
",",
"'rough'",
",",
"'round'",
",",
"'royal'",
",",
"'shiny'",
",",
"'shrill'",
",",
"'shy'",
",",
"'silent'",
",",
"'small'",
",",
"'snowy'",
",",
"'soft'",
",",
"'solitary'",
",",
"'sparkling'",
",",
"'spring'",
",",
"'square'",
",",
"'steep'",
",",
"'still'",
",",
"'summer'",
",",
"'super'",
",",
"'sweet'",
",",
"'throbbing'",
",",
"'tight'",
",",
"'tiny'",
",",
"'twilight'",
",",
"'wandering'",
",",
"'weathered'",
",",
"'white'",
",",
"'wild'",
",",
"'winter'",
",",
"'wispy'",
",",
"'withered'",
",",
"'yellow'",
",",
"'young'",
"]",
"nouns",
"=",
"[",
"'art'",
",",
"'band'",
",",
"'bar'",
",",
"'base'",
",",
"'bird'",
",",
"'block'",
",",
"'boat'",
",",
"'bonus'",
",",
"'bread'",
",",
"'breeze'",
",",
"'brook'",
",",
"'bush'",
",",
"'butterfly'",
",",
"'cake'",
",",
"'cell'",
",",
"'cherry'",
",",
"'cloud'",
",",
"'credit'",
",",
"'darkness'",
",",
"'dawn'",
",",
"'dew'",
",",
"'disk'",
",",
"'dream'",
",",
"'dust'",
",",
"'feather'",
",",
"'field'",
",",
"'fire'",
",",
"'firefly'",
",",
"'flower'",
",",
"'fog'",
",",
"'forest'",
",",
"'frog'",
",",
"'frost'",
",",
"'glade'",
",",
"'glitter'",
",",
"'grass'",
",",
"'hall'",
",",
"'hat'",
",",
"'haze'",
",",
"'heart'",
",",
"'hill'",
",",
"'king'",
",",
"'lab'",
",",
"'lake'",
",",
"'leaf'",
",",
"'limit'",
",",
"'math'",
",",
"'meadow'",
",",
"'mode'",
",",
"'moon'",
",",
"'morning'",
",",
"'mountain'",
",",
"'mouse'",
",",
"'mud'",
",",
"'night'",
",",
"'paper'",
",",
"'pine'",
",",
"'poetry'",
",",
"'pond'",
",",
"'queen'",
",",
"'rain'",
",",
"'recipe'",
",",
"'resonance'",
",",
"'rice'",
",",
"'river'",
",",
"'salad'",
",",
"'scene'",
",",
"'sea'",
",",
"'shadow'",
",",
"'shape'",
",",
"'silence'",
",",
"'sky'",
",",
"'smoke'",
",",
"'snow'",
",",
"'snowflake'",
",",
"'sound'",
",",
"'star'",
",",
"'sun'",
",",
"'sun'",
",",
"'sunset'",
",",
"'surf'",
",",
"'term'",
",",
"'thunder'",
",",
"'tooth'",
",",
"'tree'",
",",
"'truth'",
",",
"'union'",
",",
"'unit'",
",",
"'violet'",
",",
"'voice'",
",",
"'water'",
",",
"'waterfall'",
",",
"'wave'",
",",
"'wildflower'",
",",
"'wind'",
",",
"'wood'",
"]",
"numbers",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"10",
")",
"]",
"return",
"' '",
".",
"join",
"(",
"[",
"random",
".",
"choice",
"(",
"adjectives",
")",
".",
"capitalize",
"(",
")",
",",
"random",
".",
"choice",
"(",
"nouns",
")",
".",
"capitalize",
"(",
")",
",",
"random",
".",
"choice",
"(",
"numbers",
")",
"+",
"random",
".",
"choice",
"(",
"numbers",
")",
",",
"]",
")"
]
| Generates a random project name. | [
"Generates",
"a",
"random",
"project",
"name",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/project.py#L109-L145 | train |
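A quick usage sketch for the name generator above; the import path is an assumption based on this record's file (wakatime/project.py), and the seeded output is illustrative only.

import random

from wakatime.project import generate_project_name  # assumed import path

random.seed(0)  # seeded only so the illustration is repeatable
print(generate_project_name())  # e.g. 'Misty Meadow 42': adjective, noun, two digits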
wakatime/wakatime | wakatime/session_cache.py | SessionCache.save | def save(self, session):
"""Saves a requests.Session object for the next heartbeat process.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
values = {
'value': sqlite3.Binary(pickle.dumps(session, protocol=2)),
}
c.execute('INSERT INTO {0} VALUES (:value)'.format(self.table_name), values)
conn.commit()
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG) | python | def save(self, session):
"""Saves a requests.Session object for the next heartbeat process.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
values = {
'value': sqlite3.Binary(pickle.dumps(session, protocol=2)),
}
c.execute('INSERT INTO {0} VALUES (:value)'.format(self.table_name), values)
conn.commit()
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG) | [
"def",
"save",
"(",
"self",
",",
"session",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"c",
".",
"execute",
"(",
"'DELETE FROM {0}'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"values",
"=",
"{",
"'value'",
":",
"sqlite3",
".",
"Binary",
"(",
"pickle",
".",
"dumps",
"(",
"session",
",",
"protocol",
"=",
"2",
")",
")",
",",
"}",
"c",
".",
"execute",
"(",
"'INSERT INTO {0} VALUES (:value)'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
",",
"values",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")"
]
| Saves a requests.Session object for the next heartbeat process. | [
"Saves",
"a",
"requests",
".",
"Session",
"object",
"for",
"the",
"next",
"heartbeat",
"process",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L44-L60 | train |
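A self-contained sketch of the storage scheme save() uses: a pickled Session goes into a single-row sqlite3 BLOB. Pickle protocol 2 is the newest protocol both Python 2 and Python 3 can read, which matters for a CLI that may be invoked by either interpreter. The table name and payload here are illustrative, not the cache's real schema.

import pickle
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE session_cache (value BLOB)')
blob = sqlite3.Binary(pickle.dumps({'cookies': {}}, protocol=2))
conn.execute('INSERT INTO session_cache VALUES (?)', (blob,))
row = conn.execute('SELECT value FROM session_cache').fetchone()
restored = pickle.loads(row[0])  # round-trips back to the original object
conn.close()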
wakatime/wakatime | wakatime/session_cache.py | SessionCache.get | def get(self):
"""Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
"""
if not HAS_SQL: # pragma: nocover
return requests.session()
try:
conn, c = self.connect()
except:
log.traceback(logging.DEBUG)
return requests.session()
session = None
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
session = pickle.loads(row[0])
except: # pragma: nocover
log.traceback(logging.DEBUG)
try:
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
return session if session is not None else requests.session() | python | def get(self):
"""Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
"""
if not HAS_SQL: # pragma: nocover
return requests.session()
try:
conn, c = self.connect()
except:
log.traceback(logging.DEBUG)
return requests.session()
session = None
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
session = pickle.loads(row[0])
except: # pragma: nocover
log.traceback(logging.DEBUG)
try:
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
return session if session is not None else requests.session() | [
"def",
"get",
"(",
"self",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"requests",
".",
"session",
"(",
")",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"requests",
".",
"session",
"(",
")",
"session",
"=",
"None",
"try",
":",
"c",
".",
"execute",
"(",
"'BEGIN IMMEDIATE'",
")",
"c",
".",
"execute",
"(",
"'SELECT value FROM {0} LIMIT 1'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"not",
"None",
":",
"session",
"=",
"pickle",
".",
"loads",
"(",
"row",
"[",
"0",
"]",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"try",
":",
"conn",
".",
"close",
"(",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"session",
"if",
"session",
"is",
"not",
"None",
"else",
"requests",
".",
"session",
"(",
")"
]
| Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session. | [
"Returns",
"a",
"requests",
".",
"Session",
"object",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L62-L92 | train |
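Taken together with save() above, the intended lifecycle looks roughly like this; the import path and the no-argument constructor are assumptions, since the record only shows the two methods.

from wakatime.session_cache import SessionCache  # assumed import path

cache = SessionCache()
session = cache.get()    # cached Session, or a fresh one if anything fails
try:
    response = session.get('https://api.wakatime.com/api/v1/users/current')
finally:
    cache.save(session)  # persist cookies/keep-alives for the next heartbeat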
wakatime/wakatime | wakatime/session_cache.py | SessionCache.delete | def delete(self):
"""Clears all cached Session objects.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
conn.commit()
conn.close()
except:
log.traceback(logging.DEBUG) | python | def delete(self):
"""Clears all cached Session objects.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
conn.commit()
conn.close()
except:
log.traceback(logging.DEBUG) | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"c",
".",
"execute",
"(",
"'DELETE FROM {0}'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")"
]
| Clears all cached Session objects. | [
"Clears",
"all",
"cached",
"Session",
"objects",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L94-L106 | train |
wakatime/wakatime | wakatime/packages/requests/sessions.py | SessionRedirectMixin.resolve_redirects | def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp | python | def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp | [
"def",
"resolve_redirects",
"(",
"self",
",",
"resp",
",",
"req",
",",
"stream",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"cert",
"=",
"None",
",",
"proxies",
"=",
"None",
",",
"yield_requests",
"=",
"False",
",",
"*",
"*",
"adapter_kwargs",
")",
":",
"hist",
"=",
"[",
"]",
"# keep track of history",
"url",
"=",
"self",
".",
"get_redirect_target",
"(",
"resp",
")",
"while",
"url",
":",
"prepared_request",
"=",
"req",
".",
"copy",
"(",
")",
"# Update history and keep track of redirects.",
"# resp.history must ignore the original request in this loop",
"hist",
".",
"append",
"(",
"resp",
")",
"resp",
".",
"history",
"=",
"hist",
"[",
"1",
":",
"]",
"try",
":",
"resp",
".",
"content",
"# Consume socket so it can be released",
"except",
"(",
"ChunkedEncodingError",
",",
"ContentDecodingError",
",",
"RuntimeError",
")",
":",
"resp",
".",
"raw",
".",
"read",
"(",
"decode_content",
"=",
"False",
")",
"if",
"len",
"(",
"resp",
".",
"history",
")",
">=",
"self",
".",
"max_redirects",
":",
"raise",
"TooManyRedirects",
"(",
"'Exceeded %s redirects.'",
"%",
"self",
".",
"max_redirects",
",",
"response",
"=",
"resp",
")",
"# Release the connection back into the pool.",
"resp",
".",
"close",
"(",
")",
"# Handle redirection without scheme (see: RFC 1808 Section 4)",
"if",
"url",
".",
"startswith",
"(",
"'//'",
")",
":",
"parsed_rurl",
"=",
"urlparse",
"(",
"resp",
".",
"url",
")",
"url",
"=",
"'%s:%s'",
"%",
"(",
"to_native_string",
"(",
"parsed_rurl",
".",
"scheme",
")",
",",
"url",
")",
"# The scheme should be lower case...",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"url",
"=",
"parsed",
".",
"geturl",
"(",
")",
"# Facilitate relative 'location' headers, as allowed by RFC 7231.",
"# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')",
"# Compliant with RFC3986, we percent encode the url.",
"if",
"not",
"parsed",
".",
"netloc",
":",
"url",
"=",
"urljoin",
"(",
"resp",
".",
"url",
",",
"requote_uri",
"(",
"url",
")",
")",
"else",
":",
"url",
"=",
"requote_uri",
"(",
"url",
")",
"prepared_request",
".",
"url",
"=",
"to_native_string",
"(",
"url",
")",
"self",
".",
"rebuild_method",
"(",
"prepared_request",
",",
"resp",
")",
"# https://github.com/requests/requests/issues/1084",
"if",
"resp",
".",
"status_code",
"not",
"in",
"(",
"codes",
".",
"temporary_redirect",
",",
"codes",
".",
"permanent_redirect",
")",
":",
"# https://github.com/requests/requests/issues/3490",
"purged_headers",
"=",
"(",
"'Content-Length'",
",",
"'Content-Type'",
",",
"'Transfer-Encoding'",
")",
"for",
"header",
"in",
"purged_headers",
":",
"prepared_request",
".",
"headers",
".",
"pop",
"(",
"header",
",",
"None",
")",
"prepared_request",
".",
"body",
"=",
"None",
"headers",
"=",
"prepared_request",
".",
"headers",
"try",
":",
"del",
"headers",
"[",
"'Cookie'",
"]",
"except",
"KeyError",
":",
"pass",
"# Extract any cookies sent on the response to the cookiejar",
"# in the new request. Because we've mutated our copied prepared",
"# request, use the old one that we haven't yet touched.",
"extract_cookies_to_jar",
"(",
"prepared_request",
".",
"_cookies",
",",
"req",
",",
"resp",
".",
"raw",
")",
"merge_cookies",
"(",
"prepared_request",
".",
"_cookies",
",",
"self",
".",
"cookies",
")",
"prepared_request",
".",
"prepare_cookies",
"(",
"prepared_request",
".",
"_cookies",
")",
"# Rebuild auth and proxy information.",
"proxies",
"=",
"self",
".",
"rebuild_proxies",
"(",
"prepared_request",
",",
"proxies",
")",
"self",
".",
"rebuild_auth",
"(",
"prepared_request",
",",
"resp",
")",
"# A failed tell() sets `_body_position` to `object()`. This non-None",
"# value ensures `rewindable` will be True, allowing us to raise an",
"# UnrewindableBodyError, instead of hanging the connection.",
"rewindable",
"=",
"(",
"prepared_request",
".",
"_body_position",
"is",
"not",
"None",
"and",
"(",
"'Content-Length'",
"in",
"headers",
"or",
"'Transfer-Encoding'",
"in",
"headers",
")",
")",
"# Attempt to rewind consumed file-like object.",
"if",
"rewindable",
":",
"rewind_body",
"(",
"prepared_request",
")",
"# Override the original request.",
"req",
"=",
"prepared_request",
"if",
"yield_requests",
":",
"yield",
"req",
"else",
":",
"resp",
"=",
"self",
".",
"send",
"(",
"req",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
",",
"proxies",
"=",
"proxies",
",",
"allow_redirects",
"=",
"False",
",",
"*",
"*",
"adapter_kwargs",
")",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"prepared_request",
",",
"resp",
".",
"raw",
")",
"# extract redirect url, if any, for the next loop",
"url",
"=",
"self",
".",
"get_redirect_target",
"(",
"resp",
")",
"yield",
"resp"
]
| Receives a Response. Returns a generator of Responses or Requests. | [
"Receives",
"a",
"Response",
".",
"Returns",
"a",
"generator",
"of",
"Responses",
"or",
"Requests",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/sessions.py#L119-L225 | train |
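resolve_redirects() can also be driven by hand, which is occasionally useful for inspecting a redirect chain. A minimal sketch, assuming network access; the URL is only an example that happens to redirect.

import requests

s = requests.Session()
first = s.get('http://github.com', allow_redirects=False)
chain = [first] + list(s.resolve_redirects(first, first.request))
for r in chain:
    print(r.status_code, r.url)  # e.g. 301 for http://github.com/, then 200 for https://github.com/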
wakatime/wakatime | wakatime/packages/requests/sessions.py | SessionRedirectMixin.rebuild_method | def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method | python | def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method | [
"def",
"rebuild_method",
"(",
"self",
",",
"prepared_request",
",",
"response",
")",
":",
"method",
"=",
"prepared_request",
".",
"method",
"# http://tools.ietf.org/html/rfc7231#section-6.4.4",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"see_other",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Do what the browsers do, despite standards...",
"# First, turn 302s into GETs.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"found",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Second, if a POST is responded to with a 301, turn it into a GET.",
"# This bizarre behaviour is explained in Issue 1704.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"moved",
"and",
"method",
"==",
"'POST'",
":",
"method",
"=",
"'GET'",
"prepared_request",
".",
"method",
"=",
"method"
]
| When being redirected we may want to change the method of the request
based on certain specs or browser behavior. | [
"When",
"being",
"redirected",
"we",
"may",
"want",
"to",
"change",
"the",
"method",
"of",
"the",
"request",
"based",
"on",
"certain",
"specs",
"or",
"browser",
"behavior",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/sessions.py#L292-L312 | train |
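The three rules above can be pinned down with a tiny offline harness; fabricating a Response like this is a test-only shortcut, not how the library is normally used. Note that 307/308 fall through untouched, which is what keeps request bodies alive across those redirects.

from requests import Request, Session
from requests.models import Response

def redirected_method(status, method):
    prepared = Request(method=method, url='http://example.com').prepare()
    resp = Response()
    resp.status_code = status
    Session().rebuild_method(prepared, resp)
    return prepared.method

assert redirected_method(303, 'POST') == 'GET'
assert redirected_method(302, 'POST') == 'GET'
assert redirected_method(301, 'POST') == 'GET'
assert redirected_method(307, 'POST') == 'POST'  # method and body preserved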
wakatime/wakatime | wakatime/packages/pygments/filter.py | apply_filters | def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream | python | def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream | [
"def",
"apply_filters",
"(",
"stream",
",",
"filters",
",",
"lexer",
"=",
"None",
")",
":",
"def",
"_apply",
"(",
"filter_",
",",
"stream",
")",
":",
"for",
"token",
"in",
"filter_",
".",
"filter",
"(",
"lexer",
",",
"stream",
")",
":",
"yield",
"token",
"for",
"filter_",
"in",
"filters",
":",
"stream",
"=",
"_apply",
"(",
"filter_",
",",
"stream",
")",
"return",
"stream"
]
| Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`. | [
"Use",
"this",
"method",
"to",
"apply",
"an",
"iterable",
"of",
"filters",
"to",
"a",
"stream",
".",
"If",
"lexer",
"is",
"given",
"it",
"s",
"forwarded",
"to",
"the",
"filter",
"otherwise",
"the",
"filter",
"receives",
"None",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filter.py#L13-L24 | train |
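The composition trick is easier to see in isolation: each filter wraps the previous generator, so no token is processed until the final stream is consumed. Plain functions stand in here for the Filter objects whose .filter() method apply_filters really calls.

def exclaim(lexer, stream):
    for ttype, value in stream:
        yield ttype, value + '!'

def upper(lexer, stream):
    for ttype, value in stream:
        yield ttype, value.upper()

stream = iter([('Text', 'a'), ('Text', 'b')])
for filter_ in (exclaim, upper):
    stream = filter_(None, stream)
print(list(stream))  # [('Text', 'A!'), ('Text', 'B!')]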
wakatime/wakatime | wakatime/packages/pygments/lexers/data.py | YamlLexer.reset_indent | def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback | python | def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def",
"reset_indent",
"(",
"token_class",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"context",
".",
"indent_stack",
"=",
"[",
"]",
"context",
".",
"indent",
"=",
"-",
"1",
"context",
".",
"next_indent",
"=",
"0",
"context",
".",
"block_scalar_indent",
"=",
"None",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Reset the indentation levels. | [
"Reset",
"the",
"indentation",
"levels",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L56-L66 | train |
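reset_indent() and the factories that follow all share the ExtendedRegexLexer callback contract: a rule callback receives (lexer, match, context), yields (position, token_class, text) triples, and must advance context.pos itself. The minimal shape, stripped of the YAML-specific bookkeeping:

def make_callback(token_class):
    def callback(lexer, match, context):
        yield match.start(), token_class, match.group()
        context.pos = match.end()
    return callback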
wakatime/wakatime | wakatime/packages/pygments/lexers/data.py | YamlLexer.save_indent | def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback | python | def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback | [
"def",
"save_indent",
"(",
"token_class",
",",
"start",
"=",
"False",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"extra",
"=",
"''",
"if",
"start",
":",
"context",
".",
"next_indent",
"=",
"len",
"(",
"text",
")",
"if",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
":",
"while",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
":",
"context",
".",
"indent",
"=",
"context",
".",
"indent_stack",
".",
"pop",
"(",
")",
"if",
"context",
".",
"next_indent",
">",
"context",
".",
"indent",
":",
"extra",
"=",
"text",
"[",
"context",
".",
"indent",
":",
"]",
"text",
"=",
"text",
"[",
":",
"context",
".",
"indent",
"]",
"else",
":",
"context",
".",
"next_indent",
"+=",
"len",
"(",
"text",
")",
"if",
"text",
":",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"if",
"extra",
":",
"yield",
"match",
".",
"start",
"(",
")",
"+",
"len",
"(",
"text",
")",
",",
"token_class",
".",
"Error",
",",
"extra",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Save a possible indentation level. | [
"Save",
"a",
"possible",
"indentation",
"level",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L68-L88 | train |
wakatime/wakatime | wakatime/packages/pygments/lexers/data.py | YamlLexer.set_block_scalar_indent | def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | python | def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def",
"set_block_scalar_indent",
"(",
"token_class",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"context",
".",
"block_scalar_indent",
"=",
"None",
"if",
"not",
"text",
":",
"return",
"increment",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"increment",
":",
"current_indent",
"=",
"max",
"(",
"context",
".",
"indent",
",",
"0",
")",
"increment",
"=",
"int",
"(",
"increment",
")",
"context",
".",
"block_scalar_indent",
"=",
"current_indent",
"+",
"increment",
"if",
"text",
":",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Set an explicit indentation level for a block scalar. | [
"Set",
"an",
"explicit",
"indentation",
"level",
"for",
"a",
"block",
"scalar",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L103-L118 | train |
wakatime/wakatime | wakatime/packages/pygments/lexers/data.py | YamlLexer.parse_block_scalar_indent | def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | python | def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def",
"parse_block_scalar_indent",
"(",
"token_class",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"if",
"context",
".",
"block_scalar_indent",
"is",
"None",
":",
"if",
"len",
"(",
"text",
")",
"<=",
"max",
"(",
"context",
".",
"indent",
",",
"0",
")",
":",
"context",
".",
"stack",
".",
"pop",
"(",
")",
"context",
".",
"stack",
".",
"pop",
"(",
")",
"return",
"context",
".",
"block_scalar_indent",
"=",
"len",
"(",
"text",
")",
"else",
":",
"if",
"len",
"(",
"text",
")",
"<",
"context",
".",
"block_scalar_indent",
":",
"context",
".",
"stack",
".",
"pop",
"(",
")",
"context",
".",
"stack",
".",
"pop",
"(",
")",
"return",
"if",
"text",
":",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Process indentation spaces in a block scalar. | [
"Process",
"indentation",
"spaces",
"in",
"a",
"block",
"scalar",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L137-L155 | train |
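The two block-scalar callbacks above are exercised by scalars with an explicit indentation indicator. A sketch, assuming pygments is importable; the digit after '|' is what set_block_scalar_indent() parses, and the leading spaces on the body lines are what parse_block_scalar_indent() consumes.

from pygments.lexers.data import YamlLexer

text = 'script: |2\n  echo one\n  echo two\n'
for ttype, value in YamlLexer().get_tokens(text):
    print(ttype, repr(value))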
wakatime/wakatime | wakatime/packages/requests/models.py | Response.content | def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content | python | def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content | [
"def",
"content",
"(",
"self",
")",
":",
"if",
"self",
".",
"_content",
"is",
"False",
":",
"# Read the contents.",
"if",
"self",
".",
"_content_consumed",
":",
"raise",
"RuntimeError",
"(",
"'The content for this response was already consumed'",
")",
"if",
"self",
".",
"status_code",
"==",
"0",
"or",
"self",
".",
"raw",
"is",
"None",
":",
"self",
".",
"_content",
"=",
"None",
"else",
":",
"self",
".",
"_content",
"=",
"bytes",
"(",
")",
".",
"join",
"(",
"self",
".",
"iter_content",
"(",
"CONTENT_CHUNK_SIZE",
")",
")",
"or",
"bytes",
"(",
")",
"self",
".",
"_content_consumed",
"=",
"True",
"# don't need to release the connection; that's been handled by urllib3",
"# since we exhausted the data.",
"return",
"self",
".",
"_content"
]
| Content of the response, in bytes. | [
"Content",
"of",
"the",
"response",
"in",
"bytes",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/models.py#L811-L828 | train |
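Two properties of .content worth remembering: it is cached after the first read, and once the raw stream has been exhausted by iter_content() a later first access raises the RuntimeError above rather than silently returning an empty body. A sketch assuming network access; the endpoint is only an example.

import requests

r = requests.get('https://httpbin.org/bytes/16')
first = r.content
second = r.content
assert first is second  # served from the cache, no second read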
wakatime/wakatime | wakatime/packages/simplejson/decoder.py | py_scanstring | def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
# Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end | python | def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
# Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end | [
"def",
"py_scanstring",
"(",
"s",
",",
"end",
",",
"encoding",
"=",
"None",
",",
"strict",
"=",
"True",
",",
"_b",
"=",
"BACKSLASH",
",",
"_m",
"=",
"STRINGCHUNK",
".",
"match",
",",
"_join",
"=",
"u",
"(",
"''",
")",
".",
"join",
",",
"_PY3",
"=",
"PY3",
",",
"_maxunicode",
"=",
"sys",
".",
"maxunicode",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"DEFAULT_ENCODING",
"chunks",
"=",
"[",
"]",
"_append",
"=",
"chunks",
".",
"append",
"begin",
"=",
"end",
"-",
"1",
"while",
"1",
":",
"chunk",
"=",
"_m",
"(",
"s",
",",
"end",
")",
"if",
"chunk",
"is",
"None",
":",
"raise",
"JSONDecodeError",
"(",
"\"Unterminated string starting at\"",
",",
"s",
",",
"begin",
")",
"end",
"=",
"chunk",
".",
"end",
"(",
")",
"content",
",",
"terminator",
"=",
"chunk",
".",
"groups",
"(",
")",
"# Content is contains zero or more unescaped string characters",
"if",
"content",
":",
"if",
"not",
"_PY3",
"and",
"not",
"isinstance",
"(",
"content",
",",
"text_type",
")",
":",
"content",
"=",
"text_type",
"(",
"content",
",",
"encoding",
")",
"_append",
"(",
"content",
")",
"# Terminator is the end of string, a literal control character,",
"# or a backslash denoting that an escape sequence follows",
"if",
"terminator",
"==",
"'\"'",
":",
"break",
"elif",
"terminator",
"!=",
"'\\\\'",
":",
"if",
"strict",
":",
"msg",
"=",
"\"Invalid control character %r at\"",
"raise",
"JSONDecodeError",
"(",
"msg",
",",
"s",
",",
"end",
")",
"else",
":",
"_append",
"(",
"terminator",
")",
"continue",
"try",
":",
"esc",
"=",
"s",
"[",
"end",
"]",
"except",
"IndexError",
":",
"raise",
"JSONDecodeError",
"(",
"\"Unterminated string starting at\"",
",",
"s",
",",
"begin",
")",
"# If not a unicode escape sequence, must be in the lookup table",
"if",
"esc",
"!=",
"'u'",
":",
"try",
":",
"char",
"=",
"_b",
"[",
"esc",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"\"Invalid \\\\X escape sequence %r\"",
"raise",
"JSONDecodeError",
"(",
"msg",
",",
"s",
",",
"end",
")",
"end",
"+=",
"1",
"else",
":",
"# Unicode escape sequence",
"msg",
"=",
"\"Invalid \\\\uXXXX escape sequence\"",
"esc",
"=",
"s",
"[",
"end",
"+",
"1",
":",
"end",
"+",
"5",
"]",
"escX",
"=",
"esc",
"[",
"1",
":",
"2",
"]",
"if",
"len",
"(",
"esc",
")",
"!=",
"4",
"or",
"escX",
"==",
"'x'",
"or",
"escX",
"==",
"'X'",
":",
"raise",
"JSONDecodeError",
"(",
"msg",
",",
"s",
",",
"end",
"-",
"1",
")",
"try",
":",
"uni",
"=",
"int",
"(",
"esc",
",",
"16",
")",
"except",
"ValueError",
":",
"raise",
"JSONDecodeError",
"(",
"msg",
",",
"s",
",",
"end",
"-",
"1",
")",
"end",
"+=",
"5",
"# Check for surrogate pair on UCS-4 systems",
"# Note that this will join high/low surrogate pairs",
"# but will also pass unpaired surrogates through",
"if",
"(",
"_maxunicode",
">",
"65535",
"and",
"uni",
"&",
"0xfc00",
"==",
"0xd800",
"and",
"s",
"[",
"end",
":",
"end",
"+",
"2",
"]",
"==",
"'\\\\u'",
")",
":",
"esc2",
"=",
"s",
"[",
"end",
"+",
"2",
":",
"end",
"+",
"6",
"]",
"escX",
"=",
"esc2",
"[",
"1",
":",
"2",
"]",
"if",
"len",
"(",
"esc2",
")",
"==",
"4",
"and",
"not",
"(",
"escX",
"==",
"'x'",
"or",
"escX",
"==",
"'X'",
")",
":",
"try",
":",
"uni2",
"=",
"int",
"(",
"esc2",
",",
"16",
")",
"except",
"ValueError",
":",
"raise",
"JSONDecodeError",
"(",
"msg",
",",
"s",
",",
"end",
")",
"if",
"uni2",
"&",
"0xfc00",
"==",
"0xdc00",
":",
"uni",
"=",
"0x10000",
"+",
"(",
"(",
"(",
"uni",
"-",
"0xd800",
")",
"<<",
"10",
")",
"|",
"(",
"uni2",
"-",
"0xdc00",
")",
")",
"end",
"+=",
"6",
"char",
"=",
"unichr",
"(",
"uni",
")",
"# Append the unescaped character",
"_append",
"(",
"char",
")",
"return",
"_join",
"(",
"chunks",
")",
",",
"end"
]
| Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote. | [
"Scan",
"the",
"string",
"s",
"for",
"a",
"JSON",
"string",
".",
"End",
"is",
"the",
"index",
"of",
"the",
"character",
"in",
"s",
"after",
"the",
"quote",
"that",
"started",
"the",
"JSON",
"string",
".",
"Unescapes",
"all",
"valid",
"JSON",
"string",
"escape",
"sequences",
"and",
"raises",
"ValueError",
"on",
"attempt",
"to",
"decode",
"an",
"invalid",
"string",
".",
"If",
"strict",
"is",
"False",
"then",
"literal",
"control",
"characters",
"are",
"allowed",
"in",
"the",
"string",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/simplejson/decoder.py#L49-L133 | train |
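A couple of worked calls make the index convention concrete: end is the index just past the opening quote, and the returned index is just past the closing quote. The vendored copy lives under wakatime.packages.simplejson; stock simplejson exposes the same function, which is what this sketch assumes.

from simplejson.decoder import py_scanstring

print(py_scanstring('"hi\\nthere"', 1))      # ('hi\nthere', 11)
print(py_scanstring('"\\ud83d\\ude00"', 1))  # (u'\U0001f600', 14) -- pair joined on wide builds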
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter._get_css_class | def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return '' | python | def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return '' | [
"def",
"_get_css_class",
"(",
"self",
",",
"ttype",
")",
":",
"ttypeclass",
"=",
"_get_ttype_class",
"(",
"ttype",
")",
"if",
"ttypeclass",
":",
"return",
"self",
".",
"classprefix",
"+",
"ttypeclass",
"return",
"''"
]
| Return the css class of this token type prefixed with
the classprefix option. | [
"Return",
"the",
"css",
"class",
"of",
"this",
"token",
"type",
"prefixed",
"with",
"the",
"classprefix",
"option",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L430-L436 | train |
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter._get_css_classes | def _get_css_classes(self, ttype):
"""Return the css classes of this token type prefixed with
the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls | python | def _get_css_classes(self, ttype):
"""Return the css classes of this token type prefixed with
the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls | [
"def",
"_get_css_classes",
"(",
"self",
",",
"ttype",
")",
":",
"cls",
"=",
"self",
".",
"_get_css_class",
"(",
"ttype",
")",
"while",
"ttype",
"not",
"in",
"STANDARD_TYPES",
":",
"ttype",
"=",
"ttype",
".",
"parent",
"cls",
"=",
"self",
".",
"_get_css_class",
"(",
"ttype",
")",
"+",
"' '",
"+",
"cls",
"return",
"cls"
]
| Return the css classes of this token type prefixed with
the classprefix option. | [
"Return",
"the",
"css",
"classes",
"of",
"this",
"token",
"type",
"prefixed",
"with",
"the",
"classprefix",
"option",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L438-L445 | train |
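The practical effect of these two helpers is visible in ordinary HTML output: every token becomes a <span> whose classes carry the (optionally prefixed) token-type abbreviations. The classprefix value here is arbitrary.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight('def f(): pass', PythonLexer(), HtmlFormatter(classprefix='pyg-'))
print(html)  # spans like <span class="pyg-k">def</span> ...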
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter.get_style_defs | def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines) | python | def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines) | [
"def",
"get_style_defs",
"(",
"self",
",",
"arg",
"=",
"None",
")",
":",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"(",
"'cssclass'",
"in",
"self",
".",
"options",
"and",
"'.'",
"+",
"self",
".",
"cssclass",
"or",
"''",
")",
"if",
"isinstance",
"(",
"arg",
",",
"string_types",
")",
":",
"args",
"=",
"[",
"arg",
"]",
"else",
":",
"args",
"=",
"list",
"(",
"arg",
")",
"def",
"prefix",
"(",
"cls",
")",
":",
"if",
"cls",
":",
"cls",
"=",
"'.'",
"+",
"cls",
"tmp",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"tmp",
".",
"append",
"(",
"(",
"arg",
"and",
"arg",
"+",
"' '",
"or",
"''",
")",
"+",
"cls",
")",
"return",
"', '",
".",
"join",
"(",
"tmp",
")",
"styles",
"=",
"[",
"(",
"level",
",",
"ttype",
",",
"cls",
",",
"style",
")",
"for",
"cls",
",",
"(",
"style",
",",
"ttype",
",",
"level",
")",
"in",
"iteritems",
"(",
"self",
".",
"class2style",
")",
"if",
"cls",
"and",
"style",
"]",
"styles",
".",
"sort",
"(",
")",
"lines",
"=",
"[",
"'%s { %s } /* %s */'",
"%",
"(",
"prefix",
"(",
"cls",
")",
",",
"style",
",",
"repr",
"(",
"ttype",
")",
"[",
"6",
":",
"]",
")",
"for",
"(",
"level",
",",
"ttype",
",",
"cls",
",",
"style",
")",
"in",
"styles",
"]",
"if",
"arg",
"and",
"not",
"self",
".",
"nobackground",
"and",
"self",
".",
"style",
".",
"background_color",
"is",
"not",
"None",
":",
"text_style",
"=",
"''",
"if",
"Text",
"in",
"self",
".",
"ttype2class",
":",
"text_style",
"=",
"' '",
"+",
"self",
".",
"class2style",
"[",
"self",
".",
"ttype2class",
"[",
"Text",
"]",
"]",
"[",
"0",
"]",
"lines",
".",
"insert",
"(",
"0",
",",
"'%s { background: %s;%s }'",
"%",
"(",
"prefix",
"(",
"''",
")",
",",
"self",
".",
"style",
".",
"background_color",
",",
"text_style",
")",
")",
"if",
"self",
".",
"style",
".",
"highlight_color",
"is",
"not",
"None",
":",
"lines",
".",
"insert",
"(",
"0",
",",
"'%s.hll { background-color: %s }'",
"%",
"(",
"prefix",
"(",
"''",
")",
",",
"self",
".",
"style",
".",
"highlight_color",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
]
| Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes. | [
"Return",
"CSS",
"style",
"definitions",
"for",
"the",
"classes",
"produced",
"by",
"the",
"current",
"highlighting",
"style",
".",
"arg",
"can",
"be",
"a",
"string",
"or",
"list",
"of",
"selectors",
"to",
"insert",
"before",
"the",
"token",
"type",
"classes",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L471-L508 | train |
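This is the documented way to get a stylesheet that matches the markup the formatter emits; scoping under a selector keeps the rules from leaking into the rest of a page.

from pygments.formatters import HtmlFormatter

css = HtmlFormatter(style='default').get_style_defs('.highlight')
print(css.splitlines()[0])  # e.g. '.highlight .hll { background-color: #ffffcc }'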
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter._format_lines | def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line) | python | def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line) | [
"def",
"_format_lines",
"(",
"self",
",",
"tokensource",
")",
":",
"nocls",
"=",
"self",
".",
"noclasses",
"lsep",
"=",
"self",
".",
"lineseparator",
"# for <span style=\"\"> lookup only",
"getcls",
"=",
"self",
".",
"ttype2class",
".",
"get",
"c2s",
"=",
"self",
".",
"class2style",
"escape_table",
"=",
"_escape_html_table",
"tagsfile",
"=",
"self",
".",
"tagsfile",
"lspan",
"=",
"''",
"line",
"=",
"[",
"]",
"for",
"ttype",
",",
"value",
"in",
"tokensource",
":",
"if",
"nocls",
":",
"cclass",
"=",
"getcls",
"(",
"ttype",
")",
"while",
"cclass",
"is",
"None",
":",
"ttype",
"=",
"ttype",
".",
"parent",
"cclass",
"=",
"getcls",
"(",
"ttype",
")",
"cspan",
"=",
"cclass",
"and",
"'<span style=\"%s\">'",
"%",
"c2s",
"[",
"cclass",
"]",
"[",
"0",
"]",
"or",
"''",
"else",
":",
"cls",
"=",
"self",
".",
"_get_css_classes",
"(",
"ttype",
")",
"cspan",
"=",
"cls",
"and",
"'<span class=\"%s\">'",
"%",
"cls",
"or",
"''",
"parts",
"=",
"value",
".",
"translate",
"(",
"escape_table",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"tagsfile",
"and",
"ttype",
"in",
"Token",
".",
"Name",
":",
"filename",
",",
"linenumber",
"=",
"self",
".",
"_lookup_ctag",
"(",
"value",
")",
"if",
"linenumber",
":",
"base",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"if",
"base",
":",
"base",
"+=",
"'/'",
"filename",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"url",
"=",
"self",
".",
"tagurlformat",
"%",
"{",
"'path'",
":",
"base",
",",
"'fname'",
":",
"filename",
",",
"'fext'",
":",
"extension",
"}",
"parts",
"[",
"0",
"]",
"=",
"\"<a href=\\\"%s#%s-%d\\\">%s\"",
"%",
"(",
"url",
",",
"self",
".",
"lineanchors",
",",
"linenumber",
",",
"parts",
"[",
"0",
"]",
")",
"parts",
"[",
"-",
"1",
"]",
"=",
"parts",
"[",
"-",
"1",
"]",
"+",
"\"</a>\"",
"# for all but the last line",
"for",
"part",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"if",
"line",
":",
"if",
"lspan",
"!=",
"cspan",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"cspan",
",",
"part",
",",
"(",
"cspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"else",
":",
"# both are the same",
"line",
".",
"extend",
"(",
"(",
"part",
",",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"line",
")",
"line",
"=",
"[",
"]",
"elif",
"part",
":",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"(",
"cspan",
",",
"part",
",",
"(",
"cspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"else",
":",
"yield",
"1",
",",
"lsep",
"# for the last line",
"if",
"line",
"and",
"parts",
"[",
"-",
"1",
"]",
":",
"if",
"lspan",
"!=",
"cspan",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"cspan",
",",
"parts",
"[",
"-",
"1",
"]",
")",
")",
"lspan",
"=",
"cspan",
"else",
":",
"line",
".",
"append",
"(",
"parts",
"[",
"-",
"1",
"]",
")",
"elif",
"parts",
"[",
"-",
"1",
"]",
":",
"line",
"=",
"[",
"cspan",
",",
"parts",
"[",
"-",
"1",
"]",
"]",
"lspan",
"=",
"cspan",
"# else we neither have to open a new span nor set lspan",
"if",
"line",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"line",
")"
]
| Just format the tokens, without any wrapping tags.
Yield individual lines. | [
"Just",
"format",
"the",
"tokens",
"without",
"any",
"wrapping",
"tags",
".",
"Yield",
"individual",
"lines",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L712-L781 | train |
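Since _format_lines is what ultimately produces the per-line HTML, its effect is easiest to see with nowrap=True, which skips the outer div/pre wrapping; a hedged sketch (assumes pygments):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Essentially the raw _format_lines output: one HTML line per source
# line, each token wrapped in a classed <span>.
html = highlight('x = 1\ny = 2\n', PythonLexer(), HtmlFormatter(nowrap=True))
print(html)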
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter._highlight_lines | def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value | python | def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value | [
"def",
"_highlight_lines",
"(",
"self",
",",
"tokensource",
")",
":",
"hls",
"=",
"self",
".",
"hl_lines",
"for",
"i",
",",
"(",
"t",
",",
"value",
")",
"in",
"enumerate",
"(",
"tokensource",
")",
":",
"if",
"t",
"!=",
"1",
":",
"yield",
"t",
",",
"value",
"if",
"i",
"+",
"1",
"in",
"hls",
":",
"# i + 1 because Python indexes start at 0",
"if",
"self",
".",
"noclasses",
":",
"style",
"=",
"''",
"if",
"self",
".",
"style",
".",
"highlight_color",
"is",
"not",
"None",
":",
"style",
"=",
"(",
"' style=\"background-color: %s\"'",
"%",
"(",
"self",
".",
"style",
".",
"highlight_color",
",",
")",
")",
"yield",
"1",
",",
"'<span%s>%s</span>'",
"%",
"(",
"style",
",",
"value",
")",
"else",
":",
"yield",
"1",
",",
"'<span class=\"hll\">%s</span>'",
"%",
"value",
"else",
":",
"yield",
"1",
",",
"value"
]
| Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`. | [
"Highlighted",
"the",
"lines",
"specified",
"in",
"the",
"hl_lines",
"option",
"by",
"post",
"-",
"processing",
"the",
"token",
"stream",
"coming",
"from",
"_format_lines",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L790-L810 | train |
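A quick sketch of the <span class="hll"> wrapper this generator emits (assumes pygments):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Line 2 of the snippet gets wrapped in <span class="hll">...</span>.
html = highlight('a = 1\nb = 2\n', PythonLexer(), HtmlFormatter(hl_lines=[2]))
assert 'class="hll"' in html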
wakatime/wakatime | wakatime/packages/pygments/formatters/html.py | HtmlFormatter.format_unencoded | def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece) | python | def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece) | [
"def",
"format_unencoded",
"(",
"self",
",",
"tokensource",
",",
"outfile",
")",
":",
"source",
"=",
"self",
".",
"_format_lines",
"(",
"tokensource",
")",
"if",
"self",
".",
"hl_lines",
":",
"source",
"=",
"self",
".",
"_highlight_lines",
"(",
"source",
")",
"if",
"not",
"self",
".",
"nowrap",
":",
"if",
"self",
".",
"linenos",
"==",
"2",
":",
"source",
"=",
"self",
".",
"_wrap_inlinelinenos",
"(",
"source",
")",
"if",
"self",
".",
"lineanchors",
":",
"source",
"=",
"self",
".",
"_wrap_lineanchors",
"(",
"source",
")",
"if",
"self",
".",
"linespans",
":",
"source",
"=",
"self",
".",
"_wrap_linespans",
"(",
"source",
")",
"source",
"=",
"self",
".",
"wrap",
"(",
"source",
",",
"outfile",
")",
"if",
"self",
".",
"linenos",
"==",
"1",
":",
"source",
"=",
"self",
".",
"_wrap_tablelinenos",
"(",
"source",
")",
"if",
"self",
".",
"full",
":",
"source",
"=",
"self",
".",
"_wrap_full",
"(",
"source",
",",
"outfile",
")",
"for",
"t",
",",
"piece",
"in",
"source",
":",
"outfile",
".",
"write",
"(",
"piece",
")"
]
| The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators. | [
"The",
"formatting",
"process",
"uses",
"several",
"nested",
"generators",
";",
"which",
"of",
"them",
"are",
"used",
"is",
"determined",
"by",
"the",
"user",
"s",
"options",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L820-L851 | train |
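The (code, text) protocol described above makes custom wrappers straightforward; a hedged sketch of a subclass (the class name and comment markers are invented for illustration; assumes pygments):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

class CommentedHtmlFormatter(HtmlFormatter):
    def wrap(self, source, outfile):
        # Follow the protocol: code 0 is wrapping, code 1 is real source.
        yield 0, '<!-- highlighted block -->\n'
        for code, text in HtmlFormatter.wrap(self, source, outfile):
            yield code, text

print(highlight('x = 1\n', PythonLexer(), CommentedHtmlFormatter()))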
wakatime/wakatime | wakatime/packages/pygments/lexer.py | bygroups | def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback | python | def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback | [
"def",
"bygroups",
"(",
"*",
"args",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"ctx",
"=",
"None",
")",
":",
"for",
"i",
",",
"action",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"action",
"is",
"None",
":",
"continue",
"elif",
"type",
"(",
"action",
")",
"is",
"_TokenType",
":",
"data",
"=",
"match",
".",
"group",
"(",
"i",
"+",
"1",
")",
"if",
"data",
":",
"yield",
"match",
".",
"start",
"(",
"i",
"+",
"1",
")",
",",
"action",
",",
"data",
"else",
":",
"data",
"=",
"match",
".",
"group",
"(",
"i",
"+",
"1",
")",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"ctx",
":",
"ctx",
".",
"pos",
"=",
"match",
".",
"start",
"(",
"i",
"+",
"1",
")",
"for",
"item",
"in",
"action",
"(",
"lexer",
",",
"_PseudoMatch",
"(",
"match",
".",
"start",
"(",
"i",
"+",
"1",
")",
",",
"data",
")",
",",
"ctx",
")",
":",
"if",
"item",
":",
"yield",
"item",
"if",
"ctx",
":",
"ctx",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Callback that yields multiple actions for each group in the match. | [
"Callback",
"that",
"yields",
"multiple",
"actions",
"for",
"each",
"group",
"in",
"the",
"match",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L305-L328 | train |
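A toy lexer showing bygroups assigning one token type per regex group (the grammar is invented; assumes pygments):

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Number, Operator, Whitespace

class AssignLexer(RegexLexer):
    tokens = {
        'root': [
            # group 1 -> Name, 2 -> Whitespace, 3 -> Operator,
            # 4 -> Whitespace, 5 -> Number
            (r'(\w+)(\s*)(=)(\s*)(\d+)',
             bygroups(Name, Whitespace, Operator, Whitespace, Number)),
            (r'\s+', Whitespace),
        ],
    }

print(list(AssignLexer().get_tokens('x = 42')))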
wakatime/wakatime | wakatime/packages/pygments/lexer.py | using | def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback | python | def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback | [
"def",
"using",
"(",
"_other",
",",
"*",
"*",
"kwargs",
")",
":",
"gt_kwargs",
"=",
"{",
"}",
"if",
"'state'",
"in",
"kwargs",
":",
"s",
"=",
"kwargs",
".",
"pop",
"(",
"'state'",
")",
"if",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"gt_kwargs",
"[",
"'stack'",
"]",
"=",
"s",
"else",
":",
"gt_kwargs",
"[",
"'stack'",
"]",
"=",
"(",
"'root'",
",",
"s",
")",
"if",
"_other",
"is",
"this",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"ctx",
"=",
"None",
")",
":",
"# if keyword arguments are given the callback",
"# function has to create a new lexer instance",
"if",
"kwargs",
":",
"# XXX: cache that somehow",
"kwargs",
".",
"update",
"(",
"lexer",
".",
"options",
")",
"lx",
"=",
"lexer",
".",
"__class__",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"lx",
"=",
"lexer",
"s",
"=",
"match",
".",
"start",
"(",
")",
"for",
"i",
",",
"t",
",",
"v",
"in",
"lx",
".",
"get_tokens_unprocessed",
"(",
"match",
".",
"group",
"(",
")",
",",
"*",
"*",
"gt_kwargs",
")",
":",
"yield",
"i",
"+",
"s",
",",
"t",
",",
"v",
"if",
"ctx",
":",
"ctx",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"else",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"ctx",
"=",
"None",
")",
":",
"# XXX: cache that somehow",
"kwargs",
".",
"update",
"(",
"lexer",
".",
"options",
")",
"lx",
"=",
"_other",
"(",
"*",
"*",
"kwargs",
")",
"s",
"=",
"match",
".",
"start",
"(",
")",
"for",
"i",
",",
"t",
",",
"v",
"in",
"lx",
".",
"get_tokens_unprocessed",
"(",
"match",
".",
"group",
"(",
")",
",",
"*",
"*",
"gt_kwargs",
")",
":",
"yield",
"i",
"+",
"s",
",",
"t",
",",
"v",
"if",
"ctx",
":",
"ctx",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
]
| Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. | [
"Callback",
"that",
"processes",
"the",
"match",
"with",
"a",
"different",
"lexer",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L339-L386 | train |
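A sketch of using() delegating a matched region to another lexer, here combined with bygroups (the template grammar is invented; assumes pygments):

from pygments.lexer import RegexLexer, bygroups, using
from pygments.lexers import PythonLexer
from pygments.token import Keyword, Other

class TemplateLexer(RegexLexer):
    tokens = {
        'root': [
            # Everything between {% and %} is re-lexed with PythonLexer.
            (r'(\{%)(.*?)(%\})',
             bygroups(Keyword, using(PythonLexer), Keyword)),
            (r'[^{]+', Other),
            (r'\{', Other),
        ],
    }

print(list(TemplateLexer().get_tokens('hi {% x = 1 %} bye')))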
wakatime/wakatime | wakatime/packages/pygments/lexer.py | do_insertions | def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break | python | def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break | [
"def",
"do_insertions",
"(",
"insertions",
",",
"tokens",
")",
":",
"insertions",
"=",
"iter",
"(",
"insertions",
")",
"try",
":",
"index",
",",
"itokens",
"=",
"next",
"(",
"insertions",
")",
"except",
"StopIteration",
":",
"# no insertions",
"for",
"item",
"in",
"tokens",
":",
"yield",
"item",
"return",
"realpos",
"=",
"None",
"insleft",
"=",
"True",
"# iterate over the token stream where we want to insert",
"# the tokens from the insertion list.",
"for",
"i",
",",
"t",
",",
"v",
"in",
"tokens",
":",
"# first iteration. store the postition of first item",
"if",
"realpos",
"is",
"None",
":",
"realpos",
"=",
"i",
"oldi",
"=",
"0",
"while",
"insleft",
"and",
"i",
"+",
"len",
"(",
"v",
")",
">=",
"index",
":",
"tmpval",
"=",
"v",
"[",
"oldi",
":",
"index",
"-",
"i",
"]",
"yield",
"realpos",
",",
"t",
",",
"tmpval",
"realpos",
"+=",
"len",
"(",
"tmpval",
")",
"for",
"it_index",
",",
"it_token",
",",
"it_value",
"in",
"itokens",
":",
"yield",
"realpos",
",",
"it_token",
",",
"it_value",
"realpos",
"+=",
"len",
"(",
"it_value",
")",
"oldi",
"=",
"index",
"-",
"i",
"try",
":",
"index",
",",
"itokens",
"=",
"next",
"(",
"insertions",
")",
"except",
"StopIteration",
":",
"insleft",
"=",
"False",
"break",
"# not strictly necessary",
"yield",
"realpos",
",",
"t",
",",
"v",
"[",
"oldi",
":",
"]",
"realpos",
"+=",
"len",
"(",
"v",
")",
"-",
"oldi",
"# leftover tokens",
"while",
"insleft",
":",
"# no normal tokens, set realpos to zero",
"realpos",
"=",
"realpos",
"or",
"0",
"for",
"p",
",",
"t",
",",
"v",
"in",
"itokens",
":",
"yield",
"realpos",
",",
"t",
",",
"v",
"realpos",
"+=",
"len",
"(",
"v",
")",
"try",
":",
"index",
",",
"itokens",
"=",
"next",
"(",
"insertions",
")",
"except",
"StopIteration",
":",
"insleft",
"=",
"False",
"break"
]
| Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here. | [
"Helper",
"for",
"lexers",
"which",
"must",
"combine",
"the",
"results",
"of",
"several",
"sublexers",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L758-L818 | train |
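A hedged sketch of the pattern console lexers use with do_insertions: splice prompt tokens into a sublexer's stream (assumes pygments):

from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

code = 'x = 1\n'
# Insert a fake '>>> ' prompt at position 0 of the lexed code.
insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
stream = PythonLexer().get_tokens_unprocessed(code)
for index, token, value in do_insertions(insertions, stream):
    print(index, token, repr(value))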
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta._process_regex | def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match | python | def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match | [
"def",
"_process_regex",
"(",
"cls",
",",
"regex",
",",
"rflags",
",",
"state",
")",
":",
"if",
"isinstance",
"(",
"regex",
",",
"Future",
")",
":",
"regex",
"=",
"regex",
".",
"get",
"(",
")",
"return",
"re",
".",
"compile",
"(",
"regex",
",",
"rflags",
")",
".",
"match"
]
| Preprocess the regular expression component of a token definition. | [
"Preprocess",
"the",
"regular",
"expression",
"component",
"of",
"a",
"token",
"definition",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L423-L427 | train |
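The Future branch exists for lazily built regexes such as those returned by words(); a small sketch (the toy grammar is invented; assumes pygments):

from pygments.lexer import RegexLexer, words
from pygments.token import Keyword, Name, Text

class KwLexer(RegexLexer):
    tokens = {
        'root': [
            # words() returns a Future; _process_regex resolves and
            # compiles it the first time the lexer class is used.
            (words(('if', 'else', 'while'), suffix=r'\b'), Keyword),
            (r'\w+', Name),
            (r'\s+', Text),
        ],
    }

print(list(KwLexer().get_tokens('if x else y')))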
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta._process_token | def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token | python | def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token | [
"def",
"_process_token",
"(",
"cls",
",",
"token",
")",
":",
"assert",
"type",
"(",
"token",
")",
"is",
"_TokenType",
"or",
"callable",
"(",
"token",
")",
",",
"'token type must be simple type or callable, not %r'",
"%",
"(",
"token",
",",
")",
"return",
"token"
]
| Preprocess the token component of a token definition. | [
"Preprocess",
"the",
"token",
"component",
"of",
"a",
"token",
"definition",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L429-L433 | train |
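A quick illustration of the accepted token actions (this pokes an internal method and is shown for illustration only; assumes pygments):

from pygments.lexers import PythonLexer
from pygments.token import Name

# A plain token type passes through unchanged; a non-callable,
# non-token value (e.g. a string) would trip the assertion instead.
assert PythonLexer._process_token(Name) is Name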
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta._process_new_state | def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state | python | def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state | [
"def",
"_process_new_state",
"(",
"cls",
",",
"new_state",
",",
"unprocessed",
",",
"processed",
")",
":",
"if",
"isinstance",
"(",
"new_state",
",",
"str",
")",
":",
"# an existing state",
"if",
"new_state",
"==",
"'#pop'",
":",
"return",
"-",
"1",
"elif",
"new_state",
"in",
"unprocessed",
":",
"return",
"(",
"new_state",
",",
")",
"elif",
"new_state",
"==",
"'#push'",
":",
"return",
"new_state",
"elif",
"new_state",
"[",
":",
"5",
"]",
"==",
"'#pop:'",
":",
"return",
"-",
"int",
"(",
"new_state",
"[",
"5",
":",
"]",
")",
"else",
":",
"assert",
"False",
",",
"'unknown new state %r'",
"%",
"new_state",
"elif",
"isinstance",
"(",
"new_state",
",",
"combined",
")",
":",
"# combine a new state from existing ones",
"tmp_state",
"=",
"'_tmp_%d'",
"%",
"cls",
".",
"_tmpname",
"cls",
".",
"_tmpname",
"+=",
"1",
"itokens",
"=",
"[",
"]",
"for",
"istate",
"in",
"new_state",
":",
"assert",
"istate",
"!=",
"new_state",
",",
"'circular state ref %r'",
"%",
"istate",
"itokens",
".",
"extend",
"(",
"cls",
".",
"_process_state",
"(",
"unprocessed",
",",
"processed",
",",
"istate",
")",
")",
"processed",
"[",
"tmp_state",
"]",
"=",
"itokens",
"return",
"(",
"tmp_state",
",",
")",
"elif",
"isinstance",
"(",
"new_state",
",",
"tuple",
")",
":",
"# push more than one state",
"for",
"istate",
"in",
"new_state",
":",
"assert",
"(",
"istate",
"in",
"unprocessed",
"or",
"istate",
"in",
"(",
"'#pop'",
",",
"'#push'",
")",
")",
",",
"'unknown new state '",
"+",
"istate",
"return",
"new_state",
"else",
":",
"assert",
"False",
",",
"'unknown new state def %r'",
"%",
"new_state"
]
| Preprocess the state transition action of a token definition. | [
"Preprocess",
"the",
"state",
"transition",
"action",
"of",
"a",
"token",
"definition",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L435-L468 | train |
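A toy lexer exercising the two most common transition forms handled here, a state push and '#pop' (assumes pygments):

from pygments.lexer import RegexLexer
from pygments.token import Name, String, Text

class QuoteLexer(RegexLexer):
    tokens = {
        'root': [
            (r'"', String, 'string'),   # push the 'string' state
            (r'\w+', Name),
            (r'\s+', Text),
        ],
        'string': [
            (r'[^"]+', String),
            (r'"', String, '#pop'),     # pop back to 'root'
        ],
    }

print(list(QuoteLexer().get_tokens('say "hi"')))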
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta._process_state | def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens | python | def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens | [
"def",
"_process_state",
"(",
"cls",
",",
"unprocessed",
",",
"processed",
",",
"state",
")",
":",
"assert",
"type",
"(",
"state",
")",
"is",
"str",
",",
"\"wrong state name %r\"",
"%",
"state",
"assert",
"state",
"[",
"0",
"]",
"!=",
"'#'",
",",
"\"invalid state name %r\"",
"%",
"state",
"if",
"state",
"in",
"processed",
":",
"return",
"processed",
"[",
"state",
"]",
"tokens",
"=",
"processed",
"[",
"state",
"]",
"=",
"[",
"]",
"rflags",
"=",
"cls",
".",
"flags",
"for",
"tdef",
"in",
"unprocessed",
"[",
"state",
"]",
":",
"if",
"isinstance",
"(",
"tdef",
",",
"include",
")",
":",
"# it's a state reference",
"assert",
"tdef",
"!=",
"state",
",",
"\"circular state reference %r\"",
"%",
"state",
"tokens",
".",
"extend",
"(",
"cls",
".",
"_process_state",
"(",
"unprocessed",
",",
"processed",
",",
"str",
"(",
"tdef",
")",
")",
")",
"continue",
"if",
"isinstance",
"(",
"tdef",
",",
"_inherit",
")",
":",
"# should be processed already, but may not in the case of:",
"# 1. the state has no counterpart in any parent",
"# 2. the state includes more than one 'inherit'",
"continue",
"if",
"isinstance",
"(",
"tdef",
",",
"default",
")",
":",
"new_state",
"=",
"cls",
".",
"_process_new_state",
"(",
"tdef",
".",
"state",
",",
"unprocessed",
",",
"processed",
")",
"tokens",
".",
"append",
"(",
"(",
"re",
".",
"compile",
"(",
"''",
")",
".",
"match",
",",
"None",
",",
"new_state",
")",
")",
"continue",
"assert",
"type",
"(",
"tdef",
")",
"is",
"tuple",
",",
"\"wrong rule def %r\"",
"%",
"tdef",
"try",
":",
"rex",
"=",
"cls",
".",
"_process_regex",
"(",
"tdef",
"[",
"0",
"]",
",",
"rflags",
",",
"state",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"ValueError",
"(",
"\"uncompilable regex %r in state %r of %r: %s\"",
"%",
"(",
"tdef",
"[",
"0",
"]",
",",
"state",
",",
"cls",
",",
"err",
")",
")",
"token",
"=",
"cls",
".",
"_process_token",
"(",
"tdef",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"tdef",
")",
"==",
"2",
":",
"new_state",
"=",
"None",
"else",
":",
"new_state",
"=",
"cls",
".",
"_process_new_state",
"(",
"tdef",
"[",
"2",
"]",
",",
"unprocessed",
",",
"processed",
")",
"tokens",
".",
"append",
"(",
"(",
"rex",
",",
"token",
",",
"new_state",
")",
")",
"return",
"tokens"
]
| Preprocess a single state definition. | [
"Preprocess",
"a",
"single",
"state",
"definition",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L470-L512 | train |
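A sketch using include and default, the two rule kinds this method treats specially (the grammar is invented; assumes pygments):

from pygments.lexer import RegexLexer, default, include
from pygments.token import Comment, Number, Text

class TinyLexer(RegexLexer):
    tokens = {
        'common': [
            (r'#.*', Comment),
            (r'\d+', Number),
        ],
        'root': [
            include('common'),    # splice the 'common' rules in here
            (r'\s+', Text),
            default('fallback'),  # empty-match transition to 'fallback'
        ],
        'fallback': [
            (r'.', Text, '#pop'),
        ],
    }

print(list(TinyLexer().get_tokens('1 # two\nx')))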
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta.process_tokendef | def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed | python | def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed | [
"def",
"process_tokendef",
"(",
"cls",
",",
"name",
",",
"tokendefs",
"=",
"None",
")",
":",
"processed",
"=",
"cls",
".",
"_all_tokens",
"[",
"name",
"]",
"=",
"{",
"}",
"tokendefs",
"=",
"tokendefs",
"or",
"cls",
".",
"tokens",
"[",
"name",
"]",
"for",
"state",
"in",
"list",
"(",
"tokendefs",
")",
":",
"cls",
".",
"_process_state",
"(",
"tokendefs",
",",
"processed",
",",
"state",
")",
"return",
"processed"
]
| Preprocess a dictionary of token definitions. | [
"Preprocess",
"a",
"dictionary",
"of",
"token",
"definitions",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L514-L520 | train |
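The metaclass invokes this lazily, on first instantiation; a hedged peek at the result (an internal attribute, shown for illustration only; assumes pygments):

from pygments.lexers import PythonLexer

PythonLexer()  # first call triggers process_tokendef() via the metaclass
print(len(PythonLexer._tokens))  # number of processed lexer states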
wakatime/wakatime | wakatime/packages/pygments/lexer.py | RegexLexerMeta.get_tokendefs | def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens | python | def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens | [
"def",
"get_tokendefs",
"(",
"cls",
")",
":",
"tokens",
"=",
"{",
"}",
"inheritable",
"=",
"{",
"}",
"for",
"c",
"in",
"cls",
".",
"__mro__",
":",
"toks",
"=",
"c",
".",
"__dict__",
".",
"get",
"(",
"'tokens'",
",",
"{",
"}",
")",
"for",
"state",
",",
"items",
"in",
"iteritems",
"(",
"toks",
")",
":",
"curitems",
"=",
"tokens",
".",
"get",
"(",
"state",
")",
"if",
"curitems",
"is",
"None",
":",
"# N.b. because this is assigned by reference, sufficiently",
"# deep hierarchies are processed incrementally (e.g. for",
"# A(B), B(C), C(RegexLexer), B will be premodified so X(B)",
"# will not see any inherits in B).",
"tokens",
"[",
"state",
"]",
"=",
"items",
"try",
":",
"inherit_ndx",
"=",
"items",
".",
"index",
"(",
"inherit",
")",
"except",
"ValueError",
":",
"continue",
"inheritable",
"[",
"state",
"]",
"=",
"inherit_ndx",
"continue",
"inherit_ndx",
"=",
"inheritable",
".",
"pop",
"(",
"state",
",",
"None",
")",
"if",
"inherit_ndx",
"is",
"None",
":",
"continue",
"# Replace the \"inherit\" value with the items",
"curitems",
"[",
"inherit_ndx",
":",
"inherit_ndx",
"+",
"1",
"]",
"=",
"items",
"try",
":",
"# N.b. this is the index in items (that is, the superclass",
"# copy), so offset required when storing below.",
"new_inh_ndx",
"=",
"items",
".",
"index",
"(",
"inherit",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"inheritable",
"[",
"state",
"]",
"=",
"inherit_ndx",
"+",
"new_inh_ndx",
"return",
"tokens"
]
| Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state. | [
"Merge",
"tokens",
"from",
"superclasses",
"in",
"MRO",
"order",
"returning",
"a",
"single",
"tokendef",
"dictionary",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L522-L569 | train |
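A sketch of the special inherit marker splicing a parent state's rules into a subclass (the lexers are invented; assumes pygments):

from pygments.lexer import RegexLexer, inherit
from pygments.token import Keyword, Name, Text

class BaseLexer(RegexLexer):
    tokens = {
        'root': [
            (r'\bbase\b', Keyword),
            (r'\w+', Name),
            (r'\s+', Text),
        ],
    }

class ChildLexer(BaseLexer):
    tokens = {
        'root': [
            (r'\bchild\b', Keyword.Pseudo),
            inherit,  # then fall through to BaseLexer's 'root' rules
        ],
    }

print(list(ChildLexer().get_tokens('base child other')))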
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | memorized_timedelta | def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta | python | def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta | [
"def",
"memorized_timedelta",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_timedelta_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"_timedelta_cache",
"[",
"seconds",
"]",
"=",
"delta",
"return",
"delta"
]
| Create only one instance of each distinct timedelta | [
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"timedelta"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L16-L23 | train |
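A quick identity check of the cache (the import uses upstream pytz; the vendored copy here lives under wakatime.packages):

from pytz.tzinfo import memorized_timedelta

a = memorized_timedelta(3600)
b = memorized_timedelta(3600)
assert a is b  # the same object, not merely an equal one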
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | memorized_datetime | def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt | python | def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt | [
"def",
"memorized_datetime",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_datetime_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"# NB. We can't just do datetime.utcfromtimestamp(seconds) as this",
"# fails with negative values under Windows (Bug #90096)",
"dt",
"=",
"_epoch",
"+",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"_datetime_cache",
"[",
"seconds",
"]",
"=",
"dt",
"return",
"dt"
]
| Create only one instance of each distinct datetime | [
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"datetime"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L27-L36 | train |
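The epoch-plus-timedelta construction sidesteps the Windows limitation noted in the comment; a small sketch (upstream pytz import path):

from pytz.tzinfo import memorized_datetime

# Negative offsets from the epoch work, unlike utcfromtimestamp on Windows.
print(memorized_datetime(-3600))  # 1969-12-31 23:00:00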
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | memorized_ttinfo | def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo | python | def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo | [
"def",
"memorized_ttinfo",
"(",
"*",
"args",
")",
":",
"try",
":",
"return",
"_ttinfo_cache",
"[",
"args",
"]",
"except",
"KeyError",
":",
"ttinfo",
"=",
"(",
"memorized_timedelta",
"(",
"args",
"[",
"0",
"]",
")",
",",
"memorized_timedelta",
"(",
"args",
"[",
"1",
"]",
")",
",",
"args",
"[",
"2",
"]",
")",
"_ttinfo_cache",
"[",
"args",
"]",
"=",
"ttinfo",
"return",
"ttinfo"
]
| Create only one instance of each distinct tuple | [
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"tuple"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L39-L50 | train |
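The same caching idea applied to (utcoffset, dst, tzname) triples; a sketch (upstream pytz import path):

from pytz.tzinfo import memorized_ttinfo

info = memorized_ttinfo(-12600, 3600, 'NDT')
assert info is memorized_ttinfo(-12600, 3600, 'NDT')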
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | unpickler | def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
# doubt this will ever been needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf] | python | def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
# doubt this will ever been needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf] | [
"def",
"unpickler",
"(",
"zone",
",",
"utcoffset",
"=",
"None",
",",
"dstoffset",
"=",
"None",
",",
"tzname",
"=",
"None",
")",
":",
"# Raises a KeyError if zone no longer exists, which should never happen",
"# and would be a bug.",
"tz",
"=",
"pytz",
".",
"timezone",
"(",
"zone",
")",
"# A StaticTzInfo - just return it",
"if",
"utcoffset",
"is",
"None",
":",
"return",
"tz",
"# This pickle was created from a DstTzInfo. We need to",
"# determine which of the list of tzinfo instances for this zone",
"# to use in order to restore the state of any datetime instances using",
"# it correctly.",
"utcoffset",
"=",
"memorized_timedelta",
"(",
"utcoffset",
")",
"dstoffset",
"=",
"memorized_timedelta",
"(",
"dstoffset",
")",
"try",
":",
"return",
"tz",
".",
"_tzinfos",
"[",
"(",
"utcoffset",
",",
"dstoffset",
",",
"tzname",
")",
"]",
"except",
"KeyError",
":",
"# The particular state requested in this timezone no longer exists.",
"# This indicates a corrupt pickle, or the timezone database has been",
"# corrected violently enough to make this particular",
"# (utcoffset,dstoffset) no longer exist in the zone, or the",
"# abbreviation has been changed.",
"pass",
"# See if we can find an entry differing only by tzname. Abbreviations",
"# get changed from the initial guess by the database maintainers to",
"# match reality when this information is discovered.",
"for",
"localized_tz",
"in",
"tz",
".",
"_tzinfos",
".",
"values",
"(",
")",
":",
"if",
"(",
"localized_tz",
".",
"_utcoffset",
"==",
"utcoffset",
"and",
"localized_tz",
".",
"_dst",
"==",
"dstoffset",
")",
":",
"return",
"localized_tz",
"# This (utcoffset, dstoffset) information has been removed from the",
"# zone. Add it back. This might occur when the database maintainers have",
"# corrected incorrect information. datetime instances using this",
"# incorrect information will continue to do so, exactly as they were",
"# before being pickled. This is purely an overly paranoid safety net - I",
"# doubt this will ever been needed in real life.",
"inf",
"=",
"(",
"utcoffset",
",",
"dstoffset",
",",
"tzname",
")",
"tz",
".",
"_tzinfos",
"[",
"inf",
"]",
"=",
"tz",
".",
"__class__",
"(",
"inf",
",",
"tz",
".",
"_tzinfos",
")",
"return",
"tz",
".",
"_tzinfos",
"[",
"inf",
"]"
]
| Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade. | [
"Factory",
"function",
"for",
"unpickling",
"pytz",
"tzinfo",
"instances",
"."
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L516-L564 | train |
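unpickler runs implicitly whenever a localized datetime is unpickled; a round-trip sketch showing that the cached tzinfo singleton is restored (assumes pytz):

import pickle
from datetime import datetime
import pytz

tz = pytz.timezone('America/St_Johns')
dt = tz.localize(datetime(2009, 9, 1))
clone = pickle.loads(pickle.dumps(dt))
assert clone.tzinfo is dt.tzinfo  # unpickler returned the cached instance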
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | DstTzInfo.utcoffset | def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset | python | def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset | [
"def",
"utcoffset",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"None",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_utcoffset",
"else",
":",
"return",
"self",
".",
"_utcoffset"
]
| See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous | [
"See",
"datetime",
".",
"tzinfo",
".",
"utcoffset"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L382-L411 | train |
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | DstTzInfo.dst | def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst | python | def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst | [
"def",
"dst",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"None",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_dst",
"else",
":",
"return",
"self",
".",
"_dst"
]
| See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous | [
"See",
"datetime",
".",
"tzinfo",
".",
"dst"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L413-L450 | train |
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | DstTzInfo.tzname | def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname | python | def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname | [
"def",
"tzname",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"self",
".",
"zone",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_tzname",
"else",
":",
"return",
"self",
".",
"_tzname"
]
| See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous | [
"See",
"datetime",
".",
"tzinfo",
".",
"tzname"
]
| 74519ace04e8472f3a3993269963732b9946a01d | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L452-L488 | train |
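
The three pytz records above share the same is_dst disambiguation pattern. A minimal sketch pulling them together, assuming pytz and its America/St_Johns zone data are available; the expected values come straight from the doctests in the records:

from datetime import datetime
import pytz

tz = pytz.timezone('America/St_Johns')
ambiguous = datetime(2009, 10, 31, 23, 30)  # inside the DST fall-back window

# Passing is_dst resolves the ambiguity instead of raising AmbiguousTimeError.
offset = tz.utcoffset(ambiguous, is_dst=False)  # timedelta(-1, 73800), i.e. -3:30
dst = tz.dst(ambiguous, is_dst=True)            # timedelta(0, 3600), one hour of DST
name = tz.tzname(ambiguous, is_dst=True)        # 'NDT'
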
slhck/ffmpeg-normalize | ffmpeg_normalize/_ffmpeg_normalize.py | check_range | def check_range(number, min_r, max_r, name=""):
"""
Check if a number is within a given range
"""
try:
number = float(number)
if number < min_r or number > max_r:
raise FFmpegNormalizeError(
"{} must be within [{},{}]".format(
name, min_r, max_r
)
)
return number
pass
except Exception as e:
raise e | python | def check_range(number, min_r, max_r, name=""):
"""
Check if a number is within a given range
"""
try:
number = float(number)
if number < min_r or number > max_r:
raise FFmpegNormalizeError(
"{} must be within [{},{}]".format(
name, min_r, max_r
)
)
return number
pass
except Exception as e:
raise e | [
"def",
"check_range",
"(",
"number",
",",
"min_r",
",",
"max_r",
",",
"name",
"=",
"\"\"",
")",
":",
"try",
":",
"number",
"=",
"float",
"(",
"number",
")",
"if",
"number",
"<",
"min_r",
"or",
"number",
">",
"max_r",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"{} must be within [{},{}]\"",
".",
"format",
"(",
"name",
",",
"min_r",
",",
"max_r",
")",
")",
"return",
"number",
"pass",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
]
| Check if a number is within a given range | [
"Check",
"if",
"a",
"number",
"is",
"within",
"a",
"given",
"range"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L16-L31 | train |
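
Illustrative use of check_range from the record above; the import path follows the record's path field, and the numeric bounds are made up for the example:

from ffmpeg_normalize._ffmpeg_normalize import check_range

level = check_range("-23", -70.0, -5.0, name="target level")  # coerced to float, returns -23.0

# An out-of-range value raises FFmpegNormalizeError through the wrapped check:
# check_range(0, -70.0, -5.0, name="target level")
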
slhck/ffmpeg-normalize | ffmpeg_normalize/_ffmpeg_normalize.py | FFmpegNormalize.add_media_file | def add_media_file(self, input_file, output_file):
"""
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
"""
if not os.path.exists(input_file):
raise FFmpegNormalizeError("file " + input_file + " does not exist")
ext = os.path.splitext(output_file)[1][1:]
if (self.audio_codec is None or 'pcm' in self.audio_codec) and ext in PCM_INCOMPATIBLE_EXTS:
raise FFmpegNormalizeError(
"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.".format(ext)
)
mf = MediaFile(self, input_file, output_file)
self.media_files.append(mf)
self.file_count += 1 | python | def add_media_file(self, input_file, output_file):
"""
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
"""
if not os.path.exists(input_file):
raise FFmpegNormalizeError("file " + input_file + " does not exist")
ext = os.path.splitext(output_file)[1][1:]
if (self.audio_codec is None or 'pcm' in self.audio_codec) and ext in PCM_INCOMPATIBLE_EXTS:
raise FFmpegNormalizeError(
"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.".format(ext)
)
mf = MediaFile(self, input_file, output_file)
self.media_files.append(mf)
self.file_count += 1 | [
"def",
"add_media_file",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"input_file",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"file \"",
"+",
"input_file",
"+",
"\" does not exist\"",
")",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"output_file",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"(",
"self",
".",
"audio_codec",
"is",
"None",
"or",
"'pcm'",
"in",
"self",
".",
"audio_codec",
")",
"and",
"ext",
"in",
"PCM_INCOMPATIBLE_EXTS",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.\"",
".",
"format",
"(",
"ext",
")",
")",
"mf",
"=",
"MediaFile",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
"self",
".",
"media_files",
".",
"append",
"(",
"mf",
")",
"self",
".",
"file_count",
"+=",
"1"
]
| Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file | [
"Add",
"a",
"media",
"file",
"to",
"normalize"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L141-L161 | train |
slhck/ffmpeg-normalize | ffmpeg_normalize/_ffmpeg_normalize.py | FFmpegNormalize.run_normalization | def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file)) | python | def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file)) | [
"def",
"run_normalization",
"(",
"self",
")",
":",
"for",
"index",
",",
"media_file",
"in",
"enumerate",
"(",
"tqdm",
"(",
"self",
".",
"media_files",
",",
"desc",
"=",
"\"File\"",
",",
"disable",
"=",
"not",
"self",
".",
"progress",
",",
"position",
"=",
"0",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"Normalizing file {} ({} of {})\"",
".",
"format",
"(",
"media_file",
",",
"index",
"+",
"1",
",",
"self",
".",
"file_count",
")",
")",
"media_file",
".",
"run_normalization",
"(",
")",
"logger",
".",
"info",
"(",
"\"Normalized file written to {}\"",
".",
"format",
"(",
"media_file",
".",
"output_file",
")",
")"
]
| Run the normalization procedures | [
"Run",
"the",
"normalization",
"procedures"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L163-L178 | train |
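
A minimal end-to-end sketch of the API shown in the two records above. The constructor keywords mirror the attribute names the records read (normalization_type, target_level) but are an assumption about the signature, and the file paths are placeholders:

from ffmpeg_normalize import FFmpegNormalize

norm = FFmpegNormalize(normalization_type='ebu', target_level=-23.0)
norm.add_media_file('input.mp4', 'normalized.mkv')  # validates existence and codec/extension fit
norm.run_normalization()                            # normalizes every added file in turn
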
slhck/ffmpeg-normalize | ffmpeg_normalize/_cmd_utils.py | get_ffmpeg_exe | def get_ffmpeg_exe():
"""
Return path to ffmpeg executable
"""
if 'FFMPEG_PATH' in os.environ:
ffmpeg_exe = os.environ['FFMPEG_PATH']
else:
ffmpeg_exe = which('ffmpeg')
if not ffmpeg_exe:
if which('avconv'):
raise FFmpegNormalizeError(
"avconv is not supported. "
"Please install ffmpeg from http://ffmpeg.org instead."
)
else:
raise FFmpegNormalizeError(
"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. "
"Please install ffmpeg from http://ffmpeg.org"
)
return ffmpeg_exe | python | def get_ffmpeg_exe():
"""
Return path to ffmpeg executable
"""
if 'FFMPEG_PATH' in os.environ:
ffmpeg_exe = os.environ['FFMPEG_PATH']
else:
ffmpeg_exe = which('ffmpeg')
if not ffmpeg_exe:
if which('avconv'):
raise FFmpegNormalizeError(
"avconv is not supported. "
"Please install ffmpeg from http://ffmpeg.org instead."
)
else:
raise FFmpegNormalizeError(
"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. "
"Please install ffmpeg from http://ffmpeg.org"
)
return ffmpeg_exe | [
"def",
"get_ffmpeg_exe",
"(",
")",
":",
"if",
"'FFMPEG_PATH'",
"in",
"os",
".",
"environ",
":",
"ffmpeg_exe",
"=",
"os",
".",
"environ",
"[",
"'FFMPEG_PATH'",
"]",
"else",
":",
"ffmpeg_exe",
"=",
"which",
"(",
"'ffmpeg'",
")",
"if",
"not",
"ffmpeg_exe",
":",
"if",
"which",
"(",
"'avconv'",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"avconv is not supported. \"",
"\"Please install ffmpeg from http://ffmpeg.org instead.\"",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. \"",
"\"Please install ffmpeg from http://ffmpeg.org\"",
")",
"return",
"ffmpeg_exe"
]
| Return path to ffmpeg executable | [
"Return",
"path",
"to",
"ffmpeg",
"executable"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_cmd_utils.py#L153-L174 | train |
slhck/ffmpeg-normalize | ffmpeg_normalize/_cmd_utils.py | ffmpeg_has_loudnorm | def ffmpeg_has_loudnorm():
"""
Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter
"""
cmd_runner = CommandRunner([get_ffmpeg_exe(), '-filters'])
cmd_runner.run_command()
output = cmd_runner.get_output()
if 'loudnorm' in output:
return True
else:
logger.warning(
"Your ffmpeg version does not support the 'loudnorm' filter. "
"Please make sure you are running ffmpeg v3.1 or above."
)
return False | python | def ffmpeg_has_loudnorm():
"""
Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter
"""
cmd_runner = CommandRunner([get_ffmpeg_exe(), '-filters'])
cmd_runner.run_command()
output = cmd_runner.get_output()
if 'loudnorm' in output:
return True
else:
logger.warning(
"Your ffmpeg version does not support the 'loudnorm' filter. "
"Please make sure you are running ffmpeg v3.1 or above."
)
return False | [
"def",
"ffmpeg_has_loudnorm",
"(",
")",
":",
"cmd_runner",
"=",
"CommandRunner",
"(",
"[",
"get_ffmpeg_exe",
"(",
")",
",",
"'-filters'",
"]",
")",
"cmd_runner",
".",
"run_command",
"(",
")",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"if",
"'loudnorm'",
"in",
"output",
":",
"return",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Your ffmpeg version does not support the 'loudnorm' filter. \"",
"\"Please make sure you are running ffmpeg v3.1 or above.\"",
")",
"return",
"False"
]
| Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter | [
"Run",
"feature",
"detection",
"on",
"ffmpeg",
"returns",
"True",
"if",
"ffmpeg",
"supports",
"the",
"loudnorm",
"filter"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_cmd_utils.py#L176-L191 | train |
slhck/ffmpeg-normalize | ffmpeg_normalize/_media_file.py | MediaFile.parse_streams | def parse_streams(self):
"""
Try to parse all input streams from file
"""
logger.debug("Parsing streams of {}".format(self.input_file))
cmd = [
self.ffmpeg_normalize.ffmpeg_exe, '-i', self.input_file,
'-c', 'copy', '-t', '0', '-map', '0',
'-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
cmd_runner.run_command()
output = cmd_runner.get_output()
logger.debug("Stream parsing command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
for line in output_lines:
if not line.startswith('Stream'):
continue
stream_id_match = re.search(r'#0:([\d]+)', line)
if stream_id_match:
stream_id = int(stream_id_match.group(1))
if stream_id in self._stream_ids():
continue
else:
continue
if 'Audio' in line:
logger.debug("Found audio stream at index {}".format(stream_id))
sample_rate_match = re.search(r'(\d+) Hz', line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
bit_depth_match = re.search(r's(\d+)p?,', line)
bit_depth = int(bit_depth_match.group(1)) if bit_depth_match else None
self.streams['audio'][stream_id] = AudioStream(self, stream_id, sample_rate, bit_depth)
elif 'Video' in line:
logger.debug("Found video stream at index {}".format(stream_id))
self.streams['video'][stream_id] = VideoStream(self, stream_id)
elif 'Subtitle' in line:
logger.debug("Found subtitle stream at index {}".format(stream_id))
self.streams['subtitle'][stream_id] = SubtitleStream(self, stream_id)
if not self.streams['audio']:
raise FFmpegNormalizeError(
"Input file {} does not contain any audio streams"
.format(self.input_file))
if os.path.splitext(self.output_file)[1].lower() in ['.wav', '.mp3', '.aac']:
logger.warning(
"Output file only supports one stream. "
"Keeping only first audio stream."
)
first_stream = list(self.streams['audio'].values())[0]
self.streams['audio'] = {first_stream.stream_id: first_stream}
self.streams['video'] = {}
self.streams['subtitle'] = {} | python | def parse_streams(self):
"""
Try to parse all input streams from file
"""
logger.debug("Parsing streams of {}".format(self.input_file))
cmd = [
self.ffmpeg_normalize.ffmpeg_exe, '-i', self.input_file,
'-c', 'copy', '-t', '0', '-map', '0',
'-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
cmd_runner.run_command()
output = cmd_runner.get_output()
logger.debug("Stream parsing command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
for line in output_lines:
if not line.startswith('Stream'):
continue
stream_id_match = re.search(r'#0:([\d]+)', line)
if stream_id_match:
stream_id = int(stream_id_match.group(1))
if stream_id in self._stream_ids():
continue
else:
continue
if 'Audio' in line:
logger.debug("Found audio stream at index {}".format(stream_id))
sample_rate_match = re.search(r'(\d+) Hz', line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
bit_depth_match = re.search(r's(\d+)p?,', line)
bit_depth = int(bit_depth_match.group(1)) if bit_depth_match else None
self.streams['audio'][stream_id] = AudioStream(self, stream_id, sample_rate, bit_depth)
elif 'Video' in line:
logger.debug("Found video stream at index {}".format(stream_id))
self.streams['video'][stream_id] = VideoStream(self, stream_id)
elif 'Subtitle' in line:
logger.debug("Found subtitle stream at index {}".format(stream_id))
self.streams['subtitle'][stream_id] = SubtitleStream(self, stream_id)
if not self.streams['audio']:
raise FFmpegNormalizeError(
"Input file {} does not contain any audio streams"
.format(self.input_file))
if os.path.splitext(self.output_file)[1].lower() in ['.wav', '.mp3', '.aac']:
logger.warning(
"Output file only supports one stream. "
"Keeping only first audio stream."
)
first_stream = list(self.streams['audio'].values())[0]
self.streams['audio'] = {first_stream.stream_id: first_stream}
self.streams['video'] = {}
self.streams['subtitle'] = {} | [
"def",
"parse_streams",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Parsing streams of {}\"",
".",
"format",
"(",
"self",
".",
"input_file",
")",
")",
"cmd",
"=",
"[",
"self",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-i'",
",",
"self",
".",
"input_file",
",",
"'-c'",
",",
"'copy'",
",",
"'-t'",
",",
"'0'",
",",
"'-map'",
",",
"'0'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"cmd_runner",
".",
"run_command",
"(",
")",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Stream parsing command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"output_lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"]",
"for",
"line",
"in",
"output_lines",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'Stream'",
")",
":",
"continue",
"stream_id_match",
"=",
"re",
".",
"search",
"(",
"r'#0:([\\d]+)'",
",",
"line",
")",
"if",
"stream_id_match",
":",
"stream_id",
"=",
"int",
"(",
"stream_id_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"stream_id",
"in",
"self",
".",
"_stream_ids",
"(",
")",
":",
"continue",
"else",
":",
"continue",
"if",
"'Audio'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found audio stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"sample_rate_match",
"=",
"re",
".",
"search",
"(",
"r'(\\d+) Hz'",
",",
"line",
")",
"sample_rate",
"=",
"int",
"(",
"sample_rate_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"sample_rate_match",
"else",
"None",
"bit_depth_match",
"=",
"re",
".",
"search",
"(",
"r's(\\d+)p?,'",
",",
"line",
")",
"bit_depth",
"=",
"int",
"(",
"bit_depth_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"bit_depth_match",
"else",
"None",
"self",
".",
"streams",
"[",
"'audio'",
"]",
"[",
"stream_id",
"]",
"=",
"AudioStream",
"(",
"self",
",",
"stream_id",
",",
"sample_rate",
",",
"bit_depth",
")",
"elif",
"'Video'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found video stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"self",
".",
"streams",
"[",
"'video'",
"]",
"[",
"stream_id",
"]",
"=",
"VideoStream",
"(",
"self",
",",
"stream_id",
")",
"elif",
"'Subtitle'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found subtitle stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"self",
".",
"streams",
"[",
"'subtitle'",
"]",
"[",
"stream_id",
"]",
"=",
"SubtitleStream",
"(",
"self",
",",
"stream_id",
")",
"if",
"not",
"self",
".",
"streams",
"[",
"'audio'",
"]",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Input file {} does not contain any audio streams\"",
".",
"format",
"(",
"self",
".",
"input_file",
")",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"output_file",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"'.wav'",
",",
"'.mp3'",
",",
"'.aac'",
"]",
":",
"logger",
".",
"warning",
"(",
"\"Output file only supports one stream. \"",
"\"Keeping only first audio stream.\"",
")",
"first_stream",
"=",
"list",
"(",
"self",
".",
"streams",
"[",
"'audio'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"self",
".",
"streams",
"[",
"'audio'",
"]",
"=",
"{",
"first_stream",
".",
"stream_id",
":",
"first_stream",
"}",
"self",
".",
"streams",
"[",
"'video'",
"]",
"=",
"{",
"}",
"self",
".",
"streams",
"[",
"'subtitle'",
"]",
"=",
"{",
"}"
]
| Try to parse all input streams from file | [
"Try",
"to",
"parse",
"all",
"input",
"streams",
"from",
"file"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_media_file.py#L51-L114 | train |
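
The stream-parsing regexes in the record above, applied to a typical ffmpeg stream line; the sample line itself is illustrative:

import re

line = "Stream #0:1(eng): Audio: aac (LC), 48000 Hz, stereo, fltp"
stream_id = int(re.search(r'#0:([\d]+)', line).group(1))  # 1
sample_rate = int(re.search(r'(\d+) Hz', line).group(1))  # 48000
bit_depth = re.search(r's(\d+)p?,', line)                 # None here: fltp carries no s16/s32 token
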
slhck/ffmpeg-normalize | ffmpeg_normalize/_media_file.py | MediaFile._get_audio_filter_cmd | def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu()
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label)
filter_complex_cmd = ';'.join(all_filters)
return filter_complex_cmd, output_labels | python | def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu()
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label)
filter_complex_cmd = ';'.join(all_filters)
return filter_complex_cmd, output_labels | [
"def",
"_get_audio_filter_cmd",
"(",
"self",
")",
":",
"all_filters",
"=",
"[",
"]",
"output_labels",
"=",
"[",
"]",
"for",
"audio_stream",
"in",
"self",
".",
"streams",
"[",
"'audio'",
"]",
".",
"values",
"(",
")",
":",
"if",
"self",
".",
"ffmpeg_normalize",
".",
"normalization_type",
"==",
"'ebu'",
":",
"stream_filter",
"=",
"audio_stream",
".",
"get_second_pass_opts_ebu",
"(",
")",
"else",
":",
"stream_filter",
"=",
"audio_stream",
".",
"get_second_pass_opts_peakrms",
"(",
")",
"input_label",
"=",
"'[0:{}]'",
".",
"format",
"(",
"audio_stream",
".",
"stream_id",
")",
"output_label",
"=",
"'[norm{}]'",
".",
"format",
"(",
"audio_stream",
".",
"stream_id",
")",
"output_labels",
".",
"append",
"(",
"output_label",
")",
"all_filters",
".",
"append",
"(",
"input_label",
"+",
"stream_filter",
"+",
"output_label",
")",
"filter_complex_cmd",
"=",
"';'",
".",
"join",
"(",
"all_filters",
")",
"return",
"filter_complex_cmd",
",",
"output_labels"
]
| Return filter_complex command and output labels needed | [
"Return",
"filter_complex",
"command",
"and",
"output",
"labels",
"needed"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_media_file.py#L160-L179 | train |
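
Shape of the pair returned by _get_audio_filter_cmd for a file with audio streams at indices 1 and 2, derived from the label assembly in the record; the loudnorm option bodies are elided:

filter_complex_cmd = "[0:1]loudnorm=...[norm1];[0:2]loudnorm=...[norm2]"
output_labels = ["[norm1]", "[norm2]"]
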
slhck/ffmpeg-normalize | ffmpeg_normalize/_streams.py | AudioStream.parse_volumedetect_stats | def parse_volumedetect_stats(self):
"""
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
"""
logger.info(
"Running first pass volumedetect filter for stream {}".format(self.stream_id)
)
filter_str = '[0:{}]volumedetect'.format(self.stream_id)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Volumedetect command output:")
logger.debug(output)
mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output)
if mean_volume_matches:
self.loudness_statistics['mean'] = float(mean_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get mean volume for {}".format(self.media_file.input_file)
)
max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output)
if max_volume_matches:
self.loudness_statistics['max'] = float(max_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get max volume for {}".format(self.media_file.input_file)
) | python | def parse_volumedetect_stats(self):
"""
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
"""
logger.info(
"Running first pass volumedetect filter for stream {}".format(self.stream_id)
)
filter_str = '[0:{}]volumedetect'.format(self.stream_id)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Volumedetect command output:")
logger.debug(output)
mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output)
if mean_volume_matches:
self.loudness_statistics['mean'] = float(mean_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get mean volume for {}".format(self.media_file.input_file)
)
max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output)
if max_volume_matches:
self.loudness_statistics['max'] = float(max_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get max volume for {}".format(self.media_file.input_file)
) | [
"def",
"parse_volumedetect_stats",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Running first pass volumedetect filter for stream {}\"",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
")",
"filter_str",
"=",
"'[0:{}]volumedetect'",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
"cmd",
"=",
"[",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-nostdin'",
",",
"'-y'",
",",
"'-i'",
",",
"self",
".",
"media_file",
".",
"input_file",
",",
"'-filter_complex'",
",",
"filter_str",
",",
"'-vn'",
",",
"'-sn'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"for",
"progress",
"in",
"cmd_runner",
".",
"run_ffmpeg_command",
"(",
")",
":",
"yield",
"progress",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Volumedetect command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"mean_volume_matches",
"=",
"re",
".",
"findall",
"(",
"r\"mean_volume: ([\\-\\d\\.]+) dB\"",
",",
"output",
")",
"if",
"mean_volume_matches",
":",
"self",
".",
"loudness_statistics",
"[",
"'mean'",
"]",
"=",
"float",
"(",
"mean_volume_matches",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not get mean volume for {}\"",
".",
"format",
"(",
"self",
".",
"media_file",
".",
"input_file",
")",
")",
"max_volume_matches",
"=",
"re",
".",
"findall",
"(",
"r\"max_volume: ([\\-\\d\\.]+) dB\"",
",",
"output",
")",
"if",
"max_volume_matches",
":",
"self",
".",
"loudness_statistics",
"[",
"'max'",
"]",
"=",
"float",
"(",
"max_volume_matches",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not get max volume for {}\"",
".",
"format",
"(",
"self",
".",
"media_file",
".",
"input_file",
")",
")"
]
| Use ffmpeg with volumedetect filter to get the mean volume of the input file. | [
"Use",
"ffmpeg",
"with",
"volumedetect",
"filter",
"to",
"get",
"the",
"mean",
"volume",
"of",
"the",
"input",
"file",
"."
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L78-L117 | train |
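
The mean/max regexes from the record above, run against sample volumedetect output; the dB figures and log prefix are illustrative:

import re

output = ("[Parsed_volumedetect_0 @ 0x7f] mean_volume: -17.3 dB\n"
          "[Parsed_volumedetect_0 @ 0x7f] max_volume: -3.1 dB")
mean = float(re.findall(r"mean_volume: ([\-\d\.]+) dB", output)[0])  # -17.3
peak = float(re.findall(r"max_volume: ([\-\d\.]+) dB", output)[0])   # -3.1
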
slhck/ffmpeg-normalize | ffmpeg_normalize/_streams.py | AudioStream.parse_loudnorm_stats | def parse_loudnorm_stats(self):
"""
Run a first pass loudnorm filter to get measured data.
"""
logger.info(
"Running first pass loudnorm filter for stream {}".format(self.stream_id)
)
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
filter_str = '[0:{}]'.format(self.stream_id) + \
'loudnorm=' + dict_to_filter_opts(opts)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Loudnorm first pass command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
loudnorm_start = False
loudnorm_end = False
for index, line in enumerate(output_lines):
if line.startswith('[Parsed_loudnorm'):
loudnorm_start = index + 1
continue
if loudnorm_start and line.startswith('}'):
loudnorm_end = index + 1
break
if not (loudnorm_start and loudnorm_end):
raise FFmpegNormalizeError("Could not parse loudnorm stats; no loudnorm-related output found")
try:
loudnorm_stats = json.loads('\n'.join(output_lines[loudnorm_start:loudnorm_end]))
except Exception as e:
raise FFmpegNormalizeError("Could not parse loudnorm stats; wrong JSON format in string: {}".format(e))
logger.debug("Loudnorm stats parsed: {}".format(json.dumps(loudnorm_stats)))
self.loudness_statistics['ebu'] = loudnorm_stats
for key, val in self.loudness_statistics['ebu'].items():
if key == 'normalization_type':
continue
# FIXME: drop Python 2 support and just use math.inf
if float(val) == -float("inf"):
self.loudness_statistics['ebu'][key] = -99
elif float(val) == float("inf"):
self.loudness_statistics['ebu'][key] = 0 | python | def parse_loudnorm_stats(self):
"""
Run a first pass loudnorm filter to get measured data.
"""
logger.info(
"Running first pass loudnorm filter for stream {}".format(self.stream_id)
)
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
filter_str = '[0:{}]'.format(self.stream_id) + \
'loudnorm=' + dict_to_filter_opts(opts)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Loudnorm first pass command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
loudnorm_start = False
loudnorm_end = False
for index, line in enumerate(output_lines):
if line.startswith('[Parsed_loudnorm'):
loudnorm_start = index + 1
continue
if loudnorm_start and line.startswith('}'):
loudnorm_end = index + 1
break
if not (loudnorm_start and loudnorm_end):
raise FFmpegNormalizeError("Could not parse loudnorm stats; no loudnorm-related output found")
try:
loudnorm_stats = json.loads('\n'.join(output_lines[loudnorm_start:loudnorm_end]))
except Exception as e:
raise FFmpegNormalizeError("Could not parse loudnorm stats; wrong JSON format in string: {}".format(e))
logger.debug("Loudnorm stats parsed: {}".format(json.dumps(loudnorm_stats)))
self.loudness_statistics['ebu'] = loudnorm_stats
for key, val in self.loudness_statistics['ebu'].items():
if key == 'normalization_type':
continue
# FIXME: drop Python 2 support and just use math.inf
if float(val) == -float("inf"):
self.loudness_statistics['ebu'][key] = -99
elif float(val) == float("inf"):
self.loudness_statistics['ebu'][key] = 0 | [
"def",
"parse_loudnorm_stats",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Running first pass loudnorm filter for stream {}\"",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
")",
"opts",
"=",
"{",
"'i'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"target_level",
",",
"'lra'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"loudness_range_target",
",",
"'tp'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"true_peak",
",",
"'offset'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"offset",
",",
"'print_format'",
":",
"'json'",
"}",
"if",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"dual_mono",
":",
"opts",
"[",
"'dual_mono'",
"]",
"=",
"'true'",
"filter_str",
"=",
"'[0:{}]'",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
"+",
"'loudnorm='",
"+",
"dict_to_filter_opts",
"(",
"opts",
")",
"cmd",
"=",
"[",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-nostdin'",
",",
"'-y'",
",",
"'-i'",
",",
"self",
".",
"media_file",
".",
"input_file",
",",
"'-filter_complex'",
",",
"filter_str",
",",
"'-vn'",
",",
"'-sn'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"for",
"progress",
"in",
"cmd_runner",
".",
"run_ffmpeg_command",
"(",
")",
":",
"yield",
"progress",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Loudnorm first pass command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"output_lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"]",
"loudnorm_start",
"=",
"False",
"loudnorm_end",
"=",
"False",
"for",
"index",
",",
"line",
"in",
"enumerate",
"(",
"output_lines",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'[Parsed_loudnorm'",
")",
":",
"loudnorm_start",
"=",
"index",
"+",
"1",
"continue",
"if",
"loudnorm_start",
"and",
"line",
".",
"startswith",
"(",
"'}'",
")",
":",
"loudnorm_end",
"=",
"index",
"+",
"1",
"break",
"if",
"not",
"(",
"loudnorm_start",
"and",
"loudnorm_end",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not parse loudnorm stats; no loudnorm-related output found\"",
")",
"try",
":",
"loudnorm_stats",
"=",
"json",
".",
"loads",
"(",
"'\\n'",
".",
"join",
"(",
"output_lines",
"[",
"loudnorm_start",
":",
"loudnorm_end",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not parse loudnorm stats; wrong JSON format in string: {}\"",
".",
"format",
"(",
"e",
")",
")",
"logger",
".",
"debug",
"(",
"\"Loudnorm stats parsed: {}\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"loudnorm_stats",
")",
")",
")",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"=",
"loudnorm_stats",
"for",
"key",
",",
"val",
"in",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'normalization_type'",
":",
"continue",
"# FIXME: drop Python 2 support and just use math.inf",
"if",
"float",
"(",
"val",
")",
"==",
"-",
"float",
"(",
"\"inf\"",
")",
":",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"key",
"]",
"=",
"-",
"99",
"elif",
"float",
"(",
"val",
")",
"==",
"float",
"(",
"\"inf\"",
")",
":",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"key",
"]",
"=",
"0"
]
| Run a first pass loudnorm filter to get measured data. | [
"Run",
"a",
"first",
"pass",
"loudnorm",
"filter",
"to",
"get",
"measured",
"data",
"."
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L119-L185 | train |
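
Shape of the JSON block that parse_loudnorm_stats slices out of the first-pass output. The field names are the ones the second pass consumes below; the values, and the restriction to a subset of loudnorm's fields, are illustrative:

import json

stats = json.loads("""{
    "input_i": "-27.61",
    "input_tp": "-4.47",
    "input_lra": "18.06",
    "input_thresh": "-39.20",
    "normalization_type": "dynamic",
    "target_offset": "0.58"
}""")
measured_i = float(stats["input_i"])  # -27.61; +/-inf values get clamped per the record
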
slhck/ffmpeg-normalize | ffmpeg_normalize/_streams.py | AudioStream.get_second_pass_opts_ebu | def get_second_pass_opts_ebu(self):
"""
Return second pass loudnorm filter options string for ffmpeg
"""
if not self.loudness_statistics['ebu']:
raise FFmpegNormalizeError(
"First pass not run, you must call parse_loudnorm_stats first"
)
input_i = float(self.loudness_statistics['ebu']["input_i"])
if input_i > 0:
logger.warn("Input file had measured input loudness greater than zero ({}), capping at 0".format("input_i"))
self.loudness_statistics['ebu']['input_i'] = 0
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'measured_i': float(self.loudness_statistics['ebu']['input_i']),
'measured_lra': float(self.loudness_statistics['ebu']['input_lra']),
'measured_tp': float(self.loudness_statistics['ebu']['input_tp']),
'measured_thresh': float(self.loudness_statistics['ebu']['input_thresh']),
'linear': 'true',
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
return 'loudnorm=' + dict_to_filter_opts(opts) | python | def get_second_pass_opts_ebu(self):
"""
Return second pass loudnorm filter options string for ffmpeg
"""
if not self.loudness_statistics['ebu']:
raise FFmpegNormalizeError(
"First pass not run, you must call parse_loudnorm_stats first"
)
input_i = float(self.loudness_statistics['ebu']["input_i"])
if input_i > 0:
logger.warn("Input file had measured input loudness greater than zero ({}), capping at 0".format("input_i"))
self.loudness_statistics['ebu']['input_i'] = 0
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'measured_i': float(self.loudness_statistics['ebu']['input_i']),
'measured_lra': float(self.loudness_statistics['ebu']['input_lra']),
'measured_tp': float(self.loudness_statistics['ebu']['input_tp']),
'measured_thresh': float(self.loudness_statistics['ebu']['input_thresh']),
'linear': 'true',
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
return 'loudnorm=' + dict_to_filter_opts(opts) | [
"def",
"get_second_pass_opts_ebu",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"First pass not run, you must call parse_loudnorm_stats first\"",
")",
"input_i",
"=",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"\"input_i\"",
"]",
")",
"if",
"input_i",
">",
"0",
":",
"logger",
".",
"warn",
"(",
"\"Input file had measured input loudness greater than zero ({}), capping at 0\"",
".",
"format",
"(",
"\"input_i\"",
")",
")",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_i'",
"]",
"=",
"0",
"opts",
"=",
"{",
"'i'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"target_level",
",",
"'lra'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"loudness_range_target",
",",
"'tp'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"true_peak",
",",
"'offset'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"offset",
",",
"'measured_i'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_i'",
"]",
")",
",",
"'measured_lra'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_lra'",
"]",
")",
",",
"'measured_tp'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_tp'",
"]",
")",
",",
"'measured_thresh'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_thresh'",
"]",
")",
",",
"'linear'",
":",
"'true'",
",",
"'print_format'",
":",
"'json'",
"}",
"if",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"dual_mono",
":",
"opts",
"[",
"'dual_mono'",
"]",
"=",
"'true'",
"return",
"'loudnorm='",
"+",
"dict_to_filter_opts",
"(",
"opts",
")"
]
| Return second pass loudnorm filter options string for ffmpeg | [
"Return",
"second",
"pass",
"loudnorm",
"filter",
"options",
"string",
"for",
"ffmpeg"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L187-L218 | train |
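
What the returned option string roughly looks like, assuming dict_to_filter_opts (imported by the record, not shown here) joins key=value pairs with colons in the usual ffmpeg filter syntax; the measured_* values are illustrative:

second_pass = ("loudnorm=i=-23.0:lra=7.0:tp=-2.0:offset=0.0"
               ":measured_i=-27.61:measured_lra=18.06:measured_tp=-4.47"
               ":measured_thresh=-39.2:linear=true:print_format=json")
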
slhck/ffmpeg-normalize | ffmpeg_normalize/_logger.py | setup_custom_logger | def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
# handler = logging.StreamHandler()
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
# \033[1;30m - black
# \033[1;31m - red
# \033[1;32m - green
# \033[1;33m - yellow
# \033[1;34m - blue
# \033[1;35m - magenta
# \033[1;36m - cyan
# \033[1;37m - white
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
loggers.update(dict(name=logger))
return logger | python | def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
# handler = logging.StreamHandler()
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
# \033[1;30m - black
# \033[1;31m - red
# \033[1;32m - green
# \033[1;33m - yellow
# \033[1;34m - blue
# \033[1;35m - magenta
# \033[1;36m - cyan
# \033[1;37m - white
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
loggers.update(dict(name=logger))
return logger | [
"def",
"setup_custom_logger",
"(",
"name",
")",
":",
"global",
"loggers",
"if",
"loggers",
".",
"get",
"(",
"name",
")",
":",
"return",
"loggers",
".",
"get",
"(",
"name",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
"=",
"'%(levelname)s: %(message)s'",
")",
"# handler = logging.StreamHandler()",
"handler",
"=",
"TqdmLoggingHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"# \\033[1;30m - black",
"# \\033[1;31m - red",
"# \\033[1;32m - green",
"# \\033[1;33m - yellow",
"# \\033[1;34m - blue",
"# \\033[1;35m - magenta",
"# \\033[1;36m - cyan",
"# \\033[1;37m - white",
"if",
"system",
"(",
")",
"not",
"in",
"[",
"'Windows'",
",",
"'cli'",
"]",
":",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"ERROR",
",",
"\"\\033[1;31m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"ERROR",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"WARNING",
",",
"\"\\033[1;33m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"WARNING",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"INFO",
",",
"\"\\033[1;34m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"INFO",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"DEBUG",
",",
"\"\\033[1;35m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"DEBUG",
")",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"# if (logger.hasHandlers()):",
"# logger.handlers.clear()",
"if",
"logger",
".",
"handlers",
":",
"logger",
".",
"handlers",
"=",
"[",
"]",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"loggers",
".",
"update",
"(",
"dict",
"(",
"name",
"=",
"logger",
")",
")",
"return",
"logger"
]
| Create a logger with a certain name and level | [
"Create",
"a",
"logger",
"with",
"a",
"certain",
"name",
"and",
"level"
]
| 18477a7f2d092777ee238340be40c04ecb45c132 | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_logger.py#L26-L68 | train |
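
Illustrative use of setup_custom_logger; the logger name is arbitrary. Note that the record caches the logger under the literal key 'name' (dict(name=logger)) rather than the passed name, so the memoization only takes effect once that is fixed upstream:

import logging

from ffmpeg_normalize._logger import setup_custom_logger

logger = setup_custom_logger('ffmpeg_normalize')
logger.setLevel(logging.DEBUG)  # default level in the record is WARNING
logger.info("level names are ANSI-colored outside Windows/cli")
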
PyCQA/pylint-django | pylint_django/checkers/__init__.py | register_checkers | def register_checkers(linter):
"""Register checkers."""
linter.register_checker(ModelChecker(linter))
linter.register_checker(DjangoInstalledChecker(linter))
linter.register_checker(JsonResponseChecker(linter))
linter.register_checker(FormChecker(linter)) | python | def register_checkers(linter):
"""Register checkers."""
linter.register_checker(ModelChecker(linter))
linter.register_checker(DjangoInstalledChecker(linter))
linter.register_checker(JsonResponseChecker(linter))
linter.register_checker(FormChecker(linter)) | [
"def",
"register_checkers",
"(",
"linter",
")",
":",
"linter",
".",
"register_checker",
"(",
"ModelChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"DjangoInstalledChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"JsonResponseChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"FormChecker",
"(",
"linter",
")",
")"
]
| Register checkers. | [
"Register",
"checkers",
"."
]
| 0bbee433519f48134df4a797341c4196546a454e | https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/__init__.py#L8-L13 | train |
PyCQA/pylint-django | pylint_django/checkers/db_performance.py | register | def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter) | python | def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter) | [
"def",
"register",
"(",
"linter",
")",
":",
"linter",
".",
"register_checker",
"(",
"NewDbFieldWithDefaultChecker",
"(",
"linter",
")",
")",
"if",
"not",
"compat",
".",
"LOAD_CONFIGURATION_SUPPORTED",
":",
"load_configuration",
"(",
"linter",
")"
]
| Required method to auto register this checker. | [
"Required",
"method",
"to",
"auto",
"register",
"this",
"checker",
"."
]
| 0bbee433519f48134df4a797341c4196546a454e | https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/db_performance.py#L125-L129 | train |
PyCQA/pylint-django | pylint_django/augmentations/__init__.py | ignore_import_warnings_for_related_fields | def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node) | python | def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node) | [
"def",
"ignore_import_warnings_for_related_fields",
"(",
"orig_method",
",",
"self",
",",
"node",
")",
":",
"consumer",
"=",
"self",
".",
"_to_consume",
"[",
"0",
"]",
"# pylint: disable=W0212",
"# we can disable this warning ('Access to a protected member _to_consume of a client class')",
"# as it's not actually a client class, but rather, this method is being monkey patched",
"# onto the class and so the access is valid",
"new_things",
"=",
"{",
"}",
"iterat",
"=",
"consumer",
".",
"to_consume",
".",
"items",
"if",
"PY3",
"else",
"consumer",
".",
"to_consume",
".",
"iteritems",
"for",
"name",
",",
"stmts",
"in",
"iterat",
"(",
")",
":",
"if",
"isinstance",
"(",
"stmts",
"[",
"0",
"]",
",",
"ImportFrom",
")",
":",
"if",
"any",
"(",
"[",
"n",
"[",
"0",
"]",
"in",
"(",
"'ForeignKey'",
",",
"'OneToOneField'",
")",
"for",
"n",
"in",
"stmts",
"[",
"0",
"]",
".",
"names",
"]",
")",
":",
"continue",
"new_things",
"[",
"name",
"]",
"=",
"stmts",
"consumer",
".",
"_atomic",
"=",
"ScopeConsumer",
"(",
"new_things",
",",
"consumer",
".",
"consumed",
",",
"consumer",
".",
"scope_type",
")",
"# pylint: disable=W0212",
"self",
".",
"_to_consume",
"=",
"[",
"consumer",
"]",
"# pylint: disable=W0212",
"return",
"orig_method",
"(",
"self",
",",
"node",
")"
]
| Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning | [
"Replaces",
"the",
"leave_module",
"method",
"on",
"the",
"VariablesChecker",
"class",
"to",
"prevent",
"unused",
"-",
"import",
"warnings",
"which",
"are",
"caused",
"by",
"the",
"ForeignKey",
"and",
"OneToOneField",
"transformations",
".",
"By",
"replacing",
"the",
"nodes",
"in",
"the",
"AST",
"with",
"their",
"type",
"rather",
"than",
"the",
"django",
"field",
"imports",
"of",
"the",
"form",
"from",
"django",
".",
"db",
".",
"models",
"import",
"OneToOneField",
"raise",
"an",
"unused",
"-",
"import",
"warning"
]
| 0bbee433519f48134df4a797341c4196546a454e | https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L291-L317 | train |
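
The record above takes orig_method as its first argument, which implies monkey-patch wiring along these lines; the helper name is hypothetical:

import functools

def wrap_method(cls, method_name, augmentation):
    # Replace cls.method_name so calls route through augmentation(orig, self, node).
    orig = getattr(cls, method_name)

    @functools.wraps(orig)
    def patched(self, node):
        return augmentation(orig, self, node)

    setattr(cls, method_name, patched)

# e.g. wrap_method(VariablesChecker, 'leave_module',
#                  ignore_import_warnings_for_related_fields)
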
PyCQA/pylint-django | pylint_django/augmentations/__init__.py | is_model_admin_subclass | def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin') | python | def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin') | [
"def",
"is_model_admin_subclass",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"[",
"-",
"5",
":",
"]",
"!=",
"'Admin'",
"or",
"isinstance",
"(",
"node",
".",
"parent",
",",
"ClassDef",
")",
":",
"return",
"False",
"return",
"node_is_subclass",
"(",
"node",
",",
"'django.contrib.admin.options.ModelAdmin'",
")"
]
| Checks that node is derivative of ModelAdmin class. | [
"Checks",
"that",
"node",
"is",
"derivative",
"of",
"ModelAdmin",
"class",
"."
]
| 0bbee433519f48134df4a797341c4196546a454e | https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L382-L387 | train |
PyCQA/pylint-django | pylint_django/augmentations/__init__.py | is_model_factory | def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False | python | def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False | [
"def",
"is_model_factory",
"(",
"node",
")",
":",
"try",
":",
"parent_classes",
"=",
"node",
".",
"expr",
".",
"inferred",
"(",
")",
"except",
":",
"# noqa: E722, pylint: disable=bare-except",
"return",
"False",
"parents",
"=",
"(",
"'factory.declarations.LazyFunction'",
",",
"'factory.declarations.SubFactory'",
",",
"'factory.django.DjangoModelFactory'",
")",
"for",
"parent_class",
"in",
"parent_classes",
":",
"try",
":",
"if",
"parent_class",
".",
"qname",
"(",
")",
"in",
"parents",
":",
"return",
"True",
"if",
"node_is_subclass",
"(",
"parent_class",
",",
"*",
"parents",
")",
":",
"return",
"True",
"except",
"AttributeError",
":",
"continue",
"return",
"False"
]
| Checks that node is derivative of DjangoModelFactory or SubFactory class. | [
"Checks",
"that",
"node",
"is",
"derivative",
"of",
"DjangoModelFactory",
"or",
"SubFactory",
"class",
"."
]
| 0bbee433519f48134df4a797341c4196546a454e | https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L428-L449 | train |