Code | Summary
---|---|
Please provide a description of the function:def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
|
[
"Returns a CookieJar from a key/value dictionary.\n\n :param cookie_dict: Dict of key/values to insert into CookieJar.\n :param cookiejar: (optional) A cookiejar to add the cookies to.\n :param overwrite: (optional) If False, will not replace cookies\n already in the jar with new ones.\n :rtype: CookieJar\n "
] |
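A minimal usage sketch for the helper above, assuming the requests library is installed (it ships this function in requests.cookies):
# Build a jar from a plain dict, then add to it without overwriting
# cookies that are already present.
from requests.cookies import cookiejar_from_dict

jar = cookiejar_from_dict({"session": "abc123", "theme": "dark"})
print(sorted(c.name for c in jar))          # ['session', 'theme']

jar = cookiejar_from_dict({"session": "zzz", "lang": "en"},
                          cookiejar=jar, overwrite=False)
print(jar.get("session"), jar.get("lang"))  # abc123 en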
Please provide a description of the function:def merge_cookies(cookiejar, cookies):
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
|
[
"Add cookies to cookiejar and returns a merged CookieJar.\n\n :param cookiejar: CookieJar object to add the cookies to.\n :param cookies: Dictionary or CookieJar object to be added.\n :rtype: CookieJar\n "
] |
Please provide a description of the function:def get(self, name, default=None, domain=None, path=None):
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
|
[
"Dict-like get() that also supports optional domain and path args in\n order to resolve naming collisions from using one cookie jar over\n multiple domains.\n\n .. warning:: operation is O(n), not O(1).\n "
] |
Please provide a description of the function:def set(self, name, value, **kwargs):
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
|
[
"Dict-like set() that also supports optional domain and path args in\n order to resolve naming collisions from using one cookie jar over\n multiple domains.\n "
] |
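The domain and path arguments of the get/set pair above are what make one jar safe to use across several sites; a short sketch (requests assumed installed):
from requests.cookies import RequestsCookieJar

jar = RequestsCookieJar()
jar.set("token", "a-value", domain="api.example.com", path="/")
jar.set("token", "b-value", domain="www.example.com", path="/")

# Disambiguate by domain; a bare jar.get("token") would raise
# CookieConflictError because two cookies share the name.
print(jar.get("token", domain="api.example.com"))  # a-value
print(jar.get("token", domain="www.example.com"))  # b-value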
Please provide a description of the function:def list_domains(self):
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
|
[
"Utility method to list all the domains in the jar."
] |
Please provide a description of the function:def list_paths(self):
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
|
[
"Utility method to list all the paths in the jar."
] |
Please provide a description of the function:def multiple_domains(self):
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False
|
[
"Returns True if there are multiple domains in the jar.\n Returns False otherwise.\n\n :rtype: bool\n "
] |
Please provide a description of the function:def update(self, other):
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
|
[
"Updates this jar with cookies from another CookieJar or dict-like"
] |
Please provide a description of the function:def _find(self, name, domain=None, path=None):
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
|
[
"Requests uses this method internally to get cookie values.\n\n If there are conflicting cookies, _find arbitrarily chooses one.\n See _find_no_duplicates if you want an exception thrown if there are\n conflicting cookies.\n\n :param name: a string containing name of cookie\n :param domain: (optional) string containing domain of cookie\n :param path: (optional) string containing path of cookie\n :return: cookie.value\n "
] |
Please provide a description of the function:def _find_no_duplicates(self, name, domain=None, path=None):
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
|
[
"Both ``__get_item__`` and ``get`` call this function: it's never\n used elsewhere in Requests.\n\n :param name: a string containing name of cookie\n :param domain: (optional) string containing domain of cookie\n :param path: (optional) string containing path of cookie\n :raises KeyError: if cookie is not found\n :raises CookieConflictError: if there are multiple cookies\n that match name and optionally domain and path\n :return: cookie.value\n "
] |
Please provide a description of the function:def copy(self):
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
|
[
"Return a copy of this RequestsCookieJar."
] |
Please provide a description of the function:def constrain (n, min, max):
'''This returns a number, n constrained to the min and max bounds. '''
if n < min:
return min
if n > max:
return max
return n
|
[] |
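The clamping behaviour is easy to sanity-check; this standalone sketch restates the helper with the builtin-shadowing argument names renamed:
def clamp(n, lo, hi):
    # same logic as constrain() above
    if n < lo:
        return lo
    if n > hi:
        return hi
    return n

assert clamp(5, 1, 10) == 5    # inside the bounds: unchanged
assert clamp(-3, 1, 10) == 1   # below: clamped to the lower bound
assert clamp(42, 1, 10) == 10  # above: clamped to the upper bound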
Please provide a description of the function:def _decode(self, s):
'''This converts from the external coding system (as passed to
the constructor) to the internal one (unicode). '''
if self.decoder is not None:
return self.decoder.decode(s)
else:
raise TypeError("This screen was constructed with encoding=None, "
"so it does not handle bytes.")
|
[] |
Please provide a description of the function:def _unicode(self):
'''This returns a printable representation of the screen as a unicode
string (which, under Python 3.x, is the same as 'str'). The end of each
screen line is terminated by a newline.'''
return u'\n'.join ([ u''.join(c) for c in self.w ])
|
[] |
Please provide a description of the function:def dump (self):
'''This returns a copy of the screen as a unicode string. This is similar to
__str__/__unicode__ except that lines are not terminated with line
feeds.'''
return u''.join ([ u''.join(c) for c in self.w ])
|
[] |
Please provide a description of the function:def pretty (self):
'''This returns a copy of the screen as a unicode string with an ASCII
text box around the screen border. This is similar to
__str__/__unicode__ except that it adds a box.'''
top_bot = u'+' + u'-'*self.cols + u'+\n'
return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
|
[] |
Please provide a description of the function:def lf (self):
'''This moves the cursor down with scrolling.
'''
old_r = self.cur_r
self.cursor_down()
if old_r == self.cur_r:
self.scroll_up ()
self.erase_line()
|
[] |
Please provide a description of the function:def put_abs (self, r, c, ch):
'''Screen array starts at 1 index.'''
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
if isinstance(ch, bytes):
ch = self._decode(ch)[0]
else:
ch = ch[0]
self.w[r-1][c-1] = ch
|
[] |
Please provide a description of the function:def put (self, ch):
'''This puts a character at the current cursor position.
'''
if isinstance(ch, bytes):
ch = self._decode(ch)
self.put_abs (self.cur_r, self.cur_c, ch)
|
[] |
Please provide a description of the function:def insert_abs (self, r, c, ch):
'''This inserts a character at (r,c). Everything under
and to the right is shifted right one character.
The last character of the line is lost.
'''
if isinstance(ch, bytes):
ch = self._decode(ch)
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
for ci in range (self.cols, c, -1):
self.put_abs (r,ci, self.get_abs(r,ci-1))
self.put_abs (r,c,ch)
|
[] |
Please provide a description of the function:def get_region (self, rs,cs, re,ce):
'''This returns a list of lines representing the region.
'''
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
sc = []
for r in range (rs, re+1):
line = u''
for c in range (cs, ce + 1):
ch = self.get_abs (r,c)
line = line + ch
sc.append (line)
return sc
|
[] |
Please provide a description of the function:def cursor_constrain (self):
'''This keeps the cursor within the screen area.
'''
self.cur_r = constrain (self.cur_r, 1, self.rows)
self.cur_c = constrain (self.cur_c, 1, self.cols)
|
[] |
Please provide a description of the function:def cursor_save_attrs (self): # <ESC>7
'''Save current cursor position.'''
self.cur_saved_r = self.cur_r
self.cur_saved_c = self.cur_c
|
[] |
Please provide a description of the function:def scroll_constrain (self):
'''This keeps the scroll region within the screen region.'''
if self.scroll_row_start <= 0:
self.scroll_row_start = 1
if self.scroll_row_end > self.rows:
self.scroll_row_end = self.rows
|
[] |
Please provide a description of the function:def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
'''Enable scrolling from row {start} to row {end}.'''
self.scroll_row_start = rs
self.scroll_row_end = re
self.scroll_constrain()
|
[] |
Please provide a description of the function:def scroll_down (self): # <ESC>D
'''Scroll display down one line.'''
# Screen is indexed from 1, but arrays are indexed from 0.
s = self.scroll_row_start - 1
e = self.scroll_row_end - 1
self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
|
[] |
Please provide a description of the function:def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
'''Erases from the current cursor position to the end of the current
line.'''
self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
|
[] |
Please provide a description of the function:def erase_start_of_line (self): # <ESC>[1K
'''Erases from the current cursor position to the start of the current
line.'''
self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
|
[] |
Please provide a description of the function:def erase_line (self): # <ESC>[2K
'''Erases the entire current line.'''
self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
|
[] |
Please provide a description of the function:def erase_down (self): # <ESC>[0J -or- <ESC>[J
'''Erases the screen from the current line down to the bottom of the
screen.'''
self.erase_end_of_line ()
self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
|
[] |
Please provide a description of the function:def erase_up (self): # <ESC>[1J
'''Erases the screen from the current line up to the top of the
screen.'''
self.erase_start_of_line ()
self.fill_region (self.cur_r-1, 1, 1, self.cols)
|
[] |
Please provide a description of the function:def to_int(d, key, default_to_zero=False, default=None, required=True):
value = d.get(key) or default
if (value in ["", None]) and default_to_zero:
return 0
if value is None:
if required:
raise ParseError("Unable to read %s from %s" % (key, d))
else:
return int(value)
|
[
"Pull a value from the dict and convert to int\n\n :param default_to_zero: If the value is None or empty, treat it as zero\n :param default: If the value is missing in the dict use this default\n\n "
] |
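An illustrative call pattern for the helper above, using a plain dict in place of a regex groupdict (assumes to_int and ParseError from the same module are in scope):
groups = {"tz_hour": "05", "tz_minute": None}

print(to_int(groups, "tz_hour"))                          # 5
print(to_int(groups, "tz_minute", default_to_zero=True))  # 0
print(to_int(groups, "day", required=False, default=1))   # 1
try:
    to_int(groups, "month")   # missing and required -> ParseError
except ParseError as exc:
    print("failed:", exc)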
Please provide a description of the function:def parse_timezone(matches, default_timezone=UTC):
if matches["timezone"] == "Z":
return UTC
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description)
|
[
"Parses ISO 8601 time zone specs into tzinfo offsets\n\n "
] |
Please provide a description of the function:def parse_date(datestring, default_timezone=UTC):
if not isinstance(datestring, _basestring):
raise ParseError("Expecting a string %r" % datestring)
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
tz = parse_timezone(groups, default_timezone=default_timezone)
groups["second_fraction"] = int(Decimal("0.%s" % (groups["second_fraction"] or 0)) * Decimal("1000000.0"))
try:
return datetime.datetime(
year=to_int(groups, "year"),
month=to_int(groups, "month", default=to_int(groups, "monthdash", required=False, default=1)),
day=to_int(groups, "day", default=to_int(groups, "daydash", required=False, default=1)),
hour=to_int(groups, "hour", default_to_zero=True),
minute=to_int(groups, "minute", default_to_zero=True),
second=to_int(groups, "second", default_to_zero=True),
microsecond=groups["second_fraction"],
tzinfo=tz,
)
except Exception as e:
raise ParseError(e)
|
[
"Parses ISO 8601 dates into datetime objects\n\n The timezone is parsed from the date string. However it is quite common to\n have dates without a timezone (not strictly correct). In this case the\n default timezone specified in default_timezone is used. This is UTC by\n default.\n\n :param datestring: The date to parse as a string\n :param default_timezone: A datetime tzinfo instance to use when no timezone\n is specified in the datestring. If this is set to\n None then a naive datetime object is returned.\n :returns: A datetime.datetime instance\n :raises: ParseError when there is a problem parsing the date or\n constructing the datetime instance.\n\n "
] |
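For reference, this is the public entry point of the iso8601 package; a short usage sketch (package assumed installed):
import iso8601

dt = iso8601.parse_date("2007-01-25T12:00:00Z")
print(dt.isoformat())   # 2007-01-25T12:00:00+00:00

# No timezone in the string: the default (UTC) is applied.
print(iso8601.parse_date("2007-01-25T12:00:00").tzinfo is not None)   # True

# default_timezone=None yields a naive datetime instead.
print(iso8601.parse_date("2007-01-25T12:00:00", default_timezone=None).tzinfo)   # None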
Please provide a description of the function:def default_handler(signum, frame, spinner):
spinner.fail()
spinner.stop()
sys.exit(0)
|
[
"Signal handler, used to gracefully shut down the ``spinner`` instance\n when specified signal is received by the process running the ``spinner``.\n\n ``signum`` and ``frame`` are mandatory arguments. Check ``signal.signal``\n function for more details.\n "
] |
Please provide a description of the function:def fancy_handler(signum, frame, spinner):
spinner.red.fail("✘")
spinner.stop()
sys.exit(0)
|
[
"Signal handler, used to gracefully shut down the ``spinner`` instance\n when specified signal is received by the process running the ``spinner``.\n\n ``signum`` and ``frame`` are mandatory arguments. Check ``signal.signal``\n function for more details.\n "
] |
Please provide a description of the function:def uts46_remap(domain, std3_rules=True, transitional=False):
from .uts46data import uts46data
output = u""
try:
for pos, char in enumerate(domain):
code_point = ord(char)
uts46row = uts46data[code_point if code_point < 256 else
bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
status = uts46row[1]
replacement = uts46row[2] if len(uts46row) == 3 else None
if (status == "V" or
(status == "D" and not transitional) or
(status == "3" and not std3_rules and replacement is None)):
output += char
elif replacement is not None and (status == "M" or
(status == "3" and not std3_rules) or
(status == "D" and transitional)):
output += replacement
elif status != "I":
raise IndexError()
return unicodedata.normalize("NFC", output)
except IndexError:
raise InvalidCodepoint(
"Codepoint {0} not allowed at position {1} in {2}".format(
_unot(code_point), pos + 1, repr(domain)))
|
[
"Re-map the characters in the string according to UTS46 processing."
] |
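A behaviour sketch for the remapping above; it assumes the idna package is installed, that uts46_remap is importable from the top-level package, and that the commented outputs are the expected UTS46 mappings:
import idna

print(idna.uts46_remap("CaF\u00c9"))                        # 'café'   (uppercase rows have status "M")
print(idna.uts46_remap("stra\u00dfe", transitional=False))  # 'straße' (ß is a deviation, kept)
print(idna.uts46_remap("stra\u00dfe", transitional=True))   # 'strasse' (deviation mapped)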
Please provide a description of the function:def _implementation():
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
|
[
"Return a dict with the Python implementation and version.\n\n Provide both the name and the version of the Python implementation\n currently running. For example, on CPython 2.7.5 it will return\n {'name': 'CPython', 'version': '2.7.5'}.\n\n This function works best on CPython and PyPy: in particular, it probably\n doesn't work for Jython or IronPython. Future investigation should be done\n to work out the correct shape of the code for those platforms.\n "
] |
Please provide a description of the function:def info():
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
|
[
"Generate information for a bug report."
] |
Please provide a description of the function:def package_type(self):
mapping = {'bdist_egg': u'egg', 'bdist_wheel': u'wheel',
'sdist': u'source'}
ptype = self._release['packagetype']
if ptype in mapping.keys():
return mapping[ptype]
return ptype
|
[
"\n >>> package = yarg.get('yarg')\n >>> v = \"0.1.0\"\n >>> r = package.release(v)\n >>> r.package_type\n u'wheel'\n "
] |
Please provide a description of the function:def process (self, c):
if isinstance(c, bytes):
c = self._decode(c)
self.state.process(c)
|
[
"Process a single character. Called by :meth:`write`."
] |
Please provide a description of the function:def write (self, s):
if isinstance(s, bytes):
s = self._decode(s)
for c in s:
self.process(c)
|
[
"Process text, writing it to the virtual screen while handling\n ANSI escape codes.\n "
] |
Please provide a description of the function:def write_ch (self, ch):
'''This puts a character at the current cursor position. The cursor
position is moved forward with wrap-around, but no scrolling is done if
the cursor hits the lower-right corner of the screen. '''
if isinstance(ch, bytes):
ch = self._decode(ch)
# \r produces a call to cr() and \n produces a call to crlf().
ch = ch[0]
if ch == u'\r':
self.cr()
return
if ch == u'\n':
self.crlf()
return
if ch == chr(screen.BS):
self.cursor_back()
return
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home (self.cur_r, 1)
else:
self.scroll_up ()
self.cursor_home (self.cur_r, 1)
self.erase_line()
|
[] |
Please provide a description of the function:def get_best_encoding(stream):
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
|
[
"Returns the default stream encoding if not found."
] |
Please provide a description of the function:def get_terminal_size(fallback=(80, 24)):
# Try the environment first
try:
columns = int(os.environ["COLUMNS"])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ["LINES"])
except (KeyError, ValueError):
lines = 0
# Only query if necessary
if columns <= 0 or lines <= 0:
try:
size = _get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = terminal_size(*fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return terminal_size(columns, lines)
|
[
"Get the size of the terminal window.\n\n For each of the two dimensions, the environment variable, COLUMNS\n and LINES respectively, is checked. If the variable is defined and\n the value is a positive integer, it is used.\n\n When COLUMNS or LINES is not defined, which is the common case,\n the terminal connected to sys.__stdout__ is queried\n by invoking os.get_terminal_size.\n\n If the terminal size cannot be successfully queried, either because\n the system doesn't support querying, or because we are not\n connected to a terminal, the value given in fallback parameter\n is used. Fallback defaults to (80, 24) which is the default\n size used by many terminal emulators.\n\n The value returned is a named tuple of type os.terminal_size.\n "
] |
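The precedence described above (COLUMNS/LINES first, then a real query, then the fallback) matches the stdlib equivalent, which makes the environment override easy to demonstrate:
import os
import shutil

# COLUMNS/LINES win over the actual terminal query.
os.environ["COLUMNS"] = "100"
os.environ["LINES"] = "40"
print(shutil.get_terminal_size())   # os.terminal_size(columns=100, lines=40)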
Please provide a description of the function:def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
|
[
"\n Shortcuts for generating request headers.\n\n :param keep_alive:\n If ``True``, adds 'connection: keep-alive' header.\n\n :param accept_encoding:\n Can be a boolean, list, or string.\n ``True`` translates to 'gzip,deflate'.\n List will get joined by comma.\n String will be used as provided.\n\n :param user_agent:\n String representing the user-agent you want, such as\n \"python-urllib3/0.6\"\n\n :param basic_auth:\n Colon-separated username:password string for 'authorization: basic ...'\n auth header.\n\n :param proxy_basic_auth:\n Colon-separated username:password string for 'proxy-authorization: basic ...'\n auth header.\n\n :param disable_cache:\n If ``True``, adds 'cache-control: no-cache' header.\n\n Example::\n\n >>> make_headers(keep_alive=True, user_agent=\"Batman/1.0\")\n {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}\n >>> make_headers(accept_encoding=True)\n {'accept-encoding': 'gzip,deflate'}\n "
] |
Please provide a description of the function:def set_file_position(body, pos):
if pos is not None:
rewind_body(body, pos)
elif getattr(body, 'tell', None) is not None:
try:
pos = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
|
[
"\n If a position is provided, move file to that point.\n Otherwise, we'll attempt to record a position for future use.\n "
] |
Please provide a description of the function:def rewind_body(body, body_pos):
body_seek = getattr(body, 'seek', None)
if body_seek is not None and isinstance(body_pos, integer_types):
try:
body_seek(body_pos)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect/retry.")
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError("Unable to record file position for rewinding "
"request body during a redirect/retry.")
else:
raise ValueError("body_pos must be of type integer, "
"instead it was %s." % type(body_pos))
|
[
"\n Attempt to rewind body to a certain position.\n Primarily used for request redirects and retries.\n\n :param body:\n File-like object that supports seek.\n\n :param int pos:\n Position to seek to in file.\n "
] |
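How the two helpers above pair up across a retry, assuming they are imported from urllib3.util.request where they live in urllib3's tree:
import io
from urllib3.util.request import rewind_body, set_file_position

body = io.BytesIO(b"payload")
body.read(3)                          # pretend a prefix was already consumed
pos = set_file_position(body, None)   # no position yet -> records tell() == 3
body.read()                           # first attempt sends the rest
rewind_body(body, pos)                # retry -> seek back to the recorded spot
print(body.read())                    # b'load'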
Please provide a description of the function:def _copy_jsonsafe(value):
if isinstance(value, six.string_types + (numbers.Number,)):
return value
if isinstance(value, collections_abc.Mapping):
return {six.text_type(k): _copy_jsonsafe(v) for k, v in value.items()}
if isinstance(value, collections_abc.Iterable):
return [_copy_jsonsafe(v) for v in value]
if value is None: # This doesn't happen often for us.
return None
return six.text_type(value)
|
[
"Deep-copy a value into JSON-safe types.\n "
] |
Please provide a description of the function:def clear(self):
for key in self.conn.keys():
self.conn.delete(key)
|
[
"Helper for clearing all the keys in a database. Use with\n caution!"
] |
Please provide a description of the function:def mapping_to_frozenset(mapping):
mapping = mapping.copy()
for key, value in mapping.items():
if isinstance(value, Mapping):
mapping[key] = mapping_to_frozenset(value)
elif isinstance(value, Sequence):
value = list(value)
for i, item in enumerate(value):
if isinstance(item, Mapping):
value[i] = mapping_to_frozenset(item)
mapping[key] = tuple(value)
return frozenset(mapping.items())
|
[
" Be aware that this treats any sequence type with the equal members as\n equal. As it is used to identify equality of schemas, this can be\n considered okay as definitions are semantically equal regardless the\n container type. "
] |
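What the equality guarantee in the note above buys you, with the helper assumed in scope (it ships in cerberus.utils): semantically identical schemas produce the same hashable key.
a = {"name": {"type": "string", "allowed": ["x", "y"]}}
b = {"name": {"allowed": ("x", "y"), "type": "string"}}

key_a = mapping_to_frozenset(a)
key_b = mapping_to_frozenset(b)
print(key_a == key_b)   # True, despite list vs. tuple and key order
cache = {key_a: "validator built for this schema"}
print(cache[key_b])     # validator built for this schema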
Please provide a description of the function:def validator_factory(name, bases=None, namespace={}):
Validator = get_Validator_class()
if bases is None:
bases = (Validator,)
elif isinstance(bases, tuple):
bases += (Validator,)
else:
bases = (bases, Validator)
docstrings = [x.__doc__ for x in bases if x.__doc__]
if len(docstrings) > 1 and '__doc__' not in namespace:
namespace.update({'__doc__': '\n'.join(docstrings)})
return type(name, bases, namespace)
|
[
" Dynamically create a :class:`~cerberus.Validator` subclass.\n Docstrings of mixin-classes will be added to the resulting\n class' one if ``__doc__`` is not in :obj:`namespace`.\n\n :param name: The name of the new class.\n :type name: :class:`str`\n :param bases: Class(es) with additional and overriding attributes.\n :type bases: :class:`tuple` of or a single :term:`class`\n :param namespace: Attributes for the new class.\n :type namespace: :class:`dict`\n :return: The created class.\n "
] |
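A minimal sketch of the factory in use, assuming cerberus is installed and that validator_factory is importable from cerberus.utils as in current releases:
from cerberus import Validator
from cerberus.utils import validator_factory

class GreetingMixin(object):
    """Mixin whose docstring gets merged into the generated class."""
    def hello(self):
        return "hello from " + type(self).__name__

MyValidator = validator_factory("MyValidator", GreetingMixin)

print(issubclass(MyValidator, Validator))   # True
v = MyValidator({"name": {"type": "string"}})
print(v.validate({"name": "world"}))        # True
print(v.hello())                            # hello from MyValidator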
Please provide a description of the function:def cmdify(self):
return " ".join(
itertools.chain(
[_quote_if_contains(self.command, r"[\s^()]")],
(_quote_if_contains(arg, r"[\s^]") for arg in self.args),
)
)
|
[
"Encode into a cmd-executable string.\n\n This re-implements CreateProcess's quoting logic to turn a list of\n arguments into one single string for the shell to interpret.\n\n * All double quotes are escaped with a backslash.\n * Existing backslashes before a quote are doubled, so they are all\n escaped properly.\n * Backslashes elsewhere are left as-is; cmd will interpret them\n literally.\n\n The result is then quoted into a pair of double quotes to be grouped.\n\n An argument is intentionally not quoted if it does not contain\n whitespaces. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. everything with `echo`,\n and DOS-style (forward slash) switches.\n\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n\n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n "
] |
Please provide a description of the function:def _split_what(what):
return (
frozenset(cls for cls in what if isclass(cls)),
frozenset(cls for cls in what if isinstance(cls, Attribute)),
)
|
[
"\n Returns a tuple of `frozenset`s of classes and attributes.\n "
] |
Please provide a description of the function:def include(*what):
cls, attrs = _split_what(what)
def include_(attribute, value):
return value.__class__ in cls or attribute in attrs
return include_
|
[
"\n Whitelist *what*.\n\n :param what: What to whitelist.\n :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\\\\ s\n\n :rtype: :class:`callable`\n "
] |
Please provide a description of the function:def exclude(*what):
cls, attrs = _split_what(what)
def exclude_(attribute, value):
return value.__class__ not in cls and attribute not in attrs
return exclude_
|
[
"\n Blacklist *what*.\n\n :param what: What to blacklist.\n :type what: :class:`list` of classes or :class:`attr.Attribute`\\\\ s.\n\n :rtype: :class:`callable`\n "
] |
Please provide a description of the function:def asdict(
inst,
recurse=True,
filter=None,
dict_factory=dict,
retain_collection_types=False,
):
attrs = fields(inst.__class__)
rv = dict_factory()
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(
v, True, filter, dict_factory, retain_collection_types
)
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain_collection_types is True else list
rv[a.name] = cf(
[
_asdict_anything(
i, filter, dict_factory, retain_collection_types
)
for i in v
]
)
elif isinstance(v, dict):
df = dict_factory
rv[a.name] = df(
(
_asdict_anything(
kk, filter, df, retain_collection_types
),
_asdict_anything(
vv, filter, df, retain_collection_types
),
)
for kk, vv in iteritems(v)
)
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
|
[
"\n Return the ``attrs`` attribute values of *inst* as a dict.\n\n Optionally recurse into other ``attrs``-decorated classes.\n\n :param inst: Instance of an ``attrs``-decorated class.\n :param bool recurse: Recurse into classes that are also\n ``attrs``-decorated.\n :param callable filter: A callable whose return code determines whether an\n attribute or element is included (``True``) or dropped (``False``). Is\n called with the :class:`attr.Attribute` as the first argument and the\n value as the second argument.\n :param callable dict_factory: A callable to produce dictionaries from. For\n example, to produce ordered dictionaries instead of normal Python\n dictionaries, pass in ``collections.OrderedDict``.\n :param bool retain_collection_types: Do not convert to ``list`` when\n encountering an attribute whose type is ``tuple`` or ``set``. Only\n meaningful if ``recurse`` is ``True``.\n\n :rtype: return type of *dict_factory*\n\n :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``\n class.\n\n .. versionadded:: 16.0.0 *dict_factory*\n .. versionadded:: 16.1.0 *retain_collection_types*\n "
] |
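A quick illustration via the public attr.asdict (attrs assumed installed), showing the recursion and the filter hook:
import attr

@attr.s
class Child(object):
    name = attr.ib()

@attr.s
class Parent(object):
    child = attr.ib()
    tags = attr.ib()
    secret = attr.ib(default="hide me")

p = Parent(Child("c1"), ["a", "b"])
print(attr.asdict(p, filter=lambda a, v: a.name != "secret"))
# {'child': {'name': 'c1'}, 'tags': ['a', 'b']}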
Please provide a description of the function:def _asdict_anything(val, filter, dict_factory, retain_collection_types):
if getattr(val.__class__, "__attrs_attrs__", None) is not None:
# Attrs class.
rv = asdict(val, True, filter, dict_factory, retain_collection_types)
elif isinstance(val, (tuple, list, set)):
cf = val.__class__ if retain_collection_types is True else list
rv = cf(
[
_asdict_anything(
i, filter, dict_factory, retain_collection_types
)
for i in val
]
)
elif isinstance(val, dict):
df = dict_factory
rv = df(
(
_asdict_anything(kk, filter, df, retain_collection_types),
_asdict_anything(vv, filter, df, retain_collection_types),
)
for kk, vv in iteritems(val)
)
else:
rv = val
return rv
|
[
"\n ``asdict`` only works on attrs instances, this works on anything.\n "
] |
Please provide a description of the function:def astuple(
inst,
recurse=True,
filter=None,
tuple_factory=tuple,
retain_collection_types=False,
):
attrs = fields(inst.__class__)
rv = []
retain = retain_collection_types # Very long. :/
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv.append(
astuple(
v,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
)
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain is True else list
rv.append(
cf(
[
astuple(
j,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(j.__class__)
else j
for j in v
]
)
)
elif isinstance(v, dict):
df = v.__class__ if retain is True else dict
rv.append(
df(
(
astuple(
kk,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(kk.__class__)
else kk,
astuple(
vv,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(vv.__class__)
else vv,
)
for kk, vv in iteritems(v)
)
)
else:
rv.append(v)
else:
rv.append(v)
return rv if tuple_factory is list else tuple_factory(rv)
|
[
"\n Return the ``attrs`` attribute values of *inst* as a tuple.\n\n Optionally recurse into other ``attrs``-decorated classes.\n\n :param inst: Instance of an ``attrs``-decorated class.\n :param bool recurse: Recurse into classes that are also\n ``attrs``-decorated.\n :param callable filter: A callable whose return code determines whether an\n attribute or element is included (``True``) or dropped (``False``). Is\n called with the :class:`attr.Attribute` as the first argument and the\n value as the second argument.\n :param callable tuple_factory: A callable to produce tuples from. For\n example, to produce lists instead of tuples.\n :param bool retain_collection_types: Do not convert to ``list``\n or ``dict`` when encountering an attribute which type is\n ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is\n ``True``.\n\n :rtype: return type of *tuple_factory*\n\n :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``\n class.\n\n .. versionadded:: 16.2.0\n "
] |
Please provide a description of the function:def assoc(inst, **changes):
import warnings
warnings.warn(
"assoc is deprecated and will be removed after 2018/01.",
DeprecationWarning,
stacklevel=2,
)
new = copy.copy(inst)
attrs = fields(inst.__class__)
for k, v in iteritems(changes):
a = getattr(attrs, k, NOTHING)
if a is NOTHING:
raise AttrsAttributeNotFoundError(
"{k} is not an attrs attribute on {cl}.".format(
k=k, cl=new.__class__
)
)
_obj_setattr(new, k, v)
return new
|
[
"\n Copy *inst* and apply *changes*.\n\n :param inst: Instance of a class with ``attrs`` attributes.\n :param changes: Keyword changes in the new copy.\n\n :return: A copy of inst with *changes* incorporated.\n\n :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't\n be found on *cls*.\n :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``\n class.\n\n .. deprecated:: 17.1.0\n Use :func:`evolve` instead.\n "
] |
Please provide a description of the function:def evolve(inst, **changes):
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
if init_name not in changes:
changes[init_name] = getattr(inst, attr_name)
return cls(**changes)
|
[
"\n Create a new instance, based on *inst* with *changes* applied.\n\n :param inst: Instance of a class with ``attrs`` attributes.\n :param changes: Keyword changes in the new copy.\n\n :return: A copy of inst with *changes* incorporated.\n\n :raise TypeError: If *attr_name* couldn't be found in the class\n ``__init__``.\n :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``\n class.\n\n .. versionadded:: 17.1.0\n "
] |
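A short sketch of evolve via the public attrs API (attrs assumed installed), including the private-attribute renaming it handles:
import attr

@attr.s(frozen=True)
class Config(object):
    host = attr.ib()
    _token = attr.ib(default=None)   # private attribute -> init argument is "token"

base = Config(host="example.com", token="abc")
prod = attr.evolve(base, host="prod.example.com")

print(base)   # Config(host='example.com', _token='abc')
print(prod)   # Config(host='prod.example.com', _token='abc')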
Please provide a description of the function:def get(package_name, pypi_server="https://pypi.python.org/pypi/"):
if not pypi_server.endswith("/"):
pypi_server = pypi_server + "/"
response = requests.get("{0}{1}/json".format(pypi_server,
package_name))
if response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
if hasattr(response.content, 'decode'):
return json2package(response.content.decode())
else:
return json2package(response.content)
|
[
"\n Constructs a request to the PyPI server and returns a\n :class:`yarg.package.Package`.\n\n :param package_name: case sensitive name of the package on the PyPI server.\n :param pypi_server: (option) URL to the PyPI server.\n\n >>> import yarg\n >>> package = yarg.get('yarg')\n <Package yarg>\n "
] |
Please provide a description of the function:def resolve_ctx(cli, prog_name, args):
ctx = cli.make_context(prog_name, args, resilient_parsing=True)
args = ctx.protected_args + ctx.args
while args:
if isinstance(ctx.command, MultiCommand):
if not ctx.command.chain:
cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
ctx = cmd.make_context(cmd_name, args, parent=ctx,
resilient_parsing=True)
args = ctx.protected_args + ctx.args
else:
# Walk chained subcommand contexts saving the last one.
while args:
cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
resilient_parsing=True)
args = sub_ctx.args
ctx = sub_ctx
args = sub_ctx.protected_args + sub_ctx.args
else:
break
return ctx
|
[
"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n "
] |
Please provide a description of the function:def is_incomplete_option(all_args, cmd_param):
if not isinstance(cmd_param, Option):
return False
if cmd_param.is_flag:
return False
last_option = None
for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
if index + 1 > cmd_param.nargs:
break
if start_of_option(arg_str):
last_option = arg_str
return True if last_option and last_option in cmd_param.opts else False
|
[
"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. In other words whether this cmd_param option can still accept\n values\n "
] |
Please provide a description of the function:def is_incomplete_argument(current_params, cmd_param):
if not isinstance(cmd_param, Argument):
return False
current_param_values = current_params[cmd_param.name]
if current_param_values is None:
return True
if cmd_param.nargs == -1:
return True
if isinstance(current_param_values, abc.Iterable) \
and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
return True
return False
|
[
"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In\n other words whether or not the this cmd_param argument can still accept values\n "
] |
Please provide a description of the function:def get_user_autocompletions(ctx, args, incomplete, cmd_param):
results = []
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [(c, None)
for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
incomplete=incomplete)
results = [c if isinstance(c, tuple) else (c, None)
for c in dynamic_completions]
return results
|
[
"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n "
] |
Please provide a description of the function:def get_visible_commands_starting_with(ctx, starts_with):
for c in ctx.command.list_commands(ctx):
if c.startswith(starts_with):
command = ctx.command.get_command(ctx, c)
if not command.hidden:
yield command
|
[
"\n :param ctx: context associated with the parsed command\n :starts_with: string that visible commands must start with.\n :return: all visible (not hidden) commands that start with starts_with.\n "
] |
Please provide a description of the function:def get_choices(cli, prog_name, args, incomplete):
all_args = copy.deepcopy(args)
ctx = resolve_ctx(cli, prog_name, args)
if ctx is None:
return []
# In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
# without the '='
if start_of_option(incomplete) and WORDBREAK in incomplete:
partition_incomplete = incomplete.partition(WORDBREAK)
all_args.append(partition_incomplete[0])
incomplete = partition_incomplete[2]
elif incomplete == WORDBREAK:
incomplete = ''
completions = []
if start_of_option(incomplete):
# completions for partial options
for param in ctx.command.params:
if isinstance(param, Option) and not param.hidden:
param_opts = [param_opt for param_opt in param.opts +
param.secondary_opts if param_opt not in all_args or param.multiple]
completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])
return completions
# completion for option values from user supplied values
for param in ctx.command.params:
if is_incomplete_option(all_args, param):
return get_user_autocompletions(ctx, all_args, incomplete, param)
# completion for argument values from user supplied values
for param in ctx.command.params:
if is_incomplete_argument(ctx.params, param):
return get_user_autocompletions(ctx, all_args, incomplete, param)
add_subcommand_completions(ctx, incomplete, completions)
# Sort before returning so that proper ordering can be enforced in custom types.
return sorted(completions)
|
[
"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n "
] |
Please provide a description of the function:def interpret(marker, execution_context=None):
try:
expr, rest = parse_marker(marker)
except Exception as e:
raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
if rest and rest[0] != '#':
raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
context = dict(DEFAULT_CONTEXT)
if execution_context:
context.update(execution_context)
return evaluator.evaluate(expr, context)
|
[
"\n Interpret a marker and return a result depending on environment.\n\n :param marker: The marker to interpret.\n :type marker: str\n :param execution_context: The context used for name lookup.\n :type execution_context: mapping\n "
] |
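A usage sketch for the marker interpreter above (distlib assumed installed); the second call shows execution_context overriding a default variable:
from distlib.markers import interpret

print(interpret('python_version >= "2.7"'))                    # True on any supported Python
print(interpret('os_name == "nt"', {'os_name': 'posix'}))      # False: the override wins
print(interpret('os_name == "posix"', {'os_name': 'posix'}))   # True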
Please provide a description of the function:def evaluate(self, expr, context):
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr]
else:
assert isinstance(expr, dict)
op = expr['op']
if op not in self.operations:
raise NotImplementedError('op not implemented: %s' % op)
elhs = expr['lhs']
erhs = expr['rhs']
if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
lhs = self.evaluate(elhs, context)
rhs = self.evaluate(erhs, context)
result = self.operations[op](lhs, rhs)
return result
|
[
"\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n "
] |
Please provide a description of the function:def colored(text, color=None, on_color=None, attrs=None):
if os.getenv("ANSI_COLORS_DISABLED") is None:
style = "NORMAL"
if "bold" in attrs:
style = "BRIGHT"
attrs.remove("bold")
if color is not None:
color = color.upper()
text = to_native_string("%s%s%s%s%s") % (
to_native_string(getattr(colorama.Fore, color)),
to_native_string(getattr(colorama.Style, style)),
to_native_string(text),
to_native_string(colorama.Fore.RESET),
to_native_string(colorama.Style.NORMAL),
)
if on_color is not None:
on_color = on_color.upper()
text = to_native_string("%s%s%s%s") % (
to_native_string(getattr(colorama.Back, on_color)),
to_native_string(text),
to_native_string(colorama.Back.RESET),
to_native_string(colorama.Style.NORMAL),
)
if attrs is not None:
fmt_str = to_native_string("%s[%%dm%%s%s[9m") % (chr(27), chr(27))
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text
|
[
"Colorize text using a reimplementation of the colorizer from\n https://github.com/pavdmyt/yaspin so that it works on windows.\n\n Available text colors:\n red, green, yellow, blue, magenta, cyan, white.\n\n Available text highlights:\n on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.\n\n Available attributes:\n bold, dark, underline, blink, reverse, concealed.\n\n Example:\n colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])\n colored('Hello, World!', 'green')\n "
] |
Please provide a description of the function:def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool
return self._src.inc_n(n=n, exception=exception)
|
[
"\n Increments the parser by n characters\n if the end of the input has not been reached.\n "
] |
Please provide a description of the function:def consume(self, chars, min=0, max=-1):
return self._src.consume(chars=chars, min=min, max=max)
|
[
"\n Consume chars until min/max is satisfied is valid.\n "
] |
Please provide a description of the function:def parse_error(self, exception=ParseError, *args):
return self._src.parse_error(exception, *args)
|
[
"\n Creates a generic \"parse error\" at the current position.\n "
] |
Please provide a description of the function:def _merge_ws(self, item, container): # type: (Item, Container) -> bool
last = container.last_item()
if not last:
return False
if not isinstance(item, Whitespace) or not isinstance(last, Whitespace):
return False
start = self._idx - (len(last.s) + len(item.s))
container.body[-1] = (
container.body[-1][0],
Whitespace(self._src[start : self._idx]),
)
return True
|
[
"\n Merges the given Item with the last one currently in the given Container if\n both are whitespace items.\n\n Returns True if the items were merged.\n "
] |
Please provide a description of the function:def _is_child(self, parent, child): # type: (str, str) -> bool
parent_parts = tuple(self._split_table_name(parent))
child_parts = tuple(self._split_table_name(child))
if parent_parts == child_parts:
return False
return parent_parts == child_parts[: len(parent_parts)]
|
[
"\n Returns whether a key is strictly a child of another key.\n AoT siblings are not considered children of one another.\n "
] |
Please provide a description of the function:def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]]
self.mark()
with self._state as state:
while True:
c = self._current
if c == "\n":
# Found a newline; Return all whitespace found up to this point.
self.inc()
return None, Whitespace(self.extract())
elif c in " \t\r":
# Skip whitespace.
if not self.inc():
return None, Whitespace(self.extract())
elif c == "#":
# Found a comment, parse it
indent = self.extract()
cws, comment, trail = self._parse_comment_trail()
return None, Comment(Trivia(indent, cws, comment, trail))
elif c == "[":
# Found a table, delegate to the calling function.
return
else:
# Beginning of a KV pair.
# Return to beginning of whitespace so it gets included
# as indentation for the KV about to be parsed.
state.restore = True
break
return self._parse_key_value(True)
|
[
"\n Attempts to parse the next item and returns it, along with its key\n if the item is value-like.\n "
] |
Please provide a description of the function:def _parse_comment_trail(self): # type: () -> Tuple[str, str, str]
if self.end():
return "", "", ""
comment = ""
comment_ws = ""
self.mark()
while True:
c = self._current
if c == "\n":
break
elif c == "#":
comment_ws = self.extract()
self.mark()
self.inc() # Skip #
# The comment itself
while not self.end() and not self._current.is_nl() and self.inc():
pass
comment = self.extract()
self.mark()
break
elif c in " \t\r":
self.inc()
else:
raise self.parse_error(UnexpectedCharError, c)
if self.end():
break
while self._current.is_spaces() and self.inc():
pass
if self._current == "\r":
self.inc()
if self._current == "\n":
self.inc()
trail = ""
if self._idx != self._marker or self._current.is_ws():
trail = self.extract()
return comment_ws, comment, trail
|
[
"\n Returns (comment_ws, comment, trail)\n If there is no comment, comment_ws and comment will\n simply be empty.\n "
] |
Please provide a description of the function:def _parse_quoted_key(self): # type: () -> Key
quote_style = self._current
key_type = None
dotted = False
for t in KeyType:
if t.value == quote_style:
key_type = t
break
if key_type is None:
raise RuntimeError("Should not have entered _parse_quoted_key()")
self.inc()
self.mark()
while self._current != quote_style and self.inc():
pass
key = self.extract()
if self._current == ".":
self.inc()
dotted = True
key += "." + self._parse_key().as_string()
key_type = KeyType.Bare
else:
self.inc()
return Key(key, key_type, "", dotted)
|
[
"\n Parses a key enclosed in either single or double quotes.\n "
] |
Please provide a description of the function:def _parse_bare_key(self): # type: () -> Key
key_type = None
dotted = False
self.mark()
while self._current.is_bare_key_char() and self.inc():
pass
key = self.extract()
if self._current == ".":
self.inc()
dotted = True
key += "." + self._parse_key().as_string()
key_type = KeyType.Bare
return Key(key, key_type, "", dotted)
|
[
"\n Parses a bare key.\n "
] |
Please provide a description of the function:def _parse_value(self): # type: () -> Item
self.mark()
c = self._current
trivia = Trivia()
if c == StringType.SLB.value:
return self._parse_basic_string()
elif c == StringType.SLL.value:
return self._parse_literal_string()
elif c == BoolType.TRUE.value[0]:
return self._parse_true()
elif c == BoolType.FALSE.value[0]:
return self._parse_false()
elif c == "[":
return self._parse_array()
elif c == "{":
return self._parse_inline_table()
elif c in "+-" or self._peek(4) in {
"+inf",
"-inf",
"inf",
"+nan",
"-nan",
"nan",
}:
# Number
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
elif c in string.digits:
# Integer, Float, Date, Time or DateTime
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
m = RFC_3339_LOOSE.match(raw)
if m:
if m.group(1) and m.group(5):
# datetime
try:
return DateTime(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateTimeError)
if m.group(1):
try:
return Date(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateError)
if m.group(5):
try:
return Time(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidTimeError)
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
else:
raise self.parse_error(UnexpectedCharError, c)
|
[
"\n Attempts to parse a value at the current position.\n "
] |
Please provide a description of the function:def _parse_table(
self, parent_name=None
): # type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]]
if self._current != "[":
raise self.parse_error(
InternalParserError, "_parse_table() called on non-bracket character."
)
indent = self.extract()
self.inc() # Skip opening bracket
if self.end():
raise self.parse_error(UnexpectedEofError)
is_aot = False
if self._current == "[":
if not self.inc():
raise self.parse_error(UnexpectedEofError)
is_aot = True
# Key
self.mark()
while self._current != "]" and self.inc():
if self.end():
raise self.parse_error(UnexpectedEofError)
pass
name = self.extract()
if not name.strip():
raise self.parse_error(EmptyTableNameError)
key = Key(name, sep="")
name_parts = tuple(self._split_table_name(name))
missing_table = False
if parent_name:
parent_name_parts = tuple(self._split_table_name(parent_name))
else:
parent_name_parts = tuple()
if len(name_parts) > len(parent_name_parts) + 1:
missing_table = True
name_parts = name_parts[len(parent_name_parts) :]
values = Container(True)
self.inc() # Skip closing bracket
if is_aot:
# TODO: Verify close bracket
self.inc()
cws, comment, trail = self._parse_comment_trail()
result = Null()
if len(name_parts) > 1:
if missing_table:
# Missing super table
# i.e. a table initialized like this: [foo.bar]
# without initializing [foo]
#
# So we have to create the parent tables
table = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and name_parts[0].key in self._aot_stack,
is_super_table=True,
name=name_parts[0].key,
)
result = table
key = name_parts[0]
for i, _name in enumerate(name_parts[1:]):
if _name in table:
child = table[_name]
else:
child = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and i == len(name_parts[1:]) - 1,
is_super_table=i < len(name_parts[1:]) - 1,
name=_name.key,
display_name=name if i == len(name_parts[1:]) - 1 else None,
)
if is_aot and i == len(name_parts[1:]) - 1:
table.append(_name, AoT([child], name=table.name, parsed=True))
else:
table.append(_name, child)
table = child
values = table.value
else:
if name_parts:
key = name_parts[0]
while not self.end():
item = self._parse_item()
if item:
_key, item = item
if not self._merge_ws(item, values):
if _key is not None and _key.is_dotted():
self._handle_dotted_key(values, _key, item)
else:
values.append(_key, item)
else:
if self._current == "[":
is_aot_next, name_next = self._peek_table()
if self._is_child(name, name_next):
key_next, table_next = self._parse_table(name)
values.append(key_next, table_next)
# Picking up any sibling
while not self.end():
_, name_next = self._peek_table()
if not self._is_child(name, name_next):
break
key_next, table_next = self._parse_table(name)
values.append(key_next, table_next)
break
else:
raise self.parse_error(
InternalParserError,
"_parse_item() returned None on a non-bracket character.",
)
if isinstance(result, Null):
result = Table(
values,
Trivia(indent, cws, comment, trail),
is_aot,
name=name,
display_name=name,
)
if is_aot and (not self._aot_stack or name != self._aot_stack[-1]):
result = self._parse_aot(result, name)
return key, result
|
[
"\n Parses a table element.\n "
] |
Please provide a description of the function:def _peek_table(self): # type: () -> Tuple[bool, str]
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current != "[":
raise self.parse_error(
InternalParserError,
"_peek_table() entered on non-bracket character",
)
# AoT
self.inc()
is_aot = False
if self._current == "[":
self.inc()
is_aot = True
self.mark()
while self._current != "]" and self.inc():
table_name = self.extract()
return is_aot, table_name
|
[
"\n Peeks ahead non-intrusively by cloning then restoring the\n initial state of the parser.\n\n Returns the name of the table about to be parsed,\n as well as whether it is part of an AoT.\n "
] |
Please provide a description of the function:def _parse_aot(self, first, name_first): # type: (Table, str) -> AoT
payload = [first]
self._aot_stack.append(name_first)
while not self.end():
is_aot_next, name_next = self._peek_table()
if is_aot_next and name_next == name_first:
_, table = self._parse_table(name_first)
payload.append(table)
else:
break
self._aot_stack.pop()
return AoT(payload, parsed=True)
|
[
"\n Parses all siblings of the provided table first and bundles them into\n an AoT.\n "
] |
Please provide a description of the function:def _peek(self, n): # type: (int) -> str
# we always want to restore after exiting this scope
with self._state(restore=True):
buf = ""
for _ in range(n):
if self._current not in " \t\n\r#,]}":
buf += self._current
self.inc()
continue
break
return buf
|
[
"\n Peeks ahead n characters.\n\n n is the max number of characters that will be peeked.\n "
] |
Please provide a description of the function:def _peek_unicode(
    self, is_long
):  # type: (bool) -> Tuple[Optional[str], Optional[str]]
    # we always want to restore after exiting this scope
    with self._state(save_marker=True, restore=True):
        if self._current not in {"u", "U"}:
            raise self.parse_error(
                InternalParserError, "_peek_unicode() entered on non-unicode value"
            )

        self.inc()  # Dropping prefix
        self.mark()

        if is_long:
            chars = 8
        else:
            chars = 4

        if not self.inc_n(chars):
            value, extracted = None, None
        else:
            extracted = self.extract()

            if extracted[0].lower() == "d" and extracted[1].strip("01234567"):
                return None, None

            try:
                value = chr(int(extracted, 16))
            except ValueError:
                value = None

        return value, extracted
|
[
"\n Peeks ahead non-intrusively by cloning then restoring the\n initial state of the parser.\n\n Returns the unicode value is it's a valid one else None.\n "
] |
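The core of the check is chr(int(hex_digits, 16)) over 4 or 8 digits. A hypothetical standalone version, shown only to illustrate the validation:

    def peek_unicode_escape(digits, is_long=False):
        """Decode the hex digits of a \\uXXXX or \\UXXXXXXXX escape,
        returning (char, digits), or (None, digits) when they are invalid."""
        expected = 8 if is_long else 4
        if len(digits) != expected:
            return None, digits
        try:
            return chr(int(digits, 16)), digits
        except ValueError:
            return None, digits

    print(peek_unicode_escape("00e9"))            # ('é', '00e9')
    print(peek_unicode_escape("zzzz"))            # (None, 'zzzz')
    print(peek_unicode_escape("0001f600", True))  # ('😀', '0001f600')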
Please provide a description of the function:def split_first(s, delims):
    min_idx = None
    min_delim = None
    for d in delims:
        idx = s.find(d)
        if idx < 0:
            continue

        if min_idx is None or idx < min_idx:
            min_idx = idx
            min_delim = d

    if min_idx is None or min_idx < 0:
        return s, '', None

    return s[:min_idx], s[min_idx + 1:], min_delim
|
[
"\n Given a string and an iterable of delimiters, split on the first found\n delimiter. Return two split parts and the matched delimiter.\n\n If not found, then the first part is the full input string.\n\n Example::\n\n >>> split_first('foo/bar?baz', '?/=')\n ('foo', 'bar?baz', '/')\n >>> split_first('foo/bar?baz', '123')\n ('foo/bar?baz', '', None)\n\n Scales linearly with number of delims. Not ideal for large number of delims.\n "
] |
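For example, this is how parse_url below locates the end of the authority section — the earliest of '/', '?' or '#' wins:

    authority, rest, delim = split_first("example.com/a?b#c", ['/', '?', '#'])
    print(authority, rest, delim)  # example.com a?b#c /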
Please provide a description of the function:def parse_url(url):
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
|
[
"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n "
] |
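This looks like urllib3's URL helper; assuming urllib3 is installed, the same function is importable from urllib3.util:

    from urllib3.util import parse_url  # assumes urllib3 is installed

    u = parse_url("https://user:[email protected]:8443/path?x=1#frag")
    print(u.scheme)       # https
    print(u.auth)         # user:pw
    print(u.host)         # example.com
    print(u.port)         # 8443
    print(u.request_uri)  # /path?x=1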
Please provide a description of the function:def get_host(url):
    p = parse_url(url)
    return p.scheme or 'http', p.hostname, p.port
|
[
"\n Deprecated. Use :func:`parse_url` instead.\n "
] |
Please provide a description of the function:def request_uri(self):
    uri = self.path or '/'

    if self.query is not None:
        uri += '?' + self.query

    return uri
|
[
"Absolute path including the query string."
] |
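A small illustration of the path-or-'/' fallback, reusing parse_url from above (or from urllib3.util):

    print(parse_url("http://example.com").request_uri)      # /
    print(parse_url("http://example.com?q=1").request_uri)  # /?q=1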
Please provide a description of the function:def netloc(self):
    if self.port:
        return '%s:%d' % (self.host, self.port)
    return self.host
|
[
"Network location including host and port"
] |
Please provide a description of the function:def url(self):
    scheme, auth, host, port, path, query, fragment = self
    url = ''

    # We use "is not None" because we want things to happen with empty strings (or 0 port)
    if scheme is not None:
        url += scheme + '://'
    if auth is not None:
        url += auth + '@'
    if host is not None:
        url += host
    if port is not None:
        url += ':' + str(port)
    if path is not None:
        url += path
    if query is not None:
        url += '?' + query
    if fragment is not None:
        url += '#' + fragment

    return url
|
[
"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example: ::\n\n >>> U = parse_url('http://google.com/mail/')\n >>> U.url\n 'http://google.com/mail/'\n >>> Url('http', 'username:password', 'host.com', 80,\n ... '/path', 'query', 'fragment').url\n 'http://username:[email protected]:80/path?query#fragment'\n "
] |
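A round-trip sketch using the positional Url constructor shown in the docstring, assuming urllib3's Url namedtuple is importable from urllib3.util:

    from urllib3.util import Url, parse_url  # assumes urllib3 is installed

    u = Url('http', None, 'example.com', 8080, '/path', 'a=1', 'top')
    print(u.url)                  # http://example.com:8080/path?a=1#top
    print(u.netloc)               # example.com:8080
    print(parse_url(u.url) == u)  # True -- round-trips for this input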
Please provide a description of the function:def split_template_path(template):
    pieces = []
    for piece in template.split('/'):
        if path.sep in piece \
           or (path.altsep and path.altsep in piece) or \
           piece == path.pardir:
            raise TemplateNotFound(template)
        elif piece and piece != '.':
            pieces.append(piece)
    return pieces
|
[
"Split a path into segments and perform a sanity check. If it detects\n '..' in the path it will raise a `TemplateNotFound` error.\n "
] |
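Assuming this is Jinja2's loader helper, a quick demonstration of the sanity check:

    from jinja2.loaders import split_template_path  # assumes Jinja2 is installed
    from jinja2 import TemplateNotFound

    print(split_template_path("emails/./welcome.html"))  # ['emails', 'welcome.html']
    try:
        split_template_path("../secrets.txt")
    except TemplateNotFound:
        print("path traversal rejected")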
Please provide a description of the function:def get_source(self, environment, template):
    if not self.has_source_access:
        raise RuntimeError('%s cannot provide access to the source' %
                           self.__class__.__name__)
    raise TemplateNotFound(template)
|
[
"Get the template source, filename and reload helper for a template.\n It's passed the environment and template name and has to return a\n tuple in the form ``(source, filename, uptodate)`` or raise a\n `TemplateNotFound` error if it can't locate the template.\n\n The source part of the returned tuple must be the source of the\n template as unicode string or a ASCII bytestring. The filename should\n be the name of the file on the filesystem if it was loaded from there,\n otherwise `None`. The filename is used by python for the tracebacks\n if no loader extension is used.\n\n The last item in the tuple is the `uptodate` function. If auto\n reloading is enabled it's always called to check if the template\n changed. No arguments are passed so the function must store the\n old state somewhere (for example in a closure). If it returns `False`\n the template will be reloaded.\n "
] |
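A minimal custom-loader sketch along the lines of the Jinja2 documentation, showing the (source, filename, uptodate) contract; the directory layout and class name are hypothetical:

    from os.path import join, exists, getmtime
    from jinja2 import BaseLoader, TemplateNotFound

    class FileLoader(BaseLoader):
        def __init__(self, root):
            self.root = root

        def get_source(self, environment, template):
            path = join(self.root, template)
            if not exists(path):
                raise TemplateNotFound(template)
            mtime = getmtime(path)
            with open(path) as f:
                source = f.read()
            # uptodate() returning False forces a reload on the next request
            return source, path, lambda: mtime == getmtime(path)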
Please provide a description of the function:def load(self, environment, name, globals=None):
    code = None
    if globals is None:
        globals = {}

    # first we try to get the source for this template together
    # with the filename and the uptodate function.
    source, filename, uptodate = self.get_source(environment, name)

    # try to load the code from the bytecode cache if there is a
    # bytecode cache configured.
    bcc = environment.bytecode_cache
    if bcc is not None:
        bucket = bcc.get_bucket(environment, name, filename, source)
        code = bucket.code

    # if we don't have code so far (not cached, no longer up to
    # date) etc. we compile the template
    if code is None:
        code = environment.compile(source, name, filename)

    # if the bytecode cache is available and the bucket doesn't
    # have a code so far, we give the bucket the new code and put
    # it back to the bytecode cache.
    if bcc is not None and bucket.code is None:
        bucket.code = code
        bcc.set_bucket(bucket)

    return environment.template_class.from_code(environment, code,
                                                globals, uptodate)
|
[
"Loads a template. This method looks up the template in the cache\n or loads one by calling :meth:`get_source`. Subclasses should not\n override this method as loaders working on collections of other\n loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)\n will not call this method but `get_source` directly.\n "
] |
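In normal use this method is reached indirectly through Environment.get_template; a minimal sketch with Jinja2's DictLoader, assuming Jinja2 is installed:

    from jinja2 import Environment, DictLoader

    env = Environment(loader=DictLoader({"hello.txt": "Hello {{ name }}!"}))
    tmpl = env.get_template("hello.txt")  # ends up calling loader.load()
    print(tmpl.render(name="world"))      # Hello world!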
Please provide a description of the function:def description_of(lines, name='stdin'):
    u = UniversalDetector()
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
        if u.done:
            break
    u.close()
    result = u.result
    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if result['encoding']:
        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
                                                     result['confidence'])
    else:
        return '{0}: no result'.format(name)
|
[
"\n Return a string describing the probable encoding of a file or\n list of strings.\n\n :param lines: The lines to get the encoding of.\n :type lines: Iterable of bytes\n :param name: Name of file or collection of lines\n :type name: str\n "
] |
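This is the kind of summary the chardet command-line tool prints. The underlying detection can also be reached directly; the confidence value below is indicative only:

    import chardet  # assumes the chardet package is installed

    print(chardet.detect("naïve café".encode("utf-8")))
    # e.g. {'encoding': 'utf-8', 'confidence': 0.75, 'language': ''}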
Please provide a description of the function:def to_genshi(walker):
    text = []
    for token in walker:
        type = token["type"]
        if type in ("Characters", "SpaceCharacters"):
            text.append(token["data"])
        elif text:
            yield TEXT, "".join(text), (None, -1, -1)
            text = []

        if type in ("StartTag", "EmptyTag"):
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                           for attr, value in token["data"].items()])
            yield (START, (QName(name), attrs), (None, -1, -1))
            if type == "EmptyTag":
                type = "EndTag"

        if type == "EndTag":
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]

            yield END, QName(name), (None, -1, -1)

        elif type == "Comment":
            yield COMMENT, token["data"], (None, -1, -1)

        elif type == "Doctype":
            yield DOCTYPE, (token["name"], token["publicId"],
                            token["systemId"]), (None, -1, -1)

        else:
            pass  # FIXME: What to do?

    if text:
        yield TEXT, "".join(text), (None, -1, -1)
|
[
"Convert a tree to a genshi tree\n\n :arg walker: the treewalker to use to walk the tree to convert it\n\n :returns: generator of genshi nodes\n\n "
] |
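A hedged usage sketch, assuming both html5lib and genshi are installed (QName, Attrs and the event constants come from genshi.core):

    import html5lib
    from html5lib.treeadapters.genshi import to_genshi

    tree = html5lib.parse("<p>hi</p>")
    walker = html5lib.getTreeWalker("etree")
    for kind, data, pos in to_genshi(walker(tree)):
        print(kind, data)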
Please provide a description of the function:def parse_requirements(file_):
    modules = []
    delim = ["<", ">", "=", "!", "~"]  # https://www.python.org/dev/peps/pep-0508/#complete-grammar

    try:
        f = open_func(file_, "r")
    except OSError:
        logging.error("Failed on file: {}".format(file_))
        raise
    else:
        data = [x.strip() for x in f.readlines() if x != "\n"]
    finally:
        f.close()

    data = [x for x in data if x[0].isalpha()]

    for x in data:
        if not any([y in x for y in delim]):  # Check for modules w/o a specifier.
            modules.append({"name": x, "version": None})
        for y in x:
            if y in delim:
                module = x.split(y)
                module_name = module[0]
                module_version = module[-1].replace("=", "")
                module = {"name": module_name, "version": module_version}
                if module not in modules:
                    modules.append(module)
                break

    return modules
|
[
"Parse a requirements formatted file.\n\n Traverse a string until a delimiter is detected, then split at said\n delimiter, get module name by element index, create a dict consisting of\n module:version, and add dict to list of parsed modules.\n\n Args:\n file_: File to parse.\n\n Raises:\n OSerror: If there's any issues accessing the file.\n\n Returns:\n tuple: The contents of the file, excluding comments.\n "
] |
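A sketch of the expected input/output shape, assuming the surrounding module supplies open_func and logging as used above and that the requirements file contains simple pins:

    # requirements.txt
    #     flask==1.0.2
    #     # comment lines are dropped (first character is not alphabetic)
    #     pytest

    parse_requirements("requirements.txt")
    # -> [{'name': 'flask', 'version': '1.0.2'},
    #     {'name': 'pytest', 'version': None}]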
Please provide a description of the function:def compare_modules(file_, imports):
    modules = parse_requirements(file_)

    imports = [imports[i]["name"] for i in range(len(imports))]
    modules = [modules[i]["name"] for i in range(len(modules))]

    modules_not_imported = set(modules) - set(imports)

    return modules_not_imported
|
[
"Compare modules in a file to imported modules in a project.\n\n Args:\n file_ (str): File to parse for modules to be compared.\n imports (tuple): Modules being imported in the project.\n\n Returns:\n tuple: The modules not imported in the project, but do exist in the\n specified file.\n "
] |
Please provide a description of the function:def diff(file_, imports):
    modules_not_imported = compare_modules(file_, imports)

    logging.info("The following modules are in {} but do not seem to be imported: "
                 "{}".format(file_, ", ".join(x for x in modules_not_imported)))
|
[
"Display the difference between modules in a file and imported modules."
] |