sentence1 (stringlengths 52 to 3.87M) | sentence2 (stringlengths 1 to 47.2k) | label (stringclasses 1 value) |
---|---|---|
def _handle_break(self, node, scope, ctxt, stream):
"""Handle break node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling break")
raise errors.InterpBreak() | Handle break node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_continue(self, node, scope, ctxt, stream):
"""Handle continue node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling continue")
raise errors.InterpContinue() | Handle continue node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_decl_list(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling decl list")
# just handle each declaration
for decl in node.decls:
self._handle_node(decl, scope, ctxt, stream) | Handle declaration list nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _create_scope(self):
"""TODO: Docstring for _create_scope.
:returns: TODO
"""
res = Scope(self._log)
for func_name,native_func in six.iteritems(self._natives):
res.add_local(func_name, native_func)
return res | TODO: Docstring for _create_scope.
:returns: TODO | entailment |
def _get_value(self, node, scope, ctxt, stream):
"""Return the value of the node. It is expected to be
either an AST.ID instance or a constant
:node: TODO
:returns: TODO
"""
res = self._handle_node(node, scope, ctxt, stream)
if isinstance(res, fields.Field):
return res._pfp__value
# assume it's a constant
else:
return res | Return the value of the node. It is expected to be
either an AST.ID instance or a constant
:node: TODO
:returns: TODO | entailment |
def _resolve_to_field_class(self, names, scope):
"""Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO
"""
switch = {
"char" : "Char",
"int" : "Int",
"long" : "Int",
"int64" : "Int64",
"uint64" : "UInt64",
"short" : "Short",
"double" : "Double",
"float" : "Float",
"void" : "Void",
"string" : "String",
"wstring" : "WString"
}
core = names[-1]
if core not in switch:
# will return a list of resolved names
type_info = scope.get_type(core)
if type(type_info) is type and issubclass(type_info, fields.Field):
return type_info
resolved_names = type_info
if resolved_names is None:
raise errors.UnresolvedType(self._coord, " ".join(names), " ")
if resolved_names[-1] not in switch:
raise errors.UnresolvedType(self._coord, " ".join(names), " ".join(resolved_names))
names = copy.copy(names)
names.pop()
names += resolved_names
if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long":
res = "Int64"
else:
res = switch[names[-1]]
if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]:
res = "U" + res
cls = getattr(fields, res)
return cls | Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO | entailment |
def bits_to_bytes(bits):
"""Convert the bit list into bytes. (Assumes bits is a list
whose length is a multiple of 8)
"""
if len(bits) % 8 != 0:
raise Exception("num bits must be multiple of 8")
res = ""
for x in six.moves.range(0, len(bits), 8):
byte_bits = bits[x:x+8]
byte_val = int(''.join(map(str, byte_bits)), 2)
res += chr(byte_val)
return utils.binary(res) | Convert the bit list into bytes. (Assumes bits is a list
whose length is a multiple of 8) | entailment |
def bytes_to_bits(bytes_):
"""Convert bytes to a list of bits
"""
res = []
for x in bytes_:
if not isinstance(x, int):
x = ord(x)
res += byte_to_bits(x)
return res | Convert bytes to a list of bits | entailment |
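A minimal round-trip sketch of the two helpers above (the pfp.bitwrap import path is an assumption, and utils.binary is taken to return a bytes-like value):

```python
from pfp.bitwrap import bits_to_bytes, bytes_to_bits  # import path assumed

bits = bytes_to_bits(b"A")              # 0x41 -> [0, 1, 0, 0, 0, 0, 0, 1] (MSB first)
assert bits == [0, 1, 0, 0, 0, 0, 0, 1]
assert bits_to_bytes(bits) == b"A"      # eight bits pack back into the original byte
```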
def is_eof(self):
"""Return if the stream has reached EOF or not
without discarding any unflushed bits
:returns: True/False
"""
pos = self._stream.tell()
byte = self._stream.read(1)
self._stream.seek(pos, 0)
return utils.binary(byte) == utils.binary("") | Return if the stream has reached EOF or not
without discarding any unflushed bits
:returns: True/False | entailment |
def close(self):
"""Close the stream
"""
self.closed = True
self._flush_bits_to_stream()
self._stream.close() | Close the stream | entailment |
def read(self, num):
"""Read ``num`` number of bytes from the stream. Note that this will
automatically reset/end the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
False, then the entire stream is treated as a bitstream.
:num: number of bytes to read
:returns: the read bytes, or empty string if EOF has been reached
"""
start_pos = self.tell()
if self.padded:
# we toss out any uneven bytes
self._bits.clear()
res = utils.binary(self._stream.read(num))
else:
bits = self.read_bits(num * 8)
res = bits_to_bytes(bits)
res = utils.binary(res)
end_pos = self.tell()
self._update_consumed_ranges(start_pos, end_pos)
return res | Read ``num`` number of bytes from the stream. Note that this will
automatically reset/end the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
False, then the entire stream is treated as a bitstream.
:num: number of bytes to read
:returns: the read bytes, or empty string if EOF has been reached | entailment |
def read_bits(self, num):
"""Read ``num`` number of bits from the stream
:num: number of bits to read
:returns: a list of ``num`` bits, or an empty list if EOF has been reached
"""
if num > len(self._bits):
needed = num - len(self._bits)
num_bytes = int(math.ceil(needed / 8.0))
read_bytes = self._stream.read(num_bytes)
for bit in bytes_to_bits(read_bytes):
self._bits.append(bit)
res = []
while len(res) < num and len(self._bits) > 0:
res.append(self._bits.popleft())
return res | Read ``num`` number of bits from the stream
:num: number of bits to read
:returns: a list of ``num`` bits, or an empty list if EOF has been reached | entailment |
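A small sketch of bit-level reads against an in-memory stream; the BitwrappedStream name and single-argument constructor are assumed from the bitwrap.BitwrappedStream usage in FSeek further down:

```python
import io
from pfp.bitwrap import BitwrappedStream  # import path assumed

stream = BitwrappedStream(io.BytesIO(b"\xf0"))
assert stream.read_bits(4) == [1, 1, 1, 1]   # high nibble of 0xF0; a whole byte is buffered
assert stream.read_bits(4) == [0, 0, 0, 0]   # remaining buffered bits, no extra byte read
```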
def write(self, data):
"""Write data to the stream
:data: the data to write to the stream
:returns: None
"""
if self.padded:
# flush out any remaining bits first
if len(self._bits) > 0:
self._flush_bits_to_stream()
self._stream.write(data)
else:
# nothing to do here
if len(data) == 0:
return
bits = bytes_to_bits(data)
self.write_bits(bits) | Write data to the stream
:data: the data to write to the stream
:returns: None | entailment |
def write_bits(self, bits):
"""Write the bits to the stream.
Add the bits to the existing unflushed bits and write
complete bytes to the stream.
"""
for bit in bits:
self._bits.append(bit)
while len(self._bits) >= 8:
byte_bits = [self._bits.popleft() for x in six.moves.range(8)]
byte = bits_to_bytes(byte_bits)
self._stream.write(byte) | Write the bits to the stream.
Add the bits to the existing unflushed bits and write
complete bytes to the stream. | entailment |
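On the write side, partial bytes stay buffered until a full byte is available (same assumptions as the read sketch above):

```python
import io
from pfp.bitwrap import BitwrappedStream  # import path assumed

out = io.BytesIO()
stream = BitwrappedStream(out)
stream.write_bits([1, 0, 1, 0])        # four bits: nothing reaches the underlying stream yet
assert out.getvalue() == b""
stream.write_bits([1, 0, 1, 0])        # now 0b10101010 (0xAA) is flushed as one byte
assert out.getvalue() == b"\xaa"
```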
def tell(self):
"""Return the current position in the stream (ignoring bit
position)
:returns: int for the position in the stream
"""
res = self._stream.tell()
if len(self._bits) > 0:
res -= 1
return res | Return the current position in the stream (ignoring bit
position)
:returns: int for the position in the stream | entailment |
def seek(self, pos, seek_type=0):
"""Seek to the specified position in the stream with seek_type.
Unflushed bits will be discarded in the case of a seek.
The stream will also keep track of which bytes have and have
not been consumed so that the dom will capture all of the
bytes in the stream.
:pos: offset
:seek_type: direction
:returns: TODO
"""
self._bits.clear()
return self._stream.seek(pos, seek_type) | Seek to the specified position in the stream with seek_type.
Unflushed bits will be discarded in the case of a seek.
The stream will also keep track of which bytes have and have
not been consumed so that the dom will capture all of the
bytes in the stream.
:pos: offset
:seek_type: direction
:returns: TODO | entailment |
def size(self):
"""Return the size of the stream, or -1 if it cannot
be determined.
"""
pos = self._stream.tell()
# seek to the end of the stream
self._stream.seek(0,2)
size = self._stream.tell()
self._stream.seek(pos, 0)
return size | Return the size of the stream, or -1 if it cannot
be determined. | entailment |
def unconsumed_ranges(self):
"""Return an IntervalTree of unconsumed ranges, of the format
[start, end) with the end value not being included
"""
res = IntervalTree()
prev = None
# normal iteration is not in a predictable order
ranges = sorted([x for x in self.range_set], key=lambda x: x.begin)
for rng in ranges:
if prev is None:
prev = rng
continue
res.add(Interval(prev.end, rng.begin))
prev = rng
# means we've seeked past the end
if len(self.range_set[self.tell()]) != 1:
res.add(Interval(prev.end, self.tell()))
return res | Return an IntervalTree of unconsumed ranges, of the format
[start, end) with the end value not being included | entailment |
def _update_consumed_ranges(self, start_pos, end_pos):
"""Update the ``self.consumed_ranges`` array with which
byte ranges have been consecutively consumed.
"""
self.range_set.add(Interval(start_pos, end_pos+1))
self.range_set.merge_overlaps() | Update the ``self.range_set`` interval set with which
byte ranges have been consecutively consumed. | entailment |
def _flush_bits_to_stream(self):
"""Flush the bits to the stream. This is used when
a few bits have been read and ``self._bits`` contains unconsumed/
unflushed bits when data is to be written to the stream
"""
if len(self._bits) == 0:
return 0
bits = list(self._bits)
diff = 8 - (len(bits) % 8)
padding = [0] * diff
bits = bits + padding
self._stream.write(bits_to_bytes(bits))
self._bits.clear() | Flush the bits to the stream. This is used when
a few bits have been read and ``self._bits`` contains unconsumed/
unflushed bits when data is to be written to the stream | entailment |
def _validate_markdown(self, expfile):
'''ensure that fields are present in markdown file'''
try:
import yaml
except ImportError:
bot.error('Python yaml is required for testing yml/markdown files.')
sys.exit(1)
self.metadata = {}
uid = os.path.splitext(os.path.basename(expfile))[0]  # strip('.md') would remove characters, not the suffix
if os.path.exists(expfile):
with open(expfile, "r") as stream:
docs = yaml.load_all(stream, Loader=yaml.SafeLoader)
for doc in docs:
if isinstance(doc,dict):
for k,v in doc.items():
print('%s: %s' %(k,v))
self.metadata[k] = v
self.metadata['uid'] = uid
fields = ['github', 'preview', 'name', 'layout',
'tags', 'uid', 'maintainer']
# Tests for all fields
for field in fields:
if field not in self.metadata:
return False
if self.metadata[field] in ['',None]:
return False
if 'github' not in self.metadata['github']:
return notvalid('%s: not a valid github repository' % uid)
if not isinstance(self.metadata['tags'], list):
return notvalid('%s: tags must be a list' % uid)
if not re.search(r"(\w+://)(.+@)*([\w\d\.]+)(:[\d]+){0,1}/*(.*)", self.metadata['github']):
return notvalid('%s is not a valid URL.' % (self.metadata['github']))
return True | ensure that fields are present in markdown file | entailment |
def perform_checks(template,
do_redirect=False,
context=None,
next=None,
quiet=False):
'''return all checks for required variables before returning to
desired view
Parameters
==========
template: the html template to render
do_redirect: if True, perform a redirect and not render
context: dictionary of context variables to pass to render_template
next: a pre-defined next experiment, will calculate if None
quiet: decrease verbosity
'''
from expfactory.server import app
username = session.get('username')
subid = session.get('subid')
# If redirect, "last" is currently active (about to start)
# If render, "last" is last completed / active experiment (just finished)
last = session.get('exp_id')
if next is None:
next = app.get_next(session)
session['exp_id'] = next
# Headless mode requires token
if "token" not in session and app.headless is True:
flash('A token is required for these experiments.')
return redirect('/')
# Update the user / log
if quiet is False:
app.logger.info("[router] %s --> %s [subid] %s [user] %s" %(last,
next,
subid,
username))
if username is None and app.headless is False:
flash('You must start a session before doing experiments.')
return redirect('/')
if subid is None:
flash('You must have a participant identifier before doing experiments')
return redirect('/')
if next is None:
flash('Congratulations, you have finished the battery!')
return redirect('/finish')
if do_redirect is True:
app.logger.debug('Redirecting to %s' %template)
return redirect(template)
if context is not None and isinstance(context, dict):
app.logger.debug('Rendering %s' %template)
return render_template(template, **context)
return render_template(template) | return all checks for required variables before returning to
desired view
Parameters
==========
template: the html template to render
do_redirect: if True, perform a redirect and not render
context: dictionary of context variables to pass to render_template
next: a pre-defined next experiment, will calculate if None
quiet: decrease verbosity | entailment |
def FSeek(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSeek accepts only one argument")
pos = PYVAL(params[0])
curr_pos = stream.tell()
fsize = stream.size()
if pos > fsize:
stream.seek(fsize)
return -1
elif pos < 0:
stream.seek(0)
return -1
diff = pos - curr_pos
if diff < 0:
stream.seek(pos)
return 0
data = stream.read(diff)
# let the ctxt automatically append numbers, as needed, unless the previous
# child was also a skipped field
skipped_name = "_skipped"
if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[-1]._pfp__name.startswith("_skipped"):
old_name = ctxt._pfp__children[-1]._pfp__name
data = ctxt._pfp__children[-1].raw_data + data
skipped_name = old_name
ctxt._pfp__children = ctxt._pfp__children[:-1]
del ctxt._pfp__children_map[old_name]
tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
ctxt._pfp__add_child(skipped_name, new_field, stream)
scope.add_var(skipped_name, new_field)
return 0 | Returns 0 if successful or -1 if the address is out of range | entailment |
def FSkip(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSkip accepts only one argument")
skip_amt = PYVAL(params[0])
pos = skip_amt + stream.tell()
return FSeek([pos], ctxt, scope, stream, coord) | Returns 0 if successful or -1 if the address is out of range | entailment |
def packer_gzip(params, ctxt, scope, stream, coord):
"""``PackerGZip`` - implements both unpacking and packing. Can be used
as the ``packer`` for a field. When packing, concats the build output
of all params and gzip-compresses the result. When unpacking, concats
the build output of all params and gzip-decompresses the result.
Example:
The code below specifies that the ``data`` field is gzipped
and that once decompressed, should be parsed with ``PACK_TYPE``.
When building the ``PACK_TYPE`` structure, ``data`` will be updated
with the compressed data.::
char data[0x100]<packer=PackerGZip, packtype=PACK_TYPE>;
:pack: True if the data should be packed, false if it should be unpacked
:data: The data to operate on
:returns: An array
"""
if len(params) <= 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments")
# to gzip it (pack it)
if params[0]:
return pack_gzip(params[1:], ctxt, scope, stream, coord)
else:
return unpack_gzip(params[1:], ctxt, scope, stream, coord) | ``PackerGZip`` - implements both unpacking and packing. Can be used
as the ``packer`` for a field. When packing, concats the build output
of all params and gzip-compresses the result. When unpacking, concats
the build output of all params and gzip-decompresses the result.
Example:
The code below specifies that the ``data`` field is gzipped
and that once decompressed, should be parsed with ``PACK_TYPE``.
When building the ``PACK_TYPE`` structure, ``data`` will be updated
with the compressed data.::
char data[0x100]<packer=PackerGZip, packtype=PACK_TYPE>;
:pack: True if the data should be packed, false if it should be unpacked
:data: The data to operate on
:returns: An array | entailment |
def pack_gzip(params, ctxt, scope, stream, coord):
"""``PackGZip`` - Concats the build output of all params and gzips the
resulting data, returning a char array.
Example: ::
char data[0x100]<pack=PackGZip, ...>;
"""
if len(params) == 0:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least one argument")
built = utils.binary("")
for param in params:
if isinstance(param, pfp.fields.Field):
built += param._pfp__build()
else:
built += param
return zlib.compress(built) | ``PackGZip`` - Concats the build output of all params and gzips the
resulting data, returning a char array.
Example: ::
char data[0x100]<pack=PackGZip, ...>; | entailment |
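Since non-Field params are concatenated as-is, the packer can be exercised directly with plain bytes; a sketch (the import path is assumed, and the None placeholders stand in for the unused ctxt/scope/stream/coord arguments):

```python
import zlib
from pfp.native.packers import pack_gzip  # import path assumed

compressed = pack_gzip([b"hello world"], None, None, None, None)
assert zlib.decompress(compressed) == b"hello world"
```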
def watch_length(params, ctxt, scope, stream, coord):
"""WatchLength - Watch the total length of each of the params.
Example:
The code below uses the ``WatchLength`` update function to update
the ``length`` field to the length of the ``data`` field ::
int length<watch=data, update=WatchLength>;
char data[length];
"""
if len(params) <= 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments")
to_update = params[0]
total_size = 0
for param in params[1:]:
total_size += param._pfp__width()
to_update._pfp__set_value(total_size) | WatchLength - Watch the total length of each of the params.
Example:
The code below uses the ``WatchLength`` update function to update
the ``length`` field to the length of the ``data`` field ::
int length<watch=data, update=WatchLength>;
char data[length]; | entailment |
def watch_crc(params, ctxt, scope, stream, coord):
"""WatchCrc32 - Watch the total crc32 of the params.
Example:
The code below uses the ``WatchCrc32`` update function to update
the ``crc`` field to the crc of the ``length`` and ``data`` fields ::
char length;
char data[length];
int crc<watch=length;data, update=WatchCrc32>;
"""
if len(params) <= 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments")
to_update = params[0]
total_data = utils.binary("")
for param in params[1:]:
total_data += param._pfp__build()
to_update._pfp__set_value(binascii.crc32(total_data)) | WatchCrc32 - Watch the total crc32 of the params.
Example:
The code below uses the ``WatchCrc32`` update function to update
the ``crc`` field to the crc of the ``length`` and ``data`` fields ::
char length;
char data[length];
int crc<watch=length;data, update=WatchCrc32>; | entailment |
def _validate_folder(self, folder=None):
''' validate folder takes a cloned github repo, ensures
the existence of the config.json, and validates it.
'''
from expfactory.experiment import load_experiment
if folder is None:
folder=os.path.abspath(os.getcwd())
config = load_experiment(folder, return_path=True)
if not config:
return notvalid("%s is not an experiment." %(folder))
return self._validate_config(folder) | validate folder takes a cloned github repo, ensures
the existence of the config.json, and validates it. | entailment |
def validate(self, folder, cleanup=False, validate_folder=True):
''' validate is the entrypoint to all validation, for
a folder, config, or url. If a URL is found, it is
cloned and cleaned up.
:param validate_folder: ensures the folder name (github repo)
matches.
'''
# Obtain any repository URL provided
if folder.startswith('http') or 'github' in folder:
folder = clone(folder, tmpdir=self.tmpdir)
# Load config.json if provided directly
elif os.path.basename(folder) == 'config.json':
config = os.path.dirname(folder)
return self._validate_config(config, validate_folder)
# Otherwise, validate folder and cleanup
valid = self._validate_folder(folder)
if cleanup is True:
shutil.rmtree(folder)
return valid | validate is the entrypoint to all validation, for
a folder, config, or url. If a URL is found, it is
cloned and cleaned up.
:param validate_folder: ensures the folder name (github repo)
matches. | entailment |
def _validate_config(self, folder, validate_folder=True):
''' validate config is the primary validation function that checks
for presence and format of required fields.
Parameters
==========
:folder: full path to folder with config.json
:validate_folder: if True, check that the folder name matches the exp_id
'''
config = "%s/config.json" % folder
name = os.path.basename(folder)
if not os.path.exists(config):
return notvalid("%s: config.json not found." %(folder))
# Load the config
try:
config = read_json(config)
except:
return notvalid("%s: cannot load json, invalid." %(name))
# Config.json should be single dict
if isinstance(config, list):
return notvalid("%s: config.json is a list, not valid." %(name))
# Check over required fields
fields = self.get_validation_fields()
for field,value,ftype in fields:
bot.verbose('field: %s, required: %s' %(field,value))
# Field must be in the keys if required
if field not in config.keys():
if value == 1:
return notvalid("%s: config.json is missing required field %s" %(name,field))
# Field is present, check type
else:
if not isinstance(config[field], ftype):
return notvalid("%s: invalid type, must be %s." %(name,str(ftype)))
# Expid gets special treatment
if field == "exp_id" and validate_folder is True:
if config[field] != name:
return notvalid("%s: exp_id parameter %s does not match folder name."
%(name,config[field]))
# name cannot have special characters, only _ and letters/numbers
if not re.match("^[a-z0-9_-]*$", config[field]):
message = "%s: exp_id parameter %s has invalid characters"
message += "only lowercase [a-z],[0-9], -, and _ allowed."
return notvalid(message %(name,config[field]))
return True | validate config is the primary validation function that checks
for presence and format of required fields.
Parameters
==========
:folder: full path to folder with config.json
:validate_folder: if True, check that the folder name matches the exp_id | entailment |
def get_validation_fields(self):
'''get_validation_fields returns a list of tuples (each a field)
we only require the exp_id to coincide with the folder name, for the sake
of reproducibility (given that all are served from sample image or Github
organization). All other fields are optional.
To specify runtime variables, add to "experiment_variables"
0: not required, no warning
1: required; config is not valid without it
2: not required, warning
type: indicates the variable type
'''
return [("name",1,str), # required
("time",1,int),
("url",1,str),
("description",1, str),
("instructions",1, str),
("exp_id",1,str),
("install",0, list), # list of commands to install / build experiment
("contributors",0, list), # not required
("reference",0, list),
("cognitive_atlas_task_id",0,str),
("template",0,str)] | get_validation_fields returns a list of tuples (each a field)
we only require the exp_id to coincide with the folder name, for the sake
of reproducibility (given that all are served from sample image or Github
organization). All other fields are optional.
To specify runtime variables, add to "experiment_variables"
0: not required, no warning
1: required; config is not valid without it
2: not required, warning
type: indicates the variable type | entailment |
def get_runtime_vars(varset, experiment, token):
'''get_runtime_vars will return the urlparsed string of one or more runtime
variables. If none are present, an empty string is returned.
Parameters
==========
varset: the variable set, a dictionary lookup with exp_id, token, vars
experiment: the exp_id to look up
token: the participant id (or token) that must be defined.
Returns
=======
url: the variable portion of the url to be passed to experiment, e.g,
'?words=at the thing&color=red&globalname=globalvalue'
'''
url = ''
if experiment in varset:
variables = dict()
# Participant set variables
if token in varset[experiment]:
for k,v in varset[experiment][token].items():
variables[k] = v
# Global set variables
if "*" in varset[experiment]:
for k,v in varset[experiment]['*'].items():
# Only add the variable if not already defined
if k not in variables:
variables[k] = v
# Join together, the first ? is added by calling function
varlist = ["%s=%s" %(k,v) for k,v in variables.items()]
url = '&'.join(varlist)
bot.debug('Parsed url: %s' %url)
return url | get_runtime_vars will return the urlparsed string of one or more runtime
variables. If none are present, an empty string is returned.
Parameters
==========
varset: the variable set, a dictionary lookup with exp_id, token, vars
experiment: the exp_id to look up
token: the participant id (or token) that must be defined.
Returns
=======
url: the variable portion of the url to be passed to experiment, e.g,
'?words=at the thing&color=red&globalname=globalvalue' | entailment |
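A hedged usage sketch, reusing the varset shape shown in the docstring (import path, keys, and values are illustrative):

```python
from expfactory.variables import get_runtime_vars  # import path assumed

varset = {
    'test-parse-url': {
        '123': {'color': 'red', 'words': 'at the thing'},
        '*': {'globalname': 'globalvalue'},
    }
}
url = get_runtime_vars(varset, 'test-parse-url', '123')
# Participant variables come first, then globals not already set, joined with '&'
# (the caller adds the leading '?'), e.g.:
# 'color=red&words=at the thing&globalname=globalvalue'
```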
def generate_runtime_vars(variable_file=None, sep=','):
'''generate a lookup data structure from a
delimited file. We typically obtain the file name and delimiter from
the environment by way of EXPFACTORY_RUNTIME_VARS, and
EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse
from a custom variable file by way of specifying it to the function
(preference is given here). The file should be csv with the header
"exp_id", "var_name", "var_value", "token" (in that order), one variable
assignment per row; "token" holds the participant ID (or "*" for a global
value) and "exp_id" the experiment id. No special parsing of the values is done.
Parameters
==========
variable_file: full path to the tabular file with token, exp_id, etc.
sep: the default delimiter to use, if not set in environment.
Returns
=======
varset: a dictionary lookup by exp_id and then participant ID.
{ 'test-parse-url': {
'123': {
'color': 'red',
'globalname': 'globalvalue',
'words': 'at the thing'
},
'456': {'color': 'blue',
'globalname': 'globalvalue',
'words': 'omg tacos'}
}
}
'''
# First preference goes to runtime, then environment, then unset
if variable_file is None:
if EXPFACTORY_RUNTIME_VARS is not None:
variable_file = EXPFACTORY_RUNTIME_VARS
if variable_file is not None:
if not os.path.exists(variable_file):
bot.warning('%s is set, but not found' %variable_file)
return variable_file
# If still None, no file
if variable_file is None:
return variable_file
# If we get here, we have a variable file that exists
delim = sep
if EXPFACTORY_RUNTIME_DELIM is not None:
delim = EXPFACTORY_RUNTIME_DELIM
bot.debug('Delim for variables file set to %s' % delim)
# Read in the file, generate config
varset = dict()
rows = _read_runtime_vars(variable_file)
if len(rows) > 0:
# When we get here, we are sure to have
# 'exp_id', 'var_name', 'var_value', 'token'
for row in rows:
exp_id = row[0].lower() # exp-id must be lowercase
var_name = row[1]
var_value = row[2]
token = row[3]
# Level 1: Experiment ID
if exp_id not in varset:
varset[exp_id] = {}
# Level 2: Participant ID
if token not in varset[exp_id]:
varset[exp_id][token] = {}
# If found global setting, courtesy debug message
if token == "*":
bot.debug('Found global variable %s' %var_name)
# Level 3: is the variable, issue warning if already defined
if var_name in varset[exp_id][token]:
bot.warning('%s defined twice %s:%s' %(var_name, exp_id, token))
varset[exp_id][token][var_name] = var_value
return varset | generate a lookup data structure from a
delimited file. We typically obtain the file name and delimiter from
the environment by way of EXPFACTORY_RUNTIME_VARS, and
EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse
from a custom variable file by way of specifying it to the function
(preference is given here). The file should be csv with the header
"exp_id", "var_name", "var_value", "token" (in that order), one variable
assignment per row; "token" holds the participant ID (or "*" for a global
value) and "exp_id" the experiment id. No special parsing of the values is done.
Parameters
==========
variable_file: full path to the tabular file with token, exp_id, etc.
sep: the default delimiter to use, if not set in environment.
Returns
=======
varset: a dictionary lookup by exp_id and then participant ID.
{ 'test-parse-url': {
'123': {
'color': 'red',
'globalname': 'globalvalue',
'words': 'at the thing'
},
'456': {'color': 'blue',
'globalname': 'globalvalue',
'words': 'omg tacos'}
}
} | entailment |
def _read_runtime_vars(variable_file, sep=','):
'''read the entire runtime variable file, and return a list of lists,
each corresponding to a row. We also check the header, and exit
if anything is missing or malformed.
Parameters
==========
variable_file: full path to the tabular file with token, exp_id, etc.
sep: the default delimiter to use, if not set in environment.
Returns
=======
valid_rows: a list of lists, each a valid row
[['test-parse-url', 'globalname', 'globalvalue', '*'],
['test-parse-url', 'color', 'red', '123'],
['test-parse-url', 'color', 'blue', '456'],
['test-parse-url', 'words', 'at the thing', '123'],
['test-parse-url', 'words', 'omg tacos', '456']]
'''
rows = [x for x in read_file(variable_file).split('\n') if x.strip()]
valid_rows = []
if len(rows) > 0:
# Validate header and rows, exit if not valid
header = rows.pop(0).split(sep)
validate_header(header)
for row in rows:
row = _validate_row(row, sep=sep, required_length=4)
# If the row is returned, it is valid
if row:
valid_rows.append(row)
return valid_rows | read the entire runtime variable file, and return a list of lists,
each corresponding to a row. We also check the header, and exit
if anything is missing or malformed.
Parameters
==========
variable_file: full path to the tabular file with token, exp_id, etc.
sep: the default delimiter to use, if not set in environment.
Returns
=======
valid_rows: a list of lists, each a valid row
[['test-parse-url', 'globalname', 'globalvalue', '*'],
['test-parse-url', 'color', 'red', '123'],
['test-parse-url', 'color', 'blue', '456'],
['test-parse-url', 'words', 'at the thing', '123'],
['test-parse-url', 'words', 'omg tacos', '456']] | entailment |
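For reference, a sketch of the file layout that this reader and validate_header() expect; the values are illustrative:

```python
# Header must be exactly: exp_id, var_name, var_value, token (one assignment per row);
# a token of '*' marks a global value applied to every participant.
csv_text = (
    "exp_id,var_name,var_value,token\n"
    "test-parse-url,globalname,globalvalue,*\n"
    "test-parse-url,color,red,123\n"
    "test-parse-url,words,at the thing,123\n"
)
# Rows without exactly four non-empty cells are dropped with a warning by _validate_row().
```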
def _validate_row(row, sep=',', required_length=None):
'''validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid
'''
if not isinstance(row, list):
row = _parse_row(row, sep)
if required_length:
length = len(row)
if length != required_length:
bot.warning('Row should have length %s (not %s)' %(required_length,
length))
bot.warning(row)
row = None
return row | validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid | entailment |
def _parse_row(row, sep=','):
'''parse row is a helper function to simply clean up a string, and parse
into a row based on a delimiter, dropping empty cells (length checks are done by _validate_row).
'''
parsed = row.split(sep)
parsed = [x for x in parsed if x.strip()]
return parsed | parse row is a helper function to simply clean up a string, and parse
into a row based on a delimiter, dropping empty cells (length checks are done by _validate_row). | entailment |
def validate_header(header, required_fields=None):
'''validate_header ensures that the first row contains the exp_id,
var_name, var_value, and token. Capitalization isn't important, but
ordering is. This criteria is very strict, but it's reasonable
to require.
Parameters
==========
header: the header row, as a list
required_fields: a list of required fields. We derive the required
length from this list.
Does not return, instead exits if malformed. Runs silently if OK.
'''
if required_fields is None:
required_fields = ['exp_id', 'var_name', 'var_value', 'token']
# The required length of the header based on required fields
length = len(required_fields)
# This is very strict, but no reason not to be
header = _validate_row(header, required_length=length)
header = [x.lower() for x in header]
for idx in range(length):
field = header[idx].lower().strip()
if required_fields[idx] != field:
bot.error('Malformed header field %s, exiting.' %field)
sys.exit(1) | validate_header ensures that the first row contains the exp_id,
var_name, var_value, and token. Capitalization isn't important, but
ordering is. This criteria is very strict, but it's reasonable
to require.
Parameters
==========
header: the header row, as a list
required_fields: a list of required fields. We derive the required
length from this list.
Does not return, instead exits if malformed. Runs silently if OK. | entailment |
def superuser_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a
superuser, displaying the login page if necessary.
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_superuser:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
defaults = {
'template_name': 'admin/login.html',
'redirect_field_name': request.get_full_path(),
'authentication_form': AdminAuthenticationForm,
'extra_context': {
'title': _('Log in'),
'app_path': request.get_full_path()
}
}
return LoginView.as_view(**defaults)(request)
return _checklogin | Decorator for views that checks that the user is logged in and is a
superuser, displaying the login page if necessary. | entailment |
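Typical usage of the decorator (the view name and response are illustrative):

```python
from django.http import HttpResponse

@superuser_required
def admin_dashboard(request):
    # Only reached for active superusers; everyone else is shown the login page.
    return HttpResponse("ok")
```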
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if isinstance(lines, (bytes, unicode)):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines) | Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance. | entailment |
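A short usage sketch, assuming the 'gitwildmatch' factory name is registered (as it is in the pathspec package):

```python
from pathspec import PathSpec  # import path assumed

spec = PathSpec.from_lines('gitwildmatch', [
    '*.pyc',
    'build/',
])
assert spec.match_file('pkg/module.pyc')
assert not spec.match_file('src/main.py')
```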
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file) | Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`. | entailment |
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
if isinstance(files, (bytes, unicode)):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path] | Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`). | entailment |
def match_tree(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files) | Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`). | entailment |
def pattern_to_regex(cls, pattern):
"""
Convert the pattern into a regular expression.
*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
convert into a regular expression.
Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
or :data:`None`), and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`).
"""
if isinstance(pattern, unicode):
return_type = unicode
elif isinstance(pattern, bytes):
return_type = bytes
pattern = pattern.decode(_BYTES_ENCODING)
else:
raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
pattern = pattern.strip()
if pattern.startswith('#'):
# A pattern starting with a hash ('#') serves as a comment
# (neither includes nor excludes files). Escape the hash with a
# back-slash to match a literal hash (i.e., '\#').
regex = None
include = None
elif pattern == '/':
# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
# '/' does not match any file.
regex = None
include = None
elif pattern:
if pattern.startswith('!'):
# A pattern starting with an exclamation mark ('!') negates the
# pattern (exclude instead of include). Escape the exclamation
# mark with a back-slash to match a literal exclamation mark
# (i.e., '\!').
include = False
# Remove leading exclamation mark.
pattern = pattern[1:]
else:
include = True
if pattern.startswith('\\'):
# Remove leading back-slash escape for escaped hash ('#') or
# exclamation mark ('!').
pattern = pattern[1:]
# Split pattern into segments.
pattern_segs = pattern.split('/')
# Normalize pattern to make processing easier.
if not pattern_segs[0]:
# A pattern beginning with a slash ('/') will only match paths
# directly on the root directory instead of any descendant
# paths. So, remove empty first segment to make pattern relative
# to root.
del pattern_segs[0]
elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
# A single pattern without a beginning slash ('/') will match
# any descendant path. This is equivalent to "**/{pattern}". So,
# prepend with double-asterisks to make pattern relative to
# root.
# EDGE CASE: This also holds for a single pattern with a
# trailing slash (e.g. dir/).
if pattern_segs[0] != '**':
pattern_segs.insert(0, '**')
else:
# EDGE CASE: A pattern without a beginning slash ('/') but
# contains at least one prepended directory (e.g.
# "dir/{pattern}") should not match "**/dir/{pattern}",
# according to `git check-ignore` (v2.4.1).
pass
if not pattern_segs[-1] and len(pattern_segs) > 1:
# A pattern ending with a slash ('/') will match all descendant
# paths if it is a directory but not if it is a regular file.
# This is equivalent to "{pattern}/**". So, set last segment to
# double asterisks to include all descendants.
pattern_segs[-1] = '**'
# Build regular expression from pattern.
output = ['^']
need_slash = False
end = len(pattern_segs) - 1
for i, seg in enumerate(pattern_segs):
if seg == '**':
if i == 0 and i == end:
# A pattern consisting solely of double-asterisks ('**')
# will match every path.
output.append('.+')
elif i == 0:
# A normalized pattern beginning with double-asterisks
# ('**') will match any leading path segments.
output.append('(?:.+/)?')
need_slash = False
elif i == end:
# A normalized pattern ending with double-asterisks ('**')
# will match any trailing path segments.
output.append('/.*')
else:
# A pattern with inner double-asterisks ('**') will match
# multiple (or zero) inner path segments.
output.append('(?:/.+)?')
need_slash = True
elif seg == '*':
# Match single path segment.
if need_slash:
output.append('/')
output.append('[^/]+')
need_slash = True
else:
# Match segment glob pattern.
if need_slash:
output.append('/')
output.append(cls._translate_segment_glob(seg))
if i == end and include is True:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
# EDGE CASE: However, this does not hold for exclusion cases
# according to `git check-ignore` (v2.4.1).
output.append('(?:/.*)?')
need_slash = True
output.append('$')
regex = ''.join(output)
else:
# A blank pattern is a null-operation (neither includes nor
# excludes files).
regex = None
include = None
if regex is not None and return_type is bytes:
regex = regex.encode(_BYTES_ENCODING)
return regex, include | Convert the pattern into a regular expression.
*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
convert into a regular expression.
Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
or :data:`None`), and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`). | entailment |
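The include flag in the return value encodes the three cases described above; a few illustrative calls, assuming this classmethod belongs to pathspec's GitWildMatchPattern:

```python
from pathspec.patterns import GitWildMatchPattern  # class and import path assumed

# Comments are null-operations: no regex, no include flag.
assert GitWildMatchPattern.pattern_to_regex('# comment') == (None, None)
# A leading '!' negates the pattern (exclude instead of include).
_, include = GitWildMatchPattern.pattern_to_regex('!build/')
assert include is False
# An ordinary pattern is an include.
_, include = GitWildMatchPattern.pattern_to_regex('*.py')
assert include is True
```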
def _translate_segment_glob(pattern):
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == '\\':
# Escape character, escape next character.
escape = True
elif char == '*':
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += '[^/]*'
elif char == '?':
# Single-character wildcard. Match any single character (except
# a slash).
regex += '[^/]'
elif char == '[':
# Bracket expression wildcard. Except for the beginning
# exclamation mark, the whole bracket expression can be used
# directly as regex, but we have to find where the expression
# ends.
# - "[][!]" matches ']', '[' and '!'.
# - "[]-]" matches ']' and '-'.
# - "[!]a-]" matches any character except ']', 'a' and '-'.
j = i
# Pass bracket expression negation.
if j < end and pattern[j] == '!':
j += 1
# Pass first closing bracket if it is at the beginning of the
# expression.
if j < end and pattern[j] == ']':
j += 1
# Find closing bracket. Stop once we reach the end or find it.
while j < end and pattern[j] != ']':
j += 1
if j < end:
# Found end of bracket expression. Increment j to be one past
# the closing bracket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = '['
if pattern[i] == '!':
# Bracket expression needs to be negated.
expr += '^'
i += 1
elif pattern[i] == '^':
# POSIX declares that the regex bracket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += '\\^'
i += 1
# Build regex bracket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace('\\', '\\\\')
# Add regex bracket expression to regex result.
regex += expr
# Set i to one past the closing bracket.
i = j
else:
# Failed to find closing bracket, treat opening bracket as a
# bracket literal instead of as an expression.
regex += '\\['
else:
# Regular character, escape it for regex.
regex += re.escape(char)
return regex | Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`). | entailment |
def pattern_to_regex(cls, *args, **kw):
"""
Warn about deprecation.
"""
cls._deprecated()
return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw) | Warn about deprecation. | entailment |
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel | Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*. | entailment |
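Usage sketch: collect every file beneath a directory, logging and skipping unreadable entries instead of aborting:

```python
from pathspec.util import iter_tree  # import path assumed

def log_error(exc):
    print('skipping: %s' % exc)

paths = list(iter_tree('.', on_error=log_error, follow_links=False))
```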
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str`) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolic links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# descendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurrence of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real] | Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str`) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolic links that
resolve to directories. | entailment |
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched | Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`. | entailment |
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files | Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`). | entailment |
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file | Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`). | entailment |
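For example (the explicit separators argument just makes the normalization visible on any platform):

```python
from pathspec.util import normalize_file  # import path assumed

assert normalize_file('.\\docs\\readme.md', separators=['\\']) == 'docs/readme.md'
assert normalize_file('./setup.py') == 'setup.py'   # leading './' is dropped
```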
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files | Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping each normalized file path (:class:`str`)
to the original file path (:class:`str`) | entailment |
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory | Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`. | entailment |
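Illustrative registration call; the name 'gitwildmatch' and the GitWildMatchPattern factory are assumed for the example and are not taken from the rows above:
register_pattern('gitwildmatch', GitWildMatchPattern)
# Re-registering the same name raises AlreadyRegisteredError unless override is truthy:
register_pattern('gitwildmatch', GitWildMatchPattern, override=True)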
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
) | *message* (:class:`str`) is the error message. | entailment |
def match(self, files):
"""
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
each file relative to the root directory (e.g., ``"relative/path/to/file"``).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
"""
raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__)) | Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
each file relative to the root directory (e.g., ``"relative/path/to/file"``).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`). | entailment |
def match(self, files):
"""
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
"""
if self.include is not None:
for path in files:
if self.regex.match(path) is not None:
yield path | Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`). | entailment |
def user_default_serializer(self, obj):
"""Convert a User to a cached instance representation."""
if not obj:
return None
self.user_default_add_related_pks(obj)
return dict((
('id', obj.id),
('username', obj.username),
self.field_to_json('DateTime', 'date_joined', obj.date_joined),
self.field_to_json(
'PKList', 'votes', model=Choice, pks=obj._votes_pks),
)) | Convert a User to a cached instance representation. | entailment |
def user_default_loader(self, pk):
"""Load a User from the database."""
try:
obj = User.objects.get(pk=pk)
except User.DoesNotExist:
return None
else:
self.user_default_add_related_pks(obj)
return obj | Load a User from the database. | entailment |
def user_default_add_related_pks(self, obj):
"""Add related primary keys to a User instance."""
if not hasattr(obj, '_votes_pks'):
obj._votes_pks = list(obj.votes.values_list('pk', flat=True)) | Add related primary keys to a User instance. | entailment |
def group_default_invalidator(self, obj):
"""Invalidated cached items when the Group changes."""
user_pks = User.objects.values_list('pk', flat=True)
return [('User', pk, False) for pk in user_pks] | Invalidate cached items when the Group changes.
def question_default_serializer(self, obj):
"""Convert a Question to a cached instance representation."""
if not obj:
return None
self.question_default_add_related_pks(obj)
return dict((
('id', obj.id),
('question_text', obj.question_text),
self.field_to_json('DateTime', 'pub_date', obj.pub_date),
self.field_to_json(
'PKList', 'choices', model=Choice, pks=obj._choice_pks),
)) | Convert a Question to a cached instance representation. | entailment |
def question_default_loader(self, pk):
"""Load a Question from the database."""
try:
obj = Question.objects.get(pk=pk)
except Question.DoesNotExist:
return None
else:
self.question_default_add_related_pks(obj)
return obj | Load a Question from the database. | entailment |
def question_default_add_related_pks(self, obj):
"""Add related primary keys to a Question instance."""
if not hasattr(obj, '_choice_pks'):
obj._choice_pks = list(obj.choices.values_list('pk', flat=True)) | Add related primary keys to a Question instance. | entailment |
def choice_default_serializer(self, obj):
"""Convert a Choice to a cached instance representation."""
if not obj:
return None
self.choice_default_add_related_pks(obj)
return dict((
('id', obj.id),
('choice_text', obj.choice_text),
self.field_to_json(
'PK', 'question', model=Question, pk=obj.question_id),
self.field_to_json(
'PKList', 'voters', model=User, pks=obj._voter_pks)
)) | Convert a Choice to a cached instance representation. | entailment |
def choice_default_loader(self, pk):
"""Load a Choice from the database."""
try:
obj = Choice.objects.get(pk=pk)
except Choice.DoesNotExist:
return None
else:
self.choice_default_add_related_pks(obj)
return obj | Load a Choice from the database. | entailment |
def choice_default_add_related_pks(self, obj):
"""Add related primary keys to a Choice instance."""
if not hasattr(obj, '_voter_pks'):
obj._voter_pks = obj.voters.values_list('pk', flat=True) | Add related primary keys to a Choice instance. | entailment |
def choice_default_invalidator(self, obj):
"""Invalidated cached items when the Choice changes."""
invalid = [('Question', obj.question_id, True)]
for pk in obj.voters.values_list('pk', flat=True):
invalid.append(('User', pk, False))
return invalid | Invalidate cached items when the Choice changes.
def cache(self):
"""Get the Django cache interface.
This allows disabling the cache with
settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
Django Debug Toolbar will record cache requests.
"""
if not self._cache:
use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
if use_cache:
from django.core.cache import cache
self._cache = cache
return self._cache | Get the Django cache interface.
This allows disabling the cache with
settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
Django Debug Toolbar will record cache requests. | entailment |
def delete_all_versions(self, model_name, obj_pk):
"""Delete all versions of a cached instance."""
if self.cache:
for version in self.versions:
key = self.key_for(version, model_name, obj_pk)
self.cache.delete(key) | Delete all versions of a cached instance. | entailment |
def model_function(self, model_name, version, func_name):
"""Return the model-specific caching function."""
assert func_name in ('serializer', 'loader', 'invalidator')
name = "%s_%s_%s" % (model_name.lower(), version, func_name)
return getattr(self, name) | Return the model-specific caching function. | entailment |
def field_function(self, type_code, func_name):
"""Return the field function."""
assert func_name in ('to_json', 'from_json')
name = "field_%s_%s" % (type_code.lower(), func_name)
return getattr(self, name) | Return the field function. | entailment |
def field_to_json(self, type_code, key, *args, **kwargs):
"""Convert a field to a JSON-serializable representation."""
assert ':' not in key
to_json = self.field_function(type_code, 'to_json')
key_and_type = "%s:%s" % (key, type_code)
json_value = to_json(*args, **kwargs)
return key_and_type, json_value | Convert a field to a JSON-serializable representation. | entailment |
def field_from_json(self, key_and_type, json_value):
"""Convert a JSON-serializable representation back to a field."""
assert ':' in key_and_type
key, type_code = key_and_type.split(':', 1)
from_json = self.field_function(type_code, 'from_json')
value = from_json(json_value)
return key, value | Convert a JSON-serializable representation back to a field. | entailment |
def get_instances(self, object_specs, version=None):
"""Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, cache key, object or None)
"""
ret = dict()
spec_keys = set()
cache_keys = []
version = version or self.default_version
# Construct all the cache keys to fetch
for model_name, obj_pk, obj in object_specs:
assert model_name
assert obj_pk
# Get cache keys to fetch
obj_key = self.key_for(version, model_name, obj_pk)
spec_keys.add((model_name, obj_pk, obj, obj_key))
cache_keys.append(obj_key)
# Fetch the cache keys
if cache_keys and self.cache:
cache_vals = self.cache.get_many(cache_keys)
else:
cache_vals = {}
# Use cached representations, or recreate
cache_to_set = {}
for model_name, obj_pk, obj, obj_key in spec_keys:
# Load cached objects
obj_val = cache_vals.get(obj_key)
obj_native = json.loads(obj_val) if obj_val else None
# Invalid or not set - load from database
if not obj_native:
if not obj:
loader = self.model_function(model_name, version, 'loader')
obj = loader(obj_pk)
serializer = self.model_function(
model_name, version, 'serializer')
obj_native = serializer(obj) or {}
if obj_native:
cache_to_set[obj_key] = json.dumps(obj_native)
# Get fields to convert
keys = [key for key in obj_native.keys() if ':' in key]
for key in keys:
json_value = obj_native.pop(key)
name, value = self.field_from_json(key, json_value)
assert name not in obj_native
obj_native[name] = value
if obj_native:
ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)
# Save any new cached representations
if cache_to_set and self.cache:
self.cache.set_many(cache_to_set)
return ret | Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, cache key, object or None)
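A minimal calling sketch, assuming `cache` is an instance of the class that defines these methods; note that the middle element of each value is the cache key built by key_for:
specs = [('Question', 1, None), ('Choice', 3, None)]
results = cache.get_instances(specs)
native, cache_key, obj = results[('Question', 1)]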
def update_instance(
self, model_name, pk, instance=None, version=None,
update_only=False):
"""Create or update a cached instance.
Keyword arguments are:
model_name - The name of the model
pk - The primary key of the instance
instance - The Django model instance, or None to load it
version - Version to update, or None for all versions
update_only - If False (default), then missing cache entries will be
populated and will cause follow-on invalidation. If True, then
only entries already in the cache will be updated and cause
follow-on invalidation.
Return is a list of tuples (model name, pk, version) that also need
to be updated.
"""
versions = [version] if version else self.versions
invalid = []
for version in versions:
serializer = self.model_function(model_name, version, 'serializer')
loader = self.model_function(model_name, version, 'loader')
invalidator = self.model_function(
model_name, version, 'invalidator')
if serializer is None and loader is None and invalidator is None:
continue
if self.cache is None:
continue
# Try to load the instance
if not instance:
instance = loader(pk)
if serializer:
# Get current value, if in cache
key = self.key_for(version, model_name, pk)
current_raw = self.cache.get(key)
current = json.loads(current_raw) if current_raw else None
# Get new value
if update_only and current_raw is None:
new = None
else:
new = serializer(instance)
deleted = not instance
# If cache is invalid, update cache
invalidate = (current != new) or deleted
if invalidate:
if deleted:
self.cache.delete(key)
else:
self.cache.set(key, json.dumps(new))
else:
invalidate = True
# Invalidate upstream caches
if instance and invalidate:
for upstream in invalidator(instance):
if isinstance(upstream, str):
self.cache.delete(upstream)
else:
m, i, immediate = upstream
if immediate:
invalidate_key = self.key_for(version, m, i)
self.cache.delete(invalidate_key)
invalid.append((m, i, version))
return invalid | Create or update a cached instance.
Keyword arguments are:
model_name - The name of the model
pk - The primary key of the instance
instance - The Django model instance, or None to load it
version - Version to update, or None for all versions
update_only - If False (default), then missing cache entries will be
populated and will cause follow-on invalidation. If True, then
only entries already in the cache will be updated and cause
follow-on invalidation.
Return is a list of tuples (model name, pk, version) that also need
to be updated. | entailment |
def field_date_to_json(self, day):
"""Convert a date to a date triple."""
if isinstance(day, six.string_types):
day = parse_date(day)
return [day.year, day.month, day.day] if day else None | Convert a date to a date triple. | entailment |
def field_datetime_from_json(self, json_val):
"""Convert a UTC timestamp to a UTC datetime."""
if type(json_val) == int:
seconds = int(json_val)
dt = datetime.fromtimestamp(seconds, utc)
elif json_val is None:
dt = None
else:
seconds, microseconds = [int(x) for x in json_val.split('.')]
dt = datetime.fromtimestamp(seconds, utc)
dt += timedelta(microseconds=microseconds)
return dt | Convert a UTC timestamp to a UTC datetime. | entailment |
def field_datetime_to_json(self, dt):
"""Convert a datetime to a UTC timestamp w/ microsecond resolution.
datetimes w/o timezone will be assumed to be in UTC
"""
if isinstance(dt, six.string_types):
dt = parse_datetime(dt)
if not dt:
return None
ts = timegm(dt.utctimetuple())
if dt.microsecond:
return "{0}.{1:0>6d}".format(ts, dt.microsecond)
else:
return ts | Convert a datetime to a UTC timestamp w/ microsecond resolution.
datetimes w/o timezone will be assumed to be in UTC | entailment |
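Roundtrip sketch for the two datetime helpers, assuming `cache` is an instance of the containing class and `utc` is the same tzinfo the methods use (for example django.utils.timezone.utc):
from datetime import datetime
dt = datetime(2020, 1, 2, 3, 4, 5, 250000, tzinfo=utc)
raw = cache.field_datetime_to_json(dt)        # '1577934245.250000'
assert cache.field_datetime_from_json(raw) == dt
cache.field_datetime_to_json(datetime(2020, 1, 2, 3, 4, 5, tzinfo=utc))  # 1577934245 (plain int, no microseconds)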
def field_timedelta_from_json(self, json_val):
"""Convert json_val to a timedelta object.
json_val contains total number of seconds in the timedelta.
If json_val is a string it will be converted to a float.
"""
if isinstance(json_val, str):
return timedelta(seconds=float(json_val))
elif json_val is None:
return None
else:
return timedelta(seconds=json_val) | Convert json_val to a timedelta object.
json_val contains total number of seconds in the timedelta.
If json_val is a string it will be converted to a float. | entailment |
def field_timedelta_to_json(self, td):
"""Convert timedelta to value containing total number of seconds.
If there are fractions of a second the return value will be a
string, otherwise it will be an int.
"""
if isinstance(td, six.string_types):
td = parse_duration(td)
if not td:
return None
if td.microseconds > 0:
return str(td.total_seconds())
else:
return int(td.total_seconds()) | Convert timedelta to value containing total number of seconds.
If there are fractions of a second the return value will be a
string, otherwise it will be an int. | entailment |
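A similar roundtrip sketch for the timedelta helpers (again assuming a `cache` instance):
from datetime import timedelta
td = timedelta(hours=1, milliseconds=500)
raw = cache.field_timedelta_to_json(td)       # '3600.5' (string because of the fractional second)
assert cache.field_timedelta_from_json(raw) == td
cache.field_timedelta_to_json(timedelta(minutes=2))   # 120 (plain int)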
def field_pklist_from_json(self, data):
"""Load a PkOnlyQueryset from a JSON dict.
This uses the same format as cached_queryset_from_json
"""
model = get_model(data['app'], data['model'])
return PkOnlyQueryset(self, model, data['pks']) | Load a PkOnlyQueryset from a JSON dict.
This uses the same format as cached_queryset_from_json | entailment |
def field_pklist_to_json(self, model, pks):
"""Convert a list of primary keys to a JSON dict.
This uses the same format as cached_queryset_to_json
"""
app_label = model._meta.app_label
model_name = model._meta.model_name
return {
'app': app_label,
'model': model_name,
'pks': list(pks),
} | Convert a list of primary keys to a JSON dict.
This uses the same format as cached_queryset_to_json | entailment |
def field_pk_from_json(self, data):
"""Load a PkOnlyModel from a JSON dict."""
model = get_model(data['app'], data['model'])
return PkOnlyModel(self, model, data['pk']) | Load a PkOnlyModel from a JSON dict. | entailment |
def field_pk_to_json(self, model, pk):
"""Convert a primary key to a JSON dict."""
app_label = model._meta.app_label
model_name = model._meta.model_name
return {
'app': app_label,
'model': model_name,
'pk': pk,
} | Convert a primary key to a JSON dict. | entailment |
def choice_voters_changed_update_cache(
sender, instance, action, reverse, model, pk_set, **kwargs):
"""Update cache when choice.voters changes."""
if action not in ('post_add', 'post_remove', 'post_clear'):
# post_clear is not handled, because clear is called in
# django.db.models.fields.related.ReverseManyRelatedObjects.__set__
# before setting the new order
return
if model == User:
assert type(instance) == Choice
choices = [instance]
if pk_set:
users = list(User.objects.filter(pk__in=pk_set))
else:
users = []
else:
if pk_set:
choices = list(Choice.objects.filter(pk__in=pk_set))
else:
choices = []
users = [instance]
from .tasks import update_cache_for_instance
for choice in choices:
update_cache_for_instance('Choice', choice.pk, choice)
for user in users:
update_cache_for_instance('User', user.pk, user) | Update cache when choice.voters changes. | entailment |
def post_delete_update_cache(sender, instance, **kwargs):
"""Update the cache when an instance is deleted."""
name = sender.__name__
if name in cached_model_names:
from .tasks import update_cache_for_instance
update_cache_for_instance(name, instance.pk, instance) | Update the cache when an instance is deleted. | entailment |
def post_save_update_cache(sender, instance, created, raw, **kwargs):
"""Update the cache when an instance is created or modified."""
if raw:
return
name = sender.__name__
if name in cached_model_names:
delay_cache = getattr(instance, '_delay_cache', False)
if not delay_cache:
from .tasks import update_cache_for_instance
update_cache_for_instance(name, instance.pk, instance) | Update the cache when an instance is created or modified. | entailment |
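The receivers above still need to be connected somewhere; a hedged wiring sketch using standard Django signals (the through model is taken from the Choice.voters relation referenced earlier):
from django.db.models.signals import post_save, post_delete, m2m_changed
post_save.connect(post_save_update_cache)
post_delete.connect(post_delete_update_cache)
m2m_changed.connect(choice_voters_changed_update_cache, sender=Choice.voters.through)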
def get_queryset(self):
"""Get the queryset for the action.
If action is read action, return a CachedQueryset
Otherwise, return a Django queryset
"""
queryset = super(CachedViewMixin, self).get_queryset()
if self.action in ('list', 'retrieve'):
return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
else:
return queryset | Get the queryset for the action.
If action is read action, return a CachedQueryset
Otherwise, return a Django queryset | entailment |
def get_object(self, queryset=None):
"""
Return the object the view is displaying.
Same as rest_framework.generics.GenericAPIView, but:
- Failed assertions instead of deprecations
"""
# Determine the base queryset to use.
assert queryset is None, "Passing a queryset is disabled"
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
assert lookup is not None, "Other lookup methods are disabled"
filter_kwargs = {self.lookup_field: lookup}
obj = self.get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj | Return the object the view is displaying.
Same as rest_framework.generics.GenericAPIView, but:
- Failed assertions instead of deprecations | entailment |
def get_object_or_404(self, queryset, *filter_args, **filter_kwargs):
"""Return an object or raise a 404.
Same as Django's standard shortcut, but make sure to raise 404
if the filter_kwargs don't match the required types.
"""
if isinstance(queryset, CachedQueryset):
try:
return queryset.get(*filter_args, **filter_kwargs)
except queryset.model.DoesNotExist:
raise Http404(
'No %s matches the given query.' % queryset.model)
else:
return get_object_or_404(queryset, *filter_args, **filter_kwargs) | Return an object or raise a 404.
Same as Django's standard shortcut, but make sure to raise 404
if the filter_kwargs don't match the required types. | entailment |
def r(self, **kwargs):
"""
Resolve the object.
This returns default (if present) or fails on an Empty.
"""
# by using kwargs we ensure that usage of positional arguments, as if
# this object were another kind of function, will fail-fast and raise
# a TypeError
if 'default' in kwargs:
default = kwargs.pop('default')
if kwargs:
raise TypeError(
"Unexpected argument: {}".format(repr(next(iter(kwargs))))
)
return default
else:
raise JSaneException(
"Key does not exist: {}".format(repr(self._key_name))
) | Resolve the object.
This returns default (if present) or fails on an Empty. | entailment |
def r(self, **kwargs):
"""
Resolve the object.
This will always succeed, since, if a lookup fails, an Empty
instance will be returned farther upstream.
"""
# by using kwargs we ensure that usage of positional arguments, as if
# this object were another kind of function, will fail-fast and raise
# a TypeError
kwargs.pop('default', None)
if kwargs:
raise TypeError(
"Unexpected argument: {}".format(repr(next(iter(kwargs))))
)
return self._obj | Resolve the object.
This will always succeed, since, if a lookup fails, an Empty
instance will be returned farther upstream. | entailment |
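Resolution sketch for the two r() variants; the `loads` entry point wrapping a parsed JSON document is assumed and does not appear in the rows above:
doc = loads('{"user": {"name": "ada"}}')
doc.user.name.r()                  # 'ada'
doc.user.missing.r(default=None)   # None, instead of raising
doc.user.missing.r()               # raises JSaneException naming the missing key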
def update_cache_for_instance(
model_name, instance_pk, instance=None, version=None):
"""Update the cache for an instance, with cascading updates."""
cache = SampleCache()
invalid = cache.update_instance(model_name, instance_pk, instance, version)
for invalid_name, invalid_pk, invalid_version in invalid:
update_cache_for_instance.delay(
invalid_name, invalid_pk, version=invalid_version) | Update the cache for an instance, with cascading updates. | entailment |
def values_list(self, *args, **kwargs):
"""Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
"""
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
return self.pks | Return the primary keys as a list.
The only valid call is values_list('pk', flat=True) | entailment |
def pks(self):
"""Lazy-load the primary keys."""
if self._primary_keys is None:
self._primary_keys = list(
self.queryset.values_list('pk', flat=True))
return self._primary_keys | Lazy-load the primary keys. | entailment |
def count(self):
"""Return a count of instances."""
if self._primary_keys is None:
return self.queryset.count()
else:
return len(self.pks) | Return a count of instances. | entailment |
def filter(self, **kwargs):
"""Filter the base queryset."""
assert not self._primary_keys
self.queryset = self.queryset.filter(**kwargs)
return self | Filter the base queryset. | entailment |
def get(self, *args, **kwargs):
"""Return the single item from the filtered queryset."""
assert not args
assert list(kwargs.keys()) == ['pk']
pk = kwargs['pk']
model_name = self.model.__name__
object_spec = (model_name, pk, None)
instances = self.cache.get_instances((object_spec,))
try:
model_data = instances[(model_name, pk)][0]
except KeyError:
raise self.model.DoesNotExist(
"No match for %r with args %r, kwargs %r" %
(self.model, args, kwargs))
else:
return CachedModel(self.model, model_data) | Return the single item from the filtered queryset. | entailment |
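Putting the CachedQueryset pieces together (sketch; `cache` is an instance cache like the one returned by get_queryset_cache above):
qs = CachedQueryset(cache, queryset=Question.objects.all())
qs.count()                 # falls back to queryset.count() until the pks are loaded
qs.filter(pub_date__year=2024)
question = qs.get(pk=1)    # CachedModel built from the cached native representation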
def collect(cls):
""" Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS
and concat their values.
"""
constants = {}
for method_path in WebpackConstants.get_constant_processors():
method = import_string(method_path)
if not callable(method):
raise ImproperlyConfigured('Constant processor "%s" is not callable' % method_path)
result = method(constants)
if isinstance(result, dict):
constants.update(result)
return constants | Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS
and concat their values. | entailment |
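Each entry in WEBPACK_CONSTANT_PROCESSORS is a dotted path to a callable like the hypothetical one below: it receives the constants collected so far and returns a dict to merge in.
def site_constants(existing):
    # `existing` holds values produced by processors that ran earlier
    return {'SITE_NAME': 'example.com', 'API_ROOT': '/api/v1/'}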
def phonenumber_validation(data):
""" Validates phonenumber
Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the
country prefix is absent.
"""
from phonenumber_field.phonenumber import to_python
phone_number = to_python(data)
if not phone_number:
return data
elif not phone_number.country_code:
raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))
elif not phone_number.is_valid():
raise serializers.ValidationError(_('The phone number entered is not valid.'))
return data | Validates phonenumber
Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the
country prefix is absent. | entailment |
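Usage sketch, reusing the docstring's own example number; exact behaviour depends on the installed phonenumber_field/phonenumbers versions:
phonenumber_validation('+37255555555')   # returns the value unchanged when valid
phonenumber_validation('55555555')       # expected to raise ValidationError about the missing country code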