def _pfp__is_non_consecutive_duplicate(self, name, child):
"""Return True/False if the child is a non-consecutive duplicately named
field. Consecutive duplicately-named fields are stored in an implicit array,
non-consecutive duplicately named fields have a numeric suffix appended to their name"""
if len(self._pfp__children) == 0:
return False
# it should be an implicit array
if self._pfp__children[-1]._pfp__name == name:
return False
# if it's elsewhere in the children name map OR a collision sequence has already been
# started for this name, it should have a numeric suffix
# appended
elif name in self._pfp__children_map or name in self._pfp__name_collisions:
return True
# else, no collision
return False
def _pfp__handle_implicit_array(self, name, child):
"""Handle inserting implicit array elements
"""
existing_child = self._pfp__children_map[name]
if isinstance(existing_child, Array):
# I don't think we should check this
#
#if existing_child.field_cls != child.__class__:
# raise errors.PfpError("implicit arrays must be sequential!")
existing_child.append(child)
return existing_child
else:
cls = child._pfp__class if hasattr(child, "_pfp__class") else child.__class__
ary = Array(0, cls)
# since the array starts with the first item
ary._pfp__offset = existing_child._pfp__offset
ary._pfp__parent = self
ary._pfp__name = name
ary.implicit = True
ary.append(existing_child)
ary.append(child)
exist_idx = -1
# use a fresh loop variable so the ``child`` parameter is not shadowed
for idx, existing in enumerate(self._pfp__children):
if existing is existing_child:
exist_idx = idx
break
self._pfp__children[exist_idx] = ary
self._pfp__children_map[name] = ary
return ary
def _pfp__parse(self, stream, save_offset=False):
"""Parse the incoming stream
:stream: Input stream to be parsed
:returns: Number of bytes parsed
"""
if save_offset:
self._pfp__offset = stream.tell()
res = 0
for child in self._pfp__children:
res += child._pfp__parse(stream, save_offset)
return res
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: The number of bytes written, or the built data if ``stream`` is None
"""
if save_offset and stream is not None:
self._pfp__offset = stream.tell()
# returns either num bytes written or total data
res = utils.binary("") if stream is None else 0
# iterate IN ORDER
for child in self._pfp__children:
child_res = child._pfp__build(stream, save_offset)
res += child_res
return res
def _pfp__show(self, level=0, include_offset=False):
"""Show the contents of the struct
"""
res = []
res.append("{}{} {{".format(
"{:04x} ".format(self._pfp__offset) if include_offset else "",
self._pfp__show_name
))
for child in self._pfp__children:
res.append("{}{}{:10s} = {}".format(
" "*(level+1),
"{:04x} ".format(child._pfp__offset) if include_offset else "",
child._pfp__name,
child._pfp__show(level+1, include_offset)
))
res.append("{}}}".format(" "*level))
return "\n".join(res) | Show the contents of the struct | entailment |
def _pfp__add_child(self, name, child, stream=None):
"""Add a child to the Union field
:name: The name of the child
:child: A :class:`.Field` instance
:returns: The resulting field
"""
res = super(Union, self)._pfp__add_child(name, child)
self._pfp__buff.seek(0, 0)
child._pfp__build(stream=self._pfp__buff)
size = len(self._pfp__buff.getvalue())
self._pfp__buff.seek(0, 0)
if stream is not None:
curr_pos = stream.tell()
stream.seek(curr_pos-size, 0)
return res
def _pfp__notify_update(self, child=None):
"""Handle a child with an updated value
"""
if getattr(self, "_pfp__union_update_other_children", True):
self._pfp__union_update_other_children = False
new_data = child._pfp__build()
new_stream = bitwrap.BitwrappedStream(six.BytesIO(new_data))
for other_child in self._pfp__children:
if other_child is child:
continue
if isinstance(other_child, Array) and other_child.is_stringable():
other_child._pfp__set_value(new_data)
else:
other_child._pfp__parse(new_stream)
new_stream.seek(0)
self._pfp__union_update_other_children = True  # re-enable propagation for future updates
super(Union, self)._pfp__notify_update(child=child)
def _pfp__parse(self, stream, save_offset=False):
"""Parse the incoming stream
:stream: Input stream to be parsed
:returns: Number of bytes parsed
"""
if save_offset:
self._pfp__offset = stream.tell()
max_res = 0
for child in self._pfp__children:
child_res = child._pfp__parse(stream, save_offset)
if child_res > max_res:
max_res = child_res
# rewind the stream
stream.seek(-child_res, 1)
self._pfp__size = max_res
self._pfp__buff = six.BytesIO(stream.read(self._pfp__size))
return max_res
def _pfp__build(self, stream=None, save_offset=False):
"""Build the union and write the result into the stream.
:stream: An IO stream that can be written to (or None)
:returns: The built data if ``stream`` is None, otherwise the max size written
"""
max_size = -1
if stream is None:
core_stream = six.BytesIO()
new_stream = bitwrap.BitwrappedStream(core_stream)
else:
new_stream = stream
for child in self._pfp__children:
curr_pos = new_stream.tell()
child._pfp__build(new_stream, save_offset)
size = new_stream.tell() - curr_pos
new_stream.seek(-size, 1)
if size > max_size:
max_size = size
new_stream.seek(max_size, 1)
if stream is None:
return core_stream.getvalue()
else:
return max_size
def _pfp__parse(self, stream, save_offset=False):
"""Parse the IO stream for this numeric field
:stream: An IO stream that can be read from
:returns: The number of bytes parsed
"""
if save_offset:
self._pfp__offset = stream.tell()
if self.bitsize is None:
raw_data = stream.read(self.width)
data = utils.binary(raw_data)
else:
bits = self.bitfield_rw.read_bits(stream, self.bitsize, self.bitfield_padded, self.bitfield_left_right, self.endian)
width_diff = self.width - (len(bits)//8) - 1
bits_diff = 8 - (len(bits) % 8)
padding = [0] * (width_diff * 8 + bits_diff)
bits = padding + bits
data = bitwrap.bits_to_bytes(bits)
if self.endian == LITTLE_ENDIAN:
# reverse the data
data = data[::-1]
if len(data) < self.width:
raise errors.PrematureEOF()
self._pfp__data = data
self._pfp__value = struct.unpack(
"{}{}".format(self.endian, self.format),
data
)[0]
return self.width
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: The number of bytes written, or the built data if ``stream`` is None
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
if self.bitsize is None:
data = struct.pack(
"{}{}".format(self.endian, self.format),
self._pfp__value
)
if stream is not None:
stream.write(data)
return len(data)
else:
return data
else:
data = struct.pack(
"{}{}".format(BIG_ENDIAN, self.format),
self._pfp__value
)
num_bytes = int(math.ceil(self.bitsize / 8.0))
bit_data = data[-num_bytes:]
raw_bits = bitwrap.bytes_to_bits(bit_data)
bits = raw_bits[-self.bitsize:]
if stream is not None:
self.bitfield_rw.write_bits(stream, bits, self.bitfield_padded, self.bitfield_left_right, self.endian)
return len(bits) // 8
else:
# TODO this can't be right....
return bits
def _dom_class(self, obj1, obj2):
"""Return the dominating numeric class between the two
:obj1: A numeric field instance
:obj2: A numeric field instance
:returns: The dominating numeric class (Double or Float)
"""
if isinstance(obj1, Double) or isinstance(obj2, Double):
return Double
if isinstance(obj1, Float) or isinstance(obj2, Float):
return Float
def _pfp__set_value(self, new_val):
"""Set the value, potentially converting an unsigned
value to a signed one (and vice versa)"""
if self._pfp__frozen:
raise errors.UnmodifiableConst()
if isinstance(new_val, IntBase):
# will automatically convert correctly between ints of
# different sizes, unsigned/signed, etc
raw = new_val._pfp__build()
while len(raw) < self.width:
if self.endian == BIG_ENDIAN:
raw = b"\x00" + raw
else:
raw += b"\x00"
while len(raw) > self.width:
if self.endian == BIG_ENDIAN:
raw = raw[1:]
else:
raw = raw[:-1]
self._pfp__parse(six.BytesIO(raw))
else:
mask = 1 << (8*self.width)
if self.signed:
max_val = (mask//2)-1
min_val = -(mask//2)
else:
max_val = mask-1
min_val = 0
if new_val < min_val:
new_val += -(min_val)
new_val &= (mask-1)
new_val -= -(min_val)
elif new_val > max_val:
new_val &= (mask-1)
self._pfp__value = new_val
self._pfp__notify_parent()
def _pfp__parse(self, stream, save_offset=False):
"""Parse the IO stream for this enum
:stream: An IO stream that can be read from
:returns: The number of bytes parsed
"""
res = super(Enum, self)._pfp__parse(stream, save_offset)
if self._pfp__value in self.enum_vals:
self.enum_name = self.enum_vals[self._pfp__value]
else:
self.enum_name = "?? UNK_ENUM ??"
return res
def _pfp__snapshot(self, recurse=True):
"""Save off the current value of the field
"""
super(Array, self)._pfp__snapshot(recurse=recurse)
self.snapshot_raw_data = self.raw_data
if recurse:
for item in self.items:
item._pfp__snapshot(recurse=recurse)
def _pfp__set_value(self, new_val):
"""Set the value of the String, taking into account
escaping and such as well
"""
if not isinstance(new_val, Field):
new_val = utils.binary(utils.string_escape(new_val))
super(String, self)._pfp__set_value(new_val)
def _pfp__parse(self, stream, save_offset=False):
"""Read from the stream until the string is null-terminated
:stream: The input stream
:returns: None
"""
if save_offset:
self._pfp__offset = stream.tell()
res = utils.binary("")
while True:
byte = utils.binary(stream.read(self.read_size))
if len(byte) < self.read_size:
raise errors.PrematureEOF()
# note that the null terminator must be added back when
# built again!
if byte == self.terminator:
break
res += byte
self._pfp__value = res
def _pfp__build(self, stream=None, save_offset=False):
"""Build the String field
:stream: An IO stream that can be written to (or None)
:returns: The built data if ``stream`` is None, otherwise the number of bytes written
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
data = self._pfp__value + utils.binary("\x00")
if stream is None:
return data
else:
stream.write(data)
return len(data)
def Printf(params, ctxt, scope, stream, coord, interp):
"""Prints format string to stdout
:params: The format string, followed by any format arguments
:returns: The length of the formatted output
"""
if len(params) == 1:
if interp._printf:
sys.stdout.write(PYSTR(params[0]))
return len(PYSTR(params[0]))
parts = []
for part in params[1:]:
if isinstance(part, pfp.fields.Array) or isinstance(part, pfp.fields.String):
parts.append(PYSTR(part))
else:
parts.append(PYVAL(part))
to_print = PYSTR(params[0]) % tuple(parts)
res = len(to_print)
if interp._printf:
sys.stdout.write(to_print)
sys.stdout.flush()
return res
def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False):
"""Mutate the provided field (probably a Dom or struct instance) using the
strategy specified with ``strat_name_or_class``, yielding ``num`` mutations
that affect up to ``at_once`` fields at once.
This function will yield back the field after each mutation, optionally
also yielding a ``set`` of fields that were mutated in that iteration (if ``yield_changed`` is
``True``). It should also be noted that the yielded set of changed fields *can*
be modified and is no longer needed by the mutate() function.
:param pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs)
:param strat_name_or_class: Can be the name of a strategy, or the actual strategy class (not an instance)
:param int num: The number of mutations to yield
:param int at_once: The number of fields to mutate at once
:param bool yield_changed: Yield a list of fields changed along with the mutated dom
:returns: generator
"""
import pfp.fuzz.rand as rand
init()
strat = get_strategy(strat_name_or_cls)
to_mutate = strat.which(field)
with_strats = []
for to_mutate_field in to_mutate:
field_strat = strat.get_field_strat(to_mutate_field)
if field_strat is not None:
with_strats.append((to_mutate_field, field_strat))
# we don't need these ones anymore
del to_mutate
# save the current value of all subfields without
# triggering events
field._pfp__snapshot(recurse=True)
count = 0
for x in six.moves.range(num):
chosen_fields = set()
idx_pool = set([x for x in six.moves.xrange(len(with_strats))])
# modify `at_once` number of fields OR len(with_strats) number of fields,
# whichever is lower
for at_onces in six.moves.xrange(min(len(with_strats), at_once)):
# we'll never pull the same idx from idx_pool more than once
# since we're removing the idx after choosing it
rand_idx = rand.sample(idx_pool, 1)[0]
idx_pool.remove(rand_idx)
rand_field,field_strat = with_strats[rand_idx]
chosen_fields.add(rand_field)
field_strat.mutate(rand_field)
if yield_changed:
yield field, chosen_fields
else:
# yield back the original field
yield field
# restore the saved value of all subfields without
# triggering events
field._pfp__restore_snapshot(recurse=True)
def get_strategy(name_or_cls):
"""Return the strategy identified by its name. If ``name_or_class`` is a class,
it will be simply returned.
"""
if isinstance(name_or_cls, six.string_types):
if name_or_cls not in STRATS:
raise MutationError("strat is not defined")
return STRATS[name_or_cls]()
return name_or_cls()
def mutate(self, field):
"""Mutate the given field, modifying it directly. This is not
intended to preserve the value of the field.
:field: The pfp.fields.Field instance that will receive the new value
"""
new_val = self.next_val(field)
field._pfp__set_value(new_val)
return field
def next_val(self, field):
"""Return a new value to mutate a field with. Do not modify the field directly
in this function. Override the ``mutate()`` function if that is needed (the field is
only passed into this function as a reference).
:field: The pfp.fields.Field instance that will receive the new value. Passed in for reference only.
:returns: The next value for the field
"""
import pfp.fuzz.rand as rand
if self.choices is not None:
choices = self._resolve_member_val(self.choices, field)
new_val = rand.choice(choices)
return self._resolve_val(new_val)
elif self.prob is not None:
prob = self._resolve_member_val(self.prob, field)
rand_val = rand.random()
curr_total = 0.0
# iterate through each of the probability choices until
# we reach one that matches the current rand_val
for prob_percent, prob_val in prob:
if rand_val <= curr_total + prob_percent:
return self._resolve_val(prob_val)
curr_total += prob_percent
raise MutationError("probabilities did not add up to 100%! {}".format(
[str(x[0]) + " - " + str(x[1])[:10] for x in prob]
))
def generate_subid(self, token=None, return_user=False):
'''generate a new user in the database, still session based so we
create a new identifier.
'''
from expfactory.database.models import Participant
if not token:
p = Participant()
else:
p = Participant(token=token)
self.session.add(p)
self.session.commit()
if return_user is True:
return p
return p.id
def print_user(self, user):
'''print a relational database user
'''
status = "active"
token = user.token
if token in ['finished', 'revoked']:
status = token
if token is None:
token = ''
subid = "%s\t%s[%s]" %(user.id, token, status)
print(subid)
return subid
def list_users(self, user=None):
'''list users, each having a model in the database. A headless experiment
will use protected tokens, and interactive will be based on auto-
incremented ids.
'''
from expfactory.database.models import Participant
participants = Participant.query.all()
users = []
for user in participants:
users.append(self.print_user(user))
return users
def generate_user(self):
'''generate a new user in the database, still session based so we
create a new identifier. This function is called from the users new
entrypoint, and it assumes we want a user generated with a token.
'''
token = str(uuid.uuid4())
return self.generate_subid(token=token, return_user=True)
def finish_user(self, subid):
'''finish user will remove a user's token, making the user entry not
accessible if running in headless mode'''
p = self.revoke_token(subid)
p.token = "finished"
self.session.commit()
return p
def restart_user(self, subid):
'''restart a user, which means revoking and issuing a new token.'''
p = self.revoke_token(subid)
p = self.refresh_token(subid)
return p
def validate_token(self, token):
'''retrieve a subject based on a token. Valid means we return a participant
invalid means we return None
'''
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.token == token).first()
if p is not None:
if p.token.endswith(('finished','revoked')):
p = None
else:
p = p.id
return p
def revoke_token(self, subid):
'''revoke a token by removing it. Is done at finish, and also available
as a command line option'''
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = 'revoked'
self.session.commit()
return p
def refresh_token(self, subid):
'''refresh or generate a new token for a user'''
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = str(uuid.uuid4())
self.session.commit()
return p
def save_data(self,session, exp_id, content):
'''save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files'''
from expfactory.database.models import (
Participant,
Result
)
subid = session.get('subid')
token = session.get('token')
self.logger.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first()
# skip the save if the token is mismatched or expired (headless mode)
if self.headless and p.token != token:
self.logger.warning('%s attempting to use mismatched token [%s] skipping save' %(p.id, token))
elif self.headless and p.token.endswith(('finished','revoked')):
self.logger.warning('%s attempting to use expired token [%s] skipping save' %(p.id, token))
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content['data']
result = Result(data=content,
exp_id=exp_id,
participant_id=p.id) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" %(p, result)) | save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files | entailment |
def init_db(self):
'''initialize the database, with the default database path or custom with
a format corresponding to the database type:
Examples:
sqlite:////scif/data/expfactory.db
'''
# The user can provide a custom string
if self.database is None:
self.logger.error("You must provide a database url, exiting.")
sys.exit(1)
self.engine = create_engine(self.database, convert_unicode=True)
self.session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
# Database Setup
Base.query = self.session.query_property()
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import expfactory.database.models
self.Base = Base
self.Base.metadata.create_all(bind=self.engine)
def LazyField(lookup_name, scope):
"""Super non-standard stuff here. Dynamically changing the base
class using the scope and the lazy name when the class is
instantiated. This works as long as the original base class is
not directly inheriting from object (which we're not, since
our original base class is fields.Field).
"""
def __init__(self, stream=None):
base_cls = self._pfp__scope.get_id(self._pfp__lazy_name)
self.__class__.__bases__ = (base_cls,)
base_cls.__init__(self, stream)
new_class = type(lookup_name + "_lazy", (fields.Field,), {
"__init__" : __init__,
"_pfp__scope" : scope,
"_pfp__lazy_name" : lookup_name
})
return new_class
def _wrap_type_instantiation(self, type_cls):
"""Wrap the creation of the type so that we can provide
a null-stream to initialize it"""
def wrapper(*args, **kwargs):
# use args for struct arguments??
return type_cls(stream=self._null_stream)
return wrapper
def level(self):
"""Return the current scope level
"""
res = len(self._scope_stack)
if self._parent is not None:
res += self._parent.level()
return res
def push(self, new_scope=None):
"""Create a new scope
:returns: None
"""
if new_scope is None:
new_scope = {
"types": {},
"vars": {}
}
self._curr_scope = new_scope
self._dlog("pushing new scope, scope level = {}".format(self.level()))
self._scope_stack.append(self._curr_scope)
def clone(self):
"""Return a new Scope object that has the curr_scope
pinned at the current one
:returns: A new scope object
"""
self._dlog("cloning the stack")
# TODO is this really necessary to create a brand new one?
# I think it is... need to think about it more.
# or... are we going to need ref counters and a global
# scope object that allows a view into (or a snapshot of)
# a specific scope stack?
res = Scope(self._log)
res._scope_stack = self._scope_stack
res._curr_scope = self._curr_scope
return res
def pop(self):
"""Leave the current scope
:returns: The scope that was popped
"""
res = self._scope_stack.pop()
self._dlog("popping scope, scope level = {}".format(self.level()))
self._curr_scope = self._scope_stack[-1]
return res
def add_var(self, field_name, field, root=False):
"""Add a var to the current scope (vars are fields that
parse the input stream)
:field_name: The field's name
:field: The field instance
:returns: None
"""
self._dlog("adding var '{}' (root={})".format(field_name, root))
# do both so it's not clobbered by intermediate values of the same name
if root:
self._scope_stack[0]["vars"][field_name] = field
# TODO do we allow clobbering of vars???
self._curr_scope["vars"][field_name] = field | Add a var to the current scope (vars are fields that
parse the input stream)
:field_name: TODO
:field: TODO
:returns: TODO | entailment |
def get_var(self, name, recurse=True):
"""Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: The found field, or None
"""
self._dlog("getting var '{}'".format(name))
return self._search("vars", name, recurse) | Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO | entailment |
def add_local(self, field_name, field):
"""Add a local variable in the current scope
:field_name: The field's name
:field: The field
:returns: None
"""
self._dlog("adding local '{}'".format(field_name))
field._pfp__name = field_name
# TODO do we allow clobbering of locals???
self._curr_scope["vars"][field_name] = field | Add a local variable in the current scope
:field_name: The field's name
:field: The field
:returns: None | entailment |
def get_local(self, name, recurse=True):
"""Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field
"""
self._dlog("getting local '{}'".format(name))
return self._search("vars", name, recurse) | Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field | entailment |
def add_type_struct_or_union(self, name, interp, node):
"""Store the node with the name. When it is instantiated,
the node itself will be handled.
:name: name of the typedefd struct/union
:node: the union/struct node
:interp: the 010 interpreter
"""
self.add_type_class(name, StructUnionDef(name, interp, node))
def add_type(self, new_name, orig_names):
"""Record the typedefd name for orig_names. Resolve orig_names
to their core names and save those.
:new_name: The new (typedefd) name
:orig_names: The original type names being aliased
:returns: None
"""
self._dlog("adding a type '{}'".format(new_name))
# TODO do we allow clobbering of types???
res = copy.copy(orig_names)
resolved_names = self._resolve_name(res[-1])
if resolved_names is not None:
res.pop()
res += resolved_names
self._curr_scope["types"][new_name] = res | Record the typedefd name for orig_names. Resolve orig_names
to their core names and save those.
:new_name: TODO
:orig_names: TODO
:returns: TODO | entailment |
def get_type(self, name, recurse=True):
"""Get the names for the typename (created by typedef)
:name: The typedef'd name to resolve
:returns: An array of resolved names associated with the typedef'd name
"""
self._dlog("getting type '{}'".format(name))
return self._search("types", name, recurse) | Get the names for the typename (created by typedef)
:name: The typedef'd name to resolve
:returns: An array of resolved names associated with the typedef'd name | entailment |
def get_id(self, name, recurse=True):
"""Get the first id matching ``name``. Will either be a local
or a var.
:name: The name of the id
:returns: The found field/local, or None
"""
self._dlog("getting id '{}'".format(name))
var = self._search("vars", name, recurse)
return var
def _resolve_name(self, name):
"""TODO: Docstring for _resolve_names.
:name: TODO
:returns: TODO
"""
res = [name]
while True:
orig_names = self._search("types", name)
if orig_names is not None:
name = orig_names[-1]
# pop off the typedefd name
res.pop()
# add back on the original names
res += orig_names
else:
break
return res
def _search(self, category, name, recurse=True):
"""Search the scope stack for the name in the specified
category (types/locals/vars).
:category: the category to search in (locals/types/vars)
:name: name to search for
:returns: None if not found, the result of the found local/type/id
"""
idx = len(self._scope_stack) - 1
curr = self._curr_scope
for scope in reversed(self._scope_stack):
res = scope[category].get(name, None)
if res is not None:
return res
if recurse and self._parent is not None:
return self._parent._search(category, name, recurse)
return None
def add_native(cls, name, func, ret, interp=None, send_interp=False):
"""Add the native python function ``func`` into the pfp interpreter with the
name ``name`` and return value ``ret`` so that it can be called from
within a template script.
.. note::
The :any:`@native <pfp.native.native>` decorator exists to simplify this.
All native functions must have the signature ``def func(params, ctxt, scope, stream, coord [,interp])``,
optionally allowing an interpreter param if ``send_interp`` is ``True``.
Example:
The example below defines a function ``Sum`` using the ``add_native`` method. ::
import pfp.fields
from pfp.fields import PYVAL
def native_sum(params, ctxt, scope, stream, coord):
return PYVAL(params[0]) + PYVAL(params[1])
pfp.interp.PfpInterp.add_native("Sum", native_sum, pfp.fields.Int64)
:param basestring name: The name the function will be exposed as in the interpreter.
:param function func: The native python function that will be referenced.
:param type(pfp.fields.Field) ret: The field class that the return value should be cast to.
:param pfp.interp.PfpInterp interp: The specific pfp interpreter the function should be defined in.
:param bool send_interp: If true, the current pfp interpreter will be added as an argument to the function.
"""
if interp is None:
natives = cls._natives
else:
# the instance's natives
natives = interp._natives
natives[name] = functions.NativeFunction(
name, func, ret, send_interp
)
def define_natives(cls):
"""Define the native functions for PFP
"""
if len(cls._natives) > 0:
return
glob_pattern = os.path.join(os.path.dirname(__file__), "native", "*.py")
for filename in glob.glob(glob_pattern):
basename = os.path.basename(filename).replace(".py", "")
if basename == "__init__":
continue
try:
mod_base = __import__("pfp.native", globals(), locals(), fromlist=[basename])
except Exception as e:
sys.stderr.write("cannot import native module {} at '{}'".format(basename, filename))
raise e
continue
mod = getattr(mod_base, basename)
setattr(mod, "PYVAL", fields.get_value)
setattr(mod, "PYSTR", fields.get_str) | Define the native functions for PFP | entailment |
def _dlog(self, msg, indent_increase=0):
"""log the message to the log"""
self._log.debug("interp", msg, indent_increase, filename=self._orig_filename, coord=self._coord) | log the message to the log | entailment |
def parse(self, stream, template, predefines=True, orig_filename=None, keep_successful=False, printf=True):
"""Parse the data stream using the template (e.g. parse the 010 template
and interpret the template using the stream as the data source).
:stream: The input data stream
:template: The template to parse the stream with
:keep_successful: Return whatever was successfully parsed before an error. ``_pfp__error`` will contain the exception (if one was raised)
:param bool printf: If ``False``, printfs will be noops (default=``True``)
:returns: Pfp Dom
"""
self._dlog("parsing")
self._printf = printf
self._orig_filename = orig_filename
self._stream = stream
self._template = template
self._template_lines = self._template.split("\n")
self._ast = self._parse_string(template, predefines)
self._dlog("parsed template into ast")
res = self._run(keep_successful)
return res
def eval(self, statement, ctxt=None):
"""Eval a single statement (something returnable)
"""
self._no_debug = True
statement = statement.strip()
if not statement.endswith(";"):
statement += ";"
ast = self._parse_string(statement, predefines=False)
self._dlog("evaluating statement: {}".format(statement))
try:
res = None
for child in ast.children():
res = self._handle_node(child, self._scope, self._ctxt, self._stream)
return res
except errors.InterpReturn as e:
return e.value
finally:
self._no_debug = False
def set_break(self, break_type):
"""Set if the interpreter should break.
:returns: None
"""
self._break_type = break_type
self._break_level = self._scope.level()
def get_curr_lines(self):
"""Return the current line number in the template,
as well as the surrounding source lines
"""
start = max(0, self._coord.line - 5)
end = min(len(self._template_lines), self._coord.line + 4)
lines = [(x, self._template_lines[x]) for x in six.moves.range(start, end, 1)]
return self._coord.line, lines
def set_bitfield_padded(self, val):
"""Set if the bitfield input/output stream should be padded
:val: True/False
:returns: None
"""
self._padded_bitfield = val
self._stream.padded = val
self._ctxt._pfp__padded_bitfield = val
def _run(self, keep_successful):
"""Interpret the parsed 010 AST
:returns: PfpDom
"""
# example self._ast.show():
# FileAST:
# Decl: data, [], [], []
# TypeDecl: data, []
# Struct: DATA
# Decl: a, [], [], []
# TypeDecl: a, []
# IdentifierType: ['char']
# Decl: b, [], [], []
# TypeDecl: b, []
# IdentifierType: ['char']
# Decl: c, [], [], []
# TypeDecl: c, []
# IdentifierType: ['char']
# Decl: d, [], [], []
# TypeDecl: d, []
# IdentifierType: ['char']
self._dlog("interpreting template")
try:
# it is important to pass the stream in as the stream
# may change (e.g. compressed data)
res = self._handle_node(self._ast, None, None, self._stream)
except errors.InterpReturn as e:
# TODO handle exit/return codes (e.g. return -1)
res = self._root
except errors.InterpExit as e:
res = self._root
except Exception as e:
if keep_successful:
# return the root and set _pfp__error
res = self._root
res._pfp__error = e
else:
exc_type, exc_obj, traceback = sys.exc_info()
more_info = "\nException at {}:{}".format(
self._orig_filename,
self._coord.line
)
six.reraise(
errors.PfpError,
errors.PfpError(exc_obj.__class__.__name__ + ": " + exc_obj.args[0] + more_info if len(exc_obj.args) > 0 else more_info),
traceback
)
# final drop-in after everything has executed
if self._break_type != self.BREAK_NONE:
self.debugger.cmdloop("execution finished")
types = self.get_types()
res._pfp__types = types
return res
def _handle_node(self, node, scope=None, ctxt=None, stream=None):
"""Recursively handle nodes in the 010 AST
:node: The AST node (or a tuple/list wrapping one)
:scope: The current scope
:ctxt: The current context
:stream: The input data stream
:returns: The result of the node-specific handler
"""
if scope is None:
if self._scope is None:
self._scope = scope = self._create_scope()
else:
scope = self._scope
if ctxt is None and self._ctxt is not None:
ctxt = self._ctxt
else:
self._ctxt = ctxt
if type(node) is tuple:
node = node[1]
# TODO probably a better way to do this...
# this occurs with if-statements that have a single statement
# instead of a compound statement (no curly braces)
elif type(node) is list and len(list(filter(lambda x: isinstance(x, AST.Node), node))) == len(node):
node = AST.Compound(
block_items=node,
coord=node[0].coord
)
return self._handle_node(node, scope, ctxt, stream)
# need to check this so that debugger-eval'd statements
# don't mess with the current state
if not self._no_debug:
self._coord = node.coord
self._dlog("handling node type {}, line {}".format(node.__class__.__name__, node.coord.line if node.coord is not None else "?"))
self._log.inc()
breakable = self._node_is_breakable(node)
if breakable and not self._no_debug and self._break_type != self.BREAK_NONE:
# always break
if self._break_type == self.BREAK_INTO:
self._break_level = self._scope.level()
self.debugger.cmdloop()
# level <= _break_level
elif self._break_type == self.BREAK_OVER:
if self._scope.level() <= self._break_level:
self._break_level = self._scope.level()
self.debugger.cmdloop()
else:
pass
if node.__class__ not in self._node_switch:
raise errors.UnsupportedASTNode(node.coord, node.__class__.__name__)
res = self._node_switch[node.__class__](node, scope, ctxt, stream)
self._log.dec()
return res
def _handle_file_ast(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_file_ast.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._root = ctxt = fields.Dom(stream)
ctxt._pfp__scope = scope
self._root._pfp__name = "__root"
self._root._pfp__interp = self
self._dlog("handling file AST with {} children".format(len(node.children())))
for child in node.children():
self._handle_node(child, scope, ctxt, stream)
ctxt._pfp__process_fields_metadata()
return ctxt
def _handle_cast(self, node, scope, ctxt, stream):
"""Handle cast nodes
:node: The Cast node
:returns: A new field of the target type holding the casted value
"""
self._dlog("handling cast")
to_type = self._handle_node(node.to_type, scope, ctxt, stream)
val_to_cast = self._handle_node(node.expr, scope, ctxt, stream)
res = to_type()
res._pfp__set_value(val_to_cast)
return res
def _handle_typename(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_typename
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling typename")
return self._handle_node(node.type, scope, ctxt, stream)
def _get_node_name(self, node):
"""Get the name of the node - check for node.name and
node.type.declname. Not sure why the second one occurs
exactly - it happens with declaring a new struct field
with parameters"""
res = getattr(node, "name", None)
if res is None:
return res
if isinstance(res, AST.TypeDecl):
return res.declname
return res
def _handle_decl(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_decl.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling decl")
metadata_processor = None
if node.metadata is not None:
#metadata_info = self._handle_metadata(node, scope, ctxt, stream)
def process_metadata():
metadata_info = self._handle_metadata(node, scope, ctxt, stream)
return metadata_info
metadata_processor = process_metadata
field_name = self._get_node_name(node)
field = self._handle_node(node.type, scope, ctxt, stream)
bitsize = None
bitfield_rw = None
if getattr(node, "bitsize", None) is not None:
bitsize = self._handle_node(node.bitsize, scope, ctxt, stream)
has_prev = len(ctxt._pfp__children) > 0
bitfield_rw = None
if has_prev:
prev = ctxt._pfp__children[-1]
# if it was a bitfield as well
# TODO I don't think this will handle multiple bitfield groups in a row.
# E.g.
# char a: 8, b:8;
# char c: 8, d:8;
if ((self._padded_bitfield and prev.__class__.width == field.width) or not self._padded_bitfield) \
and prev.bitsize is not None and prev.bitfield_rw.reserve_bits(bitsize, stream):
bitfield_rw = prev.bitfield_rw
# either because there was no previous bitfield, or the previous was full
if bitfield_rw is None:
bitfield_rw = fields.BitfieldRW(self, field)
bitfield_rw.reserve_bits(bitsize, stream)
if getattr(node, "is_func_param", False):
# we want to keep this as a class and not instantiate it
# instantiation will be done in functions.ParamListDef.instantiate
field = (field_name, field)
# locals and consts still get a field instance, but DON'T parse the
# stream!
elif "local" in node.quals or "const" in node.quals:
is_struct = issubclass(field, fields.Struct)
if not isinstance(field, fields.Field) and not is_struct:
field = field()
scope.add_local(field_name, field)
# this should only be able to be done with locals, right?
# if not, move it to the bottom of the function
if node.init is not None:
val = self._handle_node(node.init, scope, ctxt, stream)
if is_struct:
field = val
scope.add_local(field_name, field)
else:
field._pfp__set_value(val)
if "const" in node.quals:
field._pfp__freeze()
field._pfp__interp = self
elif isinstance(field, functions.Function):
# eh, just add it as a local...
# maybe the whole local/vars thinking needs to change...
# and we should only have ONE map TODO
field.name = field_name
scope.add_local(field_name, field)
elif field_name is not None:
added_child = False
# by this point, structs are already instantiated (they need to be
# in order to set the new context)
if not isinstance(field, fields.Field):
if issubclass(field, fields.NumberBase):
# use the default bitfield direction
if self._bitfield_direction is self.BITFIELD_DIR_DEFAULT:
bitfield_left_right = True if field.endian == fields.BIG_ENDIAN else False
else:
bitfield_left_right = (self._bitfield_direction is self.BITFIELD_DIR_LEFT_RIGHT)
field = field(
stream,
bitsize=bitsize,
metadata_processor=metadata_processor,
bitfield_rw=bitfield_rw,
bitfield_padded=self._padded_bitfield,
bitfield_left_right=bitfield_left_right
)
# TODO
# for now if there's a struct inside of a union that is being
# parsed when there's an error, the user will lose information
# about how far the parsing got. Here we are explicitly checking for
# adding structs and unions to a parent union.
elif (issubclass(field, fields.Struct) or issubclass(field, fields.Union)) \
and not isinstance(ctxt, fields.Union) \
and hasattr(field, "_pfp__init"):
# this is so that we can have all nested structs added to
# the root DOM, even if there's an error in parsing the data.
# If we didn't do this, any errors parsing the data would cause
# the new struct to not be added to its parent, and the user would
# not be able to see how far the script got
field = field(stream, metadata_processor=metadata_processor, do_init=False)
field._pfp__interp = self
field_res = ctxt._pfp__add_child(field_name, field, stream)
# when adding a new field to a struct/union/fileast, add it to the
# root of the ctxt's scope so that it doesn't get lost by being declared
# from within a function
scope.add_var(field_name, field_res, root=True)
field_res._pfp__interp = self
field._pfp__init(stream)
added_child = True
else:
field = field(stream, metadata_processor=metadata_processor)
if not added_child:
field._pfp__interp = self
field_res = ctxt._pfp__add_child(field_name, field, stream)
field_res._pfp__interp = self
# when adding a new field to a struct/union/fileast, add it to the
# root of the ctxt's scope so that it doesn't get lost by being declared
# from within a function
scope.add_var(field_name, field_res, root=True)
# this shouldn't be used elsewhere, but should still be explicit with
# this flag
added_child = True
# enums will get here. If there is no name, then no
# field is being declared (but the enum values _will_
# get defined). E.g.:
# enum <uchar blah {
# BLAH1,
# BLAH2,
# BLAH3
# };
elif field_name is None:
pass
return field
def _handle_metadata(self, node, scope, ctxt, stream):
"""Handle metadata for the node
"""
self._dlog("handling node metadata {}".format(node.metadata.keyvals))
keyvals = node.metadata.keyvals
metadata_info = []
if "watch" in node.metadata.keyvals or "update" in keyvals:
metadata_info.append(
self._handle_watch_metadata(node, scope, ctxt, stream)
)
if "packtype" in node.metadata.keyvals or "packer" in keyvals:
metadata_info.append(
self._handle_packed_metadata(node, scope, ctxt, stream)
)
return metadata_info
def _handle_watch_metadata(self, node, scope, ctxt, stream):
"""Handle watch vars for fields
"""
keyvals = node.metadata.keyvals
if "watch" not in keyvals:
raise errors.PfpError("Packed fields require a packer function set")
if "update" not in keyvals:
raise errors.PfpError("Packed fields require a packer function set")
watch_field_name = keyvals["watch"]
update_func_name = keyvals["update"]
watch_fields = list(map(lambda x: self.eval(x.strip()), watch_field_name.split(";")))
update_func = scope.get_id(update_func_name)
return {
"type": "watch",
"watch_fields": watch_fields,
"update_func": update_func,
"func_call_info": (ctxt, scope, stream, self, self._coord)
}
def _handle_packed_metadata(self, node, scope, ctxt, stream):
"""Handle packed metadata
"""
keyvals = node.metadata.keyvals
if "packer" not in keyvals and ("pack" not in keyvals or "unpack" not in keyvals):
raise errors.PfpError("Packed fields require a packer function to be set or pack and unpack functions to be set")
if "packtype" not in keyvals:
raise errors.PfpError("Packed fields require a packtype to be set")
args_ = {}
if "packer" in keyvals:
packer_func_name = keyvals["packer"]
packer_func = scope.get_id(packer_func_name)
args_["packer"] = packer_func
elif "pack" in keyvals and "unpack" in keyvals:
pack_func = scope.get_id(keyvals["pack"])
unpack_func = scope.get_id(keyvals["unpack"])
args_["pack"] = pack_func
args_["unpack"] = unpack_func
packtype_cls_name = keyvals["packtype"]
packtype_cls = scope.get_type(packtype_cls_name)
args_["pack_type"] = packtype_cls
args_["type"] = "packed"
args_["func_call_info"] = (ctxt, scope, stream, self, self._coord)
return args_
def _handle_byref_decl(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_byref_decl.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling byref decl")
field = self._handle_node(node.type.type, scope, ctxt, stream)
# this will not really be used (maybe except for introspection)
# with byref function params
# see issue #35 - we need to wrap the field cls so that the byref
# doesn't permanently stay on the class
field = functions.ParamClsWrapper(field)
field.byref = True
return field
def _handle_type_decl(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_type_decl.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling type decl")
decl = self._handle_node(node.type, scope, ctxt, stream)
return decl
def _handle_struct_ref(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_struct_ref.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling struct ref")
# name
# field
struct = self._handle_node(node.name, scope, ctxt, stream)
try:
sub_field = getattr(struct, node.field.name)
except AttributeError as e:
# should be able to access implicit array items by index OR
# access the last one's members directly without index
#
# E.g.:
#
# local int total_length = 0;
# while(!FEof()) {
# HEADER header;
# total_length += header.length;
# }
if isinstance(struct, fields.Array) and struct.implicit:
last_item = struct[-1]
sub_field = getattr(last_item, node.field.name)
else:
raise
return sub_field
def _handle_union(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_union.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling union")
union_cls = StructUnionDef("union", self, node)
return union_cls
def _handle_init_list(self, node, scope, ctxt, stream):
"""Handle InitList nodes (e.g. when initializing a struct)
:node: The InitList node
:returns: A list of the initializer fields
"""
self._dlog("handling init list")
res = []
for _,init_child in node.children():
init_field = self._handle_node(init_child, scope, ctxt, stream)
res.append(init_field)
return res
def _handle_struct_call_type_decl(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_struct_call_type_decl.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling struct with parameters")
struct_cls = self._handle_node(node.type, scope, ctxt, stream)
struct_args = self._handle_node(node.args, scope, ctxt, stream)
res = StructDeclWithParams(scope, struct_cls, struct_args)
return res
def _handle_struct(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_struct.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling struct")
if node.args is not None:
for param in node.args.params:
param.is_func_param = True
# it's actually being defined
if node.decls is not None:
struct_cls = StructUnionDef("struct", self, node)
if node.name is not None:
scope.add_type_class(node.name, struct_cls)
return struct_cls
# it's declaring a struct field. E.g.
# struct IFD subDir;
else:
return scope.get_type(node.name)
def _handle_identifier_type(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_identifier_type.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling identifier")
cls = self._resolve_to_field_class(node.names, scope)
return cls
def _handle_typedef(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_typedef.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
is_union_or_struct = (node.type.type.__class__ in [AST.Union, AST.Struct])
is_enum = (node.type.type.__class__ is AST.Enum)
if is_union_or_struct:
self._dlog("handling typedef struct/union '{}'".format(node.name))
scope.add_type_struct_or_union(node.name, self, node.type.type)
elif is_enum:
enum_cls = self._handle_node(node.type, scope, ctxt, stream)
scope.add_type_class(node.name, enum_cls)
elif isinstance(node.type, AST.ArrayDecl):
# this does not parse data, just creates the ArrayDecl class
array_cls = self._handle_node(node.type, scope, ctxt, stream)
scope.add_type_class(node.name, array_cls)
else:
names = node.type.type.names
self._dlog("handling typedef '{}' ({})".format(node.name, names))
# don't actually handle the TypeDecl and Identifier nodes,
# just directly add the types. Example structure:
#
# Typedef: BLAH, [], ['typedef']
# TypeDecl: BLAH, []
# IdentifierType: ['unsigned', 'char']
#
scope.add_type(node.name, names)
def _str_to_int(self, string):
"""Check for the hex
"""
string = string.lower()
if string.endswith("l"):
string = string[:-1]
if string.lower().startswith("0x"):
# should always match
match = re.match(r'0[xX]([a-fA-F0-9]+)', string)
return int(match.group(1), 0x10)
else:
return int(string)
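The same conversion, runnable standalone. int() with base 16 accepts the 0x
prefix directly, so the regex above can be reduced to a prefix check:

def str_to_int(string):
    string = string.lower()
    if string.endswith("l"):
        string = string[:-1]
    if string.startswith("0x"):
        return int(string, 16)
    return int(string)

print(str_to_int("0xFF"), str_to_int("42L"), str_to_int("7"))  # 255 42 7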
def _handle_constant(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_constant.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling constant type {}".format(node.type))
switch = {
"int": (self._str_to_int, self._choose_const_int_class),
"long": (self._str_to_int, self._choose_const_int_class),
# TODO this isn't quite right, but py010parser wouldn't have
# parsed it if it wasn't correct...
"float": (lambda x: float(x.lower().replace("f", "")), fields.Float),
"double": (float, fields.Double),
# cut out the quotes
"char": (lambda x: ord(utils.string_escape(x[1:-1])), fields.Char),
# TODO should this be unicode?? will probably bite me later...
# cut out the quotes
"string": (lambda x: str(utils.string_escape(x[1:-1])), fields.String)
}
if node.type in switch:
#return switch[node.type](node.value)
conversion,field_cls = switch[node.type]
val = conversion(node.value)
if hasattr(field_cls, "__call__") and not type(field_cls) is type:
field_cls = field_cls(val)
field = field_cls()
field._pfp__set_value(val)
return field
    raise errors.UnsupportedConstantType(node.coord, node.type) | Handle Constant nodes (int, long, float, double, char, and string literals)
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
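# The core of the handler is the (conversion, field class) table; a simplified
# sketch with plain Python types standing in for pfp's field classes (the
# stand-ins and the omitted escape handling are assumptions for brevity):
constant_table = {
    "int":    int,
    "float":  lambda x: float(x.lower().replace("f", "")),  # "1.5f" -> 1.5
    "char":   lambda x: ord(x[1:-1]),                       # "'A'"  -> 65
    "string": lambda x: x[1:-1],                            # strip the quotes
}

assert constant_table["float"]("1.5f") == 1.5
assert constant_table["char"]("'A'") == 65
assert constant_table["string"]('"hi"') == "hi"
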
def _handle_binary_op(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_binary_op.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling binary operation {}".format(node.op))
switch = {
"+": lambda x,y: x+y,
"-": lambda x,y: x-y,
"*": lambda x,y: x*y,
"/": lambda x,y: x/y,
"|": lambda x,y: x|y,
"^": lambda x,y: x^y,
"&": lambda x,y: x&y,
"%": lambda x,y: x%y,
">": lambda x,y: x>y,
"<": lambda x,y: x<y,
"||": lambda x,y: x or y,
">=": lambda x,y: x>=y,
"<=": lambda x,y: x<=y,
"==": lambda x,y: x == y,
"!=": lambda x,y: x != y,
"&&": lambda x,y: x and y,
">>": lambda x,y: x >> y,
"<<": lambda x,y: x << y,
}
left_val = self._handle_node(node.left, scope, ctxt, stream)
right_val = self._handle_node(node.right, scope, ctxt, stream)
if node.op not in switch:
raise errors.UnsupportedBinaryOperator(node.coord, node.op)
res = switch[node.op](left_val, right_val)
if type(res) is bool:
new_res = fields.Int()
if res:
new_res._pfp__set_value(1)
else:
new_res._pfp__set_value(0)
res = new_res
    return res | Handle BinaryOp nodes (arithmetic, bitwise, comparison, and logical operators)
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
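# The dispatch-and-normalize pattern in isolation: comparison lambdas return a
# Python bool, which the handler widens to an integer result (pfp wraps it in
# fields.Int; a plain int stands in here):
binops = {
    "+":  lambda x, y: x + y,
    "==": lambda x, y: x == y,
}
res = binops["=="](3, 3)
if type(res) is bool:
    res = 1 if res else 0
assert res == 1
assert binops["+"](2, 3) == 5
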
def _handle_unary_op(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_unary_op.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling unary op {}".format(node.op))
special_switch = {
"parentof" : self._handle_parentof,
"exists" : self._handle_exists,
"function_exists" : self._handle_function_exists,
"p++" : self._handle_post_plus_plus,
"p--" : self._handle_post_minus_minus,
}
switch = {
# for ++i and --i
"++": lambda x,v: x.__iadd__(1),
"--": lambda x,v: x.__isub__(1),
"~": lambda x,v: ~x,
"!": lambda x,v: not x,
"-": lambda x,v: -x,
"sizeof": lambda x,v: (fields.UInt64()+x._pfp__width()),
"startof": lambda x,v: (fields.UInt64()+x._pfp__offset),
}
if node.op not in switch and node.op not in special_switch:
raise errors.UnsupportedUnaryOperator(node.coord, node.op)
if node.op in special_switch:
return special_switch[node.op](node, scope, ctxt, stream)
field = self._handle_node(node.expr, scope, ctxt, stream)
if type(field) is type:
field = field()
res = switch[node.op](field, 1)
if type(res) is bool:
new_res = field.__class__()
        new_res._pfp__set_value(1 if res else 0)
res = new_res
    return res | Handle UnaryOp nodes, including ++/--, sizeof, startof, and the special operators
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
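# Pre-increment works by mutating the field in place through __iadd__, which
# is why the "++" lambda calls it explicitly; FakeField is a hypothetical
# stand-in for a pfp field that makes the in-place mutation visible:
class FakeField(object):
    def __init__(self, v=0):
        self.v = v
    def __iadd__(self, n):
        self.v += n
        return self

f = FakeField(4)
incr = lambda x, v: x.__iadd__(1)   # the "++" entry in the switch above
incr(f, 1)
assert f.v == 5                     # the original object was mutated
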
def _handle_parentof(self, node, scope, ctxt, stream):
"""Handle the parentof unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
# if someone does something like parentof(this).blah,
# we'll end up with a StructRef instead of an ID ref
# for node.expr, but we'll also end up with a structref
# if the user does parentof(a.b.c)...
#
# TODO how to differentiate between the two??
#
# the proper way would be to do (parentof(a.b.c)).a or
# (parentof a.b.c).a
field = self._handle_node(node.expr, scope, ctxt, stream)
parent = field._pfp__parent
return parent | Handle the parentof unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_exists(self, node, scope, ctxt, stream):
"""Handle the exists unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
res = fields.Int()
try:
self._handle_node(node.expr, scope, ctxt, stream)
res._pfp__set_value(1)
except AttributeError:
res._pfp__set_value(0)
return res | Handle the exists unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_function_exists(self, node, scope, ctxt, stream):
"""Handle the function_exists unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
res = fields.Int()
try:
func = self._handle_node(node.expr, scope, ctxt, stream)
if isinstance(func, functions.BaseFunction):
res._pfp__set_value(1)
else:
res._pfp__set_value(0)
except errors.UnresolvedID:
res._pfp__set_value(0)
return res | Handle the function_exists unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_id(self, node, scope, ctxt, stream):
"""Handle an ID node (return a field object for the ID)
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
if node.name == "__root":
return self._root
if node.name == "__this" or node.name == "this":
return ctxt
self._dlog("handling id {}".format(node.name))
field = scope.get_id(node.name)
is_lazy = getattr(node, "is_lazy", False)
if field is None and not is_lazy:
raise errors.UnresolvedID(node.coord, node.name)
elif is_lazy:
return LazyField(node.name, scope)
return field | Handle an ID node (return a field object for the ID)
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
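# Lazy IDs (parameters marked is_lazy) defer name resolution until the value
# is used; LazyRef below is a hypothetical stand-in for pfp's LazyField, with
# a dict playing the role of the scope:
class LazyRef(object):
    def __init__(self, name, scope):
        self.name = name
        self.scope = scope
    def resolve(self):
        return self.scope[self.name]

scope = {"width": 16}
ref = LazyRef("width", scope)   # no lookup happens yet
scope["width"] = 32             # the binding may still change
assert ref.resolve() == 32      # resolution happens at use time
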
def _handle_assignment(self, node, scope, ctxt, stream):
"""Handle assignment nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
def add_op(x,y): x += y
def sub_op(x,y): x -= y
def div_op(x,y): x /= y
def mod_op(x,y): x %= y
def mul_op(x,y): x *= y
def xor_op(x,y): x ^= y
def and_op(x,y): x &= y
def or_op(x,y): x |= y
def lshift_op(x,y): x <<= y
def rshift_op(x,y): x >>= y
def assign_op(x,y): x._pfp__set_value(y)
switch = {
"+=" : add_op,
"-=" : sub_op,
"/=" : div_op,
"%=" : mod_op,
"*=" : mul_op,
"^=" : xor_op,
"&=" : and_op,
"|=" : or_op,
"<<=" : lshift_op,
">>=" : rshift_op,
"=" : assign_op
}
self._dlog("handling assignment")
field = self._handle_node(node.lvalue, scope, ctxt, stream)
self._dlog("field = {}".format(field))
value = self._handle_node(node.rvalue, scope, ctxt, stream)
if node.op is None:
self._dlog("value = {}".format(value))
field._pfp__set_value(value)
else:
self._dlog("value {}= {}".format(node.op, value))
if node.op not in switch:
raise errors.UnsupportedAssignmentOperator(node.coord, node.op)
switch[node.op](field, value) | Handle assignment nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
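# The one-line operator helpers rely on augmented assignment mutating the
# field in place; with immutable ints the update would be lost, so this sketch
# uses a hypothetical mutable Cell to model a field:
class Cell(object):
    def __init__(self, v):
        self.v = v
    def __iadd__(self, y):
        self.v += y
        return self
    def __ilshift__(self, y):
        self.v <<= y
        return self

def add_op(x, y): x += y        # same shape as the helpers above
def lshift_op(x, y): x <<= y

c = Cell(2)
add_op(c, 3)
lshift_op(c, 2)
assert c.v == 20                # (2 + 3) << 2
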
def _handle_func_def(self, node, scope, ctxt, stream):
"""Handle FuncDef nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling function definition")
func = self._handle_node(node.decl, scope, ctxt, stream)
func.body = node.body | Handle FuncDef nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_param_list(self, node, scope, ctxt, stream):
"""Handle ParamList nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling param list")
# params should be a list of tuples:
# [(<name>, <field_class>), ...]
params = []
for param in node.params:
self._mark_id_as_lazy(param)
param_info = self._handle_node(param, scope, ctxt, stream)
params.append(param_info)
param_list = functions.ParamListDef(params, node.coord)
return param_list | Handle ParamList nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_func_decl(self, node, scope, ctxt, stream):
"""Handle FuncDecl nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling func decl")
if node.args is not None:
# could just call _handle_param_list directly...
for param in node.args.params:
# see the check in _handle_decl for how this is kept from
# being added to the local context/scope
param.is_func_param = True
params = self._handle_node(node.args, scope, ctxt, stream)
else:
params = functions.ParamListDef([], node.coord)
func_type = self._handle_node(node.type, scope, ctxt, stream)
func = functions.Function(func_type, params, scope)
return func | Handle FuncDecl nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_func_call(self, node, scope, ctxt, stream):
"""Handle FuncCall nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling function call to '{}'".format(node.name.name))
if node.args is None:
func_args = []
else:
func_args = self._handle_node(node.args, scope, ctxt, stream)
func = self._handle_node(node.name, scope, ctxt, stream)
return func.call(func_args, ctxt, scope, stream, self, node.coord) | Handle FuncCall nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_expr_list(self, node, scope, ctxt, stream):
"""Handle ExprList nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling expression list")
exprs = [
self._handle_node(expr, scope, ctxt, stream) for expr in node.exprs
]
return exprs | Handle ExprList nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_compound(self, node, scope, ctxt, stream):
"""Handle Compound nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling compound statement")
#scope.push()
try:
for child in node.children():
self._handle_node(child, scope, ctxt, stream)
# in case a return occurs, be sure to pop the scope
# (returns are implemented by raising an exception)
finally:
#scope.pop()
pass | Handle Compound nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_return(self, node, scope, ctxt, stream):
"""Handle Return nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling return")
if node.expr is None:
ret_val = None
else:
ret_val = self._handle_node(node.expr, scope, ctxt, stream)
self._dlog("return value = {}".format(ret_val))
raise errors.InterpReturn(ret_val) | Handle Return nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
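# Return values propagate as exceptions: the statement handler raises, and the
# function-call machinery catches to obtain the value (see the comment in
# _handle_compound above). A minimal model of that round trip:
class InterpReturn(Exception):
    def __init__(self, value):
        self.value = value

def run_body():
    raise InterpReturn(7)       # what _handle_return does

try:
    run_body()
    ret = None                  # fell off the end: void return
except InterpReturn as e:
    ret = e.value
assert ret == 7
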
def _handle_enum(self, node, scope, ctxt, stream):
"""Handle enum nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling enum")
if node.type is None:
enum_cls = fields.Int
else:
enum_cls = self._handle_node(node.type, scope, ctxt, stream)
enum_vals = {}
curr_val = enum_cls()
curr_val._pfp__value = -1
for enumerator in node.values.enumerators:
if enumerator.value is not None:
curr_val = self._handle_node(enumerator.value, scope, ctxt, stream)
else:
curr_val = curr_val + 1
curr_val._pfp__freeze()
enum_vals[enumerator.name] = curr_val
enum_vals[fields.PYVAL(curr_val)] = enumerator.name
scope.add_local(enumerator.name, curr_val)
if node.name is not None:
enum_cls = EnumDef(node.name, enum_cls, enum_vals)
scope.add_type_class(node.name, enum_cls)
else:
enum_cls = EnumDef("enum_" + enum_cls.__name__, enum_cls, enum_vals)
# don't add to scope if we don't have a name
return enum_cls | Handle enum nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
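# Starting the counter at -1 makes the first implicit enumerator 0, and an
# explicit value resets the counter; the numbering logic in isolation (pairs
# of (name, explicit_value_or_None) stand in for the AST enumerators):
def assign_enum_values(enumerators):
    vals = {}
    curr = -1
    for name, explicit in enumerators:
        curr = explicit if explicit is not None else curr + 1
        vals[name] = curr
    return vals

assert assign_enum_values([("A", None), ("B", 5), ("C", None)]) == \
    {"A": 0, "B": 5, "C": 6}
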
def _handle_array_decl(self, node, scope, ctxt, stream):
"""Handle ArrayDecl nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling array declaration '{}'".format(node.type.declname))
if node.dim is None:
# will be used
array_size = None
else:
array_size = self._handle_node(node.dim, scope, ctxt, stream)
self._dlog("array size = {}".format(array_size))
# TODO node.dim_quals
# node.type
field_cls = self._handle_node(node.type, scope, ctxt, stream)
self._dlog("field class = {}".format(field_cls))
array = ArrayDecl(field_cls, array_size)
#array = fields.Array(array_size, field_cls)
array._pfp__name = node.type.declname
#array._pfp__parse(stream)
return array | Handle ArrayDecl nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_array_ref(self, node, scope, ctxt, stream):
"""Handle ArrayRef nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
ary = self._handle_node(node.name, scope, ctxt, stream)
subscript = self._handle_node(node.subscript, scope, ctxt, stream)
return ary[fields.PYVAL(subscript)] | Handle ArrayRef nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_if(self, node, scope, ctxt, stream):
"""Handle If nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling if/ternary_op")
cond = self._handle_node(node.cond, scope, ctxt, stream)
if cond:
# there should always be an iftrue
return self._handle_node(node.iftrue, scope, ctxt, stream)
else:
if node.iffalse is not None:
return self._handle_node(node.iffalse, scope, ctxt, stream) | Handle If nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_for(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling for")
if node.init is not None:
# perform the init
self._handle_node(node.init, scope, ctxt, stream)
while node.cond is None or self._handle_node(node.cond, scope, ctxt, stream):
if node.stmt is not None:
try:
# do the for body
self._handle_node(node.stmt, scope, ctxt, stream)
except errors.InterpBreak as e:
break
# we still need to interpret the "next" statement,
# so just pass
except errors.InterpContinue as e:
pass
if node.next is not None:
# do the next statement
self._handle_node(node.next, scope, ctxt, stream) | Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
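# break and continue are exceptions as well; continue is swallowed with "pass"
# precisely so the "next" statement still executes. The same control flow,
# modeled standalone:
class InterpBreak(Exception): pass
class InterpContinue(Exception): pass

def body(i):
    if i == 2: raise InterpContinue()
    if i == 4: raise InterpBreak()

visited = []
i = 0
while i < 10:
    try:
        body(i)
        visited.append(i)
    except InterpBreak:
        break
    except InterpContinue:
        pass
    i += 1                      # the "next" statement always runs
assert visited == [0, 1, 3]
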
def _handle_while(self, node, scope, ctxt, stream):
"""Handle break node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling while")
while node.cond is None or self._handle_node(node.cond, scope, ctxt, stream):
if node.stmt is not None:
try:
self._handle_node(node.stmt, scope, ctxt, stream)
except errors.InterpBreak as e:
break
except errors.InterpContinue as e:
                pass | Handle While nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
def _handle_switch(self, node, scope, ctxt, stream):
"""Handle break node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
def exec_case(idx, cases):
# keep executing cases until a break is found,
# or they've all been executed
for case in cases[idx:]:
stmts = case.stmts
try:
for stmt in stmts:
self._handle_node(stmt, scope, ctxt, stream)
except errors.InterpBreak as e:
break
def get_stmts(stmts, res=None):
if res is None:
res = []
stmts = self._flatten_list(stmts)
for stmt in stmts:
if isinstance(stmt, tuple):
stmt = stmt[1]
res.append(stmt)
if stmt.__class__ in [AST.Case, AST.Default]:
get_stmts(stmt.stmts, res)
return res
    def get_cases(nodes):
cases = []
stmts = get_stmts(nodes)
for stmt in stmts:
if stmt.__class__ in [AST.Case, AST.Default]:
cases.append(stmt)
stmt.stmts = []
else:
cases[-1].stmts.append(stmt)
return cases
cond = self._handle_node(node.cond, scope, ctxt, stream)
default_idx = None
found_match = False
cases = getattr(node, "pfp_cases", None)
if cases is None:
cases = get_cases(node.stmt.children())
node.pfp_cases = cases
for idx,child in enumerate(cases):
if child.__class__ == AST.Default:
default_idx = idx
continue
elif child.__class__ == AST.Case:
expr = self._handle_node(child.expr, scope, ctxt, stream)
if expr == cond:
found_match = True
exec_case(idx, cases)
break
if default_idx is not None and not found_match:
        exec_case(default_idx, cases) | Handle Switch nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | entailment |
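# exec_case starts at the matched index and keeps executing later cases until
# an InterpBreak, giving C-style fall-through; a standalone model with case
# bodies reduced to lists of strings:
class InterpBreak(Exception): pass

def exec_case(idx, cases):
    executed = []
    try:
        for label, stmts in cases[idx:]:
            for stmt in stmts:
                if stmt == "break":
                    raise InterpBreak()
                executed.append(stmt)
    except InterpBreak:
        pass
    return executed

cases = [("1", ["a", "break"]), ("2", ["b"]), ("default", ["c"])]
assert exec_case(0, cases) == ["a"]        # break stops execution
assert exec_case(1, cases) == ["b", "c"]   # case 2 falls through to default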