sentence1: string (lengths 52–3.87M)
sentence2: string (lengths 1–47.2k)
label: string (1 class: entailment)
def variations(word):
    """Create variations of the word based on letter combinations like oo, sh, etc."""
    if len(word) == 1:
        return [[word[0]]]
    elif word == 'aa':
        return [['A']]
    elif word == 'ee':
        return [['i']]
    elif word == 'ei':
        return [['ei']]
    elif word in ['oo', 'ou']:
        return [['u']]
    elif word == 'kha':
        return [['kha'], ['kh', 'a']]
    elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
        return [[word]]
    elif word in ["'ee", "'ei"]:
        return [["'i"]]
    elif word in ["'oo", "'ou"]:
        return [["'u"]]
    elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]:
        return [[word[0] + "'"]]
    elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]:
        return [["'" + word[1]]]
    elif len(word) == 2 and word[0] == word[1]:
        return [[word[0]]]

    if word[:2] == 'aa':
        return [['A'] + i for i in variations(word[2:])]
    elif word[:2] == 'ee':
        return [['i'] + i for i in variations(word[2:])]
    elif word[:2] in ['oo', 'ou']:
        return [['u'] + i for i in variations(word[2:])]
    elif word[:3] == 'kha':
        return \
            [['kha'] + i for i in variations(word[3:])] + \
            [['kh', 'a'] + i for i in variations(word[3:])] + \
            [['k', 'h', 'a'] + i for i in variations(word[3:])]
    elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
        return \
            [[word[:2]] + i for i in variations(word[2:])] + \
            [[word[0]] + i for i in variations(word[1:])]
    elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]:
        return [[word[:2]] + i for i in variations(word[2:])]
    elif word[:3] in ["'ee", "'ei"]:
        return [["'i"] + i for i in variations(word[3:])]
    elif word[:3] in ["'oo", "'ou"]:
        return [["'u"] + i for i in variations(word[3:])]
    elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]:
        return [[word[:2]] + i for i in variations(word[2:])]
    elif len(word) >= 2 and word[0] == word[1]:
        return [[word[0]] + i for i in variations(word[2:])]
    else:
        return [[word[0]] + i for i in variations(word[1:])]
Create variations of the word based on letter combinations like oo, sh, etc.
entailment
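A doctest-style sketch of the recursion above (input hypothetical): 'kh' is tried both as a digraph and as separate letters, while 'oo' collapses to 'u':

>>> variations('khoob')
[['kh', 'u', 'b'], ['k', 'h', 'u', 'b']]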
def f2p_word(word, max_word_size=15, cutoff=3):
    """Convert a single word from Finglish to Persian.

    max_word_size: Maximum size of the words to consider. Words larger
    than this will be kept unchanged.

    cutoff: The cut-off point. For each word, there could be many
    possibilities. By default 3 of these possibilities are considered
    for each word. This number can be changed by this argument.
    """
    original_word = word
    word = word.lower()

    c = dictionary.get(word)
    if c:
        return [(c, 1.0)]

    if word == '':
        return []
    elif len(word) > max_word_size:
        return [(original_word, 1.0)]

    results = []
    for w in variations(word):
        results.extend(f2p_word_internal(w, original_word))

    # sort results based on the confidence value
    results.sort(key=lambda r: r[1], reverse=True)

    # return the top `cutoff` results in order to cut down on the
    # number of possibilities
    return results[:cutoff]
Convert a single word from Finglish to Persian. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument.
entailment
def f2p_list(phrase, max_word_size=15, cutoff=3):
    """Convert a phrase from Finglish to Persian.

    phrase: The phrase to convert.

    max_word_size: Maximum size of the words to consider. Words larger
    than this will be kept unchanged.

    cutoff: The cut-off point. For each word, there could be many
    possibilities. By default 3 of these possibilities are considered
    for each word. This number can be changed by this argument.

    Returns a list of lists, each sub-list contains a number of
    possibilities for each word as a pair of (word, confidence) values.
    """
    # split the phrase into words
    results = [w for w in sep_regex.split(phrase) if w]

    # return an empty list if no words
    if results == []:
        return []

    # convert each word separately
    results = [f2p_word(w, max_word_size, cutoff) for w in results]

    return results
Convert a phrase from Finglish to Persian. phrase: The phrase to convert. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. Returns a list of lists, each sub-list contains a number of possibilities for each word as a pair of (word, confidence) values.
entailment
def f2p(phrase, max_word_size=15, cutoff=3):
    """Convert a Finglish phrase to the most probable Persian phrase.
    """
    results = f2p_list(phrase, max_word_size, cutoff)
    return ' '.join(i[0][0] for i in results)
Convert a Finglish phrase to the most probable Persian phrase.
entailment
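A usage sketch for the conversion pipeline above; the exact Persian output depends on the converter's bundled word dictionary, so treat the result as illustrative:

>>> f2p('salam donya')
'سلام دنیا'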
def distribution_version(name):
    """try to get the version of the named distribution, returns None on failure"""
    from pkg_resources import get_distribution, DistributionNotFound
    try:
        dist = get_distribution(name)
    except DistributionNotFound:
        pass
    else:
        return dist.version
try to get the version of the named distribution, returns None on failure
entailment
def initpkg(pkgname, exportdefs, attr=None, eager=False):
    """ initialize given package from the export definitions. """
    attr = attr or {}
    oldmod = sys.modules.get(pkgname)
    d = {}
    f = getattr(oldmod, '__file__', None)
    if f:
        f = _py_abspath(f)
    d['__file__'] = f
    if hasattr(oldmod, '__version__'):
        d['__version__'] = oldmod.__version__
    if hasattr(oldmod, '__loader__'):
        d['__loader__'] = oldmod.__loader__
    if hasattr(oldmod, '__path__'):
        d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
    if hasattr(oldmod, '__package__'):
        d['__package__'] = oldmod.__package__
    if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
        d['__doc__'] = oldmod.__doc__
    d.update(attr)
    if hasattr(oldmod, "__dict__"):
        oldmod.__dict__.update(d)
    mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
    sys.modules[pkgname] = mod
    # eagerload in bpython to avoid their monkeypatching breaking packages
    if 'bpython' in sys.modules or eager:
        for module in list(sys.modules.values()):
            if isinstance(module, ApiModule):
                # touching __dict__ forces all lazy attributes to resolve
                module.__dict__
initialize given package from the export definitions.
entailment
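`initpkg` is the heart of the apipkg lazy-export pattern; a typical call, placed in a package's `__init__.py` (module paths hypothetical), looks like:

# mypkg/__init__.py
import apipkg

apipkg.initpkg(__name__, {
    'path': {
        'local': '_mypkg.pathutil:LocalPath',   # resolved on first attribute access
    },
})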
def importobj(modpath, attrname):
    """imports a module, then resolves the attrname on it"""
    module = __import__(modpath, None, None, ['__doc__'])
    if not attrname:
        return module

    retval = module
    names = attrname.split(".")
    for x in names:
        retval = getattr(retval, x)
    return retval
imports a module, then resolves the attrname on it
entailment
def __makeattr(self, name):
    """lazily compute value for name or raise AttributeError if unknown."""
    # print "makeattr", self.__name__, name
    target = None
    if '__onfirstaccess__' in self.__map__:
        target = self.__map__.pop('__onfirstaccess__')
        importobj(*target)()
    try:
        modpath, attrname = self.__map__[name]
    except KeyError:
        if target is not None and name != '__onfirstaccess__':
            # retry, onfirstaccess might have set attrs
            return getattr(self, name)
        raise AttributeError(name)
    else:
        result = importobj(modpath, attrname)
        setattr(self, name, result)
        try:
            del self.__map__[name]
        except KeyError:
            pass  # in a recursive-import situation a double-del can happen
        return result
lazily compute value for name or raise AttributeError if unknown.
entailment
def _request(self, http_method, relative_url='', **kwargs):
    """Does actual HTTP request using requests library."""
    # It could be possible to call api.resource.get('/index'),
    # but it would be non-intuitive that the path would resolve
    # to the root of the domain
    relative_url = self._remove_leading_slash(relative_url)

    # Add default kwargs with possible custom kwargs returned by
    # before_request
    new_kwargs = self.default_kwargs().copy()
    custom_kwargs = self.before_request(
        http_method,
        relative_url,
        kwargs.copy()
    )
    new_kwargs.update(custom_kwargs)

    response = requests.request(
        http_method,
        self._join_url(relative_url),
        **new_kwargs
    )

    return self.after_request(response)
Does actual HTTP request using requests library.
entailment
def _new_url(self, relative_url):
    """Create new Url which points to new url."""
    return Url(
        urljoin(self._base_url, relative_url),
        **self._default_kwargs
    )
Create new Url which points to new url.
entailment
def roles(self):
    """gets user groups"""
    result = AuthGroup.objects(creator=self.client).only('role')
    return json.loads(result.to_json())
gets user groups
entailment
def get_permissions(self, role):
    """gets permissions of role"""
    target_role = AuthGroup.objects(role=role, creator=self.client).first()
    if not target_role:
        return []  # was the string '[]'; an actual list keeps the return type consistent
    targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
    return json.loads(targets.to_json())
gets permissions of role
entailment
def get_user_permissions(self, user):
    """get permissions of a user"""
    memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
    results = []
    for each in memberShipRecords:
        for group in each.groups:
            targetPermissionRecords = AuthPermission.objects(creator=self.client, groups=group).only('name')
            for each_permission in targetPermissionRecords:
                results.append({'name': each_permission.name})
    return results
get permissions of a user
entailment
def get_user_roles(self, user):
    """get roles of a user"""
    memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
    results = []
    for each in memberShipRecords:
        for group in each.groups:
            results.append({'role': group.role})
    return results
get roles of a user
entailment
def get_role_members(self, role):
    """get members of a role"""
    targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
    members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
    return json.loads(members.to_json())
get members of a role
entailment
def which_roles_can(self, name):
    """Which role can SendMail? """
    targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
    return [{'role': group.role} for group in targetPermissionRecords.groups]
Which role can SendMail?
entailment
def which_users_can(self, name):
    """Which users can SendMail? """
    _roles = self.which_roles_can(name)
    result = [self.get_role_members(i.get('role')) for i in _roles]
    return result
Which users can SendMail?
entailment
def get_role(self, role):
    """Returns a role object """
    role = AuthGroup.objects(role=role, creator=self.client).first()
    return role
Returns a role object
entailment
def add_role(self, role, description=None):
    """ Creates a new group """
    new_group = AuthGroup(role=role, creator=self.client)
    try:
        new_group.save()
        return True
    except NotUniqueError:
        return False
Creates a new group
entailment
def del_role(self, role):
    """ deletes a group """
    target = AuthGroup.objects(role=role, creator=self.client).first()
    if target:
        target.delete()
        return True
    else:
        return False
deletes a group
entailment
def add_membership(self, user, role):
    """ make user a member of a group """
    targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
    if not targetGroup:
        return False

    target = AuthMembership.objects(user=user, creator=self.client).first()
    if not target:
        target = AuthMembership(user=user, creator=self.client)
    if role not in [i.role for i in target.groups]:
        target.groups.append(targetGroup)
        target.save()
    return True
make user a member of a group
entailment
def del_membership(self, user, role):
    """ dismember user from a group """
    if not self.has_membership(user, role):
        return True
    targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
    if not targetRecord:
        return True
    for group in targetRecord.groups:
        if group.role == role:
            targetRecord.groups.remove(group)
            targetRecord.save()
    return True
dismember user from a group
entailment
def has_membership(self, user, role):
    """ checks if user is member of a group"""
    targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
    if targetRecord:
        return role in [i.role for i in targetRecord.groups]
    return False
checks if user is member of a group
entailment
def add_permission(self, role, name):
    """ authorize a group for something """
    if self.has_permission(role, name):
        return True
    targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
    if not targetGroup:
        return False
    # Create or update
    permission = AuthPermission.objects(name=name).update(
        add_to_set__groups=[targetGroup],
        creator=self.client,
        upsert=True
    )
    return True
authorize a group for something
entailment
def del_permission(self, role, name):
    """ revoke authorization of a group """
    if not self.has_permission(role, name):
        return True
    targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
    target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
    if not target:
        return True
    target.delete()
    return True
revoke authorization of a group
entailment
def user_has_permission(self, user, name):
    """ verify user has permission """
    targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
    if not targetRecord:
        return False
    for group in targetRecord.groups:
        if self.has_permission(group.role, name):
            return True
    return False
verify user has permission
entailment
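Taken together, the methods above form a small role-based access control (RBAC) API over MongoEngine documents; a hypothetical session (the `auth` object and `user` are invented names for an instance of this class and a user document) might run:

auth.add_role('editor')                       # True, or False if the group already exists
auth.add_membership(user, 'editor')           # attach the user to the group
auth.add_permission('editor', 'SendMail')     # authorize the group
auth.user_has_permission(user, 'SendMail')    # True
auth.which_roles_can('SendMail')              # [{'role': 'editor'}]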
def bump_version(version, bump='patch'):
    """patch: patch, minor, major"""
    try:
        # list() so the parts can be indexed and mutated (map() is lazy in Python 3)
        parts = list(map(int, version.split('.')))
    except ValueError:
        fail('Current version is not numeric')
    if bump == 'patch':
        parts[2] += 1
    elif bump == 'minor':
        parts[1] += 1
        parts[2] = 0
    elif bump == 'major':
        parts[0] += 1
        parts[1] = 0
        parts[2] = 0
    return '.'.join(map(str, parts))
patch: patch, minor, major
entailment
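With the `list(...)` fix above, the behavior is deterministic:

>>> bump_version('1.2.3')            # patch is the default
'1.2.4'
>>> bump_version('1.2.3', 'minor')
'1.3.0'
>>> bump_version('1.2.3', 'major')
'2.0.0'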
def handler(event):
    """Signal decorator to allow use of callback functions as class decorators."""
    def decorator(fn):
        def apply(cls):
            event.connect(fn, sender=cls)
            return cls
        fn.apply = apply
        return fn
    return decorator
Signal decorator to allow use of callback functions as class decorators.
entailment
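A usage sketch assuming a blinker-style signal (the decorator only needs an object with a `.connect(fn, sender=...)` method; the signal, function, and class names are invented):

from blinker import signal

post_save = signal('post-save')

@handler(post_save)
def on_post_save(sender, **kwargs):
    print('saved:', sender)

# fn.apply lets the connected callback double as a class decorator:
@on_post_save.apply
class Article(object):
    pass   # on_post_save is now connected with sender=Article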
def stringify(req, resp):
    """
    dumps all valid jsons
    This is the latest after hook
    """
    if isinstance(resp.body, dict):
        try:
            resp.body = json.dumps(resp.body)
        except (TypeError, ValueError):
            # the original caught `nameError`, an undefined name; json.dumps
            # raises TypeError/ValueError on unserializable content
            resp.status = falcon.HTTP_500
dumps all valid jsons This is the latest after hook
entailment
def process_response(self, req, resp, resource):
    """Post-processing of the response (after routing).

    Args:
        req: Request object.
        resp: Response object.
        resource: Resource object to which the request was routed.
            May be None if no route was found for the request.
    """
    if isinstance(resp.body, dict):
        try:
            resp.body = json.dumps(resp.body)
        except (TypeError, ValueError):
            # the original caught `nameError`, an undefined name; json.dumps
            # raises TypeError/ValueError on unserializable content
            resp.status = falcon.HTTP_500
Post-processing of the response (after routing). Args: req: Request object. resp: Response object. resource: Resource object to which the request was routed. May be None if no route was found for the request.
entailment
def run(command=None, *arguments):
    """
    Run the given command.

    Parameters:
    :param command: A string describing a command.
    :param arguments: A list of strings describing arguments to the command.
    """
    if command is None:
        sys.exit('django-shortcuts: No argument was supplied, please specify one.')

    if command in ALIASES:
        command = ALIASES[command]

    if command == 'startproject':
        return call('django-admin.py startproject %s' % ' '.join(arguments), shell=True)

    script_path = os.getcwd()
    while not os.path.exists(os.path.join(script_path, 'manage.py')):
        base_dir = os.path.dirname(script_path)
        if base_dir != script_path:
            script_path = base_dir
        else:
            sys.exit('django-shortcuts: No \'manage.py\' script found in this directory or its parents.')

    return call('%(python)s %(script_path)s %(command)s %(arguments)s' % {
        'python': sys.executable,
        'script_path': os.path.join(script_path, 'manage.py'),
        'command': command or '',
        'arguments': ' '.join(arguments)
    }, shell=True)
Run the given command. Parameters: :param command: A string describing a command. :param arguments: A list of strings describing arguments to the command.
entailment
def instantiate(self, scope, args, interp):
    """Create a ParamList instance for actual interpretation

    :args: TODO
    :returns: A ParamList object
    """
    param_instances = []

    BYREF = "byref"

    # TODO are default values for function parameters allowed in 010?
    for param_name, param_cls in self._params:
        # we don't instantiate a copy of byref params
        if getattr(param_cls, "byref", False):
            param_instances.append(BYREF)
        else:
            field = param_cls()
            field._pfp__name = param_name
            param_instances.append(field)

    if len(args) != len(param_instances):
        raise errors.InvalidArguments(
            self._coords,
            [x.__class__.__name__ for x in args],
            [x.__class__.__name__ for x in param_instances]
        )

    # TODO type checking on provided types
    for x in six.moves.range(len(args)):
        param = param_instances[x]

        # arrays are simply passed through into the function. We shouldn't
        # have to worry about frozenness/unfrozenness at this point
        if param is BYREF or isinstance(param, pfp.fields.Array):
            param = args[x]
            param_instances[x] = param
            scope.add_local(self._params[x][0], param)
        else:
            param._pfp__set_value(args[x])
            scope.add_local(param._pfp__name, param)

        param._pfp__interp = interp

    return ParamList(param_instances)
Create a ParamList instance for actual interpretation :args: TODO :returns: A ParamList object
entailment
def get_experiments(base, load=False):
    ''' get_experiments will return loaded json for all valid experiments
        from an experiment folder

    :param base: full path to the base folder with experiments inside
    :param load: if True, returns a list of loaded config.json objects.
                 If False (default) returns the paths to the experiments
    '''
    experiments = find_directories(base)
    valid_experiments = [e for e in experiments if validate(e, cleanup=False)]
    bot.info("Found %s valid experiments" % (len(valid_experiments)))
    if load is True:
        valid_experiments = load_experiments(valid_experiments)
    # TODO at some point in this workflow we would want to grab instructions
    # from help and variables from labels, environment, etc.
    return valid_experiments
get_experiments will return loaded json for all valid experiments from an experiment folder :param base: full path to the base folder with experiments inside :param load: if True, returns a list of loaded config.json objects. If False (default) returns the paths to the experiments
entailment
def load_experiments(folders):
    '''load_experiments is a wrapper for load_experiment to read multiple experiments

    :param folders: a list of experiment folders to load, full paths
    '''
    experiments = []
    if isinstance(folders, str):
        folders = [folders]  # was `[experiment_folders]`, an undefined name
    for folder in folders:
        exp = load_experiment(folder)
        experiments.append(exp)
    return experiments
load_experiments is a wrapper for load_experiment to read multiple experiments :param folders: a list of experiment folders to load, full paths
entailment
def load_experiment(folder, return_path=False):
    '''load_experiment reads in the config.json for a folder, returns None if not found.

    :param folder: full path to experiment folder
    :param return_path: if True, don't load the config.json, but return it
    '''
    fullpath = os.path.abspath(folder)
    config = "%s/config.json" % (fullpath)
    if not os.path.exists(config):
        bot.error("config.json could not be found in %s" % (folder))
        config = None
    if return_path is False and config is not None:
        config = read_json(config)
    return config
load_experiment: reads in the config.json for a folder, returns None if not found. :param folder: full path to experiment folder :param return_path: if True, don't load the config.json, but return it
entailment
def get_selection(available, selection, base='/scif/apps'):
    '''we compare the basename (the exp_id) of the selection and available,
       regardless of parent directories'''
    if isinstance(selection, str):
        selection = selection.split(',')
    available = [os.path.basename(x) for x in available]
    selection = [os.path.basename(x) for x in selection]
    finalset = [x for x in selection if x in available]
    if len(finalset) == 0:
        bot.warning("No user experiments selected, providing all %s" % (len(available)))
        finalset = available
    return ["%s/%s" % (base, x) for x in finalset]
we compare the basename (the exp_id) of the selection and available, regardless of parent directories
entailment
def make_lookup(experiment_list, key='exp_id'):
    '''make_lookup returns dict object to quickly look up query experiment on exp_id

    :param experiment_list: a list of query (dict objects)
    :param key: the key in the dictionary to base the lookup key on (str)
    :returns lookup: dict (json) with key as `key` from query_list
    '''
    lookup = dict()
    for single_experiment in experiment_list:
        if isinstance(single_experiment, str):
            single_experiment = load_experiment(single_experiment)
        lookup_key = single_experiment[key]
        lookup[lookup_key] = single_experiment
    return lookup
make_lookup returns dict object to quickly look up query experiment on exp_id :param experiment_list: a list of query (dict objects) :param key: the key in the dictionary to base the lookup key on (str) :returns lookup: dict (json) with key as `key` from query_list
entailment
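A doctest-style sketch of `make_lookup` with hypothetical experiment dicts:

>>> exps = [{'exp_id': 'stroop', 'name': 'Stroop Task'},
...         {'exp_id': 'nback', 'name': 'N-Back'}]
>>> make_lookup(exps)['stroop']['name']
'Stroop Task'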
def validate(folder=None, cleanup=False):
    '''validate

    :param folder: full path to experiment folder with config.json. If path
                   begins with https, we assume to be starting from a repository.
    '''
    from expfactory.validator import ExperimentValidator
    cli = ExperimentValidator()
    return cli.validate(folder, cleanup=cleanup)
validate :param folder: full path to experiment folder with config.json. If path begins with https, we assume to be starting from a repository.
entailment
def get_library(lookup=True, key='exp_id'):
    '''return the experiment library, as a lookup dict keyed by `key` when
       lookup is True (the default), or as the raw list otherwise'''
    library = None
    response = requests.get(EXPFACTORY_LIBRARY)
    if response.status_code == 200:
        library = response.json()
        if lookup is True:
            return make_lookup(library, key=key)
    return library
return the experiment library, as a lookup dict keyed by `key` when lookup is True (the default), or as the raw list otherwise
entailment
def int3(params, ctxt, scope, stream, coord, interp):
    """Define the ``Int3()`` function in the interpreter. Calling ``Int3()``
    will drop the user into an interactive debugger.
    """
    if interp._no_debug:
        return

    if interp._int3:
        interp.debugger = PfpDbg(interp)
        interp.debugger.cmdloop()
Define the ``Int3()`` function in the interpreter. Calling ``Int3()`` will drop the user into an interactive debugger.
entailment
def initdb(self):
    '''initdb will check for writability of the data folder, meaning that
       it is bound to the local machine. If the folder isn't bound, expfactory
       runs in demo mode (not saving data)
    '''
    self.database = EXPFACTORY_DATABASE
    bot.info("DATABASE: %s" % self.database)

    # Supported database options
    valid = ('sqlite', 'postgres', 'mysql', 'filesystem')
    if not self.database.startswith(valid):
        bot.warning('%s is not yet a supported type, saving to filesystem.' % self.database)
        self.database = 'filesystem'

    # Add functions specific to database type
    self.init_db()  # uses url in self.database

    bot.log("Database: %s" % self.database)
initdb will check for writability of the data folder, meaning that it is bound to the local machine. If the folder isn't bound, expfactory runs in demo mode (not saving data)
entailment
def setup(self):
    ''' obtain database and filesystem preferences from defaults,
        and compare with selection in container.
    '''
    self.selection = EXPFACTORY_EXPERIMENTS
    self.ordered = len(EXPFACTORY_EXPERIMENTS) > 0
    self.data_base = EXPFACTORY_DATA
    self.study_id = EXPFACTORY_SUBID
    self.base = EXPFACTORY_BASE
    self.randomize = EXPFACTORY_RANDOMIZE
    self.headless = EXPFACTORY_HEADLESS

    # Generate variables, if they exist
    self.vars = generate_runtime_vars() or None

    available = get_experiments("%s" % self.base)
    self.experiments = get_selection(available, self.selection)
    self.logger.debug(self.experiments)
    self.lookup = make_lookup(self.experiments)
    final = "\n".join(list(self.lookup.keys()))

    bot.log("Headless mode: %s" % self.headless)
    bot.log("User has selected: %s" % self.selection)
    bot.log("Experiments Available: %s" % "\n".join(available))
    bot.log("Randomize: %s" % self.randomize)
    bot.log("Final Set \n%s" % final)
obtain database and filesystem preferences from defaults, and compare with selection in container.
entailment
def get_next(self, session):
    '''return the name of the next experiment, depending on the user's
       choice to randomize. We don't remove any experiments here, that is
       done on finish, in the case the user doesn't submit data (and thus
       finish). A return of None means the user has completed the battery
       of experiments.
    '''
    next = None
    experiments = session.get('experiments', [])
    if len(experiments) > 0:
        if app.randomize is True:
            next = random.choice(range(0, len(experiments)))
            next = experiments[next]
        else:
            next = experiments[0]
    return next
return the name of the next experiment, depending on the user's choice to randomize. We don't remove any experiments here, that is done on finish, in the case the user doesn't submit data (and thus finish). A return of None means the user has completed the battery of experiments.
entailment
def finish_experiment(self, session, exp_id):
    '''remove an experiment from the list after completion.
    '''
    self.logger.debug('Finishing %s' % exp_id)
    experiments = session.get('experiments', [])
    experiments = [x for x in experiments if x != exp_id]
    session['experiments'] = experiments
    return experiments
remove an experiment from the list after completion.
entailment
def find_subdirectories(basepath):
    ''' Return directories (and sub) starting from a base '''
    directories = []
    for root, dirnames, filenames in os.walk(basepath):
        new_directories = [d for d in dirnames if d not in directories]
        directories = directories + new_directories
    return directories
Return directories (and sub) starting from a base
entailment
def find_directories(root, fullpath=True):
    ''' Return directories at one level specified by user (not recursive) '''
    directories = []
    for item in os.listdir(root):
        # Don't include hidden directories
        if not re.match("^[.]", item):
            if os.path.isdir(os.path.join(root, item)):
                if fullpath:
                    directories.append(os.path.abspath(os.path.join(root, item)))
                else:
                    directories.append(item)
    return directories
Return directories at one level specified by user (not recursive)
entailment
def copy_directory(src, dest, force=False):
    ''' Copy an entire directory recursively '''
    if os.path.exists(dest) and force is True:
        shutil.rmtree(dest)
    try:
        shutil.copytree(src, dest)
    except OSError as e:
        # If the error was caused because the source wasn't a directory
        if e.errno == errno.ENOTDIR:
            shutil.copy(src, dest)
        else:
            bot.error('Directory not copied. Error: %s' % e)
            sys.exit(1)
Copy an entire directory recursively
entailment
def clone(url, tmpdir=None):
    '''clone a repository from Github'''
    if tmpdir is None:
        tmpdir = tempfile.mkdtemp()
    name = os.path.basename(url).replace('.git', '')
    dest = '%s/%s' % (tmpdir, name)
    return_code = os.system('git clone %s %s' % (url, dest))
    if return_code == 0:
        return dest
    bot.error('Error cloning repo.')
    sys.exit(return_code)
clone a repository from Github
entailment
def run_command(cmd):
    '''run_command uses subprocess to send a command to the terminal.

    :param cmd: the command to send, should be a list for subprocess
    '''
    output = Popen(cmd, stderr=STDOUT, stdout=PIPE)
    t = output.communicate()[0], output.returncode
    output = {'message': t[0], 'return_code': t[1]}
    return output
run_command uses subprocess to send a command to the terminal. :param cmd: the command to send, should be a list for subprocess
entailment
def get_template(name, base=None):
    '''read in and return a template file '''
    # If the file doesn't exist, assume relative to base
    template_file = name
    if not os.path.exists(template_file):
        if base is None:
            base = get_templatedir()
        template_file = "%s/%s" % (base, name)

    # Then try again, if it still doesn't exist, bad name
    if os.path.exists(template_file):
        with open(template_file, "r") as filey:
            template = "".join(filey.readlines())
        return template

    bot.error("%s does not exist." % template_file)
read in and return a template file
entailment
def sub_template(template, template_tag, substitution):
    '''make a substitution for a template_tag in a template
    '''
    template = template.replace(template_tag, substitution)
    return template
make a substitution for a template_tag in a template
entailment
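Since `sub_template` is a plain `str.replace`, a quick doctest-style check (the tag format here is hypothetical):

>>> sub_template('<title>[SUB_TITLE_SUB]</title>', '[SUB_TITLE_SUB]', 'Stroop Task')
'<title>Stroop Task</title>'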
def get_post_fields(request):
    '''parse through a request, and return fields from post in a dictionary
    '''
    fields = dict()
    for field, value in request.form.items():
        fields[field] = value
    return fields
parse through a request, and return fields from post in a dictionary
entailment
def getenv(variable_key, default=None, required=False, silent=True):
    '''getenv will attempt to get an environment variable. If the variable
       is not found, None is returned.

    :param variable_key: the variable name
    :param required: exit with error if not found
    :param silent: Do not print debugging information for variable
    '''
    variable = os.environ.get(variable_key, default)
    if variable is None and required:
        bot.error("Cannot find environment variable %s, exiting." % variable_key)
        sys.exit(1)

    if not silent:
        if variable is not None:
            bot.verbose2("%s found as %s" % (variable_key, variable))
        else:
            bot.verbose2("%s not defined (None)" % variable_key)

    return variable
getenv will attempt to get an environment variable. If the variable is not found, None is returned. :param variable_key: the variable name :param required: exit with error if not found :param silent: Do not print debugging information for variable
entailment
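A usage sketch for `getenv` (variable names hypothetical; assumes the `bot` logger used above):

database = getenv('EXPFACTORY_DATABASE', default='filesystem')
api_token = getenv('SERVICE_API_TOKEN', required=True)   # exits via sys.exit(1) if unset
debug_mode = getenv('EXPFACTORY_DEBUG', silent=False)    # also logs what was found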
def parse(data=None, template=None, data_file=None, template_file=None,
          interp=None, debug=False, predefines=True, int3=True,
          keep_successful=False, printf=True):
    """Parse the data stream using the supplied template. The data stream
    WILL NOT be automatically closed.

    :data: Input data, can be either a string or a file-like object (StringIO, file, etc)
    :template: template contents (str)
    :data_file: PATH to the data to be used as the input stream
    :template_file: template file path
    :interp: the interpreter to be used (a default one will be created if ``None``)
    :debug: if debug information should be printed while interpreting the template (false)
    :predefines: if built-in type information should be inserted (true)
    :int3: if debugger breaks are allowed while interpreting the template (true)
    :keep_successful: return any successfully parsed data instead of raising an error.
        If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will
        contain the exception object
    :printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`)
        will be noops. (default=``True``)
    :returns: pfp DOM
    """
    if data is None and data_file is None:
        raise Exception("No input data was specified")

    if data is not None and data_file is not None:
        raise Exception("Only one input data may be specified")

    if isinstance(data, six.string_types):
        data = six.StringIO(data)

    if data_file is not None:
        data = open(os.path.expanduser(data_file), "rb")

    if template is None and template_file is None:
        raise Exception("No template specified!")

    if template is not None and template_file is not None:
        raise Exception("Only one template may be specified!")

    orig_filename = "string"
    if template_file is not None:
        orig_filename = template_file
        try:
            with open(os.path.expanduser(template_file), "r") as f:
                template = f.read()
        except Exception as e:
            raise Exception("Could not open template file '{}'".format(template_file))

    # the user may specify their own instance of PfpInterp to be used
    if interp is None:
        interp = pfp.interp.PfpInterp(
            debug=debug,
            parser=PARSER,
            int3=int3,
        )

    # so we can consume single bits at a time
    data = BitwrappedStream(data)

    dom = interp.parse(
        data,
        template,
        predefines=predefines,
        orig_filename=orig_filename,
        keep_successful=keep_successful,
        printf=printf,
    )

    # close the data stream if a data_file was specified
    if data_file is not None:
        data.close()

    return dom
Parse the data stream using the supplied template. The data stream WILL NOT be automatically closed. :data: Input data, can be either a string or a file-like object (StringIO, file, etc) :template: template contents (str) :data_file: PATH to the data to be used as the input stream :template_file: template file path :interp: the interpreter to be used (a default one will be created if ``None``) :debug: if debug information should be printed while interpreting the template (false) :predefines: if built-in type information should be inserted (true) :int3: if debugger breaks are allowed while interpreting the template (true) :keep_successful: return any successfully parsed data instead of raising an error. If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will contain the exception object :printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`) will be noops. (default=``True``) :returns: pfp DOM
entailment
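A usage sketch for `parse` (file paths hypothetical; this mirrors pfp's documented entry point, and `_pfp__show` is the dump method used by the debugger commands later in this section):

import pfp

dom = pfp.parse(
    data_file='image.png',
    template_file='png.bt',
)
print(dom._pfp__show())   # dump the parsed structure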
def Checksum(params, ctxt, scope, stream, coord):
    """
    Runs a simple checksum on a file and returns the result as an int64. The
    algorithm can be one of the following constants:

        CHECKSUM_BYTE     - Treats the file as a set of unsigned bytes
        CHECKSUM_SHORT_LE - Treats the file as a set of unsigned little-endian shorts
        CHECKSUM_SHORT_BE - Treats the file as a set of unsigned big-endian shorts
        CHECKSUM_INT_LE   - Treats the file as a set of unsigned little-endian ints
        CHECKSUM_INT_BE   - Treats the file as a set of unsigned big-endian ints
        CHECKSUM_INT64_LE - Treats the file as a set of unsigned little-endian int64s
        CHECKSUM_INT64_BE - Treats the file as a set of unsigned big-endian int64s
        CHECKSUM_SUM8     - Same as CHECKSUM_BYTE except result output as 8-bits
        CHECKSUM_SUM16    - Same as CHECKSUM_BYTE except result output as 16-bits
        CHECKSUM_SUM32    - Same as CHECKSUM_BYTE except result output as 32-bits
        CHECKSUM_SUM64    - Same as CHECKSUM_BYTE
        CHECKSUM_CRC16
        CHECKSUM_CRCCCITT
        CHECKSUM_CRC32
        CHECKSUM_ADLER32

    If start and size are zero, the algorithm is run on the whole file. If
    they are not zero then the algorithm is run on size bytes starting at
    address start. See the ChecksumAlgBytes and ChecksumAlgStr functions to
    run more complex algorithms. crcPolynomial and crcInitValue can be used
    to set a custom polynomial and initial value for the CRC functions. A
    value of -1 for these parameters uses the default values as described
    in the Check Sum/Hash Algorithms topic. A negative number is returned
    on error.
    """
    checksum_types = {
        0: "CHECKSUM_BYTE",       # Treats the file as a set of unsigned bytes
        1: "CHECKSUM_SHORT_LE",   # Treats the file as a set of unsigned little-endian shorts
        2: "CHECKSUM_SHORT_BE",   # Treats the file as a set of unsigned big-endian shorts
        3: "CHECKSUM_INT_LE",     # Treats the file as a set of unsigned little-endian ints
        4: "CHECKSUM_INT_BE",     # Treats the file as a set of unsigned big-endian ints
        5: "CHECKSUM_INT64_LE",   # Treats the file as a set of unsigned little-endian int64s
        6: "CHECKSUM_INT64_BE",   # Treats the file as a set of unsigned big-endian int64s
        7: "CHECKSUM_SUM8",       # Same as CHECKSUM_BYTE except result output as 8-bits
        8: "CHECKSUM_SUM16",      # Same as CHECKSUM_BYTE except result output as 16-bits
        9: "CHECKSUM_SUM32",      # Same as CHECKSUM_BYTE except result output as 32-bits
        10: "CHECKSUM_SUM64",     # Same as CHECKSUM_BYTE
        11: "CHECKSUM_CRC16",
        12: "CHECKSUM_CRCCCITT",
        13: _crc32,
        14: _checksum_Adler32,
    }

    if len(params) < 1:
        raise errors.InvalidArguments(coord, "at least 1 argument", "{} args".format(len(params)))

    alg = PYVAL(params[0])
    if alg not in checksum_types:
        raise errors.InvalidArguments(coord, "checksum alg must be one of (0-14)", "{}".format(alg))

    start = 0
    if len(params) > 1:
        start = PYVAL(params[1])

    size = 0
    if len(params) > 2:
        size = PYVAL(params[2])

    crc_poly = -1
    if len(params) > 3:
        crc_poly = PYVAL(params[3])

    crc_init = -1
    if len(params) > 4:
        crc_init = PYVAL(params[4])

    stream_pos = stream.tell()

    if start + size == 0:
        stream.seek(0, 0)
        data = stream.read()
    else:
        stream.seek(start, 0)
        data = stream.read(size)

    try:
        return checksum_types[alg](data, crc_init, crc_poly)
    finally:
        # yes, this does execute even though a return statement
        # exists within the try
        stream.seek(stream_pos, 0)
Runs a simple checksum on a file and returns the result as an int64. The algorithm can be one of the following constants: CHECKSUM_BYTE - Treats the file as a set of unsigned bytes CHECKSUM_SHORT_LE - Treats the file as a set of unsigned little-endian shorts CHECKSUM_SHORT_BE - Treats the file as a set of unsigned big-endian shorts CHECKSUM_INT_LE - Treats the file as a set of unsigned little-endian ints CHECKSUM_INT_BE - Treats the file as a set of unsigned big-endian ints CHECKSUM_INT64_LE - Treats the file as a set of unsigned little-endian int64s CHECKSUM_INT64_BE - Treats the file as a set of unsigned big-endian int64s CHECKSUM_SUM8 - Same as CHECKSUM_BYTE except result output as 8-bits CHECKSUM_SUM16 - Same as CHECKSUM_BYTE except result output as 16-bits CHECKSUM_SUM32 - Same as CHECKSUM_BYTE except result output as 32-bits CHECKSUM_SUM64 - Same as CHECKSUM_BYTE CHECKSUM_CRC16 CHECKSUM_CRCCCITT CHECKSUM_CRC32 CHECKSUM_ADLER32 If start and size are zero, the algorithm is run on the whole file. If they are not zero then the algorithm is run on size bytes starting at address start. See the ChecksumAlgBytes and ChecksumAlgStr functions to run more complex algorithms. crcPolynomial and crcInitValue can be used to set a custom polynomial and initial value for the CRC functions. A value of -1 for these parameters uses the default values as described in the Check Sum/Hash Algorithms topic. A negative number is returned on error.
entailment
def FindAll(params, ctxt, scope, stream, coord, interp):
    """
    This function converts the argument data into a set of hex bytes and
    then searches the current file for all occurrences of those bytes.
    data may be any of the basic types or an array of one of the types. If
    data is an array of signed bytes, it is assumed to be a null-terminated
    string. To search for an array of hex bytes, create an unsigned char
    array and fill it with the target value. If the type being searched for
    is a string, the matchcase and wholeworld arguments can be used to
    control the search (see Using Find for more information). method
    controls which search method is used from the following options:

        FINDMETHOD_NORMAL=0    - a normal search
        FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?'
        FINDMETHOD_REGEX=2     - when searching for strings use Regular Expressions

    wildcardMatchLength indicates the maximum number of characters a '*'
    can match when searching using wildcards. If the target is a float or
    double, the tolerance argument indicates that values that are only off
    by the tolerance value still match. If dir is 1 the find direction is
    down and if dir is 0 the find direction is up. start and size can be
    used to limit the area of the file that is searched. start is the
    starting byte address in the file where the search will begin and size
    is the number of bytes after start that will be searched. If size is
    zero, the file will be searched from start to the end of the file.

    The return value is a TFindResults structure. This structure contains a
    count variable indicating the number of matches, and a start array
    holding an array of starting positions, plus a size array which holds
    an array of target lengths. For example, use the following code to find
    all occurrences of the ASCII string "Test" in a file:
    """
    matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp)
    matches = list(matches_iter)

    types = interp.get_types()
    res = types.TFindResults()

    res.count = len(matches)

    # python3 map doesn't return a list
    starts = list(map(lambda m: m.start() + FIND_MATCHES_START_OFFSET, matches))
    res.start = starts

    # python3 map doesn't return a list
    sizes = list(map(lambda m: m.end() - m.start(), matches))
    res.size = sizes

    return res
This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being searched for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
entailment
def FindFirst(params, ctxt, scope, stream, coord, interp):
    """
    This function is identical to the FindAll function except that the
    return value is the position of the first occurrence of the target
    found. A negative number is returned if the value could not be found.
    """
    global FIND_MATCHES_ITER
    FIND_MATCHES_ITER = _find_helper(params, ctxt, scope, stream, coord, interp)

    try:
        first = six.next(FIND_MATCHES_ITER)
        return first.start() + FIND_MATCHES_START_OFFSET
    except StopIteration as e:
        return -1
This function is identical to the FindAll function except that the return value is the position of the first occurrence of the target found. A negative number is returned if the value could not be found.
entailment
def FindNext(params, ctxt, scope, stream, coord):
    """
    This function returns the position of the next occurrence of the
    target value specified with the FindFirst function. If dir is 1, the
    find direction is down. If dir is 0, the find direction is up. The
    return value is the address of the found data, or -1 if the target is
    not found.
    """
    if FIND_MATCHES_ITER is None:
        raise errors.InvalidState()

    direction = 1
    if len(params) > 0:
        direction = PYVAL(params[0])

    if direction != 1:
        # TODO maybe instead of storing the iterator in FIND_MATCHES_ITER,
        # we should go ahead and find all the matches in the file and store
        # them in a list, keeping track of the idx of the current match.
        #
        # This would be highly inefficient on large files though.
        raise NotImplementedError("Reverse searching is not yet implemented")

    try:
        next_match = six.next(FIND_MATCHES_ITER)
        return next_match.start() + FIND_MATCHES_START_OFFSET
    except StopIteration as e:
        return -1
This function returns the position of the next occurrence of the target value specified with the FindFirst function. If dir is 1, the find direction is down. If dir is 0, the find direction is up. The return value is the address of the found data, or -1 if the target is not found.
entailment
def generate_subid(self, token=None):
    '''assumes a flat (file system) database, organized by experiment id,
       and subject id, with data (json) organized by subject identifier
    '''
    # Not headless auto-increments
    if not token:
        token = str(uuid.uuid4())
    # Headless doesn't use any folder_id, just generated token folder
    return "%s/%s" % (self.study_id, token)
assumes a flat (file system) database, organized by experiment id, and subject id, with data (json) organized by subject identifier
entailment
def list_users(self):
    '''list users, each associated with a filesystem folder
    '''
    folders = glob('%s/*' % (self.database))
    folders.sort()
    return [self.print_user(x) for x in folders]
list users, each associated with a filesystem folder
entailment
def print_user(self, user):
    '''print a filesystem database user. A "database" folder that might end
       with the participant status (e.g. _finished) is extracted to print
       in format

       [folder]                        [identifier][studyid]
       /scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid]
    '''
    status = "active"

    if user.endswith('_finished'):
        status = "finished"
    elif user.endswith('_revoked'):
        status = "revoked"

    subid = os.path.basename(user)
    for ext in ['_revoked', '_finished']:
        subid = subid.replace(ext, '')

    subid = '%s\t%s[%s]' % (user, subid, status)
    print(subid)
    return subid
print a filesystem database user. A "database" folder that might end with the participant status (e.g. _finished) is extracted to print in format [folder] [identifier][studyid] /scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid]
entailment
def generate_user(self, subid=None):
    '''generate a new user on the filesystem, still session based so
       we create a new identifier. This function is called from the users
       new entrypoint, and it assumes we want a user generated with a token.
       Since we don't have a database proper, we write the folder name to
       the filesystem.
    '''
    # Only generate a token if a subid is being created
    if subid is None:
        token = str(uuid.uuid4())
        subid = self.generate_subid(token=token)

    data_base = None  # avoid a NameError if the data base folder is missing
    if os.path.exists(self.data_base):                  # /scif/data
        data_base = "%s/%s" % (self.data_base, subid)   # expfactory/00001
        if not os.path.exists(data_base):
            mkdir_p(data_base)

    return data_base
generate a new user on the filesystem, still session based so we create a new identifier. This function is called from the users new entrypoint, and it assumes we want a user generated with a token. since we don't have a database proper, we write the folder name to the filesystem
entailment
def finish_user(self, subid, ext='finished'):
    '''finish user will append "finished" (or other) to the data folder when
       the user has completed (or been revoked from) the battery. For headless,
       this means that the session is ended and the token will not work again
       to rewrite the result. If the user needs to update or redo an experiment,
       this can be done with a new session. Note that if this function is called
       internally by the application at experiment finish, the subid includes a
       study id (e.g., expfactory/xxxx-xxxx) but if called by the user, it may
       not (e.g., xxxx-xxxx). We check for this to ensure it works in both places.
    '''
    finished = None  # avoid a NameError if the data base folder is missing
    if os.path.exists(self.data_base):  # /scif/data

        # Only relevant to filesystem save - the studyid is the top folder
        if subid.startswith(self.study_id):
            data_base = "%s/%s" % (self.data_base, subid)
        else:
            data_base = "%s/%s/%s" % (self.data_base, self.study_id, subid)

        # The renamed folder will be here
        finished = "%s_%s" % (data_base, ext)

        # Participant already finished
        if os.path.exists(finished):
            self.logger.warning('[%s] is already finished: %s' % (subid, data_base))

        # Exists and can finish
        elif os.path.exists(data_base):
            os.rename(data_base, finished)

        # Not finished, doesn't exist
        else:
            finished = None
            self.logger.warning('%s does not exist, cannot finish. %s' % (data_base, subid))

    return finished
finish user will append "finished" (or other) to the data folder when the user has completed (or been revoked from) the battery. For headless, this means that the session is ended and the token will not work again to rewrite the result. If the user needs to update or redo an experiment, this can be done with a new session. Note that if this function is called internally by the application at experiment finish, the subid includes a study id (e.g., expfactory/xxxx-xxxx) but if called by the user, it may not (e.g., xxxx-xxxx). We check for this to ensure it works in both places.
entailment
def restart_user(self, subid):
    '''restart user will remove any "finished" or "revoked" extensions from
       the user folder to restart the session. This command always comes from
       the client users function, so we know subid does not start with the
       study identifier first
    '''
    if os.path.exists(self.data_base):  # /scif/data/<study_id>
        data_base = "%s/%s" % (self.data_base, subid)
        renamed = False
        for ext in ['revoked', 'finished']:
            folder = "%s_%s" % (data_base, ext)
            if os.path.exists(folder):
                os.rename(folder, data_base)
                renamed = True
                self.logger.info('Restarting %s, folder is %s.' % (subid, data_base))
        if not renamed:
            # only warn when nothing was renamed (the original warned unconditionally)
            self.logger.warning('%s does not have revoked or finished folder, no changes necessary.' % (subid))
        return data_base
    self.logger.warning('%s does not exist, cannot restart. %s' % (self.database, subid))
restart user will remove any "finished" or "revoked" extensions from the user folder to restart the session. This command always comes from the client users function, so we know subid does not start with the study identifer first
entailment
def validate_token(self, token):
    '''retrieve a subject based on a token. Valid means we return a participant,
       invalid means we return None
    '''
    # A token that is finished or revoked is not valid
    subid = None
    if not token.endswith(('finished', 'revoked')):
        subid = self.generate_subid(token=token)
        data_base = "%s/%s" % (self.data_base, subid)
        if not os.path.exists(data_base):
            subid = None
    return subid
retrieve a subject based on a token. Valid means we return a participant invalid means we return None
entailment
def refresh_token(self, subid):
    '''refresh or generate a new token for a user. If the user is finished,
       this will also make the folder available again for using.'''
    if os.path.exists(self.data_base):  # /scif/data
        data_base = "%s/%s" % (self.data_base, subid)
        if os.path.exists(data_base):
            refreshed = "%s/%s" % (self.database, str(uuid.uuid4()))
            os.rename(data_base, refreshed)
            return refreshed
        self.logger.warning('%s does not exist, cannot rename %s' % (data_base, subid))
    else:
        self.logger.warning('%s does not exist, cannot rename %s' % (self.database, subid))
refresh or generate a new token for a user. If the user is finished, this will also make the folder available again for using.
entailment
def save_data(self, session, exp_id, content):
    '''save data will obtain the current subid from the session, and save it
       depending on the database type. Currently we just support flat files'''
    subid = session.get('subid')

    # We only attempt save if there is a subject id, set at start
    data_file = None
    if subid is not None:

        data_base = "%s/%s" % (self.data_base, subid)

        # If not running in headless, ensure path exists
        if not self.headless and not os.path.exists(data_base):
            mkdir_p(data_base)

        # Save if headless with a pre-generated token folder, OR not headless
        # (parentheses make the operator precedence explicit)
        do_save = (self.headless and os.path.exists(data_base)) or not self.headless
        if data_base.endswith(('revoked', 'finished')):
            do_save = False

        if do_save is True:
            data_file = "%s/%s-results.json" % (data_base, exp_id)
            if os.path.exists(data_file):
                self.logger.warning('%s exists, and is being overwritten.' % data_file)
            write_json(content, data_file)

    return data_file
save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files
entailment
def init_db(self):
    '''init_db for the filesystem ensures that the base folder (named
       according to the studyid) exists.
    '''
    self.session = None

    if not os.path.exists(self.data_base):
        mkdir_p(self.data_base)

    self.database = "%s/%s" % (self.data_base, self.study_id)
    if not os.path.exists(self.database):
        mkdir_p(self.database)
init_db for the filesystem ensures that the base folder (named according to the studyid) exists.
entailment
def native(name, ret, interp=None, send_interp=False):
    """Used as a decorator to add the decorated function to the
    pfp interpreter so that it can be used from within scripts.

    :param str name: The name of the function as it will be exposed in template scripts.
    :param pfp.fields.Field ret: The return type of the function (a class)
    :param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
    :param bool send_interp: If the current interpreter should be passed to the function.

    Examples:

        The example below defines a ``Sum`` function that will return the sum
        of all parameters passed to the function: ::

            from pfp.fields import PYVAL

            @native(name="Sum", ret=pfp.fields.Int64)
            def sum_numbers(params, ctxt, scope, stream, coord):
                res = 0
                for param in params:
                    res += PYVAL(param)
                return res

        The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function.
        Notice that it requires that the interpreter be sent as a parameter: ::

            @native(name="Int3", ret=pfp.fields.Void, send_interp=True)
            def int3(params, ctxt, scope, stream, coord, interp):
                if interp._no_debug:
                    return
                if interp._int3:
                    interp.debugger = PfpDbg(interp)
                    interp.debugger.cmdloop()
    """
    def native_decorator(func):
        @functools.wraps(func)
        def native_wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
        return native_wrapper
    return native_decorator
Used as a decorator to add the decorated function to the pfp interpreter so that it can be used from within scripts. :param str name: The name of the function as it will be exposed in template scripts. :param pfp.fields.Field ret: The return type of the function (a class) :param pfp.interp.PfpInterp interp: The specific interpreter to add the function to :param bool send_interp: If the current interpreter should be passed to the function. Examples: The example below defines a ``Sum`` function that will return the sum of all parameters passed to the function: :: from pfp.fields import PYVAL @native(name="Sum", ret=pfp.fields.Int64) def sum_numbers(params, ctxt, scope, stream, coord): res = 0 for param in params: res += PYVAL(param) return res The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it requires that the interpreter be sent as a parameter: :: @native(name="Int3", ret=pfp.fields.Void, send_interp=True) def int3(params, ctxt, scope, stream, coord, interp): if interp._no_debug: return if interp._int3: interp.debugger = PfpDbg(interp) interp.debugger.cmdloop()
entailment
def do_peek(self, args):
    """Peek at the next 16 bytes in the stream::

        Example: The peek command will display the next 16 hex bytes in the
        input stream::

            pfp> peek
            89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 .PNG........IHDR
    """
    s = self._interp._stream

    # make a copy of it
    pos = s.tell()
    saved_bits = collections.deque(s._bits)
    data = s.read(0x10)
    s.seek(pos, 0)
    s._bits = saved_bits

    parts = ["{:02x}".format(ord(data[x:x + 1])) for x in range(len(data))]
    if len(parts) != 0x10:
        parts += [" "] * (0x10 - len(parts))
    hex_line = " ".join(parts)

    res = utils.binary("")
    for x in range(len(data)):
        char = data[x:x + 1]
        val = ord(char)
        if 0x20 <= val <= 0x7e:
            res += char
        else:
            res += utils.binary(".")

    if len(res) < 0x10:
        res += utils.binary(" " * (0x10 - len(res)))

    res = "{} {}".format(hex_line, utils.string(res))

    if len(saved_bits) > 0:
        reverse_bits = reversed(list(saved_bits))
        print("bits: {}".format(" ".join(str(x) for x in reverse_bits)))

    print(res)
Peek at the next 16 bytes in the stream:: Example: The peek command will display the next 16 hex bytes in the input stream:: pfp> peek 89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 .PNG........IHDR
entailment
def do_next(self, args):
    """Step over the next statement
    """
    self._do_print_from_last_cmd = True
    self._interp.step_over()
    return True
Step over the next statement
entailment
def do_step(self, args):
    """Step INTO the next statement
    """
    self._do_print_from_last_cmd = True
    self._interp.step_into()
    return True
Step INTO the next statement
entailment
def do_continue(self, args):
    """Continue the interpreter
    """
    self._do_print_from_last_cmd = True
    self._interp.cont()
    return True
Continue the interpreter
entailment
def do_eval(self, args):
    """Eval the user-supplied statement. Note that you can do anything with
    this command that you can do in a template.

    The resulting value of your statement will be displayed.
    """
    try:
        res = self._interp.eval(args)
        if res is not None:
            if hasattr(res, "_pfp__show"):
                print(res._pfp__show())
            else:
                print(repr(res))
    except errors.UnresolvedID as e:
        print("ERROR: " + e.message)
    except Exception as e:
        # re-raise unexpected errors (the original had an unreachable
        # print statement after this raise, dropped here)
        raise
    return False
Eval the user-supplied statement. Note that you can do anything with this command that you can do in a template. The resulting value of your statement will be displayed.
entailment
def do_show(self, args):
    """Show the current structure of __root (no args), or show the result of
    the expression (something that can be eval'd).
    """
    args = args.strip()
    to_show = self._interp._root

    if args != "":
        try:
            to_show = self._interp.eval(args)
        except Exception as e:
            print("ERROR: " + e.message)
            return False

    if hasattr(to_show, "_pfp__show"):
        print(to_show._pfp__show())
    else:
        print(repr(to_show))
Show the current structure of __root (no args), or show the result of the expression (something that can be eval'd).
entailment
def do_quit(self, args): """The quit command """ self._interp.set_break(self._interp.BREAK_NONE) return True
The quit command
entailment
def validate(self, url): ''' takes in a GitHub repository for validation of preview and runtime (and possibly tests passing) ''' # Preview must provide the live URL of the repository if not url.startswith('http') or 'github' not in url: bot.error('Test of preview must be given a GitHub repository.') return False if not self._validate_preview(url): return False return True
takes in a GitHub repository for validation of preview and runtime (and possibly tests passing)
entailment
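A hedged usage sketch; `validator` stands in for whatever class defines validate(), and the URL is illustrative:

# Hypothetical caller; only the validate() contract above is assumed
url = "https://github.com/expfactory-experiments/test-task"
if validator.validate(url):
    print("repository passed preview validation")
else:
    print("repository failed validation")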
def mask_dict_password(dictionary, secret='***'): """Replace passwords with a secret in a dictionary.""" d = dictionary.copy() for k in d: if 'password' in k: d[k] = secret return d
Replace passwords with a secret in a dictionary.
entailment
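A quick usage example (output shown as a comment). Note the substring match, so keys like db_password are masked too; the copy is shallow, so nested dictionaries are left untouched:

config = {"user": "admin", "password": "hunter2", "db_password": "s3cret"}
print(mask_dict_password(config))
# {'user': 'admin', 'password': '***', 'db_password': '***'}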
def save(): '''save is a view to save data. We might want to adjust this to allow for updating saved data, but given a single file it is just one post for now ''' if request.method == 'POST': exp_id = session.get('exp_id') app.logger.debug('Saving data for %s' %exp_id) fields = get_post_fields(request) result_file = app.save_data(session=session, content=fields, exp_id=exp_id) experiments = app.finish_experiment(session, exp_id) app.logger.info('Finished %s, %s remaining.' % (exp_id, len(experiments))) # Note, this doesn't seem to be enough to trigger ajax success return json.dumps({'success':True}), 200, {'ContentType':'application/json'} return json.dumps({'success':False}), 403, {'ContentType':'application/json'}
save is a view to save data. We might want to adjust this to allow for updating saved data, but given a single file it is just one post for now
entailment
def main(args,parser,subparser=None): '''this is the main entrypoint for a container-based web server, with most of the variables coming from the environment. See the Dockerfile template for how this function is executed. ''' # First priority to args.base base = args.base if base is None: base = os.environ.get('EXPFACTORY_BASE') # Does the base folder exist? if base is None: bot.error("You must set a base of experiments with --base") sys.exit(1) if not os.path.exists(base): bot.error("Base folder %s does not exist." % base) sys.exit(1) # Export environment variables for the client experiments = args.experiments if experiments is None: experiments = " ".join(glob("%s/*" % base)) os.environ['EXPFACTORY_EXPERIMENTS'] = experiments # If defined and file exists, set runtime variables if args.vars is not None: if os.path.exists(args.vars): os.environ['EXPFACTORY_RUNTIME_VARS'] = args.vars os.environ['EXPFACTORY_RUNTIME_DELIM'] = args.delim else: bot.warning('Variables file %s not found.' %args.vars) subid = os.environ.get('EXPFACTORY_STUDY_ID') if args.subid is not None: subid = args.subid os.environ['EXPFACTORY_SUBID'] = subid os.environ['EXPFACTORY_RANDOM'] = str(args.disable_randomize) os.environ['EXPFACTORY_BASE'] = base from expfactory.server import start start(port=5000)
this is the main entrypoint for a container-based web server, with most of the variables coming from the environment. See the Dockerfile template for how this function is executed.
entailment
def save_data(self,session, exp_id, content): '''save data will obtain the current subid from the session, and save it depending on the database type.''' from expfactory.database.models import ( Participant, Result ) subid = session.get('subid') bot.info('Saving data for subid %s' % subid) # We only attempt save if there is a subject id, set at start if subid is not None: p = Participant.query.filter(Participant.id == subid).first() # better query here # Preference is to save data under 'data', otherwise do all of it if "data" in content: content = content['data'] if isinstance(content,dict): content = json.dumps(content) result = Result(data=content, exp_id=exp_id, participant_id=p.id) # check if changes from str/int self.session.add(result) p.results.append(result) self.session.commit() bot.info("Participant: %s" %p) bot.info("Result: %s" %result)
save data will obtain the current subid from the session, and save it depending on the database type.
entailment
def init_db(self): '''initialize the database, with the default database path or a custom one of the form sqlite:////scif/data/expfactory.db ''' # Database Setup, use default if uri not provided if self.database == 'sqlite': db_path = os.path.join(EXPFACTORY_DATA, '%s.db' % EXPFACTORY_SUBID) self.database = 'sqlite:///%s' % db_path bot.info("Database located at %s" % self.database) self.engine = create_engine(self.database, convert_unicode=True) self.session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=self.engine)) Base.query = self.session.query_property() # import all modules here that might define models so that # they will be registered properly on the metadata. Otherwise # you will have to import them first before calling init_db() import expfactory.database.models Base.metadata.create_all(bind=self.engine) self.Base = Base
initialize the database, with the default database path or a custom one of the form sqlite:////scif/data/expfactory.db
entailment
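The URI resolution above can be sketched on its own; the constants here are illustrative stand-ins for expfactory's real settings:

import os

EXPFACTORY_DATA = "/scif/data"   # assumed data root
EXPFACTORY_SUBID = "xxxx-xxxx"   # assumed subject id

def resolve_database_uri(database):
    # 'sqlite' resolves to a per-subject file; full URIs pass through verbatim
    if database == 'sqlite':
        db_path = os.path.join(EXPFACTORY_DATA, '%s.db' % EXPFACTORY_SUBID)
        return 'sqlite:///%s' % db_path
    return database

print(resolve_database_uri('sqlite'))  # sqlite:////scif/data/xxxx-xxxx.db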
def reserve_bits(self, num_bits, stream): """Used to "reserve" ``num_bits`` amount of bits in order to keep track of consecutive bitfields (or are they called bitfield groups?). E.g. :: struct { char a:8, b:8; char c:4, d:4, e:8; } :param int num_bits: The number of bits to claim :param pfp.bitwrap.BitwrappedStream stream: The stream to reserve bits on :returns: If room existed for the reservation """ padded = self.interp.get_bitfield_padded() num_bits = PYVAL(num_bits) if padded: if num_bits + self.reserved_bits > self.max_bits: return False # if unpadded, always allow it if not padded: if self._cls_bits is None: self._cls_bits = [] # reserve bits will only be called just prior to reading the bits, # so check to see if we have enough bits in self._cls_bits, else # read what's missing diff = len(self._cls_bits) - num_bits if diff < 0: self._cls_bits += stream.read_bits(-diff) self.reserved_bits += num_bits return True
Used to "reserve" ``num_bits`` amount of bits in order to keep track of consecutive bitfields (or are the called bitfield groups?). E.g. :: struct { char a:8, b:8; char c:4, d:4, e:8; } :param int num_bits: The number of bits to claim :param pfp.bitwrap.BitwrappedStream stream: The stream to reserve bits on :returns: If room existed for the reservation
entailment
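In padded mode the reservation is simple bookkeeping against the declared type's width; a toy model (not pfp's API) of just that check:

class BitReserver(object):
    # Toy model of padded bitfield reservation, e.g. a 1-byte char -> max_bits=8
    def __init__(self, max_bits):
        self.max_bits = max_bits
        self.reserved_bits = 0

    def reserve(self, num_bits):
        if num_bits + self.reserved_bits > self.max_bits:
            return False  # would spill past the padded type boundary
        self.reserved_bits += num_bits
        return True

r = BitReserver(max_bits=8)
print(r.reserve(4), r.reserve(4), r.reserve(1))  # True True False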
def read_bits(self, stream, num_bits, padded, left_right, endian): """Return ``num_bits`` bits, taking into account endianness and left-right bit directions """ if self._cls_bits is None and padded: raw_bits = stream.read_bits(self.cls.width*8) self._cls_bits = self._endian_transform(raw_bits, endian) if self._cls_bits is not None: if num_bits > len(self._cls_bits): raise errors.PfpError("BitfieldRW reached invalid state") if left_right: res = self._cls_bits[:num_bits] self._cls_bits = self._cls_bits[num_bits:] else: res = self._cls_bits[-num_bits:] self._cls_bits = self._cls_bits[:-num_bits] return res else: return stream.read_bits(num_bits)
Return ``num_bits`` bits, taking into account endianness and left-right bit directions
entailment
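The left_right flag only decides which end of the cached bit list is consumed; a minimal sketch of the two slicings:

bits = [1, 0, 1, 1, 0, 0, 1, 0]  # one cached byte worth of bits

# left_right=True: consume from the front
front, bits_after_front = bits[:3], bits[3:]

# left_right=False: consume from the back
back, bits_after_back = bits[-3:], bits[:-3]

print(front)  # [1, 0, 1]
print(back)   # [0, 1, 0]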
def write_bits(self, stream, raw_bits, padded, left_right, endian): """Write the bits. Once the size of the written bits is equal to the number of the reserved bits, flush it to the stream """ if padded: if left_right: self._write_bits += raw_bits else: self._write_bits = raw_bits + self._write_bits if len(self._write_bits) == self.reserved_bits: bits = self._endian_transform(self._write_bits, endian) # if it's padded, and all of the bits in the field weren't used, # we need to flush out the unused bits # TODO should we save the value of the unused bits so the data that # is written out matches exactly what was read? if self.reserved_bits < self.cls.width * 8: filler = [0] * ((self.cls.width * 8) - self.reserved_bits) if left_right: bits += filler else: bits = filler + bits stream.write_bits(bits) self._write_bits = [] else: # if an unpadded field ended up using the same BitfieldRW as # a previous padded field, there will be unwritten bits left in # self._write_bits. These need to be flushed out as well if len(self._write_bits) > 0: stream.write_bits(self._write_bits) self._write_bits = [] stream.write_bits(raw_bits)
Write the bits. Once the size of the written bits is equal to the number of the reserved bits, flush it to the stream
entailment
def _pfp__snapshot(self, recurse=True): """Save off the current value of the field """ if hasattr(self, "_pfp__value"): self._pfp__snapshot_value = self._pfp__value
Save off the current value of the field
entailment
def _pfp__process_metadata(self): """Process the metadata once the entire struct has been declared. """ if self._pfp__metadata_processor is None: return metadata_info = self._pfp__metadata_processor() if isinstance(metadata_info, list): for metadata in metadata_info: if metadata["type"] == "watch": self._pfp__set_watch( metadata["watch_fields"], metadata["update_func"], *metadata["func_call_info"] ) elif metadata["type"] == "packed": del metadata["type"] self._pfp__set_packer(**metadata) if self._pfp__can_unpack(): self._pfp__unpack_data(self.raw_data)
Process the metadata once the entire struct has been declared.
entailment
def _pfp__watch(self, watcher): """Add the watcher to the list of fields that are watching this field """ if self._pfp__parent is not None and isinstance(self._pfp__parent, Union): self._pfp__parent._pfp__watch(watcher) else: self._pfp__watchers.append(watcher)
Add the watcher to the list of fields that are watching this field
entailment
def _pfp__set_watch(self, watch_fields, update_func, *func_call_info): """Subscribe to update events on each field in ``watch_fields``, using ``update_func`` to update self's value when ``watch_field`` changes""" self._pfp__watch_fields = watch_fields for watch_field in watch_fields: watch_field._pfp__watch(self) self._pfp__update_func = update_func self._pfp__update_func_call_info = func_call_info
Subscribe to update events on each field in ``watch_fields``, using ``update_func`` to update self's value when ``watch_field`` changes
entailment
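A generic sketch of the observer pattern underneath, using hypothetical stand-ins rather than pfp's Field classes:

class WatchableField(object):
    def __init__(self, value=0):
        self.value = value
        self.watchers = []

    def watch(self, watcher):
        self.watchers.append(watcher)

    def set(self, value):
        self.value = value
        for w in self.watchers:  # notify everyone watching this field
            w.handle_updated(self)

class ChecksumField(object):
    def __init__(self, data_field):
        self.value = None
        data_field.watch(self)

    def handle_updated(self, watched):
        self.value = watched.value % 256  # stand-in for the real update func

data = WatchableField()
crc = ChecksumField(data)
data.set(1000)
print(crc.value)  # 232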
def _pfp__set_packer(self, pack_type, packer=None, pack=None, unpack=None, func_call_info=None): """Set the packer/pack/unpack functions for this field, as well as the pack type. :pack_type: The data type of the packed data :packer: A function that can handle packing and unpacking. First arg is true/false (to pack or unpack). Second arg is the stream. Must return an array of chars. :pack: A function that packs data. It must accept an array of chars and return an array of chars that is a packed form of the input. :unpack: A function that unpacks data. It must accept an array of chars and return an array of chars """ self._pfp__pack_type = pack_type self._pfp__unpack = unpack self._pfp__pack = pack self._pfp__packer = packer self._pfp__pack_func_call_info = func_call_info
Set the packer/pack/unpack functions for this field, as well as the pack type. :pack_type: The data type of the packed data :packer: A function that can handle packing and unpacking. First arg is true/false (to pack or unpack). Second arg is the stream. Must return an array of chars. :pack: A function that packs data. It must accept an array of chars and return an array of chars that is a packed form of the input. :unpack: A function that unpacks data. It must accept an array of chars and return an array of chars
entailment
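The pack/unpack contract reduces to a reversible byte transform; a sketch with plain callables, using zlib purely as an example codec (pfp's real pack functions are interpreter-level, not plain Python):

import zlib

def pack(data):
    return zlib.compress(data)    # raw chars -> packed form

def unpack(data):
    return zlib.decompress(data)  # packed form -> raw chars

assert unpack(pack(b"hello world")) == b"hello world"  # round-trip holds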
def _pfp__pack_data(self): """Pack the nested field """ if self._pfp__pack_type is None: return tmp_stream = six.BytesIO() self._._pfp__build(bitwrap.BitwrappedStream(tmp_stream)) raw_data = tmp_stream.getvalue() unpack_func = self._pfp__packer unpack_args = [] if self._pfp__packer is not None: unpack_func = self._pfp__packer unpack_args = [true(), raw_data] elif self._pfp__pack is not None: unpack_func = self._pfp__pack unpack_args = [raw_data] # does not need to be converted to a char array if not isinstance(unpack_func, functions.NativeFunction): io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data)) unpack_args[-1] = Array(len(raw_data), Char, io_stream) res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True) if isinstance(res, Array): res = res._pfp__build() io_stream = six.BytesIO(res) tmp_stream = bitwrap.BitwrappedStream(io_stream) self._pfp__no_unpack = True self._pfp__parse(tmp_stream) self._pfp__no_unpack = False
Pack the nested field
entailment
def _pfp__unpack_data(self, raw_data): """Means that the field has already been parsed normally, and that it now needs to be unpacked. :raw_data: A string of the data that the field consumed while parsing """ if self._pfp__pack_type is None: return if self._pfp__no_unpack: return unpack_func = self._pfp__packer unpack_args = [] if self._pfp__packer is not None: unpack_func = self._pfp__packer unpack_args = [false(), raw_data] elif self._pfp__unpack is not None: unpack_func = self._pfp__unpack unpack_args = [raw_data] # does not need to be converted to a char array if not isinstance(unpack_func, functions.NativeFunction): io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data)) unpack_args[-1] = Array(len(raw_data), Char, io_stream) res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True) if isinstance(res, Array): res = res._pfp__build() io_stream = six.BytesIO(res) tmp_stream = bitwrap.BitwrappedStream(io_stream) tmp_stream.padded = self._pfp__interp.get_bitfield_padded() self._ = self._pfp__parsed_packed = self._pfp__pack_type(tmp_stream) self._._pfp__watch(self)
Means that the field has already been parsed normally, and that it now needs to be unpacked. :raw_data: A string of the data that the field consumed while parsing
entailment
def _pfp__handle_updated(self, watched_field): """Handle the watched field that was updated """ self._pfp__no_notify = True # nested data has been changed, so rebuild the # nested data to update the field # TODO a global setting to determine this behavior? # could slow things down a bit for large nested structures # notice the use of _is_ here - 'is' != '=='. '==' uses # the __eq__ operator, while is compares id(object) results if watched_field is self._: self._pfp__pack_data() elif self._pfp__update_func is not None: self._pfp__update_func.call( [self] + self._pfp__watch_fields, *self._pfp__update_func_call_info ) self._pfp__no_notify = False
Handle the watched field that was updated
entailment
def _pfp__width(self): """Return the width of the field (sizeof) """ raw_output = six.BytesIO() output = bitwrap.BitwrappedStream(raw_output) self._pfp__build(output) output.flush() return len(raw_output.getvalue())
Return the width of the field (sizeof)
entailment
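The same measure-by-serializing idea in plain Python, with struct standing in for pfp's build step:

import struct

def width_of(fmt, *values):
    # A field's width is just the length of its serialized form
    return len(struct.pack(fmt, *values))

print(width_of("<I", 0))      # 4: a uint32
print(width_of("<hh", 1, 2))  # 4: two int16s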
def _pfp__set_value(self, new_val): """Set the new value if type checking passes, potentially (TODO? reevaluate this) casting the value to something else :new_val: The new value :returns: TODO """ if self._pfp__frozen: raise errors.UnmodifiableConst() self._pfp__value = self._pfp__get_root_value(new_val) self._pfp__notify_parent()
Set the new value if type checking passes, potentially (TODO? reevaluate this) casting the value to something else :new_val: The new value :returns: TODO
entailment
def _pfp__snapshot(self, recurse=True): """Save off the current value of the field """ super(Struct, self)._pfp__snapshot(recurse=recurse) if recurse: for child in self._pfp__children: child._pfp__snapshot(recurse=recurse)
Save off the current value of the field
entailment
def _pfp__restore_snapshot(self, recurse=True): """Restore the snapshotted value without triggering any events """ super(Struct, self)._pfp__restore_snapshot(recurse=recurse) if recurse: for child in self._pfp__children: child._pfp__restore_snapshot(recurse=recurse)
Restore the snapshotted value without triggering any events
entailment
def _pfp__set_value(self, value): """Initialize the struct. Value should be an array of fields, one each for each struct member. :value: An array of fields to initialize the struct with :returns: None """ if self._pfp__frozen: raise errors.UnmodifiableConst() if len(value) != len(self._pfp__children): raise errors.PfpError("struct initialization has wrong number of members") for x in six.moves.range(len(self._pfp__children)): self._pfp__children[x]._pfp__set_value(value[x])
Initialize the struct. Value should be an array of fields, one each for each struct member. :value: An array of fields to initialize the struct with :returns: None
entailment
def _pfp__add_child(self, name, child, stream=None, overwrite=False): """Add a child to the Struct field. If multiple consecutive fields are added with the same name, an implicit array will be created to store all fields of that name. :param str name: The name of the child :param pfp.fields.Field child: The field to add :param bool overwrite: Overwrite existing fields (default: False) :param pfp.bitwrap.BitwrappedStream stream: unused, but here for compatibility with Union._pfp__add_child :returns: The resulting field added """ if not overwrite and self._pfp__is_non_consecutive_duplicate(name, child): return self._pfp__handle_non_consecutive_duplicate(name, child) elif not overwrite and name in self._pfp__children_map: return self._pfp__handle_implicit_array(name, child) else: child._pfp__parent = self self._pfp__children.append(child) child._pfp__name = name self._pfp__children_map[name] = child return child
Add a child to the Struct field. If multiple consecutive fields are added with the same name, an implicit array will be created to store all fields of that name. :param str name: The name of the child :param pfp.fields.Field child: The field to add :param bool overwrite: Overwrite existing fields (default: False) :param pfp.bitwrap.BitwrappedStream stream: unused, but here for compatibility with Union._pfp__add_child :returns: The resulting field added
entailment
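A dict/list sketch of the consecutive-duplicate rule: a repeated name is promoted to an implicit array instead of being overwritten (illustrative model only, not pfp's internals):

children = {}

def add_child(name, child):
    if name in children:                    # consecutive duplicate
        existing = children[name]
        if not isinstance(existing, list):  # promote to an implicit array
            children[name] = [existing]
        children[name].append(child)
    else:
        children[name] = child

add_child("chunk", "A")
add_child("chunk", "B")
print(children["chunk"])  # ['A', 'B']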
def _pfp__handle_non_consecutive_duplicate(self, name, child, insert=True): """This new child, and potentially one already existing child, need to have a numeric suffix appended to their name. An entry will be made for this name in ``self._pfp__name_collisions`` to keep track of the next available suffix number""" if name in self._pfp__children_map: previous_child = self._pfp__children_map[name] # DO NOT cause __eq__ to be called, we want to test actual objects, not comparison # operators if previous_child is not child: self._pfp__handle_non_consecutive_duplicate(name, previous_child, insert=False) del self._pfp__children_map[name] next_suffix = self._pfp__name_collisions.setdefault(name, 0) new_name = "{}_{}".format(name, next_suffix) child._pfp__name = new_name self._pfp__name_collisions[name] = next_suffix + 1 self._pfp__children_map[new_name] = child child._pfp__parent = self if insert: self._pfp__children.append(child) return child
This new child, and potentially one already existing child, need to have a numeric suffix appended to their name. An entry will be made for this name in ``self._pfp__name_collisions`` to keep track of the next available suffix number
entailment
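The suffix bookkeeping reduces to a per-name counter; a minimal sketch mirroring the setdefault logic above:

name_collisions = {}

def next_unique(name):
    suffix = name_collisions.setdefault(name, 0)
    name_collisions[name] = suffix + 1
    return "{}_{}".format(name, suffix)

print(next_unique("header"))  # header_0
print(next_unique("header"))  # header_1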