repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
intuition-io/insights
insights/plugins/hipchat.py
Bot.message
def message(self, body, room_id, style='text'):
    ''' Send a message to the given room '''
    # TODO Automatically detect body format ?
    endpoint = 'rooms/message'
    payload = {
        'room_id': room_id,
        'message': body,
        'from': self.name,
        'notify': 1,
        'message_format': style,
        'color': self.bg_color,
    }
    log.info('sending message to hipchat', message=body, room=room_id)
    # Delegate the HTTP POST to the shared API helper and surface its reply.
    response = self._api_call(endpoint, payload, requests.post)
    log.debug(response)
    return response
python
def message(self, body, room_id, style='text'): ''' Send a message to the given room ''' # TODO Automatically detect body format ? path = 'rooms/message' data = { 'room_id': room_id, 'message': body, 'from': self.name, 'notify': 1, 'message_format': style, 'color': self.bg_color } log.info('sending message to hipchat', message=body, room=room_id) feedback = self._api_call(path, data, requests.post) log.debug(feedback) return feedback
[ "def", "message", "(", "self", ",", "body", ",", "room_id", ",", "style", "=", "'text'", ")", ":", "# TODO Automatically detect body format ?", "path", "=", "'rooms/message'", "data", "=", "{", "'room_id'", ":", "room_id", ",", "'message'", ":", "body", ",", "'from'", ":", "self", ".", "name", ",", "'notify'", ":", "1", ",", "'message_format'", ":", "style", ",", "'color'", ":", "self", ".", "bg_color", "}", "log", ".", "info", "(", "'sending message to hipchat'", ",", "message", "=", "body", ",", "room", "=", "room_id", ")", "feedback", "=", "self", ".", "_api_call", "(", "path", ",", "data", ",", "requests", ".", "post", ")", "log", ".", "debug", "(", "feedback", ")", "return", "feedback" ]
Send a message to the given room
[ "Send", "a", "message", "to", "the", "given", "room" ]
a4eae53a1886164db96751d2b0964aa2acb7c2d7
https://github.com/intuition-io/insights/blob/a4eae53a1886164db96751d2b0964aa2acb7c2d7/insights/plugins/hipchat.py#L53-L68
train
tethysplatform/condorpy
condorpy/job.py
Job.job_file
def job_file(self):
    """The path to the submit description file representing this job."""
    # Recompute from the current name/initial_dir on each access so that
    # renames made after construction are reflected, then cache the result.
    self._job_file = os.path.join(self.initial_dir, '%s.job' % self.name)
    return self._job_file
python
def job_file(self): """The path to the submit description file representing this job. """ job_file_name = '%s.job' % (self.name) job_file_path = os.path.join(self.initial_dir, job_file_name) self._job_file = job_file_path return self._job_file
[ "def", "job_file", "(", "self", ")", ":", "job_file_name", "=", "'%s.job'", "%", "(", "self", ".", "name", ")", "job_file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "initial_dir", ",", "job_file_name", ")", "self", ".", "_job_file", "=", "job_file_path", "return", "self", ".", "_job_file" ]
The path to the submit description file representing this job.
[ "The", "path", "to", "the", "submit", "description", "file", "representing", "this", "job", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L173-L180
train
tethysplatform/condorpy
condorpy/job.py
Job.log_file
def log_file(self):
    """The path to the log file for this job."""
    if not self.get('log'):
        # Default the 'log' attribute to '<name>.log' the first time it
        # is needed so later calls reuse the same file name.
        self.set('log', '%s.log' % self.name)
    return os.path.join(self.initial_dir, self.get('log'))
python
def log_file(self): """The path to the log file for this job. """ log_file = self.get('log') if not log_file: log_file = '%s.log' % (self.name) self.set('log', log_file) return os.path.join(self.initial_dir, self.get('log'))
[ "def", "log_file", "(", "self", ")", ":", "log_file", "=", "self", ".", "get", "(", "'log'", ")", "if", "not", "log_file", ":", "log_file", "=", "'%s.log'", "%", "(", "self", ".", "name", ")", "self", ".", "set", "(", "'log'", ",", "log_file", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "initial_dir", ",", "self", ".", "get", "(", "'log'", ")", ")" ]
The path to the log file for this job.
[ "The", "path", "to", "the", "log", "file", "for", "this", "job", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L183-L191
train
tethysplatform/condorpy
condorpy/job.py
Job.initial_dir
def initial_dir(self):
    """The initial directory defined for the job.

    All input files and log files are relative to this directory, and
    output files are copied into it by default; it is created on submit
    if it does not already exist. Note that the executable is defined
    relative to the current working directory, NOT to the initial
    directory (which itself is created inside the current working
    directory).
    """
    initial_dir = self.get('initialdir') or os.curdir  #TODO does this conflict with the working directory?
    # Remote schedulers resolve paths on their side, so an absolute local
    # path would be meaningless there.
    if self._remote and os.path.isabs(initial_dir):
        raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
    return initial_dir
python
def initial_dir(self): """The initial directory defined for the job. All input files, and log files are relative to this directory. Output files will be copied into this directory by default. This directory will be created if it doesn't already exist when the job is submitted. Note: The executable file is defined relative to the current working directory, NOT to the initial directory. The initial directory is created in the current working directory. """ initial_dir = self.get('initialdir') if not initial_dir: initial_dir = os.curdir #TODO does this conflict with the working directory? if self._remote and os.path.isabs(initial_dir): raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler') return initial_dir
[ "def", "initial_dir", "(", "self", ")", ":", "initial_dir", "=", "self", ".", "get", "(", "'initialdir'", ")", "if", "not", "initial_dir", ":", "initial_dir", "=", "os", ".", "curdir", "#TODO does this conflict with the working directory?", "if", "self", ".", "_remote", "and", "os", ".", "path", ".", "isabs", "(", "initial_dir", ")", ":", "raise", "RemoteError", "(", "'Cannot define an absolute path as an initial_dir on a remote scheduler'", ")", "return", "initial_dir" ]
The initial directory defined for the job. All input files, and log files are relative to this directory. Output files will be copied into this directory by default. This directory will be created if it doesn't already exist when the job is submitted. Note: The executable file is defined relative to the current working directory, NOT to the initial directory. The initial directory is created in the current working directory.
[ "The", "initial", "directory", "defined", "for", "the", "job", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L194-L210
train
tethysplatform/condorpy
condorpy/job.py
Job.submit
def submit(self, queue=None, options=None):
    """Submits the job either locally or to a remote server if it is defined.

    Args:
        queue (int, optional): The number of sub-jobs to run. This argument
            will set the num_jobs attribute of this object. Defaults to None,
            meaning the value of num_jobs will be used.
        options (list of str, optional): A list of command line options for the
            condor_submit command. For details on valid options see:
            http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
            Defaults to None (no extra options).

    Raises:
        NoExecutable: If no executable has been set for this job.
    """
    if not self.executable:
        log.error('Job %s was submitted with no executable', self.name)
        raise NoExecutable('You cannot submit a job without an executable')

    self._num_jobs = queue or self.num_jobs

    self._write_job_file()

    # A mutable default argument ([]) is shared across calls; use None and
    # substitute an empty list here instead.
    args = ['condor_submit']
    args.extend(options or [])
    args.append(self.job_file)

    log.info('Submitting job %s with options: %s', self.name, args)

    return super(Job, self).submit(args)
python
def submit(self, queue=None, options=[]): """Submits the job either locally or to a remote server if it is defined. Args: queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of this object. Defaults to None, meaning the value of num_jobs will be used. options (list of str, optional): A list of command line options for the condor_submit command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html. Defaults to an empty list. """ if not self.executable: log.error('Job %s was submitted with no executable', self.name) raise NoExecutable('You cannot submit a job without an executable') self._num_jobs = queue or self.num_jobs self._write_job_file() args = ['condor_submit'] args.extend(options) args.append(self.job_file) log.info('Submitting job %s with options: %s', self.name, args) return super(Job, self).submit(args)
[ "def", "submit", "(", "self", ",", "queue", "=", "None", ",", "options", "=", "[", "]", ")", ":", "if", "not", "self", ".", "executable", ":", "log", ".", "error", "(", "'Job %s was submitted with no executable'", ",", "self", ".", "name", ")", "raise", "NoExecutable", "(", "'You cannot submit a job without an executable'", ")", "self", ".", "_num_jobs", "=", "queue", "or", "self", ".", "num_jobs", "self", ".", "_write_job_file", "(", ")", "args", "=", "[", "'condor_submit'", "]", "args", ".", "extend", "(", "options", ")", "args", ".", "append", "(", "self", ".", "job_file", ")", "log", ".", "info", "(", "'Submitting job %s with options: %s'", ",", "self", ".", "name", ",", "args", ")", "return", "super", "(", "Job", ",", "self", ")", ".", "submit", "(", "args", ")" ]
Submits the job either locally or to a remote server if it is defined. Args: queue (int, optional): The number of sub-jobs to run. This argument will set the num_jobs attribute of this object. Defaults to None, meaning the value of num_jobs will be used. options (list of str, optional): A list of command line options for the condor_submit command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html. Defaults to an empty list.
[ "Submits", "the", "job", "either", "locally", "or", "to", "a", "remote", "server", "if", "it", "is", "defined", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L212-L236
train
tethysplatform/condorpy
condorpy/job.py
Job.wait
def wait(self, options=None, sub_job_num=None):
    """Wait for the job, or a sub-job, to complete.

    Args:
        options (list of str, optional): A list of command line options for the
            condor_wait command. For details on valid options see:
            http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
            Defaults to None (no extra options).
        sub_job_num (int, optional): The sub-job number to wait on. If None,
            waits for the whole cluster.

    Returns:
        tuple: (stdout, stderr) of the condor_wait command.
    """
    # A mutable default argument ([]) is shared across calls; use None and
    # substitute an empty list here instead.
    args = ['condor_wait']
    args.extend(options or [])

    # '<cluster>.<n>' targets one sub-job; bare '<cluster>' targets them all.
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)

    # Only resolve the log file to an absolute path for local execution;
    # a local absolute path would be wrong on a remote scheduler.
    if self._remote:
        abs_log_file = self.log_file
    else:
        abs_log_file = os.path.abspath(self.log_file)
    args.extend([abs_log_file, job_id])

    out, err = self._execute(args)
    return out, err
python
def wait(self, options=[], sub_job_num=None): """Wait for the job, or a sub-job to complete. Args: options (list of str, optional): A list of command line options for the condor_wait command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html. Defaults to an empty list. job_num (int, optional): The number """ args = ['condor_wait'] args.extend(options) job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id) if self._remote: abs_log_file = self.log_file else: abs_log_file = os.path.abspath(self.log_file) args.extend([abs_log_file, job_id]) out, err = self._execute(args) return out, err
[ "def", "wait", "(", "self", ",", "options", "=", "[", "]", ",", "sub_job_num", "=", "None", ")", ":", "args", "=", "[", "'condor_wait'", "]", "args", ".", "extend", "(", "options", ")", "job_id", "=", "'%s.%s'", "%", "(", "self", ".", "cluster_id", ",", "sub_job_num", ")", "if", "sub_job_num", "else", "str", "(", "self", ".", "cluster_id", ")", "if", "self", ".", "_remote", ":", "abs_log_file", "=", "self", ".", "log_file", "else", ":", "abs_log_file", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "log_file", ")", "args", ".", "extend", "(", "[", "abs_log_file", ",", "job_id", "]", ")", "out", ",", "err", "=", "self", ".", "_execute", "(", "args", ")", "return", "out", ",", "err" ]
Wait for the job, or a sub-job to complete. Args: options (list of str, optional): A list of command line options for the condor_wait command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html. Defaults to an empty list. job_num (int, optional): The number
[ "Wait", "for", "the", "job", "or", "a", "sub", "-", "job", "to", "complete", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L247-L265
train
tethysplatform/condorpy
condorpy/job.py
Job.get
def get(self, attr, value=None, resolve=True):
    """Get the value of an attribute from the submit description file.

    Args:
        attr (str): The name of the attribute whose value should be returned.
        value (str, optional): A default value to return if 'attr' doesn't
            exist. Defaults to None.
        resolve (bool, optional): If True then resolve references to other
            attributes in the value of 'attr'. If False then return the raw
            value of 'attr'. Defaults to True.

    Returns:
        str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
    """
    # EAFP: a missing attribute raises KeyError, in which case the caller's
    # default is returned unchanged.
    try:
        return self._resolve_attribute(attr) if resolve else self.attributes[attr]
    except KeyError:
        return value
python
def get(self, attr, value=None, resolve=True): """Get the value of an attribute from submit description file. Args: attr (str): The name of the attribute whose value should be returned. value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None. resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If False then return the raw value of 'attr'. Defaults to True. Returns: str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'. """ try: if resolve: value = self._resolve_attribute(attr) else: value = self.attributes[attr] except KeyError: pass return value
[ "def", "get", "(", "self", ",", "attr", ",", "value", "=", "None", ",", "resolve", "=", "True", ")", ":", "try", ":", "if", "resolve", ":", "value", "=", "self", ".", "_resolve_attribute", "(", "attr", ")", "else", ":", "value", "=", "self", ".", "attributes", "[", "attr", "]", "except", "KeyError", ":", "pass", "return", "value" ]
Get the value of an attribute from submit description file. Args: attr (str): The name of the attribute whose value should be returned. value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None. resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If False then return the raw value of 'attr'. Defaults to True. Returns: str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
[ "Get", "the", "value", "of", "an", "attribute", "from", "submit", "description", "file", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L267-L286
train
tethysplatform/condorpy
condorpy/job.py
Job.set
def set(self, attr, value):
    """Set the value of an attribute in the submit description file.

    The value can be passed in as a Python type (i.e. a list, a tuple or a
    Python boolean). The Python values will be reformatted into strings based
    on the standards described in the HTCondor manual:
    http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html

    Args:
        attr (str): The name of the attribute to set.
        value (str): The value to assign to 'attr'.
    """
    def escape_new_syntax(value, double_quote_escape='"'):
        # Escape a single argument per HTCondor "new syntax" quoting rules:
        # single quotes are doubled, double quotes escaped, and arguments
        # containing whitespace are wrapped in single quotes.
        value = str(value)
        value = value.replace("'", "''")
        value = value.replace('"', '%s"' % double_quote_escape)
        if ' ' in value or '\t' in value:
            value = "'%s'" % value
        return value

    def escape_new_syntax_pre_post_script(value):
        # DAG pre/post script arguments escape double quotes with a backslash.
        return escape_new_syntax(value, '\\')

    def escape_remap(value):
        # '=' and ';' are delimiters in remap lists, so they must be escaped.
        # (Explicit '\\' avoids the invalid '\=' escape sequence, which is a
        # SyntaxWarning on modern Python; the resulting string is identical.)
        value = value.replace('=', '\\=')
        value = value.replace(';', '\\;')
        return value

    def join_function_template(join_string, escape_func):
        return lambda value: join_string.join([escape_func(i) for i in value])

    def quote_join_function_template(join_string, escape_func):
        return lambda value: join_function_template(join_string, escape_func)(value)

    # BUG FIX: this key was previously misspelled 'rempas', so attributes
    # ending in 'remaps' (e.g. 'transfer_output_remaps') never received
    # remap escaping and fell through to the default comma join.
    join_functions = {'remaps': quote_join_function_template('; ', escape_remap),
                      'arguments': quote_join_function_template(' ', escape_new_syntax),
                      'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script),
                      }

    if value is False:
        value = 'false'
    elif value is True:
        value = 'true'
    elif isinstance(value, (list, tuple)):
        # Default: plain comma-separated join; specialized joins are chosen
        # by attribute-name suffix.
        join_function = join_function_template(', ', str)
        for key in join_functions:
            if attr.endswith(key):
                join_function = join_functions[key]
        value = join_function(value)

    self.attributes[attr] = value
python
def set(self, attr, value): """Set the value of an attribute in the submit description file. The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean). The Python values will be reformatted into strings based on the standards described in the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html Args: attr (str): The name of the attribute to set. value (str): The value to assign to 'attr'. """ def escape_new_syntax(value, double_quote_escape='"'): value = str(value) value = value.replace("'", "''") value = value.replace('"', '%s"' % double_quote_escape) if ' ' in value or '\t' in value: value = "'%s'" % value return value def escape_new_syntax_pre_post_script(value): return escape_new_syntax(value, '\\') def escape_remap(value): value = value.replace('=', '\=') value = value.replace(';', '\;') return value def join_function_template(join_string, escape_func): return lambda value: join_string.join([escape_func(i) for i in value]) def quote_join_function_template(join_string, escape_func): return lambda value: join_function_template(join_string, escape_func)(value) join_functions = {'rempas': quote_join_function_template('; ', escape_remap), 'arguments': quote_join_function_template(' ', escape_new_syntax), 'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script) } if value is False: value = 'false' elif value is True: value = 'true' elif isinstance(value, list) or isinstance(value, tuple): join_function = join_function_template(', ', str) for key in list(join_functions.keys()): if attr.endswith(key): join_function = join_functions[key] value = join_function(value) self.attributes[attr] = value
[ "def", "set", "(", "self", ",", "attr", ",", "value", ")", ":", "def", "escape_new_syntax", "(", "value", ",", "double_quote_escape", "=", "'\"'", ")", ":", "value", "=", "str", "(", "value", ")", "value", "=", "value", ".", "replace", "(", "\"'\"", ",", "\"''\"", ")", "value", "=", "value", ".", "replace", "(", "'\"'", ",", "'%s\"'", "%", "double_quote_escape", ")", "if", "' '", "in", "value", "or", "'\\t'", "in", "value", ":", "value", "=", "\"'%s'\"", "%", "value", "return", "value", "def", "escape_new_syntax_pre_post_script", "(", "value", ")", ":", "return", "escape_new_syntax", "(", "value", ",", "'\\\\'", ")", "def", "escape_remap", "(", "value", ")", ":", "value", "=", "value", ".", "replace", "(", "'='", ",", "'\\='", ")", "value", "=", "value", ".", "replace", "(", "';'", ",", "'\\;'", ")", "return", "value", "def", "join_function_template", "(", "join_string", ",", "escape_func", ")", ":", "return", "lambda", "value", ":", "join_string", ".", "join", "(", "[", "escape_func", "(", "i", ")", "for", "i", "in", "value", "]", ")", "def", "quote_join_function_template", "(", "join_string", ",", "escape_func", ")", ":", "return", "lambda", "value", ":", "join_function_template", "(", "join_string", ",", "escape_func", ")", "(", "value", ")", "join_functions", "=", "{", "'rempas'", ":", "quote_join_function_template", "(", "'; '", ",", "escape_remap", ")", ",", "'arguments'", ":", "quote_join_function_template", "(", "' '", ",", "escape_new_syntax", ")", ",", "'Arguments'", ":", "quote_join_function_template", "(", "' '", ",", "escape_new_syntax_pre_post_script", ")", "}", "if", "value", "is", "False", ":", "value", "=", "'false'", "elif", "value", "is", "True", ":", "value", "=", "'true'", "elif", "isinstance", "(", "value", ",", "list", ")", "or", "isinstance", "(", "value", ",", "tuple", ")", ":", "join_function", "=", "join_function_template", "(", "', '", ",", "str", ")", "for", "key", "in", "list", "(", "join_functions", ".", "keys", "(", ")", ")", ":", 
"if", "attr", ".", "endswith", "(", "key", ")", ":", "join_function", "=", "join_functions", "[", "key", "]", "value", "=", "join_function", "(", "value", ")", "self", ".", "attributes", "[", "attr", "]", "=", "value" ]
Set the value of an attribute in the submit description file. The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean). The Python values will be reformatted into strings based on the standards described in the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html Args: attr (str): The name of the attribute to set. value (str): The value to assign to 'attr'.
[ "Set", "the", "value", "of", "an", "attribute", "in", "the", "submit", "description", "file", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L288-L339
train
tethysplatform/condorpy
condorpy/job.py
Job._update_status
def _update_status(self, sub_job_num=None):
    """Gets the job status.

    Args:
        sub_job_num (int, optional): If given, only the status of that
            sub-job is queried; otherwise every sub-job in the cluster is.

    Return:
        dict: Counts of sub-jobs keyed by status name from
        CONDOR_JOB_STATUSES. (NOTE(review): the original docstring said
        'str', but a dict is what this builds and returns.)

    Raises:
        HTCondorError: If the condor commands report an error or the job
            cannot be found.
    """
    # Target either one sub-job ('<cluster>.<n>') or the whole cluster.
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    format = ['-format', '"%d"', 'JobStatus']
    # condor_q covers queued/running jobs; condor_history covers finished
    # ones — both are queried in one shell command.
    cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
    args = [cmd]
    out, err = self._execute(args, shell=True, run_in_job_dir=False)
    if err:
        log.error('Error while updating status for job %s: %s', job_id, err)
        raise HTCondorError(err)
    if not out:
        log.error('Error while updating status for job %s: Job not found.', job_id)
        raise HTCondorError('Job not found.')
    # Output is one quoted digit per sub-job (e.g. '"2""4"'); drop the quotes
    # so each remaining character is a status code.
    out = out.replace('\"', '')
    log.info('Job %s status: %s', job_id, out)
    if not sub_job_num:
        # More digits than sub-jobs can come back — presumably when a job
        # appears in both condor_q and condor_history output; TODO confirm.
        # Keep only the first num_jobs of them.
        if len(out) >= self.num_jobs:
            out = out[:self.num_jobs]
        else:
            msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
            log.error(msg)
            raise HTCondorError(msg)

    #initialize status dictionary
    status_dict = dict()
    for val in CONDOR_JOB_STATUSES.values():
        status_dict[val] = 0

    # Tally each status digit; non-digit characters fall back to code 0.
    for status_code_str in out:
        status_code = 0
        try:
            status_code = int(status_code_str)
        except ValueError:
            pass
        key = CONDOR_JOB_STATUSES[status_code]
        status_dict[key] += 1

    return status_dict
python
def _update_status(self, sub_job_num=None): """Gets the job status. Return: str: The current status of the job """ job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id) format = ['-format', '"%d"', 'JobStatus'] cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format)) args = [cmd] out, err = self._execute(args, shell=True, run_in_job_dir=False) if err: log.error('Error while updating status for job %s: %s', job_id, err) raise HTCondorError(err) if not out: log.error('Error while updating status for job %s: Job not found.', job_id) raise HTCondorError('Job not found.') out = out.replace('\"', '') log.info('Job %s status: %s', job_id, out) if not sub_job_num: if len(out) >= self.num_jobs: out = out[:self.num_jobs] else: msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out)) log.error(msg) raise HTCondorError(msg) #initialize status dictionary status_dict = dict() for val in CONDOR_JOB_STATUSES.values(): status_dict[val] = 0 for status_code_str in out: status_code = 0 try: status_code = int(status_code_str) except ValueError: pass key = CONDOR_JOB_STATUSES[status_code] status_dict[key] += 1 return status_dict
[ "def", "_update_status", "(", "self", ",", "sub_job_num", "=", "None", ")", ":", "job_id", "=", "'%s.%s'", "%", "(", "self", ".", "cluster_id", ",", "sub_job_num", ")", "if", "sub_job_num", "else", "str", "(", "self", ".", "cluster_id", ")", "format", "=", "[", "'-format'", ",", "'\"%d\"'", ",", "'JobStatus'", "]", "cmd", "=", "'condor_q {0} {1} && condor_history {0} {1}'", ".", "format", "(", "job_id", ",", "' '", ".", "join", "(", "format", ")", ")", "args", "=", "[", "cmd", "]", "out", ",", "err", "=", "self", ".", "_execute", "(", "args", ",", "shell", "=", "True", ",", "run_in_job_dir", "=", "False", ")", "if", "err", ":", "log", ".", "error", "(", "'Error while updating status for job %s: %s'", ",", "job_id", ",", "err", ")", "raise", "HTCondorError", "(", "err", ")", "if", "not", "out", ":", "log", ".", "error", "(", "'Error while updating status for job %s: Job not found.'", ",", "job_id", ")", "raise", "HTCondorError", "(", "'Job not found.'", ")", "out", "=", "out", ".", "replace", "(", "'\\\"'", ",", "''", ")", "log", ".", "info", "(", "'Job %s status: %s'", ",", "job_id", ",", "out", ")", "if", "not", "sub_job_num", ":", "if", "len", "(", "out", ")", ">=", "self", ".", "num_jobs", ":", "out", "=", "out", "[", ":", "self", ".", "num_jobs", "]", "else", ":", "msg", "=", "'There are {0} sub-jobs, but {1} status(es).'", ".", "format", "(", "self", ".", "num_jobs", ",", "len", "(", "out", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "HTCondorError", "(", "msg", ")", "#initialize status dictionary", "status_dict", "=", "dict", "(", ")", "for", "val", "in", "CONDOR_JOB_STATUSES", ".", "values", "(", ")", ":", "status_dict", "[", "val", "]", "=", "0", "for", "status_code_str", "in", "out", ":", "status_code", "=", "0", "try", ":", "status_code", "=", "int", "(", "status_code_str", ")", "except", "ValueError", ":", "pass", "key", "=", "CONDOR_JOB_STATUSES", "[", "status_code", "]", "status_dict", "[", "key", "]", "+=", "1", "return", "status_dict" ]
Gets the job status. Return: str: The current status of the job
[ "Gets", "the", "job", "status", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L350-L394
train
tethysplatform/condorpy
condorpy/job.py
Job._resolve_attribute
def _resolve_attribute(self, attribute): """Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'. """ value = self.attributes[attribute] if not value: return None resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value) return resolved_value
python
def _resolve_attribute(self, attribute): """Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'. """ value = self.attributes[attribute] if not value: return None resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value) return resolved_value
[ "def", "_resolve_attribute", "(", "self", ",", "attribute", ")", ":", "value", "=", "self", ".", "attributes", "[", "attribute", "]", "if", "not", "value", ":", "return", "None", "resolved_value", "=", "re", ".", "sub", "(", "'\\$\\((.*?)\\)'", ",", "self", ".", "_resolve_attribute_match", ",", "value", ")", "return", "resolved_value" ]
Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'.
[ "Recursively", "replaces", "references", "to", "other", "attributes", "with", "their", "value", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L417-L431
train
tethysplatform/condorpy
condorpy/job.py
Job._resolve_attribute_match
def _resolve_attribute_match(self, match): """Replaces a reference to an attribute with the value of the attribute. Args: match (re.match object): A match object containing a match to a reference to an attribute. """ if match.group(1) == 'cluster': return str(self.cluster_id) return self.get(match.group(1), match.group(0))
python
def _resolve_attribute_match(self, match): """Replaces a reference to an attribute with the value of the attribute. Args: match (re.match object): A match object containing a match to a reference to an attribute. """ if match.group(1) == 'cluster': return str(self.cluster_id) return self.get(match.group(1), match.group(0))
[ "def", "_resolve_attribute_match", "(", "self", ",", "match", ")", ":", "if", "match", ".", "group", "(", "1", ")", "==", "'cluster'", ":", "return", "str", "(", "self", ".", "cluster_id", ")", "return", "self", ".", "get", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "0", ")", ")" ]
Replaces a reference to an attribute with the value of the attribute. Args: match (re.match object): A match object containing a match to a reference to an attribute.
[ "Replaces", "a", "reference", "to", "an", "attribute", "with", "the", "value", "of", "the", "attribute", "." ]
a5aaaef0d73198f7d9756dda7abe98b4e209f1f4
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L433-L443
train
ktdreyer/txkoji
txkoji/channel.py
Channel.total_capacity
def total_capacity(self):
    """
    Find the total task capacity available for this channel.

    Query all the enabled hosts for this channel and sum up all the
    capacities. Each task has a "weight" and stays "FREE" until a host has
    enough spare capacity for that weight.

    :returns: deferred that when fired returns a float value: the total
              task weight that this channel can have open simultaneously.
    """
    # Ensure this task's channel has spare capacity for this task.
    hosts = yield self.hosts(enabled=True)
    defer.returnValue(sum(host.capacity for host in hosts))
python
def total_capacity(self): """ Find the total task capacity available for this channel. Query all the enabled hosts for this channel and sum up all the capacities. Each task has a "weight". Each task will be in "FREE" state until there is enough capacity for the task's "weight" on a host. :returns: deferred that when fired returns a float value: the total task weight that this channel can have open simultaneously. """ # Ensure this task's channel has spare capacity for this task. total_capacity = 0 hosts = yield self.hosts(enabled=True) for host in hosts: total_capacity += host.capacity defer.returnValue(total_capacity)
[ "def", "total_capacity", "(", "self", ")", ":", "# Ensure this task's channel has spare capacity for this task.", "total_capacity", "=", "0", "hosts", "=", "yield", "self", ".", "hosts", "(", "enabled", "=", "True", ")", "for", "host", "in", "hosts", ":", "total_capacity", "+=", "host", ".", "capacity", "defer", ".", "returnValue", "(", "total_capacity", ")" ]
Find the total task capacity available for this channel. Query all the enabled hosts for this channel and sum up all the capacities. Each task has a "weight". Each task will be in "FREE" state until there is enough capacity for the task's "weight" on a host. :returns: deferred that when fired returns a float value: the total task weight that this channel can have open simultaneously.
[ "Find", "the", "total", "task", "capacity", "available", "for", "this", "channel", "." ]
a7de380f29f745bf11730b27217208f6d4da7733
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/channel.py#L33-L51
train
tjcsl/cslbot
cslbot/commands/google.py
cmd
def cmd(send, msg, args): """Googles something. Syntax: {command} <term> """ if not msg: send("Google what?") return key = args['config']['api']['googleapikey'] cx = args['config']['api']['googlesearchid'] data = get('https://www.googleapis.com/customsearch/v1', params={'key': key, 'cx': cx, 'q': msg}).json() if 'items' not in data: send("Google didn't say much.") else: url = data['items'][0]['link'] send("Google says %s" % url)
python
def cmd(send, msg, args): """Googles something. Syntax: {command} <term> """ if not msg: send("Google what?") return key = args['config']['api']['googleapikey'] cx = args['config']['api']['googlesearchid'] data = get('https://www.googleapis.com/customsearch/v1', params={'key': key, 'cx': cx, 'q': msg}).json() if 'items' not in data: send("Google didn't say much.") else: url = data['items'][0]['link'] send("Google says %s" % url)
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "not", "msg", ":", "send", "(", "\"Google what?\"", ")", "return", "key", "=", "args", "[", "'config'", "]", "[", "'api'", "]", "[", "'googleapikey'", "]", "cx", "=", "args", "[", "'config'", "]", "[", "'api'", "]", "[", "'googlesearchid'", "]", "data", "=", "get", "(", "'https://www.googleapis.com/customsearch/v1'", ",", "params", "=", "{", "'key'", ":", "key", ",", "'cx'", ":", "cx", ",", "'q'", ":", "msg", "}", ")", ".", "json", "(", ")", "if", "'items'", "not", "in", "data", ":", "send", "(", "\"Google didn't say much.\"", ")", "else", ":", "url", "=", "data", "[", "'items'", "]", "[", "0", "]", "[", "'link'", "]", "send", "(", "\"Google says %s\"", "%", "url", ")" ]
Googles something. Syntax: {command} <term>
[ "Googles", "something", "." ]
aebe07be47141f61d7c180706bddfb707f19b2b5
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/google.py#L24-L40
train
rraadd88/rohan
rohan/dandage/__init__.py
get_deps
def get_deps(cfg=None,deps=[]): """ Installs conda dependencies. :param cfg: configuration dict """ if not cfg is None: if not 'deps' in cfg: cfg['deps']=deps else: deps=cfg['deps'] if not len(deps)==0: for dep in deps: if not dep in cfg: runbashcmd(f'conda install {dep}',test=cfg['test']) cfg[dep]=dep logging.info(f"{len(deps)} deps installed.") return cfg
python
def get_deps(cfg=None,deps=[]): """ Installs conda dependencies. :param cfg: configuration dict """ if not cfg is None: if not 'deps' in cfg: cfg['deps']=deps else: deps=cfg['deps'] if not len(deps)==0: for dep in deps: if not dep in cfg: runbashcmd(f'conda install {dep}',test=cfg['test']) cfg[dep]=dep logging.info(f"{len(deps)} deps installed.") return cfg
[ "def", "get_deps", "(", "cfg", "=", "None", ",", "deps", "=", "[", "]", ")", ":", "if", "not", "cfg", "is", "None", ":", "if", "not", "'deps'", "in", "cfg", ":", "cfg", "[", "'deps'", "]", "=", "deps", "else", ":", "deps", "=", "cfg", "[", "'deps'", "]", "if", "not", "len", "(", "deps", ")", "==", "0", ":", "for", "dep", "in", "deps", ":", "if", "not", "dep", "in", "cfg", ":", "runbashcmd", "(", "f'conda install {dep}'", ",", "test", "=", "cfg", "[", "'test'", "]", ")", "cfg", "[", "dep", "]", "=", "dep", "logging", ".", "info", "(", "f\"{len(deps)} deps installed.\"", ")", "return", "cfg" ]
Installs conda dependencies. :param cfg: configuration dict
[ "Installs", "conda", "dependencies", "." ]
b0643a3582a2fffc0165ace69fb80880d92bfb10
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/__init__.py#L7-L24
train
Syndace/python-xeddsa
xeddsa/xeddsa.py
XEdDSA.mont_pub_from_mont_priv
def mont_pub_from_mont_priv(cls, mont_priv): """ Restore the Montgomery public key from a Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. """ if not isinstance(mont_priv, bytes): raise TypeError("Wrong type passed for the mont_priv parameter.") if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE: raise ValueError("Invalid value passed for the mont_priv parameter.") return bytes(cls._mont_pub_from_mont_priv(bytearray(mont_priv)))
python
def mont_pub_from_mont_priv(cls, mont_priv): """ Restore the Montgomery public key from a Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. """ if not isinstance(mont_priv, bytes): raise TypeError("Wrong type passed for the mont_priv parameter.") if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE: raise ValueError("Invalid value passed for the mont_priv parameter.") return bytes(cls._mont_pub_from_mont_priv(bytearray(mont_priv)))
[ "def", "mont_pub_from_mont_priv", "(", "cls", ",", "mont_priv", ")", ":", "if", "not", "isinstance", "(", "mont_priv", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"Wrong type passed for the mont_priv parameter.\"", ")", "if", "len", "(", "mont_priv", ")", "!=", "cls", ".", "MONT_PRIV_KEY_SIZE", ":", "raise", "ValueError", "(", "\"Invalid value passed for the mont_priv parameter.\"", ")", "return", "bytes", "(", "cls", ".", "_mont_pub_from_mont_priv", "(", "bytearray", "(", "mont_priv", ")", ")", ")" ]
Restore the Montgomery public key from a Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE.
[ "Restore", "the", "Montgomery", "public", "key", "from", "a", "Montgomery", "private", "key", "." ]
a11721524c96ce354cca3628e003c6fcf7ce3e42
https://github.com/Syndace/python-xeddsa/blob/a11721524c96ce354cca3628e003c6fcf7ce3e42/xeddsa/xeddsa.py#L85-L101
train
Syndace/python-xeddsa
xeddsa/xeddsa.py
XEdDSA.mont_priv_to_ed_pair
def mont_priv_to_ed_pair(cls, mont_priv): """ Derive a Twisted Edwards key pair from given Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A tuple of bytes-like objects encoding the private key with length ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE. """ if not isinstance(mont_priv, bytes): raise TypeError("Wrong type passed for the mont_priv parameter.") if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE: raise ValueError("Invalid value passed for the mont_priv parameter.") ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(mont_priv)) return bytes(ed_priv), bytes(ed_pub)
python
def mont_priv_to_ed_pair(cls, mont_priv): """ Derive a Twisted Edwards key pair from given Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A tuple of bytes-like objects encoding the private key with length ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE. """ if not isinstance(mont_priv, bytes): raise TypeError("Wrong type passed for the mont_priv parameter.") if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE: raise ValueError("Invalid value passed for the mont_priv parameter.") ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(mont_priv)) return bytes(ed_priv), bytes(ed_pub)
[ "def", "mont_priv_to_ed_pair", "(", "cls", ",", "mont_priv", ")", ":", "if", "not", "isinstance", "(", "mont_priv", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"Wrong type passed for the mont_priv parameter.\"", ")", "if", "len", "(", "mont_priv", ")", "!=", "cls", ".", "MONT_PRIV_KEY_SIZE", ":", "raise", "ValueError", "(", "\"Invalid value passed for the mont_priv parameter.\"", ")", "ed_priv", ",", "ed_pub", "=", "cls", ".", "_mont_priv_to_ed_pair", "(", "bytearray", "(", "mont_priv", ")", ")", "return", "bytes", "(", "ed_priv", ")", ",", "bytes", "(", "ed_pub", ")" ]
Derive a Twisted Edwards key pair from given Montgomery private key. :param mont_priv: A bytes-like object encoding the private key with length MONT_PRIV_KEY_SIZE. :returns: A tuple of bytes-like objects encoding the private key with length ED_PRIV_KEY_SIZE and the public key with length ED_PUB_KEY_SIZE.
[ "Derive", "a", "Twisted", "Edwards", "key", "pair", "from", "given", "Montgomery", "private", "key", "." ]
a11721524c96ce354cca3628e003c6fcf7ce3e42
https://github.com/Syndace/python-xeddsa/blob/a11721524c96ce354cca3628e003c6fcf7ce3e42/xeddsa/xeddsa.py#L116-L134
train
Syndace/python-xeddsa
xeddsa/xeddsa.py
XEdDSA.mont_pub_to_ed_pub
def mont_pub_to_ed_pub(cls, mont_pub): """ Derive a Twisted Edwards public key from given Montgomery public key. :param mont_pub: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. :returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE. """ if not isinstance(mont_pub, bytes): raise TypeError("Wrong type passed for the mont_pub parameter.") if len(mont_pub) != cls.MONT_PUB_KEY_SIZE: raise ValueError("Invalid value passed for the mont_pub parameter.") return bytes(cls._mont_pub_to_ed_pub(bytearray(mont_pub)))
python
def mont_pub_to_ed_pub(cls, mont_pub): """ Derive a Twisted Edwards public key from given Montgomery public key. :param mont_pub: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. :returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE. """ if not isinstance(mont_pub, bytes): raise TypeError("Wrong type passed for the mont_pub parameter.") if len(mont_pub) != cls.MONT_PUB_KEY_SIZE: raise ValueError("Invalid value passed for the mont_pub parameter.") return bytes(cls._mont_pub_to_ed_pub(bytearray(mont_pub)))
[ "def", "mont_pub_to_ed_pub", "(", "cls", ",", "mont_pub", ")", ":", "if", "not", "isinstance", "(", "mont_pub", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"Wrong type passed for the mont_pub parameter.\"", ")", "if", "len", "(", "mont_pub", ")", "!=", "cls", ".", "MONT_PUB_KEY_SIZE", ":", "raise", "ValueError", "(", "\"Invalid value passed for the mont_pub parameter.\"", ")", "return", "bytes", "(", "cls", ".", "_mont_pub_to_ed_pub", "(", "bytearray", "(", "mont_pub", ")", ")", ")" ]
Derive a Twisted Edwards public key from given Montgomery public key. :param mont_pub: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. :returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE.
[ "Derive", "a", "Twisted", "Edwards", "public", "key", "from", "given", "Montgomery", "public", "key", "." ]
a11721524c96ce354cca3628e003c6fcf7ce3e42
https://github.com/Syndace/python-xeddsa/blob/a11721524c96ce354cca3628e003c6fcf7ce3e42/xeddsa/xeddsa.py#L150-L165
train
Syndace/python-xeddsa
xeddsa/xeddsa.py
XEdDSA.sign
def sign(self, data, nonce = None): """ Sign data using the Montgomery private key stored by this XEdDSA instance. :param data: A bytes-like object containing the data to sign. :param nonce: A bytes-like object with length 64 or None. :returns: A bytes-like object encoding the signature with length SIGNATURE_SIZE. If the nonce parameter is None, a new nonce is generated and used. :raises MissingKeyException: If the Montgomery private key is not available. """ cls = self.__class__ if not self.__mont_priv: raise MissingKeyException( "Cannot sign using this XEdDSA instance, Montgomery private key missing." ) if not isinstance(data, bytes): raise TypeError("The data parameter must be a bytes-like object.") if nonce == None: nonce = os.urandom(64) if not isinstance(nonce, bytes): raise TypeError("Wrong type passed for the nonce parameter.") if len(nonce) != 64: raise ValueError("Invalid value passed for the nonce parameter.") ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(self.__mont_priv)) return bytes(cls._sign( bytearray(data), bytearray(nonce), ed_priv, ed_pub ))
python
def sign(self, data, nonce = None): """ Sign data using the Montgomery private key stored by this XEdDSA instance. :param data: A bytes-like object containing the data to sign. :param nonce: A bytes-like object with length 64 or None. :returns: A bytes-like object encoding the signature with length SIGNATURE_SIZE. If the nonce parameter is None, a new nonce is generated and used. :raises MissingKeyException: If the Montgomery private key is not available. """ cls = self.__class__ if not self.__mont_priv: raise MissingKeyException( "Cannot sign using this XEdDSA instance, Montgomery private key missing." ) if not isinstance(data, bytes): raise TypeError("The data parameter must be a bytes-like object.") if nonce == None: nonce = os.urandom(64) if not isinstance(nonce, bytes): raise TypeError("Wrong type passed for the nonce parameter.") if len(nonce) != 64: raise ValueError("Invalid value passed for the nonce parameter.") ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(self.__mont_priv)) return bytes(cls._sign( bytearray(data), bytearray(nonce), ed_priv, ed_pub ))
[ "def", "sign", "(", "self", ",", "data", ",", "nonce", "=", "None", ")", ":", "cls", "=", "self", ".", "__class__", "if", "not", "self", ".", "__mont_priv", ":", "raise", "MissingKeyException", "(", "\"Cannot sign using this XEdDSA instance, Montgomery private key missing.\"", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"The data parameter must be a bytes-like object.\"", ")", "if", "nonce", "==", "None", ":", "nonce", "=", "os", ".", "urandom", "(", "64", ")", "if", "not", "isinstance", "(", "nonce", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"Wrong type passed for the nonce parameter.\"", ")", "if", "len", "(", "nonce", ")", "!=", "64", ":", "raise", "ValueError", "(", "\"Invalid value passed for the nonce parameter.\"", ")", "ed_priv", ",", "ed_pub", "=", "cls", ".", "_mont_priv_to_ed_pair", "(", "bytearray", "(", "self", ".", "__mont_priv", ")", ")", "return", "bytes", "(", "cls", ".", "_sign", "(", "bytearray", "(", "data", ")", ",", "bytearray", "(", "nonce", ")", ",", "ed_priv", ",", "ed_pub", ")", ")" ]
Sign data using the Montgomery private key stored by this XEdDSA instance. :param data: A bytes-like object containing the data to sign. :param nonce: A bytes-like object with length 64 or None. :returns: A bytes-like object encoding the signature with length SIGNATURE_SIZE. If the nonce parameter is None, a new nonce is generated and used. :raises MissingKeyException: If the Montgomery private key is not available.
[ "Sign", "data", "using", "the", "Montgomery", "private", "key", "stored", "by", "this", "XEdDSA", "instance", "." ]
a11721524c96ce354cca3628e003c6fcf7ce3e42
https://github.com/Syndace/python-xeddsa/blob/a11721524c96ce354cca3628e003c6fcf7ce3e42/xeddsa/xeddsa.py#L179-L218
train
Syndace/python-xeddsa
xeddsa/xeddsa.py
XEdDSA.verify
def verify(self, data, signature): """ Verify signed data using the Montgomery public key stored by this XEdDSA instance. :param data: A bytes-like object containing the data that was signed. :param signature: A bytes-like object encoding the signature with length SIGNATURE_SIZE. :returns: A boolean indicating whether the signature was valid or not. """ cls = self.__class__ if not isinstance(data, bytes): raise TypeError("The data parameter must be a bytes-like object.") if not isinstance(signature, bytes): raise TypeError("Wrong type passed for the signature parameter.") if len(signature) != cls.SIGNATURE_SIZE: raise ValueError("Invalid value passed for the signature parameter.") return cls._verify( bytearray(data), bytearray(signature), cls._mont_pub_to_ed_pub(bytearray(self.__mont_pub)) )
python
def verify(self, data, signature): """ Verify signed data using the Montgomery public key stored by this XEdDSA instance. :param data: A bytes-like object containing the data that was signed. :param signature: A bytes-like object encoding the signature with length SIGNATURE_SIZE. :returns: A boolean indicating whether the signature was valid or not. """ cls = self.__class__ if not isinstance(data, bytes): raise TypeError("The data parameter must be a bytes-like object.") if not isinstance(signature, bytes): raise TypeError("Wrong type passed for the signature parameter.") if len(signature) != cls.SIGNATURE_SIZE: raise ValueError("Invalid value passed for the signature parameter.") return cls._verify( bytearray(data), bytearray(signature), cls._mont_pub_to_ed_pub(bytearray(self.__mont_pub)) )
[ "def", "verify", "(", "self", ",", "data", ",", "signature", ")", ":", "cls", "=", "self", ".", "__class__", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"The data parameter must be a bytes-like object.\"", ")", "if", "not", "isinstance", "(", "signature", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"Wrong type passed for the signature parameter.\"", ")", "if", "len", "(", "signature", ")", "!=", "cls", ".", "SIGNATURE_SIZE", ":", "raise", "ValueError", "(", "\"Invalid value passed for the signature parameter.\"", ")", "return", "cls", ".", "_verify", "(", "bytearray", "(", "data", ")", ",", "bytearray", "(", "signature", ")", ",", "cls", ".", "_mont_pub_to_ed_pub", "(", "bytearray", "(", "self", ".", "__mont_pub", ")", ")", ")" ]
Verify signed data using the Montgomery public key stored by this XEdDSA instance. :param data: A bytes-like object containing the data that was signed. :param signature: A bytes-like object encoding the signature with length SIGNATURE_SIZE. :returns: A boolean indicating whether the signature was valid or not.
[ "Verify", "signed", "data", "using", "the", "Montgomery", "public", "key", "stored", "by", "this", "XEdDSA", "instance", "." ]
a11721524c96ce354cca3628e003c6fcf7ce3e42
https://github.com/Syndace/python-xeddsa/blob/a11721524c96ce354cca3628e003c6fcf7ce3e42/xeddsa/xeddsa.py#L234-L259
train
ZanderBrown/nudatus
nudatus.py
mangle
def mangle(text): """ Takes a script and mangles it TokenError is thrown when encountering bad syntax """ text_bytes = text.encode('utf-8') # Wrap the input script as a byte stream buff = BytesIO(text_bytes) # Byte stream for the mangled script mangled = BytesIO() last_tok = token.INDENT last_line = -1 last_col = 0 last_line_text = '' open_list_dicts = 0 # Build tokens from the script tokens = tokenizer(buff.readline) for t, text, (line_s, col_s), (line_e, col_e), line in tokens: # If this is a new line (except the very first) if line_s > last_line and last_line != -1: # Reset the column last_col = 0 # If the last line ended in a '\' (continuation) if last_line_text.rstrip()[-1:] == '\\': # Recreate it mangled.write(b' \\\n') # We don't want to be calling the this multiple times striped = text.strip() # Tokens or characters for opening or closing a list/dict list_dict_open = [token.LSQB, token.LBRACE, '[', '{'] list_dict_close = [token.RSQB, token.RBRACE, ']', '}'] # If this is a list or dict if t in list_dict_open or striped in list_dict_open: # Increase the dict / list level open_list_dicts += 1 elif t in list_dict_close or striped in list_dict_close: # Decrease the dict / list level open_list_dicts -= 1 # Remove docstrings # Docstrings are strings not used in an expression, # unfortunatly it isn't as simple as "t is string and t # not in expression" if t == token.STRING and (last_tok == token.INDENT or ( (last_tok == token.NEWLINE or last_tok == tokenize.NL or last_tok == token.DEDENT or last_tok == tokenize.ENCODING) and open_list_dicts == 0)): # Output number of lines corresponding those in # the docstring comment mangled.write(b'\n' * (len(text.split('\n')) - 1)) # Or is it a standard comment elif t == tokenize.COMMENT: # Plain comment, just don't write it pass else: # Recreate indentation, ideally we should use tabs if col_s > last_col: mangled.write(b' ' * (col_s - last_col)) # On Python 3 the first token specifies the encoding # but we already know it's 
utf-8 and writing it just # gives us an invalid script if t != tokenize.ENCODING: mangled.write(text.encode('utf-8')) # Store the previous state last_tok = t last_col = col_e last_line = line_e last_line_text = line # Return a string return mangled.getvalue().decode('utf-8')
python
def mangle(text): """ Takes a script and mangles it TokenError is thrown when encountering bad syntax """ text_bytes = text.encode('utf-8') # Wrap the input script as a byte stream buff = BytesIO(text_bytes) # Byte stream for the mangled script mangled = BytesIO() last_tok = token.INDENT last_line = -1 last_col = 0 last_line_text = '' open_list_dicts = 0 # Build tokens from the script tokens = tokenizer(buff.readline) for t, text, (line_s, col_s), (line_e, col_e), line in tokens: # If this is a new line (except the very first) if line_s > last_line and last_line != -1: # Reset the column last_col = 0 # If the last line ended in a '\' (continuation) if last_line_text.rstrip()[-1:] == '\\': # Recreate it mangled.write(b' \\\n') # We don't want to be calling the this multiple times striped = text.strip() # Tokens or characters for opening or closing a list/dict list_dict_open = [token.LSQB, token.LBRACE, '[', '{'] list_dict_close = [token.RSQB, token.RBRACE, ']', '}'] # If this is a list or dict if t in list_dict_open or striped in list_dict_open: # Increase the dict / list level open_list_dicts += 1 elif t in list_dict_close or striped in list_dict_close: # Decrease the dict / list level open_list_dicts -= 1 # Remove docstrings # Docstrings are strings not used in an expression, # unfortunatly it isn't as simple as "t is string and t # not in expression" if t == token.STRING and (last_tok == token.INDENT or ( (last_tok == token.NEWLINE or last_tok == tokenize.NL or last_tok == token.DEDENT or last_tok == tokenize.ENCODING) and open_list_dicts == 0)): # Output number of lines corresponding those in # the docstring comment mangled.write(b'\n' * (len(text.split('\n')) - 1)) # Or is it a standard comment elif t == tokenize.COMMENT: # Plain comment, just don't write it pass else: # Recreate indentation, ideally we should use tabs if col_s > last_col: mangled.write(b' ' * (col_s - last_col)) # On Python 3 the first token specifies the encoding # but we already know it's 
utf-8 and writing it just # gives us an invalid script if t != tokenize.ENCODING: mangled.write(text.encode('utf-8')) # Store the previous state last_tok = t last_col = col_e last_line = line_e last_line_text = line # Return a string return mangled.getvalue().decode('utf-8')
[ "def", "mangle", "(", "text", ")", ":", "text_bytes", "=", "text", ".", "encode", "(", "'utf-8'", ")", "# Wrap the input script as a byte stream", "buff", "=", "BytesIO", "(", "text_bytes", ")", "# Byte stream for the mangled script", "mangled", "=", "BytesIO", "(", ")", "last_tok", "=", "token", ".", "INDENT", "last_line", "=", "-", "1", "last_col", "=", "0", "last_line_text", "=", "''", "open_list_dicts", "=", "0", "# Build tokens from the script", "tokens", "=", "tokenizer", "(", "buff", ".", "readline", ")", "for", "t", ",", "text", ",", "(", "line_s", ",", "col_s", ")", ",", "(", "line_e", ",", "col_e", ")", ",", "line", "in", "tokens", ":", "# If this is a new line (except the very first)", "if", "line_s", ">", "last_line", "and", "last_line", "!=", "-", "1", ":", "# Reset the column", "last_col", "=", "0", "# If the last line ended in a '\\' (continuation)", "if", "last_line_text", ".", "rstrip", "(", ")", "[", "-", "1", ":", "]", "==", "'\\\\'", ":", "# Recreate it", "mangled", ".", "write", "(", "b' \\\\\\n'", ")", "# We don't want to be calling the this multiple times", "striped", "=", "text", ".", "strip", "(", ")", "# Tokens or characters for opening or closing a list/dict", "list_dict_open", "=", "[", "token", ".", "LSQB", ",", "token", ".", "LBRACE", ",", "'['", ",", "'{'", "]", "list_dict_close", "=", "[", "token", ".", "RSQB", ",", "token", ".", "RBRACE", ",", "']'", ",", "'}'", "]", "# If this is a list or dict", "if", "t", "in", "list_dict_open", "or", "striped", "in", "list_dict_open", ":", "# Increase the dict / list level", "open_list_dicts", "+=", "1", "elif", "t", "in", "list_dict_close", "or", "striped", "in", "list_dict_close", ":", "# Decrease the dict / list level", "open_list_dicts", "-=", "1", "# Remove docstrings", "# Docstrings are strings not used in an expression,", "# unfortunatly it isn't as simple as \"t is string and t", "# not in expression\"", "if", "t", "==", "token", ".", "STRING", "and", "(", "last_tok", "==", "token", 
".", "INDENT", "or", "(", "(", "last_tok", "==", "token", ".", "NEWLINE", "or", "last_tok", "==", "tokenize", ".", "NL", "or", "last_tok", "==", "token", ".", "DEDENT", "or", "last_tok", "==", "tokenize", ".", "ENCODING", ")", "and", "open_list_dicts", "==", "0", ")", ")", ":", "# Output number of lines corresponding those in", "# the docstring comment", "mangled", ".", "write", "(", "b'\\n'", "*", "(", "len", "(", "text", ".", "split", "(", "'\\n'", ")", ")", "-", "1", ")", ")", "# Or is it a standard comment", "elif", "t", "==", "tokenize", ".", "COMMENT", ":", "# Plain comment, just don't write it", "pass", "else", ":", "# Recreate indentation, ideally we should use tabs", "if", "col_s", ">", "last_col", ":", "mangled", ".", "write", "(", "b' '", "*", "(", "col_s", "-", "last_col", ")", ")", "# On Python 3 the first token specifies the encoding", "# but we already know it's utf-8 and writing it just", "# gives us an invalid script", "if", "t", "!=", "tokenize", ".", "ENCODING", ":", "mangled", ".", "write", "(", "text", ".", "encode", "(", "'utf-8'", ")", ")", "# Store the previous state", "last_tok", "=", "t", "last_col", "=", "col_e", "last_line", "=", "line_e", "last_line_text", "=", "line", "# Return a string", "return", "mangled", ".", "getvalue", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
Takes a script and mangles it TokenError is thrown when encountering bad syntax
[ "Takes", "a", "script", "and", "mangles", "it" ]
29a9627b09c3498fb6f9370f6e6d1c9a876453d8
https://github.com/ZanderBrown/nudatus/blob/29a9627b09c3498fb6f9370f6e6d1c9a876453d8/nudatus.py#L36-L115
train
ZanderBrown/nudatus
nudatus.py
main
def main(argv=None): """ Command line entry point """ if not argv: argv = sys.argv[1:] parser = argparse.ArgumentParser(description=_HELP_TEXT) parser.add_argument('input', nargs='?', default=None) parser.add_argument('output', nargs='?', default=None) parser.add_argument('--version', action='version', version='%(prog)s ' + get_version()) args = parser.parse_args(argv) if not args.input: print("No file specified", file=sys.stderr) sys.exit(1) try: with open(args.input, 'r') as f: res = mangle(f.read()) if not args.output: print(res, end='') else: with open(args.output, 'w') as o: o.write(res) except Exception as ex: print("Error mangling {}: {!s}".format(args.input, ex), file=sys.stderr) sys.exit(1)
python
def main(argv=None): """ Command line entry point """ if not argv: argv = sys.argv[1:] parser = argparse.ArgumentParser(description=_HELP_TEXT) parser.add_argument('input', nargs='?', default=None) parser.add_argument('output', nargs='?', default=None) parser.add_argument('--version', action='version', version='%(prog)s ' + get_version()) args = parser.parse_args(argv) if not args.input: print("No file specified", file=sys.stderr) sys.exit(1) try: with open(args.input, 'r') as f: res = mangle(f.read()) if not args.output: print(res, end='') else: with open(args.output, 'w') as o: o.write(res) except Exception as ex: print("Error mangling {}: {!s}".format(args.input, ex), file=sys.stderr) sys.exit(1)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "not", "argv", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "_HELP_TEXT", ")", "parser", ".", "add_argument", "(", "'input'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'output'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'%(prog)s '", "+", "get_version", "(", ")", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "if", "not", "args", ".", "input", ":", "print", "(", "\"No file specified\"", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "with", "open", "(", "args", ".", "input", ",", "'r'", ")", "as", "f", ":", "res", "=", "mangle", "(", "f", ".", "read", "(", ")", ")", "if", "not", "args", ".", "output", ":", "print", "(", "res", ",", "end", "=", "''", ")", "else", ":", "with", "open", "(", "args", ".", "output", ",", "'w'", ")", "as", "o", ":", "o", ".", "write", "(", "res", ")", "except", "Exception", "as", "ex", ":", "print", "(", "\"Error mangling {}: {!s}\"", ".", "format", "(", "args", ".", "input", ",", "ex", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")" ]
Command line entry point
[ "Command", "line", "entry", "point" ]
29a9627b09c3498fb6f9370f6e6d1c9a876453d8
https://github.com/ZanderBrown/nudatus/blob/29a9627b09c3498fb6f9370f6e6d1c9a876453d8/nudatus.py#L127-L156
train
raymondEhlers/pachyderm
pachyderm/histogram.py
get_histograms_in_list
def get_histograms_in_list(filename: str, list_name: str = None) -> Dict[str, Any]: """ Get histograms from the file and make them available in a dict. Lists are recursively explored, with all lists converted to dictionaries, such that the return dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection`` derived objects). Args: filename: Filename of the ROOT file containing the list. list_name: Name of the list to retrieve. Returns: Contains hists with keys as their names. Lists are recursively added, mirroring the structure under which the hists were stored. Raises: ValueError: If the list could not be found in the given file. """ hists: dict = {} with RootOpen(filename = filename, mode = "READ") as fIn: if list_name is not None: hist_list = fIn.Get(list_name) else: hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()] if not hist_list: fIn.ls() # Closing this file appears (but is not entirely confirmed) to be extremely important! Otherwise, # the memory will leak, leading to ROOT memory issues! fIn.Close() raise ValueError(f"Could not find list with name \"{list_name}\". Possible names are listed above.") # Retrieve objects in the hist list for obj in hist_list: _retrieve_object(hists, obj) return hists
python
def get_histograms_in_list(filename: str, list_name: str = None) -> Dict[str, Any]: """ Get histograms from the file and make them available in a dict. Lists are recursively explored, with all lists converted to dictionaries, such that the return dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection`` derived objects). Args: filename: Filename of the ROOT file containing the list. list_name: Name of the list to retrieve. Returns: Contains hists with keys as their names. Lists are recursively added, mirroring the structure under which the hists were stored. Raises: ValueError: If the list could not be found in the given file. """ hists: dict = {} with RootOpen(filename = filename, mode = "READ") as fIn: if list_name is not None: hist_list = fIn.Get(list_name) else: hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()] if not hist_list: fIn.ls() # Closing this file appears (but is not entirely confirmed) to be extremely important! Otherwise, # the memory will leak, leading to ROOT memory issues! fIn.Close() raise ValueError(f"Could not find list with name \"{list_name}\". Possible names are listed above.") # Retrieve objects in the hist list for obj in hist_list: _retrieve_object(hists, obj) return hists
[ "def", "get_histograms_in_list", "(", "filename", ":", "str", ",", "list_name", ":", "str", "=", "None", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "hists", ":", "dict", "=", "{", "}", "with", "RootOpen", "(", "filename", "=", "filename", ",", "mode", "=", "\"READ\"", ")", "as", "fIn", ":", "if", "list_name", "is", "not", "None", ":", "hist_list", "=", "fIn", ".", "Get", "(", "list_name", ")", "else", ":", "hist_list", "=", "[", "obj", ".", "ReadObj", "(", ")", "for", "obj", "in", "fIn", ".", "GetListOfKeys", "(", ")", "]", "if", "not", "hist_list", ":", "fIn", ".", "ls", "(", ")", "# Closing this file appears (but is not entirely confirmed) to be extremely important! Otherwise,", "# the memory will leak, leading to ROOT memory issues!", "fIn", ".", "Close", "(", ")", "raise", "ValueError", "(", "f\"Could not find list with name \\\"{list_name}\\\". Possible names are listed above.\"", ")", "# Retrieve objects in the hist list", "for", "obj", "in", "hist_list", ":", "_retrieve_object", "(", "hists", ",", "obj", ")", "return", "hists" ]
Get histograms from the file and make them available in a dict. Lists are recursively explored, with all lists converted to dictionaries, such that the return dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection`` derived objects). Args: filename: Filename of the ROOT file containing the list. list_name: Name of the list to retrieve. Returns: Contains hists with keys as their names. Lists are recursively added, mirroring the structure under which the hists were stored. Raises: ValueError: If the list could not be found in the given file.
[ "Get", "histograms", "from", "the", "file", "and", "make", "them", "available", "in", "a", "dict", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/histogram.py#L52-L86
train
raymondEhlers/pachyderm
pachyderm/histogram.py
_retrieve_object
def _retrieve_object(output_dict: Dict[str, Any], obj: Any) -> None: """ Function to recursively retrieve histograms from a list in a ROOT file. ``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given ownership of the retrieved objects. Args: output_dict (dict): Dict under which hists should be stored. obj (ROOT.TObject derived): Object(s) to be stored. If it is a collection, it will be recursed through. Returns: None: Changes in the dict are reflected in the output_dict which was passed. """ import ROOT # Store TH1 or THn if isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase): # Ensure that it is not lost after the file is closed # Only works for TH1 if isinstance(obj, ROOT.TH1): obj.SetDirectory(0) # Explicitly note that python owns the object # From more on memory management with ROOT and python, see: # https://root.cern.ch/root/html/guides/users-guide/PythonRuby.html#memory-handling ROOT.SetOwnership(obj, False) # Store the object output_dict[obj.GetName()] = obj # Recurse over lists if isinstance(obj, ROOT.TCollection): # Keeping it in order simply makes it easier to follow output_dict[obj.GetName()] = {} # Iterate over the objects in the collection and recursively store them for obj_temp in list(obj): _retrieve_object(output_dict[obj.GetName()], obj_temp)
python
def _retrieve_object(output_dict: Dict[str, Any], obj: Any) -> None: """ Function to recursively retrieve histograms from a list in a ROOT file. ``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given ownership of the retrieved objects. Args: output_dict (dict): Dict under which hists should be stored. obj (ROOT.TObject derived): Object(s) to be stored. If it is a collection, it will be recursed through. Returns: None: Changes in the dict are reflected in the output_dict which was passed. """ import ROOT # Store TH1 or THn if isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase): # Ensure that it is not lost after the file is closed # Only works for TH1 if isinstance(obj, ROOT.TH1): obj.SetDirectory(0) # Explicitly note that python owns the object # From more on memory management with ROOT and python, see: # https://root.cern.ch/root/html/guides/users-guide/PythonRuby.html#memory-handling ROOT.SetOwnership(obj, False) # Store the object output_dict[obj.GetName()] = obj # Recurse over lists if isinstance(obj, ROOT.TCollection): # Keeping it in order simply makes it easier to follow output_dict[obj.GetName()] = {} # Iterate over the objects in the collection and recursively store them for obj_temp in list(obj): _retrieve_object(output_dict[obj.GetName()], obj_temp)
[ "def", "_retrieve_object", "(", "output_dict", ":", "Dict", "[", "str", ",", "Any", "]", ",", "obj", ":", "Any", ")", "->", "None", ":", "import", "ROOT", "# Store TH1 or THn", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TH1", ")", "or", "isinstance", "(", "obj", ",", "ROOT", ".", "THnBase", ")", ":", "# Ensure that it is not lost after the file is closed", "# Only works for TH1", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TH1", ")", ":", "obj", ".", "SetDirectory", "(", "0", ")", "# Explicitly note that python owns the object", "# From more on memory management with ROOT and python, see:", "# https://root.cern.ch/root/html/guides/users-guide/PythonRuby.html#memory-handling", "ROOT", ".", "SetOwnership", "(", "obj", ",", "False", ")", "# Store the object", "output_dict", "[", "obj", ".", "GetName", "(", ")", "]", "=", "obj", "# Recurse over lists", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TCollection", ")", ":", "# Keeping it in order simply makes it easier to follow", "output_dict", "[", "obj", ".", "GetName", "(", ")", "]", "=", "{", "}", "# Iterate over the objects in the collection and recursively store them", "for", "obj_temp", "in", "list", "(", "obj", ")", ":", "_retrieve_object", "(", "output_dict", "[", "obj", ".", "GetName", "(", ")", "]", ",", "obj_temp", ")" ]
Function to recursively retrieve histograms from a list in a ROOT file. ``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given ownership of the retrieved objects. Args: output_dict (dict): Dict under which hists should be stored. obj (ROOT.TObject derived): Object(s) to be stored. If it is a collection, it will be recursed through. Returns: None: Changes in the dict are reflected in the output_dict which was passed.
[ "Function", "to", "recursively", "retrieve", "histograms", "from", "a", "list", "in", "a", "ROOT", "file", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/histogram.py#L88-L124
train
raymondEhlers/pachyderm
pachyderm/histogram.py
get_array_from_hist2D
def get_array_from_hist2D(hist: Hist, set_zero_to_NaN: bool = True, return_bin_edges: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Extract x, y, and bin values from a 2D ROOT histogram. Converts the histogram into a numpy array, and suitably processes it for a surface plot by removing 0s (which can cause problems when taking logs), and returning a set of (x, y) mesh values utilziing either the bin edges or bin centers. Note: This is a different format than the 1D version! Args: hist (ROOT.TH2): Histogram to be converted. set_zero_to_NaN: If true, set 0 in the array to NaN. Useful with matplotlib so that it will ignore the values when plotting. See comments in this function for more details. Default: True. return_bin_edges: Return x and y using bin edges instead of bin centers. Returns: Contains (x values, y values, numpy array of hist data) where (x, y) are values on a grid (from np.meshgrid) using the selected bin values. """ # Process the hist into a suitable state # NOTE: The shape specific can be somewhat confusing (ie. I would naviely expected to specify the x first.) # This says that the ``GetYaxis().GetNbins()`` number of rows and ``GetXaxis().GetNbins()`` number of columns. shape = (hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins()) # To keep consistency with the root_numpy 2D hist format, we transpose the final result # This format has x values as columns. hist_array = np.array([hist.GetBinContent(x) for x in range(1, hist.GetNcells()) if not hist.IsBinUnderflow(x) and not hist.IsBinOverflow(x)]) # The hist_array was linear, so we need to shape it into our expected 2D values. hist_array = hist_array.reshape(shape) # Transpose the array to better match expectations # In particular, by transposing the array, it means that ``thist_array[1][0]`` gives the 2nd x # value (x_index = 1) and the 1st y value (y_index = 1). This is as we would expect. 
This is also # the same convention as used by root_numpy hist_array = hist_array.T # Set all 0s to nan to get similar behavior to ROOT. In ROOT, it will basically ignore 0s. This is # especially important for log plots. Matplotlib doesn't handle 0s as well, since it attempts to # plot them and then will throw exceptions when the log is taken. # By setting to nan, matplotlib basically ignores them similar to ROOT # NOTE: This requires a few special functions later which ignore nan when calculating min and max. if set_zero_to_NaN: hist_array[hist_array == 0] = np.nan if return_bin_edges: # Bin edges x_bin_edges = get_bin_edges_from_axis(hist.GetXaxis()) y_bin_edges = get_bin_edges_from_axis(hist.GetYaxis()) # NOTE: The addition of epsilon to the max is extremely important! Otherwise, the x and y # ranges will be one bin short since ``arange`` is not inclusive. This could also be resolved # by using ``linspace``, but I think this approach is perfectly fine. # NOTE: This epsilon is smaller than the one in ``utils`` because we are sometimes dealing # with small times (~ns). The other value is larger because (I seem to recall) that # smaller values didn't always place nice with ROOT, but it is fine here, since we're # working with numpy. # NOTE: This should be identical to taking the min and max of the axis using # ``TAxis.GetXmin()`` and ``TAxis.GetXmax()``, but I prefer this approach. epsilon = 1e-9 x_range = np.arange( np.amin(x_bin_edges), np.amax(x_bin_edges) + epsilon, hist.GetXaxis().GetBinWidth(1) ) y_range = np.arange( np.amin(y_bin_edges), np.amax(y_bin_edges) + epsilon, hist.GetYaxis().GetBinWidth(1) ) else: # We want an array of bin centers x_range = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)]) y_range = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, hist.GetYaxis().GetNbins() + 1)]) X, Y = np.meshgrid(x_range, y_range) return (X, Y, hist_array)
python
def get_array_from_hist2D(hist: Hist, set_zero_to_NaN: bool = True, return_bin_edges: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Extract x, y, and bin values from a 2D ROOT histogram. Converts the histogram into a numpy array, and suitably processes it for a surface plot by removing 0s (which can cause problems when taking logs), and returning a set of (x, y) mesh values utilziing either the bin edges or bin centers. Note: This is a different format than the 1D version! Args: hist (ROOT.TH2): Histogram to be converted. set_zero_to_NaN: If true, set 0 in the array to NaN. Useful with matplotlib so that it will ignore the values when plotting. See comments in this function for more details. Default: True. return_bin_edges: Return x and y using bin edges instead of bin centers. Returns: Contains (x values, y values, numpy array of hist data) where (x, y) are values on a grid (from np.meshgrid) using the selected bin values. """ # Process the hist into a suitable state # NOTE: The shape specific can be somewhat confusing (ie. I would naviely expected to specify the x first.) # This says that the ``GetYaxis().GetNbins()`` number of rows and ``GetXaxis().GetNbins()`` number of columns. shape = (hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins()) # To keep consistency with the root_numpy 2D hist format, we transpose the final result # This format has x values as columns. hist_array = np.array([hist.GetBinContent(x) for x in range(1, hist.GetNcells()) if not hist.IsBinUnderflow(x) and not hist.IsBinOverflow(x)]) # The hist_array was linear, so we need to shape it into our expected 2D values. hist_array = hist_array.reshape(shape) # Transpose the array to better match expectations # In particular, by transposing the array, it means that ``thist_array[1][0]`` gives the 2nd x # value (x_index = 1) and the 1st y value (y_index = 1). This is as we would expect. 
This is also # the same convention as used by root_numpy hist_array = hist_array.T # Set all 0s to nan to get similar behavior to ROOT. In ROOT, it will basically ignore 0s. This is # especially important for log plots. Matplotlib doesn't handle 0s as well, since it attempts to # plot them and then will throw exceptions when the log is taken. # By setting to nan, matplotlib basically ignores them similar to ROOT # NOTE: This requires a few special functions later which ignore nan when calculating min and max. if set_zero_to_NaN: hist_array[hist_array == 0] = np.nan if return_bin_edges: # Bin edges x_bin_edges = get_bin_edges_from_axis(hist.GetXaxis()) y_bin_edges = get_bin_edges_from_axis(hist.GetYaxis()) # NOTE: The addition of epsilon to the max is extremely important! Otherwise, the x and y # ranges will be one bin short since ``arange`` is not inclusive. This could also be resolved # by using ``linspace``, but I think this approach is perfectly fine. # NOTE: This epsilon is smaller than the one in ``utils`` because we are sometimes dealing # with small times (~ns). The other value is larger because (I seem to recall) that # smaller values didn't always place nice with ROOT, but it is fine here, since we're # working with numpy. # NOTE: This should be identical to taking the min and max of the axis using # ``TAxis.GetXmin()`` and ``TAxis.GetXmax()``, but I prefer this approach. epsilon = 1e-9 x_range = np.arange( np.amin(x_bin_edges), np.amax(x_bin_edges) + epsilon, hist.GetXaxis().GetBinWidth(1) ) y_range = np.arange( np.amin(y_bin_edges), np.amax(y_bin_edges) + epsilon, hist.GetYaxis().GetBinWidth(1) ) else: # We want an array of bin centers x_range = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)]) y_range = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, hist.GetYaxis().GetNbins() + 1)]) X, Y = np.meshgrid(x_range, y_range) return (X, Y, hist_array)
[ "def", "get_array_from_hist2D", "(", "hist", ":", "Hist", ",", "set_zero_to_NaN", ":", "bool", "=", "True", ",", "return_bin_edges", ":", "bool", "=", "False", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "np", ".", "ndarray", "]", ":", "# Process the hist into a suitable state", "# NOTE: The shape specific can be somewhat confusing (ie. I would naviely expected to specify the x first.)", "# This says that the ``GetYaxis().GetNbins()`` number of rows and ``GetXaxis().GetNbins()`` number of columns.", "shape", "=", "(", "hist", ".", "GetYaxis", "(", ")", ".", "GetNbins", "(", ")", ",", "hist", ".", "GetXaxis", "(", ")", ".", "GetNbins", "(", ")", ")", "# To keep consistency with the root_numpy 2D hist format, we transpose the final result", "# This format has x values as columns.", "hist_array", "=", "np", ".", "array", "(", "[", "hist", ".", "GetBinContent", "(", "x", ")", "for", "x", "in", "range", "(", "1", ",", "hist", ".", "GetNcells", "(", ")", ")", "if", "not", "hist", ".", "IsBinUnderflow", "(", "x", ")", "and", "not", "hist", ".", "IsBinOverflow", "(", "x", ")", "]", ")", "# The hist_array was linear, so we need to shape it into our expected 2D values.", "hist_array", "=", "hist_array", ".", "reshape", "(", "shape", ")", "# Transpose the array to better match expectations", "# In particular, by transposing the array, it means that ``thist_array[1][0]`` gives the 2nd x", "# value (x_index = 1) and the 1st y value (y_index = 1). This is as we would expect. This is also", "# the same convention as used by root_numpy", "hist_array", "=", "hist_array", ".", "T", "# Set all 0s to nan to get similar behavior to ROOT. In ROOT, it will basically ignore 0s. This is", "# especially important for log plots. 
Matplotlib doesn't handle 0s as well, since it attempts to", "# plot them and then will throw exceptions when the log is taken.", "# By setting to nan, matplotlib basically ignores them similar to ROOT", "# NOTE: This requires a few special functions later which ignore nan when calculating min and max.", "if", "set_zero_to_NaN", ":", "hist_array", "[", "hist_array", "==", "0", "]", "=", "np", ".", "nan", "if", "return_bin_edges", ":", "# Bin edges", "x_bin_edges", "=", "get_bin_edges_from_axis", "(", "hist", ".", "GetXaxis", "(", ")", ")", "y_bin_edges", "=", "get_bin_edges_from_axis", "(", "hist", ".", "GetYaxis", "(", ")", ")", "# NOTE: The addition of epsilon to the max is extremely important! Otherwise, the x and y", "# ranges will be one bin short since ``arange`` is not inclusive. This could also be resolved", "# by using ``linspace``, but I think this approach is perfectly fine.", "# NOTE: This epsilon is smaller than the one in ``utils`` because we are sometimes dealing", "# with small times (~ns). 
The other value is larger because (I seem to recall) that", "# smaller values didn't always place nice with ROOT, but it is fine here, since we're", "# working with numpy.", "# NOTE: This should be identical to taking the min and max of the axis using", "# ``TAxis.GetXmin()`` and ``TAxis.GetXmax()``, but I prefer this approach.", "epsilon", "=", "1e-9", "x_range", "=", "np", ".", "arange", "(", "np", ".", "amin", "(", "x_bin_edges", ")", ",", "np", ".", "amax", "(", "x_bin_edges", ")", "+", "epsilon", ",", "hist", ".", "GetXaxis", "(", ")", ".", "GetBinWidth", "(", "1", ")", ")", "y_range", "=", "np", ".", "arange", "(", "np", ".", "amin", "(", "y_bin_edges", ")", ",", "np", ".", "amax", "(", "y_bin_edges", ")", "+", "epsilon", ",", "hist", ".", "GetYaxis", "(", ")", ".", "GetBinWidth", "(", "1", ")", ")", "else", ":", "# We want an array of bin centers", "x_range", "=", "np", ".", "array", "(", "[", "hist", ".", "GetXaxis", "(", ")", ".", "GetBinCenter", "(", "i", ")", "for", "i", "in", "range", "(", "1", ",", "hist", ".", "GetXaxis", "(", ")", ".", "GetNbins", "(", ")", "+", "1", ")", "]", ")", "y_range", "=", "np", ".", "array", "(", "[", "hist", ".", "GetYaxis", "(", ")", ".", "GetBinCenter", "(", "i", ")", "for", "i", "in", "range", "(", "1", ",", "hist", ".", "GetYaxis", "(", ")", ".", "GetNbins", "(", ")", "+", "1", ")", "]", ")", "X", ",", "Y", "=", "np", ".", "meshgrid", "(", "x_range", ",", "y_range", ")", "return", "(", "X", ",", "Y", ",", "hist_array", ")" ]
Extract x, y, and bin values from a 2D ROOT histogram. Converts the histogram into a numpy array, and suitably processes it for a surface plot by removing 0s (which can cause problems when taking logs), and returning a set of (x, y) mesh values utilziing either the bin edges or bin centers. Note: This is a different format than the 1D version! Args: hist (ROOT.TH2): Histogram to be converted. set_zero_to_NaN: If true, set 0 in the array to NaN. Useful with matplotlib so that it will ignore the values when plotting. See comments in this function for more details. Default: True. return_bin_edges: Return x and y using bin edges instead of bin centers. Returns: Contains (x values, y values, numpy array of hist data) where (x, y) are values on a grid (from np.meshgrid) using the selected bin values.
[ "Extract", "x", "y", "and", "bin", "values", "from", "a", "2D", "ROOT", "histogram", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/histogram.py#L527-L600
train
raymondEhlers/pachyderm
pachyderm/histogram.py
get_bin_edges_from_axis
def get_bin_edges_from_axis(axis) -> np.ndarray: """ Get bin edges from a ROOT hist axis. Note: Doesn't include over- or underflow bins! Args: axis (ROOT.TAxis): Axis from which the bin edges should be extracted. Returns: Array containing the bin edges. """ # Don't include over- or underflow bins bins = range(1, axis.GetNbins() + 1) # Bin edges bin_edges = np.empty(len(bins) + 1) bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins] bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins()) return bin_edges
python
def get_bin_edges_from_axis(axis) -> np.ndarray: """ Get bin edges from a ROOT hist axis. Note: Doesn't include over- or underflow bins! Args: axis (ROOT.TAxis): Axis from which the bin edges should be extracted. Returns: Array containing the bin edges. """ # Don't include over- or underflow bins bins = range(1, axis.GetNbins() + 1) # Bin edges bin_edges = np.empty(len(bins) + 1) bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins] bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins()) return bin_edges
[ "def", "get_bin_edges_from_axis", "(", "axis", ")", "->", "np", ".", "ndarray", ":", "# Don't include over- or underflow bins", "bins", "=", "range", "(", "1", ",", "axis", ".", "GetNbins", "(", ")", "+", "1", ")", "# Bin edges", "bin_edges", "=", "np", ".", "empty", "(", "len", "(", "bins", ")", "+", "1", ")", "bin_edges", "[", ":", "-", "1", "]", "=", "[", "axis", ".", "GetBinLowEdge", "(", "i", ")", "for", "i", "in", "bins", "]", "bin_edges", "[", "-", "1", "]", "=", "axis", ".", "GetBinUpEdge", "(", "axis", ".", "GetNbins", "(", ")", ")", "return", "bin_edges" ]
Get bin edges from a ROOT hist axis. Note: Doesn't include over- or underflow bins! Args: axis (ROOT.TAxis): Axis from which the bin edges should be extracted. Returns: Array containing the bin edges.
[ "Get", "bin", "edges", "from", "a", "ROOT", "hist", "axis", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/histogram.py#L602-L620
train
lowandrew/OLCTools
spadespipeline/sistr.py
Sistr.sistr
def sistr(self): """Perform sistr analyses on Salmonella""" logging.info('Performing sistr analyses') with progressbar(self.metadata) as bar: for sample in bar: # Create the analysis-type specific attribute setattr(sample, self.analysistype, GenObject()) if sample.general.bestassemblyfile != 'NA': try: # Only process strains that have been determined to be Salmonella if sample.general.referencegenus == 'Salmonella': # Set and create the path of the directory to store the strain-specific reports sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype) # Name of the .json output file sample[self.analysistype].jsonoutput = os.path.join(sample[self.analysistype].reportdir, '{}.json'.format(sample.name)) # Set the sistr system call sample.commands.sistr = \ 'sistr -f json -o {} -t {} -T {} {}'\ .format(sample[self.analysistype].jsonoutput, self.cpus, os.path.join(sample[self.analysistype].reportdir, 'tmp'), sample.general.bestassemblyfile) # sample[self.analysistype].logout = os.path.join(sample[self.analysistype].reportdir, 'logout') sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].reportdir, 'logerr') # Only run the analyses if the output json file does not exist if not os.path.isfile(sample[self.analysistype].jsonoutput): out, err = run_subprocess(sample.commands.sistr) write_to_logfile(sample.commands.sistr, sample.commands.sistr, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) self.queue.task_done() except (ValueError, KeyError): pass self.queue.join() self.report()
python
def sistr(self): """Perform sistr analyses on Salmonella""" logging.info('Performing sistr analyses') with progressbar(self.metadata) as bar: for sample in bar: # Create the analysis-type specific attribute setattr(sample, self.analysistype, GenObject()) if sample.general.bestassemblyfile != 'NA': try: # Only process strains that have been determined to be Salmonella if sample.general.referencegenus == 'Salmonella': # Set and create the path of the directory to store the strain-specific reports sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype) # Name of the .json output file sample[self.analysistype].jsonoutput = os.path.join(sample[self.analysistype].reportdir, '{}.json'.format(sample.name)) # Set the sistr system call sample.commands.sistr = \ 'sistr -f json -o {} -t {} -T {} {}'\ .format(sample[self.analysistype].jsonoutput, self.cpus, os.path.join(sample[self.analysistype].reportdir, 'tmp'), sample.general.bestassemblyfile) # sample[self.analysistype].logout = os.path.join(sample[self.analysistype].reportdir, 'logout') sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].reportdir, 'logerr') # Only run the analyses if the output json file does not exist if not os.path.isfile(sample[self.analysistype].jsonoutput): out, err = run_subprocess(sample.commands.sistr) write_to_logfile(sample.commands.sistr, sample.commands.sistr, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) self.queue.task_done() except (ValueError, KeyError): pass self.queue.join() self.report()
[ "def", "sistr", "(", "self", ")", ":", "logging", ".", "info", "(", "'Performing sistr analyses'", ")", "with", "progressbar", "(", "self", ".", "metadata", ")", "as", "bar", ":", "for", "sample", "in", "bar", ":", "# Create the analysis-type specific attribute", "setattr", "(", "sample", ",", "self", ".", "analysistype", ",", "GenObject", "(", ")", ")", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "try", ":", "# Only process strains that have been determined to be Salmonella", "if", "sample", ".", "general", ".", "referencegenus", "==", "'Salmonella'", ":", "# Set and create the path of the directory to store the strain-specific reports", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", "=", "os", ".", "path", ".", "join", "(", "sample", ".", "general", ".", "outputdirectory", ",", "self", ".", "analysistype", ")", "# Name of the .json output file", "sample", "[", "self", ".", "analysistype", "]", ".", "jsonoutput", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'{}.json'", ".", "format", "(", "sample", ".", "name", ")", ")", "# Set the sistr system call", "sample", ".", "commands", ".", "sistr", "=", "'sistr -f json -o {} -t {} -T {} {}'", ".", "format", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsonoutput", ",", "self", ".", "cpus", ",", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'tmp'", ")", ",", "sample", ".", "general", ".", "bestassemblyfile", ")", "#", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'logout'", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'logerr'", ")", "# Only run the 
analyses if the output json file does not exist", "if", "not", "os", ".", "path", ".", "isfile", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsonoutput", ")", ":", "out", ",", "err", "=", "run_subprocess", "(", "sample", ".", "commands", ".", "sistr", ")", "write_to_logfile", "(", "sample", ".", "commands", ".", "sistr", ",", "sample", ".", "commands", ".", "sistr", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")", "write_to_logfile", "(", "out", ",", "err", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")", "self", ".", "queue", ".", "task_done", "(", ")", "except", "(", "ValueError", ",", "KeyError", ")", ":", "pass", "self", ".", "queue", ".", "join", "(", ")", "self", ".", "report", "(", ")" ]
Perform sistr analyses on Salmonella
[ "Perform", "sistr", "analyses", "on", "Salmonella" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/sistr.py#L18-L57
train
lowandrew/OLCTools
spadespipeline/sistr.py
Sistr.report
def report(self): """Creates sistr reports""" # Initialise strings to store report data header = '\t'.join(self.headers) + '\n' data = '' for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Each strain is a fresh row row = '' try: # Read in the output .json file into the metadata sample[self.analysistype].jsondata = json.load(open(sample[self.analysistype].jsonoutput, 'r')) # Set the name of the report. # Note that this is a tab-separated file, as there can be commas in the results sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir, '{}.tsv'.format(sample.name)) # Iterate through all the headers to use as keys in the json-formatted output for category in self.headers: # Tab separate all the results row += '{}\t'.format(sample[self.analysistype].jsondata[0][category]) # Create attributes for each category setattr(sample[self.analysistype], category, str(sample[self.analysistype].jsondata[0][category])) # End the results with a newline row += '\n' data += row # Create and write headers and results to the strain-specific report with open(sample[self.analysistype].report, 'w') as strainreport: strainreport.write(header) strainreport.write(row) except (KeyError, AttributeError): pass # Create and write headers and cumulative results to the combined report with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report: report.write(header) report.write(data)
python
def report(self): """Creates sistr reports""" # Initialise strings to store report data header = '\t'.join(self.headers) + '\n' data = '' for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Each strain is a fresh row row = '' try: # Read in the output .json file into the metadata sample[self.analysistype].jsondata = json.load(open(sample[self.analysistype].jsonoutput, 'r')) # Set the name of the report. # Note that this is a tab-separated file, as there can be commas in the results sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir, '{}.tsv'.format(sample.name)) # Iterate through all the headers to use as keys in the json-formatted output for category in self.headers: # Tab separate all the results row += '{}\t'.format(sample[self.analysistype].jsondata[0][category]) # Create attributes for each category setattr(sample[self.analysistype], category, str(sample[self.analysistype].jsondata[0][category])) # End the results with a newline row += '\n' data += row # Create and write headers and results to the strain-specific report with open(sample[self.analysistype].report, 'w') as strainreport: strainreport.write(header) strainreport.write(row) except (KeyError, AttributeError): pass # Create and write headers and cumulative results to the combined report with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report: report.write(header) report.write(data)
[ "def", "report", "(", "self", ")", ":", "# Initialise strings to store report data", "header", "=", "'\\t'", ".", "join", "(", "self", ".", "headers", ")", "+", "'\\n'", "data", "=", "''", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "# Each strain is a fresh row", "row", "=", "''", "try", ":", "# Read in the output .json file into the metadata", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "=", "json", ".", "load", "(", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsonoutput", ",", "'r'", ")", ")", "# Set the name of the report.", "# Note that this is a tab-separated file, as there can be commas in the results", "sample", "[", "self", ".", "analysistype", "]", ".", "report", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'{}.tsv'", ".", "format", "(", "sample", ".", "name", ")", ")", "# Iterate through all the headers to use as keys in the json-formatted output", "for", "category", "in", "self", ".", "headers", ":", "# Tab separate all the results", "row", "+=", "'{}\\t'", ".", "format", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "[", "0", "]", "[", "category", "]", ")", "# Create attributes for each category", "setattr", "(", "sample", "[", "self", ".", "analysistype", "]", ",", "category", ",", "str", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "[", "0", "]", "[", "category", "]", ")", ")", "# End the results with a newline", "row", "+=", "'\\n'", "data", "+=", "row", "# Create and write headers and results to the strain-specific report", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "report", ",", "'w'", ")", "as", "strainreport", ":", "strainreport", ".", "write", "(", "header", ")", "strainreport", ".", "write", "(", "row", ")", "except", "(", "KeyError", ",", "AttributeError", ")", 
":", "pass", "# Create and write headers and cumulative results to the combined report", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportdir", ",", "'sistr.tsv'", ")", ",", "'w'", ")", "as", "report", ":", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
Creates sistr reports
[ "Creates", "sistr", "reports" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/sistr.py#L59-L95
train
portfors-lab/sparkle
sparkle/gui/stim/abstract_editor.py
AbstractEditorWidget.purgeDeletedWidgets
def purgeDeletedWidgets(): """Finds old references to stashed fields and deletes them""" toremove = [] for field in AbstractEditorWidget.funit_fields: if sip.isdeleted(field): toremove.append(field) for field in toremove: AbstractEditorWidget.funit_fields.remove(field) toremove = [] for field in AbstractEditorWidget.tunit_fields: if sip.isdeleted(field): toremove.append(field) for field in toremove: AbstractEditorWidget.tunit_fields.remove(field)
python
def purgeDeletedWidgets(): """Finds old references to stashed fields and deletes them""" toremove = [] for field in AbstractEditorWidget.funit_fields: if sip.isdeleted(field): toremove.append(field) for field in toremove: AbstractEditorWidget.funit_fields.remove(field) toremove = [] for field in AbstractEditorWidget.tunit_fields: if sip.isdeleted(field): toremove.append(field) for field in toremove: AbstractEditorWidget.tunit_fields.remove(field)
[ "def", "purgeDeletedWidgets", "(", ")", ":", "toremove", "=", "[", "]", "for", "field", "in", "AbstractEditorWidget", ".", "funit_fields", ":", "if", "sip", ".", "isdeleted", "(", "field", ")", ":", "toremove", ".", "append", "(", "field", ")", "for", "field", "in", "toremove", ":", "AbstractEditorWidget", ".", "funit_fields", ".", "remove", "(", "field", ")", "toremove", "=", "[", "]", "for", "field", "in", "AbstractEditorWidget", ".", "tunit_fields", ":", "if", "sip", ".", "isdeleted", "(", "field", ")", ":", "toremove", ".", "append", "(", "field", ")", "for", "field", "in", "toremove", ":", "AbstractEditorWidget", ".", "tunit_fields", ".", "remove", "(", "field", ")" ]
Finds old references to stashed fields and deletes them
[ "Finds", "old", "references", "to", "stashed", "fields", "and", "deletes", "them" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/abstract_editor.py#L22-L36
train
lowandrew/OLCTools
spadespipeline/fastqmover.py
FastqMover.movefastq
def movefastq(self): """Find .fastq files for each sample and move them to an appropriately named folder""" logging.info('Moving FASTQ files') # Iterate through each sample for sample in self.metadata.runmetadata.samples: # Retrieve the output directory outputdir = os.path.join(self.path, sample.name) # Find any fastq files with the sample name fastqfiles = sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) \ if sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) \ else sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) \ if sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) \ else sorted(glob(os.path.join(self.path, '{}*.fastq*'.format(sample.name)))) # Only try and move the files if the files exist if fastqfiles: make_path(outputdir) # Symlink the fastq files to the directory try: list(map(lambda x: os.symlink(os.path.join('..', os.path.basename(x)), os.path.join(outputdir, os.path.basename(x))), fastqfiles)) except OSError: pass # Find any fastq files with the sample name fastqfiles = [fastq for fastq in sorted(glob(os.path.join(outputdir, '{}*.fastq*'.format(sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq] else: if outputdir: # Find any fastq files with the sample name fastqfiles = [fastq for fastq in sorted(glob(os.path.join( outputdir, '{}*.fastq*'.format(outputdir, sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq] sample.general.fastqfiles = fastqfiles
python
def movefastq(self): """Find .fastq files for each sample and move them to an appropriately named folder""" logging.info('Moving FASTQ files') # Iterate through each sample for sample in self.metadata.runmetadata.samples: # Retrieve the output directory outputdir = os.path.join(self.path, sample.name) # Find any fastq files with the sample name fastqfiles = sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) \ if sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) \ else sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) \ if sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) \ else sorted(glob(os.path.join(self.path, '{}*.fastq*'.format(sample.name)))) # Only try and move the files if the files exist if fastqfiles: make_path(outputdir) # Symlink the fastq files to the directory try: list(map(lambda x: os.symlink(os.path.join('..', os.path.basename(x)), os.path.join(outputdir, os.path.basename(x))), fastqfiles)) except OSError: pass # Find any fastq files with the sample name fastqfiles = [fastq for fastq in sorted(glob(os.path.join(outputdir, '{}*.fastq*'.format(sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq] else: if outputdir: # Find any fastq files with the sample name fastqfiles = [fastq for fastq in sorted(glob(os.path.join( outputdir, '{}*.fastq*'.format(outputdir, sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq] sample.general.fastqfiles = fastqfiles
[ "def", "movefastq", "(", "self", ")", ":", "logging", ".", "info", "(", "'Moving FASTQ files'", ")", "# Iterate through each sample", "for", "sample", "in", "self", ".", "metadata", ".", "runmetadata", ".", "samples", ":", "# Retrieve the output directory", "outputdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "sample", ".", "name", ")", "# Find any fastq files with the sample name", "fastqfiles", "=", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'{}_*.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "if", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'{}_*.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "else", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'{}.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "if", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'{}.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "else", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'{}*.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "# Only try and move the files if the files exist", "if", "fastqfiles", ":", "make_path", "(", "outputdir", ")", "# Symlink the fastq files to the directory", "try", ":", "list", "(", "map", "(", "lambda", "x", ":", "os", ".", "symlink", "(", "os", ".", "path", ".", "join", "(", "'..'", ",", "os", ".", "path", ".", "basename", "(", "x", ")", ")", ",", "os", ".", "path", ".", "join", "(", "outputdir", ",", "os", ".", "path", ".", "basename", "(", "x", ")", ")", ")", ",", "fastqfiles", ")", ")", "except", "OSError", ":", "pass", "# Find any fastq files with the sample name", "fastqfiles", "=", "[", "fastq", "for", "fastq", "in", "sorted", "(", "glob", "(", "os", ".", "path", ".", 
"join", "(", "outputdir", ",", "'{}*.fastq*'", ".", "format", "(", "sample", ".", "name", ")", ")", ")", ")", "if", "'trimmed'", "not", "in", "fastq", "and", "'normalised'", "not", "in", "fastq", "and", "'corrected'", "not", "in", "fastq", "and", "'paired'", "not", "in", "fastq", "and", "'unpaired'", "not", "in", "fastq", "]", "else", ":", "if", "outputdir", ":", "# Find any fastq files with the sample name", "fastqfiles", "=", "[", "fastq", "for", "fastq", "in", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "outputdir", ",", "'{}*.fastq*'", ".", "format", "(", "outputdir", ",", "sample", ".", "name", ")", ")", ")", ")", "if", "'trimmed'", "not", "in", "fastq", "and", "'normalised'", "not", "in", "fastq", "and", "'corrected'", "not", "in", "fastq", "and", "'paired'", "not", "in", "fastq", "and", "'unpaired'", "not", "in", "fastq", "]", "sample", ".", "general", ".", "fastqfiles", "=", "fastqfiles" ]
Find .fastq files for each sample and move them to an appropriately named folder
[ "Find", ".", "fastq", "files", "for", "each", "sample", "and", "move", "them", "to", "an", "appropriately", "named", "folder" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/fastqmover.py#L11-L44
train
oracal/cineworld
cineworld/cineworld.py
CW.get_list
def get_list(self, datatype, url, **kwargs): """base function for connecting to API""" search_url = [url, '?'] kwargs.update({'key': self.api_key}) search_url.append(urlencode(kwargs)) data = json.loads(urlopen(''.join(search_url)).read()) return data[datatype]
python
def get_list(self, datatype, url, **kwargs): """base function for connecting to API""" search_url = [url, '?'] kwargs.update({'key': self.api_key}) search_url.append(urlencode(kwargs)) data = json.loads(urlopen(''.join(search_url)).read()) return data[datatype]
[ "def", "get_list", "(", "self", ",", "datatype", ",", "url", ",", "*", "*", "kwargs", ")", ":", "search_url", "=", "[", "url", ",", "'?'", "]", "kwargs", ".", "update", "(", "{", "'key'", ":", "self", ".", "api_key", "}", ")", "search_url", ".", "append", "(", "urlencode", "(", "kwargs", ")", ")", "data", "=", "json", ".", "loads", "(", "urlopen", "(", "''", ".", "join", "(", "search_url", ")", ")", ".", "read", "(", ")", ")", "return", "data", "[", "datatype", "]" ]
base function for connecting to API
[ "base", "function", "for", "connecting", "to", "API" ]
073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5
https://github.com/oracal/cineworld/blob/073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5/cineworld/cineworld.py#L40-L46
train
oracal/cineworld
cineworld/cineworld.py
CW.film_search
def film_search(self, title): """film search using fuzzy matching""" films = [] #check for cache or update if not hasattr(self, 'film_list'): self.get_film_list() #iterate over films and check for fuzzy string match for film in self.film_list: strength = WRatio(title, film['title']) if strength > 80: film.update({u'strength':strength}) films.append(film) #sort films by the strength of the fuzzy string match films_sorted = sorted(films, key=itemgetter('strength'), reverse = True) return films_sorted
python
def film_search(self, title): """film search using fuzzy matching""" films = [] #check for cache or update if not hasattr(self, 'film_list'): self.get_film_list() #iterate over films and check for fuzzy string match for film in self.film_list: strength = WRatio(title, film['title']) if strength > 80: film.update({u'strength':strength}) films.append(film) #sort films by the strength of the fuzzy string match films_sorted = sorted(films, key=itemgetter('strength'), reverse = True) return films_sorted
[ "def", "film_search", "(", "self", ",", "title", ")", ":", "films", "=", "[", "]", "#check for cache or update", "if", "not", "hasattr", "(", "self", ",", "'film_list'", ")", ":", "self", ".", "get_film_list", "(", ")", "#iterate over films and check for fuzzy string match ", "for", "film", "in", "self", ".", "film_list", ":", "strength", "=", "WRatio", "(", "title", ",", "film", "[", "'title'", "]", ")", "if", "strength", ">", "80", ":", "film", ".", "update", "(", "{", "u'strength'", ":", "strength", "}", ")", "films", ".", "append", "(", "film", ")", "#sort films by the strength of the fuzzy string match", "films_sorted", "=", "sorted", "(", "films", ",", "key", "=", "itemgetter", "(", "'strength'", ")", ",", "reverse", "=", "True", ")", "return", "films_sorted" ]
film search using fuzzy matching
[ "film", "search", "using", "fuzzy", "matching" ]
073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5
https://github.com/oracal/cineworld/blob/073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5/cineworld/cineworld.py#L81-L95
train
oracal/cineworld
cineworld/cineworld.py
CW.get_film_id
def get_film_id(self, title, three_dimensional=False): """get the film id using the title in conjunction with the searching function""" films = self.film_search(title) for film in films: if (film['title'].find('3D') is - 1) is not three_dimensional: return film['edi'] return -1
python
def get_film_id(self, title, three_dimensional=False): """get the film id using the title in conjunction with the searching function""" films = self.film_search(title) for film in films: if (film['title'].find('3D') is - 1) is not three_dimensional: return film['edi'] return -1
[ "def", "get_film_id", "(", "self", ",", "title", ",", "three_dimensional", "=", "False", ")", ":", "films", "=", "self", ".", "film_search", "(", "title", ")", "for", "film", "in", "films", ":", "if", "(", "film", "[", "'title'", "]", ".", "find", "(", "'3D'", ")", "is", "-", "1", ")", "is", "not", "three_dimensional", ":", "return", "film", "[", "'edi'", "]", "return", "-", "1" ]
get the film id using the title in conjunction with the searching function
[ "get", "the", "film", "id", "using", "the", "title", "in", "conjunction", "with", "the", "searching", "function" ]
073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5
https://github.com/oracal/cineworld/blob/073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5/cineworld/cineworld.py#L97-L103
train
portfors-lab/sparkle
sparkle/run/search_runner.py
SearchRunner.set_current_stim_parameter
def set_current_stim_parameter(self, param, val): """Sets a parameter on the current stimulus :param param: name of the parameter of the stimulus to set :type param: str :param val: new value to set the parameter to """ component = self._stimulus.component(0,1) component.set(param, val)
python
def set_current_stim_parameter(self, param, val): """Sets a parameter on the current stimulus :param param: name of the parameter of the stimulus to set :type param: str :param val: new value to set the parameter to """ component = self._stimulus.component(0,1) component.set(param, val)
[ "def", "set_current_stim_parameter", "(", "self", ",", "param", ",", "val", ")", ":", "component", "=", "self", ".", "_stimulus", ".", "component", "(", "0", ",", "1", ")", "component", ".", "set", "(", "param", ",", "val", ")" ]
Sets a parameter on the current stimulus :param param: name of the parameter of the stimulus to set :type param: str :param val: new value to set the parameter to
[ "Sets", "a", "parameter", "on", "the", "current", "stimulus" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/search_runner.py#L74-L82
train
portfors-lab/sparkle
sparkle/run/search_runner.py
SearchRunner.save_to_file
def save_to_file(self, data, stamp): """Saves data to current dataset. :param data: data to save to file :type data: numpy.ndarray :param stamp: time stamp of when the data was acquired :type stamp: str """ self.datafile.append(self.current_dataset_name, data) # save stimulu info info = dict(self._stimulus.componentDoc().items() + self._stimulus.testDoc().items()) print 'saving doc', info info['time_stamps'] = [stamp] info['samplerate_ad'] = self.player.aifs self.datafile.append_trace_info(self.current_dataset_name, info)
python
def save_to_file(self, data, stamp): """Saves data to current dataset. :param data: data to save to file :type data: numpy.ndarray :param stamp: time stamp of when the data was acquired :type stamp: str """ self.datafile.append(self.current_dataset_name, data) # save stimulu info info = dict(self._stimulus.componentDoc().items() + self._stimulus.testDoc().items()) print 'saving doc', info info['time_stamps'] = [stamp] info['samplerate_ad'] = self.player.aifs self.datafile.append_trace_info(self.current_dataset_name, info)
[ "def", "save_to_file", "(", "self", ",", "data", ",", "stamp", ")", ":", "self", ".", "datafile", ".", "append", "(", "self", ".", "current_dataset_name", ",", "data", ")", "# save stimulu info", "info", "=", "dict", "(", "self", ".", "_stimulus", ".", "componentDoc", "(", ")", ".", "items", "(", ")", "+", "self", ".", "_stimulus", ".", "testDoc", "(", ")", ".", "items", "(", ")", ")", "print", "'saving doc'", ",", "info", "info", "[", "'time_stamps'", "]", "=", "[", "stamp", "]", "info", "[", "'samplerate_ad'", "]", "=", "self", ".", "player", ".", "aifs", "self", ".", "datafile", ".", "append_trace_info", "(", "self", ".", "current_dataset_name", ",", "info", ")" ]
Saves data to current dataset. :param data: data to save to file :type data: numpy.ndarray :param stamp: time stamp of when the data was acquired :type stamp: str
[ "Saves", "data", "to", "current", "dataset", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/search_runner.py#L181-L195
train
lsst-sqre/sqre-codekit
codekit/progressbar.py
countdown_timer
def countdown_timer(seconds=10): """Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero. """ tick = 0.1 # seconds n_ticks = int(seconds / tick) widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()] pbar = progressbar.ProgressBar( widgets=widgets, max_value=n_ticks ).start() for i in range(n_ticks): pbar.update(i) sleep(tick) pbar.finish()
python
def countdown_timer(seconds=10): """Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero. """ tick = 0.1 # seconds n_ticks = int(seconds / tick) widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()] pbar = progressbar.ProgressBar( widgets=widgets, max_value=n_ticks ).start() for i in range(n_ticks): pbar.update(i) sleep(tick) pbar.finish()
[ "def", "countdown_timer", "(", "seconds", "=", "10", ")", ":", "tick", "=", "0.1", "# seconds", "n_ticks", "=", "int", "(", "seconds", "/", "tick", ")", "widgets", "=", "[", "'Pause for panic: '", ",", "progressbar", ".", "ETA", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", ")", "]", "pbar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "widgets", ",", "max_value", "=", "n_ticks", ")", ".", "start", "(", ")", "for", "i", "in", "range", "(", "n_ticks", ")", ":", "pbar", ".", "update", "(", "i", ")", "sleep", "(", "tick", ")", "pbar", ".", "finish", "(", ")" ]
Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero.
[ "Show", "a", "simple", "countdown", "progress", "bar" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/progressbar.py#L19-L40
train
sirfoga/pyhal
hal/files/save_as.py
FileSaver.write_dicts_to_csv
def write_dicts_to_csv(self, dicts): """Saves .csv file with posts data :param dicts: Dictionaries with same values """ csv_headers = sorted(dicts[0].keys()) with open(self.path, "w") as out_file: # write to file dict_writer = csv.DictWriter( out_file, csv_headers, delimiter=",", quotechar="\"" ) dict_writer.writeheader() dict_writer.writerows(dicts)
python
def write_dicts_to_csv(self, dicts): """Saves .csv file with posts data :param dicts: Dictionaries with same values """ csv_headers = sorted(dicts[0].keys()) with open(self.path, "w") as out_file: # write to file dict_writer = csv.DictWriter( out_file, csv_headers, delimiter=",", quotechar="\"" ) dict_writer.writeheader() dict_writer.writerows(dicts)
[ "def", "write_dicts_to_csv", "(", "self", ",", "dicts", ")", ":", "csv_headers", "=", "sorted", "(", "dicts", "[", "0", "]", ".", "keys", "(", ")", ")", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "as", "out_file", ":", "# write to file", "dict_writer", "=", "csv", ".", "DictWriter", "(", "out_file", ",", "csv_headers", ",", "delimiter", "=", "\",\"", ",", "quotechar", "=", "\"\\\"\"", ")", "dict_writer", ".", "writeheader", "(", ")", "dict_writer", ".", "writerows", "(", "dicts", ")" ]
Saves .csv file with posts data :param dicts: Dictionaries with same values
[ "Saves", ".", "csv", "file", "with", "posts", "data" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/save_as.py#L18-L29
train
sirfoga/pyhal
hal/files/save_as.py
FileSaver.write_matrix_to_csv
def write_matrix_to_csv(self, headers, data): """Saves .csv file with data :param headers: column names :param data: Data """ with open(self.path, "w") as out_file: # write to file data_writer = csv.writer(out_file, delimiter=",") data_writer.writerow(headers) # write headers data_writer.writerows(data)
python
def write_matrix_to_csv(self, headers, data): """Saves .csv file with data :param headers: column names :param data: Data """ with open(self.path, "w") as out_file: # write to file data_writer = csv.writer(out_file, delimiter=",") data_writer.writerow(headers) # write headers data_writer.writerows(data)
[ "def", "write_matrix_to_csv", "(", "self", ",", "headers", ",", "data", ")", ":", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "as", "out_file", ":", "# write to file", "data_writer", "=", "csv", ".", "writer", "(", "out_file", ",", "delimiter", "=", "\",\"", ")", "data_writer", ".", "writerow", "(", "headers", ")", "# write headers", "data_writer", ".", "writerows", "(", "data", ")" ]
Saves .csv file with data :param headers: column names :param data: Data
[ "Saves", ".", "csv", "file", "with", "data" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/save_as.py#L31-L40
train
sirfoga/pyhal
hal/files/save_as.py
FileSaver.write_dicts_to_json
def write_dicts_to_json(self, data): """Saves .json file with data :param data: Data """ with open(self.path, "w") as out: json.dump( data, # data out, # file handler indent=4, sort_keys=True # pretty print )
python
def write_dicts_to_json(self, data): """Saves .json file with data :param data: Data """ with open(self.path, "w") as out: json.dump( data, # data out, # file handler indent=4, sort_keys=True # pretty print )
[ "def", "write_dicts_to_json", "(", "self", ",", "data", ")", ":", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "as", "out", ":", "json", ".", "dump", "(", "data", ",", "# data", "out", ",", "# file handler", "indent", "=", "4", ",", "sort_keys", "=", "True", "# pretty print", ")" ]
Saves .json file with data :param data: Data
[ "Saves", ".", "json", "file", "with", "data" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/save_as.py#L42-L52
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.start_listening
def start_listening(self): """Start listener threads for acquistion callback queues""" self._qlisten() self._halt_threads = False for t in self.queue_threads: t.start()
python
def start_listening(self): """Start listener threads for acquistion callback queues""" self._qlisten() self._halt_threads = False for t in self.queue_threads: t.start()
[ "def", "start_listening", "(", "self", ")", ":", "self", ".", "_qlisten", "(", ")", "self", ".", "_halt_threads", "=", "False", "for", "t", "in", "self", ".", "queue_threads", ":", "t", ".", "start", "(", ")" ]
Start listener threads for acquistion callback queues
[ "Start", "listener", "threads", "for", "acquistion", "callback", "queues" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L87-L92
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.stop_listening
def stop_listening(self): """Stop listener threads for acquistion queues""" self._halt_threads = True # wake them up so that they can die for name, queue_waker in self.recieved_signals.items(): q, wake_event = queue_waker wake_event.set()
python
def stop_listening(self): """Stop listener threads for acquistion queues""" self._halt_threads = True # wake them up so that they can die for name, queue_waker in self.recieved_signals.items(): q, wake_event = queue_waker wake_event.set()
[ "def", "stop_listening", "(", "self", ")", ":", "self", ".", "_halt_threads", "=", "True", "# wake them up so that they can die", "for", "name", ",", "queue_waker", "in", "self", ".", "recieved_signals", ".", "items", "(", ")", ":", "q", ",", "wake_event", "=", "queue_waker", "wake_event", ".", "set", "(", ")" ]
Stop listener threads for acquistion queues
[ "Stop", "listener", "threads", "for", "acquistion", "queues" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L94-L100
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_queue_callback
def set_queue_callback(self, name, func): """Sets a function to execute when the named acquistion queue has data placed in it. :param name: name of the queue to pull data from :type name: str :param func: function reference to execute, expects queue contents as argument(s) :type func: callable """ if name in self.acquisition_hooks: self.acquisition_hooks[name].append(func) else: self.acquisition_hooks[name] = [func]
python
def set_queue_callback(self, name, func): """Sets a function to execute when the named acquistion queue has data placed in it. :param name: name of the queue to pull data from :type name: str :param func: function reference to execute, expects queue contents as argument(s) :type func: callable """ if name in self.acquisition_hooks: self.acquisition_hooks[name].append(func) else: self.acquisition_hooks[name] = [func]
[ "def", "set_queue_callback", "(", "self", ",", "name", ",", "func", ")", ":", "if", "name", "in", "self", ".", "acquisition_hooks", ":", "self", ".", "acquisition_hooks", "[", "name", "]", ".", "append", "(", "func", ")", "else", ":", "self", ".", "acquisition_hooks", "[", "name", "]", "=", "[", "func", "]" ]
Sets a function to execute when the named acquistion queue has data placed in it. :param name: name of the queue to pull data from :type name: str :param func: function reference to execute, expects queue contents as argument(s) :type func: callable
[ "Sets", "a", "function", "to", "execute", "when", "the", "named", "acquistion", "queue", "has", "data", "placed", "in", "it", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L102-L114
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_calibration
def set_calibration(self, datakey, calf=None, frange=None): """Sets a calibration for all of the acquisition operations, from an already gathered calibration data set. :param datakey: name of the calibration to set. This key must be present in the current data file. A value of ``None`` clears calibration. :type datakey: str :param calf: Calibration frequency for the attenuation vector to be in relation to. All other frequencies will be in attenutaion from this frequency. :type calf: int :param frange: Frequency range, low and high, for which to restrict the calibration to :type frange: (int, int) """ if datakey is None: calibration_vector, calibration_freqs = None, None else: if calf is None: raise Exception('calibration reference frequency must be specified') try: cal = self.datafile.get_calibration(datakey, calf) except: print "Error: unable to load calibration data from: ", datakey raise calibration_vector, calibration_freqs = cal # clear one cache -- affects all StimulusModels StimulusModel.clearCache() logger = logging.getLogger('main') logger.debug('clearing cache') logger.debug('setting explore calibration') self.explorer.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting protocol calibration') self.protocoler.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting chart calibration') self.charter.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting calibrator calibration') self.bs_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting tone calibrator calibration') self.tone_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey)
python
def set_calibration(self, datakey, calf=None, frange=None): """Sets a calibration for all of the acquisition operations, from an already gathered calibration data set. :param datakey: name of the calibration to set. This key must be present in the current data file. A value of ``None`` clears calibration. :type datakey: str :param calf: Calibration frequency for the attenuation vector to be in relation to. All other frequencies will be in attenutaion from this frequency. :type calf: int :param frange: Frequency range, low and high, for which to restrict the calibration to :type frange: (int, int) """ if datakey is None: calibration_vector, calibration_freqs = None, None else: if calf is None: raise Exception('calibration reference frequency must be specified') try: cal = self.datafile.get_calibration(datakey, calf) except: print "Error: unable to load calibration data from: ", datakey raise calibration_vector, calibration_freqs = cal # clear one cache -- affects all StimulusModels StimulusModel.clearCache() logger = logging.getLogger('main') logger.debug('clearing cache') logger.debug('setting explore calibration') self.explorer.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting protocol calibration') self.protocoler.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting chart calibration') self.charter.set_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting calibrator calibration') self.bs_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey) logger.debug('setting tone calibrator calibration') self.tone_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey)
[ "def", "set_calibration", "(", "self", ",", "datakey", ",", "calf", "=", "None", ",", "frange", "=", "None", ")", ":", "if", "datakey", "is", "None", ":", "calibration_vector", ",", "calibration_freqs", "=", "None", ",", "None", "else", ":", "if", "calf", "is", "None", ":", "raise", "Exception", "(", "'calibration reference frequency must be specified'", ")", "try", ":", "cal", "=", "self", ".", "datafile", ".", "get_calibration", "(", "datakey", ",", "calf", ")", "except", ":", "print", "\"Error: unable to load calibration data from: \"", ",", "datakey", "raise", "calibration_vector", ",", "calibration_freqs", "=", "cal", "# clear one cache -- affects all StimulusModels", "StimulusModel", ".", "clearCache", "(", ")", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "debug", "(", "'clearing cache'", ")", "logger", ".", "debug", "(", "'setting explore calibration'", ")", "self", ".", "explorer", ".", "set_calibration", "(", "calibration_vector", ",", "calibration_freqs", ",", "frange", ",", "datakey", ")", "logger", ".", "debug", "(", "'setting protocol calibration'", ")", "self", ".", "protocoler", ".", "set_calibration", "(", "calibration_vector", ",", "calibration_freqs", ",", "frange", ",", "datakey", ")", "logger", ".", "debug", "(", "'setting chart calibration'", ")", "self", ".", "charter", ".", "set_calibration", "(", "calibration_vector", ",", "calibration_freqs", ",", "frange", ",", "datakey", ")", "logger", ".", "debug", "(", "'setting calibrator calibration'", ")", "self", ".", "bs_calibrator", ".", "stash_calibration", "(", "calibration_vector", ",", "calibration_freqs", ",", "frange", ",", "datakey", ")", "logger", ".", "debug", "(", "'setting tone calibrator calibration'", ")", "self", ".", "tone_calibrator", ".", "stash_calibration", "(", "calibration_vector", ",", "calibration_freqs", ",", "frange", ",", "datakey", ")" ]
Sets a calibration for all of the acquisition operations, from an already gathered calibration data set. :param datakey: name of the calibration to set. This key must be present in the current data file. A value of ``None`` clears calibration. :type datakey: str :param calf: Calibration frequency for the attenuation vector to be in relation to. All other frequencies will be in attenutaion from this frequency. :type calf: int :param frange: Frequency range, low and high, for which to restrict the calibration to :type frange: (int, int)
[ "Sets", "a", "calibration", "for", "all", "of", "the", "acquisition", "operations", "from", "an", "already", "gathered", "calibration", "data", "set", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L127-L162
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_calibration_duration
def set_calibration_duration(self, dur): """Sets the stimulus duration for the calibration stimulus. Sets for calibration chirp, test tone, and calibration curve tones :param dur: Duration (seconds) of output signal :type dur: float """ self.bs_calibrator.set_duration(dur) self.tone_calibrator.set_duration(dur)
python
def set_calibration_duration(self, dur): """Sets the stimulus duration for the calibration stimulus. Sets for calibration chirp, test tone, and calibration curve tones :param dur: Duration (seconds) of output signal :type dur: float """ self.bs_calibrator.set_duration(dur) self.tone_calibrator.set_duration(dur)
[ "def", "set_calibration_duration", "(", "self", ",", "dur", ")", ":", "self", ".", "bs_calibrator", ".", "set_duration", "(", "dur", ")", "self", ".", "tone_calibrator", ".", "set_duration", "(", "dur", ")" ]
Sets the stimulus duration for the calibration stimulus. Sets for calibration chirp, test tone, and calibration curve tones :param dur: Duration (seconds) of output signal :type dur: float
[ "Sets", "the", "stimulus", "duration", "for", "the", "calibration", "stimulus", ".", "Sets", "for", "calibration", "chirp", "test", "tone", "and", "calibration", "curve", "tones" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L171-L178
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_calibration_reps
def set_calibration_reps(self, reps): """Sets the number of repetitions for calibration stimuli :param reps: Number of times a unique stimulus is presented in calibration operations :type reps: int """ self.bs_calibrator.set_reps(reps) self.tone_calibrator.set_reps(reps)
python
def set_calibration_reps(self, reps): """Sets the number of repetitions for calibration stimuli :param reps: Number of times a unique stimulus is presented in calibration operations :type reps: int """ self.bs_calibrator.set_reps(reps) self.tone_calibrator.set_reps(reps)
[ "def", "set_calibration_reps", "(", "self", ",", "reps", ")", ":", "self", ".", "bs_calibrator", ".", "set_reps", "(", "reps", ")", "self", ".", "tone_calibrator", ".", "set_reps", "(", "reps", ")" ]
Sets the number of repetitions for calibration stimuli :param reps: Number of times a unique stimulus is presented in calibration operations :type reps: int
[ "Sets", "the", "number", "of", "repetitions", "for", "calibration", "stimuli" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L180-L187
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.load_data_file
def load_data_file(self, fname, filemode='a'): """Opens an existing data file to append to :param fname: File path of the location for the data file to open :type fname: str """ self.close_data() self.datafile = open_acqdata(fname, filemode=filemode) self.explorer.set(datafile=self.datafile) self.protocoler.set(datafile=self.datafile) self.charter.set(datafile=self.datafile) self.bs_calibrator.set(datafile=self.datafile) self.tone_calibrator.set(datafile=self.datafile) self.set_calibration(None) self.current_cellid = dict(self.datafile.get_info('')).get('total cells', 0)
python
def load_data_file(self, fname, filemode='a'): """Opens an existing data file to append to :param fname: File path of the location for the data file to open :type fname: str """ self.close_data() self.datafile = open_acqdata(fname, filemode=filemode) self.explorer.set(datafile=self.datafile) self.protocoler.set(datafile=self.datafile) self.charter.set(datafile=self.datafile) self.bs_calibrator.set(datafile=self.datafile) self.tone_calibrator.set(datafile=self.datafile) self.set_calibration(None) self.current_cellid = dict(self.datafile.get_info('')).get('total cells', 0)
[ "def", "load_data_file", "(", "self", ",", "fname", ",", "filemode", "=", "'a'", ")", ":", "self", ".", "close_data", "(", ")", "self", ".", "datafile", "=", "open_acqdata", "(", "fname", ",", "filemode", "=", "filemode", ")", "self", ".", "explorer", ".", "set", "(", "datafile", "=", "self", ".", "datafile", ")", "self", ".", "protocoler", ".", "set", "(", "datafile", "=", "self", ".", "datafile", ")", "self", ".", "charter", ".", "set", "(", "datafile", "=", "self", ".", "datafile", ")", "self", ".", "bs_calibrator", ".", "set", "(", "datafile", "=", "self", ".", "datafile", ")", "self", ".", "tone_calibrator", ".", "set", "(", "datafile", "=", "self", ".", "datafile", ")", "self", ".", "set_calibration", "(", "None", ")", "self", ".", "current_cellid", "=", "dict", "(", "self", ".", "datafile", ".", "get_info", "(", "''", ")", ")", ".", "get", "(", "'total cells'", ",", "0", ")" ]
Opens an existing data file to append to :param fname: File path of the location for the data file to open :type fname: str
[ "Opens", "an", "existing", "data", "file", "to", "append", "to" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L189-L205
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_threshold
def set_threshold(self, threshold): """Sets spike detection threshold :param threshold: electrical potential to determine spikes (V) :type threshold: float """ self.explorer.set_threshold(threshold) self.protocoler.set_threshold(threshold)
python
def set_threshold(self, threshold): """Sets spike detection threshold :param threshold: electrical potential to determine spikes (V) :type threshold: float """ self.explorer.set_threshold(threshold) self.protocoler.set_threshold(threshold)
[ "def", "set_threshold", "(", "self", ",", "threshold", ")", ":", "self", ".", "explorer", ".", "set_threshold", "(", "threshold", ")", "self", ".", "protocoler", ".", "set_threshold", "(", "threshold", ")" ]
Sets spike detection threshold :param threshold: electrical potential to determine spikes (V) :type threshold: float
[ "Sets", "spike", "detection", "threshold" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L214-L221
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set
def set(self, **kwargs): """Sets acquisition parameters for all acquisition types See :meth:`AbstractAcquisitionRunner<sparkle.run.abstract_acquisition.AbstractAcquisitionRunner.set>` """ self.explorer.set(**kwargs) self.protocoler.set(**kwargs) self.tone_calibrator.set(**kwargs) self.charter.set(**kwargs) self.bs_calibrator.set(**kwargs) self.mphone_calibrator.set(**kwargs)
python
def set(self, **kwargs): """Sets acquisition parameters for all acquisition types See :meth:`AbstractAcquisitionRunner<sparkle.run.abstract_acquisition.AbstractAcquisitionRunner.set>` """ self.explorer.set(**kwargs) self.protocoler.set(**kwargs) self.tone_calibrator.set(**kwargs) self.charter.set(**kwargs) self.bs_calibrator.set(**kwargs) self.mphone_calibrator.set(**kwargs)
[ "def", "set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "explorer", ".", "set", "(", "*", "*", "kwargs", ")", "self", ".", "protocoler", ".", "set", "(", "*", "*", "kwargs", ")", "self", ".", "tone_calibrator", ".", "set", "(", "*", "*", "kwargs", ")", "self", ".", "charter", ".", "set", "(", "*", "*", "kwargs", ")", "self", ".", "bs_calibrator", ".", "set", "(", "*", "*", "kwargs", ")", "self", ".", "mphone_calibrator", ".", "set", "(", "*", "*", "kwargs", ")" ]
Sets acquisition parameters for all acquisition types See :meth:`AbstractAcquisitionRunner<sparkle.run.abstract_acquisition.AbstractAcquisitionRunner.set>`
[ "Sets", "acquisition", "parameters", "for", "all", "acquisition", "types" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L223-L233
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.set_mphone_calibration
def set_mphone_calibration(self, sens, db): """Sets the microphone calibration, for the purpose of calculating recorded dB levels :param sens: microphone sensitivity (V) :type sens: float :param db: dB SPL that the calibration was measured at :type db: int """ self.bs_calibrator.set_mphone_calibration(sens, db) self.tone_calibrator.set_mphone_calibration(sens, db)
python
def set_mphone_calibration(self, sens, db): """Sets the microphone calibration, for the purpose of calculating recorded dB levels :param sens: microphone sensitivity (V) :type sens: float :param db: dB SPL that the calibration was measured at :type db: int """ self.bs_calibrator.set_mphone_calibration(sens, db) self.tone_calibrator.set_mphone_calibration(sens, db)
[ "def", "set_mphone_calibration", "(", "self", ",", "sens", ",", "db", ")", ":", "self", ".", "bs_calibrator", ".", "set_mphone_calibration", "(", "sens", ",", "db", ")", "self", ".", "tone_calibrator", ".", "set_mphone_calibration", "(", "sens", ",", "db", ")" ]
Sets the microphone calibration, for the purpose of calculating recorded dB levels :param sens: microphone sensitivity (V) :type sens: float :param db: dB SPL that the calibration was measured at :type db: int
[ "Sets", "the", "microphone", "calibration", "for", "the", "purpose", "of", "calculating", "recorded", "dB", "levels" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L276-L285
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.run_chart_protocol
def run_chart_protocol(self, interval): """Runs the stimuli presentation during a chart acquisition :param interval: The repetition interval between stimuli presentations (seconds) :type interval: float :returns: :py:class:`threading.Thread` -- the acquisition thread """ self.charter.setup(interval) return self.charter.run()
python
def run_chart_protocol(self, interval): """Runs the stimuli presentation during a chart acquisition :param interval: The repetition interval between stimuli presentations (seconds) :type interval: float :returns: :py:class:`threading.Thread` -- the acquisition thread """ self.charter.setup(interval) return self.charter.run()
[ "def", "run_chart_protocol", "(", "self", ",", "interval", ")", ":", "self", ".", "charter", ".", "setup", "(", "interval", ")", "return", "self", ".", "charter", ".", "run", "(", ")" ]
Runs the stimuli presentation during a chart acquisition :param interval: The repetition interval between stimuli presentations (seconds) :type interval: float :returns: :py:class:`threading.Thread` -- the acquisition thread
[ "Runs", "the", "stimuli", "presentation", "during", "a", "chart", "acquisition" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L332-L340
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.process_calibration
def process_calibration(self, save=True, calf=20000): """Processes a completed calibration :param save: Wether to save this calibration to file :type save: bool :param calf: Frequency for which to reference attenuation curve from :type calf: int :returns: str -- name of a saved calibration """ if self.selected_calibration_index == 2: raise Exception("Calibration curve processing not currently supported") else: results, calname, freq, db = self.bs_calibrator.process_calibration(save) return calname, db
python
def process_calibration(self, save=True, calf=20000): """Processes a completed calibration :param save: Wether to save this calibration to file :type save: bool :param calf: Frequency for which to reference attenuation curve from :type calf: int :returns: str -- name of a saved calibration """ if self.selected_calibration_index == 2: raise Exception("Calibration curve processing not currently supported") else: results, calname, freq, db = self.bs_calibrator.process_calibration(save) return calname, db
[ "def", "process_calibration", "(", "self", ",", "save", "=", "True", ",", "calf", "=", "20000", ")", ":", "if", "self", ".", "selected_calibration_index", "==", "2", ":", "raise", "Exception", "(", "\"Calibration curve processing not currently supported\"", ")", "else", ":", "results", ",", "calname", ",", "freq", ",", "db", "=", "self", ".", "bs_calibrator", ".", "process_calibration", "(", "save", ")", "return", "calname", ",", "db" ]
Processes a completed calibration :param save: Wether to save this calibration to file :type save: bool :param calf: Frequency for which to reference attenuation curve from :type calf: int :returns: str -- name of a saved calibration
[ "Processes", "a", "completed", "calibration" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L342-L355
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.close_data
def close_data(self): """Closes the current data file""" # save the total number of cells to make re-loading convient if self.datafile is not None: if self.datafile.filemode != 'r': self.datafile.set_metadata('', {'total cells': self.current_cellid}) self.datafile.close() self.datafile = None
python
def close_data(self): """Closes the current data file""" # save the total number of cells to make re-loading convient if self.datafile is not None: if self.datafile.filemode != 'r': self.datafile.set_metadata('', {'total cells': self.current_cellid}) self.datafile.close() self.datafile = None
[ "def", "close_data", "(", "self", ")", ":", "# save the total number of cells to make re-loading convient", "if", "self", ".", "datafile", "is", "not", "None", ":", "if", "self", ".", "datafile", ".", "filemode", "!=", "'r'", ":", "self", ".", "datafile", ".", "set_metadata", "(", "''", ",", "{", "'total cells'", ":", "self", ".", "current_cellid", "}", ")", "self", ".", "datafile", ".", "close", "(", ")", "self", ".", "datafile", "=", "None" ]
Closes the current data file
[ "Closes", "the", "current", "data", "file" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L369-L376
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.calibration_stimulus
def calibration_stimulus(self, mode): """Gets the stimulus model for calibration :param mode: Type of stimulus to get: tone or noise :type mode: str :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>` """ if mode == 'tone': return self.tone_calibrator.stimulus elif mode =='noise': return self.bs_calibrator.stimulus
python
def calibration_stimulus(self, mode): """Gets the stimulus model for calibration :param mode: Type of stimulus to get: tone or noise :type mode: str :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>` """ if mode == 'tone': return self.tone_calibrator.stimulus elif mode =='noise': return self.bs_calibrator.stimulus
[ "def", "calibration_stimulus", "(", "self", ",", "mode", ")", ":", "if", "mode", "==", "'tone'", ":", "return", "self", ".", "tone_calibrator", ".", "stimulus", "elif", "mode", "==", "'noise'", ":", "return", "self", ".", "bs_calibrator", ".", "stimulus" ]
Gets the stimulus model for calibration :param mode: Type of stimulus to get: tone or noise :type mode: str :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
[ "Gets", "the", "stimulus", "model", "for", "calibration" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L388-L398
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.calibration_template
def calibration_template(self): """Gets the template documentation for the both the tone curve calibration and noise calibration :returns: dict -- all information necessary to recreate calibration objects """ temp = {} temp['tone_doc'] = self.tone_calibrator.stimulus.templateDoc() comp_doc = [] for calstim in self.bs_calibrator.get_stims(): comp_doc.append(calstim.stateDict()) temp['noise_doc'] = comp_doc return temp
python
def calibration_template(self): """Gets the template documentation for the both the tone curve calibration and noise calibration :returns: dict -- all information necessary to recreate calibration objects """ temp = {} temp['tone_doc'] = self.tone_calibrator.stimulus.templateDoc() comp_doc = [] for calstim in self.bs_calibrator.get_stims(): comp_doc.append(calstim.stateDict()) temp['noise_doc'] = comp_doc return temp
[ "def", "calibration_template", "(", "self", ")", ":", "temp", "=", "{", "}", "temp", "[", "'tone_doc'", "]", "=", "self", ".", "tone_calibrator", ".", "stimulus", ".", "templateDoc", "(", ")", "comp_doc", "=", "[", "]", "for", "calstim", "in", "self", ".", "bs_calibrator", ".", "get_stims", "(", ")", ":", "comp_doc", ".", "append", "(", "calstim", ".", "stateDict", "(", ")", ")", "temp", "[", "'noise_doc'", "]", "=", "comp_doc", "return", "temp" ]
Gets the template documentation for the both the tone curve calibration and noise calibration :returns: dict -- all information necessary to recreate calibration objects
[ "Gets", "the", "template", "documentation", "for", "the", "both", "the", "tone", "curve", "calibration", "and", "noise", "calibration" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L421-L432
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.load_calibration_template
def load_calibration_template(self, template): """Reloads calibration settings from saved template doc :param template: Values for calibration stimuli (see calibration_template function) :type template: dict """ self.tone_calibrator.stimulus.clearComponents() self.tone_calibrator.stimulus.loadFromTemplate(template['tone_doc'], self.tone_calibrator.stimulus) comp_doc = template['noise_doc'] for state, calstim in zip(comp_doc, self.bs_calibrator.get_stims()): calstim.loadState(state)
python
def load_calibration_template(self, template): """Reloads calibration settings from saved template doc :param template: Values for calibration stimuli (see calibration_template function) :type template: dict """ self.tone_calibrator.stimulus.clearComponents() self.tone_calibrator.stimulus.loadFromTemplate(template['tone_doc'], self.tone_calibrator.stimulus) comp_doc = template['noise_doc'] for state, calstim in zip(comp_doc, self.bs_calibrator.get_stims()): calstim.loadState(state)
[ "def", "load_calibration_template", "(", "self", ",", "template", ")", ":", "self", ".", "tone_calibrator", ".", "stimulus", ".", "clearComponents", "(", ")", "self", ".", "tone_calibrator", ".", "stimulus", ".", "loadFromTemplate", "(", "template", "[", "'tone_doc'", "]", ",", "self", ".", "tone_calibrator", ".", "stimulus", ")", "comp_doc", "=", "template", "[", "'noise_doc'", "]", "for", "state", ",", "calstim", "in", "zip", "(", "comp_doc", ",", "self", ".", "bs_calibrator", ".", "get_stims", "(", ")", ")", ":", "calstim", ".", "loadState", "(", "state", ")" ]
Reloads calibration settings from saved template doc :param template: Values for calibration stimuli (see calibration_template function) :type template: dict
[ "Reloads", "calibration", "settings", "from", "saved", "template", "doc" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L434-L444
train
portfors-lab/sparkle
sparkle/run/acquisition_manager.py
AcquisitionManager.attenuator_connection
def attenuator_connection(self, connect=True): """Checks the connection to the attenuator, and attempts to connect if not connected. Will also set an appropriate ouput minimum for stimuli, if connection successful :returns: bool - whether there is a connection """ # all or none will be connected acquisition_modules = [self.explorer, self.protocoler, self.bs_calibrator, self.tone_calibrator, self.charter] if connect: if not acquisition_modules[0].player.attenuator_connected(): #attempt to re-connect first for module in acquisition_modules: success = module.player.connect_attenuator() if success is None: StimulusModel.setMinVoltage(0.0) return False else: StimulusModel.setMinVoltage(0.005) return True else: StimulusModel.setMinVoltage(0.005) return True else: for module in acquisition_modules: module.player.connect_attenuator(False) StimulusModel.setMinVoltage(0.0) return False
python
def attenuator_connection(self, connect=True): """Checks the connection to the attenuator, and attempts to connect if not connected. Will also set an appropriate ouput minimum for stimuli, if connection successful :returns: bool - whether there is a connection """ # all or none will be connected acquisition_modules = [self.explorer, self.protocoler, self.bs_calibrator, self.tone_calibrator, self.charter] if connect: if not acquisition_modules[0].player.attenuator_connected(): #attempt to re-connect first for module in acquisition_modules: success = module.player.connect_attenuator() if success is None: StimulusModel.setMinVoltage(0.0) return False else: StimulusModel.setMinVoltage(0.005) return True else: StimulusModel.setMinVoltage(0.005) return True else: for module in acquisition_modules: module.player.connect_attenuator(False) StimulusModel.setMinVoltage(0.0) return False
[ "def", "attenuator_connection", "(", "self", ",", "connect", "=", "True", ")", ":", "# all or none will be connected", "acquisition_modules", "=", "[", "self", ".", "explorer", ",", "self", ".", "protocoler", ",", "self", ".", "bs_calibrator", ",", "self", ".", "tone_calibrator", ",", "self", ".", "charter", "]", "if", "connect", ":", "if", "not", "acquisition_modules", "[", "0", "]", ".", "player", ".", "attenuator_connected", "(", ")", ":", "#attempt to re-connect first", "for", "module", "in", "acquisition_modules", ":", "success", "=", "module", ".", "player", ".", "connect_attenuator", "(", ")", "if", "success", "is", "None", ":", "StimulusModel", ".", "setMinVoltage", "(", "0.0", ")", "return", "False", "else", ":", "StimulusModel", ".", "setMinVoltage", "(", "0.005", ")", "return", "True", "else", ":", "StimulusModel", ".", "setMinVoltage", "(", "0.005", ")", "return", "True", "else", ":", "for", "module", "in", "acquisition_modules", ":", "module", ".", "player", ".", "connect_attenuator", "(", "False", ")", "StimulusModel", ".", "setMinVoltage", "(", "0.0", ")", "return", "False" ]
Checks the connection to the attenuator, and attempts to connect if not connected. Will also set an appropriate ouput minimum for stimuli, if connection successful :returns: bool - whether there is a connection
[ "Checks", "the", "connection", "to", "the", "attenuator", "and", "attempts", "to", "connect", "if", "not", "connected", ".", "Will", "also", "set", "an", "appropriate", "ouput", "minimum", "for", "stimuli", "if", "connection", "successful" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L456-L482
train
agramian/subprocess-manager
subprocess_manager/nbstream_readerwriter.py
NonBlockingStreamReaderWriter.readline
def readline(self, timeout = 0.1): """Try to read a line from the stream queue. """ try: return self._q.get(block = timeout is not None, timeout = timeout) except Empty: return None
python
def readline(self, timeout = 0.1): """Try to read a line from the stream queue. """ try: return self._q.get(block = timeout is not None, timeout = timeout) except Empty: return None
[ "def", "readline", "(", "self", ",", "timeout", "=", "0.1", ")", ":", "try", ":", "return", "self", ".", "_q", ".", "get", "(", "block", "=", "timeout", "is", "not", "None", ",", "timeout", "=", "timeout", ")", "except", "Empty", ":", "return", "None" ]
Try to read a line from the stream queue.
[ "Try", "to", "read", "a", "line", "from", "the", "stream", "queue", "." ]
fff9ff2ddab644a86f96e1ccf5df142c482a8247
https://github.com/agramian/subprocess-manager/blob/fff9ff2ddab644a86f96e1ccf5df142c482a8247/subprocess_manager/nbstream_readerwriter.py#L54-L61
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
CommandHistory.verification_events
def verification_events(self): """ Events related to command verification. :type: List[:class:`.CommandHistoryEvent`] """ queued = self._assemble_event('Verifier_Queued') started = self._assemble_event('Verifier_Started') return [x for x in [queued, started] if x]
python
def verification_events(self): """ Events related to command verification. :type: List[:class:`.CommandHistoryEvent`] """ queued = self._assemble_event('Verifier_Queued') started = self._assemble_event('Verifier_Started') return [x for x in [queued, started] if x]
[ "def", "verification_events", "(", "self", ")", ":", "queued", "=", "self", ".", "_assemble_event", "(", "'Verifier_Queued'", ")", "started", "=", "self", ".", "_assemble_event", "(", "'Verifier_Started'", ")", "return", "[", "x", "for", "x", "in", "[", "queued", ",", "started", "]", "if", "x", "]" ]
Events related to command verification. :type: List[:class:`.CommandHistoryEvent`]
[ "Events", "related", "to", "command", "verification", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L122-L130
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
CommandHistory.events
def events(self): """ All events. :type: List[:class:`.CommandHistoryEvent`] """ events = [self.acknowledge_event] + self.verification_events return [x for x in events if x]
python
def events(self): """ All events. :type: List[:class:`.CommandHistoryEvent`] """ events = [self.acknowledge_event] + self.verification_events return [x for x in events if x]
[ "def", "events", "(", "self", ")", ":", "events", "=", "[", "self", ".", "acknowledge_event", "]", "+", "self", ".", "verification_events", "return", "[", "x", "for", "x", "in", "events", "if", "x", "]" ]
All events. :type: List[:class:`.CommandHistoryEvent`]
[ "All", "events", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L133-L140
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.generation_time
def generation_time(self): """ The generation time as set by Yamcs. :type: :class:`~datetime.datetime` """ entry = self._proto.commandQueueEntry if entry.HasField('generationTimeUTC'): return parse_isostring(entry.generationTimeUTC) return None
python
def generation_time(self): """ The generation time as set by Yamcs. :type: :class:`~datetime.datetime` """ entry = self._proto.commandQueueEntry if entry.HasField('generationTimeUTC'): return parse_isostring(entry.generationTimeUTC) return None
[ "def", "generation_time", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "HasField", "(", "'generationTimeUTC'", ")", ":", "return", "parse_isostring", "(", "entry", ".", "generationTimeUTC", ")", "return", "None" ]
The generation time as set by Yamcs. :type: :class:`~datetime.datetime`
[ "The", "generation", "time", "as", "set", "by", "Yamcs", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L173-L182
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.username
def username(self): """The username of the issuer.""" entry = self._proto.commandQueueEntry if entry.HasField('username'): return entry.username return None
python
def username(self): """The username of the issuer.""" entry = self._proto.commandQueueEntry if entry.HasField('username'): return entry.username return None
[ "def", "username", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "HasField", "(", "'username'", ")", ":", "return", "entry", ".", "username", "return", "None" ]
The username of the issuer.
[ "The", "username", "of", "the", "issuer", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L185-L190
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.queue
def queue(self): """The name of the queue that this command was assigned to.""" entry = self._proto.commandQueueEntry if entry.HasField('queueName'): return entry.queueName return None
python
def queue(self): """The name of the queue that this command was assigned to.""" entry = self._proto.commandQueueEntry if entry.HasField('queueName'): return entry.queueName return None
[ "def", "queue", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "HasField", "(", "'queueName'", ")", ":", "return", "entry", ".", "queueName", "return", "None" ]
The name of the queue that this command was assigned to.
[ "The", "name", "of", "the", "queue", "that", "this", "command", "was", "assigned", "to", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L193-L198
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.origin
def origin(self): """ The origin of this command. This is often empty, but may also be a hostname. """ entry = self._proto.commandQueueEntry if entry.cmdId.HasField('origin'): return entry.cmdId.origin return None
python
def origin(self): """ The origin of this command. This is often empty, but may also be a hostname. """ entry = self._proto.commandQueueEntry if entry.cmdId.HasField('origin'): return entry.cmdId.origin return None
[ "def", "origin", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "cmdId", ".", "HasField", "(", "'origin'", ")", ":", "return", "entry", ".", "cmdId", ".", "origin", "return", "None" ]
The origin of this command. This is often empty, but may also be a hostname.
[ "The", "origin", "of", "this", "command", ".", "This", "is", "often", "empty", "but", "may", "also", "be", "a", "hostname", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L201-L209
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.sequence_number
def sequence_number(self): """ The sequence number of this command. This is the sequence number assigned by the issuing client. """ entry = self._proto.commandQueueEntry if entry.cmdId.HasField('sequenceNumber'): return entry.cmdId.sequenceNumber return None
python
def sequence_number(self): """ The sequence number of this command. This is the sequence number assigned by the issuing client. """ entry = self._proto.commandQueueEntry if entry.cmdId.HasField('sequenceNumber'): return entry.cmdId.sequenceNumber return None
[ "def", "sequence_number", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "cmdId", ".", "HasField", "(", "'sequenceNumber'", ")", ":", "return", "entry", ".", "cmdId", ".", "sequenceNumber", "return", "None" ]
The sequence number of this command. This is the sequence number assigned by the issuing client.
[ "The", "sequence", "number", "of", "this", "command", ".", "This", "is", "the", "sequence", "number", "assigned", "by", "the", "issuing", "client", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L212-L220
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
IssuedCommand.create_command_history_subscription
def create_command_history_subscription(self, on_data=None, timeout=60): """ Create a new command history subscription for this command. :param on_data: Function that gets called with :class:`.CommandHistory` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .CommandHistorySubscription """ return self._client.create_command_history_subscription( issued_command=self, on_data=on_data, timeout=timeout)
python
def create_command_history_subscription(self, on_data=None, timeout=60): """ Create a new command history subscription for this command. :param on_data: Function that gets called with :class:`.CommandHistory` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .CommandHistorySubscription """ return self._client.create_command_history_subscription( issued_command=self, on_data=on_data, timeout=timeout)
[ "def", "create_command_history_subscription", "(", "self", ",", "on_data", "=", "None", ",", "timeout", "=", "60", ")", ":", "return", "self", ".", "_client", ".", "create_command_history_subscription", "(", "issued_command", "=", "self", ",", "on_data", "=", "on_data", ",", "timeout", "=", "timeout", ")" ]
Create a new command history subscription for this command. :param on_data: Function that gets called with :class:`.CommandHistory` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .CommandHistorySubscription
[ "Create", "a", "new", "command", "history", "subscription", "for", "this", "command", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L243-L256
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
Alarm.acknowledged_by
def acknowledged_by(self): """Username of the acknowledger.""" if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgedBy')): return self._proto.acknowledgeInfo.acknowledgedBy return None
python
def acknowledged_by(self): """Username of the acknowledger.""" if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgedBy')): return self._proto.acknowledgeInfo.acknowledgedBy return None
[ "def", "acknowledged_by", "(", "self", ")", ":", "if", "(", "self", ".", "is_acknowledged", "and", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "HasField", "(", "'acknowledgedBy'", ")", ")", ":", "return", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "acknowledgedBy", "return", "None" ]
Username of the acknowledger.
[ "Username", "of", "the", "acknowledger", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L315-L320
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
Alarm.acknowledge_message
def acknowledge_message(self): """Comment provided when acknowledging the alarm.""" if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgeMessage')): return self._proto.acknowledgeInfo.acknowledgeMessage return None
python
def acknowledge_message(self): """Comment provided when acknowledging the alarm.""" if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgeMessage')): return self._proto.acknowledgeInfo.acknowledgeMessage return None
[ "def", "acknowledge_message", "(", "self", ")", ":", "if", "(", "self", ".", "is_acknowledged", "and", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "HasField", "(", "'acknowledgeMessage'", ")", ")", ":", "return", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "acknowledgeMessage", "return", "None" ]
Comment provided when acknowledging the alarm.
[ "Comment", "provided", "when", "acknowledging", "the", "alarm", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L323-L328
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
Alarm.acknowledge_time
def acknowledge_time(self): """ Processor time when the alarm was acknowledged. :type: :class:`~datetime.datetime` """ if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgeTime')): return parse_isostring(self._proto.acknowledgeInfo.acknowledgeTime) return None
python
def acknowledge_time(self): """ Processor time when the alarm was acknowledged. :type: :class:`~datetime.datetime` """ if (self.is_acknowledged and self._proto.acknowledgeInfo.HasField('acknowledgeTime')): return parse_isostring(self._proto.acknowledgeInfo.acknowledgeTime) return None
[ "def", "acknowledge_time", "(", "self", ")", ":", "if", "(", "self", ".", "is_acknowledged", "and", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "HasField", "(", "'acknowledgeTime'", ")", ")", ":", "return", "parse_isostring", "(", "self", ".", "_proto", ".", "acknowledgeInfo", ".", "acknowledgeTime", ")", "return", "None" ]
Processor time when the alarm was acknowledged. :type: :class:`~datetime.datetime`
[ "Processor", "time", "when", "the", "alarm", "was", "acknowledged", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L331-L340
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
ParameterValue.name
def name(self): """ An identifying name for the parameter value. Typically this is the fully-qualified XTCE name, but it may also be an alias depending on how the parameter update was requested. """ if self._proto.id.namespace: return self._proto.id.namespace + '/' + self._proto.id.name return self._proto.id.name
python
def name(self): """ An identifying name for the parameter value. Typically this is the fully-qualified XTCE name, but it may also be an alias depending on how the parameter update was requested. """ if self._proto.id.namespace: return self._proto.id.namespace + '/' + self._proto.id.name return self._proto.id.name
[ "def", "name", "(", "self", ")", ":", "if", "self", ".", "_proto", ".", "id", ".", "namespace", ":", "return", "self", ".", "_proto", ".", "id", ".", "namespace", "+", "'/'", "+", "self", ".", "_proto", ".", "id", ".", "name", "return", "self", ".", "_proto", ".", "id", ".", "name" ]
An identifying name for the parameter value. Typically this is the fully-qualified XTCE name, but it may also be an alias depending on how the parameter update was requested.
[ "An", "identifying", "name", "for", "the", "parameter", "value", ".", "Typically", "this", "is", "the", "fully", "-", "qualified", "XTCE", "name", "but", "it", "may", "also", "be", "an", "alias", "depending", "on", "how", "the", "parameter", "update", "was", "requested", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L410-L418
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
ParameterValue.validity_duration
def validity_duration(self): """ How long this parameter value is valid. .. note: There is also an option when subscribing to get updated when the parameter values expire. :type: :class:`~datetime.timedelta` """ if self._proto.HasField('expireMillis'): return timedelta(milliseconds=self._proto.expireMillis) return None
python
def validity_duration(self): """ How long this parameter value is valid. .. note: There is also an option when subscribing to get updated when the parameter values expire. :type: :class:`~datetime.timedelta` """ if self._proto.HasField('expireMillis'): return timedelta(milliseconds=self._proto.expireMillis) return None
[ "def", "validity_duration", "(", "self", ")", ":", "if", "self", ".", "_proto", ".", "HasField", "(", "'expireMillis'", ")", ":", "return", "timedelta", "(", "milliseconds", "=", "self", ".", "_proto", ".", "expireMillis", ")", "return", "None" ]
How long this parameter value is valid. .. note: There is also an option when subscribing to get updated when the parameter values expire. :type: :class:`~datetime.timedelta`
[ "How", "long", "this", "parameter", "value", "is", "valid", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L444-L455
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
ParameterValue.range_condition
def range_condition(self): """ If the value is out of limits, this indicates ``LOW`` or ``HIGH``. """ if self._proto.HasField('rangeCondition'): return pvalue_pb2.RangeCondition.Name(self._proto.rangeCondition) return None
python
def range_condition(self): """ If the value is out of limits, this indicates ``LOW`` or ``HIGH``. """ if self._proto.HasField('rangeCondition'): return pvalue_pb2.RangeCondition.Name(self._proto.rangeCondition) return None
[ "def", "range_condition", "(", "self", ")", ":", "if", "self", ".", "_proto", ".", "HasField", "(", "'rangeCondition'", ")", ":", "return", "pvalue_pb2", ".", "RangeCondition", ".", "Name", "(", "self", ".", "_proto", ".", "rangeCondition", ")", "return", "None" ]
If the value is out of limits, this indicates ``LOW`` or ``HIGH``.
[ "If", "the", "value", "is", "out", "of", "limits", "this", "indicates", "LOW", "or", "HIGH", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L482-L488
train
lowandrew/OLCTools
sipprCommon/sippingmethods.py
Sippr.reversebait
def reversebait(self, maskmiddle='f', k=19): """ Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the number of possibly targets against which the baited reads must be aligned """ logging.info('Performing reverse kmer baiting of targets with FASTQ files') if self.kmer_size is None: kmer = k else: kmer = self.kmer_size with progressbar(self.runmetadata) as bar: for sample in bar: if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis: outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa') sample[self.analysistype].revbbdukcmd = \ 'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' \ 'maskmiddle={mm} outm={outm}' \ .format(mem=self.mem, ref=sample[self.analysistype].baitedfastq, in1=sample[self.analysistype].baitfile, kmer=kmer, cpus=str(self.cpus), mcf=self.cutoff, mm=maskmiddle, outm=outfile) # Run the system call (if necessary) if not os.path.isfile(outfile): out, err = run_subprocess(sample[self.analysistype].revbbdukcmd) write_to_logfile(sample[self.analysistype].bbdukcmd, sample[self.analysistype].bbdukcmd, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) # Set the baitfile to use in the mapping steps as the newly created outfile sample[self.analysistype].baitfile = outfile
python
def reversebait(self, maskmiddle='f', k=19): """ Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the number of possibly targets against which the baited reads must be aligned """ logging.info('Performing reverse kmer baiting of targets with FASTQ files') if self.kmer_size is None: kmer = k else: kmer = self.kmer_size with progressbar(self.runmetadata) as bar: for sample in bar: if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis: outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa') sample[self.analysistype].revbbdukcmd = \ 'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' \ 'maskmiddle={mm} outm={outm}' \ .format(mem=self.mem, ref=sample[self.analysistype].baitedfastq, in1=sample[self.analysistype].baitfile, kmer=kmer, cpus=str(self.cpus), mcf=self.cutoff, mm=maskmiddle, outm=outfile) # Run the system call (if necessary) if not os.path.isfile(outfile): out, err = run_subprocess(sample[self.analysistype].revbbdukcmd) write_to_logfile(sample[self.analysistype].bbdukcmd, sample[self.analysistype].bbdukcmd, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) # Set the baitfile to use in the mapping steps as the newly created outfile sample[self.analysistype].baitfile = outfile
[ "def", "reversebait", "(", "self", ",", "maskmiddle", "=", "'f'", ",", "k", "=", "19", ")", ":", "logging", ".", "info", "(", "'Performing reverse kmer baiting of targets with FASTQ files'", ")", "if", "self", ".", "kmer_size", "is", "None", ":", "kmer", "=", "k", "else", ":", "kmer", "=", "self", ".", "kmer_size", "with", "progressbar", "(", "self", ".", "runmetadata", ")", "as", "bar", ":", "for", "sample", "in", "bar", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", "and", "sample", "[", "self", ".", "analysistype", "]", ".", "runanalysis", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "outputdir", ",", "'baitedtargets.fa'", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "revbbdukcmd", "=", "'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} '", "'maskmiddle={mm} outm={outm}'", ".", "format", "(", "mem", "=", "self", ".", "mem", ",", "ref", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "baitedfastq", ",", "in1", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "baitfile", ",", "kmer", "=", "kmer", ",", "cpus", "=", "str", "(", "self", ".", "cpus", ")", ",", "mcf", "=", "self", ".", "cutoff", ",", "mm", "=", "maskmiddle", ",", "outm", "=", "outfile", ")", "# Run the system call (if necessary)", "if", "not", "os", ".", "path", ".", "isfile", "(", "outfile", ")", ":", "out", ",", "err", "=", "run_subprocess", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "revbbdukcmd", ")", "write_to_logfile", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "bbdukcmd", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "bbdukcmd", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")", 
"write_to_logfile", "(", "out", ",", "err", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")", "# Set the baitfile to use in the mapping steps as the newly created outfile", "sample", "[", "self", ".", "analysistype", "]", ".", "baitfile", "=", "outfile" ]
Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the number of possibly targets against which the baited reads must be aligned
[ "Use", "the", "freshly", "-", "baited", "FASTQ", "files", "to", "bait", "out", "sequence", "from", "the", "original", "target", "files", ".", "This", "will", "reduce", "the", "number", "of", "possibly", "targets", "against", "which", "the", "baited", "reads", "must", "be", "aligned" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/sippingmethods.py#L193-L230
train
lowandrew/OLCTools
sipprCommon/sippingmethods.py
Sippr.clipper
def clipper(self): """ Filter out results based on the presence of cigar features such as internal soft-clipping """ for sample in self.runmetadata: # Create a dictionary to store all the samples that do not have features replacementresults = dict() try: # SixteenS analyses seem to fail if results are filtered out if self.analysistype != 'sixteens_full' and self.analysistype != 'resfinder': # Iterate through all the baited genes for gene in sample[self.analysistype].faidict: try: percentidentity = sample[self.analysistype].results[gene] try: # Create a list to store whether a feature is present in enough reads to discard the # sample passingfeature = list() for location, feature in sample[self.analysistype].features[gene].items(): # If the feature is present in under 30% of the reads, set the passing variable # to true if len(feature) < int(float(sample[self.analysistype].avgdepth[gene])) * 0.3: passingfeature.append(True) # Otherwise set it to false else: passingfeature.append(False) # If all the features are 'true' (present in fewer than 30% of the reads), add this # contig to the list of passing results if all(passingfeature): replacementresults[gene] = percentidentity # If the allele does not have any features, it is added to the passing list except KeyError: replacementresults[gene] = percentidentity except KeyError: pass # Update the .results attribute with the filtered dictionary sample[self.analysistype].results = replacementresults except AttributeError: pass
python
def clipper(self): """ Filter out results based on the presence of cigar features such as internal soft-clipping """ for sample in self.runmetadata: # Create a dictionary to store all the samples that do not have features replacementresults = dict() try: # SixteenS analyses seem to fail if results are filtered out if self.analysistype != 'sixteens_full' and self.analysistype != 'resfinder': # Iterate through all the baited genes for gene in sample[self.analysistype].faidict: try: percentidentity = sample[self.analysistype].results[gene] try: # Create a list to store whether a feature is present in enough reads to discard the # sample passingfeature = list() for location, feature in sample[self.analysistype].features[gene].items(): # If the feature is present in under 30% of the reads, set the passing variable # to true if len(feature) < int(float(sample[self.analysistype].avgdepth[gene])) * 0.3: passingfeature.append(True) # Otherwise set it to false else: passingfeature.append(False) # If all the features are 'true' (present in fewer than 30% of the reads), add this # contig to the list of passing results if all(passingfeature): replacementresults[gene] = percentidentity # If the allele does not have any features, it is added to the passing list except KeyError: replacementresults[gene] = percentidentity except KeyError: pass # Update the .results attribute with the filtered dictionary sample[self.analysistype].results = replacementresults except AttributeError: pass
[ "def", "clipper", "(", "self", ")", ":", "for", "sample", "in", "self", ".", "runmetadata", ":", "# Create a dictionary to store all the samples that do not have features", "replacementresults", "=", "dict", "(", ")", "try", ":", "# SixteenS analyses seem to fail if results are filtered out", "if", "self", ".", "analysistype", "!=", "'sixteens_full'", "and", "self", ".", "analysistype", "!=", "'resfinder'", ":", "# Iterate through all the baited genes", "for", "gene", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "faidict", ":", "try", ":", "percentidentity", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "results", "[", "gene", "]", "try", ":", "# Create a list to store whether a feature is present in enough reads to discard the", "# sample", "passingfeature", "=", "list", "(", ")", "for", "location", ",", "feature", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "features", "[", "gene", "]", ".", "items", "(", ")", ":", "# If the feature is present in under 30% of the reads, set the passing variable", "# to true", "if", "len", "(", "feature", ")", "<", "int", "(", "float", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "avgdepth", "[", "gene", "]", ")", ")", "*", "0.3", ":", "passingfeature", ".", "append", "(", "True", ")", "# Otherwise set it to false", "else", ":", "passingfeature", ".", "append", "(", "False", ")", "# If all the features are 'true' (present in fewer than 30% of the reads), add this", "# contig to the list of passing results", "if", "all", "(", "passingfeature", ")", ":", "replacementresults", "[", "gene", "]", "=", "percentidentity", "# If the allele does not have any features, it is added to the passing list", "except", "KeyError", ":", "replacementresults", "[", "gene", "]", "=", "percentidentity", "except", "KeyError", ":", "pass", "# Update the .results attribute with the filtered dictionary", "sample", "[", "self", ".", "analysistype", "]", ".", "results", "=", "replacementresults", 
"except", "AttributeError", ":", "pass" ]
Filter out results based on the presence of cigar features such as internal soft-clipping
[ "Filter", "out", "results", "based", "on", "the", "presence", "of", "cigar", "features", "such", "as", "internal", "soft", "-", "clipping" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/sippingmethods.py#L739-L777
train
sirfoga/pyhal
hal/help.py
main
def main(): """Pretty-print the bug information as JSON""" reporter = BugReporter() print("JSON report:") print(reporter.as_json()) print() print("Markdown report:") print(reporter.as_markdown()) print("SQL report:") print(reporter.as_sql()) print("Choose the appropriate format (if you're submitting a Github Issue " "please chose the Markdown report) and paste it!")
python
def main(): """Pretty-print the bug information as JSON""" reporter = BugReporter() print("JSON report:") print(reporter.as_json()) print() print("Markdown report:") print(reporter.as_markdown()) print("SQL report:") print(reporter.as_sql()) print("Choose the appropriate format (if you're submitting a Github Issue " "please chose the Markdown report) and paste it!")
[ "def", "main", "(", ")", ":", "reporter", "=", "BugReporter", "(", ")", "print", "(", "\"JSON report:\"", ")", "print", "(", "reporter", ".", "as_json", "(", ")", ")", "print", "(", ")", "print", "(", "\"Markdown report:\"", ")", "print", "(", "reporter", ".", "as_markdown", "(", ")", ")", "print", "(", "\"SQL report:\"", ")", "print", "(", "reporter", ".", "as_sql", "(", ")", ")", "print", "(", "\"Choose the appropriate format (if you're submitting a Github Issue \"", "\"please chose the Markdown report) and paste it!\"", ")" ]
Pretty-print the bug information as JSON
[ "Pretty", "-", "print", "the", "bug", "information", "as", "JSON" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/help.py#L97-L113
train
sirfoga/pyhal
hal/help.py
BugReporter.get_platform_info
def get_platform_info(): """Gets platform info :return: platform info """ try: system_name = platform.system() release_name = platform.release() except: system_name = "Unknown" release_name = "Unknown" return { 'system': system_name, 'release': release_name, }
python
def get_platform_info(): """Gets platform info :return: platform info """ try: system_name = platform.system() release_name = platform.release() except: system_name = "Unknown" release_name = "Unknown" return { 'system': system_name, 'release': release_name, }
[ "def", "get_platform_info", "(", ")", ":", "try", ":", "system_name", "=", "platform", ".", "system", "(", ")", "release_name", "=", "platform", ".", "release", "(", ")", "except", ":", "system_name", "=", "\"Unknown\"", "release_name", "=", "\"Unknown\"", "return", "{", "'system'", ":", "system_name", ",", "'release'", ":", "release_name", ",", "}" ]
Gets platform info :return: platform info
[ "Gets", "platform", "info" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/help.py#L20-L36
train
sirfoga/pyhal
hal/help.py
BugReporter.get_bug_report
def get_bug_report(): """Generate information for a bug report :return: information for bug report """ platform_info = BugReporter.get_platform_info() module_info = { 'version': hal_version.__version__, 'build': hal_version.__build__ } return { 'platform': platform_info, 'pyhal': module_info }
python
def get_bug_report(): """Generate information for a bug report :return: information for bug report """ platform_info = BugReporter.get_platform_info() module_info = { 'version': hal_version.__version__, 'build': hal_version.__build__ } return { 'platform': platform_info, 'pyhal': module_info }
[ "def", "get_bug_report", "(", ")", ":", "platform_info", "=", "BugReporter", ".", "get_platform_info", "(", ")", "module_info", "=", "{", "'version'", ":", "hal_version", ".", "__version__", ",", "'build'", ":", "hal_version", ".", "__build__", "}", "return", "{", "'platform'", ":", "platform_info", ",", "'pyhal'", ":", "module_info", "}" ]
Generate information for a bug report :return: information for bug report
[ "Generate", "information", "for", "a", "bug", "report" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/help.py#L39-L53
train
yamcs/yamcs-python
yamcs-client/yamcs/core/helpers.py
to_isostring
def to_isostring(dt): """ Converts the given datetime to an ISO String. This assumes the datetime is UTC. """ if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) > timedelta(0): logging.warn('Warning: aware datetimes are interpreted as if they were naive') # -3 to change microseconds to milliseconds return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
python
def to_isostring(dt): """ Converts the given datetime to an ISO String. This assumes the datetime is UTC. """ if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) > timedelta(0): logging.warn('Warning: aware datetimes are interpreted as if they were naive') # -3 to change microseconds to milliseconds return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
[ "def", "to_isostring", "(", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "not", "None", "and", "dt", ".", "tzinfo", ".", "utcoffset", "(", "dt", ")", ">", "timedelta", "(", "0", ")", ":", "logging", ".", "warn", "(", "'Warning: aware datetimes are interpreted as if they were naive'", ")", "# -3 to change microseconds to milliseconds", "return", "dt", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "[", ":", "-", "3", "]", "+", "'Z'" ]
Converts the given datetime to an ISO String. This assumes the datetime is UTC.
[ "Converts", "the", "given", "datetime", "to", "an", "ISO", "String", ".", "This", "assumes", "the", "datetime", "is", "UTC", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/core/helpers.py#L8-L17
train
yamcs/yamcs-python
yamcs-client/yamcs/core/helpers.py
parse_value
def parse_value(proto): """ Convers a Protobuf `Value` from the API into a python native value """ if proto.HasField('floatValue'): return proto.floatValue elif proto.HasField('doubleValue'): return proto.doubleValue elif proto.HasField('sint32Value'): return proto.sint32Value elif proto.HasField('uint32Value'): return proto.uint32Value elif proto.HasField('binaryValue'): return proto.binaryValue elif proto.HasField('timestampValue'): # Don't use the actual 'timestampValue' field, it contains a number # that is difficult to interpret on the client. Instead parse from # the ISO String also set by Yamcs. return parse_isostring(proto.stringValue) elif proto.HasField('stringValue'): return proto.stringValue elif proto.HasField('uint64Value'): return proto.uint64Value elif proto.HasField('sint64Value'): return proto.sint64Value elif proto.HasField('booleanValue'): return proto.booleanValue elif proto.HasField('arrayValue'): return [parse_value(v) for v in proto.arrayValue] elif proto.HasField('aggregateValue'): return OrderedDict(zip(proto.aggregateValue.name, proto.aggregateValue.value)) else: logging.warn('Unrecognized value type for update %s', proto) return None
python
def parse_value(proto): """ Convers a Protobuf `Value` from the API into a python native value """ if proto.HasField('floatValue'): return proto.floatValue elif proto.HasField('doubleValue'): return proto.doubleValue elif proto.HasField('sint32Value'): return proto.sint32Value elif proto.HasField('uint32Value'): return proto.uint32Value elif proto.HasField('binaryValue'): return proto.binaryValue elif proto.HasField('timestampValue'): # Don't use the actual 'timestampValue' field, it contains a number # that is difficult to interpret on the client. Instead parse from # the ISO String also set by Yamcs. return parse_isostring(proto.stringValue) elif proto.HasField('stringValue'): return proto.stringValue elif proto.HasField('uint64Value'): return proto.uint64Value elif proto.HasField('sint64Value'): return proto.sint64Value elif proto.HasField('booleanValue'): return proto.booleanValue elif proto.HasField('arrayValue'): return [parse_value(v) for v in proto.arrayValue] elif proto.HasField('aggregateValue'): return OrderedDict(zip(proto.aggregateValue.name, proto.aggregateValue.value)) else: logging.warn('Unrecognized value type for update %s', proto) return None
[ "def", "parse_value", "(", "proto", ")", ":", "if", "proto", ".", "HasField", "(", "'floatValue'", ")", ":", "return", "proto", ".", "floatValue", "elif", "proto", ".", "HasField", "(", "'doubleValue'", ")", ":", "return", "proto", ".", "doubleValue", "elif", "proto", ".", "HasField", "(", "'sint32Value'", ")", ":", "return", "proto", ".", "sint32Value", "elif", "proto", ".", "HasField", "(", "'uint32Value'", ")", ":", "return", "proto", ".", "uint32Value", "elif", "proto", ".", "HasField", "(", "'binaryValue'", ")", ":", "return", "proto", ".", "binaryValue", "elif", "proto", ".", "HasField", "(", "'timestampValue'", ")", ":", "# Don't use the actual 'timestampValue' field, it contains a number", "# that is difficult to interpret on the client. Instead parse from", "# the ISO String also set by Yamcs.", "return", "parse_isostring", "(", "proto", ".", "stringValue", ")", "elif", "proto", ".", "HasField", "(", "'stringValue'", ")", ":", "return", "proto", ".", "stringValue", "elif", "proto", ".", "HasField", "(", "'uint64Value'", ")", ":", "return", "proto", ".", "uint64Value", "elif", "proto", ".", "HasField", "(", "'sint64Value'", ")", ":", "return", "proto", ".", "sint64Value", "elif", "proto", ".", "HasField", "(", "'booleanValue'", ")", ":", "return", "proto", ".", "booleanValue", "elif", "proto", ".", "HasField", "(", "'arrayValue'", ")", ":", "return", "[", "parse_value", "(", "v", ")", "for", "v", "in", "proto", ".", "arrayValue", "]", "elif", "proto", ".", "HasField", "(", "'aggregateValue'", ")", ":", "return", "OrderedDict", "(", "zip", "(", "proto", ".", "aggregateValue", ".", "name", ",", "proto", ".", "aggregateValue", ".", "value", ")", ")", "else", ":", "logging", ".", "warn", "(", "'Unrecognized value type for update %s'", ",", "proto", ")", "return", "None" ]
Convers a Protobuf `Value` from the API into a python native value
[ "Convers", "a", "Protobuf", "Value", "from", "the", "API", "into", "a", "python", "native", "value" ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/core/helpers.py#L30-L63
train
sirfoga/pyhal
hal/charts/correlation.py
create_correlation_matrix_plot
def create_correlation_matrix_plot(correlation_matrix, title, feature_list): """Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image """ chart = SimpleChart(title) ax1 = chart.get_ax() ax1.set_xticks(list(range(len(feature_list)))) ax1.set_xticklabels([feature_list[i] for i in range(len(feature_list))], rotation=90) ax1.set_yticks(list(range(len(feature_list)))) ax1.set_yticklabels([feature_list[i] for i in range(len(feature_list))]) cax = ax1.imshow(correlation_matrix, interpolation="nearest", cmap=cm.get_cmap("jet", 30)) chart.get_fig().colorbar(cax, ticks=np.linspace(-1, 1, 21)) plt.gcf().subplots_adjust(bottom=0.25)
python
def create_correlation_matrix_plot(correlation_matrix, title, feature_list): """Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image """ chart = SimpleChart(title) ax1 = chart.get_ax() ax1.set_xticks(list(range(len(feature_list)))) ax1.set_xticklabels([feature_list[i] for i in range(len(feature_list))], rotation=90) ax1.set_yticks(list(range(len(feature_list)))) ax1.set_yticklabels([feature_list[i] for i in range(len(feature_list))]) cax = ax1.imshow(correlation_matrix, interpolation="nearest", cmap=cm.get_cmap("jet", 30)) chart.get_fig().colorbar(cax, ticks=np.linspace(-1, 1, 21)) plt.gcf().subplots_adjust(bottom=0.25)
[ "def", "create_correlation_matrix_plot", "(", "correlation_matrix", ",", "title", ",", "feature_list", ")", ":", "chart", "=", "SimpleChart", "(", "title", ")", "ax1", "=", "chart", ".", "get_ax", "(", ")", "ax1", ".", "set_xticks", "(", "list", "(", "range", "(", "len", "(", "feature_list", ")", ")", ")", ")", "ax1", ".", "set_xticklabels", "(", "[", "feature_list", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "feature_list", ")", ")", "]", ",", "rotation", "=", "90", ")", "ax1", ".", "set_yticks", "(", "list", "(", "range", "(", "len", "(", "feature_list", ")", ")", ")", ")", "ax1", ".", "set_yticklabels", "(", "[", "feature_list", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "feature_list", ")", ")", "]", ")", "cax", "=", "ax1", ".", "imshow", "(", "correlation_matrix", ",", "interpolation", "=", "\"nearest\"", ",", "cmap", "=", "cm", ".", "get_cmap", "(", "\"jet\"", ",", "30", ")", ")", "chart", ".", "get_fig", "(", ")", ".", "colorbar", "(", "cax", ",", "ticks", "=", "np", ".", "linspace", "(", "-", "1", ",", "1", ",", "21", ")", ")", "plt", ".", "gcf", "(", ")", ".", "subplots_adjust", "(", "bottom", "=", "0.25", ")" ]
Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image
[ "Creates", "plot", "for", "correlation", "matrix" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/charts/correlation.py#L11-L31
train
IS-ENES-Data/esgf-pid
esgfpid/utils/logutils.py
log_every_x_times
def log_every_x_times(logger, counter, x, msg, *args, **kwargs): ''' Works like logdebug, but only prints first and and every xth message. ''' if counter==1 or counter % x == 0: #msg = msg + (' (counter %i)' % counter) logdebug(logger, msg, *args, **kwargs)
python
def log_every_x_times(logger, counter, x, msg, *args, **kwargs): ''' Works like logdebug, but only prints first and and every xth message. ''' if counter==1 or counter % x == 0: #msg = msg + (' (counter %i)' % counter) logdebug(logger, msg, *args, **kwargs)
[ "def", "log_every_x_times", "(", "logger", ",", "counter", ",", "x", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "counter", "==", "1", "or", "counter", "%", "x", "==", "0", ":", "#msg = msg + (' (counter %i)' % counter)", "logdebug", "(", "logger", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Works like logdebug, but only prints first and and every xth message.
[ "Works", "like", "logdebug", "but", "only", "prints", "first", "and", "and", "every", "xth", "message", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L47-L54
train
mpds-io/python-api-client
mpds_client/retrieve_MPDS.py
MPDSDataRetrieval.get_dataframe
def get_dataframe(self, *args, **kwargs): """ Retrieve data as a Pandas dataframe. Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept fields: (dict) Data of interest for C-, S-, and P-entries, e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']}, documented at https://developer.mpds.io/#JSON-schemata columns: (list) Column names for Pandas dataframe Returns: (object) Pandas dataframe object containing the results """ columns = kwargs.get('columns') if columns: del kwargs['columns'] else: columns = self.default_titles return pd.DataFrame(self.get_data(*args, **kwargs), columns=columns)
python
def get_dataframe(self, *args, **kwargs): """ Retrieve data as a Pandas dataframe. Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept fields: (dict) Data of interest for C-, S-, and P-entries, e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']}, documented at https://developer.mpds.io/#JSON-schemata columns: (list) Column names for Pandas dataframe Returns: (object) Pandas dataframe object containing the results """ columns = kwargs.get('columns') if columns: del kwargs['columns'] else: columns = self.default_titles return pd.DataFrame(self.get_data(*args, **kwargs), columns=columns)
[ "def", "get_dataframe", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "columns", "=", "kwargs", ".", "get", "(", "'columns'", ")", "if", "columns", ":", "del", "kwargs", "[", "'columns'", "]", "else", ":", "columns", "=", "self", ".", "default_titles", "return", "pd", ".", "DataFrame", "(", "self", ".", "get_data", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "columns", "=", "columns", ")" ]
Retrieve data as a Pandas dataframe. Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept fields: (dict) Data of interest for C-, S-, and P-entries, e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']}, documented at https://developer.mpds.io/#JSON-schemata columns: (list) Column names for Pandas dataframe Returns: (object) Pandas dataframe object containing the results
[ "Retrieve", "data", "as", "a", "Pandas", "dataframe", "." ]
edfdd79c6aac44d0a5f7f785e252a88acc95b6fe
https://github.com/mpds-io/python-api-client/blob/edfdd79c6aac44d0a5f7f785e252a88acc95b6fe/mpds_client/retrieve_MPDS.py#L319-L340
train
portfors-lab/sparkle
sparkle/gui/stim/auto_parameter_view.py
AutoParameterTableView.grabImage
def grabImage(self, index): """Returns an image of the parameter row. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.grabImage>` """ # grab an image of the cell we are moving # assume all rows same height row_height = self.rowHeight(0) # -5 becuase it a a little off y = (row_height*index.row()) + row_height - 5 x = self.width() rect = QtCore.QRect(5,y,x,row_height) pixmap = QtGui.QPixmap() pixmap = pixmap.grabWidget(self, rect) return pixmap
python
def grabImage(self, index): """Returns an image of the parameter row. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.grabImage>` """ # grab an image of the cell we are moving # assume all rows same height row_height = self.rowHeight(0) # -5 becuase it a a little off y = (row_height*index.row()) + row_height - 5 x = self.width() rect = QtCore.QRect(5,y,x,row_height) pixmap = QtGui.QPixmap() pixmap = pixmap.grabWidget(self, rect) return pixmap
[ "def", "grabImage", "(", "self", ",", "index", ")", ":", "# grab an image of the cell we are moving", "# assume all rows same height", "row_height", "=", "self", ".", "rowHeight", "(", "0", ")", "# -5 becuase it a a little off", "y", "=", "(", "row_height", "*", "index", ".", "row", "(", ")", ")", "+", "row_height", "-", "5", "x", "=", "self", ".", "width", "(", ")", "rect", "=", "QtCore", ".", "QRect", "(", "5", ",", "y", ",", "x", ",", "row_height", ")", "pixmap", "=", "QtGui", ".", "QPixmap", "(", ")", "pixmap", "=", "pixmap", ".", "grabWidget", "(", "self", ",", "rect", ")", "return", "pixmap" ]
Returns an image of the parameter row. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.grabImage>`
[ "Returns", "an", "image", "of", "the", "parameter", "row", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/auto_parameter_view.py#L37-L51
train
portfors-lab/sparkle
sparkle/gui/stim/auto_parameter_view.py
AutoParameterTableView.mousePressEvent
def mousePressEvent(self, event): """Begins edit on cell clicked, if allowed, and passes event to super class""" index = self.indexAt(event.pos()) if index.isValid(): self.selectRow(index.row()) # selecting the row sets the current index to 0,0 for tab # order to work correctly, we must set the current index self.setCurrentIndex(index) self.parameterChanged.emit(self.model().selection(index)) self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event) super(AutoParameterTableView, self).mousePressEvent(event)
python
def mousePressEvent(self, event): """Begins edit on cell clicked, if allowed, and passes event to super class""" index = self.indexAt(event.pos()) if index.isValid(): self.selectRow(index.row()) # selecting the row sets the current index to 0,0 for tab # order to work correctly, we must set the current index self.setCurrentIndex(index) self.parameterChanged.emit(self.model().selection(index)) self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event) super(AutoParameterTableView, self).mousePressEvent(event)
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "index", "=", "self", ".", "indexAt", "(", "event", ".", "pos", "(", ")", ")", "if", "index", ".", "isValid", "(", ")", ":", "self", ".", "selectRow", "(", "index", ".", "row", "(", ")", ")", "# selecting the row sets the current index to 0,0 for tab", "# order to work correctly, we must set the current index", "self", ".", "setCurrentIndex", "(", "index", ")", "self", ".", "parameterChanged", ".", "emit", "(", "self", ".", "model", "(", ")", ".", "selection", "(", "index", ")", ")", "self", ".", "edit", "(", "index", ",", "QtGui", ".", "QAbstractItemView", ".", "DoubleClicked", ",", "event", ")", "super", "(", "AutoParameterTableView", ",", "self", ")", ".", "mousePressEvent", "(", "event", ")" ]
Begins edit on cell clicked, if allowed, and passes event to super class
[ "Begins", "edit", "on", "cell", "clicked", "if", "allowed", "and", "passes", "event", "to", "super", "class" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/auto_parameter_view.py#L53-L63
train
outini/python-pylls
pylls/client.py
CachetAPIClient.request
def request(self, path, method, data=None, **kwargs): """Handle requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Parsed json response as :class:`dict` Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object. """ if self.api_token: self.request_headers['X-Cachet-Token'] = self.api_token if not path.startswith('http://') and not path.startswith('https://'): url = "%s/%s" % (self.api_endpoint, path) else: url = path if data is None: data = {} response = self.r_session.request(method, url, data=json.dumps(data), headers=self.request_headers, timeout=self.timeout, verify=self.verify, **kwargs) # If API returns an error, we simply raise and let caller handle it response.raise_for_status() try: return response.json() except ValueError: return {'data': response.text}
python
def request(self, path, method, data=None, **kwargs): """Handle requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Parsed json response as :class:`dict` Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object. """ if self.api_token: self.request_headers['X-Cachet-Token'] = self.api_token if not path.startswith('http://') and not path.startswith('https://'): url = "%s/%s" % (self.api_endpoint, path) else: url = path if data is None: data = {} response = self.r_session.request(method, url, data=json.dumps(data), headers=self.request_headers, timeout=self.timeout, verify=self.verify, **kwargs) # If API returns an error, we simply raise and let caller handle it response.raise_for_status() try: return response.json() except ValueError: return {'data': response.text}
[ "def", "request", "(", "self", ",", "path", ",", "method", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "api_token", ":", "self", ".", "request_headers", "[", "'X-Cachet-Token'", "]", "=", "self", ".", "api_token", "if", "not", "path", ".", "startswith", "(", "'http://'", ")", "and", "not", "path", ".", "startswith", "(", "'https://'", ")", ":", "url", "=", "\"%s/%s\"", "%", "(", "self", ".", "api_endpoint", ",", "path", ")", "else", ":", "url", "=", "path", "if", "data", "is", "None", ":", "data", "=", "{", "}", "response", "=", "self", ".", "r_session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "self", ".", "request_headers", ",", "timeout", "=", "self", ".", "timeout", ",", "verify", "=", "self", ".", "verify", ",", "*", "*", "kwargs", ")", "# If API returns an error, we simply raise and let caller handle it", "response", ".", "raise_for_status", "(", ")", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "return", "{", "'data'", ":", "response", ".", "text", "}" ]
Handle requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Parsed json response as :class:`dict` Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object.
[ "Handle", "requests", "to", "API" ]
f9fa220594bc1974469097d9bad690a42d0d0f0f
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/client.py#L86-L121
train
outini/python-pylls
pylls/client.py
CachetAPIClient.paginate_request
def paginate_request(self, path, method, data=None, **kwargs): """Handle paginated requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Response data items (:class:`Generator`) Cachet pagination is handled and next pages requested on demand. Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object. """ next_page = path while next_page: response = self.request(next_page, method, data=data, **kwargs) if not isinstance(response.get('data'), list): next_page = None yield response['data'] else: for entry in response['data']: yield entry # Get next page if it exists try: links = response['meta']['pagination']['links'] next_page = links.get('next_page') except KeyError: next_page = None
python
def paginate_request(self, path, method, data=None, **kwargs): """Handle paginated requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Response data items (:class:`Generator`) Cachet pagination is handled and next pages requested on demand. Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object. """ next_page = path while next_page: response = self.request(next_page, method, data=data, **kwargs) if not isinstance(response.get('data'), list): next_page = None yield response['data'] else: for entry in response['data']: yield entry # Get next page if it exists try: links = response['meta']['pagination']['links'] next_page = links.get('next_page') except KeyError: next_page = None
[ "def", "paginate_request", "(", "self", ",", "path", ",", "method", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "next_page", "=", "path", "while", "next_page", ":", "response", "=", "self", ".", "request", "(", "next_page", ",", "method", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "response", ".", "get", "(", "'data'", ")", ",", "list", ")", ":", "next_page", "=", "None", "yield", "response", "[", "'data'", "]", "else", ":", "for", "entry", "in", "response", "[", "'data'", "]", ":", "yield", "entry", "# Get next page if it exists", "try", ":", "links", "=", "response", "[", "'meta'", "]", "[", "'pagination'", "]", "[", "'links'", "]", "next_page", "=", "links", ".", "get", "(", "'next_page'", ")", "except", "KeyError", ":", "next_page", "=", "None" ]
Handle paginated requests to API :param str path: API endpoint's path to request :param str method: HTTP method to use :param dict data: Data to send (optional) :return: Response data items (:class:`Generator`) Cachet pagination is handled and next pages requested on demand. Additional named argument may be passed and are directly transmitted to :meth:`request` method of :class:`requests.Session` object.
[ "Handle", "paginated", "requests", "to", "API" ]
f9fa220594bc1974469097d9bad690a42d0d0f0f
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/client.py#L123-L152
train
etal/biocma
biocma/sugar.py
maybe_open
def maybe_open(infile, mode='r'): """Take a file name or a handle, and return a handle. Simplifies creating functions that automagically accept either a file name or an already opened file handle. """ # ENH: Exception safety? if isinstance(infile, basestring): handle = open(infile, mode) do_close = True else: handle = infile do_close = False yield handle if do_close: handle.close()
python
def maybe_open(infile, mode='r'): """Take a file name or a handle, and return a handle. Simplifies creating functions that automagically accept either a file name or an already opened file handle. """ # ENH: Exception safety? if isinstance(infile, basestring): handle = open(infile, mode) do_close = True else: handle = infile do_close = False yield handle if do_close: handle.close()
[ "def", "maybe_open", "(", "infile", ",", "mode", "=", "'r'", ")", ":", "# ENH: Exception safety?", "if", "isinstance", "(", "infile", ",", "basestring", ")", ":", "handle", "=", "open", "(", "infile", ",", "mode", ")", "do_close", "=", "True", "else", ":", "handle", "=", "infile", "do_close", "=", "False", "yield", "handle", "if", "do_close", ":", "handle", ".", "close", "(", ")" ]
Take a file name or a handle, and return a handle. Simplifies creating functions that automagically accept either a file name or an already opened file handle.
[ "Take", "a", "file", "name", "or", "a", "handle", "and", "return", "a", "handle", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/sugar.py#L26-L41
train
sirfoga/pyhal
hal/internet/parser.py
HtmlTable._get_row_tag
def _get_row_tag(row, tag): """Parses row and gets columns matching tag :param row: HTML row :param tag: tag to get :return: list of labels in row """ is_empty = True data = [] for column_label in row.find_all(tag): # cycle through all labels data.append( String(column_label.text).strip_bad_html() ) if data[-1]: is_empty = False if not is_empty: return data return None
python
def _get_row_tag(row, tag): """Parses row and gets columns matching tag :param row: HTML row :param tag: tag to get :return: list of labels in row """ is_empty = True data = [] for column_label in row.find_all(tag): # cycle through all labels data.append( String(column_label.text).strip_bad_html() ) if data[-1]: is_empty = False if not is_empty: return data return None
[ "def", "_get_row_tag", "(", "row", ",", "tag", ")", ":", "is_empty", "=", "True", "data", "=", "[", "]", "for", "column_label", "in", "row", ".", "find_all", "(", "tag", ")", ":", "# cycle through all labels", "data", ".", "append", "(", "String", "(", "column_label", ".", "text", ")", ".", "strip_bad_html", "(", ")", ")", "if", "data", "[", "-", "1", "]", ":", "is_empty", "=", "False", "if", "not", "is_empty", ":", "return", "data", "return", "None" ]
Parses row and gets columns matching tag :param row: HTML row :param tag: tag to get :return: list of labels in row
[ "Parses", "row", "and", "gets", "columns", "matching", "tag" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/parser.py#L22-L42
train
sirfoga/pyhal
hal/internet/parser.py
HtmlTable._parse_row
def _parse_row(row): """Parses HTML row :param row: HTML row :return: list of values in row """ data = [] labels = HtmlTable._get_row_tag(row, "th") if labels: data += labels columns = HtmlTable._get_row_tag(row, "td") if columns: data += columns return data
python
def _parse_row(row): """Parses HTML row :param row: HTML row :return: list of values in row """ data = [] labels = HtmlTable._get_row_tag(row, "th") if labels: data += labels columns = HtmlTable._get_row_tag(row, "td") if columns: data += columns return data
[ "def", "_parse_row", "(", "row", ")", ":", "data", "=", "[", "]", "labels", "=", "HtmlTable", ".", "_get_row_tag", "(", "row", ",", "\"th\"", ")", "if", "labels", ":", "data", "+=", "labels", "columns", "=", "HtmlTable", ".", "_get_row_tag", "(", "row", ",", "\"td\"", ")", "if", "columns", ":", "data", "+=", "columns", "return", "data" ]
Parses HTML row :param row: HTML row :return: list of values in row
[ "Parses", "HTML", "row" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/parser.py#L45-L62
train
sirfoga/pyhal
hal/internet/parser.py
HtmlTable.parse
def parse(self): """Parses data in table :return: List of list of values in table """ data = [] # add name of section for row in self.soup.find_all("tr"): # cycle through all rows parsed = self._parse_row(row) if parsed: data.append(parsed) return data
python
def parse(self): """Parses data in table :return: List of list of values in table """ data = [] # add name of section for row in self.soup.find_all("tr"): # cycle through all rows parsed = self._parse_row(row) if parsed: data.append(parsed) return data
[ "def", "parse", "(", "self", ")", ":", "data", "=", "[", "]", "# add name of section", "for", "row", "in", "self", ".", "soup", ".", "find_all", "(", "\"tr\"", ")", ":", "# cycle through all rows", "parsed", "=", "self", ".", "_parse_row", "(", "row", ")", "if", "parsed", ":", "data", ".", "append", "(", "parsed", ")", "return", "data" ]
Parses data in table :return: List of list of values in table
[ "Parses", "data", "in", "table" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/parser.py#L64-L76
train
moin18/utilspie
utilspie/importutils/import_utils.py
delete_module
def delete_module(modname): """ Delete module and sub-modules from `sys.module` """ try: _ = sys.modules[modname] except KeyError: raise ValueError("Module not found in sys.modules: '{}'".format(modname)) for module in list(sys.modules.keys()): if module and module.startswith(modname): del sys.modules[module]
python
def delete_module(modname): """ Delete module and sub-modules from `sys.module` """ try: _ = sys.modules[modname] except KeyError: raise ValueError("Module not found in sys.modules: '{}'".format(modname)) for module in list(sys.modules.keys()): if module and module.startswith(modname): del sys.modules[module]
[ "def", "delete_module", "(", "modname", ")", ":", "try", ":", "_", "=", "sys", ".", "modules", "[", "modname", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Module not found in sys.modules: '{}'\"", ".", "format", "(", "modname", ")", ")", "for", "module", "in", "list", "(", "sys", ".", "modules", ".", "keys", "(", ")", ")", ":", "if", "module", "and", "module", ".", "startswith", "(", "modname", ")", ":", "del", "sys", ".", "modules", "[", "module", "]" ]
Delete module and sub-modules from `sys.module`
[ "Delete", "module", "and", "sub", "-", "modules", "from", "sys", ".", "module" ]
ea96860b93fd058019a829847258e39323fef31f
https://github.com/moin18/utilspie/blob/ea96860b93fd058019a829847258e39323fef31f/utilspie/importutils/import_utils.py#L8-L19
train
moin18/utilspie
utilspie/importutils/import_utils.py
reload_module
def reload_module(module): """ Reload the Python module """ try: # For Python 2.x reload(module) except (ImportError, NameError): # For <= Python3.3: import imp imp.reload(module) except (ImportError, NameError): # For >= Python3.4 import importlib importlib.reload(module)
python
def reload_module(module): """ Reload the Python module """ try: # For Python 2.x reload(module) except (ImportError, NameError): # For <= Python3.3: import imp imp.reload(module) except (ImportError, NameError): # For >= Python3.4 import importlib importlib.reload(module)
[ "def", "reload_module", "(", "module", ")", ":", "try", ":", "# For Python 2.x", "reload", "(", "module", ")", "except", "(", "ImportError", ",", "NameError", ")", ":", "# For <= Python3.3:", "import", "imp", "imp", ".", "reload", "(", "module", ")", "except", "(", "ImportError", ",", "NameError", ")", ":", "# For >= Python3.4", "import", "importlib", "importlib", ".", "reload", "(", "module", ")" ]
Reload the Python module
[ "Reload", "the", "Python", "module" ]
ea96860b93fd058019a829847258e39323fef31f
https://github.com/moin18/utilspie/blob/ea96860b93fd058019a829847258e39323fef31f/utilspie/importutils/import_utils.py#L22-L36
train
moin18/utilspie
utilspie/importutils/import_utils.py
lazy_load_modules
def lazy_load_modules(*modules): """ Decorator to load module to perform related operation for specific function and delete the module from imports once the task is done. GC frees the memory related to module during clean-up. """ def decorator(function): def wrapper(*args, **kwargs): module_dict = {} for module_string in modules: module = __import__(module_string) # Add `module` entry in `sys.modules`. After deleting the module # from `sys.modules` and re-importing the module don't update # the module entry in `sys.modules` dict sys.modules[module.__package__] = module reload_module(module) module_dict[module_string] = module func_response = function(*args, **kwargs) for module_string, module in module_dict.items(): # delete idna module delete_module(module_string) del module # delete reference to idna return func_response return wrapper return decorator
python
def lazy_load_modules(*modules): """ Decorator to load module to perform related operation for specific function and delete the module from imports once the task is done. GC frees the memory related to module during clean-up. """ def decorator(function): def wrapper(*args, **kwargs): module_dict = {} for module_string in modules: module = __import__(module_string) # Add `module` entry in `sys.modules`. After deleting the module # from `sys.modules` and re-importing the module don't update # the module entry in `sys.modules` dict sys.modules[module.__package__] = module reload_module(module) module_dict[module_string] = module func_response = function(*args, **kwargs) for module_string, module in module_dict.items(): # delete idna module delete_module(module_string) del module # delete reference to idna return func_response return wrapper return decorator
[ "def", "lazy_load_modules", "(", "*", "modules", ")", ":", "def", "decorator", "(", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "module_dict", "=", "{", "}", "for", "module_string", "in", "modules", ":", "module", "=", "__import__", "(", "module_string", ")", "# Add `module` entry in `sys.modules`. After deleting the module", "# from `sys.modules` and re-importing the module don't update", "# the module entry in `sys.modules` dict", "sys", ".", "modules", "[", "module", ".", "__package__", "]", "=", "module", "reload_module", "(", "module", ")", "module_dict", "[", "module_string", "]", "=", "module", "func_response", "=", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "module_string", ",", "module", "in", "module_dict", ".", "items", "(", ")", ":", "# delete idna module", "delete_module", "(", "module_string", ")", "del", "module", "# delete reference to idna", "return", "func_response", "return", "wrapper", "return", "decorator" ]
Decorator to load module to perform related operation for specific function and delete the module from imports once the task is done. GC frees the memory related to module during clean-up.
[ "Decorator", "to", "load", "module", "to", "perform", "related", "operation", "for", "specific", "function", "and", "delete", "the", "module", "from", "imports", "once", "the", "task", "is", "done", ".", "GC", "frees", "the", "memory", "related", "to", "module", "during", "clean", "-", "up", "." ]
ea96860b93fd058019a829847258e39323fef31f
https://github.com/moin18/utilspie/blob/ea96860b93fd058019a829847258e39323fef31f/utilspie/importutils/import_utils.py#L39-L68
train
NoviceLive/intellicoder
intellicoder/init.py
LevelFormatter.format
def format(self, record): """ Format the record using the corresponding formatter. """ if record.levelno == DEBUG: return self.debug_formatter.format(record) if record.levelno == INFO: return self.info_formatter.format(record) if record.levelno == ERROR: return self.error_formatter.format(record) if record.levelno == WARNING: return self.warning_formatter.format(record) if record.levelno == CRITICAL: return self.critical_formatter.format(record)
python
def format(self, record): """ Format the record using the corresponding formatter. """ if record.levelno == DEBUG: return self.debug_formatter.format(record) if record.levelno == INFO: return self.info_formatter.format(record) if record.levelno == ERROR: return self.error_formatter.format(record) if record.levelno == WARNING: return self.warning_formatter.format(record) if record.levelno == CRITICAL: return self.critical_formatter.format(record)
[ "def", "format", "(", "self", ",", "record", ")", ":", "if", "record", ".", "levelno", "==", "DEBUG", ":", "return", "self", ".", "debug_formatter", ".", "format", "(", "record", ")", "if", "record", ".", "levelno", "==", "INFO", ":", "return", "self", ".", "info_formatter", ".", "format", "(", "record", ")", "if", "record", ".", "levelno", "==", "ERROR", ":", "return", "self", ".", "error_formatter", ".", "format", "(", "record", ")", "if", "record", ".", "levelno", "==", "WARNING", ":", "return", "self", ".", "warning_formatter", ".", "format", "(", "record", ")", "if", "record", ".", "levelno", "==", "CRITICAL", ":", "return", "self", ".", "critical_formatter", ".", "format", "(", "record", ")" ]
Format the record using the corresponding formatter.
[ "Format", "the", "record", "using", "the", "corresponding", "formatter", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/init.py#L85-L98
train
dalloriam/engel
engel/widgets/structure.py
Head.load_stylesheet
def load_stylesheet(self, id, path): """ Proper way to dynamically inject a stylesheet in a page. :param path: Path of the stylesheet to inject. """ self.add_child(HeadLink(id=id, link_type="stylesheet", path=path))
python
def load_stylesheet(self, id, path): """ Proper way to dynamically inject a stylesheet in a page. :param path: Path of the stylesheet to inject. """ self.add_child(HeadLink(id=id, link_type="stylesheet", path=path))
[ "def", "load_stylesheet", "(", "self", ",", "id", ",", "path", ")", ":", "self", ".", "add_child", "(", "HeadLink", "(", "id", "=", "id", ",", "link_type", "=", "\"stylesheet\"", ",", "path", "=", "path", ")", ")" ]
Proper way to dynamically inject a stylesheet in a page. :param path: Path of the stylesheet to inject.
[ "Proper", "way", "to", "dynamically", "inject", "a", "stylesheet", "in", "a", "page", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/structure.py#L31-L37
train
dalloriam/engel
engel/widgets/structure.py
List.add_child
def add_child(self, widget): """ Append a widget to the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement` """ li_itm = _li(id=self.id + str(self._count)) li_itm.add_child(widget) super(List, self).add_child(li_itm) self._items.append((widget, li_itm)) self._count += 1
python
def add_child(self, widget): """ Append a widget to the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement` """ li_itm = _li(id=self.id + str(self._count)) li_itm.add_child(widget) super(List, self).add_child(li_itm) self._items.append((widget, li_itm)) self._count += 1
[ "def", "add_child", "(", "self", ",", "widget", ")", ":", "li_itm", "=", "_li", "(", "id", "=", "self", ".", "id", "+", "str", "(", "self", ".", "_count", ")", ")", "li_itm", ".", "add_child", "(", "widget", ")", "super", "(", "List", ",", "self", ")", ".", "add_child", "(", "li_itm", ")", "self", ".", "_items", ".", "append", "(", "(", "widget", ",", "li_itm", ")", ")", "self", ".", "_count", "+=", "1" ]
Append a widget to the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement`
[ "Append", "a", "widget", "to", "the", "list", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/structure.py#L70-L81
train
dalloriam/engel
engel/widgets/structure.py
List.remove_child
def remove_child(self, widget): """ Remove a widget from the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement` """ raw = list(filter(lambda x: x[0] == widget, self._items)) if raw: itm, wrapped = raw[0] self._items.remove(raw[0]) super(List, self).remove_child(wrapped) else: raise ValueError("Child not in list.")
python
def remove_child(self, widget): """ Remove a widget from the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement` """ raw = list(filter(lambda x: x[0] == widget, self._items)) if raw: itm, wrapped = raw[0] self._items.remove(raw[0]) super(List, self).remove_child(wrapped) else: raise ValueError("Child not in list.")
[ "def", "remove_child", "(", "self", ",", "widget", ")", ":", "raw", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "==", "widget", ",", "self", ".", "_items", ")", ")", "if", "raw", ":", "itm", ",", "wrapped", "=", "raw", "[", "0", "]", "self", ".", "_items", ".", "remove", "(", "raw", "[", "0", "]", ")", "super", "(", "List", ",", "self", ")", ".", "remove_child", "(", "wrapped", ")", "else", ":", "raise", "ValueError", "(", "\"Child not in list.\"", ")" ]
Remove a widget from the list. :param widget: Object inheriting :class:`~.widgets.base.BaseElement`
[ "Remove", "a", "widget", "from", "the", "list", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/structure.py#L83-L95
train
ArabellaTech/django-basic-cms
basic_cms/admin/views.py
move_page
def move_page(request, page_id, extra_context=None): """Move the page to the requested target, at the given position.""" page = Page.objects.get(pk=page_id) target = request.POST.get('target', None) position = request.POST.get('position', None) if target is not None and position is not None: try: target = Page.objects.get(pk=target) except Page.DoesNotExist: pass # TODO: should use the django message system # to display this message # _('Page could not been moved.') else: page.invalidate() target.invalidate() from mptt.exceptions import InvalidMove invalid_move = False try: page.move_to(target, position) except InvalidMove: invalid_move = True return list_pages_ajax(request, invalid_move) return HttpResponseRedirect('../../')
python
def move_page(request, page_id, extra_context=None): """Move the page to the requested target, at the given position.""" page = Page.objects.get(pk=page_id) target = request.POST.get('target', None) position = request.POST.get('position', None) if target is not None and position is not None: try: target = Page.objects.get(pk=target) except Page.DoesNotExist: pass # TODO: should use the django message system # to display this message # _('Page could not been moved.') else: page.invalidate() target.invalidate() from mptt.exceptions import InvalidMove invalid_move = False try: page.move_to(target, position) except InvalidMove: invalid_move = True return list_pages_ajax(request, invalid_move) return HttpResponseRedirect('../../')
[ "def", "move_page", "(", "request", ",", "page_id", ",", "extra_context", "=", "None", ")", ":", "page", "=", "Page", ".", "objects", ".", "get", "(", "pk", "=", "page_id", ")", "target", "=", "request", ".", "POST", ".", "get", "(", "'target'", ",", "None", ")", "position", "=", "request", ".", "POST", ".", "get", "(", "'position'", ",", "None", ")", "if", "target", "is", "not", "None", "and", "position", "is", "not", "None", ":", "try", ":", "target", "=", "Page", ".", "objects", ".", "get", "(", "pk", "=", "target", ")", "except", "Page", ".", "DoesNotExist", ":", "pass", "# TODO: should use the django message system", "# to display this message", "# _('Page could not been moved.')", "else", ":", "page", ".", "invalidate", "(", ")", "target", ".", "invalidate", "(", ")", "from", "mptt", ".", "exceptions", "import", "InvalidMove", "invalid_move", "=", "False", "try", ":", "page", ".", "move_to", "(", "target", ",", "position", ")", "except", "InvalidMove", ":", "invalid_move", "=", "True", "return", "list_pages_ajax", "(", "request", ",", "invalid_move", ")", "return", "HttpResponseRedirect", "(", "'../../'", ")" ]
Move the page to the requested target, at the given position.
[ "Move", "the", "page", "to", "the", "requested", "target", "at", "the", "given", "position", "." ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/admin/views.py#L113-L138
train
NoviceLive/intellicoder
intellicoder/sources.py
reloc_var
def reloc_var(var_name, reloc_delta, pointer, var_type): """ Build C source code to relocate a variable. """ template = '{0} {3}{1} = RELOC_VAR(_{1}, {2}, {0});\n' return template.format( var_type, var_name, reloc_delta, '*' if pointer else '' )
python
def reloc_var(var_name, reloc_delta, pointer, var_type): """ Build C source code to relocate a variable. """ template = '{0} {3}{1} = RELOC_VAR(_{1}, {2}, {0});\n' return template.format( var_type, var_name, reloc_delta, '*' if pointer else '' )
[ "def", "reloc_var", "(", "var_name", ",", "reloc_delta", ",", "pointer", ",", "var_type", ")", ":", "template", "=", "'{0} {3}{1} = RELOC_VAR(_{1}, {2}, {0});\\n'", "return", "template", ".", "format", "(", "var_type", ",", "var_name", ",", "reloc_delta", ",", "'*'", "if", "pointer", "else", "''", ")" ]
Build C source code to relocate a variable.
[ "Build", "C", "source", "code", "to", "relocate", "a", "variable", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/sources.py#L138-L146
train
NoviceLive/intellicoder
intellicoder/sources.py
make_c_args
def make_c_args(arg_pairs): """ Build a C argument list from return type and arguments pairs. """ logging.debug(arg_pairs) c_args = [ '{} {}'.format(arg_type, arg_name) if arg_name else arg_type for dummy_number, arg_type, arg_name in sorted(arg_pairs) ] return ', '.join(c_args)
python
def make_c_args(arg_pairs): """ Build a C argument list from return type and arguments pairs. """ logging.debug(arg_pairs) c_args = [ '{} {}'.format(arg_type, arg_name) if arg_name else arg_type for dummy_number, arg_type, arg_name in sorted(arg_pairs) ] return ', '.join(c_args)
[ "def", "make_c_args", "(", "arg_pairs", ")", ":", "logging", ".", "debug", "(", "arg_pairs", ")", "c_args", "=", "[", "'{} {}'", ".", "format", "(", "arg_type", ",", "arg_name", ")", "if", "arg_name", "else", "arg_type", "for", "dummy_number", ",", "arg_type", ",", "arg_name", "in", "sorted", "(", "arg_pairs", ")", "]", "return", "', '", ".", "join", "(", "c_args", ")" ]
Build a C argument list from return type and arguments pairs.
[ "Build", "a", "C", "argument", "list", "from", "return", "type", "and", "arguments", "pairs", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/sources.py#L166-L175
train
lowandrew/OLCTools
spadespipeline/phix.py
PhiX.interop_parse
def interop_parse(self): """ Use interop to parse the files in the InterOp folder to extract the number of reads mapping to PhiX as well as the error rate """ # Parse the files and load the data try: run_metrics = py_interop_run_metrics.run_metrics() valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0) py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load) run_metrics.read(self.path, valid_to_load) summary = py_interop_summary.run_summary() py_interop_summary.summarize_run_metrics(run_metrics, summary) # PhiX error rate for run over all "usable cycles" errorrate = summary.total_summary().error_rate() # Percent aligned PhiX pctaligned = summary.total_summary().percent_aligned() # Add the error rate and the percent of reads that align to PhiX to the metadata object for sample in self.metadata: sample.run.error_rate = '{:.2f}'.format(errorrate) sample.run.phix_aligned = '{:.2f}'.format(pctaligned) except: for sample in self.metadata: sample.run.error_rate = 'ND' sample.run.phix_aligned = 'ND'
python
def interop_parse(self): """ Use interop to parse the files in the InterOp folder to extract the number of reads mapping to PhiX as well as the error rate """ # Parse the files and load the data try: run_metrics = py_interop_run_metrics.run_metrics() valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0) py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load) run_metrics.read(self.path, valid_to_load) summary = py_interop_summary.run_summary() py_interop_summary.summarize_run_metrics(run_metrics, summary) # PhiX error rate for run over all "usable cycles" errorrate = summary.total_summary().error_rate() # Percent aligned PhiX pctaligned = summary.total_summary().percent_aligned() # Add the error rate and the percent of reads that align to PhiX to the metadata object for sample in self.metadata: sample.run.error_rate = '{:.2f}'.format(errorrate) sample.run.phix_aligned = '{:.2f}'.format(pctaligned) except: for sample in self.metadata: sample.run.error_rate = 'ND' sample.run.phix_aligned = 'ND'
[ "def", "interop_parse", "(", "self", ")", ":", "# Parse the files and load the data", "try", ":", "run_metrics", "=", "py_interop_run_metrics", ".", "run_metrics", "(", ")", "valid_to_load", "=", "py_interop_run", ".", "uchar_vector", "(", "py_interop_run", ".", "MetricCount", ",", "0", ")", "py_interop_run_metrics", ".", "list_summary_metrics_to_load", "(", "valid_to_load", ")", "run_metrics", ".", "read", "(", "self", ".", "path", ",", "valid_to_load", ")", "summary", "=", "py_interop_summary", ".", "run_summary", "(", ")", "py_interop_summary", ".", "summarize_run_metrics", "(", "run_metrics", ",", "summary", ")", "# PhiX error rate for run over all \"usable cycles\"", "errorrate", "=", "summary", ".", "total_summary", "(", ")", ".", "error_rate", "(", ")", "# Percent aligned PhiX", "pctaligned", "=", "summary", ".", "total_summary", "(", ")", ".", "percent_aligned", "(", ")", "# Add the error rate and the percent of reads that align to PhiX to the metadata object", "for", "sample", "in", "self", ".", "metadata", ":", "sample", ".", "run", ".", "error_rate", "=", "'{:.2f}'", ".", "format", "(", "errorrate", ")", "sample", ".", "run", ".", "phix_aligned", "=", "'{:.2f}'", ".", "format", "(", "pctaligned", ")", "except", ":", "for", "sample", "in", "self", ".", "metadata", ":", "sample", ".", "run", ".", "error_rate", "=", "'ND'", "sample", ".", "run", ".", "phix_aligned", "=", "'ND'" ]
Use interop to parse the files in the InterOp folder to extract the number of reads mapping to PhiX as well as the error rate
[ "Use", "interop", "to", "parse", "the", "files", "in", "the", "InterOp", "folder", "to", "extract", "the", "number", "of", "reads", "mapping", "to", "PhiX", "as", "well", "as", "the", "error", "rate" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/phix.py#L31-L55
train
NoviceLive/intellicoder
intellicoder/msbuild/builders.py
Builder.make_inc
def make_inc(incs): """ Make include directory for link.exe. """ inc_args = [['/I', inc] for inc in incs] return list(chain.from_iterable(inc_args))
python
def make_inc(incs): """ Make include directory for link.exe. """ inc_args = [['/I', inc] for inc in incs] return list(chain.from_iterable(inc_args))
[ "def", "make_inc", "(", "incs", ")", ":", "inc_args", "=", "[", "[", "'/I'", ",", "inc", "]", "for", "inc", "in", "incs", "]", "return", "list", "(", "chain", ".", "from_iterable", "(", "inc_args", ")", ")" ]
Make include directory for link.exe.
[ "Make", "include", "directory", "for", "link", ".", "exe", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/msbuild/builders.py#L113-L118
train