Dataset schema (column, type, observed lengths):

  repo              stringlengths   7–55
  path              stringlengths   4–127
  func_name         stringlengths   1–88
  original_string   stringlengths   75–19.8k
  language          stringclasses   1 value
  code              stringlengths   75–19.8k
  code_tokens       sequence
  docstring         stringlengths   3–17.3k
  docstring_tokens  sequence
  sha               stringlengths   40–40
  url               stringlengths   87–242
  partition         stringclasses   1 value
nocarryr/python-dispatch
pydispatch/utils.py
WeakMethodContainer.del_instance
def del_instance(self, obj):
    """Remove any stored instance methods that belong to an object

    Args:
        obj: The instance object to remove
    """
    to_remove = set()
    for wrkey, _obj in self.iter_instances():
        if obj is _obj:
            to_remove.add(wrkey)
    for wrkey in to_remove:
        del self[wrkey]
python
def del_instance(self, obj):
    """Remove any stored instance methods that belong to an object

    Args:
        obj: The instance object to remove
    """
    to_remove = set()
    for wrkey, _obj in self.iter_instances():
        if obj is _obj:
            to_remove.add(wrkey)
    for wrkey in to_remove:
        del self[wrkey]
[ "def", "del_instance", "(", "self", ",", "obj", ")", ":", "to_remove", "=", "set", "(", ")", "for", "wrkey", ",", "_obj", "in", "self", ".", "iter_instances", "(", ")", ":", "if", "obj", "is", "_obj", ":", "to_remove", ".", "add", "(", "wrkey", ")", "for", "wrkey", "in", "to_remove", ":", "del", "self", "[", "wrkey", "]" ]
Remove any stored instance methods that belong to an object

Args:
    obj: The instance object to remove
[ "Remove", "any", "stored", "instance", "methods", "that", "belong", "to", "an", "object" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L74-L85
train
nocarryr/python-dispatch
pydispatch/utils.py
WeakMethodContainer.iter_instances
def iter_instances(self):
    """Iterate over the stored objects

    Yields:
        wrkey: The two-tuple key used to store the object
        obj: The instance or function object
    """
    for wrkey in set(self.keys()):
        obj = self.get(wrkey)
        if obj is None:
            continue
        yield wrkey, obj
python
def iter_instances(self):
    """Iterate over the stored objects

    Yields:
        wrkey: The two-tuple key used to store the object
        obj: The instance or function object
    """
    for wrkey in set(self.keys()):
        obj = self.get(wrkey)
        if obj is None:
            continue
        yield wrkey, obj
[ "def", "iter_instances", "(", "self", ")", ":", "for", "wrkey", "in", "set", "(", "self", ".", "keys", "(", ")", ")", ":", "obj", "=", "self", ".", "get", "(", "wrkey", ")", "if", "obj", "is", "None", ":", "continue", "yield", "wrkey", ",", "obj" ]
Iterate over the stored objects

Yields:
    wrkey: The two-tuple key used to store the object
    obj: The instance or function object
[ "Iterate", "over", "the", "stored", "objects" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L86-L97
train
nocarryr/python-dispatch
pydispatch/utils.py
WeakMethodContainer.iter_methods
def iter_methods(self):
    """Iterate over stored functions and instance methods

    Yields:
        Instance methods or function objects
    """
    for wrkey, obj in self.iter_instances():
        f, obj_id = wrkey
        if f == 'function':
            yield self[wrkey]
        else:
            yield getattr(obj, f.__name__)
python
def iter_methods(self):
    """Iterate over stored functions and instance methods

    Yields:
        Instance methods or function objects
    """
    for wrkey, obj in self.iter_instances():
        f, obj_id = wrkey
        if f == 'function':
            yield self[wrkey]
        else:
            yield getattr(obj, f.__name__)
[ "def", "iter_methods", "(", "self", ")", ":", "for", "wrkey", ",", "obj", "in", "self", ".", "iter_instances", "(", ")", ":", "f", ",", "obj_id", "=", "wrkey", "if", "f", "==", "'function'", ":", "yield", "self", "[", "wrkey", "]", "else", ":", "yield", "getattr", "(", "obj", ",", "f", ".", "__name__", ")" ]
Iterate over stored functions and instance methods

Yields:
    Instance methods or function objects
[ "Iterate", "over", "stored", "functions", "and", "instance", "methods" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L98-L109
train
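The three WeakMethodContainer rows above fit together: a bound method is stored under a (function, id(instance)) two-tuple key, iter_instances skips keys whose weakly-referenced value has been collected, and del_instance sweeps every key owned by one object. A minimal, self-contained sketch under that reading; TinyMethodContainer and add_method are hypothetical stand-ins, not the real pydispatch API:

import weakref

class TinyMethodContainer(weakref.WeakValueDictionary):
    """Hypothetical stand-in for pydispatch's WeakMethodContainer."""
    def add_method(self, callback):
        # store the owning instance under a (function, id(instance)) key
        self[(callback.__func__, id(callback.__self__))] = callback.__self__

    def iter_instances(self):
        for wrkey in set(self.keys()):
            obj = self.get(wrkey)
            if obj is None:      # weak value already garbage-collected
                continue
            yield wrkey, obj

    def del_instance(self, obj):
        to_remove = {wrkey for wrkey, _obj in self.iter_instances() if obj is _obj}
        for wrkey in to_remove:
            del self[wrkey]

class Listener:
    def on_event(self):
        return 'event!'

container = TinyMethodContainer()
listener = Listener()
container.add_method(listener.on_event)
for (f, obj_id), obj in container.iter_instances():
    print(getattr(obj, f.__name__)())    # -> event!
container.del_instance(listener)         # removes every method bound to listener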
acutesoftware/AIKIF
aikif/cls_file_mapping.py
load_data_subject_areas
def load_data_subject_areas(subject_file):
    """
    reads the subject file to a list, to confirm config is setup
    """
    lst = []
    if os.path.exists(subject_file):
        with open(subject_file, 'r') as f:
            for line in f:
                lst.append(line.strip())
    else:
        print('MISSING DATA FILE (subject_file) ', subject_file)
        print('update your config.py or config.txt')
    return lst
python
def load_data_subject_areas(subject_file):
    """
    reads the subject file to a list, to confirm config is setup
    """
    lst = []
    if os.path.exists(subject_file):
        with open(subject_file, 'r') as f:
            for line in f:
                lst.append(line.strip())
    else:
        print('MISSING DATA FILE (subject_file) ', subject_file)
        print('update your config.py or config.txt')
    return lst
[ "def", "load_data_subject_areas", "(", "subject_file", ")", ":", "lst", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "subject_file", ")", ":", "with", "open", "(", "subject_file", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "lst", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "else", ":", "print", "(", "'MISSING DATA FILE (subject_file) '", ",", "subject_file", ")", "print", "(", "'update your config.py or config.txt'", ")", "return", "lst" ]
reads the subject file to a list, to confirm config is setup
[ "reads", "the", "subject", "file", "to", "a", "list", "to", "confirm", "config", "is", "setup" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L27-L39
train
acutesoftware/AIKIF
aikif/cls_file_mapping.py
check_ontology
def check_ontology(fname):
    """
    reads the ontology yaml file and does basic verification
    """
    with open(fname, 'r') as stream:
        y = yaml.safe_load(stream)
        import pprint
        pprint.pprint(y)
python
def check_ontology(fname):
    """
    reads the ontology yaml file and does basic verification
    """
    with open(fname, 'r') as stream:
        y = yaml.safe_load(stream)
        import pprint
        pprint.pprint(y)
[ "def", "check_ontology", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'r'", ")", "as", "stream", ":", "y", "=", "yaml", ".", "safe_load", "(", "stream", ")", "import", "pprint", "pprint", ".", "pprint", "(", "y", ")" ]
reads the ontology yaml file and does basic verification
[ "reads", "the", "ontology", "yaml", "file", "and", "does", "basic", "verification" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L135-L142
train
acutesoftware/AIKIF
aikif/cls_file_mapping.py
FileMap.find_type
def find_type(self, txt):
    """
    top level function used to simply return the
    ONE ACTUAL string used for data types
    """
    searchString = txt.upper()
    match = 'Unknown'
    for i in self.lst_type:
        if searchString in i:
            match = i
    return match
python
def find_type(self, txt):
    """
    top level function used to simply return the
    ONE ACTUAL string used for data types
    """
    searchString = txt.upper()
    match = 'Unknown'
    for i in self.lst_type:
        if searchString in i:
            match = i
    return match
[ "def", "find_type", "(", "self", ",", "txt", ")", ":", "searchString", "=", "txt", ".", "upper", "(", ")", "match", "=", "'Unknown'", "for", "i", "in", "self", ".", "lst_type", ":", "if", "searchString", "in", "i", ":", "match", "=", "i", "return", "match" ]
top level function used to simply return the ONE ACTUAL string used for data types
[ "top", "level", "function", "used", "to", "simply", "return", "the", "ONE", "ACTUAL", "string", "used", "for", "data", "types" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L105-L115
train
acutesoftware/AIKIF
aikif/cls_file_mapping.py
FileMap.get_full_filename
def get_full_filename(self, dataType, subjectArea):
    """
    returns the file based on dataType and subjectArea
    """
    return dataPath + os.sep + 'core' + os.sep + dataType + '_' + subjectArea + '.CSV'
python
def get_full_filename(self, dataType, subjectArea):
    """
    returns the file based on dataType and subjectArea
    """
    return dataPath + os.sep + 'core' + os.sep + dataType + '_' + subjectArea + '.CSV'
[ "def", "get_full_filename", "(", "self", ",", "dataType", ",", "subjectArea", ")", ":", "return", "dataPath", "+", "os", ".", "sep", "+", "'core'", "+", "os", ".", "sep", "+", "dataType", "+", "'_'", "+", "subjectArea", "+", "'.CSV'" ]
returns the file based on dataType and subjectArea
[ "returns", "the", "file", "based", "on", "dataType", "and", "subjectArea" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L117-L121
train
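Taken together, find_type and get_full_filename map a free-text hint to a canonical type name and then to a CSV path. A hedged sketch of that flow; the lst_type contents and dataPath value below are illustrative assumptions, not values from the AIKIF source:

import os

dataPath = '/tmp/aikif-data'                # assumed data folder
lst_type = ['EVENT', 'LOCATION', 'OBJECT']  # assumed canonical type list

def find_type(txt):
    # substring match against the canonical list; the last hit wins
    searchString = txt.upper()
    match = 'Unknown'
    for i in lst_type:
        if searchString in i:
            match = i
    return match

def get_full_filename(dataType, subjectArea):
    return dataPath + os.sep + 'core' + os.sep + dataType + '_' + subjectArea + '.CSV'

print(find_type('loc'))                               # -> LOCATION
print(get_full_filename(find_type('loc'), 'TRAVEL'))  # -> /tmp/aikif-data/core/LOCATION_TRAVEL.CSV

Note the substring semantics: any hint contained in a type name matches, and a hint contained in several names silently returns the last one.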
acutesoftware/AIKIF
aikif/lib/cls_plan_BDI.py
Plan_BDI.load_plan
def load_plan(self, fname):
    """ read the list of thoughts from a text file """
    with open(fname, "r") as f:
        for line in f:
            if line != '':
                tpe, txt = self.parse_plan_from_string(line)
                #print('tpe= "' + tpe + '"', txt)
                if tpe == 'name':
                    self.name = txt
                elif tpe == 'version':
                    self.plan_version = txt
                elif tpe == 'belief':
                    self.beliefs.add(txt)
                elif tpe == 'desire':
                    self.desires.add(txt)
                elif tpe == 'intention':
                    self.intentions.add(txt)
python
def load_plan(self, fname):
    """ read the list of thoughts from a text file """
    with open(fname, "r") as f:
        for line in f:
            if line != '':
                tpe, txt = self.parse_plan_from_string(line)
                #print('tpe= "' + tpe + '"', txt)
                if tpe == 'name':
                    self.name = txt
                elif tpe == 'version':
                    self.plan_version = txt
                elif tpe == 'belief':
                    self.beliefs.add(txt)
                elif tpe == 'desire':
                    self.desires.add(txt)
                elif tpe == 'intention':
                    self.intentions.add(txt)
[ "def", "load_plan", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", "!=", "''", ":", "tpe", ",", "txt", "=", "self", ".", "parse_plan_from_string", "(", "line", ")", "#print('tpe= \"' + tpe + '\"', txt)", "if", "tpe", "==", "'name'", ":", "self", ".", "name", "=", "txt", "elif", "tpe", "==", "'version'", ":", "self", ".", "plan_version", "=", "txt", "elif", "tpe", "==", "'belief'", ":", "self", ".", "beliefs", ".", "add", "(", "txt", ")", "elif", "tpe", "==", "'desire'", ":", "self", ".", "desires", ".", "add", "(", "txt", ")", "elif", "tpe", "==", "'intention'", ":", "self", ".", "intentions", ".", "add", "(", "txt", ")" ]
read the list of thoughts from a text file
[ "read", "the", "list", "of", "thoughts", "from", "a", "text", "file" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_plan_BDI.py#L48-L64
train
acutesoftware/AIKIF
aikif/lib/cls_plan_BDI.py
Plan_BDI.add_constraint
def add_constraint(self, name, tpe, val):
    """
    adds a constraint for the plan
    """
    self.constraint.append([name, tpe, val])
python
def add_constraint(self, name, tpe, val):
    """
    adds a constraint for the plan
    """
    self.constraint.append([name, tpe, val])
[ "def", "add_constraint", "(", "self", ",", "name", ",", "tpe", ",", "val", ")", ":", "self", ".", "constraint", ".", "append", "(", "[", "name", ",", "tpe", ",", "val", "]", ")" ]
adds a constraint for the plan
[ "adds", "a", "constraint", "for", "the", "plan" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_plan_BDI.py#L100-L104
train
acutesoftware/AIKIF
aikif/mapper.py
Mapper.get_maps_stats
def get_maps_stats(self):
    """
    calculates basic stats on the MapRule elements of the maps
    to give a quick overview.
    """
    tpes = {}
    for m in self.maps:
        if m.tpe in tpes:
            tpes[m.tpe] += 1
        else:
            tpes[m.tpe] = 1
    return tpes
python
def get_maps_stats(self):
    """
    calculates basic stats on the MapRule elements of the maps
    to give a quick overview.
    """
    tpes = {}
    for m in self.maps:
        if m.tpe in tpes:
            tpes[m.tpe] += 1
        else:
            tpes[m.tpe] = 1
    return tpes
[ "def", "get_maps_stats", "(", "self", ")", ":", "tpes", "=", "{", "}", "for", "m", "in", "self", ".", "maps", ":", "if", "m", ".", "tpe", "in", "tpes", ":", "tpes", "[", "m", ".", "tpe", "]", "+=", "1", "else", ":", "tpes", "[", "m", ".", "tpe", "]", "=", "1", "return", "tpes" ]
calculates basic stats on the MapRule elements of the maps to give a quick overview.
[ "calculates", "basic", "stats", "on", "the", "MapRule", "elements", "of", "the", "maps", "to", "give", "a", "quick", "overview", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L40-L51
train
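The counting loop in get_maps_stats is the classic histogram pattern; collections.Counter expresses the same thing in one line. A small equivalence check, with SimpleNamespace standing in for MapRule (an assumption for illustration):

from collections import Counter
from types import SimpleNamespace

maps = [SimpleNamespace(tpe='file'), SimpleNamespace(tpe='text'),
        SimpleNamespace(tpe='file')]

tpes = Counter(m.tpe for m in maps)
print(dict(tpes))   # -> {'file': 2, 'text': 1}, same result as the manual loop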
acutesoftware/AIKIF
aikif/mapper.py
Mapper.save_rules
def save_rules(self, op_file):
    """
    save the rules to file after web updates or program changes
    """
    with open(op_file, 'w') as f:
        for m in self.maps:
            f.write(m.format_for_file_output())
python
def save_rules(self, op_file):
    """
    save the rules to file after web updates or program changes
    """
    with open(op_file, 'w') as f:
        for m in self.maps:
            f.write(m.format_for_file_output())
[ "def", "save_rules", "(", "self", ",", "op_file", ")", ":", "with", "open", "(", "op_file", ",", "'w'", ")", "as", "f", ":", "for", "m", "in", "self", ".", "maps", ":", "f", ".", "write", "(", "m", ".", "format_for_file_output", "(", ")", ")" ]
save the rules to file after web updates or program changes
[ "save", "the", "rules", "to", "file", "after", "web", "updates", "or", "program", "changes" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L65-L71
train
acutesoftware/AIKIF
aikif/mapper.py
Mapper.process_rule
def process_rule(self, m, dct, tpe):
    """
    uses the MapRule 'm' to run through the 'dict'
    and extract data based on the rule
    """
    print('TODO - ' + tpe + ' + applying rule ' + str(m).replace('\n', ''))
python
def process_rule(self, m, dct, tpe):
    """
    uses the MapRule 'm' to run through the 'dict'
    and extract data based on the rule
    """
    print('TODO - ' + tpe + ' + applying rule ' + str(m).replace('\n', ''))
[ "def", "process_rule", "(", "self", ",", "m", ",", "dct", ",", "tpe", ")", ":", "print", "(", "'TODO - '", "+", "tpe", "+", "' + applying rule '", "+", "str", "(", "m", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ")" ]
uses the MapRule 'm' to run through the 'dict' and extract data based on the rule
[ "uses", "the", "MapRule", "m", "to", "run", "through", "the", "dict", "and", "extract", "data", "based", "on", "the", "rule" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L123-L128
train
acutesoftware/AIKIF
aikif/mapper.py
Mapper.format_raw_data
def format_raw_data(self, tpe, raw_data):
    """
    uses type to format the raw information to a dictionary
    usable by the mapper
    """
    if tpe == 'text':
        formatted_raw_data = self.parse_text_to_dict(raw_data)
    elif tpe == 'file':
        formatted_raw_data = self.parse_file_to_dict(raw_data)
    else:
        formatted_raw_data = {'ERROR': 'unknown data type', 'data': [raw_data]}
    return formatted_raw_data
python
def format_raw_data(self, tpe, raw_data):
    """
    uses type to format the raw information to a dictionary
    usable by the mapper
    """
    if tpe == 'text':
        formatted_raw_data = self.parse_text_to_dict(raw_data)
    elif tpe == 'file':
        formatted_raw_data = self.parse_file_to_dict(raw_data)
    else:
        formatted_raw_data = {'ERROR': 'unknown data type', 'data': [raw_data]}
    return formatted_raw_data
[ "def", "format_raw_data", "(", "self", ",", "tpe", ",", "raw_data", ")", ":", "if", "tpe", "==", "'text'", ":", "formatted_raw_data", "=", "self", ".", "parse_text_to_dict", "(", "raw_data", ")", "elif", "tpe", "==", "'file'", ":", "formatted_raw_data", "=", "self", ".", "parse_file_to_dict", "(", "raw_data", ")", "else", ":", "formatted_raw_data", "=", "{", "'ERROR'", ":", "'unknown data type'", ",", "'data'", ":", "[", "raw_data", "]", "}", "return", "formatted_raw_data" ]
uses type to format the raw information to a dictionary usable by the mapper
[ "uses", "type", "to", "format", "the", "raw", "information", "to", "a", "dictionary", "usable", "by", "the", "mapper" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L131-L143
train
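format_raw_data is a small type dispatcher; the same routing can be written as a dictionary of parser callables, which keeps the fallback in one place. A sketch with simplified stand-ins for the Mapper's parse methods:

def parse_text_to_dict(raw):
    return {'nouns': raw, 'verbs': raw}      # simplified stand-in

def parse_file_to_dict(raw):
    return {'file': raw}                     # simplified stand-in

PARSERS = {'text': parse_text_to_dict, 'file': parse_file_to_dict}

def format_raw_data(tpe, raw_data):
    parser = PARSERS.get(tpe)
    if parser is None:
        return {'ERROR': 'unknown data type', 'data': [raw_data]}
    return parser(raw_data)

print(format_raw_data('text', 'the cat sat'))  # routed to the text parser
print(format_raw_data('csv', 'a,b,c'))         # unknown type -> ERROR dict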
acutesoftware/AIKIF
aikif/mapper.py
Mapper.parse_text_to_dict
def parse_text_to_dict(self, txt):
    """
    takes a string and parses via NLP, ready for mapping
    """
    op = {}
    print('TODO - import NLP, split into verbs / nouns')
    op['nouns'] = txt
    op['verbs'] = txt
    return op
python
def parse_text_to_dict(self, txt):
    """
    takes a string and parses via NLP, ready for mapping
    """
    op = {}
    print('TODO - import NLP, split into verbs / nouns')
    op['nouns'] = txt
    op['verbs'] = txt
    return op
[ "def", "parse_text_to_dict", "(", "self", ",", "txt", ")", ":", "op", "=", "{", "}", "print", "(", "'TODO - import NLP, split into verbs / nouns'", ")", "op", "[", "'nouns'", "]", "=", "txt", "op", "[", "'verbs'", "]", "=", "txt", "return", "op" ]
takes a string and parses via NLP, ready for mapping
[ "takes", "a", "string", "and", "parses", "via", "NLP", "ready", "for", "mapping" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L145-L154
train
acutesoftware/AIKIF
aikif/mapper.py
Mapper.parse_file_to_dict
def parse_file_to_dict(self, fname):
    """
    process the file according to the mapping rules.
    The cols list must match the columns in the filename
    """
    print('TODO - parse_file_to_dict' + fname)
    for m in self.maps:
        if m.tpe == 'file':
            if m.key[0:3] == 'col':
                print('reading column..')
python
def parse_file_to_dict(self, fname):
    """
    process the file according to the mapping rules.
    The cols list must match the columns in the filename
    """
    print('TODO - parse_file_to_dict' + fname)
    for m in self.maps:
        if m.tpe == 'file':
            if m.key[0:3] == 'col':
                print('reading column..')
[ "def", "parse_file_to_dict", "(", "self", ",", "fname", ")", ":", "print", "(", "'TODO - parse_file_to_dict'", "+", "fname", ")", "for", "m", "in", "self", ".", "maps", ":", "if", "m", ".", "tpe", "==", "'file'", ":", "if", "m", ".", "key", "[", "0", ":", "3", "]", "==", "'col'", ":", "print", "(", "'reading column..'", ")" ]
process the file according to the mapping rules. The cols list must match the columns in the filename
[ "process", "the", "file", "according", "to", "the", "mapping", "rules", ".", "The", "cols", "list", "must", "match", "the", "columns", "in", "the", "filename" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L156-L165
train
acutesoftware/AIKIF
aikif/mapper.py
Mapper.create_map_from_file
def create_map_from_file(self, data_filename):
    """
    reads the data_filename into a matrix and calls the main
    function '' to generate a .rule file based on the data in the map
    For all datafiles mapped, there exists a .rule file to define it
    """
    op_filename = data_filename + '.rule'
    dataset = mod_datatable.DataTable(data_filename, ',')
    dataset.load_to_array()
    l_map = self.generate_map_from_dataset(dataset)
    with open(op_filename, 'w') as f:
        f.write('# rules file autogenerated by mapper.py v0.1\n')
        f.write('filename:source=' + data_filename + '\n')
        f.write('filename:rule=' + op_filename + '\n\n')
        for row in l_map:
            #print('ROW = ' , row)
            if type(row) is str:
                f.write(row + '\n')
            else:
                for v in row:
                    f.write(v)
python
def create_map_from_file(self, data_filename):
    """
    reads the data_filename into a matrix and calls the main
    function '' to generate a .rule file based on the data in the map
    For all datafiles mapped, there exists a .rule file to define it
    """
    op_filename = data_filename + '.rule'
    dataset = mod_datatable.DataTable(data_filename, ',')
    dataset.load_to_array()
    l_map = self.generate_map_from_dataset(dataset)
    with open(op_filename, 'w') as f:
        f.write('# rules file autogenerated by mapper.py v0.1\n')
        f.write('filename:source=' + data_filename + '\n')
        f.write('filename:rule=' + op_filename + '\n\n')
        for row in l_map:
            #print('ROW = ' , row)
            if type(row) is str:
                f.write(row + '\n')
            else:
                for v in row:
                    f.write(v)
[ "def", "create_map_from_file", "(", "self", ",", "data_filename", ")", ":", "op_filename", "=", "data_filename", "+", "'.rule'", "dataset", "=", "mod_datatable", ".", "DataTable", "(", "data_filename", ",", "','", ")", "dataset", ".", "load_to_array", "(", ")", "l_map", "=", "self", ".", "generate_map_from_dataset", "(", "dataset", ")", "with", "open", "(", "op_filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'# rules file autogenerated by mapper.py v0.1\\n'", ")", "f", ".", "write", "(", "'filename:source='", "+", "data_filename", "+", "'\\n'", ")", "f", ".", "write", "(", "'filename:rule='", "+", "op_filename", "+", "'\\n\\n'", ")", "for", "row", "in", "l_map", ":", "#print('ROW = ' , row)", "if", "type", "(", "row", ")", "is", "str", ":", "f", ".", "write", "(", "row", "+", "'\\n'", ")", "else", ":", "for", "v", "in", "row", ":", "f", ".", "write", "(", "v", ")" ]
reads the data_filename into a matrix and calls the main function ''
to generate a .rule file based on the data in the map

For all datafiles mapped, there exists a .rule file to define it
[ "reads", "the", "data_filename", "into", "a", "matrix", "and", "calls", "the", "main", "function", "to", "generate", "a", ".", "rule", "file", "based", "on", "the", "data", "in", "the", "map", "For", "all", "datafiles", "mapped", "there", "exists", "a", ".", "rule", "file", "to", "define", "it" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L198-L222
train
acutesoftware/AIKIF
scripts/examples/aggie/aggie.py
Aggie.run
def run(self):
    """
    loops until exit command given
    """
    while self.status != 'EXIT':
        print(self.process_input(self.get_input()))
    print('Bye')
python
def run(self):
    """
    loops until exit command given
    """
    while self.status != 'EXIT':
        print(self.process_input(self.get_input()))
    print('Bye')
[ "def", "run", "(", "self", ")", ":", "while", "self", ".", "status", "!=", "'EXIT'", ":", "print", "(", "self", ".", "process_input", "(", "self", ".", "get_input", "(", ")", ")", ")", "print", "(", "'Bye'", ")" ]
loops until exit command given
[ "loops", "until", "exit", "command", "given" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/aggie/aggie.py#L45-L52
train
acutesoftware/AIKIF
scripts/examples/aggie/aggie.py
Aggie.process_input
def process_input(self, question):
    """
    takes a question and returns the best answer based on known skills
    """
    ans = ''
    if self.status == 'EXIT':
        print('bye')
        sys.exit()
    if '?' in question:
        ans = self.info.find_answer(question)
    elif question.startswith(':LIST'):
        ans = 'List of Raw Input\n'
        for i in self.info.raw_input:
            ans += str(i) + '\n'
    else:
        #ans = 'I dont'' know'
        ans = 'Adding info..'
        self.info.raw_input.append(question)
    self.lg.record_process('aggie.py', 'Question > ' + question)
    self.lg.record_process('aggie.py', 'Answer > ' + ans)
    return ans
python
def process_input(self, question):
    """
    takes a question and returns the best answer based on known skills
    """
    ans = ''
    if self.status == 'EXIT':
        print('bye')
        sys.exit()
    if '?' in question:
        ans = self.info.find_answer(question)
    elif question.startswith(':LIST'):
        ans = 'List of Raw Input\n'
        for i in self.info.raw_input:
            ans += str(i) + '\n'
    else:
        #ans = 'I dont'' know'
        ans = 'Adding info..'
        self.info.raw_input.append(question)
    self.lg.record_process('aggie.py', 'Question > ' + question)
    self.lg.record_process('aggie.py', 'Answer > ' + ans)
    return ans
[ "def", "process_input", "(", "self", ",", "question", ")", ":", "ans", "=", "''", "if", "self", ".", "status", "==", "'EXIT'", ":", "print", "(", "'bye'", ")", "sys", ".", "exit", "(", ")", "if", "'?'", "in", "question", ":", "ans", "=", "self", ".", "info", ".", "find_answer", "(", "question", ")", "elif", "question", ".", "startswith", "(", "':LIST'", ")", ":", "ans", "=", "'List of Raw Input\\n'", "for", "i", "in", "self", ".", "info", ".", "raw_input", ":", "ans", "+=", "str", "(", "i", ")", "+", "'\\n'", "else", ":", "#ans = 'I dont'' know'", "ans", "=", "'Adding info..'", "self", ".", "info", ".", "raw_input", ".", "append", "(", "question", ")", "self", ".", "lg", ".", "record_process", "(", "'aggie.py'", ",", "'Question > '", "+", "question", ")", "self", ".", "lg", ".", "record_process", "(", "'aggie.py'", ",", "'Answer > '", "+", "ans", ")", "return", "ans" ]
takes a question and returns the best answer based on known skills
[ "takes", "a", "question", "and", "returns", "the", "best", "answer", "based", "on", "known", "skills" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/aggie/aggie.py#L60-L83
train
acutesoftware/AIKIF
aikif/web_app/page_data.py
show_data_file
def show_data_file(fname):
    """
    shows a data file in CSV format - all files live in CORE folder
    """
    txt = '<H2>' + fname + '</H2>'
    print(fname)
    #try:
    txt += web.read_csv_to_html_table(fname, 'Y')  # it is ok to use a table for actual table data
    #except:
    #    txt += '<H2>ERROR - cant read file</H2>'
    #txt += web.read_csv_to_html_list(fname)  # only use this for single column lists
    txt += '</div>\n'
    return txt
python
def show_data_file(fname):
    """
    shows a data file in CSV format - all files live in CORE folder
    """
    txt = '<H2>' + fname + '</H2>'
    print(fname)
    #try:
    txt += web.read_csv_to_html_table(fname, 'Y')  # it is ok to use a table for actual table data
    #except:
    #    txt += '<H2>ERROR - cant read file</H2>'
    #txt += web.read_csv_to_html_list(fname)  # only use this for single column lists
    txt += '</div>\n'
    return txt
[ "def", "show_data_file", "(", "fname", ")", ":", "txt", "=", "'<H2>'", "+", "fname", "+", "'</H2>'", "print", "(", "fname", ")", "#try:", "txt", "+=", "web", ".", "read_csv_to_html_table", "(", "fname", ",", "'Y'", ")", "# it is ok to use a table for actual table data", "#except:", "#\ttxt += '<H2>ERROR - cant read file</H2>'", "#txt += web.read_csv_to_html_list(fname) # only use this for single column lists", "txt", "+=", "'</div>\\n'", "return", "txt" ]
shows a data file in CSV format - all files live in CORE folder
[ "shows", "a", "data", "file", "in", "CSV", "format", "-", "all", "files", "live", "in", "CORE", "folder" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_data.py#L28-L39
train
Nachtfeuer/pipeline
spline/components/bash.py
managed_process
def managed_process(process):
    """Wrapper for subprocess.Popen to work across various Python versions,
    when using the with syntax."""
    try:
        yield process
    finally:
        for stream in [process.stdout, process.stdin, process.stderr]:
            if stream:
                stream.close()
        process.wait()
python
def managed_process(process):
    """Wrapper for subprocess.Popen to work across various Python versions,
    when using the with syntax."""
    try:
        yield process
    finally:
        for stream in [process.stdout, process.stdin, process.stderr]:
            if stream:
                stream.close()
        process.wait()
[ "def", "managed_process", "(", "process", ")", ":", "try", ":", "yield", "process", "finally", ":", "for", "stream", "in", "[", "process", ".", "stdout", ",", "process", ".", "stdin", ",", "process", ".", "stderr", "]", ":", "if", "stream", ":", "stream", ".", "close", "(", ")", "process", ".", "wait", "(", ")" ]
Wrapper for subprocess.Popen to work across various Python versions, when using the with syntax.
[ "Wrapper", "for", "subprocess", ".", "Popen", "to", "work", "across", "various", "Python", "versions", "when", "using", "the", "with", "syntax", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L33-L41
train
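managed_process is a generator, so in the spline source it is presumably wrapped with contextlib.contextmanager to be usable in a with statement. A self-contained sketch under that assumption (the echo command assumes a POSIX environment):

import subprocess
from contextlib import contextmanager

@contextmanager
def managed_process(process):
    try:
        yield process
    finally:
        # close whichever pipes were opened, then reap the child
        for stream in [process.stdout, process.stdin, process.stderr]:
            if stream:
                stream.close()
        process.wait()

with managed_process(subprocess.Popen(['echo', 'hello'],
                                      stdout=subprocess.PIPE)) as proc:
    print(proc.stdout.read().decode().strip())   # -> hello
# on exit the streams are closed and the process is waited on,
# even if the body raised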
Nachtfeuer/pipeline
spline/components/bash.py
Bash.get_temporary_scripts_path
def get_temporary_scripts_path(self):
    """
    Get path for temporary scripts.

    Returns:
        str: path for temporary scripts or None if not set
    """
    result = None
    if len(self.config.temporary_scripts_path) > 0:
        if os.path.isdir(self.config.temporary_scripts_path):
            result = self.config.temporary_scripts_path
    return result
python
def get_temporary_scripts_path(self):
    """
    Get path for temporary scripts.

    Returns:
        str: path for temporary scripts or None if not set
    """
    result = None
    if len(self.config.temporary_scripts_path) > 0:
        if os.path.isdir(self.config.temporary_scripts_path):
            result = self.config.temporary_scripts_path
    return result
[ "def", "get_temporary_scripts_path", "(", "self", ")", ":", "result", "=", "None", "if", "len", "(", "self", ".", "config", ".", "temporary_scripts_path", ")", ">", "0", ":", "if", "os", ".", "path", ".", "isdir", "(", "self", ".", "config", ".", "temporary_scripts_path", ")", ":", "result", "=", "self", ".", "config", ".", "temporary_scripts_path", "return", "result" ]
Get path for temporary scripts.

Returns:
    str: path for temporary scripts or None if not set
[ "Get", "path", "for", "temporary", "scripts", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L84-L95
train
Nachtfeuer/pipeline
spline/components/bash.py
Bash.create_file_for
def create_file_for(self, script):
    """
    Create a temporary, executable bash file.

    It also renders the given script (string) with the model and the
    provided environment variables, and optionally also an item when
    using the B{with} field.

    Args:
        script (str): either path and filename or Bash code.

    Returns:
        str: path and filename of a temporary file.
    """
    temp = tempfile.NamedTemporaryFile(
        prefix="pipeline-script-", mode='w+t', suffix=".sh",
        delete=False, dir=self.get_temporary_scripts_path())
    self.update_environment_variables(temp.name)
    rendered_script = render(script, model=self.config.model, env=self.env,
                             item=self.config.item, variables=self.config.variables)
    if rendered_script is None:
        self.success = False
        temp.close()
        os.remove(temp.name)
        return None

    to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
    if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
        with open(rendered_script) as handle:
            content = str(handle.read())
            temp.writelines(content)
    else:
        temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
        temp.write(to_file_map[sys.version_info.major](rendered_script))
    temp.close()
    # make Bash script executable
    os.chmod(temp.name, 0o700)
    return temp.name
python
def create_file_for(self, script):
    """
    Create a temporary, executable bash file.

    It also renders the given script (string) with the model and the
    provided environment variables, and optionally also an item when
    using the B{with} field.

    Args:
        script (str): either path and filename or Bash code.

    Returns:
        str: path and filename of a temporary file.
    """
    temp = tempfile.NamedTemporaryFile(
        prefix="pipeline-script-", mode='w+t', suffix=".sh",
        delete=False, dir=self.get_temporary_scripts_path())
    self.update_environment_variables(temp.name)
    rendered_script = render(script, model=self.config.model, env=self.env,
                             item=self.config.item, variables=self.config.variables)
    if rendered_script is None:
        self.success = False
        temp.close()
        os.remove(temp.name)
        return None

    to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
    if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
        with open(rendered_script) as handle:
            content = str(handle.read())
            temp.writelines(content)
    else:
        temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
        temp.write(to_file_map[sys.version_info.major](rendered_script))
    temp.close()
    # make Bash script executable
    os.chmod(temp.name, 0o700)
    return temp.name
[ "def", "create_file_for", "(", "self", ",", "script", ")", ":", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "\"pipeline-script-\"", ",", "mode", "=", "'w+t'", ",", "suffix", "=", "\".sh\"", ",", "delete", "=", "False", ",", "dir", "=", "self", ".", "get_temporary_scripts_path", "(", ")", ")", "self", ".", "update_environment_variables", "(", "temp", ".", "name", ")", "rendered_script", "=", "render", "(", "script", ",", "model", "=", "self", ".", "config", ".", "model", ",", "env", "=", "self", ".", "env", ",", "item", "=", "self", ".", "config", ".", "item", ",", "variables", "=", "self", ".", "config", ".", "variables", ")", "if", "rendered_script", "is", "None", ":", "self", ".", "success", "=", "False", "temp", ".", "close", "(", ")", "os", ".", "remove", "(", "temp", ".", "name", ")", "return", "None", "to_file_map", "=", "{", "2", ":", "lambda", "s", ":", "s", ".", "encode", "(", "'utf-8'", ")", ",", "3", ":", "lambda", "s", ":", "s", "}", "if", "all", "(", "ord", "(", "ch", ")", "<", "128", "for", "ch", "in", "rendered_script", ")", "and", "os", ".", "path", ".", "isfile", "(", "rendered_script", ")", ":", "with", "open", "(", "rendered_script", ")", "as", "handle", ":", "content", "=", "str", "(", "handle", ".", "read", "(", ")", ")", "temp", ".", "writelines", "(", "content", ")", "else", ":", "temp", ".", "write", "(", "u\"#!/bin/bash\\n%s\"", "%", "self", ".", "render_bash_options", "(", ")", ")", "temp", ".", "write", "(", "to_file_map", "[", "sys", ".", "version_info", ".", "major", "]", "(", "rendered_script", ")", ")", "temp", ".", "close", "(", ")", "# make Bash script executable", "os", ".", "chmod", "(", "temp", ".", "name", ",", "0o700", ")", "return", "temp", ".", "name" ]
Create a temporary, executable bash file.

It also renders the given script (string) with the model and the
provided environment variables, and optionally also an item when
using the B{with} field.

Args:
    script (str): either path and filename or Bash code.

Returns:
    str: path and filename of a temporary file.
[ "Create", "a", "temporary", "executable", "bash", "file", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L97-L136
train
Nachtfeuer/pipeline
spline/components/bash.py
Bash.render_bash_options
def render_bash_options(self):
    """Rendering Bash options."""
    options = ''
    if self.config.debug:
        options += "set -x\n"
    if self.config.strict:
        options += "set -euo pipefail\n"
    return options
python
def render_bash_options(self):
    """Rendering Bash options."""
    options = ''
    if self.config.debug:
        options += "set -x\n"
    if self.config.strict:
        options += "set -euo pipefail\n"
    return options
[ "def", "render_bash_options", "(", "self", ")", ":", "options", "=", "''", "if", "self", ".", "config", ".", "debug", ":", "options", "+=", "\"set -x\\n\"", "if", "self", ".", "config", ".", "strict", ":", "options", "+=", "\"set -euo pipefail\\n\"", "return", "options" ]
Rendering Bash options.
[ "Rendering", "Bash", "options", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L138-L145
train
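The two flags emitted by render_bash_options change Bash failure behaviour: set -x traces each command, and set -euo pipefail aborts on errors, on unset variables, and on failures anywhere in a pipeline. A quick check of the emitted header, with config mocked via SimpleNamespace (an assumption for illustration):

from types import SimpleNamespace

class BashDemo:
    def __init__(self, debug, strict):
        self.config = SimpleNamespace(debug=debug, strict=strict)

    def render_bash_options(self):
        options = ''
        if self.config.debug:
            options += "set -x\n"
        if self.config.strict:
            options += "set -euo pipefail\n"
        return options

print(BashDemo(debug=True, strict=True).render_bash_options())
# set -x
# set -euo pipefail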
Nachtfeuer/pipeline
spline/components/bash.py
Bash.process_file
def process_file(self, filename):
    """Processing one file."""
    if self.config.dry_run:
        if not self.config.internal:
            self.logger.info("Dry run mode for script %s", filename)
        with open(filename) as handle:
            for line in handle:
                yield line[0:-1] if line[-1] == '\n' else line
    else:
        if not self.config.internal:
            self.logger.info("Running script %s", filename)
        for line in self.process_script(filename):
            yield line
python
def process_file(self, filename):
    """Processing one file."""
    if self.config.dry_run:
        if not self.config.internal:
            self.logger.info("Dry run mode for script %s", filename)
        with open(filename) as handle:
            for line in handle:
                yield line[0:-1] if line[-1] == '\n' else line
    else:
        if not self.config.internal:
            self.logger.info("Running script %s", filename)
        for line in self.process_script(filename):
            yield line
[ "def", "process_file", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "config", ".", "dry_run", ":", "if", "not", "self", ".", "config", ".", "internal", ":", "self", ".", "logger", ".", "info", "(", "\"Dry run mode for script %s\"", ",", "filename", ")", "with", "open", "(", "filename", ")", "as", "handle", ":", "for", "line", "in", "handle", ":", "yield", "line", "[", "0", ":", "-", "1", "]", "if", "line", "[", "-", "1", "]", "==", "'\\n'", "else", "line", "else", ":", "if", "not", "self", ".", "config", ".", "internal", ":", "self", ".", "logger", ".", "info", "(", "\"Running script %s\"", ",", "filename", ")", "for", "line", "in", "self", ".", "process_script", "(", "filename", ")", ":", "yield", "line" ]
Processing one file.
[ "Processing", "one", "file", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L170-L182
train
sethmlarson/selectors2
selectors2.py
BaseSelector.unregister
def unregister(self, fileobj):
    """ Unregister a file object from being monitored. """
    try:
        key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
    except KeyError:
        raise KeyError("{0!r} is not registered".format(fileobj))
    # Getting the fileno of a closed socket on Windows errors with EBADF.
    except socket.error as err:
        if err.errno != errno.EBADF:
            raise
        else:
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    self._fd_to_key.pop(key.fd)
                    break
            else:
                raise KeyError("{0!r} is not registered".format(fileobj))
    return key
python
def unregister(self, fileobj):
    """ Unregister a file object from being monitored. """
    try:
        key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
    except KeyError:
        raise KeyError("{0!r} is not registered".format(fileobj))
    # Getting the fileno of a closed socket on Windows errors with EBADF.
    except socket.error as err:
        if err.errno != errno.EBADF:
            raise
        else:
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    self._fd_to_key.pop(key.fd)
                    break
            else:
                raise KeyError("{0!r} is not registered".format(fileobj))
    return key
[ "def", "unregister", "(", "self", ",", "fileobj", ")", ":", "try", ":", "key", "=", "self", ".", "_fd_to_key", ".", "pop", "(", "self", ".", "_fileobj_lookup", "(", "fileobj", ")", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"{0!r} is not registered\"", ".", "format", "(", "fileobj", ")", ")", "# Getting the fileno of a closed socket on Windows errors with EBADF.", "except", "socket", ".", "error", "as", "err", ":", "if", "err", ".", "errno", "!=", "errno", ".", "EBADF", ":", "raise", "else", ":", "for", "key", "in", "self", ".", "_fd_to_key", ".", "values", "(", ")", ":", "if", "key", ".", "fileobj", "is", "fileobj", ":", "self", ".", "_fd_to_key", ".", "pop", "(", "key", ".", "fd", ")", "break", "else", ":", "raise", "KeyError", "(", "\"{0!r} is not registered\"", ".", "format", "(", "fileobj", ")", ")", "return", "key" ]
Unregister a file object from being monitored.
[ "Unregister", "a", "file", "object", "from", "being", "monitored", "." ]
9bdf3d86578d1a84738cac6eb4127281b75bd669
https://github.com/sethmlarson/selectors2/blob/9bdf3d86578d1a84738cac6eb4127281b75bd669/selectors2.py#L161-L179
train
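The Windows fallback in unregister leans on Python's for/else: the else arm fires only when the loop completes without break. A standalone demonstration of that control flow (registry and find_fd are hypothetical names, not from selectors2):

registry = {3: 'socket-a', 7: 'socket-b'}

def find_fd(name):
    for fd, obj in registry.items():
        if obj == name:
            break        # found: the else arm is skipped
    else:
        raise KeyError('{0!r} is not registered'.format(name))
    return fd

print(find_fd('socket-b'))   # -> 7
try:
    find_fd('socket-c')
except KeyError as err:
    print(err)               # -> "'socket-c' is not registered"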
acutesoftware/AIKIF
aikif/comms.py
Message.prepare
def prepare(self):
    """
    does some basic validation
    """
    try:
        assert(type(self.sender) is Channel)
        assert(type(self.receiver) is Channel)
        return True
    except:
        return False
python
def prepare(self):
    """
    does some basic validation
    """
    try:
        assert(type(self.sender) is Channel)
        assert(type(self.receiver) is Channel)
        return True
    except:
        return False
[ "def", "prepare", "(", "self", ")", ":", "try", ":", "assert", "(", "type", "(", "self", ".", "sender", ")", "is", "Channel", ")", "assert", "(", "type", "(", "self", ".", "receiver", ")", "is", "Channel", ")", "return", "True", "except", ":", "return", "False" ]
does some basic validation
[ "does", "some", "basic", "validation" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/comms.py#L97-L106
train
acutesoftware/AIKIF
aikif/comms.py
Message.send
def send(self):
    """
    this handles the message transmission
    """
    #print('sending message to ' + self.receiver)
    if self.prepare():
        ## TODO - send message via library
        print('sending message')
        lg.record_process('comms.py', 'Sending message ' + self.title)
        return True
    else:
        return False
python
def send(self):
    """
    this handles the message transmission
    """
    #print('sending message to ' + self.receiver)
    if self.prepare():
        ## TODO - send message via library
        print('sending message')
        lg.record_process('comms.py', 'Sending message ' + self.title)
        return True
    else:
        return False
[ "def", "send", "(", "self", ")", ":", "#print('sending message to ' + self.receiver)", "if", "self", ".", "prepare", "(", ")", ":", "## TODO - send message via library", "print", "(", "'sending message'", ")", "lg", ".", "record_process", "(", "'comms.py'", ",", "'Sending message '", "+", "self", ".", "title", ")", "return", "True", "else", ":", "return", "False" ]
this handles the message transmission
[ "this", "handles", "the", "message", "transmission" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/comms.py#L109-L121
train
acutesoftware/AIKIF
aikif/index.py
buildIndex
def buildIndex(ipFile, ndxFile, append='Y', silent='N', useShortFileName='Y'):
    """
    this creates an index of a text file specifically for use in AIKIF;
    separates the ontology descriptions highest, followed by values, and
    lastly a final pass to get all delimited word parts.
    """
    if silent == 'N':
        pass
    if append == 'N':
        try:
            os.remove(ndxFile)
        except Exception as ex:
            print('file already deleted - ignore' + str(ex))

    # note: the third delimiter is the literal unit-separator character
    # (chr(31)), written here as '\x1f' because it is invisible in print
    delims = [',', chr(31), '\x1f', '$', '&', '"', '%', '/', '\\', '.', ';', ':',
              '!', '?', '-', '_', ' ', '\n', '*', '\'', '(', ')', '[', ']', '{', '}']

    # 1st pass - index the ontologies, including 2 depths up (later - TODO)
    #buildIndex(ipFile, ndxFile, ' ', 1, 'Y')

    # 2nd pass - use ALL delims to catch each word as part of hyphenated - eg AI Build py
    totWords, totLines, uniqueWords = getWordList(ipFile, delims)
    AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName)
    if silent == 'N':
        print(format_op_row(ipFile, totLines, totWords, uniqueWords))
        show('uniqueWords', uniqueWords, 5)
    DisplayIndexAsDictionary(uniqueWords)
python
def buildIndex(ipFile, ndxFile, append='Y', silent='N', useShortFileName='Y'):
    """
    this creates an index of a text file specifically for use in AIKIF;
    separates the ontology descriptions highest, followed by values, and
    lastly a final pass to get all delimited word parts.
    """
    if silent == 'N':
        pass
    if append == 'N':
        try:
            os.remove(ndxFile)
        except Exception as ex:
            print('file already deleted - ignore' + str(ex))

    # note: the third delimiter is the literal unit-separator character
    # (chr(31)), written here as '\x1f' because it is invisible in print
    delims = [',', chr(31), '\x1f', '$', '&', '"', '%', '/', '\\', '.', ';', ':',
              '!', '?', '-', '_', ' ', '\n', '*', '\'', '(', ')', '[', ']', '{', '}']

    # 1st pass - index the ontologies, including 2 depths up (later - TODO)
    #buildIndex(ipFile, ndxFile, ' ', 1, 'Y')

    # 2nd pass - use ALL delims to catch each word as part of hyphenated - eg AI Build py
    totWords, totLines, uniqueWords = getWordList(ipFile, delims)
    AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName)
    if silent == 'N':
        print(format_op_row(ipFile, totLines, totWords, uniqueWords))
        show('uniqueWords', uniqueWords, 5)
    DisplayIndexAsDictionary(uniqueWords)
[ "def", "buildIndex", "(", "ipFile", ",", "ndxFile", ",", "append", "=", "'Y'", ",", "silent", "=", "'N'", ",", "useShortFileName", "=", "'Y'", ")", ":", "if", "silent", "==", "'N'", ":", "pass", "if", "append", "==", "'N'", ":", "try", ":", "os", ".", "remove", "(", "ndxFile", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'file already deleted - ignore'", "+", "str", "(", "ex", ")", ")", "delims", "=", "[", "','", ",", "chr", "(", "31", ")", ",", "'\u001f'", ",", "'$'", ",", "'&'", ",", "'\"'", ",", "'%'", ",", "'/'", ",", "'\\\\'", ",", "'.'", ",", "';'", ",", "':'", ",", "'!'", ",", "'?'", ",", "'-'", ",", "'_'", ",", "' '", ",", "'\\n'", ",", "'*'", ",", "'\\''", ",", "'('", ",", "')'", ",", "'['", ",", "']'", ",", "'{'", ",", "'}'", "]", "# 1st pass - index the ontologies, including 2 depths up (later - TODO)", "#buildIndex(ipFile, ndxFile, ' ', 1, 'Y')", "# 2nd pass - use ALL delims to catch each word as part of hyphenated - eg AI Build py", "totWords", ",", "totLines", ",", "uniqueWords", "=", "getWordList", "(", "ipFile", ",", "delims", ")", "AppendIndexDictionaryToFile", "(", "uniqueWords", ",", "ndxFile", ",", "ipFile", ",", "useShortFileName", ")", "if", "silent", "==", "'N'", ":", "print", "(", "format_op_row", "(", "ipFile", ",", "totLines", ",", "totWords", ",", "uniqueWords", ")", ")", "show", "(", "'uniqueWords'", ",", "uniqueWords", ",", "5", ")", "DisplayIndexAsDictionary", "(", "uniqueWords", ")" ]
this creates an index of a text file specifically for use in AIKIF; separates the ontology descriptions highest, followed by values, and lastly a final pass to get all delimited word parts.
[ "this", "creates", "an", "index", "of", "a", "text", "file", "specifically", "for", "use", "in", "AIKIF", "separates", "the", "ontology", "descriptions", "highest", "followed", "by", "values", "and", "lastly", "a", "final", "pass", "to", "get", "all", "delimited", "word", "parts", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L61-L87
train
acutesoftware/AIKIF
aikif/index.py
format_op_row
def format_op_row(ipFile, totLines, totWords, uniqueWords):
    """
    Format the output row with stats
    """
    txt = os.path.basename(ipFile).ljust(36) + ' '
    txt += str(totLines).rjust(7) + ' '
    txt += str(totWords).rjust(7) + ' '
    txt += str(len(uniqueWords)).rjust(7) + ' '
    return txt
python
def format_op_row(ipFile, totLines, totWords, uniqueWords):
    """
    Format the output row with stats
    """
    txt = os.path.basename(ipFile).ljust(36) + ' '
    txt += str(totLines).rjust(7) + ' '
    txt += str(totWords).rjust(7) + ' '
    txt += str(len(uniqueWords)).rjust(7) + ' '
    return txt
[ "def", "format_op_row", "(", "ipFile", ",", "totLines", ",", "totWords", ",", "uniqueWords", ")", ":", "txt", "=", "os", ".", "path", ".", "basename", "(", "ipFile", ")", ".", "ljust", "(", "36", ")", "+", "' '", "txt", "+=", "str", "(", "totLines", ")", ".", "rjust", "(", "7", ")", "+", "' '", "txt", "+=", "str", "(", "totWords", ")", ".", "rjust", "(", "7", ")", "+", "' '", "txt", "+=", "str", "(", "len", "(", "uniqueWords", ")", ")", ".", "rjust", "(", "7", ")", "+", "' '", "return", "txt" ]
Format the output row with stats
[ "Format", "the", "output", "row", "with", "stats" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L89-L97
train
acutesoftware/AIKIF
aikif/index.py
format_op_hdr
def format_op_hdr():
    """
    Build the header
    """
    txt = 'Base Filename'.ljust(36) + ' '
    txt += 'Lines'.rjust(7) + ' '
    txt += 'Words'.rjust(7) + ' '
    txt += 'Unique'.ljust(8) + ''
    return txt
python
def format_op_hdr():
    """
    Build the header
    """
    txt = 'Base Filename'.ljust(36) + ' '
    txt += 'Lines'.rjust(7) + ' '
    txt += 'Words'.rjust(7) + ' '
    txt += 'Unique'.ljust(8) + ''
    return txt
[ "def", "format_op_hdr", "(", ")", ":", "txt", "=", "'Base Filename'", ".", "ljust", "(", "36", ")", "+", "' '", "txt", "+=", "'Lines'", ".", "rjust", "(", "7", ")", "+", "' '", "txt", "+=", "'Words'", ".", "rjust", "(", "7", ")", "+", "' '", "txt", "+=", "'Unique'", ".", "ljust", "(", "8", ")", "+", "''", "return", "txt" ]
Build the header
[ "Build", "the", "header" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L99-L107
train
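format_op_hdr and format_op_row use matching ljust/rjust widths (36/7/7/8) so the header lines up over the rows. A quick self-contained demonstration with made-up counts:

import os

def format_op_hdr():
    return ('Base Filename'.ljust(36) + ' ' + 'Lines'.rjust(7) + ' '
            + 'Words'.rjust(7) + ' ' + 'Unique'.ljust(8))

def format_op_row(ipFile, totLines, totWords, uniqueWords):
    return (os.path.basename(ipFile).ljust(36) + ' ' + str(totLines).rjust(7)
            + ' ' + str(totWords).rjust(7) + ' ' + str(len(uniqueWords)).rjust(7) + ' ')

print(format_op_hdr())
print(format_op_row('/docs/readme.txt', 120, 800, {'alpha': '1', 'beta': '2'}))
# Base Filename                          Lines   Words Unique
# readme.txt                               120     800       2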
acutesoftware/AIKIF
aikif/index.py
AppendIndexDictionaryToFile
def AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName='Y'):
    """
    Save the list of unique words to the master list
    """
    if useShortFileName == 'Y':
        f = os.path.basename(ipFile)
    else:
        f = ipFile
    with open(ndxFile, "a", encoding='utf-8', errors='replace') as ndx:
        word_keys = uniqueWords.keys()
        #uniqueWords.sort()
        for word in sorted(word_keys):
            if word != '':
                line_nums = uniqueWords[word]
                ndx.write(f + ', ' + word + ', ')
                for line_num in line_nums:
                    ndx.write(str(line_num))
                ndx.write('\n')
python
def AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName='Y'):
    """
    Save the list of unique words to the master list
    """
    if useShortFileName == 'Y':
        f = os.path.basename(ipFile)
    else:
        f = ipFile
    with open(ndxFile, "a", encoding='utf-8', errors='replace') as ndx:
        word_keys = uniqueWords.keys()
        #uniqueWords.sort()
        for word in sorted(word_keys):
            if word != '':
                line_nums = uniqueWords[word]
                ndx.write(f + ', ' + word + ', ')
                for line_num in line_nums:
                    ndx.write(str(line_num))
                ndx.write('\n')
[ "def", "AppendIndexDictionaryToFile", "(", "uniqueWords", ",", "ndxFile", ",", "ipFile", ",", "useShortFileName", "=", "'Y'", ")", ":", "if", "useShortFileName", "==", "'Y'", ":", "f", "=", "os", ".", "path", ".", "basename", "(", "ipFile", ")", "else", ":", "f", "=", "ipFile", "with", "open", "(", "ndxFile", ",", "\"a\"", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "ndx", ":", "word_keys", "=", "uniqueWords", ".", "keys", "(", ")", "#uniqueWords.sort()", "for", "word", "in", "sorted", "(", "word_keys", ")", ":", "if", "word", "!=", "''", ":", "line_nums", "=", "uniqueWords", "[", "word", "]", "ndx", ".", "write", "(", "f", "+", "', '", "+", "word", "+", "', '", ")", "for", "line_num", "in", "line_nums", ":", "ndx", ".", "write", "(", "str", "(", "line_num", ")", ")", "ndx", ".", "write", "(", "'\\n'", ")" ]
Save the list of unique words to the master list
[ "Save", "the", "list", "of", "unique", "words", "to", "the", "master", "list" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L110-L127
train
acutesoftware/AIKIF
aikif/index.py
DisplayIndexAsDictionary
def DisplayIndexAsDictionary(word_occurrences):
    """
    print the index as a dict
    """
    word_keys = word_occurrences.keys()
    for num, word in enumerate(word_keys):
        line_nums = word_occurrences[word]
        print(word + " ")
        if num > 3:
            break
python
def DisplayIndexAsDictionary(word_occurrences):
    """
    print the index as a dict
    """
    word_keys = word_occurrences.keys()
    for num, word in enumerate(word_keys):
        line_nums = word_occurrences[word]
        print(word + " ")
        if num > 3:
            break
[ "def", "DisplayIndexAsDictionary", "(", "word_occurrences", ")", ":", "word_keys", "=", "word_occurrences", ".", "keys", "(", ")", "for", "num", ",", "word", "in", "enumerate", "(", "word_keys", ")", ":", "line_nums", "=", "word_occurrences", "[", "word", "]", "print", "(", "word", "+", "\" \"", ")", "if", "num", ">", "3", ":", "break" ]
print the index as a dict
[ "print", "the", "index", "as", "a", "dict" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L129-L138
train
acutesoftware/AIKIF
aikif/index.py
show
def show(title, lst, full=-1):
    """
    for testing, simply shows a list details
    """
    txt = title + ' (' + str(len(lst)) + ') items :\n '
    num = 0
    for i in lst:
        if full == -1 or num < full:
            if type(i) is str:
                txt = txt + i + ',\n '
            else:
                txt = txt + i + ', ['
                for j in i:
                    txt = txt + j + ', '
                txt = txt + ']\n'
        num = num + 1
    try:
        print(txt)
    except Exception as ex:
        print('index.show() - cant print line, error ' + str(ex))
python
def show(title, lst, full=-1):
    """
    for testing, simply shows a list details
    """
    txt = title + ' (' + str(len(lst)) + ') items :\n '
    num = 0
    for i in lst:
        if full == -1 or num < full:
            if type(i) is str:
                txt = txt + i + ',\n '
            else:
                txt = txt + i + ', ['
                for j in i:
                    txt = txt + j + ', '
                txt = txt + ']\n'
        num = num + 1
    try:
        print(txt)
    except Exception as ex:
        print('index.show() - cant print line, error ' + str(ex))
[ "def", "show", "(", "title", ",", "lst", ",", "full", "=", "-", "1", ")", ":", "txt", "=", "title", "+", "' ('", "+", "str", "(", "len", "(", "lst", ")", ")", "+", "') items :\\n '", "num", "=", "0", "for", "i", "in", "lst", ":", "if", "full", "==", "-", "1", "or", "num", "<", "full", ":", "if", "type", "(", "i", ")", "is", "str", ":", "txt", "=", "txt", "+", "i", "+", "',\\n '", "else", ":", "txt", "=", "txt", "+", "i", "+", "', ['", "for", "j", "in", "i", ":", "txt", "=", "txt", "+", "j", "+", "', '", "txt", "=", "txt", "+", "']\\n'", "num", "=", "num", "+", "1", "try", ":", "print", "(", "txt", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'index.show() - cant print line, error '", "+", "str", "(", "ex", ")", ")" ]
for testing, simply shows a list details
[ "for", "testing", "simply", "shows", "a", "list", "details" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L140-L159
train
acutesoftware/AIKIF
aikif/index.py
getWordList
def getWordList(ipFile, delim):
    """
    extract a unique list of words and have line numbers that word appears
    """
    indexedWords = {}
    totWords = 0
    totLines = 0
    with codecs.open(ipFile, "r", encoding='utf-8', errors='replace') as f:
        for line in f:
            totLines = totLines + 1
            words = multi_split(line, delim)
            totWords = totWords + len(words)
            for word in words:
                cleanedWord = word.lower().strip()
                if cleanedWord not in indexedWords:
                    indexedWords[cleanedWord] = str(totLines)
                else:
                    indexedWords[cleanedWord] = indexedWords[cleanedWord] + ' ' + str(totLines)
    return totWords, totLines, indexedWords
python
def getWordList(ipFile, delim):
    """
    extract a unique list of words and have line numbers that word appears
    """
    indexedWords = {}
    totWords = 0
    totLines = 0
    with codecs.open(ipFile, "r", encoding='utf-8', errors='replace') as f:
        for line in f:
            totLines = totLines + 1
            words = multi_split(line, delim)
            totWords = totWords + len(words)
            for word in words:
                cleanedWord = word.lower().strip()
                if cleanedWord not in indexedWords:
                    indexedWords[cleanedWord] = str(totLines)
                else:
                    indexedWords[cleanedWord] = indexedWords[cleanedWord] + ' ' + str(totLines)
    return totWords, totLines, indexedWords
[ "def", "getWordList", "(", "ipFile", ",", "delim", ")", ":", "indexedWords", "=", "{", "}", "totWords", "=", "0", "totLines", "=", "0", "with", "codecs", ".", "open", "(", "ipFile", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "totLines", "=", "totLines", "+", "1", "words", "=", "multi_split", "(", "line", ",", "delim", ")", "totWords", "=", "totWords", "+", "len", "(", "words", ")", "for", "word", "in", "words", ":", "cleanedWord", "=", "word", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "cleanedWord", "not", "in", "indexedWords", ":", "indexedWords", "[", "cleanedWord", "]", "=", "str", "(", "totLines", ")", "else", ":", "indexedWords", "[", "cleanedWord", "]", "=", "indexedWords", "[", "cleanedWord", "]", "+", "' '", "+", "str", "(", "totLines", ")", "return", "totWords", ",", "totLines", ",", "indexedWords" ]
extract a unique list of words and have line numbers that word appears
[ "extract", "a", "unique", "list", "of", "words", "and", "have", "line", "numbers", "that", "word", "appears" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L161-L179
train
acutesoftware/AIKIF
aikif/index.py
multi_split
def multi_split(txt, delims):
    """
    split by multiple delimiters
    """
    res = [txt]
    for delimChar in delims:
        txt, res = res, []
        for word in txt:
            if len(word) > 1:
                res += word.split(delimChar)
    return res
python
def multi_split(txt, delims):
    """
    split by multiple delimiters
    """
    res = [txt]
    for delimChar in delims:
        txt, res = res, []
        for word in txt:
            if len(word) > 1:
                res += word.split(delimChar)
    return res
[ "def", "multi_split", "(", "txt", ",", "delims", ")", ":", "res", "=", "[", "txt", "]", "for", "delimChar", "in", "delims", ":", "txt", ",", "res", "=", "res", ",", "[", "]", "for", "word", "in", "txt", ":", "if", "len", "(", "word", ")", ">", "1", ":", "res", "+=", "word", ".", "split", "(", "delimChar", ")", "return", "res" ]
split by multiple delimiters
[ "split", "by", "multiple", "delimiters" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L181-L191
train
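multi_split re-splits the whole working list on each delimiter in turn. Tracing a small input shows the behaviour, including a quirk: the len(word) > 1 guard drops short fragments between passes, but empty strings produced on the final pass survive:

def multi_split(txt, delims):
    res = [txt]
    for delimChar in delims:
        # swap the lists: re-split every fragment on the next delimiter
        txt, res = res, []
        for word in txt:
            if len(word) > 1:
                res += word.split(delimChar)
    return res

print(multi_split('AI-Build.py, v2', [',', '-', '.', ' ']))
# -> ['AI', 'Build', 'py', '', 'v2']
#    the '' comes from splitting ' v2' on the final (space) pass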
Nachtfeuer/pipeline
spline/components/script.py
Script.creator
def creator(entry, config):
    """Preparing and creating script."""
    script = render(config.script, model=config.model, env=config.env, item=config.item)

    temp = tempfile.NamedTemporaryFile(prefix="script-", suffix=".py", mode='w+t', delete=False)
    temp.writelines(script)
    temp.close()

    language = 'python' if 'type' not in entry else entry['type']
    template_file = os.path.join(os.path.dirname(__file__),
                                 'templates/%s-script.sh.j2' % language)
    with open(template_file) as handle:
        template = handle.read()

    config.script = render(template, script=temp.name)
    return Script(config)
python
def creator(entry, config):
    """Preparing and creating script."""
    script = render(config.script, model=config.model, env=config.env, item=config.item)
    temp = tempfile.NamedTemporaryFile(prefix="script-", suffix=".py", mode='w+t', delete=False)
    temp.writelines(script)
    temp.close()

    language = 'python' if 'type' not in entry else entry['type']
    template_file = os.path.join(os.path.dirname(__file__), 'templates/%s-script.sh.j2' % language)
    with open(template_file) as handle:
        template = handle.read()

    config.script = render(template, script=temp.name)
    return Script(config)
[ "def", "creator", "(", "entry", ",", "config", ")", ":", "script", "=", "render", "(", "config", ".", "script", ",", "model", "=", "config", ".", "model", ",", "env", "=", "config", ".", "env", ",", "item", "=", "config", ".", "item", ")", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "\"script-\"", ",", "suffix", "=", "\".py\"", ",", "mode", "=", "'w+t'", ",", "delete", "=", "False", ")", "temp", ".", "writelines", "(", "script", ")", "temp", ".", "close", "(", ")", "language", "=", "'python'", "if", "'type'", "not", "in", "entry", "else", "entry", "[", "'type'", "]", "template_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates/%s-script.sh.j2'", "%", "language", ")", "with", "open", "(", "template_file", ")", "as", "handle", ":", "template", "=", "handle", ".", "read", "(", ")", "config", ".", "script", "=", "render", "(", "template", ",", "script", "=", "temp", ".", "name", ")", "return", "Script", "(", "config", ")" ]
Preparing and creating script.
[ "Preparing", "and", "creating", "script", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/script.py#L43-L58
train
acutesoftware/AIKIF
aikif/cls_log.py
force_to_string
def force_to_string(unknown):
    """ converts an unknown type to string for display purposes. """
    result = ''
    if type(unknown) is str:
        result = unknown
    if type(unknown) is int:
        result = str(unknown)
    if type(unknown) is float:
        result = str(unknown)
    if type(unknown) is dict:
        result = Dict2String(unknown)
    if type(unknown) is list:
        result = List2String(unknown)
    return result
python
def force_to_string(unknown):
    """ converts an unknown type to string for display purposes. """
    result = ''
    if type(unknown) is str:
        result = unknown
    if type(unknown) is int:
        result = str(unknown)
    if type(unknown) is float:
        result = str(unknown)
    if type(unknown) is dict:
        result = Dict2String(unknown)
    if type(unknown) is list:
        result = List2String(unknown)
    return result
[ "def", "force_to_string", "(", "unknown", ")", ":", "result", "=", "''", "if", "type", "(", "unknown", ")", "is", "str", ":", "result", "=", "unknown", "if", "type", "(", "unknown", ")", "is", "int", ":", "result", "=", "str", "(", "unknown", ")", "if", "type", "(", "unknown", ")", "is", "float", ":", "result", "=", "str", "(", "unknown", ")", "if", "type", "(", "unknown", ")", "is", "dict", ":", "result", "=", "Dict2String", "(", "unknown", ")", "if", "type", "(", "unknown", ")", "is", "list", ":", "result", "=", "List2String", "(", "unknown", ")", "return", "result" ]
converts an unknown type to string for display purposes.
[ "converts", "and", "unknown", "type", "to", "string", "for", "display", "purposes", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L311-L327
train
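A hedged usage sketch; Dict2String and List2String are helpers defined elsewhere in cls_log.py, so only the scalar branches are shown here:

    print(force_to_string('done'))   # 'done'
    print(force_to_string(42))       # '42'
    print(force_to_string(3.14))     # '3.14'
    # dicts and lists are delegated to the module's Dict2String / List2String helpers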
acutesoftware/AIKIF
aikif/cls_log.py
Log.add_watch_point
def add_watch_point(self, string, rating, importance=5):
    """
    For a log session you can add as many watch points which
    are used in the aggregation and extraction of key things
    that happen. Each watch point has a rating (up to you and
    can range from success to total failure) and an importance
    for finer control of display
    """
    d = {}
    d['string'] = string
    d['rating'] = rating
    d['importance'] = importance
    self.watch_points.append(d)
python
def add_watch_point(self, string, rating, importance=5):
    """
    For a log session you can add as many watch points which
    are used in the aggregation and extraction of key things
    that happen. Each watch point has a rating (up to you and
    can range from success to total failure) and an importance
    for finer control of display
    """
    d = {}
    d['string'] = string
    d['rating'] = rating
    d['importance'] = importance
    self.watch_points.append(d)
[ "def", "add_watch_point", "(", "self", ",", "string", ",", "rating", ",", "importance", "=", "5", ")", ":", "d", "=", "{", "}", "d", "[", "'string'", "]", "=", "string", "d", "[", "'rating'", "]", "=", "rating", "d", "[", "'importance'", "]", "=", "importance", "self", ".", "watch_points", ".", "append", "(", "d", ")" ]
For a log session you can add as many watch points which
are used in the aggregation and extraction of key things
that happen. Each watch point has a rating (up to you and
can range from success to total failure) and an importance
for finer control of display
[ "For", "a", "log", "session", "you", "can", "add", "as", "many", "watch", "points", "which", "are", "used", "in", "the", "aggregation", "and", "extraction", "of", "key", "things", "that", "happen", ".", "Each", "watch", "point", "has", "a", "rating", "(", "up", "to", "you", "and", "can", "range", "from", "success", "to", "total", "failure", "and", "an", "importance", "for", "finer", "control", "of", "display" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L46-L59
train
acutesoftware/AIKIF
aikif/cls_log.py
Log.estimate_complexity
def estimate_complexity(self, x, y, z, n):
    """ calculates a rough guess of runtime based on product of parameters """
    num_calculations = x * y * z * n
    run_time = num_calculations / 100000  # a 2014 PC does about 100k calcs in a second (guess based on prior logs)
    return self.show_time_as_short_string(run_time)
python
def estimate_complexity(self, x, y, z, n):
    """ calculates a rough guess of runtime based on product of parameters """
    num_calculations = x * y * z * n
    run_time = num_calculations / 100000  # a 2014 PC does about 100k calcs in a second (guess based on prior logs)
    return self.show_time_as_short_string(run_time)
[ "def", "estimate_complexity", "(", "self", ",", "x", ",", "y", ",", "z", ",", "n", ")", ":", "num_calculations", "=", "x", "*", "y", "*", "z", "*", "n", "run_time", "=", "num_calculations", "/", "100000", "# a 2014 PC does about 100k calcs in a second (guess based on prior logs)", "return", "self", ".", "show_time_as_short_string", "(", "run_time", ")" ]
calculates a rough guess of runtime based on product of parameters
[ "calculates", "a", "rough", "guess", "of", "runtime", "based", "on", "product", "of", "parameters" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L89-L95
train
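A worked example with illustrative numbers (lg stands for a hypothetical Log instance):

    # 200 * 300 * 10 * 50 = 30,000,000 calculations
    # 30,000,000 / 100,000 = 300 seconds
    print(lg.estimate_complexity(200, 300, 10, 50))   # '5.0 minutes'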
acutesoftware/AIKIF
aikif/cls_log.py
Log.show_time_as_short_string
def show_time_as_short_string(self, seconds):
    """ converts seconds to a string in terms of seconds -> years to show complexity of algorithm """
    if seconds < 60:
        return str(seconds) + ' seconds'
    elif seconds < 3600:
        return str(round(seconds/60, 1)) + ' minutes'
    elif seconds < 3600*24:
        return str(round(seconds/3600, 1)) + ' hours'
    elif seconds < 3600*24*365:
        return str(round(seconds/(3600*24), 1)) + ' days'
    else:
        print('WARNING - this will take ' + str(seconds/(3600*24*365)) + ' YEARS to run' )
        return str(round(seconds/(3600*24*365), 1)) + ' years'
python
def show_time_as_short_string(self, seconds):
    """ converts seconds to a string in terms of seconds -> years to show complexity of algorithm """
    if seconds < 60:
        return str(seconds) + ' seconds'
    elif seconds < 3600:
        return str(round(seconds/60, 1)) + ' minutes'
    elif seconds < 3600*24:
        return str(round(seconds/3600, 1)) + ' hours'
    elif seconds < 3600*24*365:
        return str(round(seconds/(3600*24), 1)) + ' days'
    else:
        print('WARNING - this will take ' + str(seconds/(3600*24*365)) + ' YEARS to run' )
        return str(round(seconds/(3600*24*365), 1)) + ' years'
[ "def", "show_time_as_short_string", "(", "self", ",", "seconds", ")", ":", "if", "seconds", "<", "60", ":", "return", "str", "(", "seconds", ")", "+", "' seconds'", "elif", "seconds", "<", "3600", ":", "return", "str", "(", "round", "(", "seconds", "/", "60", ",", "1", ")", ")", "+", "' minutes'", "elif", "seconds", "<", "3600", "*", "24", ":", "return", "str", "(", "round", "(", "seconds", "/", "(", "60", "*", "24", ")", ",", "1", ")", ")", "+", "' hours'", "elif", "seconds", "<", "3600", "*", "24", "*", "365", ":", "return", "str", "(", "round", "(", "seconds", "/", "(", "3600", "*", "24", ")", ",", "1", ")", ")", "+", "' days'", "else", ":", "print", "(", "'WARNING - this will take '", "+", "str", "(", "seconds", "/", "(", "60", "*", "24", "*", "365", ")", ")", "+", "' YEARS to run'", ")", "return", "str", "(", "round", "(", "seconds", "/", "(", "60", "*", "24", "*", "365", ")", ",", "1", ")", ")", "+", "' years'" ]
converts seconds to a string in terms of seconds -> years to show complexity of algorithm
[ "converts", "seconds", "to", "a", "string", "in", "terms", "of", "seconds", "-", ">", "years", "to", "show", "complexity", "of", "algorithm" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L98-L113
train
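With the corrected divisors each branch matches its unit; for a hypothetical Log instance lg:

    print(lg.show_time_as_short_string(45))       # '45 seconds'
    print(lg.show_time_as_short_string(7200))     # '2.0 hours'   (7200 / 3600)
    print(lg.show_time_as_short_string(172800))   # '2.0 days'    (172800 / (3600*24))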
acutesoftware/AIKIF
aikif/cls_log.py
Log._log
def _log(self, fname, txt, prg=''):
    """ logs an entry to fname along with standard date and user details """
    if os.sep not in fname:
        fname = self.log_folder + os.sep + fname
    delim = ','
    q = '"'
    dte = TodayAsString()
    usr = GetUserName()
    hst = GetHostName()
    i = self.session_id
    if prg == '':
        prg = 'cls_log.log'
    logEntry = q + dte + q + delim + q + i + q + delim + q + usr + q + delim + q + hst + q + delim + q + prg + q + delim + q + txt + q + delim + '\n'
    with open(fname, "a", encoding='utf-8', errors='replace') as myfile:
        myfile.write(logEntry)
python
def _log(self, fname, txt, prg=''):
    """ logs an entry to fname along with standard date and user details """
    if os.sep not in fname:
        fname = self.log_folder + os.sep + fname
    delim = ','
    q = '"'
    dte = TodayAsString()
    usr = GetUserName()
    hst = GetHostName()
    i = self.session_id
    if prg == '':
        prg = 'cls_log.log'
    logEntry = q + dte + q + delim + q + i + q + delim + q + usr + q + delim + q + hst + q + delim + q + prg + q + delim + q + txt + q + delim + '\n'
    with open(fname, "a", encoding='utf-8', errors='replace') as myfile:
        myfile.write(logEntry)
[ "def", "_log", "(", "self", ",", "fname", ",", "txt", ",", "prg", "=", "''", ")", ":", "if", "os", ".", "sep", "not", "in", "fname", ":", "fname", "=", "self", ".", "log_folder", "+", "os", ".", "sep", "+", "fname", "delim", "=", "','", "q", "=", "'\"'", "dte", "=", "TodayAsString", "(", ")", "usr", "=", "GetUserName", "(", ")", "hst", "=", "GetHostName", "(", ")", "i", "=", "self", ".", "session_id", "if", "prg", "==", "''", ":", "prg", "=", "'cls_log.log'", "logEntry", "=", "q", "+", "dte", "+", "q", "+", "delim", "+", "q", "+", "i", "+", "q", "+", "delim", "+", "q", "+", "usr", "+", "q", "+", "delim", "+", "q", "+", "hst", "+", "q", "+", "delim", "+", "q", "+", "prg", "+", "q", "+", "delim", "+", "q", "+", "txt", "+", "q", "+", "delim", "+", "'\\n'", "with", "open", "(", "fname", ",", "\"a\"", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "myfile", ":", "myfile", ".", "write", "(", "logEntry", ")" ]
logs an entry to fname along with standard date and user details
[ "logs", "an", "entry", "to", "fname", "along", "with", "standard", "date", "and", "user", "details" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L115-L132
train
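Each entry is a quoted, comma-delimited row of date, session id, user, host, program and text, with a trailing delimiter. The field values below are illustrative; the exact date format comes from the module's TodayAsString helper:

    "2014-04-21 09:32:17","0001","duncan","HOMEPC","cls_log.log","some log text",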
acutesoftware/AIKIF
aikif/cls_log.py
Log.record_source
def record_source(self, src, prg=''):
    """
    function to collect raw data from the web and hard drive
    Examples - new source file for ontologies, email contacts list, folder for xmas photos
    """
    self._log(self.logFileSource, force_to_string(src), prg)
python
def record_source(self, src, prg=''):
    """
    function to collect raw data from the web and hard drive
    Examples - new source file for ontologies, email contacts list, folder for xmas photos
    """
    self._log(self.logFileSource, force_to_string(src), prg)
[ "def", "record_source", "(", "self", ",", "src", ",", "prg", "=", "''", ")", ":", "self", ".", "_log", "(", "self", ".", "logFileSource", ",", "force_to_string", "(", "src", ")", ",", "prg", ")" ]
function to collect raw data from the web and hard drive
Examples - new source file for ontologies, email contacts list, folder for xmas photos
[ "function", "to", "collect", "raw", "data", "from", "the", "web", "and", "hard", "drive", "Examples", "-", "new", "source", "file", "for", "ontologies", "email", "contacts", "list", "folder", "for", "xmas", "photos" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L135-L140
train
acutesoftware/AIKIF
aikif/cls_log.py
Log.record_command
def record_command(self, cmd, prg=''):
    """
    record the command passed - this is usually the name
    of the program being run or task being run
    """
    self._log(self.logFileCommand, force_to_string(cmd), prg)
python
def record_command(self, cmd, prg=''):
    """
    record the command passed - this is usually the name
    of the program being run or task being run
    """
    self._log(self.logFileCommand, force_to_string(cmd), prg)
[ "def", "record_command", "(", "self", ",", "cmd", ",", "prg", "=", "''", ")", ":", "self", ".", "_log", "(", "self", ".", "logFileCommand", ",", "force_to_string", "(", "cmd", ")", ",", "prg", ")" ]
record the command passed - this is usually the name of the program being run or task being run
[ "record", "the", "command", "passed", "-", "this", "is", "usually", "the", "name", "of", "the", "program", "being", "run", "or", "task", "being", "run" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L148-L153
train
acutesoftware/AIKIF
aikif/cls_log.py
Log.record_result
def record_result(self, res, prg=''):
    """
    record the output of the command. Records the result,
    can have multiple results, so will need to work out
    a consistent way to aggregate this
    """
    self._log(self.logFileResult, force_to_string(res), prg)
python
def record_result(self, res, prg=''):
    """
    record the output of the command. Records the result,
    can have multiple results, so will need to work out
    a consistent way to aggregate this
    """
    self._log(self.logFileResult, force_to_string(res), prg)
[ "def", "record_result", "(", "self", ",", "res", ",", "prg", "=", "''", ")", ":", "self", ".", "_log", "(", "self", ".", "logFileResult", ",", "force_to_string", "(", "res", ")", ",", "prg", ")" ]
record the output of the command. Records the result, can have multiple results, so will need to work out a consistent way to aggregate this
[ "record", "the", "output", "of", "the", "command", ".", "Records", "the", "result", "can", "have", "multiple", "results", "so", "will", "need", "to", "work", "out", "a", "consistent", "way", "to", "aggregate", "this" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L155-L160
train
acutesoftware/AIKIF
aikif/cls_log.py
LogSummary.extract_logs
def extract_logs(self, fname, prg):
    """ read a logfile and return entries for a program """
    op = []
    with open(fname, 'r') as f:
        for line in f:
            if prg in line:
                op.append(line)
    return op
python
def extract_logs(self, fname, prg):
    """ read a logfile and return entries for a program """
    op = []
    with open(fname, 'r') as f:
        for line in f:
            if prg in line:
                op.append(line)
    return op
[ "def", "extract_logs", "(", "self", ",", "fname", ",", "prg", ")", ":", "op", "=", "[", "]", "with", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "prg", "in", "line", ":", "op", ".", "append", "(", "line", ")", "return", "op" ]
read a logfile and return entries for a program
[ "read", "a", "logfile", "and", "return", "entries", "for", "a", "program" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L216-L225
train
acutesoftware/AIKIF
aikif/cls_log.py
LogSummary.summarise_events
def summarise_events(self):
    """
    takes the logfiles and produces an event summary matrix
    date       command  result  process  source
    20140421   9        40      178      9
    20140423   0        0       6        0
    20140424   19       1       47       19
    20140425   24       0       117      24
    20140426   16       0       83       16
    20140427   1        0       6        1
    20140429   0        0       0        4
    """
    all_dates = []
    d_command = self._count_by_date(self.command_file, all_dates)
    d_result = self._count_by_date(self.result_file, all_dates)
    d_process = self._count_by_date(self.process_file, all_dates)
    d_source = self._count_by_date(self.source_file, all_dates)

    with open(self.log_sum, "w") as sum_file:
        sum_file.write('date,command,result,process,source\n')
        for dte in sorted(set(all_dates)):
            sum_file.write(dte + ',')
            if dte in d_command:
                sum_file.write(str(d_command[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_result:
                sum_file.write(str(d_result[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_process:
                sum_file.write(str(d_process[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_source:
                sum_file.write(str(d_source[dte]) + '\n')
            else:
                sum_file.write('0\n')
python
def summarise_events(self):
    """
    takes the logfiles and produces an event summary matrix
    date       command  result  process  source
    20140421   9        40      178      9
    20140423   0        0       6        0
    20140424   19       1       47       19
    20140425   24       0       117      24
    20140426   16       0       83       16
    20140427   1        0       6        1
    20140429   0        0       0        4
    """
    all_dates = []
    d_command = self._count_by_date(self.command_file, all_dates)
    d_result = self._count_by_date(self.result_file, all_dates)
    d_process = self._count_by_date(self.process_file, all_dates)
    d_source = self._count_by_date(self.source_file, all_dates)

    with open(self.log_sum, "w") as sum_file:
        sum_file.write('date,command,result,process,source\n')
        for dte in sorted(set(all_dates)):
            sum_file.write(dte + ',')
            if dte in d_command:
                sum_file.write(str(d_command[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_result:
                sum_file.write(str(d_result[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_process:
                sum_file.write(str(d_process[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_source:
                sum_file.write(str(d_source[dte]) + '\n')
            else:
                sum_file.write('0\n')
[ "def", "summarise_events", "(", "self", ")", ":", "all_dates", "=", "[", "]", "d_command", "=", "self", ".", "_count_by_date", "(", "self", ".", "command_file", ",", "all_dates", ")", "d_result", "=", "self", ".", "_count_by_date", "(", "self", ".", "result_file", ",", "all_dates", ")", "d_process", "=", "self", ".", "_count_by_date", "(", "self", ".", "process_file", ",", "all_dates", ")", "d_source", "=", "self", ".", "_count_by_date", "(", "self", ".", "source_file", ",", "all_dates", ")", "with", "open", "(", "self", ".", "log_sum", ",", "\"w\"", ")", "as", "sum_file", ":", "sum_file", ".", "write", "(", "'date,command,result,process,source\\n'", ")", "for", "dte", "in", "sorted", "(", "set", "(", "all_dates", ")", ")", ":", "sum_file", ".", "write", "(", "dte", "+", "','", ")", "if", "dte", "in", "d_command", ":", "sum_file", ".", "write", "(", "str", "(", "d_command", "[", "dte", "]", ")", "+", "','", ")", "else", ":", "sum_file", ".", "write", "(", "'0,'", ")", "if", "dte", "in", "d_result", ":", "sum_file", ".", "write", "(", "str", "(", "d_result", "[", "dte", "]", ")", "+", "','", ")", "else", ":", "sum_file", ".", "write", "(", "'0,'", ")", "if", "dte", "in", "d_process", ":", "sum_file", ".", "write", "(", "str", "(", "d_process", "[", "dte", "]", ")", "+", "','", ")", "else", ":", "sum_file", ".", "write", "(", "'0,'", ")", "if", "dte", "in", "d_source", ":", "sum_file", ".", "write", "(", "str", "(", "d_source", "[", "dte", "]", ")", "+", "'\\n'", ")", "else", ":", "sum_file", ".", "write", "(", "'0\\n'", ")" ]
takes the logfiles and produces an event summary matrix
date       command  result  process  source
20140421   9        40      178      9
20140423   0        0       6        0
20140424   19       1       47       19
20140425   24       0       117      24
20140426   16       0       83       16
20140427   1        0       6        1
20140429   0        0       0        4
[ "takes", "the", "logfiles", "and", "produces", "an", "event", "summary", "matrix", "date", "command", "result", "process", "source", "20140421", "9", "40", "178", "9", "20140423", "0", "0", "6", "0", "20140424", "19", "1", "47", "19", "20140425", "24", "0", "117", "24", "20140426", "16", "0", "83", "16", "20140427", "1", "0", "6", "1", "20140429", "0", "0", "0", "4" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L227-L265
train
acutesoftware/AIKIF
aikif/cls_log.py
LogSummary._count_by_date
def _count_by_date(self, fname, all_dates):
    """ reads a logfile and returns a dictionary by date showing the count of log entries """
    if not os.path.isfile(fname):
        return {}

    d_log_sum = {}
    with open(fname, "r") as raw_log:
        for line in raw_log:
            cols = line.split(',')
            dte = cols[0].strip('"')[0:10].replace('-', '')
            all_dates.append(dte)
            if dte in d_log_sum:
                d_log_sum[dte] += 1
            else:
                d_log_sum[dte] = 1
    return d_log_sum
python
def _count_by_date(self, fname, all_dates):
    """ reads a logfile and returns a dictionary by date showing the count of log entries """
    if not os.path.isfile(fname):
        return {}

    d_log_sum = {}
    with open(fname, "r") as raw_log:
        for line in raw_log:
            cols = line.split(',')
            dte = cols[0].strip('"')[0:10].replace('-', '')
            all_dates.append(dte)
            if dte in d_log_sum:
                d_log_sum[dte] += 1
            else:
                d_log_sum[dte] = 1
    return d_log_sum
[ "def", "_count_by_date", "(", "self", ",", "fname", ",", "all_dates", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "return", "{", "}", "d_log_sum", "=", "{", "}", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "raw_log", ":", "for", "line", "in", "raw_log", ":", "cols", "=", "line", ".", "split", "(", "','", ")", "dte", "=", "cols", "[", "0", "]", ".", "strip", "(", "'\"'", ")", "[", "0", ":", "10", "]", ".", "replace", "(", "'-'", ",", "''", ")", "all_dates", ".", "append", "(", "dte", ")", "if", "dte", "in", "d_log_sum", ":", "d_log_sum", "[", "dte", "]", "+=", "1", "else", ":", "d_log_sum", "[", "dte", "]", "=", "1", "return", "d_log_sum" ]
reads a logfile and returns a dictionary by date showing the count of log entries
[ "reads", "a", "logfile", "and", "returns", "a", "dictionary", "by", "date", "showing", "the", "count", "of", "log", "entries" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L267-L284
train
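The date key is derived from the first CSV column written by _log; for an illustrative timestamp:

    col = '"2014-04-21 09:32:17"'
    dte = col.strip('"')[0:10].replace('-', '')
    print(dte)   # '20140421' - keeps the date part, drops the dashes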
acutesoftware/AIKIF
aikif/agents/agent_map_data.py
AgentMapDataFile.map_data
def map_data(self):
    """ provides a mapping from the CSV file to the aikif data structures. """
    with open(self.src_file, "r") as f:
        for line in f:
            cols = line.split(',')
            print(cols)
python
def map_data(self):
    """ provides a mapping from the CSV file to the aikif data structures. """
    with open(self.src_file, "r") as f:
        for line in f:
            cols = line.split(',')
            print(cols)
[ "def", "map_data", "(", "self", ")", ":", "with", "open", "(", "self", ".", "src_file", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "cols", "=", "line", ".", "split", "(", "','", ")", "print", "(", "cols", ")" ]
provides a mapping from the CSV file to the aikif data structures.
[ "provides", "a", "mapping", "from", "the", "CSV", "file", "to", "the", "aikif", "data", "structures", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent_map_data.py#L29-L37
train
mpg-age-bioinformatics/AGEpy
AGEpy/blast.py
variablename
def variablename(var):
    """ Returns the string of a variable name. """
    s=[tpl[0] for tpl in itertools.ifilter(lambda x: var is x[1], globals().items())]
    s=s[0].upper()
    return s
python
def variablename(var):
    """ Returns the string of a variable name. """
    s=[tpl[0] for tpl in itertools.ifilter(lambda x: var is x[1], globals().items())]
    s=s[0].upper()
    return s
[ "def", "variablename", "(", "var", ")", ":", "s", "=", "[", "tpl", "[", "0", "]", "for", "tpl", "in", "itertools", ".", "ifilter", "(", "lambda", "x", ":", "var", "is", "x", "[", "1", "]", ",", "globals", "(", ")", ".", "items", "(", ")", ")", "]", "s", "=", "s", "[", "0", "]", ".", "upper", "(", ")", "return", "s" ]
Returns the string of a variable name.
[ "Returns", "the", "string", "of", "a", "variable", "name", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L6-L12
train
mpg-age-bioinformatics/AGEpy
AGEpy/blast.py
BLASTquery
def BLASTquery(query,database,program,filter=None,\
               format_type=None, expect=None,\
               nucl_reward=None, nucl_penalty=None,\
               gapcosts=None, matrix=None,\
               hitlist_size=None, descriptions=None,\
               alignments=None,\
               ncbi_gi=None, threshold=None,\
               word_size=None, composition_based_statistics=None,\
               organism=None, others=None,\
               num_threads=None, baseURL="http://blast.ncbi.nlm.nih.gov",\
               verbose=False):
    """
    Performs a blast query online.

    As in https://ncbi.github.io/blast-cloud/

    :param query: Search query. Allowed values: Accession, GI, or FASTA.
    :param database: BLAST database. Allowed values: nt, nr, refseq_rna, refseq_protein, swissprot, pdbaa, pdbnt
    :param program: BLAST program. Allowed values: blastn, megablast, blastp, blastx, tblastn, tblastx
    :param filter: Low complexity filtering. Allowed values: F to disable. T or L to enable. Prepend "m" for mask at lookup (e.g., mL)
    :param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. HTML is the default.
    :param expect: Expect value. Allowed values: Number greater than zero.
    :param nucl_reward: Reward for matching bases (BLASTN and megaBLAST). Allowed values: Integer greater than zero.
    :param nucl_penalty: Cost for mismatched bases (BLASTN and megaBLAST). Allowed values: Integer less than zero.
    :param gapcosts: Gap existence and extension costs. Allowed values: Pair of positive integers separated by a space such as "11 1".
    :param matrix: Scoring matrix name. Allowed values: One of BLOSUM45, BLOSUM50, BLOSUM62, BLOSUM80, BLOSUM90, PAM250, PAM30 or PAM70. Default: BLOSUM62 for all applicable programs.
    :param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
    :param descriptions: Number of descriptions to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
    :param threshold: Neighboring score for initial words. Allowed values: Positive integer (BLASTP default is 11). Does not apply to BLASTN or MegaBLAST.
    :param word_size: Size of word for initial matches. Allowed values: Positive integer.
    :param composition_based_statistics: Composition based statistics algorithm to use. Allowed values: One of 0, 1, 2, or 3. See comp_based_stats command line option in the BLAST+ user manual for details.
    :param organism: an organism as in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome
    :param others: here you can add other parameters as seen in a blast bookmarked page. Define your query in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome Once your query is defined click on "Bookmark" on the right upper side of the page. You can copy fragments of the URL which define the query. Eg. for organism "Homo sapiens (taxid:9606)" you will see the string "EQ_MENU=Homo%20sapiens%20%28taxid%3A9606%29" - this is the string you can use here in others.
    :param num_threads: Number of virtual CPUs to use. Allowed values: Integer greater than zero (default is 1). Supported only on the cloud.
    :param verbose: print more

    :returns: BLAST search request identifier
    """
    if organism:
        organism=organism.replace(" ", "%20").replace("(", "%28").replace(")", "%29").replace(":", "%3A")
        EQ_MENU=organism
    else:
        EQ_MENU=None

    URL=baseURL+"/Blast.cgi?"
    URL=URL+"QUERY="+str(query)+"&DATABASE="+str(database)+"&PROGRAM="+str(program)
    for o,varname in zip([filter, format_type, expect, nucl_reward, nucl_penalty,\
                          gapcosts, matrix, hitlist_size, descriptions, alignments,\
                          ncbi_gi, threshold, word_size, composition_based_statistics,\
                          EQ_MENU, num_threads],\
                         ['FILTER' , 'FORMAT_TYPE', 'EXPECT', 'NUCL_REWARD', 'NUCL_PENALTY',\
                          'GAPCOSTS', 'MATRIX', 'HITLIST_SIZE', 'DESCRIPTIONS', 'ALIGNMENTS',\
                          'NCBI_GI', 'THRESHOLD', 'WORD_SIZE', 'COMPOSITION_BASED_STATISTICS',\
                          'EQ_MENU', 'NUM_THREADS']):
        if o:
            URL=URL+"&"+ varname +"="+str(o)
    if others:
        URL=URL+"&"+others
    URL=URL+"&CMD=Put"
    if verbose:
        print(URL)
        sys.stdout.flush()
    response=requests.get(url = URL)
    r=response.content.split("\n")
    RID=[ s for s in r if "RID = " in s ]
    if len(RID) > 0:
        RID=RID[0].split(" ")[-1]
    else:
        print("Could not return an RID for this query.")
        RID=None
    return RID
python
def BLASTquery(query,database,program,filter=None,\
               format_type=None, expect=None,\
               nucl_reward=None, nucl_penalty=None,\
               gapcosts=None, matrix=None,\
               hitlist_size=None, descriptions=None,\
               alignments=None,\
               ncbi_gi=None, threshold=None,\
               word_size=None, composition_based_statistics=None,\
               organism=None, others=None,\
               num_threads=None, baseURL="http://blast.ncbi.nlm.nih.gov",\
               verbose=False):
    """
    Performs a blast query online.

    As in https://ncbi.github.io/blast-cloud/

    :param query: Search query. Allowed values: Accession, GI, or FASTA.
    :param database: BLAST database. Allowed values: nt, nr, refseq_rna, refseq_protein, swissprot, pdbaa, pdbnt
    :param program: BLAST program. Allowed values: blastn, megablast, blastp, blastx, tblastn, tblastx
    :param filter: Low complexity filtering. Allowed values: F to disable. T or L to enable. Prepend "m" for mask at lookup (e.g., mL)
    :param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. HTML is the default.
    :param expect: Expect value. Allowed values: Number greater than zero.
    :param nucl_reward: Reward for matching bases (BLASTN and megaBLAST). Allowed values: Integer greater than zero.
    :param nucl_penalty: Cost for mismatched bases (BLASTN and megaBLAST). Allowed values: Integer less than zero.
    :param gapcosts: Gap existence and extension costs. Allowed values: Pair of positive integers separated by a space such as "11 1".
    :param matrix: Scoring matrix name. Allowed values: One of BLOSUM45, BLOSUM50, BLOSUM62, BLOSUM80, BLOSUM90, PAM250, PAM30 or PAM70. Default: BLOSUM62 for all applicable programs.
    :param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
    :param descriptions: Number of descriptions to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
    :param threshold: Neighboring score for initial words. Allowed values: Positive integer (BLASTP default is 11). Does not apply to BLASTN or MegaBLAST.
    :param word_size: Size of word for initial matches. Allowed values: Positive integer.
    :param composition_based_statistics: Composition based statistics algorithm to use. Allowed values: One of 0, 1, 2, or 3. See comp_based_stats command line option in the BLAST+ user manual for details.
    :param organism: an organism as in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome
    :param others: here you can add other parameters as seen in a blast bookmarked page. Define your query in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome Once your query is defined click on "Bookmark" on the right upper side of the page. You can copy fragments of the URL which define the query. Eg. for organism "Homo sapiens (taxid:9606)" you will see the string "EQ_MENU=Homo%20sapiens%20%28taxid%3A9606%29" - this is the string you can use here in others.
    :param num_threads: Number of virtual CPUs to use. Allowed values: Integer greater than zero (default is 1). Supported only on the cloud.
    :param verbose: print more

    :returns: BLAST search request identifier
    """
    if organism:
        organism=organism.replace(" ", "%20").replace("(", "%28").replace(")", "%29").replace(":", "%3A")
        EQ_MENU=organism
    else:
        EQ_MENU=None

    URL=baseURL+"/Blast.cgi?"
    URL=URL+"QUERY="+str(query)+"&DATABASE="+str(database)+"&PROGRAM="+str(program)
    for o,varname in zip([filter, format_type, expect, nucl_reward, nucl_penalty,\
                          gapcosts, matrix, hitlist_size, descriptions, alignments,\
                          ncbi_gi, threshold, word_size, composition_based_statistics,\
                          EQ_MENU, num_threads],\
                         ['FILTER' , 'FORMAT_TYPE', 'EXPECT', 'NUCL_REWARD', 'NUCL_PENALTY',\
                          'GAPCOSTS', 'MATRIX', 'HITLIST_SIZE', 'DESCRIPTIONS', 'ALIGNMENTS',\
                          'NCBI_GI', 'THRESHOLD', 'WORD_SIZE', 'COMPOSITION_BASED_STATISTICS',\
                          'EQ_MENU', 'NUM_THREADS']):
        if o:
            URL=URL+"&"+ varname +"="+str(o)
    if others:
        URL=URL+"&"+others
    URL=URL+"&CMD=Put"
    if verbose:
        print(URL)
        sys.stdout.flush()
    response=requests.get(url = URL)
    r=response.content.split("\n")
    RID=[ s for s in r if "RID = " in s ]
    if len(RID) > 0:
        RID=RID[0].split(" ")[-1]
    else:
        print("Could not return an RID for this query.")
        RID=None
    return RID
[ "def", "BLASTquery", "(", "query", ",", "database", ",", "program", ",", "filter", "=", "None", ",", "format_type", "=", "None", ",", "expect", "=", "None", ",", "nucl_reward", "=", "None", ",", "nucl_penalty", "=", "None", ",", "gapcosts", "=", "None", ",", "matrix", "=", "None", ",", "hitlist_size", "=", "None", ",", "descriptions", "=", "None", ",", "alignments", "=", "None", ",", "ncbi_gi", "=", "None", ",", "threshold", "=", "None", ",", "word_size", "=", "None", ",", "composition_based_statistics", "=", "None", ",", "organism", "=", "None", ",", "others", "=", "None", ",", "num_threads", "=", "None", ",", "baseURL", "=", "\"http://blast.ncbi.nlm.nih.gov\"", ",", "verbose", "=", "False", ")", ":", "if", "organism", ":", "organism", "=", "organism", ".", "replace", "(", "\" \"", ",", "\"%20\"", ")", ".", "replace", "(", "\"(\"", ",", "\"%28\"", ")", ".", "replace", "(", "\")\"", ",", "\"%29\"", ")", ".", "replace", "(", "\":\"", ",", "\"%3A\"", ")", "EQ_MENU", "=", "organism", "else", ":", "EQ_MENU", "=", "None", "URL", "=", "baseURL", "+", "\"/Blast.cgi?\"", "URL", "=", "URL", "+", "\"QUERY=\"", "+", "str", "(", "query", ")", "+", "\"&DATABASE=\"", "+", "str", "(", "database", ")", "+", "\"&PROGRAM=\"", "+", "str", "(", "program", ")", "for", "o", ",", "varname", "in", "zip", "(", "[", "filter", ",", "format_type", ",", "expect", ",", "nucl_reward", ",", "nucl_penalty", ",", "gapcosts", ",", "matrix", ",", "hitlist_size", ",", "descriptions", ",", "alignments", ",", "ncbi_gi", ",", "threshold", ",", "word_size", ",", "composition_based_statistics", ",", "EQ_MENU", ",", "num_threads", "]", ",", "[", "'FILTER'", ",", "'FORMAT_TYPE'", ",", "'EXPECT'", ",", "'NUCL_REWARD'", ",", "'NUCL_PENALTY'", ",", "'GAPCOSTS'", ",", "'MATRIX'", ",", "'HITLIST_SIZE'", ",", "'DESCRIPTIONS'", ",", "'ALIGNMENTS'", ",", "'NCBI_GI'", ",", "'THRESHOLD'", ",", "'WORD_SIZE'", ",", "'COMPOSITION_BASED_STATISTICS'", ",", "'EQ_MENU'", ",", "'NUM_THREADS'", "]", ")", ":", "if", "o", ":", "URL", "=", "URL", "+", "\"&\"", "+", "varname", "+", "\"=\"", "+", "str", "(", "o", ")", "if", "others", ":", "URL", "=", "URL", "+", "\"&\"", "+", "others", "URL", "=", "URL", "+", "\"&CMD=Put\"", "if", "verbose", ":", "print", "(", "URL", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "response", "=", "requests", ".", "get", "(", "url", "=", "URL", ")", "r", "=", "response", ".", "content", ".", "split", "(", "\"\\n\"", ")", "RID", "=", "[", "s", "for", "s", "in", "r", "if", "\"RID = \"", "in", "s", "]", "if", "len", "(", "RID", ")", ">", "0", ":", "RID", "=", "RID", "[", "0", "]", ".", "split", "(", "\" \"", ")", "[", "-", "1", "]", "else", ":", "print", "(", "\"Could not return an RID for this query.\"", ")", "RID", "=", "None", "return", "RID" ]
Performs a blast query online.

As in https://ncbi.github.io/blast-cloud/

:param query: Search query. Allowed values: Accession, GI, or FASTA.
:param database: BLAST database. Allowed values: nt, nr, refseq_rna, refseq_protein, swissprot, pdbaa, pdbnt
:param program: BLAST program. Allowed values: blastn, megablast, blastp, blastx, tblastn, tblastx
:param filter: Low complexity filtering. Allowed values: F to disable. T or L to enable. Prepend "m" for mask at lookup (e.g., mL)
:param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. HTML is the default.
:param expect: Expect value. Allowed values: Number greater than zero.
:param nucl_reward: Reward for matching bases (BLASTN and megaBLAST). Allowed values: Integer greater than zero.
:param nucl_penalty: Cost for mismatched bases (BLASTN and megaBLAST). Allowed values: Integer less than zero.
:param gapcosts: Gap existence and extension costs. Allowed values: Pair of positive integers separated by a space such as "11 1".
:param matrix: Scoring matrix name. Allowed values: One of BLOSUM45, BLOSUM50, BLOSUM62, BLOSUM80, BLOSUM90, PAM250, PAM30 or PAM70. Default: BLOSUM62 for all applicable programs.
:param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
:param descriptions: Number of descriptions to print (applies to HTML and Text). Allowed values: Integer greater than zero.
:param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
:param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
:param threshold: Neighboring score for initial words. Allowed values: Positive integer (BLASTP default is 11). Does not apply to BLASTN or MegaBLAST.
:param word_size: Size of word for initial matches. Allowed values: Positive integer.
:param composition_based_statistics: Composition based statistics algorithm to use. Allowed values: One of 0, 1, 2, or 3. See comp_based_stats command line option in the BLAST+ user manual for details.
:param organism: an organism as in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome
:param others: here you can add other parameters as seen in a blast bookmarked page. Define your query in https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome Once your query is defined click on "Bookmark" on the right upper side of the page. You can copy fragments of the URL which define the query. Eg. for organism "Homo sapiens (taxid:9606)" you will see the string "EQ_MENU=Homo%20sapiens%20%28taxid%3A9606%29" - this is the string you can use here in others.
:param num_threads: Number of virtual CPUs to use. Allowed values: Integer greater than zero (default is 1). Supported only on the cloud.
:param verbose: print more

:returns: BLAST search request identifier
[ "Performs", "a", "blast", "query", "online", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L14-L94
train
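A minimal usage sketch (the nucleotide sequence is a made-up example; the database and program values come from the docstring):

    rid = BLASTquery('ACGTGGCTAGCTAGCTACGATCG', 'nt', 'blastn',
                     hitlist_size=10, organism='Homo sapiens (taxid:9606)')
    print(rid)   # request identifier, to be polled with BLASTcheck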
mpg-age-bioinformatics/AGEpy
AGEpy/blast.py
BLASTcheck
def BLASTcheck(rid,baseURL="http://blast.ncbi.nlm.nih.gov"):
    """
    Checks the status of a query.

    :param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
    :param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

    :returns status: status for the query.
    :returns therearehits: yes or no for existing hits on a finished query.
    """
    URL=baseURL+"/Blast.cgi?"
    URL=URL+"FORMAT_OBJECT=SearchInfo&RID="+rid+"&CMD=Get"
    response=requests.get(url = URL)
    r=response.content.split("\n")
    try:
        status=[ s for s in r if "Status=" in s ][0].split("=")[-1]
        ThereAreHits=[ s for s in r if "ThereAreHits=" in s ][0].split("=")[-1]
    except:
        status=None
        ThereAreHits=None

    print(rid, status, ThereAreHits)
    sys.stdout.flush()
    return status, ThereAreHits
python
def BLASTcheck(rid,baseURL="http://blast.ncbi.nlm.nih.gov"):
    """
    Checks the status of a query.

    :param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
    :param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

    :returns status: status for the query.
    :returns therearehits: yes or no for existing hits on a finished query.
    """
    URL=baseURL+"/Blast.cgi?"
    URL=URL+"FORMAT_OBJECT=SearchInfo&RID="+rid+"&CMD=Get"
    response=requests.get(url = URL)
    r=response.content.split("\n")
    try:
        status=[ s for s in r if "Status=" in s ][0].split("=")[-1]
        ThereAreHits=[ s for s in r if "ThereAreHits=" in s ][0].split("=")[-1]
    except:
        status=None
        ThereAreHits=None

    print(rid, status, ThereAreHits)
    sys.stdout.flush()
    return status, ThereAreHits
[ "def", "BLASTcheck", "(", "rid", ",", "baseURL", "=", "\"http://blast.ncbi.nlm.nih.gov\"", ")", ":", "URL", "=", "baseURL", "+", "\"/Blast.cgi?\"", "URL", "=", "URL", "+", "\"FORMAT_OBJECT=SearchInfo&RID=\"", "+", "rid", "+", "\"&CMD=Get\"", "response", "=", "requests", ".", "get", "(", "url", "=", "URL", ")", "r", "=", "response", ".", "content", ".", "split", "(", "\"\\n\"", ")", "try", ":", "status", "=", "[", "s", "for", "s", "in", "r", "if", "\"Status=\"", "in", "s", "]", "[", "0", "]", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", "ThereAreHits", "=", "[", "s", "for", "s", "in", "r", "if", "\"ThereAreHits=\"", "in", "s", "]", "[", "0", "]", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", "except", ":", "status", "=", "None", "ThereAreHits", "=", "None", "print", "(", "rid", ",", "status", ",", "ThereAreHits", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return", "status", ",", "ThereAreHits" ]
Checks the status of a query.

:param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
:param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

:returns status: status for the query.
:returns therearehits: yes or no for existing hits on a finished query.
[ "Checks", "the", "status", "of", "a", "query", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L96-L121
train
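A typical polling loop (a sketch; 'WAITING' and 'READY' are the status strings the NCBI server reports, and ThereAreHits is 'yes' or 'no' as described in the docstring):

    import time
    status, hits = BLASTcheck(rid)
    while status == 'WAITING':
        time.sleep(60)                    # NCBI asks clients to wait between polls
        status, hits = BLASTcheck(rid)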
mpg-age-bioinformatics/AGEpy
AGEpy/blast.py
BLASTresults
def BLASTresults(rid, format_type="Tabular", \
                 hitlist_size= None, alignments=None, \
                 ncbi_gi = None, format_object=None,\
                 baseURL="http://blast.ncbi.nlm.nih.gov"):
    """
    Retrieves results for an RID.

    :param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
    :param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. Tabular is the default.
    :param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
    :param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
    :param format_object: Object type. Allowed values: SearchInfo (status check) or Alignment (report formatting).
    :param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

    :returns: the result of a BLAST query. If format_type="Tabular" it will parse the content into a Pandas dataframe.
    """
    URL=baseURL+"/Blast.cgi?"
    URL=URL+"RID="+str(rid)+"&FORMAT_TYPE="+str(format_type)
    for o, varname in zip([hitlist_size, alignments, ncbi_gi, format_object],\
                          ['HITLIST_SIZE', 'ALIGNMENTS', 'NCBI_GI', 'FORMAT_OBJECT']):
        if o:
            URL=URL+"&"+ varname +"="+str(o)
    URL=URL+"&CMD=Get"
    response=requests.get(url = URL)
    response=response.content
    if format_type=="Tabular":
        result=response.split("\n")
        result=[ s.split("\t") for s in result][6:]
        header=result[:7]
        content=result[7:]
        fields=header[5][0].strip("# Fields: ").split(", ")
        result=pd.DataFrame(content,columns=fields)
        response=result[:int(header[-1][0].split(" ")[1])]
    return response
python
def BLASTresults(rid, format_type="Tabular", \
                 hitlist_size= None, alignments=None, \
                 ncbi_gi = None, format_object=None,\
                 baseURL="http://blast.ncbi.nlm.nih.gov"):
    """
    Retrieves results for an RID.

    :param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
    :param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. Tabular is the default.
    :param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
    :param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
    :param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
    :param format_object: Object type. Allowed values: SearchInfo (status check) or Alignment (report formatting).
    :param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

    :returns: the result of a BLAST query. If format_type="Tabular" it will parse the content into a Pandas dataframe.
    """
    URL=baseURL+"/Blast.cgi?"
    URL=URL+"RID="+str(rid)+"&FORMAT_TYPE="+str(format_type)
    for o, varname in zip([hitlist_size, alignments, ncbi_gi, format_object],\
                          ['HITLIST_SIZE', 'ALIGNMENTS', 'NCBI_GI', 'FORMAT_OBJECT']):
        if o:
            URL=URL+"&"+ varname +"="+str(o)
    URL=URL+"&CMD=Get"
    response=requests.get(url = URL)
    response=response.content
    if format_type=="Tabular":
        result=response.split("\n")
        result=[ s.split("\t") for s in result][6:]
        header=result[:7]
        content=result[7:]
        fields=header[5][0].strip("# Fields: ").split(", ")
        result=pd.DataFrame(content,columns=fields)
        response=result[:int(header[-1][0].split(" ")[1])]
    return response
[ "def", "BLASTresults", "(", "rid", ",", "format_type", "=", "\"Tabular\"", ",", "hitlist_size", "=", "None", ",", "alignments", "=", "None", ",", "ncbi_gi", "=", "None", ",", "format_object", "=", "None", ",", "baseURL", "=", "\"http://blast.ncbi.nlm.nih.gov\"", ")", ":", "URL", "=", "baseURL", "+", "\"/Blast.cgi?\"", "URL", "=", "URL", "+", "\"RID=\"", "+", "str", "(", "rid", ")", "+", "\"&FORMAT_TYPE=\"", "+", "str", "(", "format_type", ")", "for", "o", "in", "[", "hitlist_size", ",", "alignments", ",", "ncbi_gi", ",", "format_object", "]", ":", "if", "o", ":", "URL", "=", "URL", "+", "\"&\"", "+", "variablename", "(", "var", ")", "+", "\"=\"", "+", "str", "(", "o", ")", "URL", "=", "URL", "+", "\"&CMD=Get\"", "response", "=", "requests", ".", "get", "(", "url", "=", "URL", ")", "response", "=", "response", ".", "content", "if", "format_type", "==", "\"Tabular\"", ":", "result", "=", "response", ".", "split", "(", "\"\\n\"", ")", "result", "=", "[", "s", ".", "split", "(", "\"\\t\"", ")", "for", "s", "in", "result", "]", "[", "6", ":", "]", "header", "=", "result", "[", ":", "7", "]", "content", "=", "result", "[", "7", ":", "]", "fields", "=", "header", "[", "5", "]", "[", "0", "]", ".", "strip", "(", "\"# Fields: \"", ")", ".", "split", "(", "\", \"", ")", "result", "=", "pd", ".", "DataFrame", "(", "content", ",", "columns", "=", "fields", ")", "response", "=", "result", "[", ":", "int", "(", "header", "[", "-", "1", "]", "[", "0", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", ")", "]", "return", "response" ]
Retrieves results for an RID.

:param rid: BLAST search request identifier. Allowed values: The Request ID (RID) returned when the search was submitted
:param format_type: Report type. Allowed values: HTML, Text, XML, XML2, JSON2, or Tabular. Tabular is the default.
:param hitlist_size: Number of database sequences to keep. Allowed values: Integer greater than zero.
:param alignments: Number of alignments to print (applies to HTML and Text). Allowed values: Integer greater than zero.
:param ncbi_gi: Show NCBI GIs in report. Allowed values: T or F.
:param format_object: Object type. Allowed values: SearchInfo (status check) or Alignment (report formatting).
:param baseURL: server url. Default=http://blast.ncbi.nlm.nih.gov

:returns: the result of a BLAST query. If format_type="Tabular" it will parse the content into a Pandas dataframe.
[ "Retrieves", "results", "for", "an", "RID", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L123-L160
train
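Once the query is READY, the default Tabular report is parsed into a pandas DataFrame; the column names are taken from the report's '# Fields:' header line (the names shown below are illustrative):

    df = BLASTresults(rid)           # format_type="Tabular" by default
    print(df.columns.tolist())      # e.g. ['query id', 'subject ids', '% identity', ...]
    print(df.head())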
Nachtfeuer/pipeline
spline/tools/report/generator.py
generate_html
def generate_html(store):
    """
    Generating HTML report.

    Args:
        store (Store): report data.

    Returns:
        str: rendered HTML template.
    """
    spline = {
        'version': VERSION,
        'url': 'https://github.com/Nachtfeuer/pipeline',
        'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p")
    }

    html_template_file = os.path.join(os.path.dirname(__file__), 'templates/report.html.j2')
    with open(html_template_file) as handle:
        html_template = handle.read()

    return render(html_template, spline=spline, store=store)
python
def generate_html(store):
    """
    Generating HTML report.

    Args:
        store (Store): report data.

    Returns:
        str: rendered HTML template.
    """
    spline = {
        'version': VERSION,
        'url': 'https://github.com/Nachtfeuer/pipeline',
        'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p")
    }

    html_template_file = os.path.join(os.path.dirname(__file__), 'templates/report.html.j2')
    with open(html_template_file) as handle:
        html_template = handle.read()

    return render(html_template, spline=spline, store=store)
[ "def", "generate_html", "(", "store", ")", ":", "spline", "=", "{", "'version'", ":", "VERSION", ",", "'url'", ":", "'https://github.com/Nachtfeuer/pipeline'", ",", "'generated'", ":", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%A, %d. %B %Y - %I:%M:%S %p\"", ")", "}", "html_template_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates/report.html.j2'", ")", "with", "open", "(", "html_template_file", ")", "as", "handle", ":", "html_template", "=", "handle", ".", "read", "(", ")", "return", "render", "(", "html_template", ",", "spline", "=", "spline", ",", "store", "=", "store", ")" ]
Generating HTML report.

Args:
    store (Store): report data.

Returns:
    str: rendered HTML template.
[ "Generating", "HTML", "report", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/generator.py#L26-L45
train
Nachtfeuer/pipeline
spline/tools/condition.py
TokensCompressor.__begin_of_list
def __begin_of_list(self, ast_token):
    """Handle begin of a list."""
    self.list_level += 1
    if self.list_level == 1:
        self.final_ast_tokens.append(ast_token)
python
def __begin_of_list(self, ast_token):
    """Handle begin of a list."""
    self.list_level += 1
    if self.list_level == 1:
        self.final_ast_tokens.append(ast_token)
[ "def", "__begin_of_list", "(", "self", ",", "ast_token", ")", ":", "self", ".", "list_level", "+=", "1", "if", "self", ".", "list_level", "==", "1", ":", "self", ".", "final_ast_tokens", ".", "append", "(", "ast_token", ")" ]
Handle begin of a list.
[ "Handle", "begin", "of", "a", "list", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L41-L45
train
Nachtfeuer/pipeline
spline/tools/condition.py
TokensCompressor.__end_of_list
def __end_of_list(self, ast_token):
    """Handle end of a list."""
    self.list_level -= 1
    if self.list_level == 0:
        if self.list_entry is not None:
            self.final_ast_tokens.append(self.list_entry)
            self.list_entry = None
        self.final_ast_tokens.append(ast_token)
python
def __end_of_list(self, ast_token):
    """Handle end of a list."""
    self.list_level -= 1
    if self.list_level == 0:
        if self.list_entry is not None:
            self.final_ast_tokens.append(self.list_entry)
            self.list_entry = None
        self.final_ast_tokens.append(ast_token)
[ "def", "__end_of_list", "(", "self", ",", "ast_token", ")", ":", "self", ".", "list_level", "-=", "1", "if", "self", ".", "list_level", "==", "0", ":", "if", "self", ".", "list_entry", "is", "not", "None", ":", "self", ".", "final_ast_tokens", ".", "append", "(", "self", ".", "list_entry", ")", "self", ".", "list_entry", "=", "None", "self", ".", "final_ast_tokens", ".", "append", "(", "ast_token", ")" ]
Handle end of a list.
[ "Handle", "end", "of", "a", "list", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L47-L54
train
Nachtfeuer/pipeline
spline/tools/condition.py
TokensCompressor.__default
def __default(self, ast_token):
    """Handle tokens inside the list or outside the list."""
    if self.list_level == 1:
        if self.list_entry is None:
            self.list_entry = ast_token
        elif not isinstance(ast_token, type(self.list_entry)):
            self.final_ast_tokens.append(ast_token)
    elif self.list_level == 0:
        self.final_ast_tokens.append(ast_token)
python
def __default(self, ast_token):
    """Handle tokens inside the list or outside the list."""
    if self.list_level == 1:
        if self.list_entry is None:
            self.list_entry = ast_token
        elif not isinstance(ast_token, type(self.list_entry)):
            self.final_ast_tokens.append(ast_token)
    elif self.list_level == 0:
        self.final_ast_tokens.append(ast_token)
[ "def", "__default", "(", "self", ",", "ast_token", ")", ":", "if", "self", ".", "list_level", "==", "1", ":", "if", "self", ".", "list_entry", "is", "None", ":", "self", ".", "list_entry", "=", "ast_token", "elif", "not", "isinstance", "(", "ast_token", ",", "type", "(", "self", ".", "list_entry", ")", ")", ":", "self", ".", "final_ast_tokens", ".", "append", "(", "ast_token", ")", "elif", "self", ".", "list_level", "==", "0", ":", "self", ".", "final_ast_tokens", ".", "append", "(", "ast_token", ")" ]
Handle tokens inside the list or outside the list.
[ "Handle", "tokens", "inside", "the", "list", "or", "outside", "the", "list", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L56-L64
train
Nachtfeuer/pipeline
spline/tools/condition.py
TokensCompressor.compress
def compress(self):
    """Main function of compression."""
    for ast_token in self.ast_tokens:
        if type(ast_token) in self.dispatcher:  # pylint: disable=unidiomatic-typecheck
            self.dispatcher[type(ast_token)](ast_token)
        else:
            self.dispatcher['default'](ast_token)
python
def compress(self):
    """Main function of compression."""
    for ast_token in self.ast_tokens:
        if type(ast_token) in self.dispatcher:  # pylint: disable=unidiomatic-typecheck
            self.dispatcher[type(ast_token)](ast_token)
        else:
            self.dispatcher['default'](ast_token)
[ "def", "compress", "(", "self", ")", ":", "for", "ast_token", "in", "self", ".", "ast_tokens", ":", "if", "type", "(", "ast_token", ")", "in", "self", ".", "dispatcher", ":", "# pylint: disable=unidiomatic-typecheck", "self", ".", "dispatcher", "[", "type", "(", "ast_token", ")", "]", "(", "ast_token", ")", "else", ":", "self", ".", "dispatcher", "[", "'default'", "]", "(", "ast_token", ")" ]
Main function of compression.
[ "Main", "function", "of", "compression", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L66-L72
train
Nachtfeuer/pipeline
spline/tools/condition.py
Condition.get_tokens
def get_tokens(condition):
    """
    Get AST tokens for Python condition.

    Returns:
        list: list of AST tokens
    """
    try:
        ast_tokens = list(ast.walk(ast.parse(condition.strip())))
    except SyntaxError as exception:
        Logger.get_logger(__name__).error("Syntax error: %s", exception)
        ast_tokens = []
    return ast_tokens
python
def get_tokens(condition):
    """
    Get AST tokens for Python condition.

    Returns:
        list: list of AST tokens
    """
    try:
        ast_tokens = list(ast.walk(ast.parse(condition.strip())))
    except SyntaxError as exception:
        Logger.get_logger(__name__).error("Syntax error: %s", exception)
        ast_tokens = []
    return ast_tokens
[ "def", "get_tokens", "(", "condition", ")", ":", "try", ":", "ast_tokens", "=", "list", "(", "ast", ".", "walk", "(", "ast", ".", "parse", "(", "condition", ".", "strip", "(", ")", ")", ")", ")", "except", "SyntaxError", "as", "exception", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "error", "(", "\"Syntax error: %s\"", ",", "exception", ")", "ast_tokens", "=", "[", "]", "return", "ast_tokens" ]
Get AST tokens for Python condition.

Returns:
    list: list of AST tokens
[ "Get", "AST", "tokens", "for", "Python", "condition", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L129-L141
train
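For orientation, this is what `get_tokens` produces: a breadth-first walk of the parsed expression. A quick interactive check (note that Python 3.8+ reports Constant where older versions reported Num/Str):

import ast

tokens = list(ast.walk(ast.parse('2 == 3'.strip())))
print([type(token).__name__ for token in tokens])
# On Python 3.8+: ['Module', 'Expr', 'Compare', 'Constant', 'Eq', 'Constant']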
Nachtfeuer/pipeline
spline/tools/condition.py
Condition.match_tokens
def match_tokens(ast_tokens, ast_types): """ Verify that each token in order does match the expected types. The list provided by `get_tokens` has two more elements at the beginning of the list which are always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_tokens (list): list of AST tokens parsed previously. ast_types (list): list of expected AST types. Returns: bool: True when all tokens match the expected types """ ast_final_types = [ast.Module, ast.Expr] + ast_types return all(isinstance(ast_token, ast_type) for ast_token, ast_type in zip(ast_tokens, ast_final_types))
python
def match_tokens(ast_tokens, ast_types): """ Verify that each token in order does match the expected types. The list provided by `get_tokens` has two more elements at the beginning of the list which are always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_tokens (list): list of AST tokens parsed previously. ast_types (list): list of expected AST types. Returns: bool: True when all tokens match the expected types """ ast_final_types = [ast.Module, ast.Expr] + ast_types return all(isinstance(ast_token, ast_type) for ast_token, ast_type in zip(ast_tokens, ast_final_types))
[ "def", "match_tokens", "(", "ast_tokens", ",", "ast_types", ")", ":", "ast_final_types", "=", "[", "ast", ".", "Module", ",", "ast", ".", "Expr", "]", "+", "ast_types", "return", "all", "(", "isinstance", "(", "ast_token", ",", "ast_type", ")", "for", "ast_token", ",", "ast_type", "in", "zip", "(", "ast_tokens", ",", "ast_final_types", ")", ")" ]
Verify that each token in order does match the expected types. The list provided by `get_tokens` has two more elements at the beginning of the list which are always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_tokens (list): list of AST tokens parsed previously. ast_types (list): list of expected AST types. Returns: bool: True when all tokens match the expected types
[ "Verify", "that", "each", "token", "in", "order", "does", "match", "the", "expected", "types", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L144-L167
train
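The matching idea in isolation: zip the observed tokens against the expected types and require every pair to match. Because zip stops at the shorter sequence, trailing tokens beyond the expected prefix are ignored, exactly as in `match_tokens`. A standalone sketch:

import ast

tokens = list(ast.walk(ast.parse('2 == 3')))
expected = [ast.Module, ast.Expr, ast.Compare]  # prefix only
print(all(isinstance(token, expected_type)
          for token, expected_type in zip(tokens, expected)))  # True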
Nachtfeuer/pipeline
spline/tools/condition.py
Condition.find_rule
def find_rule(condition): """ Find rule for given condition. Args: condition (str): Python condition as string. Returns: str, list, function: found rule name, list of AST tokens for condition and verification function. """ final_condition = re.sub('{{.*}}', '42', condition) ast_tokens = Condition.get_tokens(final_condition) ast_compressed_tokens = Condition.compress_tokens(ast_tokens) name = 'undefined' function = lambda tokens: False if len(ast_compressed_tokens) > 0: for rule in Condition.RULES: if Condition.match_tokens(ast_compressed_tokens, rule['types']): name = rule['name'] function = rule['evaluate'] break return name, ast_tokens, function
python
def find_rule(condition): """ Find rule for given condition. Args: condition (str): Python condition as string. Returns: str, list, function: found rule name, list of AST tokens for condition and verification function. """ final_condition = re.sub('{{.*}}', '42', condition) ast_tokens = Condition.get_tokens(final_condition) ast_compressed_tokens = Condition.compress_tokens(ast_tokens) name = 'undefined' function = lambda tokens: False if len(ast_compressed_tokens) > 0: for rule in Condition.RULES: if Condition.match_tokens(ast_compressed_tokens, rule['types']): name = rule['name'] function = rule['evaluate'] break return name, ast_tokens, function
[ "def", "find_rule", "(", "condition", ")", ":", "final_condition", "=", "re", ".", "sub", "(", "'{{.*}}'", ",", "'42'", ",", "condition", ")", "ast_tokens", "=", "Condition", ".", "get_tokens", "(", "final_condition", ")", "ast_compressed_tokens", "=", "Condition", ".", "compress_tokens", "(", "ast_tokens", ")", "name", "=", "'undefined'", "function", "=", "lambda", "tokens", ":", "False", "if", "len", "(", "ast_compressed_tokens", ")", ">", "0", ":", "for", "rule", "in", "Condition", ".", "RULES", ":", "if", "Condition", ".", "match_tokens", "(", "ast_compressed_tokens", ",", "rule", "[", "'types'", "]", ")", ":", "name", "=", "rule", "[", "'name'", "]", "function", "=", "rule", "[", "'evaluate'", "]", "break", "return", "name", ",", "ast_tokens", ",", "function" ]
Find rule for given condition. Args: condition (str): Python condition as string. Returns: str, list, function: found rule name, list of AST tokens for condition and verification function.
[ "Find", "rule", "for", "given", "condition", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L229-L253
train
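Before parsing, `find_rule` replaces any Jinja-style `{{ ... }}` expression with the literal 42 so the condition is syntactically valid Python even while it still contains unrendered templates. A quick illustration with a hypothetical condition; note the greedy `.*` collapses everything between the first `{{` and the last `}}` into a single 42:

import re

print(re.sub('{{.*}}', '42', '{{ env.COUNT }} == 3'))   # 42 == 3
print(re.sub('{{.*}}', '42', '{{ a }} == {{ b }}'))     # 42  (greedy match)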
Nachtfeuer/pipeline
spline/tools/condition.py
Condition.evaluate
def evaluate(condition): """ Evaluate simple condition. >>> Condition.evaluate(' 2 == 2 ') True >>> Condition.evaluate(' not 2 == 2 ') False >>> Condition.evaluate(' not "abc" == "xyz" ') True >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]') False >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]') True >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]') True Args: condition (str): Python condition as string. Returns: bool: True when condition evaluates to True. """ success = False if len(condition) > 0: try: rule_name, ast_tokens, evaluate_function = Condition.find_rule(condition) if not rule_name == 'undefined': success = evaluate_function(ast_tokens) except AttributeError as exception: Logger.get_logger(__name__).error("Attribute error: %s", exception) else: success = True return success
python
def evaluate(condition): """ Evaluate simple condition. >>> Condition.evaluate(' 2 == 2 ') True >>> Condition.evaluate(' not 2 == 2 ') False >>> Condition.evaluate(' not "abc" == "xyz" ') True >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]') False >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]') True >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]') True Args: condition (str): Python condition as string. Returns: bool: True when condition evaluates to True. """ success = False if len(condition) > 0: try: rule_name, ast_tokens, evaluate_function = Condition.find_rule(condition) if not rule_name == 'undefined': success = evaluate_function(ast_tokens) except AttributeError as exception: Logger.get_logger(__name__).error("Attribute error: %s", exception) else: success = True return success
[ "def", "evaluate", "(", "condition", ")", ":", "success", "=", "False", "if", "len", "(", "condition", ")", ">", "0", ":", "try", ":", "rule_name", ",", "ast_tokens", ",", "evaluate_function", "=", "Condition", ".", "find_rule", "(", "condition", ")", "if", "not", "rule_name", "==", "'undefined'", ":", "success", "=", "evaluate_function", "(", "ast_tokens", ")", "except", "AttributeError", "as", "exception", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "error", "(", "\"Attribute error: %s\"", ",", "exception", ")", "else", ":", "success", "=", "True", "return", "success" ]
Evaluate simple condition. >>> Condition.evaluate(' 2 == 2 ') True >>> Condition.evaluate(' not 2 == 2 ') False >>> Condition.evaluate(' not "abc" == "xyz" ') True >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]') False >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]') True >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]') True Args: condition (str): Python condition as string. Returns: bool: True when condition evaluates to True.
[ "Evaluate", "simple", "condition", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L256-L293
train
acutesoftware/AIKIF
scripts/run.py
start_aikif
def start_aikif(): """ starts the web interface and possibly other processes """ if sys.platform[0:3] == 'win': os.system("start go_web_aikif.bat") else: os.system("../aikif/web_app/web_aikif.py") import webbrowser import time time.sleep(1) webbrowser.open('http://127.0.0.1:5000')
python
def start_aikif(): """ starts the web interface and possibly other processes """ if sys.platform[0:3] == 'win': os.system("start go_web_aikif.bat") else: os.system("../aikif/web_app/web_aikif.py") import webbrowser import time time.sleep(1) webbrowser.open('http://127.0.0.1:5000')
[ "def", "start_aikif", "(", ")", ":", "if", "sys", ".", "platform", "[", "0", ":", "3", "]", "==", "'win'", ":", "os", ".", "system", "(", "\"start go_web_aikif.bat\"", ")", "else", ":", "os", ".", "system", "(", "\"../aikif/web_app/web_aikif.py\"", ")", "import", "webbrowser", "import", "time", "time", ".", "sleep", "(", "1", ")", "webbrowser", ".", "open", "(", "'http://127.0.0.1:5000'", ")" ]
starts the web interface and possibly other processes
[ "starts", "the", "web", "interface", "and", "possibly", "other", "processes" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/run.py#L49-L60
train
Nachtfeuer/pipeline
spline/components/tasks.py
get_creator_by_name
def get_creator_by_name(name): """ Get creator function by name. Args: name (str): name of the creator function. Returns: function: creator function. """ return {'docker(container)': Container.creator, 'shell': Bash.creator, 'docker(image)': Image.creator, 'python': Script.creator, 'packer': Packer.creator, 'ansible(simple)': Ansible.creator}[name]
python
def get_creator_by_name(name): """ Get creator function by name. Args: name (str): name of the creator function. Returns: function: creator function. """ return {'docker(container)': Container.creator, 'shell': Bash.creator, 'docker(image)': Image.creator, 'python': Script.creator, 'packer': Packer.creator, 'ansible(simple)': Ansible.creator}[name]
[ "def", "get_creator_by_name", "(", "name", ")", ":", "return", "{", "'docker(container)'", ":", "Container", ".", "creator", ",", "'shell'", ":", "Bash", ".", "creator", ",", "'docker(image)'", ":", "Image", ".", "creator", ",", "'python'", ":", "Script", ".", "creator", ",", "'packer'", ":", "Packer", ".", "creator", ",", "'ansible(simple)'", ":", "Ansible", ".", "creator", "}", "[", "name", "]" ]
Get creator function by name. Args: name (str): name of the creator function. Returns: function: creator function.
[ "Get", "creator", "function", "by", "name", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L36-L49
train
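The lookup is a plain dict indexed by name, so an unknown creator name raises KeyError rather than returning a fallback. A reduced sketch with placeholder factories (not the spline originals):

def get_creator(name):
    registry = {'shell': lambda entry, config: ('bash-task', config),
                'python': lambda entry, config: ('script-task', config)}
    return registry[name]

print(get_creator('shell')({}, {'script': 'echo hi'}))
try:
    get_creator('ruby')
except KeyError as error:
    print('unknown creator:', error)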
Nachtfeuer/pipeline
spline/components/tasks.py
worker
def worker(data): """Running on shell via multiprocessing.""" creator = get_creator_by_name(data['creator']) shell = creator(data['entry'], ShellConfig(script=data['entry']['script'], title=data['entry']['title'] if 'title' in data['entry'] else '', model=data['model'], env=data['env'], item=data['item'], dry_run=data['dry_run'], debug=data['debug'], strict=data['strict'], variables=data['variables'], temporary_scripts_path=data['temporary_scripts_path'])) output = [] for line in shell.process(): output.append(line) Logger.get_logger(__name__ + '.worker').info(" | %s", line) return {'id': data['id'], 'success': shell.success, 'output': output}
python
def worker(data): """Running on shell via multiprocessing.""" creator = get_creator_by_name(data['creator']) shell = creator(data['entry'], ShellConfig(script=data['entry']['script'], title=data['entry']['title'] if 'title' in data['entry'] else '', model=data['model'], env=data['env'], item=data['item'], dry_run=data['dry_run'], debug=data['debug'], strict=data['strict'], variables=data['variables'], temporary_scripts_path=data['temporary_scripts_path'])) output = [] for line in shell.process(): output.append(line) Logger.get_logger(__name__ + '.worker').info(" | %s", line) return {'id': data['id'], 'success': shell.success, 'output': output}
[ "def", "worker", "(", "data", ")", ":", "creator", "=", "get_creator_by_name", "(", "data", "[", "'creator'", "]", ")", "shell", "=", "creator", "(", "data", "[", "'entry'", "]", ",", "ShellConfig", "(", "script", "=", "data", "[", "'entry'", "]", "[", "'script'", "]", ",", "title", "=", "data", "[", "'entry'", "]", "[", "'title'", "]", "if", "'title'", "in", "data", "[", "'entry'", "]", "else", "''", ",", "model", "=", "data", "[", "'model'", "]", ",", "env", "=", "data", "[", "'env'", "]", ",", "item", "=", "data", "[", "'item'", "]", ",", "dry_run", "=", "data", "[", "'dry_run'", "]", ",", "debug", "=", "data", "[", "'debug'", "]", ",", "strict", "=", "data", "[", "'strict'", "]", ",", "variables", "=", "data", "[", "'variables'", "]", ",", "temporary_scripts_path", "=", "data", "[", "'temporary_scripts_path'", "]", ")", ")", "output", "=", "[", "]", "for", "line", "in", "shell", ".", "process", "(", ")", ":", "output", ".", "append", "(", "line", ")", "Logger", ".", "get_logger", "(", "__name__", "+", "'.worker'", ")", ".", "info", "(", "\" | %s\"", ",", "line", ")", "return", "{", "'id'", ":", "data", "[", "'id'", "]", ",", "'success'", ":", "shell", ".", "success", ",", "'output'", ":", "output", "}" ]
Running on shell via multiprocessing.
[ "Running", "on", "shell", "via", "multiprocessing", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L52-L66
train
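The worker contract is: one picklable dict in, one picklable dict out, which is what lets `multiprocessing.Pool.map` ship the call across processes later on. A trimmed sketch of the same shape (fields reduced to the essentials):

def worker(data):
    # Pretend to run the script and capture its output lines.
    output = ['running %s' % data['entry']['script']]
    return {'id': data['id'], 'success': True, 'output': output}

print(worker({'id': 1, 'entry': {'script': 'echo hi'}}))
# {'id': 1, 'success': True, 'output': ['running echo hi']}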
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.get_merged_env
def get_merged_env(self, include_os=False): """ Copying and merging environment variables. Args: include_os (bool): when True, include the OS environment variables (default: False) Returns: dict: environment variables as defined in the pipeline (optionally including system environment variables). """ env = {} if include_os: env.update(os.environ.copy()) for level in range(3): env.update(self.pipeline.data.env_list[level].copy()) return env
python
def get_merged_env(self, include_os=False): """ Copying and merging environment variables. Args: include_os (bool): when True, include the OS environment variables (default: False) Returns: dict: environment variables as defined in the pipeline (optionally including system environment variables). """ env = {} if include_os: env.update(os.environ.copy()) for level in range(3): env.update(self.pipeline.data.env_list[level].copy()) return env
[ "def", "get_merged_env", "(", "self", ",", "include_os", "=", "False", ")", ":", "env", "=", "{", "}", "if", "include_os", ":", "env", ".", "update", "(", "os", ".", "environ", ".", "copy", "(", ")", ")", "for", "level", "in", "range", "(", "3", ")", ":", "env", ".", "update", "(", "self", ".", "pipeline", ".", "data", ".", "env_list", "[", "level", "]", ".", "copy", "(", ")", ")", "return", "env" ]
Copying and merging environment variables. Args: include_os (bool): when True, include the OS environment variables (default: False) Returns: dict: environment variables as defined in the pipeline (optionally including system environment variables).
[ "Copying", "and", "merging", "environment", "variables", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L80-L96
train
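The merge is last-writer-wins: OS variables first (when requested), then pipeline levels 0 through 2 in order, so a task-level `env` block overrides stage- and pipeline-level values. A self-contained sketch with made-up layers:

import os

def merged_env(env_list, include_os=False):
    env = {}
    if include_os:
        env.update(os.environ.copy())
    for level_env in env_list:   # later levels win
        env.update(level_env)
    return env

layers = [{'A': 'pipeline'}, {'A': 'stage', 'B': 'stage'}, {'B': 'task'}]
print(merged_env(layers))  # {'A': 'stage', 'B': 'task'}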
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.prepare_shell_data
def prepare_shell_data(self, shells, key, entry): """Prepare one shell or docker task.""" if self.can_process_shell(entry): if key in ['python']: entry['type'] = key if 'with' in entry and isinstance(entry['with'], str): rendered_with = ast.literal_eval(render(entry['with'], variables=self.pipeline.variables, model=self.pipeline.model, env=self.get_merged_env(include_os=True))) elif 'with' in entry: rendered_with = entry['with'] else: rendered_with = [''] for item in rendered_with: shells.append({ 'id': self.next_task_id, 'creator': key, 'entry': entry, 'model': self.pipeline.model, 'env': self.get_merged_env(), 'item': item, 'dry_run': self.pipeline.options.dry_run, 'debug': self.pipeline.options.debug, 'strict': self.pipeline.options.strict, 'variables': self.pipeline.variables, 'temporary_scripts_path': self.pipeline.options.temporary_scripts_path}) self.next_task_id += 1
python
def prepare_shell_data(self, shells, key, entry): """Prepare one shell or docker task.""" if self.can_process_shell(entry): if key in ['python']: entry['type'] = key if 'with' in entry and isinstance(entry['with'], str): rendered_with = ast.literal_eval(render(entry['with'], variables=self.pipeline.variables, model=self.pipeline.model, env=self.get_merged_env(include_os=True))) elif 'with' in entry: rendered_with = entry['with'] else: rendered_with = [''] for item in rendered_with: shells.append({ 'id': self.next_task_id, 'creator': key, 'entry': entry, 'model': self.pipeline.model, 'env': self.get_merged_env(), 'item': item, 'dry_run': self.pipeline.options.dry_run, 'debug': self.pipeline.options.debug, 'strict': self.pipeline.options.strict, 'variables': self.pipeline.variables, 'temporary_scripts_path': self.pipeline.options.temporary_scripts_path}) self.next_task_id += 1
[ "def", "prepare_shell_data", "(", "self", ",", "shells", ",", "key", ",", "entry", ")", ":", "if", "self", ".", "can_process_shell", "(", "entry", ")", ":", "if", "key", "in", "[", "'python'", "]", ":", "entry", "[", "'type'", "]", "=", "key", "if", "'with'", "in", "entry", "and", "isinstance", "(", "entry", "[", "'with'", "]", ",", "str", ")", ":", "rendered_with", "=", "ast", ".", "literal_eval", "(", "render", "(", "entry", "[", "'with'", "]", ",", "variables", "=", "self", ".", "pipeline", ".", "variables", ",", "model", "=", "self", ".", "pipeline", ".", "model", ",", "env", "=", "self", ".", "get_merged_env", "(", "include_os", "=", "True", ")", ")", ")", "elif", "'with'", "in", "entry", ":", "rendered_with", "=", "entry", "[", "'with'", "]", "else", ":", "rendered_with", "=", "[", "''", "]", "for", "item", "in", "rendered_with", ":", "shells", ".", "append", "(", "{", "'id'", ":", "self", ".", "next_task_id", ",", "'creator'", ":", "key", ",", "'entry'", ":", "entry", ",", "'model'", ":", "self", ".", "pipeline", ".", "model", ",", "'env'", ":", "self", ".", "get_merged_env", "(", ")", ",", "'item'", ":", "item", ",", "'dry_run'", ":", "self", ".", "pipeline", ".", "options", ".", "dry_run", ",", "'debug'", ":", "self", ".", "pipeline", ".", "options", ".", "debug", ",", "'strict'", ":", "self", ".", "pipeline", ".", "options", ".", "strict", ",", "'variables'", ":", "self", ".", "pipeline", ".", "variables", ",", "'temporary_scripts_path'", ":", "self", ".", "pipeline", ".", "options", ".", "temporary_scripts_path", "}", ")", "self", ".", "next_task_id", "+=", "1" ]
Prepare one shell or docker task.
[ "Prepare", "one", "shell", "or", "docker", "task", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L98-L127
train
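When a `with` entry is given as a string, it is first rendered (templates substituted) and then turned into a real Python list via `ast.literal_eval`; the loop then schedules one task per item. A sketch with a hypothetical pre-rendered string standing in for the render step:

import ast

rendered = "['alpha', 'beta', 'gamma']"  # imagine the template renderer produced this
for item in ast.literal_eval(rendered):
    print('would schedule task with item =', item)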
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.process
def process(self, document): """Processing a group of tasks.""" self.logger.info("Processing group of tasks (parallel=%s)", self.get_parallel_mode()) self.pipeline.data.env_list[2] = {} output, shells = [], [] result = Adapter({'success': True, 'output': []}) for task_entry in document: key, entry = list(task_entry.items())[0] if (not self.parallel or key == 'env') and len(shells) > 0: result = Adapter(self.process_shells(shells)) output += result.output shells = [] if not result.success: break if key == 'env': self.pipeline.data.env_list[2].update(entry) elif key in ['shell', 'docker(container)', 'docker(image)', 'python', 'packer', 'ansible(simple)']: self.prepare_shell_data(shells, key, entry) if result.success: result = Adapter(self.process_shells(shells)) output += result.output self.event.delegate(result.success) return {'success': result.success, 'output': output}
python
def process(self, document): """Processing a group of tasks.""" self.logger.info("Processing group of tasks (parallel=%s)", self.get_parallel_mode()) self.pipeline.data.env_list[2] = {} output, shells = [], [] result = Adapter({'success': True, 'output': []}) for task_entry in document: key, entry = list(task_entry.items())[0] if (not self.parallel or key == 'env') and len(shells) > 0: result = Adapter(self.process_shells(shells)) output += result.output shells = [] if not result.success: break if key == 'env': self.pipeline.data.env_list[2].update(entry) elif key in ['shell', 'docker(container)', 'docker(image)', 'python', 'packer', 'ansible(simple)']: self.prepare_shell_data(shells, key, entry) if result.success: result = Adapter(self.process_shells(shells)) output += result.output self.event.delegate(result.success) return {'success': result.success, 'output': output}
[ "def", "process", "(", "self", ",", "document", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Processing group of tasks (parallel=%s)\"", ",", "self", ".", "get_parallel_mode", "(", ")", ")", "self", ".", "pipeline", ".", "data", ".", "env_list", "[", "2", "]", "=", "{", "}", "output", ",", "shells", "=", "[", "]", ",", "[", "]", "result", "=", "Adapter", "(", "{", "'success'", ":", "True", ",", "'output'", ":", "[", "]", "}", ")", "for", "task_entry", "in", "document", ":", "key", ",", "entry", "=", "list", "(", "task_entry", ".", "items", "(", ")", ")", "[", "0", "]", "if", "(", "not", "self", ".", "parallel", "or", "key", "==", "'env'", ")", "and", "len", "(", "shells", ")", ">", "0", ":", "result", "=", "Adapter", "(", "self", ".", "process_shells", "(", "shells", ")", ")", "output", "+=", "result", ".", "output", "shells", "=", "[", "]", "if", "not", "result", ".", "success", ":", "break", "if", "key", "==", "'env'", ":", "self", ".", "pipeline", ".", "data", ".", "env_list", "[", "2", "]", ".", "update", "(", "entry", ")", "elif", "key", "in", "[", "'shell'", ",", "'docker(container)'", ",", "'docker(image)'", ",", "'python'", ",", "'packer'", ",", "'ansible(simple)'", "]", ":", "self", ".", "prepare_shell_data", "(", "shells", ",", "key", ",", "entry", ")", "if", "result", ".", "success", ":", "result", "=", "Adapter", "(", "self", ".", "process_shells", "(", "shells", ")", ")", "output", "+=", "result", ".", "output", "self", ".", "event", ".", "delegate", "(", "result", ".", "success", ")", "return", "{", "'success'", ":", "result", ".", "success", ",", "'output'", ":", "output", "}" ]
Processing a group of tasks.
[ "Processing", "a", "group", "of", "tasks", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L135-L164
train
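The loop above batches consecutive shell-like tasks and flushes the batch whenever an `env` entry appears (and after every task in sequential mode), so environment changes take effect for everything that follows. A toy version of that flush-on-boundary pattern:

def batch_tasks(entries):
    batches, current = [], []
    for key, value in entries:
        if key == 'env' and current:   # env acts as a batch boundary
            batches.append(current)
            current = []
        if key != 'env':
            current.append(value)
    if current:
        batches.append(current)
    return batches

print(batch_tasks([('shell', 's1'), ('shell', 's2'), ('env', {}), ('shell', 's3')]))
# [['s1', 's2'], ['s3']]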
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.process_shells_parallel
def process_shells_parallel(self, shells): """Processing a list of shells in parallel.""" output = [] success = True with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool: for result in [Adapter(entry) for entry in pool.map(worker, [shell for shell in shells])]: output += result.output the_shell = [shell for shell in shells if shell['id'] == result.id][0] self.__handle_variable(the_shell['entry'], result.output) if not result.success: success = False if success: self.logger.info("Parallel Processing Bash code: finished") return {'success': True, 'output': output} for line in self.run_cleanup(shells[0]['env'], 99): output.append(line) self.logger.error("Pipeline has failed: immediately leaving!") self.event.failed() return {'success': False, 'output': output}
python
def process_shells_parallel(self, shells): """Processing a list of shells in parallel.""" output = [] success = True with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool: for result in [Adapter(entry) for entry in pool.map(worker, [shell for shell in shells])]: output += result.output the_shell = [shell for shell in shells if shell['id'] == result.id][0] self.__handle_variable(the_shell['entry'], result.output) if not result.success: success = False if success: self.logger.info("Parallel Processing Bash code: finished") return {'success': True, 'output': output} for line in self.run_cleanup(shells[0]['env'], 99): output.append(line) self.logger.error("Pipeline has failed: immediately leaving!") self.event.failed() return {'success': False, 'output': output}
[ "def", "process_shells_parallel", "(", "self", ",", "shells", ")", ":", "output", "=", "[", "]", "success", "=", "True", "with", "closing", "(", "multiprocessing", ".", "Pool", "(", "multiprocessing", ".", "cpu_count", "(", ")", ")", ")", "as", "pool", ":", "for", "result", "in", "[", "Adapter", "(", "entry", ")", "for", "entry", "in", "pool", ".", "map", "(", "worker", ",", "[", "shell", "for", "shell", "in", "shells", "]", ")", "]", ":", "output", "+=", "result", ".", "output", "the_shell", "=", "[", "shell", "for", "shell", "in", "shells", "if", "shell", "[", "'id'", "]", "==", "result", ".", "id", "]", "[", "0", "]", "self", ".", "__handle_variable", "(", "the_shell", "[", "'entry'", "]", ",", "result", ".", "output", ")", "if", "not", "result", ".", "success", ":", "success", "=", "False", "if", "success", ":", "self", ".", "logger", ".", "info", "(", "\"Parallel Processing Bash code: finished\"", ")", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}", "for", "line", "in", "self", ".", "run_cleanup", "(", "shells", "[", "0", "]", "[", "'env'", "]", ",", "99", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "error", "(", "\"Pipeline has failed: immediately leaving!\"", ")", "self", ".", "event", ".", "failed", "(", ")", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}" ]
Processing a list of shells in parallel.
[ "Processing", "a", "list", "of", "shells", "parallel", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L166-L185
train
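A reduced sketch of the fan-out: `contextlib.closing` guarantees the pool is closed even on interpreters where Pool is not a context manager (an inference from the code style, which looks written for Python 2 compatibility), and each result is matched back to its shell by id since a result dict only carries the id, not the shell itself:

import multiprocessing
from contextlib import closing

def worker(shell):
    return {'id': shell['id'], 'success': True, 'output': ['done %d' % shell['id']]}

if __name__ == '__main__':
    shells = [{'id': n} for n in range(4)]
    with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool:
        results = pool.map(worker, shells)
    for result in results:
        the_shell = [s for s in shells if s['id'] == result['id']][0]
        print(the_shell['id'], result['output'])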
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.process_shells_ordered
def process_shells_ordered(self, shells): """Processing a list of shells one after the other.""" output = [] for shell in shells: entry = shell['entry'] config = ShellConfig(script=entry['script'], title=entry['title'] if 'title' in entry else '', model=shell['model'], env=shell['env'], item=shell['item'], dry_run=shell['dry_run'], debug=shell['debug'], strict=shell['strict'], variables=shell['variables'], temporary_scripts_path=shell['temporary_scripts_path']) result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config)) output += result.output self.__handle_variable(entry, result.output) if not result.success: return {'success': False, 'output': output} return {'success': True, 'output': output}
python
def process_shells_ordered(self, shells): """Processing a list of shells one after the other.""" output = [] for shell in shells: entry = shell['entry'] config = ShellConfig(script=entry['script'], title=entry['title'] if 'title' in entry else '', model=shell['model'], env=shell['env'], item=shell['item'], dry_run=shell['dry_run'], debug=shell['debug'], strict=shell['strict'], variables=shell['variables'], temporary_scripts_path=shell['temporary_scripts_path']) result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config)) output += result.output self.__handle_variable(entry, result.output) if not result.success: return {'success': False, 'output': output} return {'success': True, 'output': output}
[ "def", "process_shells_ordered", "(", "self", ",", "shells", ")", ":", "output", "=", "[", "]", "for", "shell", "in", "shells", ":", "entry", "=", "shell", "[", "'entry'", "]", "config", "=", "ShellConfig", "(", "script", "=", "entry", "[", "'script'", "]", ",", "title", "=", "entry", "[", "'title'", "]", "if", "'title'", "in", "entry", "else", "''", ",", "model", "=", "shell", "[", "'model'", "]", ",", "env", "=", "shell", "[", "'env'", "]", ",", "item", "=", "shell", "[", "'item'", "]", ",", "dry_run", "=", "shell", "[", "'dry_run'", "]", ",", "debug", "=", "shell", "[", "'debug'", "]", ",", "strict", "=", "shell", "[", "'strict'", "]", ",", "variables", "=", "shell", "[", "'variables'", "]", ",", "temporary_scripts_path", "=", "shell", "[", "'temporary_scripts_path'", "]", ")", "result", "=", "Adapter", "(", "self", ".", "process_shell", "(", "get_creator_by_name", "(", "shell", "[", "'creator'", "]", ")", ",", "entry", ",", "config", ")", ")", "output", "+=", "result", ".", "output", "self", ".", "__handle_variable", "(", "entry", ",", "result", ".", "output", ")", "if", "not", "result", ".", "success", ":", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}" ]
Processing a list of shells one after the other.
[ "Processing", "a", "list", "of", "shells", "one", "after", "the", "other", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L187-L202
train
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.process_shells
def process_shells(self, shells): """Processing a list of shells.""" result = {'success': True, 'output': []} if self.parallel and len(shells) > 1: result = self.process_shells_parallel(shells) elif len(shells) > 0: result = self.process_shells_ordered(shells) return result
python
def process_shells(self, shells): """Processing a list of shells.""" result = {'success': True, 'output': []} if self.parallel and len(shells) > 1: result = self.process_shells_parallel(shells) elif len(shells) > 0: result = self.process_shells_ordered(shells) return result
[ "def", "process_shells", "(", "self", ",", "shells", ")", ":", "result", "=", "{", "'success'", ":", "True", ",", "'output'", ":", "[", "]", "}", "if", "self", ".", "parallel", "and", "len", "(", "shells", ")", ">", "1", ":", "result", "=", "self", ".", "process_shells_parallel", "(", "shells", ")", "elif", "len", "(", "shells", ")", ">", "0", ":", "result", "=", "self", ".", "process_shells_ordered", "(", "shells", ")", "return", "result" ]
Processing a list of shells.
[ "Processing", "a", "list", "of", "shells", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L204-L211
train
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.process_shell
def process_shell(self, creator, entry, config): """Processing a shell entry.""" self.logger.info("Processing Bash code: start") output = [] shell = creator(entry, config) for line in shell.process(): output.append(line) self.logger.info(" | %s", line) if shell.success: self.logger.info("Processing Bash code: finished") return {'success': True, 'output': output} for line in self.run_cleanup(config.env, shell.exit_code): output.append(line) self.logger.error("Pipeline has failed: leaving as soon as possible!") self.event.failed() return {'success': False, 'output': output}
python
def process_shell(self, creator, entry, config): """Processing a shell entry.""" self.logger.info("Processing Bash code: start") output = [] shell = creator(entry, config) for line in shell.process(): output.append(line) self.logger.info(" | %s", line) if shell.success: self.logger.info("Processing Bash code: finished") return {'success': True, 'output': output} for line in self.run_cleanup(config.env, shell.exit_code): output.append(line) self.logger.error("Pipeline has failed: leaving as soon as possible!") self.event.failed() return {'success': False, 'output': output}
[ "def", "process_shell", "(", "self", ",", "creator", ",", "entry", ",", "config", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Processing Bash code: start\"", ")", "output", "=", "[", "]", "shell", "=", "creator", "(", "entry", ",", "config", ")", "for", "line", "in", "shell", ".", "process", "(", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "info", "(", "\" | %s\"", ",", "line", ")", "if", "shell", ".", "success", ":", "self", ".", "logger", ".", "info", "(", "\"Processing Bash code: finished\"", ")", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}", "for", "line", "in", "self", ".", "run_cleanup", "(", "config", ".", "env", ",", "shell", ".", "exit_code", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "error", "(", "\"Pipeline has failed: leaving as soon as possible!\"", ")", "self", ".", "event", ".", "failed", "(", ")", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}" ]
Processing a shell entry.
[ "Processing", "a", "shell", "entry", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L230-L249
train
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.run_cleanup
def run_cleanup(self, env, exit_code): """Run cleanup hook when configured.""" output = [] if self.pipeline.data.hooks and len(self.pipeline.data.hooks.cleanup) > 0: env.update({'PIPELINE_RESULT': 'FAILURE'}) env.update({'PIPELINE_SHELL_EXIT_CODE': str(exit_code)}) config = ShellConfig(script=self.pipeline.data.hooks.cleanup, model=self.pipeline.model, env=env, dry_run=self.pipeline.options.dry_run, debug=self.pipeline.options.debug, strict=self.pipeline.options.strict, temporary_scripts_path=self.pipeline.options.temporary_scripts_path) cleanup_shell = Bash(config) for line in cleanup_shell.process(): output.append(line) self.logger.info(" | %s", line) return output
python
def run_cleanup(self, env, exit_code): """Run cleanup hook when configured.""" output = [] if self.pipeline.data.hooks and len(self.pipeline.data.hooks.cleanup) > 0: env.update({'PIPELINE_RESULT': 'FAILURE'}) env.update({'PIPELINE_SHELL_EXIT_CODE': str(exit_code)}) config = ShellConfig(script=self.pipeline.data.hooks.cleanup, model=self.pipeline.model, env=env, dry_run=self.pipeline.options.dry_run, debug=self.pipeline.options.debug, strict=self.pipeline.options.strict, temporary_scripts_path=self.pipeline.options.temporary_scripts_path) cleanup_shell = Bash(config) for line in cleanup_shell.process(): output.append(line) self.logger.info(" | %s", line) return output
[ "def", "run_cleanup", "(", "self", ",", "env", ",", "exit_code", ")", ":", "output", "=", "[", "]", "if", "self", ".", "pipeline", ".", "data", ".", "hooks", "and", "len", "(", "self", ".", "pipeline", ".", "data", ".", "hooks", ".", "cleanup", ")", ">", "0", ":", "env", ".", "update", "(", "{", "'PIPELINE_RESULT'", ":", "'FAILURE'", "}", ")", "env", ".", "update", "(", "{", "'PIPELINE_SHELL_EXIT_CODE'", ":", "str", "(", "exit_code", ")", "}", ")", "config", "=", "ShellConfig", "(", "script", "=", "self", ".", "pipeline", ".", "data", ".", "hooks", ".", "cleanup", ",", "model", "=", "self", ".", "pipeline", ".", "model", ",", "env", "=", "env", ",", "dry_run", "=", "self", ".", "pipeline", ".", "options", ".", "dry_run", ",", "debug", "=", "self", ".", "pipeline", ".", "options", ".", "debug", ",", "strict", "=", "self", ".", "pipeline", ".", "options", ".", "strict", ",", "temporary_scripts_path", "=", "self", ".", "pipeline", ".", "options", ".", "temporary_scripts_path", ")", "cleanup_shell", "=", "Bash", "(", "config", ")", "for", "line", "in", "cleanup_shell", ".", "process", "(", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "info", "(", "\" | %s\"", ",", "line", ")", "return", "output" ]
Run cleanup hook when configured.
[ "Run", "cleanup", "hook", "when", "configured", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L251-L267
train
Nachtfeuer/pipeline
spline/components/tasks.py
Tasks.__handle_variable
def __handle_variable(self, shell_entry, output): """ Saving output for configured variable name. Args: shell_entry (dict): shell based configuration (shell, docker container or Python). output: list of strings representing output of last shell """ if 'variable' in shell_entry: variable_name = shell_entry['variable'] self.pipeline.variables[variable_name] = "\n".join(output)
python
def __handle_variable(self, shell_entry, output): """ Saving output for configured variable name. Args: shell_entry (dict): shell based configuration (shell, docker container or Python). output: list of strings representing output of last shell """ if 'variable' in shell_entry: variable_name = shell_entry['variable'] self.pipeline.variables[variable_name] = "\n".join(output)
[ "def", "__handle_variable", "(", "self", ",", "shell_entry", ",", "output", ")", ":", "if", "'variable'", "in", "shell_entry", ":", "variable_name", "=", "shell_entry", "[", "'variable'", "]", "self", ".", "pipeline", ".", "variables", "[", "variable_name", "]", "=", "\"\\n\"", ".", "join", "(", "output", ")" ]
Saving output for configured variable name. Args: shell_entry (dict): shell based configuration (shell, docker container or Python). output: list of strings representing output of last shell
[ "Saving", "output", "for", "configured", "variable", "name", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L269-L279
train
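The variable capture is just a newline-join of the task's output lines, stored under the configured name. A tiny illustration with placeholder names:

variables = {}

def handle_variable(shell_entry, output):
    if 'variable' in shell_entry:
        variables[shell_entry['variable']] = "\n".join(output)

handle_variable({'variable': 'greeting'}, ['hello', 'world'])
print(repr(variables['greeting']))  # 'hello\nworld'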
acutesoftware/AIKIF
scripts/examples/finance_example.py
main
def main(): """ This is the main body of the process that does the work. Summary: - load the raw data - read in rules list - create log events for AIKIF according to rules [map] - create new facts / reports based on rules [report] OUTPUT = AIKIF mapping : Date_of_transaction => event AIKIF mapping : Amount => fact AIKIF mapping : Details => location New column : trans_type = DB WHERE amount > 0 ELSE CR summing : details contains "CALTEX" into Travel Expense Done """ print('AIKIF example: Processing Finance data\n') data = read_bank_statements('your_statement.csv') print(data) maps = load_column_maps() rules = load_rules() for m in maps: print('AIKIF mapping : ' + m[0] + ' => ' + m[1]) for rule in rules: #print(rule) if rule[0] == 'agg': print('summing : ' + rule[1] + ' into ' + rule[2] ) elif rule[0] == 'derive': print('New column : ' + rule[1] + ' = ' + rule[2] + ' WHERE ' + rule[1] + ' ELSE ' + rule[3] ) print('Done\n')
python
def main(): """ This is the main body of the process that does the work. Summary: - load the raw data - read in rules list - create log events for AIKIF according to rules [map] - create new facts / reports based on rules [report] OUTPUT = AIKIF mapping : Date_of_transaction => event AIKIF mapping : Amount => fact AIKIF mapping : Details => location New column : trans_type = DB WHERE amount > 0 ELSE CR summing : details contains "CALTEX" into Travel Expense Done """ print('AIKIF example: Processing Finance data\n') data = read_bank_statements('your_statement.csv') print(data) maps = load_column_maps() rules = load_rules() for m in maps: print('AIKIF mapping : ' + m[0] + ' => ' + m[1]) for rule in rules: #print(rule) if rule[0] == 'agg': print('summing : ' + rule[1] + ' into ' + rule[2] ) elif rule[0] == 'derive': print('New column : ' + rule[1] + ' = ' + rule[2] + ' WHERE ' + rule[1] + ' ELSE ' + rule[3] ) print('Done\n')
[ "def", "main", "(", ")", ":", "print", "(", "'AIKIF example: Processing Finance data\\n'", ")", "data", "=", "read_bank_statements", "(", "'your_statement.csv'", ")", "print", "(", "data", ")", "maps", "=", "load_column_maps", "(", ")", "rules", "=", "load_rules", "(", ")", "for", "m", "in", "maps", ":", "print", "(", "'AIKIF mapping : '", "+", "m", "[", "0", "]", "+", "' => '", "+", "m", "[", "1", "]", ")", "for", "rule", "in", "rules", ":", "#print(rule)", "if", "rule", "[", "0", "]", "==", "'agg'", ":", "print", "(", "'summing : '", "+", "rule", "[", "1", "]", "+", "' into '", "+", "rule", "[", "2", "]", ")", "elif", "rule", "[", "0", "]", "==", "'derive'", ":", "print", "(", "'New column : '", "+", "rule", "[", "1", "]", "+", "' = '", "+", "rule", "[", "2", "]", "+", "' WHERE '", "+", "rule", "[", "1", "]", "+", "' ELSE '", "+", "rule", "[", "3", "]", ")", "print", "(", "'Done\\n'", ")" ]
This is the main body of the process that does the work. Summary: - load the raw data - read in rules list - create log events for AIKIF according to rules [map] - create new facts / reports based on rules [report] OUTPUT = AIKIF mapping : Date_of_transaction => event AIKIF mapping : Amount => fact AIKIF mapping : Details => location New column : trans_type = DB WHERE amount > 0 ELSE CR summing : details contains "CALTEX" into Travel Expense Done
[ "This", "is", "the", "main", "body", "of", "the", "process", "that", "does", "the", "work", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/finance_example.py#L25-L61
train
HackerEarth/he-sdk-python
hackerearth/parameters.py
BaseAPIParameters._clean_params
def _clean_params(self, params): """Removes parameters whose values are set to None. """ clean_params = {} for key, value in params.iteritems(): if value is not None: clean_params[key] = value return clean_params
python
def _clean_params(self, params): """Removes parameters whose values are set to None. """ clean_params = {} for key, value in params.iteritems(): if value is not None: clean_params[key] = value return clean_params
[ "def", "_clean_params", "(", "self", ",", "params", ")", ":", "clean_params", "=", "{", "}", "for", "key", ",", "value", "in", "params", ".", "iteritems", "(", ")", ":", "if", "value", "is", "not", "None", ":", "clean_params", "[", "key", "]", "=", "value", "return", "clean_params" ]
Removes parameters whose values are set to None.
[ "Removes", "parameters", "whose", "values", "are", "set", "to", "None", "." ]
ca718afaf70a4239af1adf09ee248a076864b5fe
https://github.com/HackerEarth/he-sdk-python/blob/ca718afaf70a4239af1adf09ee248a076864b5fe/hackerearth/parameters.py#L48-L56
train
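`iteritems()` pins this helper to Python 2; on Python 3 the same cleanup is a one-line dict comprehension (a sketch, not part of the SDK):

def clean_params(params):
    return {key: value for key, value in params.items() if value is not None}

print(clean_params({'lang': 'python', 'time_limit': None}))  # {'lang': 'python'}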
OpenHydrology/floodestimation
floodestimation/entities.py
Catchment.distance_to
def distance_to(self, other_catchment): """ Returns the distance between the centroids of two catchments in kilometers. :param other_catchment: Catchment to calculate distance to :type other_catchment: :class:`.Catchment` :return: Distance between the catchments in km. :rtype: float """ try: if self.country == other_catchment.country: try: return 0.001 * hypot(self.descriptors.centroid_ngr.x - other_catchment.descriptors.centroid_ngr.x, self.descriptors.centroid_ngr.y - other_catchment.descriptors.centroid_ngr.y) except TypeError: # In case no centroid available, just return infinity which is helpful in most cases return float('+inf') else: # If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity. return float('+inf') except (TypeError, KeyError): raise InsufficientDataError("Catchment `descriptors` attribute must be set first.")
python
def distance_to(self, other_catchment): """ Returns the distance between the centroids of two catchments in kilometers. :param other_catchment: Catchment to calculate distance to :type other_catchment: :class:`.Catchment` :return: Distance between the catchments in km. :rtype: float """ try: if self.country == other_catchment.country: try: return 0.001 * hypot(self.descriptors.centroid_ngr.x - other_catchment.descriptors.centroid_ngr.x, self.descriptors.centroid_ngr.y - other_catchment.descriptors.centroid_ngr.y) except TypeError: # In case no centroid available, just return infinity which is helpful in most cases return float('+inf') else: # If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity. return float('+inf') except (TypeError, KeyError): raise InsufficientDataError("Catchment `descriptors` attribute must be set first.")
[ "def", "distance_to", "(", "self", ",", "other_catchment", ")", ":", "try", ":", "if", "self", ".", "country", "==", "other_catchment", ".", "country", ":", "try", ":", "return", "0.001", "*", "hypot", "(", "self", ".", "descriptors", ".", "centroid_ngr", ".", "x", "-", "other_catchment", ".", "descriptors", ".", "centroid_ngr", ".", "x", ",", "self", ".", "descriptors", ".", "centroid_ngr", ".", "y", "-", "other_catchment", ".", "descriptors", ".", "centroid_ngr", ".", "y", ")", "except", "TypeError", ":", "# In case no centroid available, just return infinity which is helpful in most cases", "return", "float", "(", "'+inf'", ")", "else", ":", "# If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity.", "return", "float", "(", "'+inf'", ")", "except", "(", "TypeError", ",", "KeyError", ")", ":", "raise", "InsufficientDataError", "(", "\"Catchment `descriptors` attribute must be set first.\"", ")" ]
Returns the distance between the centroids of two catchments in kilometers. :param other_catchment: Catchment to calculate distance to :type other_catchment: :class:`.Catchment` :return: Distance between the catchments in km. :rtype: float
[ "Returns", "the", "distance", "between", "the", "centroids", "of", "two", "catchments", "in", "kilometers", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L137-L158
train
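The 0.001 factor converts metres to kilometres, so the computation assumes centroid coordinates on a metric grid (British national grid eastings/northings). A worked example with hypothetical centroids:

from math import hypot

a = (362500.0, 401000.0)  # easting, northing in metres (made-up values)
b = (365500.0, 405000.0)
print(0.001 * hypot(a[0] - b[0], a[1] - b[1]))  # 5.0 km (a 3-4-5 triangle)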
OpenHydrology/floodestimation
floodestimation/entities.py
Descriptors.urbext
def urbext(self, year): """ Estimate the `urbext2000` parameter for a given year assuming a nation-wide urbanisation curve. Methodology source: eqn 5.5, report FD1919/TR :param year: Year to provide estimate for :type year: float :return: Urban extent parameter :rtype: float """ # Decimal places increased to ensure year 2000 corresponds with 1 urban_expansion = 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998) try: return self.catchment.descriptors.urbext2000 * urban_expansion except TypeError: # Sometimes urbext2000 is not set, assume zero return 0
python
def urbext(self, year): """ Estimate the `urbext2000` parameter for a given year assuming a nation-wide urbanisation curve. Methodology source: eqn 5.5, report FD1919/TR :param year: Year to provide estimate for :type year: float :return: Urban extent parameter :rtype: float """ # Decimal places increased to ensure year 2000 corresponds with 1 urban_expansion = 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998) try: return self.catchment.descriptors.urbext2000 * urban_expansion except TypeError: # Sometimes urbext2000 is not set, assume zero return 0
[ "def", "urbext", "(", "self", ",", "year", ")", ":", "# Decimal places increased to ensure year 2000 corresponds with 1", "urban_expansion", "=", "0.7851", "+", "0.2124", "*", "atan", "(", "(", "year", "-", "1967.5", ")", "/", "20.331792998", ")", "try", ":", "return", "self", ".", "catchment", ".", "descriptors", ".", "urbext2000", "*", "urban_expansion", "except", "TypeError", ":", "# Sometimes urbext2000 is not set, assume zero", "return", "0" ]
Estimate the `urbext2000` parameter for a given year assuming a nation-wide urbanisation curve. Methodology source: eqn 5.5, report FD1919/TR :param year: Year to provide estimate for :type year: float :return: Urban extent parameter :rtype: float
[ "Estimate", "the", "urbext2000", "parameter", "for", "a", "given", "year", "assuming", "a", "nation", "-", "wide", "urbanisation", "curve", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L273-L291
train
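A quick numeric check of the urbanisation curve: the constants are chosen so the factor is almost exactly 1 at the year 2000, which is why `urbext2000` is the anchor value being scaled:

from math import atan

for year in (1970, 2000, 2050):
    factor = 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998)
    print(year, round(factor, 4))
# 2000 yields ~1.0; earlier years scale urbext2000 down, later years up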
OpenHydrology/floodestimation
floodestimation/entities.py
PotDataset.continuous_periods
def continuous_periods(self): """ Return a list of continuous data periods by removing the data gaps from the overall record. """ result = [] # For the first period start_date = self.start_date for gap in self.pot_data_gaps: end_date = gap.start_date - timedelta(days=1) result.append(PotPeriod(start_date, end_date)) # For the next period start_date = gap.end_date + timedelta(days=1) # For the last period end_date = self.end_date result.append(PotPeriod(start_date, end_date)) return result
python
def continuous_periods(self): """ Return a list of continuous data periods by removing the data gaps from the overall record. """ result = [] # For the first period start_date = self.start_date for gap in self.pot_data_gaps: end_date = gap.start_date - timedelta(days=1) result.append(PotPeriod(start_date, end_date)) # For the next period start_date = gap.end_date + timedelta(days=1) # For the last period end_date = self.end_date result.append(PotPeriod(start_date, end_date)) return result
[ "def", "continuous_periods", "(", "self", ")", ":", "result", "=", "[", "]", "# For the first period", "start_date", "=", "self", ".", "start_date", "for", "gap", "in", "self", ".", "pot_data_gaps", ":", "end_date", "=", "gap", ".", "start_date", "-", "timedelta", "(", "days", "=", "1", ")", "result", ".", "append", "(", "PotPeriod", "(", "start_date", ",", "end_date", ")", ")", "# For the next period", "start_date", "=", "gap", ".", "end_date", "+", "timedelta", "(", "days", "=", "1", ")", "# For the last period", "end_date", "=", "self", ".", "end_date", "result", ".", "append", "(", "PotPeriod", "(", "start_date", ",", "end_date", ")", ")", "return", "result" ]
Return a list of continuous data periods by removing the data gaps from the overall record.
[ "Return", "a", "list", "of", "continuous", "data", "periods", "by", "removing", "the", "data", "gaps", "from", "the", "overall", "record", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L401-L418
train
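The period reconstruction walks the gaps in order: each gap closes the current period one day before the gap starts and opens the next one day after it ends. A self-contained sketch with a stand-in Period tuple (the real code uses PotPeriod):

from collections import namedtuple
from datetime import date, timedelta

Period = namedtuple('Period', 'start_date end_date')

def continuous_periods(start, end, gaps):
    result, cursor = [], start
    for gap in gaps:
        result.append(Period(cursor, gap.start_date - timedelta(days=1)))
        cursor = gap.end_date + timedelta(days=1)
    result.append(Period(cursor, end))
    return result

gaps = [Period(date(2000, 6, 1), date(2000, 6, 10))]
for period in continuous_periods(date(2000, 1, 1), date(2000, 12, 31), gaps):
    print(period.start_date, '->', period.end_date)
# 2000-01-01 -> 2000-05-31 and 2000-06-11 -> 2000-12-31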
Nachtfeuer/pipeline
spline/tools/memfiles.py
InMemoryFiles.add_path
def add_path(self, path, path_filter=None): """ Adding all files from given path to the object. Args: path (str): valid, existing directory path_filter (callable): optional predicate; only files for which it returns True are added """ for root, _, files in os.walk(path): for filename in files: full_path_and_filename = os.path.join(root, filename) if path_filter is None or path_filter(full_path_and_filename): relative_path_and_filename = full_path_and_filename.replace(path + '/', '') with open(full_path_and_filename, 'rb') as handle: self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8')
python
def add_path(self, path, path_filter=None): """ Adding all files from given path to the object. Args: path (str): valid, existing directory path_filter (callable): optional predicate; only files for which it returns True are added """ for root, _, files in os.walk(path): for filename in files: full_path_and_filename = os.path.join(root, filename) if path_filter is None or path_filter(full_path_and_filename): relative_path_and_filename = full_path_and_filename.replace(path + '/', '') with open(full_path_and_filename, 'rb') as handle: self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8')
[ "def", "add_path", "(", "self", ",", "path", ",", "path_filter", "=", "None", ")", ":", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "filename", "in", "files", ":", "full_path_and_filename", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "if", "path_filter", "is", "None", "or", "path_filter", "(", "full_path_and_filename", ")", ":", "relative_path_and_filename", "=", "full_path_and_filename", ".", "replace", "(", "path", "+", "'/'", ",", "''", ")", "with", "open", "(", "full_path_and_filename", ",", "'rb'", ")", "as", "handle", ":", "self", ".", "files", "[", "relative_path_and_filename", "]", "=", "b64encode", "(", "handle", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")" ]
Adding all files from given path to the object. Args: path (str): valid, existing directory path_filter (callable): optional predicate; only files for which it returns True are added
[ "Adding", "all", "files", "from", "given", "path", "to", "the", "object", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L42-L55
train
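A standalone version of the walk-and-encode loop; note the original computes the relative path with a string replace of `path + '/'`, which assumes a Unix-style path with no trailing separator, whereas `os.path.relpath` (used below) is portable:

import os
from base64 import b64encode

def collect_files(path, path_filter=None):
    files = {}
    for root, _, names in os.walk(path):
        for name in names:
            full = os.path.join(root, name)
            if path_filter is None or path_filter(full):
                rel = os.path.relpath(full, path)  # portable relative path
                with open(full, 'rb') as handle:
                    files[rel] = b64encode(handle.read()).decode('utf-8')
    return files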
Nachtfeuer/pipeline
spline/tools/memfiles.py
InMemoryFiles.from_json
def from_json(data): """ Convert JSON into an in-memory file storage. Args: data (str): valid JSON with path and filenames and the base64 encoding of the file content. Returns: InMemoryFiles: in-memory file storage """ memfiles = InMemoryFiles() memfiles.files = json.loads(data) return memfiles
python
def from_json(data): """ Convert JSON into an in-memory file storage. Args: data (str): valid JSON with path and filenames and the base64 encoding of the file content. Returns: InMemoryFiles: in-memory file storage """ memfiles = InMemoryFiles() memfiles.files = json.loads(data) return memfiles
[ "def", "from_json", "(", "data", ")", ":", "memfiles", "=", "InMemoryFiles", "(", ")", "memfiles", ".", "files", "=", "json", ".", "loads", "(", "data", ")", "return", "memfiles" ]
Convert JSON into an in-memory file storage. Args: data (str): valid JSON with path and filenames and the base64 encoding of the file content. Returns: InMemoryFiles: in-memory file storage
[ "Convert", "JSON", "into", "a", "in", "memory", "file", "storage", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L84-L97
train
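Round-tripping is the intended use: files are stored as base64 strings in a JSON object, so decoding is one `b64decode` per entry. A minimal sketch:

import json
from base64 import b64decode

data = json.dumps({'hello.txt': 'aGVsbG8='})  # base64 of b'hello'
files = json.loads(data)
for name, payload in files.items():
    print(name, b64decode(payload))  # hello.txt b'hello'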
acutesoftware/AIKIF
aikif/toolbox/file_tools.py
delete_file
def delete_file(f, ignore_errors=False): """ delete a single file """ try: os.remove(f) except Exception as ex: if ignore_errors: return print('ERROR deleting file ' + str(ex))
python
def delete_file(f, ignore_errors=False): """ delete a single file """ try: os.remove(f) except Exception as ex: if ignore_errors: return print('ERROR deleting file ' + str(ex))
[ "def", "delete_file", "(", "f", ",", "ignore_errors", "=", "False", ")", ":", "try", ":", "os", ".", "remove", "(", "f", ")", "except", "Exception", "as", "ex", ":", "if", "ignore_errors", ":", "return", "print", "(", "'ERROR deleting file '", "+", "str", "(", "ex", ")", ")" ]
delete a single file
[ "delete", "a", "single", "file" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L36-L45
train
acutesoftware/AIKIF
aikif/toolbox/file_tools.py
delete_files_in_folder
def delete_files_in_folder(fldr): """ delete all files in folder 'fldr' """ fl = glob.glob(fldr + os.sep + '*.*') for f in fl: delete_file(f, True)
python
def delete_files_in_folder(fldr): """ delete all files in folder 'fldr' """ fl = glob.glob(fldr + os.sep + '*.*') for f in fl: delete_file(f, True)
[ "def", "delete_files_in_folder", "(", "fldr", ")", ":", "fl", "=", "glob", ".", "glob", "(", "fldr", "+", "os", ".", "sep", "+", "'*.*'", ")", "for", "f", "in", "fl", ":", "delete_file", "(", "f", ",", "True", ")" ]
delete all files in folder 'fldr'
[ "delete", "all", "files", "in", "folder", "fldr" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L47-L53
train
acutesoftware/AIKIF
aikif/toolbox/file_tools.py
copy_file
def copy_file(src, dest): """ copy single file """ try: shutil.copy2(src , dest) except Exception as ex: print('ERROR copying file' + str(ex))
python
def copy_file(src, dest): """ copy single file """ try: shutil.copy2(src , dest) except Exception as ex: print('ERROR copying file' + str(ex))
[ "def", "copy_file", "(", "src", ",", "dest", ")", ":", "try", ":", "shutil", ".", "copy2", "(", "src", ",", "dest", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'ERROR copying file'", "+", "str", "(", "ex", ")", ")" ]
copy single file
[ "copy", "single", "file" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L55-L62
train
acutesoftware/AIKIF
aikif/toolbox/file_tools.py
copy_files_to_folder
def copy_files_to_folder(src, dest, xtn='*.txt'): """ copies all the files from src to dest folder """ try: all_files = glob.glob(os.path.join(src,xtn)) for f in all_files: copy_file(f, dest) except Exception as ex: print('ERROR copy_files_to_folder - ' + str(ex))
python
def copy_files_to_folder(src, dest, xtn='*.txt'): """ copies all the files from src to dest folder """ try: all_files = glob.glob(os.path.join(src,xtn)) for f in all_files: copy_file(f, dest) except Exception as ex: print('ERROR copy_files_to_folder - ' + str(ex))
[ "def", "copy_files_to_folder", "(", "src", ",", "dest", ",", "xtn", "=", "'*.txt'", ")", ":", "try", ":", "all_files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "src", ",", "xtn", ")", ")", "for", "f", "in", "all_files", ":", "copy_file", "(", "f", ",", "dest", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'ERROR copy_files_to_folder - '", "+", "str", "(", "ex", ")", ")" ]
copies all the files from src to dest folder
[ "copies", "all", "the", "files", "from", "src", "to", "dest", "folder" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L68-L78
train
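A short usage sketch for copy_files_to_folder above; the paths and pattern are illustrative. Note that failures are printed rather than raised, so the call itself never throws.

    # copy every CSV report out of a working folder into a backup folder
    copy_files_to_folder('/tmp/reports', '/tmp/backup', xtn='*.csv')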
acutesoftware/AIKIF
scripts/install_data.py
main
def main(): """ script to set up folder structures for AIKIF and prepare data tables. """ print('\n\n /------- AIKIF Installation --------\\') print(' | s. show current setup |') print(' | f. setup folder structures |') print(' | c. create sample data |') # not yet - wait for beta release print(' w. wipe data and install everything from scratch') print(' | q. quit |') print(' \\-----------------------------------/') cmd = input('?') if cmd == 's': show_setup() elif cmd == 'f': setup_folders() elif cmd == 'c': create_sample_data() #elif cmd == 'w': # wipe_and_rebuild_all() elif cmd == 'q': exit(0) main()
python
def main(): """ script to set up folder structures for AIKIF and prepare data tables. """ print('\n\n /------- AIKIF Installation --------\\') print(' | s. show current setup |') print(' | f. setup folder structures |') print(' | c. create sample data |') # not yet - wait for beta release print(' w. wipe data and install everything from scratch') print(' | q. quit |') print(' \\-----------------------------------/') cmd = input('?') if cmd == 's': show_setup() elif cmd == 'f': setup_folders() elif cmd == 'c': create_sample_data() #elif cmd == 'w': # wipe_and_rebuild_all() elif cmd == 'q': exit(0) main()
[ "def", "main", "(", ")", ":", "print", "(", "'\\n\\n /------- AIKIF Installation --------\\\\'", ")", "print", "(", "' | s. show current setup |'", ")", "print", "(", "' | f. setup folder structures |'", ")", "print", "(", "' | c. create sample data |'", ")", "# not yet - wait for beta release print(' w. wipe data and install everything from scratch')", "print", "(", "' | q. quit |'", ")", "print", "(", "' \\\\-----------------------------------/'", ")", "cmd", "=", "input", "(", "'?'", ")", "if", "cmd", "==", "'s'", ":", "show_setup", "(", ")", "elif", "cmd", "==", "'f'", ":", "setup_folders", "(", ")", "elif", "cmd", "==", "'c'", ":", "create_sample_data", "(", ")", "#elif cmd == 'w':", "# wipe_and_rebuild_all()", "elif", "cmd", "==", "'q'", ":", "exit", "(", "0", ")", "main", "(", ")" ]
script to set up folder structures for AIKIF and prepare data tables.
[ "script", "to", "setup", "folder", "structures", "for", "AIKIF", "and", "prepare", "data", "tables", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/install_data.py#L8-L31
train
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
load_graph_from_rdf
def load_graph_from_rdf(fname): """ reads an RDF file into a graph """ print("reading RDF from " + fname + "....") store = Graph() store.parse(fname, format="n3") print("Loaded " + str(len(store)) + " tuples") return store
python
def load_graph_from_rdf(fname): """ reads an RDF file into a graph """ print("reading RDF from " + fname + "....") store = Graph() store.parse(fname, format="n3") print("Loaded " + str(len(store)) + " tuples") return store
[ "def", "load_graph_from_rdf", "(", "fname", ")", ":", "print", "(", "\"reading RDF from \"", "+", "fname", "+", "\"....\"", ")", "store", "=", "Graph", "(", ")", "store", ".", "parse", "(", "fname", ",", "format", "=", "\"n3\"", ")", "print", "(", "\"Loaded \"", "+", "str", "(", "len", "(", "store", ")", ")", "+", "\" tuples\"", ")", "return", "store" ]
reads an RDF file into a graph
[ "reads", "an", "RDF", "file", "into", "a", "graph" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L20-L26
train
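A minimal rdflib sketch of the same load, with an illustrative filename; note that len(store) counts RDF triples (the messages in the record say "tuples").

    from rdflib import Graph

    store = Graph()
    store.parse('sample_ontology.n3', format='n3')   # illustrative filename
    print('Loaded ' + str(len(store)) + ' triples')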
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
show_graph_summary
def show_graph_summary(g): """ display sample data from a graph """ sample_data = [] print("list(g[RDFS.Class]) = " + str(len(list(g[RDFS.Class])))) # Get Subject Lists num_subj = 0 for subj in g.subjects(RDF.type): num_subj += 1 if num_subj < 5: sample_data.append("subjects.subject: " + get_string_from_rdf(subj)) print("g.subjects(RDF.type) = " + str(num_subj)) # Get Sample of Subjects, Predicates, Objects num_subj = 0 for subj, pred, obj in g: num_subj += 1 if num_subj < 5: sample_data.append("g.subject : " + get_string_from_rdf(subj)) sample_data.append("g.predicate : " + get_string_from_rdf(pred)) sample_data.append("g.object : " + get_string_from_rdf(obj)) print("g.obj(RDF.type) = " + str(num_subj)) print ("------ Sample Data ------") for line in sample_data: print(line)
python
def show_graph_summary(g): """ display sample data from a graph """ sample_data = [] print("list(g[RDFS.Class]) = " + str(len(list(g[RDFS.Class])))) # Get Subject Lists num_subj = 0 for subj in g.subjects(RDF.type): num_subj += 1 if num_subj < 5: sample_data.append("subjects.subject: " + get_string_from_rdf(subj)) print("g.subjects(RDF.type) = " + str(num_subj)) # Get Sample of Subjects, Predicates, Objects num_subj = 0 for subj, pred, obj in g: num_subj += 1 if num_subj < 5: sample_data.append("g.subject : " + get_string_from_rdf(subj)) sample_data.append("g.predicate : " + get_string_from_rdf(pred)) sample_data.append("g.object : " + get_string_from_rdf(obj)) print("g.obj(RDF.type) = " + str(num_subj)) print ("------ Sample Data ------") for line in sample_data: print(line)
[ "def", "show_graph_summary", "(", "g", ")", ":", "sample_data", "=", "[", "]", "print", "(", "\"list(g[RDFS.Class]) = \"", "+", "str", "(", "len", "(", "list", "(", "g", "[", "RDFS", ".", "Class", "]", ")", ")", ")", ")", "# Get Subject Lists", "num_subj", "=", "0", "for", "subj", "in", "g", ".", "subjects", "(", "RDF", ".", "type", ")", ":", "num_subj", "+=", "1", "if", "num_subj", "<", "5", ":", "sample_data", ".", "append", "(", "\"subjects.subject: \"", "+", "get_string_from_rdf", "(", "subj", ")", ")", "print", "(", "\"g.subjects(RDF.type) = \"", "+", "str", "(", "num_subj", ")", ")", "# Get Sample of Subjects, Predicates, Objects", "num_subj", "=", "0", "for", "subj", ",", "pred", ",", "obj", "in", "g", ":", "num_subj", "+=", "1", "if", "num_subj", "<", "5", ":", "sample_data", ".", "append", "(", "\"g.subject : \"", "+", "get_string_from_rdf", "(", "pred", ")", ")", "sample_data", ".", "append", "(", "\"g.predicate : \"", "+", "get_string_from_rdf", "(", "subj", ")", ")", "sample_data", ".", "append", "(", "\"g.object : \"", "+", "get_string_from_rdf", "(", "obj", ")", ")", "print", "(", "\"g.obj(RDF.type) = \"", "+", "str", "(", "num_subj", ")", ")", "print", "(", "\"------ Sample Data ------\"", ")", "for", "line", "in", "sample_data", ":", "print", "(", "line", ")" ]
display sample data from a graph
[ "display", "sample", "data", "from", "a", "graph" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L28-L54
train
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
export
def export(g, csv_fname): """ export a graph to CSV for simpler viewing """ with open(csv_fname, "w") as f: num_tuples = 0 f.write('"num","subject","predicate","object"\n') for subj, pred, obj in g: num_tuples += 1 f.write('"' + str(num_tuples) + '",') f.write('"' + get_string_from_rdf(subj) + '",') f.write('"' + get_string_from_rdf(pred) + '",') f.write('"' + get_string_from_rdf(obj) + '"\n') print("Finished exporting " , num_tuples, " tuples")
python
def export(g, csv_fname): """ export a graph to CSV for simpler viewing """ with open(csv_fname, "w") as f: num_tuples = 0 f.write('"num","subject","predicate","object"\n') for subj, pred, obj in g: num_tuples += 1 f.write('"' + str(num_tuples) + '",') f.write('"' + get_string_from_rdf(subj) + '",') f.write('"' + get_string_from_rdf(pred) + '",') f.write('"' + get_string_from_rdf(obj) + '"\n') print("Finished exporting " , num_tuples, " tuples")
[ "def", "export", "(", "g", ",", "csv_fname", ")", ":", "with", "open", "(", "csv_fname", ",", "\"w\"", ")", "as", "f", ":", "num_tuples", "=", "0", "f", ".", "write", "(", "'\"num\",\"subject\",\"predicate\",\"object\"\\n'", ")", "for", "subj", ",", "pred", ",", "obj", "in", "g", ":", "num_tuples", "+=", "1", "f", ".", "write", "(", "'\"'", "+", "str", "(", "num_tuples", ")", "+", "'\",'", ")", "f", ".", "write", "(", "'\"'", "+", "get_string_from_rdf", "(", "subj", ")", "+", "'\",'", ")", "f", ".", "write", "(", "'\"'", "+", "get_string_from_rdf", "(", "pred", ")", "+", "'\",'", ")", "f", ".", "write", "(", "'\"'", "+", "get_string_from_rdf", "(", "obj", ")", "+", "'\"\\n'", ")", "print", "(", "\"Finished exporting \"", ",", "num_tuples", ",", "\" tuples\"", ")" ]
export a graph to CSV for simpler viewing
[ "export", "a", "graph", "to", "CSV", "for", "simpler", "viewing" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L56-L67
train
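A hedged alternative sketch: csv.writer performs the quote doubling that export() does by hand via get_string_from_rdf, so the raw last path segment can be written directly. It assumes, as above, that g yields (subject, predicate, object) string-like terms.

    import csv

    def export_with_csv_module(g, csv_fname):
        # same columns as export() above; csv.writer handles quoting itself
        with open(csv_fname, 'w', newline='') as f:
            writer = csv.writer(f, quoting=csv.QUOTE_ALL)
            writer.writerow(['num', 'subject', 'predicate', 'object'])
            for num, (subj, pred, obj) in enumerate(g, start=1):
                writer.writerow([num, subj.split('/')[-1],
                                 pred.split('/')[-1], obj.split('/')[-1]])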
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
get_string_from_rdf
def get_string_from_rdf(src): """ extracts the real content from an RDF info object """ res = src.split("/") return res[-1].replace('"', '""')
python
def get_string_from_rdf(src): """ extracts the real content from an RDF info object """ res = src.split("/") return res[-1].replace('"', '""')
[ "def", "get_string_from_rdf", "(", "src", ")", ":", "res", "=", "src", ".", "split", "(", "\"/\"", ")", "#[:-1]", "return", "\"\"", ".", "join", "(", "[", "l", ".", "replace", "(", "'\"'", ",", "'\"\"'", ")", "for", "l", "in", "res", "[", "len", "(", "res", ")", "-", "1", "]", "]", ")" ]
extracts the real content from an RDF info object
[ "extracts", "the", "real", "content", "from", "an", "RDF", "info", "object" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L69-L72
train
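Behavior of get_string_from_rdf on hypothetical inputs: it keeps the last '/'-separated segment and doubles any embedded double quotes so the value can be embedded in a quoted CSV field.

    print(get_string_from_rdf('http://sw.opencyc.org/concept/Mx4rSomeConcept'))  # Mx4rSomeConcept
    print(get_string_from_rdf('thing/with"quote'))                               # with""quote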
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
create_sample_file
def create_sample_file(ip, op, num_lines): """ make a short version of an RDF file """ with open(ip, "rb") as f: with open(op, "wb") as fout: for _ in range(num_lines): fout.write(f.readline() )
python
def create_sample_file(ip, op, num_lines): """ make a short version of an RDF file """ with open(ip, "rb") as f: with open(op, "wb") as fout: for _ in range(num_lines): fout.write(f.readline() )
[ "def", "create_sample_file", "(", "ip", ",", "op", ",", "num_lines", ")", ":", "with", "open", "(", "ip", ",", "\"rb\"", ")", "as", "f", ":", "with", "open", "(", "op", ",", "\"wb\"", ")", "as", "fout", ":", "for", "_", "in", "range", "(", "num_lines", ")", ":", "fout", ".", "write", "(", "f", ".", "readline", "(", ")", ")" ]
make a short version of an RDF file
[ "make", "a", "short", "version", "of", "an", "RDF", "file" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L75-L80
train
Nachtfeuer/pipeline
spline/tools/query.py
Select.flatten
def flatten(*sequence): """Flatten nested sequences into one.""" result = [] for entry in sequence: if isinstance(entry, list): result += Select.flatten(*entry) elif isinstance(entry, tuple): result += Select.flatten(*entry) else: result.append(entry) return result
python
def flatten(*sequence): """Flatten nested sequences into one.""" result = [] for entry in sequence: if isinstance(entry, list): result += Select.flatten(*entry) elif isinstance(entry, tuple): result += Select.flatten(*entry) else: result.append(entry) return result
[ "def", "flatten", "(", "*", "sequence", ")", ":", "result", "=", "[", "]", "for", "entry", "in", "sequence", ":", "if", "isinstance", "(", "entry", ",", "list", ")", ":", "result", "+=", "Select", ".", "flatten", "(", "*", "entry", ")", "elif", "isinstance", "(", "entry", ",", "tuple", ")", ":", "result", "+=", "Select", ".", "flatten", "(", "*", "entry", ")", "else", ":", "result", ".", "append", "(", "entry", ")", "return", "result" ]
Flatten nested sequences into one.
[ "Flatten", "nested", "sequences", "into", "one", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/query.py#L42-L52
train
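A quick sketch of Select.flatten, assuming Select is importable from spline/tools/query.py as recorded; note the sequence must be star-unpacked into the call.

    nested = [1, (2, [3, 4]), [(5,), 6]]
    print(Select.flatten(*nested))   # -> [1, 2, 3, 4, 5, 6]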
Nachtfeuer/pipeline
spline/tools/query.py
Select.build
def build(self): """Do the query.""" result = [] for entry in self.sequence: ignore = False for filter_function in self.filter_functions: if not filter_function(entry): ignore = True break if not ignore: value = entry for transform_function in self.transform_functions: value = transform_function(value) result.append(value) return result
python
def build(self): """Do the query.""" result = [] for entry in self.sequence: ignore = False for filter_function in self.filter_functions: if not filter_function(entry): ignore = True break if not ignore: value = entry for transform_function in self.transform_functions: value = transform_function(value) result.append(value) return result
[ "def", "build", "(", "self", ")", ":", "result", "=", "[", "]", "for", "entry", "in", "self", ".", "sequence", ":", "ignore", "=", "False", "for", "filter_function", "in", "self", ".", "filter_functions", ":", "if", "not", "filter_function", "(", "entry", ")", ":", "ignore", "=", "True", "break", "if", "not", "ignore", ":", "value", "=", "entry", "for", "transform_function", "in", "self", ".", "transform_functions", ":", "value", "=", "transform_function", "(", "value", ")", "result", ".", "append", "(", "value", ")", "return", "result" ]
Do the query.
[ "Do", "the", "query", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/query.py#L64-L78
train
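A hedged usage sketch for Select.build: the record only shows build() plus the filter_functions and transform_functions attributes, so this sketch sets them directly and assumes the constructor stores the sequence; the real builder presumably offers fluent helpers not shown here.

    from spline.tools.query import Select   # import path as recorded above

    query = Select(range(10))                          # assuming the constructor stores the sequence
    query.filter_functions = [lambda x: x % 2 == 0]    # drop entries any filter rejects
    query.transform_functions = [lambda x: x * x]      # then apply transforms in order
    print(query.build())                               # -> [0, 4, 16, 36, 64]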
acutesoftware/AIKIF
aikif/toolbox/zip_tools.py
extract_all
def extract_all(zipfile, dest_folder): """ reads the zip file and extracts all members, recreating the folder structure, into dest_folder """ z = ZipFile(zipfile) print(z) z.extractall(dest_folder)
python
def extract_all(zipfile, dest_folder): """ reads the zip file and extracts all members, recreating the folder structure, into dest_folder """ z = ZipFile(zipfile) print(z) z.extractall(dest_folder)
[ "def", "extract_all", "(", "zipfile", ",", "dest_folder", ")", ":", "z", "=", "ZipFile", "(", "zipfile", ")", "print", "(", "z", ")", "z", ".", "extract", "(", "dest_folder", ")" ]
reads the zip file and extracts all members, recreating the folder structure, into dest_folder
[ "reads", "the", "zip", "file", "determines", "compression", "and", "unzips", "recursively", "until", "source", "files", "are", "extracted" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L11-L19
train
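For reference, a minimal stdlib sketch with an illustrative archive name: ZipFile.extract(member, path) pulls one named member, while extractall(path) unpacks every member and recreates the folder tree.

    from zipfile import ZipFile

    with ZipFile('archive.zip') as z:    # illustrative archive name
        z.extractall('output_folder')    # recreates the archive's folder tree under output_folder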
acutesoftware/AIKIF
aikif/toolbox/zip_tools.py
create_zip_from_file
def create_zip_from_file(zip_file, fname): """ add a file to the archive """ with zipfile.ZipFile(zip_file, 'w') as myzip: myzip.write(fname)
python
def create_zip_from_file(zip_file, fname): """ add a file to the archive """ with zipfile.ZipFile(zip_file, 'w') as myzip: myzip.write(fname)
[ "def", "create_zip_from_file", "(", "zip_file", ",", "fname", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'w'", ")", "as", "myzip", ":", "myzip", ".", "write", "(", "fname", ")" ]
add a file to the archive
[ "add", "a", "file", "to", "the", "archive" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L21-L26
train
acutesoftware/AIKIF
aikif/toolbox/zip_tools.py
create_zip_from_folder
def create_zip_from_folder(zip_file, fldr, mode="r"): """ add all the files from the folder fldr to the archive """ #print('zip from folder - adding folder : ', fldr) zipf = zipfile.ZipFile(zip_file, 'w') for root, dirs, files in os.walk(fldr): for file in files: fullname = os.path.join(root, file) #print('zip - adding file : ', fullname) zipf.write(fullname) zipf.close()
python
def create_zip_from_folder(zip_file, fldr, mode="r"): """ add all the files from the folder fldr to the archive """ #print('zip from folder - adding folder : ', fldr) zipf = zipfile.ZipFile(zip_file, 'w') for root, dirs, files in os.walk(fldr): for file in files: fullname = os.path.join(root, file) #print('zip - adding file : ', fullname) zipf.write(fullname) zipf.close()
[ "def", "create_zip_from_folder", "(", "zip_file", ",", "fldr", ",", "mode", "=", "\"r\"", ")", ":", "#print('zip from folder - adding folder : ', fldr)", "zipf", "=", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'w'", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "fldr", ")", ":", "for", "file", "in", "files", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", "#print('zip - adding file : ', fullname)", "zipf", ".", "write", "(", "fullname", ")", "zipf", ".", "close", "(", ")" ]
add all the files from the folder fldr to the archive
[ "add", "all", "the", "files", "from", "the", "folder", "fldr", "to", "the", "archive" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L38-L52
train
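zipf.write(fullname) stores each path exactly as passed, so zipping an absolute folder bakes absolute paths into the archive (and the mode parameter above is unused). A hypothetical variant using arcname keeps entries relative so the archive unpacks cleanly anywhere:

    import os
    import zipfile

    def create_zip_relative(zip_file, fldr):
        # store entries relative to fldr and compress with DEFLATE
        with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, _dirs, files in os.walk(fldr):
                for name in files:
                    fullname = os.path.join(root, name)
                    zipf.write(fullname, arcname=os.path.relpath(fullname, fldr))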
nocarryr/python-dispatch
pydispatch/aioutils.py
AioWeakMethodContainer.add_method
def add_method(self, loop, callback): """Add a coroutine function Args: loop: The :class:`event loop <asyncio.BaseEventLoop>` instance on which to schedule callbacks callback: The :term:`coroutine function` to add """ f, obj = get_method_vars(callback) wrkey = (f, id(obj)) self[wrkey] = obj self.event_loop_map[wrkey] = loop
python
def add_method(self, loop, callback): """Add a coroutine function Args: loop: The :class:`event loop <asyncio.BaseEventLoop>` instance on which to schedule callbacks callback: The :term:`coroutine function` to add """ f, obj = get_method_vars(callback) wrkey = (f, id(obj)) self[wrkey] = obj self.event_loop_map[wrkey] = loop
[ "def", "add_method", "(", "self", ",", "loop", ",", "callback", ")", ":", "f", ",", "obj", "=", "get_method_vars", "(", "callback", ")", "wrkey", "=", "(", "f", ",", "id", "(", "obj", ")", ")", "self", "[", "wrkey", "]", "=", "obj", "self", ".", "event_loop_map", "[", "wrkey", "]", "=", "loop" ]
Add a coroutine function Args: loop: The :class:`event loop <asyncio.BaseEventLoop>` instance on which to schedule callbacks callback: The :term:`coroutine function` to add
[ "Add", "a", "coroutine", "function" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L229-L240
train
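A sketch of the key shape add_method builds, assuming get_method_vars unpacks a bound method into its underlying function and instance (the standard __func__/__self__ pair of a bound method):

    class Widget:
        async def on_event(self, *args):
            pass

    w = Widget()
    callback = w.on_event
    f, obj = callback.__func__, callback.__self__   # presumably what get_method_vars returns
    wrkey = (f, id(obj))                            # the key shape used by add_method above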
nocarryr/python-dispatch
pydispatch/aioutils.py
AioWeakMethodContainer.iter_methods
def iter_methods(self): """Iterate over stored coroutine functions Yields: Stored :term:`coroutine function` objects .. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances` """ for wrkey, obj in self.iter_instances(): f, obj_id = wrkey loop = self.event_loop_map[wrkey] m = getattr(obj, f.__name__) yield loop, m
python
def iter_methods(self): """Iterate over stored coroutine functions Yields: Stored :term:`coroutine function` objects .. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances` """ for wrkey, obj in self.iter_instances(): f, obj_id = wrkey loop = self.event_loop_map[wrkey] m = getattr(obj, f.__name__) yield loop, m
[ "def", "iter_methods", "(", "self", ")", ":", "for", "wrkey", ",", "obj", "in", "self", ".", "iter_instances", "(", ")", ":", "f", ",", "obj_id", "=", "wrkey", "loop", "=", "self", ".", "event_loop_map", "[", "wrkey", "]", "m", "=", "getattr", "(", "obj", ",", "f", ".", "__name__", ")", "yield", "loop", ",", "m" ]
Iterate over stored coroutine functions Yields: Stored :term:`coroutine function` objects .. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
[ "Iterate", "over", "stored", "coroutine", "functions" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L248-L260
train
nocarryr/python-dispatch
pydispatch/aioutils.py
AioWeakMethodContainer.submit_coroutine
def submit_coroutine(self, coro, loop): """Schedule and await a coroutine on the specified loop The coroutine is wrapped and scheduled using :func:`asyncio.run_coroutine_threadsafe`. While the coroutine is "awaited", the result is not available as method returns immediately. Args: coro: The :term:`coroutine` to schedule loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to schedule the coroutine Note: This method is used internally by :meth:`__call__` and is not meant to be called directly. """ async def _do_call(_coro): with _IterationGuard(self): await _coro asyncio.run_coroutine_threadsafe(_do_call(coro), loop=loop)
python
def submit_coroutine(self, coro, loop): """Schedule and await a coroutine on the specified loop The coroutine is wrapped and scheduled using :func:`asyncio.run_coroutine_threadsafe`. While the coroutine is "awaited", the result is not available as method returns immediately. Args: coro: The :term:`coroutine` to schedule loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to schedule the coroutine Note: This method is used internally by :meth:`__call__` and is not meant to be called directly. """ async def _do_call(_coro): with _IterationGuard(self): await _coro asyncio.run_coroutine_threadsafe(_do_call(coro), loop=loop)
[ "def", "submit_coroutine", "(", "self", ",", "coro", ",", "loop", ")", ":", "async", "def", "_do_call", "(", "_coro", ")", ":", "with", "_IterationGuard", "(", "self", ")", ":", "await", "_coro", "asyncio", ".", "run_coroutine_threadsafe", "(", "_do_call", "(", "coro", ")", ",", "loop", "=", "loop", ")" ]
Schedule and await a coroutine on the specified loop The coroutine is wrapped and scheduled using :func:`asyncio.run_coroutine_threadsafe`. While the coroutine is "awaited", the result is not available as method returns immediately. Args: coro: The :term:`coroutine` to schedule loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to schedule the coroutine Note: This method is used internally by :meth:`__call__` and is not meant to be called directly.
[ "Schedule", "and", "await", "a", "coroutine", "on", "the", "specified", "loop" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L264-L283
train
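A minimal stdlib sketch of the asyncio.run_coroutine_threadsafe pattern that submit_coroutine wraps; unlike submit_coroutine, which returns immediately, this sketch blocks on the returned future to show the result path.

    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def hello():
        print('running on the loop thread')

    future = asyncio.run_coroutine_threadsafe(hello(), loop=loop)
    future.result(timeout=5)   # block the calling thread until the coroutine finishes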
acutesoftware/AIKIF
aikif/lib/cls_file.py
File.launch
def launch(self): """ launch a file - used for starting html pages """ #os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x import subprocess try: retcode = subprocess.call(self.fullname, shell=True) if retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) return False else: print("Child returned", retcode, file=sys.stderr) return True except OSError as e: print("Execution failed:", e, file=sys.stderr) return False
python
def launch(self): """ launch a file - used for starting html pages """ #os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x import subprocess try: retcode = subprocess.call(self.fullname, shell=True) if retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) return False else: print("Child returned", retcode, file=sys.stderr) return True except OSError as e: print("Execution failed:", e, file=sys.stderr) return False
[ "def", "launch", "(", "self", ")", ":", "#os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x", "import", "subprocess", "try", ":", "retcode", "=", "subprocess", ".", "call", "(", "self", ".", "fullname", ",", "shell", "=", "True", ")", "if", "retcode", "<", "0", ":", "print", "(", "\"Child was terminated by signal\"", ",", "-", "retcode", ",", "file", "=", "sys", ".", "stderr", ")", "return", "False", "else", ":", "print", "(", "\"Child returned\"", ",", "retcode", ",", "file", "=", "sys", ".", "stderr", ")", "return", "True", "except", "OSError", "as", "e", ":", "print", "(", "\"Execution failed:\"", ",", "e", ",", "file", "=", "sys", ".", "stderr", ")", "return", "False" ]
launch a file - used for starting html pages
[ "launch", "a", "file", "-", "used", "for", "starting", "html", "pages" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L57-L71
train
acutesoftware/AIKIF
aikif/lib/cls_file.py
File.delete
def delete(self): """ delete a file, don't really care if it doesn't exist """ if self.fullname != "": try: os.remove(self.fullname) except IOError: print("Can't delete ",self.fullname)
python
def delete(self): """ delete a file, don't really care if it doesn't exist """ if self.fullname != "": try: os.remove(self.fullname) except IOError: print("Can't delete ",self.fullname)
[ "def", "delete", "(", "self", ")", ":", "if", "self", ".", "fullname", "!=", "\"\"", ":", "try", ":", "os", ".", "remove", "(", "self", ".", "fullname", ")", "except", "IOError", ":", "print", "(", "\"Cant delete \"", ",", "self", ".", "fullname", ")" ]
delete a file, don't really care if it doesn't exist
[ "delete", "a", "file", "don", "t", "really", "care", "if", "it", "doesn", "t", "exist" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L76-L82
train
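A small sketch of the return-code convention File.launch relies on: subprocess.call returns a negative value when the child is killed by a signal (POSIX only), otherwise the exit status. The command is illustrative.

    import subprocess
    import sys

    retcode = subprocess.call('python --version', shell=True)   # illustrative command
    if retcode < 0:
        print('terminated by signal', -retcode, file=sys.stderr)
    else:
        print('exit status', retcode, file=sys.stderr)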