Dataset schema (each record below lists these fields):

    field              type      length / distinct values
    repo               string    7 to 55 chars
    path               string    4 to 127 chars
    func_name          string    1 to 88 chars
    original_string    string    75 to 19.8k chars
    language           string    1 value (python)
    code               string    75 to 19.8k chars
    code_tokens        sequence
    docstring          string    3 to 17.3k chars
    docstring_tokens   sequence
    sha                string    40 chars
    url                string    87 to 242 chars
    partition          string    1 value (train)
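As a quick orientation to this schema, here is a minimal sketch of how such records could be consumed programmatically. It assumes the data has been exported as JSON Lines with the field names above; the filename "functions.jsonl" is hypothetical, not part of the source.

    import json

    # Minimal sketch, assuming the records are exported as JSON Lines
    # (one JSON object per line) keyed by the schema fields above.
    # "functions.jsonl" is a hypothetical filename, not part of the source.
    with open("functions.jsonl", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            # Each record pairs a function's source with its docstring,
            # plus pre-tokenized forms and provenance (repo, path, sha, url).
            summary = record["docstring"].splitlines()[0] if record["docstring"] else ""
            print(record["func_name"], "-", summary)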
repo: sio2project/filetracker
path: filetracker/servers/storage.py
func_name: FileStorage.stored_version
language: python
code (= original_string):

    def stored_version(self, name):
        """Returns the version of file `name` or None if it doesn't exist."""
        link_path = self._link_path(name)
        if not _path_exists(link_path):
            return None
        return _file_version(link_path)

code_tokens:
[ "def", "stored_version", "(", "self", ",", "name", ")", ":", "link_path", "=", "self", ".", "_link_path", "(", "name", ")", "if", "not", "_path_exists", "(", "link_path", ")", ":", "return", "None", "return", "_file_version", "(", "link_path", ")" ]
docstring: Returns the version of file `name` or None if it doesn't exist.
docstring_tokens:
[ "Returns", "the", "version", "of", "file", "name", "or", "None", "if", "it", "doesn", "t", "exist", "." ]
sha: 359b474850622e3d0c25ee2596d7242c02f84efb
url: https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/servers/storage.py#L289-L294
partition: train
repo: sio2project/filetracker
path: filetracker/servers/storage.py
func_name: _InputStreamWrapper.save
language: python
code (= original_string):

    def save(self, new_path=None):
        """Moves or creates the file with stream contents to a new location.

        Args:
            new_path: path to move to, if None a temporary file is created.
        """
        self.saved_in_temp = new_path is None
        if new_path is None:
            fd, new_path = tempfile.mkstemp()
            os.close(fd)
        if self.current_path:
            shutil.move(self.current_path, new_path)
        else:
            with open(new_path, 'wb') as dest:
                _copy_stream(self._data, dest, self._size)
        self.current_path = new_path

code_tokens:
[ "def", "save", "(", "self", ",", "new_path", "=", "None", ")", ":", "self", ".", "saved_in_temp", "=", "new_path", "is", "None", "if", "new_path", "is", "None", ":", "fd", ",", "new_path", "=", "tempfile", ".", "mkstemp", "(", ")", "os", ".", "close", "(", "fd", ")", "if", "self", ".", "current_path", ":", "shutil", ".", "move", "(", "self", ".", "current_path", ",", "new_path", ")", "else", ":", "with", "open", "(", "new_path", ",", "'wb'", ")", "as", "dest", ":", "_copy_stream", "(", "self", ".", "_data", ",", "dest", ",", "self", ".", "_size", ")", "self", ".", "current_path", "=", "new_path" ]
docstring: Moves or creates the file with stream contents to a new location. Args: new_path: path to move to, if None a temporary file is created.
docstring_tokens:
[ "Moves", "or", "creates", "the", "file", "with", "stream", "contents", "to", "a", "new", "location", "." ]
sha: 359b474850622e3d0c25ee2596d7242c02f84efb
url: https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/servers/storage.py#L353-L369
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.get_lps
language: python
code (= original_string):

    def get_lps(self):
        """Iterator that returns all the lp objects from linguistic processors layers from the header
        @rtype: L{Clp}
        @return: list of lp objects
        """
        if self.header is not None:
            for linguisticProcessor in self.header:
                for lp in linguisticProcessor:
                    yield lp

code_tokens:
[ "def", "get_lps", "(", "self", ")", ":", "if", "self", ".", "header", "is", "not", "None", ":", "for", "linguisticProcessor", "in", "self", ".", "header", ":", "for", "lp", "in", "linguisticProcessor", ":", "yield", "lp" ]
docstring: Iterator that returns all the lp objects from linguistic processors layers from the header @rtype: L{Clp} @return: list of lp objects
docstring_tokens:
[ "Iterator", "that", "returns", "all", "the", "lp", "objects", "from", "linguistic", "processors", "layers", "from", "the", "header" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L404-L412
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.get_trees_as_list
language: python
code (= original_string):

    def get_trees_as_list(self):
        """
        Iterator that returns the constituency trees
        @rtype: L{Ctree}
        @return: iterator to all the constituency trees
        """
        mytrees = []
        if self.constituency_layer is not None:
            for tree in self.constituency_layer.get_trees():
                mytrees.append(tree)
        return mytrees

code_tokens:
[ "def", "get_trees_as_list", "(", "self", ")", ":", "mytrees", "=", "[", "]", "if", "self", ".", "constituency_layer", "is", "not", "None", ":", "for", "tree", "in", "self", ".", "constituency_layer", ".", "get_trees", "(", ")", ":", "mytrees", ".", "append", "(", "tree", ")", "return", "mytrees" ]
docstring: Iterator that returns the constituency trees @rtype: L{Ctree} @return: iterator to all the constituency trees
docstring_tokens:
[ "Iterator", "that", "returns", "the", "constituency", "trees" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L425-L435
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.convert_factualitylayer_to_factualities
language: python
code (= original_string):

    def convert_factualitylayer_to_factualities(self):
        """
        Takes information from factuality layer in old representation
        Creates new factuality representation and removes the old layer
        """
        if self.factuality_layer is not None:
            this_node = self.factuality_layer.get_node()
            if this_node.tag == 'factualitylayer':
                new_node = Cfactualities()
                #create dictionary from token ids to the term ids
                token2term = {}
                for t in self.get_terms():
                    s = t.get_span()
                    for w in s.get_span_ids():
                        token2term[w] = t.get_id()
                fnr = 0
                for fv in self.get_factvalues():
                    fnr += 1
                    conf = fv.get_confidence()
                    wid = fv.get_id()
                    tid = token2term.get(wid)
                    fnode = Cfactuality()
                    #set span with tid as element
                    fspan = Cspan()
                    fspan.add_target_id(tid)
                    fnode.set_span(fspan)
                    #add factVal element with val, resource = factbank, + confidence if present
                    fVal = Cfactval()
                    fVal.set_resource('factbank')
                    fVal.set_value(fv.get_prediction())
                    if conf:
                        fVal.set_confidence(conf)
                    fnode.set_id('f' + str(fnr))
                    fnode.add_factval(fVal)
                    new_node.add_factuality(fnode)
                self.root.remove(this_node)
                self.root.append(new_node.get_node())
                self.factuality_layer = new_node

code_tokens:
[ "def", "convert_factualitylayer_to_factualities", "(", "self", ")", ":", "if", "self", ".", "factuality_layer", "is", "not", "None", ":", "this_node", "=", "self", ".", "factuality_layer", ".", "get_node", "(", ")", "if", "this_node", ".", "tag", "==", "'factualitylayer'", ":", "new_node", "=", "Cfactualities", "(", ")", "#create dictionary from token ids to the term ids", "token2term", "=", "{", "}", "for", "t", "in", "self", ".", "get_terms", "(", ")", ":", "s", "=", "t", ".", "get_span", "(", ")", "for", "w", "in", "s", ".", "get_span_ids", "(", ")", ":", "token2term", "[", "w", "]", "=", "t", ".", "get_id", "(", ")", "fnr", "=", "0", "for", "fv", "in", "self", ".", "get_factvalues", "(", ")", ":", "fnr", "+=", "1", "conf", "=", "fv", ".", "get_confidence", "(", ")", "wid", "=", "fv", ".", "get_id", "(", ")", "tid", "=", "token2term", ".", "get", "(", "wid", ")", "fnode", "=", "Cfactuality", "(", ")", "#set span with tid as element", "fspan", "=", "Cspan", "(", ")", "fspan", ".", "add_target_id", "(", "tid", ")", "fnode", ".", "set_span", "(", "fspan", ")", "#add factVal element with val, resource = factbank, + confidence if present", "fVal", "=", "Cfactval", "(", ")", "fVal", ".", "set_resource", "(", "'factbank'", ")", "fVal", ".", "set_value", "(", "fv", ".", "get_prediction", "(", ")", ")", "if", "conf", ":", "fVal", ".", "set_confidence", "(", "conf", ")", "fnode", ".", "set_id", "(", "'f'", "+", "str", "(", "fnr", ")", ")", "fnode", ".", "add_factval", "(", "fVal", ")", "new_node", ".", "add_factuality", "(", "fnode", ")", "self", ".", "root", ".", "remove", "(", "this_node", ")", "self", ".", "root", ".", "append", "(", "new_node", ".", "get_node", "(", ")", ")", "self", ".", "factuality_layer", "=", "new_node" ]
docstring: Takes information from factuality layer in old representation Creates new factuality representation and removes the old layer
docstring_tokens:
[ "Takes", "information", "from", "factuality", "layer", "in", "old", "representation", "Creates", "new", "factuality", "representation", "and", "removes", "the", "old", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L914-L951
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.get_constituency_extractor
language: python
code (= original_string):

    def get_constituency_extractor(self):
        """
        Returns a constituency extractor object
        @rtype: L{Cconstituency_extractor}
        @return: a constituency extractor object
        """
        if self.constituency_layer is not None:  ##Otherwise there are no constituens
            if self.my_constituency_extractor is None:
                self.my_constituency_extractor = Cconstituency_extractor(self)
            return self.my_constituency_extractor
        else:
            return None

code_tokens:
[ "def", "get_constituency_extractor", "(", "self", ")", ":", "if", "self", ".", "constituency_layer", "is", "not", "None", ":", "##Otherwise there are no constituens", "if", "self", ".", "my_constituency_extractor", "is", "None", ":", "self", ".", "my_constituency_extractor", "=", "Cconstituency_extractor", "(", "self", ")", "return", "self", ".", "my_constituency_extractor", "else", ":", "return", "None" ]
docstring: Returns a constituency extractor object @rtype: L{Cconstituency_extractor} @return: a constituency extractor object
docstring_tokens:
[ "Returns", "a", "constituency", "extractor", "object" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L954-L966
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.get_dependency_extractor
language: python
code (= original_string):

    def get_dependency_extractor(self):
        """
        Returns a dependency extractor object
        @rtype: L{Cdependency_extractor}
        @return: a dependency extractor object
        """
        if self.dependency_layer is not None:  #otherwise there are no dependencies
            if self.my_dependency_extractor is None:
                self.my_dependency_extractor = Cdependency_extractor(self)
            return self.my_dependency_extractor
        else:
            return None

code_tokens:
[ "def", "get_dependency_extractor", "(", "self", ")", ":", "if", "self", ".", "dependency_layer", "is", "not", "None", ":", "#otherwise there are no dependencies", "if", "self", ".", "my_dependency_extractor", "is", "None", ":", "self", ".", "my_dependency_extractor", "=", "Cdependency_extractor", "(", "self", ")", "return", "self", ".", "my_dependency_extractor", "else", ":", "return", "None" ]
docstring: Returns a dependency extractor object @rtype: L{Cdependency_extractor} @return: a dependency extractor object
docstring_tokens:
[ "Returns", "a", "dependency", "extractor", "object" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L968-L979
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_wf
language: python
code (= original_string):

    def add_wf(self,wf_obj):
        """
        Adds a token to the text layer
        @type wf_obj: L{Cwf}
        @param wf_obj: the token object
        """
        if self.text_layer is None:
            self.text_layer = Ctext(type=self.type)
            self.root.append(self.text_layer.get_node())
        self.text_layer.add_wf(wf_obj)

code_tokens:
[ "def", "add_wf", "(", "self", ",", "wf_obj", ")", ":", "if", "self", ".", "text_layer", "is", "None", ":", "self", ".", "text_layer", "=", "Ctext", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "text_layer", ".", "get_node", "(", ")", ")", "self", ".", "text_layer", ".", "add_wf", "(", "wf_obj", ")" ]
docstring: Adds a token to the text layer @type wf_obj: L{Cwf} @param wf_obj: the token object
docstring_tokens:
[ "Adds", "a", "token", "to", "the", "text", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L982-L991
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_term
language: python
code (= original_string):

    def add_term(self,term_obj):
        """
        Adds a term to the term layer
        @type term_obj: L{Cterm}
        @param term_obj: the term object
        """
        if self.term_layer is None:
            self.term_layer = Cterms(type=self.type)
            self.root.append(self.term_layer.get_node())
        self.term_layer.add_term(term_obj)

code_tokens:
[ "def", "add_term", "(", "self", ",", "term_obj", ")", ":", "if", "self", ".", "term_layer", "is", "None", ":", "self", ".", "term_layer", "=", "Cterms", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "term_layer", ".", "get_node", "(", ")", ")", "self", ".", "term_layer", ".", "add_term", "(", "term_obj", ")" ]
docstring: Adds a term to the term layer @type term_obj: L{Cterm} @param term_obj: the term object
docstring_tokens:
[ "Adds", "a", "term", "to", "the", "term", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1022-L1031
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_chunk
language: python
code (= original_string):

    def add_chunk(self,chunk_obj):
        """
        Adds a chunk to the chunk layer
        @type chunk_obj: L{Cchunk}
        @param chunk_obj: the chunk object
        """
        if self.chunk_layer is None:
            self.chunk_layer = Cchunks(type=self.type)
            self.root.append(self.chunk_layer.get_node())
        self.chunk_layer.add_chunk(chunk_obj)

code_tokens:
[ "def", "add_chunk", "(", "self", ",", "chunk_obj", ")", ":", "if", "self", ".", "chunk_layer", "is", "None", ":", "self", ".", "chunk_layer", "=", "Cchunks", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "chunk_layer", ".", "get_node", "(", ")", ")", "self", ".", "chunk_layer", ".", "add_chunk", "(", "chunk_obj", ")" ]
docstring: Adds a chunk to the chunk layer @type chunk_obj: L{Cchunk} @param chunk_obj: the chunk object
docstring_tokens:
[ "Adds", "a", "chunk", "to", "the", "chunk", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1034-L1043
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.create_term
language: python
code (= original_string):

    def create_term(self, lemma, pos, morphofeat, tokens, id=None):
        """
        Create a new term and add it to the term layer
        @type lemma: string
        @param lemma: The lemma of the term
        @type pos: string
        @param pos: The postrag(rst letter) of the POS attribute
        @type morphofeat: string
        @param morphofeat: The morphofeat (full morphological features) of the term
        @type tokens: sequence of L{Cwf}
        @param tokens: the token(s) that this term describes
        @type id: string
        @param id: the id of the term, if not given an id tXXX will be created
        """
        if id is None:
            n = 1 if self.term_layer is None else len(self.term_layer.idx) + 1
            id = "t{n}".format(**locals())
        new_term = Cterm(type=self.type)
        new_term.set_id(id)
        new_term.set_lemma(lemma)
        new_term.set_pos(pos)
        new_term.set_morphofeat(morphofeat)
        new_span = Cspan()
        for token in tokens:
            new_span.add_target_id(token.get_id())
        new_term.set_span(new_span)
        self.add_term(new_term)
        return new_term

code_tokens:
[ "def", "create_term", "(", "self", ",", "lemma", ",", "pos", ",", "morphofeat", ",", "tokens", ",", "id", "=", "None", ")", ":", "if", "id", "is", "None", ":", "n", "=", "1", "if", "self", ".", "term_layer", "is", "None", "else", "len", "(", "self", ".", "term_layer", ".", "idx", ")", "+", "1", "id", "=", "\"t{n}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "new_term", "=", "Cterm", "(", "type", "=", "self", ".", "type", ")", "new_term", ".", "set_id", "(", "id", ")", "new_term", ".", "set_lemma", "(", "lemma", ")", "new_term", ".", "set_pos", "(", "pos", ")", "new_term", ".", "set_morphofeat", "(", "morphofeat", ")", "new_span", "=", "Cspan", "(", ")", "for", "token", "in", "tokens", ":", "new_span", ".", "add_target_id", "(", "token", ".", "get_id", "(", ")", ")", "new_term", ".", "set_span", "(", "new_span", ")", "self", ".", "add_term", "(", "new_term", ")", "return", "new_term" ]
docstring: Create a new term and add it to the term layer @type lemma: string @param lemma: The lemma of the term @type pos: string @param pos: The postrag(rst letter) of the POS attribute @type morphofeat: string @param morphofeat: The morphofeat (full morphological features) of the term @type tokens: sequence of L{Cwf} @param tokens: the token(s) that this term describes @type id: string @param id: the id of the term, if not given an id tXXX will be created
docstring_tokens:
[ "Create", "a", "new", "term", "and", "add", "it", "to", "the", "term", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1045-L1072
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_markable
language: python
code (= original_string):

    def add_markable(self,markable_obj):
        """
        Adds a markable to the markable layer
        @type markable_obj: L{Cmarkable}
        @param markable_obj: the markable object
        """
        if self.markable_layer is None:
            self.markable_layer = Cmarkables(type=self.type)
            self.root.append(self.markable_layer.get_node())
        self.markable_layer.add_markable(markable_obj)

code_tokens:
[ "def", "add_markable", "(", "self", ",", "markable_obj", ")", ":", "if", "self", ".", "markable_layer", "is", "None", ":", "self", ".", "markable_layer", "=", "Cmarkables", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "markable_layer", ".", "get_node", "(", ")", ")", "self", ".", "markable_layer", ".", "add_markable", "(", "markable_obj", ")" ]
docstring: Adds a markable to the markable layer @type markable_obj: L{Cmarkable} @param markable_obj: the markable object
docstring_tokens:
[ "Adds", "a", "markable", "to", "the", "markable", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1074-L1083
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_opinion
language: python
code (= original_string):

    def add_opinion(self,opinion_obj):
        """
        Adds an opinion to the opinion layer
        @type opinion_obj: L{Copinion}
        @param opinion_obj: the opinion object
        """
        if self.opinion_layer is None:
            self.opinion_layer = Copinions()
            self.root.append(self.opinion_layer.get_node())
        self.opinion_layer.add_opinion(opinion_obj)

code_tokens:
[ "def", "add_opinion", "(", "self", ",", "opinion_obj", ")", ":", "if", "self", ".", "opinion_layer", "is", "None", ":", "self", ".", "opinion_layer", "=", "Copinions", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "opinion_layer", ".", "get_node", "(", ")", ")", "self", ".", "opinion_layer", ".", "add_opinion", "(", "opinion_obj", ")" ]
docstring: Adds an opinion to the opinion layer @type opinion_obj: L{Copinion} @param opinion_obj: the opinion object
docstring_tokens:
[ "Adds", "an", "opinion", "to", "the", "opinion", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1086-L1095
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_statement
language: python
code (= original_string):

    def add_statement(self,statement_obj):
        """
        Adds a statement to the attribution layer
        @type statement_obj: L{Cstatement}
        @param statement_obj: the statement object
        """
        if self.attribution_layer is None:
            self.attribution_layer = Cattribution()
            self.root.append(self.attribution_layer.get_node())
        self.attribution_layer.add_statement(statement_obj)

code_tokens:
[ "def", "add_statement", "(", "self", ",", "statement_obj", ")", ":", "if", "self", ".", "attribution_layer", "is", "None", ":", "self", ".", "attribution_layer", "=", "Cattribution", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "attribution_layer", ".", "get_node", "(", ")", ")", "self", ".", "attribution_layer", ".", "add_statement", "(", "statement_obj", ")" ]
docstring: Adds a statement to the attribution layer @type statement_obj: L{Cstatement} @param statement_obj: the statement object
docstring_tokens:
[ "Adds", "a", "statement", "to", "the", "attribution", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1097-L1106
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_predicate
language: python
code (= original_string):

    def add_predicate(self, predicate_obj):
        """
        Adds a predicate to the semantic layer
        @type predicate_obj: L{Cpredicate}
        @param predicate_obj: the predicate object
        """
        if self.srl_layer is None:
            self.srl_layer = Csrl()
            self.root.append(self.srl_layer.get_node())
        self.srl_layer.add_predicate(predicate_obj)

code_tokens:
[ "def", "add_predicate", "(", "self", ",", "predicate_obj", ")", ":", "if", "self", ".", "srl_layer", "is", "None", ":", "self", ".", "srl_layer", "=", "Csrl", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "srl_layer", ".", "get_node", "(", ")", ")", "self", ".", "srl_layer", ".", "add_predicate", "(", "predicate_obj", ")" ]
docstring: Adds a predicate to the semantic layer @type predicate_obj: L{Cpredicate} @param predicate_obj: the predicate object
docstring_tokens:
[ "Adds", "a", "predicate", "to", "the", "semantic", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1110-L1119
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_timex
language: python
code (= original_string):

    def add_timex(self, time_obj):
        """
        Adds a timex entry to the time layer
        @type time_obj: L{Ctime}
        @param time_obj: time time object
        """
        if self.timex_layer is None:
            self.timex_layer = CtimeExpressions()
            self.root.append(self.timex_layer.get_node())
        self.timex_layer.add_timex(time_obj)

code_tokens:
[ "def", "add_timex", "(", "self", ",", "time_obj", ")", ":", "if", "self", ".", "timex_layer", "is", "None", ":", "self", ".", "timex_layer", "=", "CtimeExpressions", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "timex_layer", ".", "get_node", "(", ")", ")", "self", ".", "timex_layer", ".", "add_timex", "(", "time_obj", ")" ]
docstring: Adds a timex entry to the time layer @type time_obj: L{Ctime} @param time_obj: time time object
docstring_tokens:
[ "Adds", "a", "timex", "entry", "to", "the", "time", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1121-L1130
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.set_header
language: python
code (= original_string):

    def set_header(self,header):
        """
        Sets the header of the object
        @type header: L{CHeader}
        @param header: the header object
        """
        self.header = header
        self.root.insert(0,header.get_node())

code_tokens:
[ "def", "set_header", "(", "self", ",", "header", ")", ":", "self", ".", "header", "=", "header", "self", ".", "root", ".", "insert", "(", "0", ",", "header", ".", "get_node", "(", ")", ")" ]
docstring: Sets the header of the object @type header: L{CHeader} @param header: the header object
docstring_tokens:
[ "Sets", "the", "header", "of", "the", "object" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1133-L1140
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_linguistic_processor
language: python
code (= original_string):

    def add_linguistic_processor(self, layer, my_lp):
        """
        Adds a linguistic processor to the header
        @type my_lp: L{Clp}
        @param my_lp: linguistic processor object
        @type layer: string
        @param layer: the layer to which the processor is related to
        """
        if self.header is None:
            self.header = CHeader(type=self.type)
            self.root.insert(0,self.header.get_node())
        self.header.add_linguistic_processor(layer,my_lp)

code_tokens:
[ "def", "add_linguistic_processor", "(", "self", ",", "layer", ",", "my_lp", ")", ":", "if", "self", ".", "header", "is", "None", ":", "self", ".", "header", "=", "CHeader", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "insert", "(", "0", ",", "self", ".", "header", ".", "get_node", "(", ")", ")", "self", ".", "header", ".", "add_linguistic_processor", "(", "layer", ",", "my_lp", ")" ]
docstring: Adds a linguistic processor to the header @type my_lp: L{Clp} @param my_lp: linguistic processor object @type layer: string @param layer: the layer to which the processor is related to
docstring_tokens:
[ "Adds", "a", "linguistic", "processor", "to", "the", "header" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1142-L1153
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.create_linguistic_processor
language: python
code (= original_string):

    def create_linguistic_processor(self, layer, name, version, **kwargs):
        """
        Create a new linguistic processor element and add it to the header
        @type layer: string
        @param layer: the layer to which the processor is related to
        @type name: string
        @param name: the name of the linguistic processor
        @type version: string
        @param version: the version of the linguistic processor
        @param kwargs: arguments passed to processor constructor, e.g. timestamp
        """
        lp = Clp(name=name, version=version, **kwargs)
        self.add_linguistic_processor(layer, lp)
        return lp

code_tokens:
[ "def", "create_linguistic_processor", "(", "self", ",", "layer", ",", "name", ",", "version", ",", "*", "*", "kwargs", ")", ":", "lp", "=", "Clp", "(", "name", "=", "name", ",", "version", "=", "version", ",", "*", "*", "kwargs", ")", "self", ".", "add_linguistic_processor", "(", "layer", ",", "lp", ")", "return", "lp" ]
docstring: Create a new linguistic processor element and add it to the header @type layer: string @param layer: the layer to which the processor is related to @type name: string @param name: the name of the linguistic processor @type version: string @param version: the version of the linguistic processor @param kwargs: arguments passed to processor constructor, e.g. timestamp
docstring_tokens:
[ "Create", "a", "new", "linguistic", "processor", "element", "and", "add", "it", "to", "the", "header" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1155-L1168
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_dependency
language: python
code (= original_string):

    def add_dependency(self,my_dep):
        """
        Adds a dependency to the dependency layer
        @type my_dep: L{Cdependency}
        @param my_dep: dependency object
        """
        if self.dependency_layer is None:
            self.dependency_layer = Cdependencies()
            self.root.append(self.dependency_layer.get_node())
        self.dependency_layer.add_dependency(my_dep)

code_tokens:
[ "def", "add_dependency", "(", "self", ",", "my_dep", ")", ":", "if", "self", ".", "dependency_layer", "is", "None", ":", "self", ".", "dependency_layer", "=", "Cdependencies", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "dependency_layer", ".", "get_node", "(", ")", ")", "self", ".", "dependency_layer", ".", "add_dependency", "(", "my_dep", ")" ]
docstring: Adds a dependency to the dependency layer @type my_dep: L{Cdependency} @param my_dep: dependency object
docstring_tokens:
[ "Adds", "a", "dependency", "to", "the", "dependency", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1170-L1179
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.create_dependency
language: python
code (= original_string):

    def create_dependency(self, _from, to, function, comment=None):
        """
        Create a new dependency object and add it to the dependency layer
        @type _from: string
        @param _from: term id of the child node
        @type _to: string
        @param _to: term id of the parent node
        @type function: string
        @param function: grammatical function (relation) between parent and child
        @type comment: string
        @param comment: optional comment to be included
        """
        new_dependency = Cdependency()
        new_dependency.set_from(_from)
        new_dependency.set_to(to)
        new_dependency.set_function(function)
        if comment:
            new_dependency.set_comment(comment)
        self.add_dependency(new_dependency)
        return new_dependency

code_tokens:
[ "def", "create_dependency", "(", "self", ",", "_from", ",", "to", ",", "function", ",", "comment", "=", "None", ")", ":", "new_dependency", "=", "Cdependency", "(", ")", "new_dependency", ".", "set_from", "(", "_from", ")", "new_dependency", ".", "set_to", "(", "to", ")", "new_dependency", ".", "set_function", "(", "function", ")", "if", "comment", ":", "new_dependency", ".", "set_comment", "(", "comment", ")", "self", ".", "add_dependency", "(", "new_dependency", ")", "return", "new_dependency" ]
docstring: Create a new dependency object and add it to the dependency layer @type _from: string @param _from: term id of the child node @type _to: string @param _to: term id of the parent node @type function: string @param function: grammatical function (relation) between parent and child @type comment: string @param comment: optional comment to be included
docstring_tokens:
[ "Create", "a", "new", "dependency", "object", "and", "add", "it", "to", "the", "dependency", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1181-L1200
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_tlink
language: python
code (= original_string):

    def add_tlink(self,my_tlink):
        """
        Adds a tlink to the temporalRelations layer
        @type my_tlink: L{Ctlink}
        @param my_tlink: tlink object
        """
        if self.temporalRelations_layer is None:
            self.temporalRelations_layer = CtemporalRelations()
            self.root.append(self.temporalRelations_layer.get_node())
        self.temporalRelations_layer.add_tlink(my_tlink)

code_tokens:
[ "def", "add_tlink", "(", "self", ",", "my_tlink", ")", ":", "if", "self", ".", "temporalRelations_layer", "is", "None", ":", "self", ".", "temporalRelations_layer", "=", "CtemporalRelations", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "temporalRelations_layer", ".", "get_node", "(", ")", ")", "self", ".", "temporalRelations_layer", ".", "add_tlink", "(", "my_tlink", ")" ]
docstring: Adds a tlink to the temporalRelations layer @type my_tlink: L{Ctlink} @param my_tlink: tlink object
docstring_tokens:
[ "Adds", "a", "tlink", "to", "the", "temporalRelations", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1202-L1211
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_predicateAnchor
language: python
code (= original_string):

    def add_predicateAnchor(self,my_predAnch):
        """
        Adds a predAnch to the temporalRelations layer
        @type my_predAnch: L{CpredicateAnchor}
        @param my_predAnch: predicateAnchor object
        """
        if self.temporalRelations_layer is None:
            self.temporalRelations_layer = CtemporalRelations()
            self.root.append(self.temporalRelations_layer.get_node())
        self.temporalRelations_layer.add_predicateAnchor(my_predAnch)

code_tokens:
[ "def", "add_predicateAnchor", "(", "self", ",", "my_predAnch", ")", ":", "if", "self", ".", "temporalRelations_layer", "is", "None", ":", "self", ".", "temporalRelations_layer", "=", "CtemporalRelations", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "temporalRelations_layer", ".", "get_node", "(", ")", ")", "self", ".", "temporalRelations_layer", ".", "add_predicateAnchor", "(", "my_predAnch", ")" ]
docstring: Adds a predAnch to the temporalRelations layer @type my_predAnch: L{CpredicateAnchor} @param my_predAnch: predicateAnchor object
docstring_tokens:
[ "Adds", "a", "predAnch", "to", "the", "temporalRelations", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1213-L1222
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_clink
language: python
code (= original_string):

    def add_clink(self,my_clink):
        """
        Adds a clink to the causalRelations layer
        @type my_clink: L{Cclink}
        @param my_clink: clink object
        """
        if self.causalRelations_layer is None:
            self.causalRelations_layer = CcausalRelations()
            self.root.append(self.causalRelations_layer.get_node())
        self.causalRelations_layer.add_clink(my_clink)

code_tokens:
[ "def", "add_clink", "(", "self", ",", "my_clink", ")", ":", "if", "self", ".", "causalRelations_layer", "is", "None", ":", "self", ".", "causalRelations_layer", "=", "CcausalRelations", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "causalRelations_layer", ".", "get_node", "(", ")", ")", "self", ".", "causalRelations_layer", ".", "add_clink", "(", "my_clink", ")" ]
docstring: Adds a clink to the causalRelations layer @type my_clink: L{Cclink} @param my_clink: clink object
docstring_tokens:
[ "Adds", "a", "clink", "to", "the", "causalRelations", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1224-L1233
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_factuality
language: python
code (= original_string):

    def add_factuality(self,my_fact):
        """
        Adds a factuality to the factuality layer
        @type my_fact: L{Cfactuality}
        @param my_fact: factuality object
        """
        if self.factuality_layer is None:
            self.factuality_layer = Cfactualities()
            self.root.append(self.factuality_layer.get_node())
        self.factuality_layer.add_factuality(my_fact)

code_tokens:
[ "def", "add_factuality", "(", "self", ",", "my_fact", ")", ":", "if", "self", ".", "factuality_layer", "is", "None", ":", "self", ".", "factuality_layer", "=", "Cfactualities", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "factuality_layer", ".", "get_node", "(", ")", ")", "self", ".", "factuality_layer", ".", "add_factuality", "(", "my_fact", ")" ]
docstring: Adds a factuality to the factuality layer @type my_fact: L{Cfactuality} @param my_fact: factuality object
docstring_tokens:
[ "Adds", "a", "factuality", "to", "the", "factuality", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1235-L1244
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_entity
language: python
code (= original_string):

    def add_entity(self,entity):
        """
        Adds an entity to the entity layer
        @type entity: L{Centity}
        @param entity: the entity object
        """
        if self.entity_layer is None:
            self.entity_layer = Centities(type=self.type)
            self.root.append(self.entity_layer.get_node())
        self.entity_layer.add_entity(entity)

code_tokens:
[ "def", "add_entity", "(", "self", ",", "entity", ")", ":", "if", "self", ".", "entity_layer", "is", "None", ":", "self", ".", "entity_layer", "=", "Centities", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "entity_layer", ".", "get_node", "(", ")", ")", "self", ".", "entity_layer", ".", "add_entity", "(", "entity", ")" ]
docstring: Adds an entity to the entity layer @type entity: L{Centity} @param entity: the entity object
docstring_tokens:
[ "Adds", "an", "entity", "to", "the", "entity", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1246-L1255
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_coreference
language: python
code (= original_string):

    def add_coreference(self, coreference):
        """
        Adds an coreference to the coreference layer
        @type coreference: L{Ccoreference}
        @param coreference: the coreference object
        """
        if self.coreference_layer is None:
            self.coreference_layer = Ccoreferences(type=self.type)
            self.root.append(self.coreference_layer.get_node())
        self.coreference_layer.add_coreference(coreference)

code_tokens:
[ "def", "add_coreference", "(", "self", ",", "coreference", ")", ":", "if", "self", ".", "coreference_layer", "is", "None", ":", "self", ".", "coreference_layer", "=", "Ccoreferences", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "coreference_layer", ".", "get_node", "(", ")", ")", "self", ".", "coreference_layer", ".", "add_coreference", "(", "coreference", ")" ]
docstring: Adds an coreference to the coreference layer @type coreference: L{Ccoreference} @param coreference: the coreference object
docstring_tokens:
[ "Adds", "an", "coreference", "to", "the", "coreference", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1279-L1288
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.create_coreference
language: python
code (= original_string):

    def create_coreference(self, coref_type, term_ids, id=None):
        """
        Create a new coreference object and add it to the coreferences layer
        @type coref_type: string
        @param coref_type: type of the coreference object
        @type term_ids: list
        @param term_ids: list of term ids
        @type id: string
        @param id: optional id of the entity
        """
        if id is None:
            if self.coreference_layer is None:
                i = 1
            else:
                # Note: `get_type` is referenced without parentheses here, so this
                # compares a bound method to a string and never matches;
                # `l.get_type()` was probably intended.
                corefs = (l for l in self.coreference_layer.get_corefs()
                          if l.get_type == coref_type)
                i = len(list(corefs)) + 1
            id = "co{coref_type}{i}".format(**locals())
        new_coref = Ccoreference(type=self.type)
        new_coref.set_id(id)
        new_coref.set_type(coref_type)
        new_coref.add_span(term_ids)
        self.add_coreference(new_coref)
        return new_coref

code_tokens:
[ "def", "create_coreference", "(", "self", ",", "coref_type", ",", "term_ids", ",", "id", "=", "None", ")", ":", "if", "id", "is", "None", ":", "if", "self", ".", "coreference_layer", "is", "None", ":", "i", "=", "1", "else", ":", "corefs", "=", "(", "l", "for", "l", "in", "self", ".", "coreference_layer", ".", "get_corefs", "(", ")", "if", "l", ".", "get_type", "==", "coref_type", ")", "i", "=", "len", "(", "list", "(", "corefs", ")", ")", "+", "1", "id", "=", "\"co{coref_type}{i}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "new_coref", "=", "Ccoreference", "(", "type", "=", "self", ".", "type", ")", "new_coref", ".", "set_id", "(", "id", ")", "new_coref", ".", "set_type", "(", "coref_type", ")", "new_coref", ".", "add_span", "(", "term_ids", ")", "self", ".", "add_coreference", "(", "new_coref", ")", "return", "new_coref" ]
docstring: Create a new coreference object and add it to the coreferences layer @type coref_type: string @param coref_type: type of the coreference object @type term_ids: list @param term_ids: list of term ids @type id: string @param id: optional id of the entity
docstring_tokens:
[ "Create", "a", "new", "coreference", "object", "and", "add", "it", "to", "the", "coreferences", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1290-L1313
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_constituency_tree
language: python
code (= original_string):

    def add_constituency_tree(self,my_tree):
        """
        Adds a constituency tree to the constituency layer
        @type my_tree: L{Ctree}
        @param my_tree: the constituency tree object
        """
        if self.constituency_layer is None:
            self.constituency_layer = Cconstituency()
            self.root.append(self.constituency_layer.get_node())
        self.constituency_layer.add_tree(my_tree)

code_tokens:
[ "def", "add_constituency_tree", "(", "self", ",", "my_tree", ")", ":", "if", "self", ".", "constituency_layer", "is", "None", ":", "self", ".", "constituency_layer", "=", "Cconstituency", "(", ")", "self", ".", "root", ".", "append", "(", "self", ".", "constituency_layer", ".", "get_node", "(", ")", ")", "self", ".", "constituency_layer", ".", "add_tree", "(", "my_tree", ")" ]
docstring: Adds a constituency tree to the constituency layer @type my_tree: L{Ctree} @param my_tree: the constituency tree object
docstring_tokens:
[ "Adds", "a", "constituency", "tree", "to", "the", "constituency", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1315-L1324
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.add_property
language: python
code (= original_string):

    def add_property(self,label,term_span,pid=None):
        """
        Adds a property to the property layer
        @type label: string
        @param label: the type of property
        @type term_span: list
        @param term_span: list of term ids
        @type pid: string
        @param pid: the identifier for the property (use None to automatically generate one)
        """
        if self.features_layer is None:
            self.features_layer = Cfeatures(type=self.type)
            self.root.append(self.features_layer.get_node())
        self.features_layer.add_property(pid, label,term_span)

code_tokens:
[ "def", "add_property", "(", "self", ",", "label", ",", "term_span", ",", "pid", "=", "None", ")", ":", "if", "self", ".", "features_layer", "is", "None", ":", "self", ".", "features_layer", "=", "Cfeatures", "(", "type", "=", "self", ".", "type", ")", "self", ".", "root", ".", "append", "(", "self", ".", "features_layer", ".", "get_node", "(", ")", ")", "self", ".", "features_layer", ".", "add_property", "(", "pid", ",", "label", ",", "term_span", ")" ]
docstring: Adds a property to the property layer @type label: string @param label: the type of property @type term_span: list @param term_span: list of term ids @type pid: string @param pid: the identifier for the property (use None to automatically generate one)
docstring_tokens:
[ "Adds", "a", "property", "to", "the", "property", "layer" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1327-L1340
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.get_dict_tokens_for_termid
language: python
code (= original_string):

    def get_dict_tokens_for_termid(self, term_id):
        """
        Returns the tokens ids that are the span of the term specified
        @type term_id: string
        @param term_id: the term idenfier
        @rtype: list
        @return: list of token ids that are the span of the term
        """
        if self.dict_tokens_for_tid is None:
            self.dict_tokens_for_tid = {}
            for term in self.get_terms():
                self.dict_tokens_for_tid[term.get_id()] = term.get_span().get_span_ids()
        return self.dict_tokens_for_tid.get(term_id,[])

code_tokens:
[ "def", "get_dict_tokens_for_termid", "(", "self", ",", "term_id", ")", ":", "if", "self", ".", "dict_tokens_for_tid", "is", "None", ":", "self", ".", "dict_tokens_for_tid", "=", "{", "}", "for", "term", "in", "self", ".", "get_terms", "(", ")", ":", "self", ".", "dict_tokens_for_tid", "[", "term", ".", "get_id", "(", ")", "]", "=", "term", ".", "get_span", "(", ")", ".", "get_span_ids", "(", ")", "return", "self", ".", "dict_tokens_for_tid", ".", "get", "(", "term_id", ",", "[", "]", ")" ]
docstring: Returns the tokens ids that are the span of the term specified @type term_id: string @param term_id: the term idenfier @rtype: list @return: list of token ids that are the span of the term
docstring_tokens:
[ "Returns", "the", "tokens", "ids", "that", "are", "the", "span", "of", "the", "term", "specified" ]
sha: 9bc32e803c176404b255ba317479b8780ed5f569
url: https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1344-L1357
partition: train
repo: cltl/KafNafParserPy
path: KafNafParserPy/KafNafParserMod.py
func_name: KafNafParser.map_tokens_to_terms
language: python
code (= original_string):

    def map_tokens_to_terms(self,list_tokens):
        """
        Maps a list of token ids to the corresponding term ids
        @type list_tokens: list
        @param list_tokens: list of token identifiers
        @rtype: list
        @return: list of term idenfitiers
        """
        if self.terms_for_token is None:
            self.terms_for_token = {}
            for term in self.get_terms():
                termid = term.get_id()
                token_ids = term.get_span().get_span_ids()
                for tokid in token_ids:
                    if tokid not in self.terms_for_token:
                        self.terms_for_token[tokid] = [termid]
                    else:
                        self.terms_for_token[tokid].append(termid)
        ret = set()
        for my_id in list_tokens:
            term_ids = self.terms_for_token.get(my_id,[])
            ret |= set(term_ids)
        return sorted(list(ret))

code_tokens:
[ "def", "map_tokens_to_terms", "(", "self", ",", "list_tokens", ")", ":", "if", "self", ".", "terms_for_token", "is", "None", ":", "self", ".", "terms_for_token", "=", "{", "}", "for", "term", "in", "self", ".", "get_terms", "(", ")", ":", "termid", "=", "term", ".", "get_id", "(", ")", "token_ids", "=", "term", ".", "get_span", "(", ")", ".", "get_span_ids", "(", ")", "for", "tokid", "in", "token_ids", ":", "if", "tokid", "not", "in", "self", ".", "terms_for_token", ":", "self", ".", "terms_for_token", "[", "tokid", "]", "=", "[", "termid", "]", "else", ":", "self", ".", "terms_for_token", "[", "tokid", "]", ".", "append", "(", "termid", ")", "ret", "=", "set", "(", ")", "for", "my_id", "in", "list_tokens", ":", "term_ids", "=", "self", ".", "terms_for_token", ".", "get", "(", "my_id", ",", "[", "]", ")", "ret", "|=", "set", "(", "term_ids", ")", "return", "sorted", "(", "list", "(", "ret", ")", ")" ]
Maps a list of token ids to the corresponding term ids @type list_tokens: list @param list_tokens: list of token identifiers @rtype: list @return: list of term identifiers
[ "Maps", "a", "list", "of", "token", "ids", "to", "the", "corresponding", "term", "ids" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1360-L1383
train
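The inverse mapping can be exercised the same way; the token ids below are hypothetical:

    # Returns the sorted term ids whose spans cover any of the given tokens
    term_ids = parser.map_tokens_to_terms(['w_3', 'w_4'])
    print(term_ids)                                   # e.g. ['t_3', 't_4']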
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
KafNafParser.add_external_reference_to_term
def add_external_reference_to_term(self,term_id, external_ref): """ Adds an external reference to the given term identifier @type term_id: string @param term_id: the term identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.term_layer is not None: self.term_layer.add_external_reference(term_id, external_ref)
python
def add_external_reference_to_term(self,term_id, external_ref): """ Adds an external reference to the given term identifier @type term_id: string @param term_id: the term identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.term_layer is not None: self.term_layer.add_external_reference(term_id, external_ref)
[ "def", "add_external_reference_to_term", "(", "self", ",", "term_id", ",", "external_ref", ")", ":", "if", "self", ".", "term_layer", "is", "not", "None", ":", "self", ".", "term_layer", ".", "add_external_reference", "(", "term_id", ",", "external_ref", ")" ]
Adds an external reference to the given term identifier @type term_id: string @param term_id: the term identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference}
[ "Adds", "an", "external", "reference", "to", "the", "given", "term", "identifier" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1406-L1415
train
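A hedged sketch of attaching an external reference. The CexternalReference setters below are assumptions inferred from the L{CexternalReference} link in the docstring, and the import may need adjusting (e.g. to KafNafParserPy.external_references_data); all values are illustrative:

    from KafNafParserPy import KafNafParser, CexternalReference  # import assumed

    ext_ref = CexternalReference()
    ext_ref.set_resource('WordNet-3.0')      # hypothetical resource name
    ext_ref.set_reference('ili-30-02084071-n')
    ext_ref.set_confidence('0.87')
    parser.add_external_reference_to_term('t_12', ext_ref)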
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
KafNafParser.add_external_reference_to_role
def add_external_reference_to_role(self,role_id,external_ref): """ Adds an external reference to the given role identifier in the SRL layer @type role_id: string @param role_id: the role identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.srl_layer is not None: self.srl_layer.add_external_reference_to_role(role_id,external_ref)
python
def add_external_reference_to_role(self,role_id,external_ref): """ Adds an external reference to the given role identifier in the SRL layer @type role_id: string @param role_id: the role identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.srl_layer is not None: self.srl_layer.add_external_reference_to_role(role_id,external_ref)
[ "def", "add_external_reference_to_role", "(", "self", ",", "role_id", ",", "external_ref", ")", ":", "if", "self", ".", "srl_layer", "is", "not", "None", ":", "self", ".", "srl_layer", ".", "add_external_reference_to_role", "(", "role_id", ",", "external_ref", ")" ]
Adds an external reference to the given role identifier in the SRL layer @type role_id: string @param role_id: the role identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference}
[ "Adds", "an", "external", "reference", "to", "the", "given", "role", "identifier", "in", "the", "SRL", "layer" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1426-L1435
train
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
KafNafParser.remove_external_references_from_srl_layer
def remove_external_references_from_srl_layer(self): """ Removes all external references present in the SRL layer """ if self.srl_layer is not None: for pred in self.srl_layer.get_predicates(): pred.remove_external_references() pred.remove_external_references_from_roles()
python
def remove_external_references_from_srl_layer(self): """ Removes all external references present in the SRL layer """ if self.srl_layer is not None: for pred in self.srl_layer.get_predicates(): pred.remove_external_references() pred.remove_external_references_from_roles()
[ "def", "remove_external_references_from_srl_layer", "(", "self", ")", ":", "if", "self", ".", "srl_layer", "is", "not", "None", ":", "for", "pred", "in", "self", ".", "srl_layer", ".", "get_predicates", "(", ")", ":", "pred", ".", "remove_external_references", "(", ")", "pred", ".", "remove_external_references_from_roles", "(", ")" ]
Removes all external references present in the SRL layer
[ "Removes", "all", "external", "references", "present", "in", "the", "SRL", "layer" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1439-L1446
train
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
KafNafParser.add_external_reference_to_entity
def add_external_reference_to_entity(self,entity_id, external_ref): """ Adds an external reference to the given entity identifier in the entity layer @type entity_id: string @param entity_id: the entity identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.entity_layer is not None: self.entity_layer.add_external_reference_to_entity(entity_id,external_ref)
python
def add_external_reference_to_entity(self,entity_id, external_ref): """ Adds an external reference to the given entity identifier in the entity layer @type entity_id: string @param entity_id: the entity identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference} """ if self.entity_layer is not None: self.entity_layer.add_external_reference_to_entity(entity_id,external_ref)
[ "def", "add_external_reference_to_entity", "(", "self", ",", "entity_id", ",", "external_ref", ")", ":", "if", "self", ".", "entity_layer", "is", "not", "None", ":", "self", ".", "entity_layer", ".", "add_external_reference_to_entity", "(", "entity_id", ",", "external_ref", ")" ]
Adds an external reference to the given entity identifier in the entity layer @type entity_id: string @param entity_id: the entity identifier @param external_ref: an external reference object @type external_ref: L{CexternalReference}
[ "Adds", "an", "external", "reference", "to", "the", "given", "entity", "identifier", "in", "the", "entity", "layer" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1448-L1457
train
teepark/greenhouse
greenhouse/io/files.py
FileBase.read
def read(self, size=-1): """read a number of bytes from the file and return it as a string .. note:: this method will block if there is no data already available :param size: the maximum number of bytes to read from the file. < 0 means read the file to the end :type size: int :returns: a string of the read file contents """ chunksize = size < 0 and self.CHUNKSIZE or min(self.CHUNKSIZE, size) buf = self._rbuf buf.seek(0, os.SEEK_END) collected = buf.tell() while 1: if size >= 0 and collected >= size: # we have read enough already break output = self._read_chunk(chunksize) if output is None: continue if not output: # nothing more to read break collected += len(output) buf.write(output) # get rid of the old buffer rc = buf.getvalue() buf.seek(0) buf.truncate() if size >= 0: # leave the overflow in the buffer buf.write(rc[size:]) return rc[:size] return rc
python
def read(self, size=-1): """read a number of bytes from the file and return it as a string .. note:: this method will block if there is no data already available :param size: the maximum number of bytes to read from the file. < 0 means read the file to the end :type size: int :returns: a string of the read file contents """ chunksize = size < 0 and self.CHUNKSIZE or min(self.CHUNKSIZE, size) buf = self._rbuf buf.seek(0, os.SEEK_END) collected = buf.tell() while 1: if size >= 0 and collected >= size: # we have read enough already break output = self._read_chunk(chunksize) if output is None: continue if not output: # nothing more to read break collected += len(output) buf.write(output) # get rid of the old buffer rc = buf.getvalue() buf.seek(0) buf.truncate() if size >= 0: # leave the overflow in the buffer buf.write(rc[size:]) return rc[:size] return rc
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ")", ":", "chunksize", "=", "size", "<", "0", "and", "self", ".", "CHUNKSIZE", "or", "min", "(", "self", ".", "CHUNKSIZE", ",", "size", ")", "buf", "=", "self", ".", "_rbuf", "buf", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "collected", "=", "buf", ".", "tell", "(", ")", "while", "1", ":", "if", "size", ">=", "0", "and", "collected", ">=", "size", ":", "# we have read enough already", "break", "output", "=", "self", ".", "_read_chunk", "(", "chunksize", ")", "if", "output", "is", "None", ":", "continue", "if", "not", "output", ":", "# nothing more to read", "break", "collected", "+=", "len", "(", "output", ")", "buf", ".", "write", "(", "output", ")", "# get rid of the old buffer", "rc", "=", "buf", ".", "getvalue", "(", ")", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "if", "size", ">=", "0", ":", "# leave the overflow in the buffer", "buf", ".", "write", "(", "rc", "[", "size", ":", "]", ")", "return", "rc", "[", ":", "size", "]", "return", "rc" ]
read a number of bytes from the file and return it as a string .. note:: this method will block if there is no data already available :param size: the maximum number of bytes to read from the file. < 0 means read the file to the end :type size: int :returns: a string of the read file contents
[ "read", "a", "number", "of", "bytes", "from", "the", "file", "and", "return", "it", "as", "a", "string" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L48-L91
train
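The read loop above returns exactly `size` bytes and parks any overflow back in the internal buffer for the next call. The truncate-and-rewrite trick it uses can be illustrated standalone (plain Python, independent of greenhouse):

    from io import StringIO

    buf = StringIO()
    buf.write('hello world')        # pretend this was collected in chunks
    rc = buf.getvalue()
    size = 5
    buf.seek(0)
    buf.truncate()
    buf.write(rc[size:])            # overflow ' world' stays buffered
    print(repr(rc[:size]))          # 'hello' is what the caller receives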
teepark/greenhouse
greenhouse/io/files.py
FileBase.readline
def readline(self, max_len=-1): """read from the file until a newline is encountered .. note:: this method will block if there isn't already a full line available from the data source :param max_len: stop reading a single line after this many bytes :type max_len: int :returns: a string of the line it read from the file, including the newline at the end """ buf = self._rbuf newline, chunksize = self.NEWLINE, self.CHUNKSIZE buf.seek(0) text = buf.read() if len(text) >= max_len >= 0: buf.seek(0) buf.truncate() buf.write(text[max_len:]) return text[:max_len] while text.find(newline) < 0: text = self._read_chunk(chunksize) if text is None: text = '' continue if buf.tell() + len(text) >= max_len >= 0: text = buf.getvalue() + text buf.seek(0) buf.truncate() buf.write(text[max_len:]) return text[:max_len] if not text: break buf.write(text) else: # found a newline rc = buf.getvalue() index = rc.find(newline) + len(newline) buf.seek(0) buf.truncate() buf.write(rc[index:]) return rc[:index] # hit the end of the file, no more newlines rc = buf.getvalue() buf.seek(0) buf.truncate() return rc
python
def readline(self, max_len=-1): """read from the file until a newline is encountered .. note:: this method will block if there isn't already a full line available from the data source :param max_len: stop reading a single line after this many bytes :type max_len: int :returns: a string of the line it read from the file, including the newline at the end """ buf = self._rbuf newline, chunksize = self.NEWLINE, self.CHUNKSIZE buf.seek(0) text = buf.read() if len(text) >= max_len >= 0: buf.seek(0) buf.truncate() buf.write(text[max_len:]) return text[:max_len] while text.find(newline) < 0: text = self._read_chunk(chunksize) if text is None: text = '' continue if buf.tell() + len(text) >= max_len >= 0: text = buf.getvalue() + text buf.seek(0) buf.truncate() buf.write(text[max_len:]) return text[:max_len] if not text: break buf.write(text) else: # found a newline rc = buf.getvalue() index = rc.find(newline) + len(newline) buf.seek(0) buf.truncate() buf.write(rc[index:]) return rc[:index] # hit the end of the file, no more newlines rc = buf.getvalue() buf.seek(0) buf.truncate() return rc
[ "def", "readline", "(", "self", ",", "max_len", "=", "-", "1", ")", ":", "buf", "=", "self", ".", "_rbuf", "newline", ",", "chunksize", "=", "self", ".", "NEWLINE", ",", "self", ".", "CHUNKSIZE", "buf", ".", "seek", "(", "0", ")", "text", "=", "buf", ".", "read", "(", ")", "if", "len", "(", "text", ")", ">=", "max_len", ">=", "0", ":", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "buf", ".", "write", "(", "text", "[", "max_len", ":", "]", ")", "return", "text", "[", ":", "max_len", "]", "while", "text", ".", "find", "(", "newline", ")", "<", "0", ":", "text", "=", "self", ".", "_read_chunk", "(", "chunksize", ")", "if", "text", "is", "None", ":", "text", "=", "''", "continue", "if", "buf", ".", "tell", "(", ")", "+", "len", "(", "text", ")", ">=", "max_len", ">=", "0", ":", "text", "=", "buf", ".", "getvalue", "(", ")", "+", "text", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "buf", ".", "write", "(", "text", "[", "max_len", ":", "]", ")", "return", "text", "[", ":", "max_len", "]", "if", "not", "text", ":", "break", "buf", ".", "write", "(", "text", ")", "else", ":", "# found a newline", "rc", "=", "buf", ".", "getvalue", "(", ")", "index", "=", "rc", ".", "find", "(", "newline", ")", "+", "len", "(", "newline", ")", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "buf", ".", "write", "(", "rc", "[", "index", ":", "]", ")", "return", "rc", "[", ":", "index", "]", "# hit the end of the file, no more newlines", "rc", "=", "buf", ".", "getvalue", "(", ")", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "return", "rc" ]
read from the file until a newline is encountered .. note:: this method will block if there isn't already a full line available from the data source :param max_len: stop reading a single line after this many bytes :type max_len: int :returns: a string of the line it read from the file, including the newline at the end
[ "read", "from", "the", "file", "until", "a", "newline", "is", "encountered" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L93-L147
train
teepark/greenhouse
greenhouse/io/files.py
FileBase.write
def write(self, data): """write data to the file :param data: the data to write into the file, at the descriptor's current position :type data: str """ while data: went = self._write_chunk(data) if went is None: continue data = data[went:]
python
def write(self, data): """write data to the file :param data: the data to write into the file, at the descriptor's current position :type data: str """ while data: went = self._write_chunk(data) if went is None: continue data = data[went:]
[ "def", "write", "(", "self", ",", "data", ")", ":", "while", "data", ":", "went", "=", "self", ".", "_write_chunk", "(", "data", ")", "if", "went", "is", "None", ":", "continue", "data", "=", "data", "[", "went", ":", "]" ]
write data to the file :param data: the data to write into the file, at the descriptor's current position :type data: str
[ "write", "data", "to", "the", "file" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L161-L173
train
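write() above loops until every byte is accepted; a None return from _write_chunk signals the descriptor briefly wasn't writable. The same partial-write pattern over a raw non-blocking descriptor, as a sketch rather than greenhouse's API:

    import errno
    import os

    def write_all(fd, data):
        # keep writing the unsent tail (bytes) until everything is flushed
        while data:
            try:
                sent = os.write(fd, data)
            except OSError as e:
                if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                    continue    # a real program would poll/select here
                raise
            data = data[sent:]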
teepark/greenhouse
greenhouse/io/files.py
File._wait_event
def _wait_event(self, reading): "wait on our events" with self._registered(reading, not reading): (self._readable if reading else self._writable).wait() if scheduler.state.interrupted: raise IOError(errno.EINTR, "interrupted system call")
python
def _wait_event(self, reading): "wait on our events" with self._registered(reading, not reading): (self._readable if reading else self._writable).wait() if scheduler.state.interrupted: raise IOError(errno.EINTR, "interrupted system call")
[ "def", "_wait_event", "(", "self", ",", "reading", ")", ":", "with", "self", ".", "_registered", "(", "reading", ",", "not", "reading", ")", ":", "(", "self", ".", "_readable", "if", "reading", "else", "self", ".", "_writable", ")", ".", "wait", "(", ")", "if", "scheduler", ".", "state", ".", "interrupted", ":", "raise", "IOError", "(", "errno", ".", "EINTR", ",", "\"interrupted system call\"", ")" ]
wait on our events
[ "wait", "on", "our", "events" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L273-L279
train
teepark/greenhouse
greenhouse/io/files.py
File.fromfd
def fromfd(cls, fd, mode='rb', bufsize=-1): """create a cooperating greenhouse file from an existing descriptor :param fd: the file descriptor to wrap in a new file object :type fd: int :param mode: the file mode :type mode: str :param bufsize: the size of read buffer to use. 0 indicates unbuffered, and < 0 means use the system default. defaults to -1 :returns: a new :class:`File` object connected to the descriptor """ fp = object.__new__(cls) # bypass __init__ fp._rbuf = StringIO() fp.encoding = None fp.mode = mode fp._fileno = fd fp._closed = False cls._add_flags(fd, cls._mode_to_flags(mode)) fp._set_up_waiting() return fp
python
def fromfd(cls, fd, mode='rb', bufsize=-1): """create a cooperating greenhouse file from an existing descriptor :param fd: the file descriptor to wrap in a new file object :type fd: int :param mode: the file mode :type mode: str :param bufsize: the size of read buffer to use. 0 indicates unbuffered, and < 0 means use the system default. defaults to -1 :returns: a new :class:`File` object connected to the descriptor """ fp = object.__new__(cls) # bypass __init__ fp._rbuf = StringIO() fp.encoding = None fp.mode = mode fp._fileno = fd fp._closed = False cls._add_flags(fd, cls._mode_to_flags(mode)) fp._set_up_waiting() return fp
[ "def", "fromfd", "(", "cls", ",", "fd", ",", "mode", "=", "'rb'", ",", "bufsize", "=", "-", "1", ")", ":", "fp", "=", "object", ".", "__new__", "(", "cls", ")", "# bypass __init__", "fp", ".", "_rbuf", "=", "StringIO", "(", ")", "fp", ".", "encoding", "=", "None", "fp", ".", "mode", "=", "mode", "fp", ".", "_fileno", "=", "fd", "fp", ".", "_closed", "=", "False", "cls", ".", "_add_flags", "(", "fd", ",", "cls", ".", "_mode_to_flags", "(", "mode", ")", ")", "fp", ".", "_set_up_waiting", "(", ")", "return", "fp" ]
create a cooperating greenhouse file from an existing descriptor :param fd: the file descriptor to wrap in a new file object :type fd: int :param mode: the file mode :type mode: str :param bufsize: the size of read buffer to use. 0 indicates unbuffered, and < 0 means use the system default. defaults to -1 :returns: a new :class:`File` object connected to the descriptor
[ "create", "a", "cooperating", "greenhouse", "file", "from", "an", "existing", "descriptor" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L310-L333
train
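A usage sketch for fromfd, wrapping the read end of a pipe (greenhouse is Python 2 code, so plain str literals are byte strings here):

    import os
    from greenhouse.io.files import File   # module path taken from this record

    r, w = os.pipe()
    os.write(w, 'ping\n')
    fp = File.fromfd(r, mode='rb')
    print(fp.readline())                   # 'ping\n' once data arrives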
teepark/greenhouse
greenhouse/io/files.py
File.isatty
def isatty(self): "return whether the file is connected to a tty or not" try: return os.isatty(self._fileno) except OSError, e: raise IOError(*e.args)
python
def isatty(self): "return whether the file is connected to a tty or not" try: return os.isatty(self._fileno) except OSError, e: raise IOError(*e.args)
[ "def", "isatty", "(", "self", ")", ":", "try", ":", "return", "os", ".", "isatty", "(", "self", ".", "_fileno", ")", "except", "OSError", ",", "e", ":", "raise", "IOError", "(", "*", "e", ".", "args", ")" ]
return whether the file is connected to a tty or not
[ "return", "whether", "the", "file", "is", "connected", "to", "a", "tty", "or", "not" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L357-L362
train
teepark/greenhouse
greenhouse/io/files.py
File.seek
def seek(self, position, modifier=0): """move the cursor on the file descriptor to a different location :param position: an integer offset from the location indicated by the modifier :type position: int :param modifier: an indicator of how to find the seek location. - ``os.SEEK_SET`` means start from the beginning of the file - ``os.SEEK_CUR`` means start wherever the cursor already is - ``os.SEEK_END`` means start from the end of the file the default is ``os.SEEK_SET`` """ os.lseek(self._fileno, position, modifier) # clear out the buffer buf = self._rbuf buf.seek(0) buf.truncate()
python
def seek(self, position, modifier=0): """move the cursor on the file descriptor to a different location :param position: an integer offset from the location indicated by the modifier :type position: int :param modifier: an indicator of how to find the seek location. - ``os.SEEK_SET`` means start from the beginning of the file - ``os.SEEK_CUR`` means start wherever the cursor already is - ``os.SEEK_END`` means start from the end of the file the default is ``os.SEEK_SET`` """ os.lseek(self._fileno, position, modifier) # clear out the buffer buf = self._rbuf buf.seek(0) buf.truncate()
[ "def", "seek", "(", "self", ",", "position", ",", "modifier", "=", "0", ")", ":", "os", ".", "lseek", "(", "self", ".", "_fileno", ",", "position", ",", "modifier", ")", "# clear out the buffer", "buf", "=", "self", ".", "_rbuf", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")" ]
move the cursor on the file descriptor to a different location :param position: an integer offset from the location indicated by the modifier :type position: int :param modifier: an indicator of how to find the seek location. - ``os.SEEK_SET`` means start from the beginning of the file - ``os.SEEK_CUR`` means start wherever the cursor already is - ``os.SEEK_END`` means start from the end of the file the default is ``os.SEEK_SET``
[ "move", "the", "cursor", "on", "the", "file", "descriptor", "to", "a", "different", "location" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L364-L384
train
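seek() above also discards the read-ahead buffer, so a read after seeking never returns stale bytes. Continuing with a File opened on a regular file (seeking a pipe would fail):

    import os

    fp.seek(0)                     # rewind; buffered read-ahead is dropped
    fp.seek(-10, os.SEEK_END)      # position ten bytes before end-of-file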
teepark/greenhouse
greenhouse/io/files.py
File.tell
def tell(self): "get the file descriptor's position relative to the file's beginning" with _fdopen(os.dup(self._fileno)) as fp: return fp.tell()
python
def tell(self): "get the file descriptor's position relative to the file's beginning" with _fdopen(os.dup(self._fileno)) as fp: return fp.tell()
[ "def", "tell", "(", "self", ")", ":", "with", "_fdopen", "(", "os", ".", "dup", "(", "self", ".", "_fileno", ")", ")", "as", "fp", ":", "return", "fp", ".", "tell", "(", ")" ]
get the file descriptor's position relative to the file's beginning
[ "get", "the", "file", "descriptor", "s", "position", "relative", "to", "the", "file", "s", "beginning" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L386-L389
train
vmonaco/pohmm
pohmm/classification.py
PohmmClassifier.fit
def fit(self, labels, samples, pstates): """ Fit the classifier with labels y and observations X """ assert len(labels) == len(samples) == len(pstates) for label in set(labels): label_samples = [s for l,s in zip(labels, samples) if l == label] label_pstates = [p for l,p in zip(labels, pstates) if l == label] pohmm = self.pohmm_factory() pohmm.fit(label_samples, label_pstates) self.pohmms[label] = pohmm return self
python
def fit(self, labels, samples, pstates): """ Fit the classifier with labels y and observations X """ assert len(labels) == len(samples) == len(pstates) for label in set(labels): label_samples = [s for l,s in zip(labels, samples) if l == label] label_pstates = [p for l,p in zip(labels, pstates) if l == label] pohmm = self.pohmm_factory() pohmm.fit(label_samples, label_pstates) self.pohmms[label] = pohmm return self
[ "def", "fit", "(", "self", ",", "labels", ",", "samples", ",", "pstates", ")", ":", "assert", "len", "(", "labels", ")", "==", "len", "(", "samples", ")", "==", "len", "(", "pstates", ")", "for", "label", "in", "set", "(", "labels", ")", ":", "label_samples", "=", "[", "s", "for", "l", ",", "s", "in", "zip", "(", "labels", ",", "samples", ")", "if", "l", "==", "label", "]", "label_pstates", "=", "[", "p", "for", "l", ",", "p", "in", "zip", "(", "labels", ",", "pstates", ")", "if", "l", "==", "label", "]", "pohmm", "=", "self", ".", "pohmm_factory", "(", ")", "pohmm", ".", "fit", "(", "label_samples", ",", "label_pstates", ")", "self", ".", "pohmms", "[", "label", "]", "=", "pohmm", "return", "self" ]
Fit the classifier with labels y and observations X
[ "Fit", "the", "classifier", "with", "labels", "y", "and", "observations", "X" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L16-L30
train
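A usage sketch for the classifier. Pohmm's constructor arguments are not shown in this record, so the factory below is a placeholder, and the pohmm_factory keyword is inferred from the self.pohmm_factory attribute above:

    from pohmm import Pohmm, PohmmClassifier    # exports assumed

    def factory():
        return Pohmm()                          # hypothetical default construction

    clf = PohmmClassifier(pohmm_factory=factory)
    # labels, samples and pstates are parallel lists, one entry per sample
    clf.fit(labels, samples, pstates)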
vmonaco/pohmm
pohmm/classification.py
PohmmClassifier.fit_df
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL): """ Fit the classifier with labels y and DataFrames dfs """ assert len(labels) == len(dfs) for label in set(labels): label_dfs = [s for l,s in zip(labels, dfs) if l == label] pohmm = self.pohmm_factory() pohmm.fit_df(label_dfs, pstate_col=pstate_col) self.pohmms[label] = pohmm return self
python
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL): """ Fit the classifier with labels y and DataFrames dfs """ assert len(labels) == len(dfs) for label in set(labels): label_dfs = [s for l,s in zip(labels, dfs) if l == label] pohmm = self.pohmm_factory() pohmm.fit_df(label_dfs, pstate_col=pstate_col) self.pohmms[label] = pohmm return self
[ "def", "fit_df", "(", "self", ",", "labels", ",", "dfs", ",", "pstate_col", "=", "PSTATE_COL", ")", ":", "assert", "len", "(", "labels", ")", "==", "len", "(", "dfs", ")", "for", "label", "in", "set", "(", "labels", ")", ":", "label_dfs", "=", "[", "s", "for", "l", ",", "s", "in", "zip", "(", "labels", ",", "dfs", ")", "if", "l", "==", "label", "]", "pohmm", "=", "self", ".", "pohmm_factory", "(", ")", "pohmm", ".", "fit_df", "(", "label_dfs", ",", "pstate_col", "=", "pstate_col", ")", "self", ".", "pohmms", "[", "label", "]", "=", "pohmm", "return", "self" ]
Fit the classifier with labels y and DataFrames dfs
[ "Fit", "the", "classifier", "with", "labels", "y", "and", "DataFrames", "dfs" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L32-L45
train
vmonaco/pohmm
pohmm/classification.py
PohmmClassifier.predict
def predict(self, sample, pstates): """ Predict the class label of X """ scores = {} for label, pohmm in self.pohmms.items(): scores[label] = pohmm.score(sample, pstates) max_score_label = max(scores.items(), key=itemgetter(1))[0] return max_score_label, scores
python
def predict(self, sample, pstates): """ Predict the class label of X """ scores = {} for label, pohmm in self.pohmms.items(): scores[label] = pohmm.score(sample, pstates) max_score_label = max(scores.items(), key=itemgetter(1))[0] return max_score_label, scores
[ "def", "predict", "(", "self", ",", "sample", ",", "pstates", ")", ":", "scores", "=", "{", "}", "for", "label", ",", "pohmm", "in", "self", ".", "pohmms", ".", "items", "(", ")", ":", "scores", "[", "label", "]", "=", "pohmm", ".", "score", "(", "sample", ",", "pstates", ")", "max_score_label", "=", "max", "(", "scores", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "[", "0", "]", "return", "max_score_label", ",", "scores" ]
Predict the class label of X
[ "Predict", "the", "class", "label", "of", "X" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L47-L55
train
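Scoring a new sample against every fitted model, continuing from the fit sketch above:

    label, scores = clf.predict(sample, pstates)
    # scores maps each trained label to its model score;
    # the returned label is the argmax over those scores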
vmonaco/pohmm
pohmm/classification.py
PohmmClassifier.predict_df
def predict_df(self, df, pstate_col=PSTATE_COL): """ Predict the class label of DataFrame df """ scores = {} for label, pohmm in self.pohmms.items(): scores[label] = pohmm.score_df(df, pstate_col=pstate_col) max_score_label = max(scores.items(), key=itemgetter(1))[0] return max_score_label, scores
python
def predict_df(self, df, pstate_col=PSTATE_COL): """ Predict the class label of DataFrame df """ scores = {} for label, pohmm in self.pohmms.items(): scores[label] = pohmm.score_df(df, pstate_col=pstate_col) max_score_label = max(scores.items(), key=itemgetter(1))[0] return max_score_label, scores
[ "def", "predict_df", "(", "self", ",", "df", ",", "pstate_col", "=", "PSTATE_COL", ")", ":", "scores", "=", "{", "}", "for", "label", ",", "pohmm", "in", "self", ".", "pohmms", ".", "items", "(", ")", ":", "scores", "[", "label", "]", "=", "pohmm", ".", "score_df", "(", "df", ",", "pstate_col", "=", "pstate_col", ")", "max_score_label", "=", "max", "(", "scores", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "[", "0", "]", "return", "max_score_label", ",", "scores" ]
Predict the class label of DataFrame df
[ "Predict", "the", "class", "label", "of", "DataFrame", "df" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L57-L65
train
EVEprosper/ProsperCommon
prosper/common/prosper_cli.py
ProsperApplication.load_secrets
def load_secrets(self, secret_path): """render secrets into config object""" self._config = p_config.render_secrets(self.config_path, secret_path)
python
def load_secrets(self, secret_path): """render secrets into config object""" self._config = p_config.render_secrets(self.config_path, secret_path)
[ "def", "load_secrets", "(", "self", ",", "secret_path", ")", ":", "self", ".", "_config", "=", "p_config", ".", "render_secrets", "(", "self", ".", "config_path", ",", "secret_path", ")" ]
render secrets into config object
[ "render", "secrets", "into", "config", "object" ]
bcada3b25420099e1f204db8d55eb268e7b4dc27
https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_cli.py#L57-L59
train
EVEprosper/ProsperCommon
prosper/common/prosper_cli.py
ProsperApplication.logger
def logger(self): """uses "global logger" for logging""" if self._logger: return self._logger else: log_builder = p_logging.ProsperLogger( self.PROGNAME, self.config.get_option('LOGGING', 'log_path'), config_obj=self.config ) if self.verbose: log_builder.configure_debug_logger() else: id_string = '({platform}--{version})'.format( platform=platform.node(), version=self.VERSION ) if self.config.get_option('LOGGING', 'discord_webhook'): log_builder.configure_discord_logger( custom_args=id_string ) if self.config.get_option('LOGGING', 'slack_webhook'): log_builder.configure_slack_logger( custom_args=id_string ) if self.config.get_option('LOGGING', 'hipchat_webhook'): log_builder.configure_hipchat_logger( custom_args=id_string ) self._logger = log_builder.get_logger() return self._logger
python
def logger(self): """uses "global logger" for logging""" if self._logger: return self._logger else: log_builder = p_logging.ProsperLogger( self.PROGNAME, self.config.get_option('LOGGING', 'log_path'), config_obj=self.config ) if self.verbose: log_builder.configure_debug_logger() else: id_string = '({platform}--{version})'.format( platform=platform.node(), version=self.VERSION ) if self.config.get_option('LOGGING', 'discord_webhook'): log_builder.configure_discord_logger( custom_args=id_string ) if self.config.get_option('LOGGING', 'slack_webhook'): log_builder.configure_slack_logger( custom_args=id_string ) if self.config.get_option('LOGGING', 'hipchat_webhook'): log_builder.configure_hipchat_logger( custom_args=id_string ) self._logger = log_builder.get_logger() return self._logger
[ "def", "logger", "(", "self", ")", ":", "if", "self", ".", "_logger", ":", "return", "self", ".", "_logger", "else", ":", "log_builder", "=", "p_logging", ".", "ProsperLogger", "(", "self", ".", "PROGNAME", ",", "self", ".", "config", ".", "get_option", "(", "'LOGGING'", ",", "'log_path'", ")", ",", "config_obj", "=", "self", ".", "config", ")", "if", "self", ".", "verbose", ":", "log_builder", ".", "configure_debug_logger", "(", ")", "else", ":", "id_string", "=", "'({platform}--{version})'", ".", "format", "(", "platform", "=", "platform", ".", "node", "(", ")", ",", "version", "=", "self", ".", "VERSION", ")", "if", "self", ".", "config", ".", "get_option", "(", "'LOGGING'", ",", "'discord_webhook'", ")", ":", "log_builder", ".", "configure_discord_logger", "(", "custom_args", "=", "id_string", ")", "if", "self", ".", "config", ".", "get_option", "(", "'LOGGING'", ",", "'slack_webhook'", ")", ":", "log_builder", ".", "configure_slack_logger", "(", "custom_args", "=", "id_string", ")", "if", "self", ".", "config", ".", "get_option", "(", "'LOGGING'", ",", "'hipchat_webhook'", ")", ":", "log_builder", ".", "configure_hipchat_logger", "(", "custom_args", "=", "id_string", ")", "self", ".", "_logger", "=", "log_builder", ".", "get_logger", "(", ")", "return", "self", ".", "_logger" ]
uses "global logger" for logging
[ "uses", "global", "logger", "for", "logging" ]
bcada3b25420099e1f204db8d55eb268e7b4dc27
https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_cli.py#L63-L95
train
EVEprosper/ProsperCommon
prosper/common/prosper_cli.py
ProsperApplication.config
def config(self): """uses "global config" for cfg""" if self._config: return self._config else: self._config = p_config.ProsperConfig(self.config_path) return self._config
python
def config(self): """uses "global config" for cfg""" if self._config: return self._config else: self._config = p_config.ProsperConfig(self.config_path) return self._config
[ "def", "config", "(", "self", ")", ":", "if", "self", ".", "_config", ":", "return", "self", ".", "_config", "else", ":", "self", ".", "_config", "=", "p_config", ".", "ProsperConfig", "(", "self", ".", "config_path", ")", "return", "self", ".", "_config" ]
uses "global config" for cfg
[ "uses", "global", "config", "for", "cfg" ]
bcada3b25420099e1f204db8d55eb268e7b4dc27
https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_cli.py#L99-L105
train
EVEprosper/ProsperCommon
prosper/common/prosper_cli.py
FlaskLauncher.notify_launch
def notify_launch(self, log_level='ERROR'): """logs launcher message before startup Args: log_level (str): level to notify at """ if not self.debug: self.logger.log( logging.getLevelName(log_level), 'LAUNCHING %s -- %s', self.PROGNAME, platform.node() ) flask_options = { key: getattr(self, key) for key in OPTION_ARGS } flask_options['host'] = self.get_host() self.logger.info('OPTIONS: %s', flask_options)
python
def notify_launch(self, log_level='ERROR'): """logs launcher message before startup Args: log_level (str): level to notify at """ if not self.debug: self.logger.log( logging.getLevelName(log_level), 'LAUNCHING %s -- %s', self.PROGNAME, platform.node() ) flask_options = { key: getattr(self, key) for key in OPTION_ARGS } flask_options['host'] = self.get_host() self.logger.info('OPTIONS: %s', flask_options)
[ "def", "notify_launch", "(", "self", ",", "log_level", "=", "'ERROR'", ")", ":", "if", "not", "self", ".", "debug", ":", "self", ".", "logger", ".", "log", "(", "logging", ".", "getLevelName", "(", "log_level", ")", ",", "'LAUNCHING %s -- %s'", ",", "self", ".", "PROGNAME", ",", "platform", ".", "node", "(", ")", ")", "flask_options", "=", "{", "key", ":", "getattr", "(", "self", ",", "key", ")", "for", "key", "in", "OPTION_ARGS", "}", "flask_options", "[", "'host'", "]", "=", "self", ".", "get_host", "(", ")", "self", ".", "logger", ".", "info", "(", "'OPTIONS: %s'", ",", "flask_options", ")" ]
logs launcher message before startup Args: log_level (str): level to notify at
[ "logs", "launcher", "message", "before", "startup" ]
bcada3b25420099e1f204db8d55eb268e7b4dc27
https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_cli.py#L146-L163
train
Open-ET/openet-core-beta
openet/core/common.py
landsat_c1_toa_cloud_mask
def landsat_c1_toa_cloud_mask(input_img, snow_flag=False, cirrus_flag=False, cloud_confidence=2, shadow_confidence=3, snow_confidence=3, cirrus_confidence=3): """Extract cloud mask from the Landsat Collection 1 TOA BQA band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 TOA collection with a BQA band (e.g. LANDSAT/LE07/C01/T1_TOA). snow_flag : bool If true, mask snow pixels (the default is False). cirrus_flag : bool If true, mask cirrus pixels (the default is False). Note, cirrus bits are only set for Landsat 8 (OLI) images. cloud_confidence : int Minimum cloud confidence value (the default is 2). shadow_confidence : int Minimum cloud shadow confidence value (the default is 3). snow_confidence : int Minimum snow confidence value (the default is 3). Only used if snow_flag is True. cirrus_confidence : int Minimum cirrus confidence value (the default is 3). Only used if cirrus_flag is True. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Designated Fill 1: Terrain Occlusion (OLI) / Dropped Pixel (TM, ETM+) 2-3: Radiometric Saturation 4: Cloud 5-6: Cloud Confidence 7-8: Cloud Shadow Confidence 9-10: Snow/Ice Confidence 11-12: Cirrus Confidence (Landsat 8 only) Confidence values 00: "Not Determined", algorithm did not determine the status of this condition 01: "No", algorithm has low to no confidence that this condition exists (0-33 percent confidence) 10: "Maybe", algorithm has medium confidence that this condition exists (34-66 percent confidence) 11: "Yes", algorithm has high confidence that this condition exists (67-100 percent confidence) References ---------- https://landsat.usgs.gov/collectionqualityband """ qa_img = input_img.select(['BQA']) cloud_mask = qa_img.rightShift(4).bitwiseAnd(1).neq(0)\ .And(qa_img.rightShift(5).bitwiseAnd(3).gte(cloud_confidence))\ .Or(qa_img.rightShift(7).bitwiseAnd(3).gte(shadow_confidence)) if snow_flag: cloud_mask = cloud_mask.Or( qa_img.rightShift(9).bitwiseAnd(3).gte(snow_confidence)) if cirrus_flag: cloud_mask = cloud_mask.Or( qa_img.rightShift(11).bitwiseAnd(3).gte(cirrus_confidence)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
python
def landsat_c1_toa_cloud_mask(input_img, snow_flag=False, cirrus_flag=False, cloud_confidence=2, shadow_confidence=3, snow_confidence=3, cirrus_confidence=3): """Extract cloud mask from the Landsat Collection 1 TOA BQA band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 TOA collection with a BQA band (e.g. LANDSAT/LE07/C01/T1_TOA). snow_flag : bool If true, mask snow pixels (the default is False). cirrus_flag : bool If true, mask cirrus pixels (the default is False). Note, cirrus bits are only set for Landsat 8 (OLI) images. cloud_confidence : int Minimum cloud confidence value (the default is 2). shadow_confidence : int Minimum cloud shadow confidence value (the default is 3). snow_confidence : int Minimum snow confidence value (the default is 3). Only used if snow_flag is True. cirrus_confidence : int Minimum cirrus confidence value (the default is 3). Only used if cirrus_flag is True. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Designated Fill 1: Terrain Occlusion (OLI) / Dropped Pixel (TM, ETM+) 2-3: Radiometric Saturation 4: Cloud 5-6: Cloud Confidence 7-8: Cloud Shadow Confidence 9-10: Snow/Ice Confidence 11-12: Cirrus Confidence (Landsat 8 only) Confidence values 00: "Not Determined", algorithm did not determine the status of this condition 01: "No", algorithm has low to no confidence that this condition exists (0-33 percent confidence) 10: "Maybe", algorithm has medium confidence that this condition exists (34-66 percent confidence) 11: "Yes", algorithm has high confidence that this condition exists (67-100 percent confidence) References ---------- https://landsat.usgs.gov/collectionqualityband """ qa_img = input_img.select(['BQA']) cloud_mask = qa_img.rightShift(4).bitwiseAnd(1).neq(0)\ .And(qa_img.rightShift(5).bitwiseAnd(3).gte(cloud_confidence))\ .Or(qa_img.rightShift(7).bitwiseAnd(3).gte(shadow_confidence)) if snow_flag: cloud_mask = cloud_mask.Or( qa_img.rightShift(9).bitwiseAnd(3).gte(snow_confidence)) if cirrus_flag: cloud_mask = cloud_mask.Or( qa_img.rightShift(11).bitwiseAnd(3).gte(cirrus_confidence)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
[ "def", "landsat_c1_toa_cloud_mask", "(", "input_img", ",", "snow_flag", "=", "False", ",", "cirrus_flag", "=", "False", ",", "cloud_confidence", "=", "2", ",", "shadow_confidence", "=", "3", ",", "snow_confidence", "=", "3", ",", "cirrus_confidence", "=", "3", ")", ":", "qa_img", "=", "input_img", ".", "select", "(", "[", "'BQA'", "]", ")", "cloud_mask", "=", "qa_img", ".", "rightShift", "(", "4", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ".", "And", "(", "qa_img", ".", "rightShift", "(", "5", ")", ".", "bitwiseAnd", "(", "3", ")", ".", "gte", "(", "cloud_confidence", ")", ")", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "7", ")", ".", "bitwiseAnd", "(", "3", ")", ".", "gte", "(", "shadow_confidence", ")", ")", "if", "snow_flag", ":", "cloud_mask", "=", "cloud_mask", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "9", ")", ".", "bitwiseAnd", "(", "3", ")", ".", "gte", "(", "snow_confidence", ")", ")", "if", "cirrus_flag", ":", "cloud_mask", "=", "cloud_mask", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "11", ")", ".", "bitwiseAnd", "(", "3", ")", ".", "gte", "(", "cirrus_confidence", ")", ")", "# Set cloudy pixels to 0 and clear to 1", "return", "cloud_mask", ".", "Not", "(", ")" ]
Extract cloud mask from the Landsat Collection 1 TOA BQA band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 TOA collection with a BQA band (e.g. LANDSAT/LE07/C01/T1_TOA). snow_flag : bool If true, mask snow pixels (the default is False). cirrus_flag : bool If true, mask cirrus pixels (the default is False). Note, cirrus bits are only set for Landsat 8 (OLI) images. cloud_confidence : int Minimum cloud confidence value (the default is 2). shadow_confidence : int Minimum cloud shadow confidence value (the default is 3). snow_confidence : int Minimum snow confidence value (the default is 3). Only used if snow_flag is True. cirrus_confidence : int Minimum cirrus confidence value (the default is 3). Only used if cirrus_flag is True. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Designated Fill 1: Terrain Occlusion (OLI) / Dropped Pixel (TM, ETM+) 2-3: Radiometric Saturation 4: Cloud 5-6: Cloud Confidence 7-8: Cloud Shadow Confidence 9-10: Snow/Ice Confidence 11-12: Cirrus Confidence (Landsat 8 only) Confidence values 00: "Not Determined", algorithm did not determine the status of this condition 01: "No", algorithm has low to no confidence that this condition exists (0-33 percent confidence) 10: "Maybe", algorithm has medium confidence that this condition exists (34-66 percent confidence) 11: "Yes", algorithm has high confidence that this condition exists (67-100 percent confidence) References ---------- https://landsat.usgs.gov/collectionqualityband
[ "Extract", "cloud", "mask", "from", "the", "Landsat", "Collection", "1", "TOA", "BQA", "band" ]
f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db
https://github.com/Open-ET/openet-core-beta/blob/f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db/openet/core/common.py#L6-L80
train
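The shift-and-mask logic above can be checked with plain integer arithmetic; the sample BQA value below (chosen for illustration) has the cloud bit set with high cloud confidence:

    bqa = 0b0000011001110000        # bits 4-6 and 9-10 set
    cloud = (bqa >> 4) & 1          # -> 1, bit 4: cloud
    cloud_conf = (bqa >> 5) & 3     # -> 3, bits 5-6: cloud confidence
    shadow_conf = (bqa >> 7) & 3    # -> 0, bits 7-8: shadow confidence
    is_cloud = (cloud and cloud_conf >= 2) or shadow_conf >= 3
    print(int(not is_cloud))        # 0: pixel would be masked as cloud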
Open-ET/openet-core-beta
openet/core/common.py
landsat_c1_sr_cloud_mask
def landsat_c1_sr_cloud_mask(input_img, cloud_confidence=3, snow_flag=False): """Extract cloud mask from the Landsat Collection 1 SR pixel_qa band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 SR image collection with a pixel_qa band (e.g. LANDSAT/LE07/C01/T1_SR). cloud_confidence : int Minimum cloud confidence value (the default is 3). snow_flag : bool If true, mask snow pixels (the default is False). Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Fill 1: Clear 2: Water 3: Cloud Shadow 4: Snow 5: Cloud 6-7: Cloud Confidence Confidence values 00: "None" 01: "Low" 10: "Medium" 11: "High" References ---------- https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment """ qa_img = input_img.select(['pixel_qa']) cloud_mask = qa_img.rightShift(5).bitwiseAnd(1).neq(0)\ .And(qa_img.rightShift(6).bitwiseAnd(3).gte(cloud_confidence))\ .Or(qa_img.rightShift(3).bitwiseAnd(1).neq(0)) if snow_flag: cloud_mask = cloud_mask.Or(qa_img.rightShift(4).bitwiseAnd(1).neq(0)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
python
def landsat_c1_sr_cloud_mask(input_img, cloud_confidence=3, snow_flag=False): """Extract cloud mask from the Landsat Collection 1 SR pixel_qa band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 SR image collection with a pixel_qa band (e.g. LANDSAT/LE07/C01/T1_SR). cloud_confidence : int Minimum cloud confidence value (the default is 3). snow_flag : bool If true, mask snow pixels (the default is False). Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Fill 1: Clear 2: Water 3: Cloud Shadow 4: Snow 5: Cloud 6-7: Cloud Confidence Confidence values 00: "None" 01: "Low" 10: "Medium" 11: "High" References ---------- https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment """ qa_img = input_img.select(['pixel_qa']) cloud_mask = qa_img.rightShift(5).bitwiseAnd(1).neq(0)\ .And(qa_img.rightShift(6).bitwiseAnd(3).gte(cloud_confidence))\ .Or(qa_img.rightShift(3).bitwiseAnd(1).neq(0)) if snow_flag: cloud_mask = cloud_mask.Or(qa_img.rightShift(4).bitwiseAnd(1).neq(0)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
[ "def", "landsat_c1_sr_cloud_mask", "(", "input_img", ",", "cloud_confidence", "=", "3", ",", "snow_flag", "=", "False", ")", ":", "qa_img", "=", "input_img", ".", "select", "(", "[", "'pixel_qa'", "]", ")", "cloud_mask", "=", "qa_img", ".", "rightShift", "(", "5", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ".", "And", "(", "qa_img", ".", "rightShift", "(", "6", ")", ".", "bitwiseAnd", "(", "3", ")", ".", "gte", "(", "cloud_confidence", ")", ")", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "3", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ")", "if", "snow_flag", ":", "cloud_mask", "=", "cloud_mask", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "4", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ")", "# Set cloudy pixels to 0 and clear to 1", "return", "cloud_mask", ".", "Not", "(", ")" ]
Extract cloud mask from the Landsat Collection 1 SR pixel_qa band Parameters ---------- input_img : ee.Image Image from a Landsat Collection 1 SR image collection with a pixel_qa band (e.g. LANDSAT/LE07/C01/T1_SR). cloud_confidence : int Minimum cloud confidence value (the default is 3). snow_flag : bool If true, mask snow pixels (the default is False). Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Assuming Cloud must be set to check Cloud Confidence Bits 0: Fill 1: Clear 2: Water 3: Cloud Shadow 4: Snow 5: Cloud 6-7: Cloud Confidence Confidence values 00: "None" 01: "Low" 10: "Medium" 11: "High" References ---------- https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
[ "Extract", "cloud", "mask", "from", "the", "Landsat", "Collection", "1", "SR", "pixel_qa", "band" ]
f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db
https://github.com/Open-ET/openet-core-beta/blob/f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db/openet/core/common.py#L83-L135
train
Open-ET/openet-core-beta
openet/core/common.py
sentinel2_toa_cloud_mask
def sentinel2_toa_cloud_mask(input_img): """Extract cloud mask from the Sentinel 2 TOA QA60 band Parameters ---------- input_img : ee.Image Image from the COPERNICUS/S2 collection with a QA60 band. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Bits 10: Opaque clouds present 11: Cirrus clouds present The Sentinel 2 TOA and SR cloud masks functions are currently identical References ---------- https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks """ qa_img = input_img.select(['QA60']) cloud_mask = qa_img.rightShift(10).bitwiseAnd(1).neq(0)\ .Or(qa_img.rightShift(11).bitwiseAnd(1).neq(0)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
python
def sentinel2_toa_cloud_mask(input_img): """Extract cloud mask from the Sentinel 2 TOA QA60 band Parameters ---------- input_img : ee.Image Image from the COPERNICUS/S2 collection with a QA60 band. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Bits 10: Opaque clouds present 11: Cirrus clouds present The Sentinel 2 TOA and SR cloud masks functions are currently identical References ---------- https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks """ qa_img = input_img.select(['QA60']) cloud_mask = qa_img.rightShift(10).bitwiseAnd(1).neq(0)\ .Or(qa_img.rightShift(11).bitwiseAnd(1).neq(0)) # Set cloudy pixels to 0 and clear to 1 return cloud_mask.Not()
[ "def", "sentinel2_toa_cloud_mask", "(", "input_img", ")", ":", "qa_img", "=", "input_img", ".", "select", "(", "[", "'QA60'", "]", ")", "cloud_mask", "=", "qa_img", ".", "rightShift", "(", "10", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ".", "Or", "(", "qa_img", ".", "rightShift", "(", "11", ")", ".", "bitwiseAnd", "(", "1", ")", ".", "neq", "(", "0", ")", ")", "# Set cloudy pixels to 0 and clear to 1", "return", "cloud_mask", ".", "Not", "(", ")" ]
Extract cloud mask from the Sentinel 2 TOA QA60 band Parameters ---------- input_img : ee.Image Image from the COPERNICUS/S2 collection with a QA60 band. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Bits 10: Opaque clouds present 11: Cirrus clouds present The Sentinel 2 TOA and SR cloud masks functions are currently identical References ---------- https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks
[ "Extract", "cloud", "mask", "from", "the", "Sentinel", "2", "TOA", "QA60", "band" ]
f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db
https://github.com/Open-ET/openet-core-beta/blob/f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db/openet/core/common.py#L138-L172
train
nickpandolfi/Cyther
cyther/searcher.py
where
def where(cmd, path=None): """ A function to wrap shutil.which for universal usage """ raw_result = shutil.which(cmd, os.X_OK, path) if raw_result: return os.path.abspath(raw_result) else: raise ValueError("Could not find '{}' in the path".format(cmd))
python
def where(cmd, path=None): """ A function to wrap shutil.which for universal usage """ raw_result = shutil.which(cmd, os.X_OK, path) if raw_result: return os.path.abspath(raw_result) else: raise ValueError("Could not find '{}' in the path".format(cmd))
[ "def", "where", "(", "cmd", ",", "path", "=", "None", ")", ":", "raw_result", "=", "shutil", ".", "which", "(", "cmd", ",", "os", ".", "X_OK", ",", "path", ")", "if", "raw_result", ":", "return", "os", ".", "path", ".", "abspath", "(", "raw_result", ")", "else", ":", "raise", "ValueError", "(", "\"Could not find '{}' in the path\"", ".", "format", "(", "cmd", ")", ")" ]
A function to wrap shutil.which for universal usage
[ "A", "function", "to", "wrap", "shutil", ".", "which", "for", "universal", "usage" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/searcher.py#L23-L31
train
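Usage is direct; the lookup either resolves to an absolute path or raises:

    path = where('python')          # e.g. '/usr/bin/python'
    where('no-such-command-xyz')    # raises ValueError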
nickpandolfi/Cyther
cyther/searcher.py
search_file
def search_file(pattern, file_path): """ Search a given file's contents for the regex pattern given as 'pattern' """ try: with open(file_path) as file: string = file.read() except PermissionError: return [] matches = re.findall(pattern, string) return matches
python
def search_file(pattern, file_path): """ Search a given file's contents for the regex pattern given as 'pattern' """ try: with open(file_path) as file: string = file.read() except PermissionError: return [] matches = re.findall(pattern, string) return matches
[ "def", "search_file", "(", "pattern", ",", "file_path", ")", ":", "try", ":", "with", "open", "(", "file_path", ")", "as", "file", ":", "string", "=", "file", ".", "read", "(", ")", "except", "PermissionError", ":", "return", "[", "]", "matches", "=", "re", ".", "findall", "(", "pattern", ",", "string", ")", "return", "matches" ]
Search a given file's contents for the regex pattern given as 'pattern'
[ "Search", "a", "given", "file", "s", "contents", "for", "the", "regex", "pattern", "given", "as", "pattern" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/searcher.py#L34-L46
train
nickpandolfi/Cyther
cyther/launcher.py
call
def call(commands, *, print_result=False, raise_exception=False, print_commands=False): """ Will call a set of commands and wrangle the output how you choose """ if isinstance(commands, str): commands = commands.split() if not (isinstance(commands, tuple) or isinstance(commands, list)): raise ValueError("Function 'call' does not accept a 'commands'" "argument of type '{}'".format(type(commands))) if raise_exception: print_result = False try: process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if print_commands: _print_commands(commands) except: # TODO Why couldn't we just do 'except Exception as output' output = traceback.format_exc() result = Result(1, stderr=output) if print_result and not raise_exception: print(output, file=sys.stderr) else: result = _extract_output(process, print_result, raise_exception) if raise_exception and (result.returncode == 1): message = "An error occurred in an external process:\n\n{}" raise Exception(message.format(result.getStderr())) return result
python
def call(commands, *, print_result=False, raise_exception=False, print_commands=False): """ Will call a set of commands and wrangle the output how you choose """ if isinstance(commands, str): commands = commands.split() if not (isinstance(commands, tuple) or isinstance(commands, list)): raise ValueError("Function 'call' does not accept a 'commands'" "argument of type '{}'".format(type(commands))) if raise_exception: print_result = False try: process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if print_commands: _print_commands(commands) except: # TODO Why couldn't we just do 'except Exception as output' output = traceback.format_exc() result = Result(1, stderr=output) if print_result and not raise_exception: print(output, file=sys.stderr) else: result = _extract_output(process, print_result, raise_exception) if raise_exception and (result.returncode == 1): message = "An error occurred in an external process:\n\n{}" raise Exception(message.format(result.getStderr())) return result
[ "def", "call", "(", "commands", ",", "*", ",", "print_result", "=", "False", ",", "raise_exception", "=", "False", ",", "print_commands", "=", "False", ")", ":", "if", "isinstance", "(", "commands", ",", "str", ")", ":", "commands", "=", "commands", ".", "split", "(", ")", "if", "not", "(", "isinstance", "(", "commands", ",", "tuple", ")", "or", "isinstance", "(", "commands", ",", "list", ")", ")", ":", "raise", "ValueError", "(", "\"Function 'call' does not accept a 'commands'\"", "\"argument of type '{}'\"", ".", "format", "(", "type", "(", "commands", ")", ")", ")", "if", "raise_exception", ":", "print_result", "=", "False", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "commands", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "if", "print_commands", ":", "_print_commands", "(", "commands", ")", "except", ":", "# TODO Why couldn't we just do 'except Exception as output'", "output", "=", "traceback", ".", "format_exc", "(", ")", "result", "=", "Result", "(", "1", ",", "stderr", "=", "output", ")", "if", "print_result", "and", "not", "raise_exception", ":", "print", "(", "output", ",", "file", "=", "sys", ".", "stderr", ")", "else", ":", "result", "=", "_extract_output", "(", "process", ",", "print_result", ",", "raise_exception", ")", "if", "raise_exception", "and", "(", "result", ".", "returncode", "==", "1", ")", ":", "message", "=", "\"An error occurred in an external process:\\n\\n{}\"", "raise", "Exception", "(", "message", ".", "format", "(", "result", ".", "getStderr", "(", ")", ")", ")", "return", "result" ]
Will call a set of commands and wrangle the output how you choose
[ "Will", "call", "a", "set", "of", "commands", "and", "wrangle", "the", "output", "how", "you", "choose" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L105-L140
train
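A minimal usage sketch for 'call'; the import path is taken from the record's path field, and the Result accessors used here (returncode, getOutput, getStderr) appear in the Result records further down this file. Treat the command string as illustrative:

from cyther.launcher import call

result = call('python --version', print_commands=True)
if result.returncode == 0:
    print(result.getOutput())   # combined stdout/stderr on success
else:
    print(result.getStderr())   # traceback or process stderr on failure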
nickpandolfi/Cyther
cyther/launcher.py
multiCall
def multiCall(*commands, dependent=True, bundle=False, print_result=False, print_commands=False): """ Calls the function 'call' multiple times, given sets of commands """ results = [] dependent_failed = False for command in commands: if not dependent_failed: response = call(command, print_result=print_result, print_commands=print_commands) # TODO Will an error ever return a code other than '1'? if (response.returncode == 1) and dependent: dependent_failed = True else: response = None results.append(response) if bundle: result = Result() for response in results: if not response: continue elif response.returncode == 1: result.returncode = 1 result.extendInformation(response) processed_response = result else: processed_response = results return processed_response
python
def multiCall(*commands, dependent=True, bundle=False, print_result=False, print_commands=False): """ Calls the function 'call' multiple times, given sets of commands """ results = [] dependent_failed = False for command in commands: if not dependent_failed: response = call(command, print_result=print_result, print_commands=print_commands) # TODO Will an error ever return a code other than '1'? if (response.returncode == 1) and dependent: dependent_failed = True else: response = None results.append(response) if bundle: result = Result() for response in results: if not response: continue elif response.returncode == 1: result.returncode = 1 result.extendInformation(response) processed_response = result else: processed_response = results return processed_response
[ "def", "multiCall", "(", "*", "commands", ",", "dependent", "=", "True", ",", "bundle", "=", "False", ",", "print_result", "=", "False", ",", "print_commands", "=", "False", ")", ":", "results", "=", "[", "]", "dependent_failed", "=", "False", "for", "command", "in", "commands", ":", "if", "not", "dependent_failed", ":", "response", "=", "call", "(", "command", ",", "print_result", "=", "print_result", ",", "print_commands", "=", "print_commands", ")", "# TODO Will an error ever return a code other than '1'?", "if", "(", "response", ".", "returncode", "==", "1", ")", "and", "dependent", ":", "dependent_failed", "=", "True", "else", ":", "response", "=", "None", "results", ".", "append", "(", "response", ")", "if", "bundle", ":", "result", "=", "Result", "(", ")", "for", "response", "in", "results", ":", "if", "not", "response", ":", "continue", "elif", "response", ".", "returncode", "==", "1", ":", "result", ".", "returncode", "=", "1", "result", ".", "extendInformation", "(", "response", ")", "processed_response", "=", "result", "else", ":", "processed_response", "=", "results", "return", "processed_response" ]
Calls the function 'call' multiple times, given sets of commands
[ "Calls", "the", "function", "call", "multiple", "times", "given", "sets", "of", "commands" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L145-L177
train
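To illustrate the 'dependent' and 'bundle' flags of 'multiCall': with dependent=True a failure short-circuits the remaining commands, and with bundle=True the per-command results are folded into one Result. The command strings are placeholders:

from cyther.launcher import multiCall

bundled = multiCall('cython --version', 'gcc --version',
                    dependent=True, bundle=True)
print(bundled.returncode)    # 1 if any command failed, else 0
print(bundled.getOutput())   # outputs of all commands, concatenated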
nickpandolfi/Cyther
cyther/launcher.py
distribute
def distribute(function, iterable, *, workers=4): """ A version of multiprocessing.Pool.map that works using dill to pickle the function and iterable """ with multiprocessing.Pool(workers) as pool: processes = [] for item in iterable: pickled = dill.dumps((function, item)) process = pool.apply_async(_run_pickled, (pickled,)) processes.append(process) results = [process.get() for process in processes] return results
python
def distribute(function, iterable, *, workers=4): """ A version of multiprocessing.Pool.map that works using dill to pickle the function and iterable """ with multiprocessing.Pool(workers) as pool: processes = [] for item in iterable: pickled = dill.dumps((function, item)) process = pool.apply_async(_run_pickled, (pickled,)) processes.append(process) results = [process.get() for process in processes] return results
[ "def", "distribute", "(", "function", ",", "iterable", ",", "*", ",", "workers", "=", "4", ")", ":", "with", "multiprocessing", ".", "Pool", "(", "workers", ")", "as", "pool", ":", "processes", "=", "[", "]", "for", "item", "in", "iterable", ":", "pickled", "=", "dill", ".", "dumps", "(", "(", "function", ",", "item", ")", ")", "process", "=", "pool", ".", "apply_async", "(", "_run_pickled", ",", "(", "pickled", ",", ")", ")", "processes", ".", "append", "(", "process", ")", "results", "=", "[", "process", ".", "get", "(", ")", "for", "process", "in", "processes", "]", "return", "results" ]
A version of multiprocessing.Pool.map that works using dill to pickle the function and iterable
[ "A", "version", "of", "multiprocessing", ".", "Pool", ".", "map", "that", "works", "using", "dill", "to", "pickle", "the", "function", "and", "iterable" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L185-L198
train
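'distribute' exists because multiprocessing's default pickler rejects lambdas and closures, so dill serializes the (function, item) pair instead. The '_run_pickled' helper it dispatches to is not included in these records; the sketch below is an assumption about what it must do:

import dill

def _run_pickled(pickled):
    # Presumed counterpart: rebuild the callable and its argument, then apply.
    function, item = dill.loads(pickled)
    return function(item)

# A lambda would normally break multiprocessing.Pool.map, but survives the
# dill round-trip:
# distribute(lambda x: x * x, range(8), workers=2)  # -> [0, 1, 4, 9, ...]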
nickpandolfi/Cyther
cyther/launcher.py
Result.getOutput
def getOutput(self): """ Returns the combined output of stdout and stderr """ output = self.stdout if self.stdout: output += '\r\n' output += self.stderr return output
python
def getOutput(self): """ Returns the combined output of stdout and stderr """ output = self.stdout if self.stdout: output += '\r\n' output += self.stderr return output
[ "def", "getOutput", "(", "self", ")", ":", "output", "=", "self", ".", "stdout", "if", "self", ".", "stdout", ":", "output", "+=", "'\\r\\n'", "output", "+=", "self", ".", "stderr", "return", "output" ]
Returns the combined output of stdout and stderr
[ "Returns", "the", "combined", "output", "of", "stdout", "and", "stderr" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L54-L62
train
nickpandolfi/Cyther
cyther/launcher.py
Result.extendInformation
def extendInformation(self, response):
    """
    This extends the object's stdout and stderr with 'response's stdout
    and stderr
    """
    if response.stdout:
        self.stdout += '\r\n' + response.stdout

    if response.stderr:
        self.stderr += '\r\n' + response.stderr
python
def extendInformation(self, response):
    """
    This extends the object's stdout and stderr with 'response's stdout
    and stderr
    """
    if response.stdout:
        self.stdout += '\r\n' + response.stdout

    if response.stderr:
        self.stderr += '\r\n' + response.stderr
[ "def", "extendInformation", "(", "self", ",", "response", ")", ":", "if", "response", ".", "stdout", ":", "self", ".", "stdout", "+=", "'\\r\\n'", "+", "response", ".", "stdout", "if", "response", ".", "stderr", ":", "self", ".", "stderr", "+=", "'\\r\\n'", "+", "response", ".", "stderr" ]
This extends the object's stdout and stderr with 'response's stdout and stderr
[ "This", "extends", "the", "objects", "stdout", "and", "stderr", "by", "response", "s", "stdout", "and", "stderr" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L64-L72
train
teepark/greenhouse
greenhouse/ext/zmq.py
wait_socks
def wait_socks(sock_events, inmask=1, outmask=2, timeout=None):
    """wait on a combination of zeromq sockets, normal sockets, and fds

    .. note:: this method can block

        it will return once there is relevant activity on any of the
        descriptors or sockets, or the timeout expires

    :param sock_events:
        two-tuples, the first item is either a zeromq socket, a socket, or a
        file descriptor, and the second item is a mask made up of the inmask
        and/or the outmask bitwise-ORd together
    :type sock_events: list

    :param inmask: the mask to use for readable events (default 1)
    :type inmask: int

    :param outmask: the mask to use for writable events (default 2)
    :type outmask: int

    :param timeout: the maximum time to block before raising an exception
    :type timeout: int, float or None

    :returns:
        a list of two-tuples, each has one of the first elements from
        ``sock_events``, the second element is the event mask of the activity
        that was detected (made up of inmask and/or outmask bitwise-ORd
        together)
    """
    results = []
    for sock, mask in sock_events:
        if isinstance(sock, zmq.backend.Socket):
            mask = _check_events(sock, mask, inmask, outmask)
            if mask:
                results.append((sock, mask))
    if results:
        return results

    fd_map = {}
    fd_events = []
    for sock, mask in sock_events:
        if isinstance(sock, zmq.backend.Socket):
            fd = sock.getsockopt(zmq.FD)
        elif isinstance(sock, int):
            fd = sock
        else:
            fd = sock.fileno()
        fd_map[fd] = sock
        fd_events.append((fd, mask))

    while 1:
        started = time.time()
        active = descriptor.wait_fds(fd_events, inmask, outmask, timeout)
        if not active:
            # timed out
            return []

        results = []
        for fd, mask in active:
            sock = fd_map[fd]
            if isinstance(sock, zmq.backend.Socket):
                mask = _check_events(sock, mask, inmask, outmask)
                if not mask:
                    continue
            results.append((sock, mask))
        if results:
            return results

        timeout -= time.time() - started
python
def wait_socks(sock_events, inmask=1, outmask=2, timeout=None):
    """wait on a combination of zeromq sockets, normal sockets, and fds

    .. note:: this method can block

        it will return once there is relevant activity on any of the
        descriptors or sockets, or the timeout expires

    :param sock_events:
        two-tuples, the first item is either a zeromq socket, a socket, or a
        file descriptor, and the second item is a mask made up of the inmask
        and/or the outmask bitwise-ORd together
    :type sock_events: list

    :param inmask: the mask to use for readable events (default 1)
    :type inmask: int

    :param outmask: the mask to use for writable events (default 2)
    :type outmask: int

    :param timeout: the maximum time to block before raising an exception
    :type timeout: int, float or None

    :returns:
        a list of two-tuples, each has one of the first elements from
        ``sock_events``, the second element is the event mask of the activity
        that was detected (made up of inmask and/or outmask bitwise-ORd
        together)
    """
    results = []
    for sock, mask in sock_events:
        if isinstance(sock, zmq.backend.Socket):
            mask = _check_events(sock, mask, inmask, outmask)
            if mask:
                results.append((sock, mask))
    if results:
        return results

    fd_map = {}
    fd_events = []
    for sock, mask in sock_events:
        if isinstance(sock, zmq.backend.Socket):
            fd = sock.getsockopt(zmq.FD)
        elif isinstance(sock, int):
            fd = sock
        else:
            fd = sock.fileno()
        fd_map[fd] = sock
        fd_events.append((fd, mask))

    while 1:
        started = time.time()
        active = descriptor.wait_fds(fd_events, inmask, outmask, timeout)
        if not active:
            # timed out
            return []

        results = []
        for fd, mask in active:
            sock = fd_map[fd]
            if isinstance(sock, zmq.backend.Socket):
                mask = _check_events(sock, mask, inmask, outmask)
                if not mask:
                    continue
            results.append((sock, mask))
        if results:
            return results

        timeout -= time.time() - started
[ "def", "wait_socks", "(", "sock_events", ",", "inmask", "=", "1", ",", "outmask", "=", "2", ",", "timeout", "=", "None", ")", ":", "results", "=", "[", "]", "for", "sock", ",", "mask", "in", "sock_events", ":", "if", "isinstance", "(", "sock", ",", "zmq", ".", "backend", ".", "Socket", ")", ":", "mask", "=", "_check_events", "(", "sock", ",", "mask", ",", "inmask", ",", "outmask", ")", "if", "mask", ":", "results", ".", "append", "(", "(", "sock", ",", "mask", ")", ")", "if", "results", ":", "return", "results", "fd_map", "=", "{", "}", "fd_events", "=", "[", "]", "for", "sock", ",", "mask", "in", "sock_events", ":", "if", "isinstance", "(", "sock", ",", "zmq", ".", "backend", ".", "Socket", ")", ":", "fd", "=", "sock", ".", "getsockopt", "(", "zmq", ".", "FD", ")", "elif", "isinstance", "(", "sock", ",", "int", ")", ":", "fd", "=", "sock", "else", ":", "fd", "=", "sock", ".", "fileno", "(", ")", "fd_map", "[", "fd", "]", "=", "sock", "fd_events", ".", "append", "(", "(", "fd", ",", "mask", ")", ")", "while", "1", ":", "started", "=", "time", ".", "time", "(", ")", "active", "=", "descriptor", ".", "wait_fds", "(", "fd_events", ",", "inmask", ",", "outmask", ",", "timeout", ")", "if", "not", "active", ":", "# timed out", "return", "[", "]", "results", "=", "[", "]", "for", "fd", ",", "mask", "in", "active", ":", "sock", "=", "fd_map", "[", "fd", "]", "if", "isinstance", "(", "sock", ",", "zmq", ".", "backend", ".", "Socket", ")", ":", "mask", "=", "_check_events", "(", "sock", ",", "mask", ",", "inmask", ",", "outmask", ")", "if", "not", "mask", ":", "continue", "results", ".", "append", "(", "(", "sock", ",", "mask", ")", ")", "if", "results", ":", "return", "results", "timeout", "-=", "time", ".", "time", "(", ")", "-", "started" ]
wait on a combination of zeromq sockets, normal sockets, and fds

.. note:: this method can block

    it will return once there is relevant activity on any of the
    descriptors or sockets, or the timeout expires

:param sock_events:
    two-tuples, the first item is either a zeromq socket, a socket, or a
    file descriptor, and the second item is a mask made up of the inmask
    and/or the outmask bitwise-ORd together
:type sock_events: list

:param inmask: the mask to use for readable events (default 1)
:type inmask: int

:param outmask: the mask to use for writable events (default 2)
:type outmask: int

:param timeout: the maximum time to block before raising an exception
:type timeout: int, float or None

:returns:
    a list of two-tuples, each has one of the first elements from
    ``sock_events``, the second element is the event mask of the activity
    that was detected (made up of inmask and/or outmask bitwise-ORd
    together)
[ "wait", "on", "a", "combination", "of", "zeromq", "sockets", "normal", "sockets", "and", "fds" ]
8fd1be4f5443ba090346b5ec82fdbeb0a060d956
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/ext/zmq.py#L12-L79
train
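A hedged usage sketch for 'wait_socks' that mixes a zeromq socket with a plain TCP socket; the endpoints are invented and the import path is inferred from the record's path field:

import socket
import zmq
from greenhouse.ext.zmq import wait_socks

ctx = zmq.Context()
rep = ctx.socket(zmq.REP)
rep.bind('tcp://127.0.0.1:5555')

plain = socket.create_connection(('127.0.0.1', 8080))

# Wait up to 5 seconds for either socket to become readable (mask 1)
for sock, mask in wait_socks([(rep, 1), (plain, 1)], timeout=5.0):
    print(sock, 'readable' if mask & 1 else 'writable')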
cltl/KafNafParserPy
KafNafParserPy/factuality_data.py
Cfactualities.remove_this_factuality
def remove_this_factuality(self,factuality_id): """ Removes the factuality for the given factuality identifier @type factuality_id: string @param factuality_id: the factuality identifier to be removed """ for fact in self.get_factualities(): if fact.get_id() == factuality_id: self.node.remove(fact.get_node()) break
python
def remove_this_factuality(self,factuality_id): """ Removes the factuality for the given factuality identifier @type factuality_id: string @param factuality_id: the factuality identifier to be removed """ for fact in self.get_factualities(): if fact.get_id() == factuality_id: self.node.remove(fact.get_node()) break
[ "def", "remove_this_factuality", "(", "self", ",", "factuality_id", ")", ":", "for", "fact", "in", "self", ".", "get_factualities", "(", ")", ":", "if", "fact", ".", "get_id", "(", ")", "==", "factuality_id", ":", "self", ".", "node", ".", "remove", "(", "fact", ".", "get_node", "(", ")", ")", "break" ]
Removes the factuality for the given factuality identifier @type factuality_id: string @param factuality_id: the factuality identifier to be removed
[ "Removes", "the", "factuality", "for", "the", "given", "factuality", "identifier" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/factuality_data.py#L224-L233
train
cltl/KafNafParserPy
KafNafParserPy/factuality_data.py
Cfactualities.remove_factuality
def remove_factuality(self, fid): """ Removes a factuality element with a specific id from the layer """ for node_pre in self.node.findall('factuality'): if node_pre.get('id') == fid: self.node.remove(node_pre)
python
def remove_factuality(self, fid): """ Removes a factuality element with a specific id from the layer """ for node_pre in self.node.findall('factuality'): if node_pre.get('id') == fid: self.node.remove(node_pre)
[ "def", "remove_factuality", "(", "self", ",", "fid", ")", ":", "for", "node_pre", "in", "self", ".", "node", ".", "findall", "(", "'factuality'", ")", ":", "if", "node_pre", ".", "get", "(", "'id'", ")", "==", "fid", ":", "self", ".", "node", ".", "remove", "(", "node_pre", ")" ]
Removes a factuality element with a specific id from the layer
[ "Removes", "a", "factuality", "element", "with", "a", "specific", "id", "from", "the", "layer" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/factuality_data.py#L236-L242
train
cltl/KafNafParserPy
KafNafParserPy/factuality_data.py
Cfactualitylayer.remove_this_factvalue
def remove_this_factvalue(self,factvalue_id): """ Removes the factvalue for the given factvalue identifier @type factvalue_id: string @param factvalue_id: the factvalue identifier to be removed """ for fact in self.get_factvalues(): if fact.get_id() == factvalue_id: self.node.remove(fact.get_node()) break
python
def remove_this_factvalue(self,factvalue_id): """ Removes the factvalue for the given factvalue identifier @type factvalue_id: string @param factvalue_id: the factvalue identifier to be removed """ for fact in self.get_factvalues(): if fact.get_id() == factvalue_id: self.node.remove(fact.get_node()) break
[ "def", "remove_this_factvalue", "(", "self", ",", "factvalue_id", ")", ":", "for", "fact", "in", "self", ".", "get_factvalues", "(", ")", ":", "if", "fact", ".", "get_id", "(", ")", "==", "factvalue_id", ":", "self", ".", "node", ".", "remove", "(", "fact", ".", "get_node", "(", ")", ")", "break" ]
Removes the factvalue for the given factvalue identifier @type factvalue_id: string @param factvalue_id: the factvalue identifier to be removed
[ "Removes", "the", "factvalue", "for", "the", "given", "factvalue", "identifier" ]
9bc32e803c176404b255ba317479b8780ed5f569
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/factuality_data.py#L393-L402
train
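The three removal helpers above share one pattern: find the element by id, then detach its node from the layer. A hedged sketch of safe usage follows; ids are collected first so the layer is not mutated while iterating, and 'factuality_layer' plus the id 'f3' are illustrative:

# Collect matching ids first, then remove them one by one
doomed = [fact.get_id()
          for fact in factuality_layer.get_factualities()
          if fact.get_id() == 'f3']
for fid in doomed:
    factuality_layer.remove_this_factuality(fid)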
sio2project/filetracker
filetracker/client/remote_data_store.py
RemoteDataStore._add_version_to_request
def _add_version_to_request(self, url, headers, version): """Adds version to either url or headers, depending on protocol.""" if self._has_capability(SERVER_REQUIRES_VERSION_HEADER): new_headers = headers.copy() new_headers['Last-Modified'] = email.utils.formatdate(version) return url, new_headers else: url_params = { 'last_modified': email.utils.formatdate(version) } new_url = url + "?" + urlencode(url_params) return new_url, headers
python
def _add_version_to_request(self, url, headers, version): """Adds version to either url or headers, depending on protocol.""" if self._has_capability(SERVER_REQUIRES_VERSION_HEADER): new_headers = headers.copy() new_headers['Last-Modified'] = email.utils.formatdate(version) return url, new_headers else: url_params = { 'last_modified': email.utils.formatdate(version) } new_url = url + "?" + urlencode(url_params) return new_url, headers
[ "def", "_add_version_to_request", "(", "self", ",", "url", ",", "headers", ",", "version", ")", ":", "if", "self", ".", "_has_capability", "(", "SERVER_REQUIRES_VERSION_HEADER", ")", ":", "new_headers", "=", "headers", ".", "copy", "(", ")", "new_headers", "[", "'Last-Modified'", "]", "=", "email", ".", "utils", ".", "formatdate", "(", "version", ")", "return", "url", ",", "new_headers", "else", ":", "url_params", "=", "{", "'last_modified'", ":", "email", ".", "utils", ".", "formatdate", "(", "version", ")", "}", "new_url", "=", "url", "+", "\"?\"", "+", "urlencode", "(", "url_params", ")", "return", "new_url", ",", "headers" ]
Adds version to either url or headers, depending on protocol.
[ "Adds", "version", "to", "either", "url", "or", "headers", "depending", "on", "protocol", "." ]
359b474850622e3d0c25ee2596d7242c02f84efb
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/client/remote_data_store.py#L216-L227
train
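The two request shapes this helper produces can be previewed without a server. The path '/files/foo.txt' and the timestamp are placeholders, and the sketch uses Python 3's urllib while the client itself imports a compatible urlencode:

import email.utils
from urllib.parse import urlencode

stamp = email.utils.formatdate(1500000000)
print({'Last-Modified': stamp})                                 # header-based protocol
print('/files/foo.txt?' + urlencode({'last_modified': stamp}))  # URL-based fallback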
sio2project/filetracker
filetracker/client/remote_data_store.py
RemoteDataStore._protocol_version
def _protocol_version(self):
    """Returns the protocol version that should be used.

    If the version wasn't established yet, asks the server
    what versions it supports and picks the highest one.
    """
    if hasattr(self, '_protocol_ver'):
        return self._protocol_ver

    response = requests.get(self.base_url + '/version/')
    if response.status_code == 404:
        server_versions = {1}
    elif response.status_code == 200:
        server_versions = set(response.json()['protocol_versions'])
        if not server_versions:
            raise FiletrackerError(
                    'Server hasn\'t reported any supported protocols')
    else:
        response.raise_for_status()

    common_versions = _SUPPORTED_VERSIONS.intersection(server_versions)
    if not common_versions:
        raise FiletrackerError(
                'Couldn\'t agree on protocol version: client supports '
                '{}, server supports {}.'
                .format(_SUPPORTED_VERSIONS, server_versions))

    self._protocol_ver = max(common_versions)
    print('Settled for protocol version {}'.format(self._protocol_ver))
    return self._protocol_ver
python
def _protocol_version(self):
    """Returns the protocol version that should be used.

    If the version wasn't established yet, asks the server
    what versions it supports and picks the highest one.
    """
    if hasattr(self, '_protocol_ver'):
        return self._protocol_ver

    response = requests.get(self.base_url + '/version/')
    if response.status_code == 404:
        server_versions = {1}
    elif response.status_code == 200:
        server_versions = set(response.json()['protocol_versions'])
        if not server_versions:
            raise FiletrackerError(
                    'Server hasn\'t reported any supported protocols')
    else:
        response.raise_for_status()

    common_versions = _SUPPORTED_VERSIONS.intersection(server_versions)
    if not common_versions:
        raise FiletrackerError(
                'Couldn\'t agree on protocol version: client supports '
                '{}, server supports {}.'
                .format(_SUPPORTED_VERSIONS, server_versions))

    self._protocol_ver = max(common_versions)
    print('Settled for protocol version {}'.format(self._protocol_ver))
    return self._protocol_ver
[ "def", "_protocol_version", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_protocol_ver'", ")", ":", "return", "self", ".", "_protocol_ver", "response", "=", "requests", ".", "get", "(", "self", ".", "base_url", "+", "'/version/'", ")", "if", "response", ".", "status_code", "==", "404", ":", "server_versions", "=", "{", "1", "}", "elif", "response", ".", "status_code", "==", "200", ":", "server_versions", "=", "set", "(", "response", ".", "json", "(", ")", "[", "'protocol_versions'", "]", ")", "if", "not", "server_versions", ":", "raise", "FiletrackerError", "(", "'Server hasn\\'t reported any supported protocols'", ")", "else", ":", "response", ".", "raise_for_status", "(", ")", "common_versions", "=", "_SUPPORTED_VERSIONS", ".", "intersection", "(", "server_versions", ")", "if", "not", "common_versions", ":", "raise", "FiletrackerError", "(", "'Couldn\\'t agree on protocol version: client supports '", "'{}, server supports {}.'", ".", "format", "(", "_PROTOCOL_CAPABILITIES", ",", "server_versions", ")", ")", "self", ".", "_protocol_ver", "=", "max", "(", "common_versions", ")", "print", "(", "'Settled for protocol version {}'", ".", "format", "(", "self", ".", "_protocol_ver", ")", ")", "return", "self", ".", "_protocol_ver" ]
Returns the protocol version that should be used. If the version wasn't established yet, asks the server what versions it supports and picks the highest one.
[ "Returns", "the", "protocol", "version", "that", "should", "be", "used", "." ]
359b474850622e3d0c25ee2596d7242c02f84efb
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/client/remote_data_store.py#L229-L260
train
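The negotiation rule at the heart of '_protocol_version', shown in isolation with made-up version sets:

client_versions = {1, 2}          # stand-in for _SUPPORTED_VERSIONS
server_versions = {2, 3}          # what the /version/ endpoint reported
common = client_versions.intersection(server_versions)
assert common, 'no common protocol version'
print(max(common))                # -> 2, the highest mutually supported version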
sio2project/filetracker
filetracker/scripts/cachecleaner.py
CacheCleaner.run
def run(self):
    """Starts cleaning the cache in an infinite loop."""
    logger.info("Starting daemon.")
    while True:
        try:
            self._scan_disk()
            do_cleaning, delete_from_index = self._analyze_file_index()
            if do_cleaning:
                self._clean_cache(delete_from_index)
        except Exception:
            logger.exception("Following exception occurred:")

        sleeping_until_time = datetime.datetime.now() + self.scan_interval
        logger.info("Sleeping until %s.", sleeping_until_time)
        time.sleep(self.scan_interval.total_seconds())
python
def run(self):
    """Starts cleaning the cache in an infinite loop."""
    logger.info("Starting daemon.")
    while True:
        try:
            self._scan_disk()
            do_cleaning, delete_from_index = self._analyze_file_index()
            if do_cleaning:
                self._clean_cache(delete_from_index)
        except Exception:
            logger.exception("Following exception occurred:")

        sleeping_until_time = datetime.datetime.now() + self.scan_interval
        logger.info("Sleeping until %s.", sleeping_until_time)
        time.sleep(self.scan_interval.total_seconds())
[ "def", "run", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Starting daemon.\"", ")", "while", "True", ":", "try", ":", "self", ".", "_scan_disk", "(", ")", "do_cleaning", ",", "delete_from_index", "=", "self", ".", "_analyze_file_index", "(", ")", "if", "do_cleaning", ":", "self", ".", "_clean_cache", "(", "delete_from_index", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Following exception occurred:\"", ")", "sleeping_until_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "self", ".", "scan_interval", "logger", ".", "info", "(", "\"Sleeping until %s.\"", ",", "sleeping_until_time", ")", "time", ".", "sleep", "(", "self", ".", "scan_interval", ".", "total_seconds", "(", ")", ")" ]
Starts cleaning the cache in an infinite loop.
[ "Starts", "cleaning", "cache", "in", "infinite", "loop", "." ]
359b474850622e3d0c25ee2596d7242c02f84efb
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/scripts/cachecleaner.py#L73-L87
train
nickpandolfi/Cyther
cyther/pathway.py
normalize
def normalize(path_name, override=None):
    """
    Prepares a path name to be worked with. Path name must not be empty.
    This function will return the 'normpath'ed path and the identity of
    the path. This function takes an optional overriding argument for
    the identity.

    ONLY PROVIDE OVERRIDE IF:
        1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME
        2) YOU ARE MAKING A FILE WITH NO EXTENSION
    """
    identity = identify(path_name, override=override)
    new_path_name = os.path.normpath(os.path.expanduser(path_name))
    return new_path_name, identity
python
def normalize(path_name, override=None):
    """
    Prepares a path name to be worked with. Path name must not be empty.
    This function will return the 'normpath'ed path and the identity of
    the path. This function takes an optional overriding argument for
    the identity.

    ONLY PROVIDE OVERRIDE IF:
        1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME
        2) YOU ARE MAKING A FILE WITH NO EXTENSION
    """
    identity = identify(path_name, override=override)
    new_path_name = os.path.normpath(os.path.expanduser(path_name))
    return new_path_name, identity
[ "def", "normalize", "(", "path_name", ",", "override", "=", "None", ")", ":", "identity", "=", "identify", "(", "path_name", ",", "override", "=", "override", ")", "new_path_name", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "expanduser", "(", "path_name", ")", ")", "return", "new_path_name", ",", "identity" ]
Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION
[ "Prepares", "a", "path", "name", "to", "be", "worked", "with", ".", "Path", "name", "must", "not", "be", "empty", ".", "This", "function", "will", "return", "the", "normpath", "ed", "path", "and", "the", "identity", "of", "the", "path", ".", "This", "function", "takes", "an", "optional", "overriding", "argument", "for", "the", "identity", "." ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L36-L50
train
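A short illustration of the (path, identity) pair 'normalize' returns; the identity half depends on 'identify', whose constants are not shown in these records, so only the path half is spelled out:

new_path, identity = normalize('~/project/./module.pyx')
# expanduser + normpath collapse the '~' and the redundant './':
# e.g. '/home/user/project/module.pyx' on a typical Linux account
print(new_path, identity)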
nickpandolfi/Cyther
cyther/pathway.py
join_ext
def join_ext(name, extension): """ Joins a given name with an extension. If the extension doesn't have a '.' it will add it for you """ if extension[0] == EXT: ret = name + extension else: ret = name + EXT + extension return ret
python
def join_ext(name, extension): """ Joins a given name with an extension. If the extension doesn't have a '.' it will add it for you """ if extension[0] == EXT: ret = name + extension else: ret = name + EXT + extension return ret
[ "def", "join_ext", "(", "name", ",", "extension", ")", ":", "if", "extension", "[", "0", "]", "==", "EXT", ":", "ret", "=", "name", "+", "extension", "else", ":", "ret", "=", "name", "+", "EXT", "+", "extension", "return", "ret" ]
Joins a given name with an extension. If the extension doesn't have a '.' it will add it for you
[ "Joins", "a", "given", "name", "with", "an", "extension", ".", "If", "the", "extension", "doesn", "t", "have", "a", ".", "it", "will", "add", "it", "for", "you" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L98-L107
train
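'join_ext' only inserts the separator when the extension lacks one. This assumes the module-level EXT constant is the dot character, which the comparison extension[0] == EXT implies:

print(join_ext('module', '.pyx'))  # 'module.pyx'
print(join_ext('module', 'pyx'))   # 'module.pyx' as well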
nickpandolfi/Cyther
cyther/pathway.py
has_ext
def has_ext(path_name, *, multiple=None, if_all_ext=False): """ Determine if the given path name has an extension """ base = os.path.basename(path_name) count = base.count(EXT) if not if_all_ext and base[0] == EXT and count != 0: count -= 1 if multiple is None: return count >= 1 elif multiple: return count > 1 else: return count == 1
python
def has_ext(path_name, *, multiple=None, if_all_ext=False): """ Determine if the given path name has an extension """ base = os.path.basename(path_name) count = base.count(EXT) if not if_all_ext and base[0] == EXT and count != 0: count -= 1 if multiple is None: return count >= 1 elif multiple: return count > 1 else: return count == 1
[ "def", "has_ext", "(", "path_name", ",", "*", ",", "multiple", "=", "None", ",", "if_all_ext", "=", "False", ")", ":", "base", "=", "os", ".", "path", ".", "basename", "(", "path_name", ")", "count", "=", "base", ".", "count", "(", "EXT", ")", "if", "not", "if_all_ext", "and", "base", "[", "0", "]", "==", "EXT", "and", "count", "!=", "0", ":", "count", "-=", "1", "if", "multiple", "is", "None", ":", "return", "count", ">=", "1", "elif", "multiple", ":", "return", "count", ">", "1", "else", ":", "return", "count", "==", "1" ]
Determine if the given path name has an extension
[ "Determine", "if", "the", "given", "path", "name", "has", "an", "extension" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L118-L133
train
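Expected behaviour of 'has_ext' per its counting logic; a leading dot is discounted unless if_all_ext is set:

print(has_ext('archive.tar.gz'))                  # True: at least one extension
print(has_ext('archive.tar.gz', multiple=True))   # True: more than one
print(has_ext('notes.txt', multiple=True))        # False: exactly one
print(has_ext('.bashrc'))                         # False: leading dot discounted
print(has_ext('.bashrc', if_all_ext=True))        # True: leading dot counted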
nickpandolfi/Cyther
cyther/pathway.py
get_ext
def get_ext(path_name, *, if_all_ext=False): """ Get an extension from the given path name. If an extension cannot be found, it will return an empty string """ if has_ext(path_name): return os.path.splitext(path_name)[EXTENSION] elif if_all_ext and has_ext(path_name, if_all_ext=True): return os.path.splitext(path_name)[NAME] else: return ''
python
def get_ext(path_name, *, if_all_ext=False): """ Get an extension from the given path name. If an extension cannot be found, it will return an empty string """ if has_ext(path_name): return os.path.splitext(path_name)[EXTENSION] elif if_all_ext and has_ext(path_name, if_all_ext=True): return os.path.splitext(path_name)[NAME] else: return ''
[ "def", "get_ext", "(", "path_name", ",", "*", ",", "if_all_ext", "=", "False", ")", ":", "if", "has_ext", "(", "path_name", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "path_name", ")", "[", "EXTENSION", "]", "elif", "if_all_ext", "and", "has_ext", "(", "path_name", ",", "if_all_ext", "=", "True", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "path_name", ")", "[", "NAME", "]", "else", ":", "return", "''" ]
Get an extension from the given path name. If an extension cannot be found, it will return an empty string
[ "Get", "an", "extension", "from", "the", "given", "path", "name", ".", "If", "an", "extension", "cannot", "be", "found", "it", "will", "return", "an", "empty", "string" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L136-L146
train
nickpandolfi/Cyther
cyther/pathway.py
get_dir
def get_dir(path_name, *, greedy=False, override=None, identity=None): """ Gets the directory path of the given path name. If the argument 'greedy' is specified as True, then if the path name represents a directory itself, the function will return the whole path """ if identity is None: identity = identify(path_name, override=override) path_name = os.path.normpath(path_name) if greedy and identity == ISDIR: return path_name else: return os.path.dirname(path_name)
python
def get_dir(path_name, *, greedy=False, override=None, identity=None): """ Gets the directory path of the given path name. If the argument 'greedy' is specified as True, then if the path name represents a directory itself, the function will return the whole path """ if identity is None: identity = identify(path_name, override=override) path_name = os.path.normpath(path_name) if greedy and identity == ISDIR: return path_name else: return os.path.dirname(path_name)
[ "def", "get_dir", "(", "path_name", ",", "*", ",", "greedy", "=", "False", ",", "override", "=", "None", ",", "identity", "=", "None", ")", ":", "if", "identity", "is", "None", ":", "identity", "=", "identify", "(", "path_name", ",", "override", "=", "override", ")", "path_name", "=", "os", ".", "path", ".", "normpath", "(", "path_name", ")", "if", "greedy", "and", "identity", "==", "ISDIR", ":", "return", "path_name", "else", ":", "return", "os", ".", "path", ".", "dirname", "(", "path_name", ")" ]
Gets the directory path of the given path name. If the argument 'greedy' is specified as True, then if the path name represents a directory itself, the function will return the whole path
[ "Gets", "the", "directory", "path", "of", "the", "given", "path", "name", ".", "If", "the", "argument", "greedy", "is", "specified", "as", "True", "then", "if", "the", "path", "name", "represents", "a", "directory", "itself", "the", "function", "will", "return", "the", "whole", "path" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L157-L171
train
nickpandolfi/Cyther
cyther/pathway.py
get_system_drives
def get_system_drives(): """ Get the available drive names on the system. Always returns a list. """ drives = [] if os.name == 'nt': import ctypes bitmask = ctypes.windll.kernel32.GetLogicalDrives() letter = ord('A') while bitmask > 0: if bitmask & 1: name = chr(letter) + ':' + os.sep if os.path.isdir(name): drives.append(name) bitmask >>= 1 letter += 1 else: current_drive = get_drive(os.getcwd()) if current_drive: drive = current_drive else: drive = os.sep drives.append(drive) return drives
python
def get_system_drives(): """ Get the available drive names on the system. Always returns a list. """ drives = [] if os.name == 'nt': import ctypes bitmask = ctypes.windll.kernel32.GetLogicalDrives() letter = ord('A') while bitmask > 0: if bitmask & 1: name = chr(letter) + ':' + os.sep if os.path.isdir(name): drives.append(name) bitmask >>= 1 letter += 1 else: current_drive = get_drive(os.getcwd()) if current_drive: drive = current_drive else: drive = os.sep drives.append(drive) return drives
[ "def", "get_system_drives", "(", ")", ":", "drives", "=", "[", "]", "if", "os", ".", "name", "==", "'nt'", ":", "import", "ctypes", "bitmask", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "GetLogicalDrives", "(", ")", "letter", "=", "ord", "(", "'A'", ")", "while", "bitmask", ">", "0", ":", "if", "bitmask", "&", "1", ":", "name", "=", "chr", "(", "letter", ")", "+", "':'", "+", "os", ".", "sep", "if", "os", ".", "path", ".", "isdir", "(", "name", ")", ":", "drives", ".", "append", "(", "name", ")", "bitmask", ">>=", "1", "letter", "+=", "1", "else", ":", "current_drive", "=", "get_drive", "(", "os", ".", "getcwd", "(", ")", ")", "if", "current_drive", ":", "drive", "=", "current_drive", "else", ":", "drive", "=", "os", ".", "sep", "drives", ".", "append", "(", "drive", ")", "return", "drives" ]
Get the available drive names on the system. Always returns a list.
[ "Get", "the", "available", "drive", "names", "on", "the", "system", ".", "Always", "returns", "a", "list", "." ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L206-L230
train
nickpandolfi/Cyther
cyther/pathway.py
has_suffix
def has_suffix(path_name, suffix): """ Determines if path_name has a suffix of at least 'suffix' """ if isinstance(suffix, str): suffix = disintegrate(suffix) components = disintegrate(path_name) for i in range(-1, -(len(suffix) + 1), -1): if components[i] != suffix[i]: break else: return True return False
python
def has_suffix(path_name, suffix): """ Determines if path_name has a suffix of at least 'suffix' """ if isinstance(suffix, str): suffix = disintegrate(suffix) components = disintegrate(path_name) for i in range(-1, -(len(suffix) + 1), -1): if components[i] != suffix[i]: break else: return True return False
[ "def", "has_suffix", "(", "path_name", ",", "suffix", ")", ":", "if", "isinstance", "(", "suffix", ",", "str", ")", ":", "suffix", "=", "disintegrate", "(", "suffix", ")", "components", "=", "disintegrate", "(", "path_name", ")", "for", "i", "in", "range", "(", "-", "1", ",", "-", "(", "len", "(", "suffix", ")", "+", "1", ")", ",", "-", "1", ")", ":", "if", "components", "[", "i", "]", "!=", "suffix", "[", "i", "]", ":", "break", "else", ":", "return", "True", "return", "False" ]
Determines if path_name has a suffix of at least 'suffix'
[ "Determines", "if", "path_name", "has", "a", "suffix", "of", "at", "least", "suffix" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L233-L247
train
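'has_suffix' compares trailing path components right to left; 'disintegrate' is not shown in these records but presumably splits a path into its components:

print(has_suffix('/home/user/project/src', 'project/src'))  # True
print(has_suffix('/home/user/project/src', 'user/src'))     # False: mismatch one level up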
nickpandolfi/Cyther
cyther/pathway.py
path
def path(path_name=None, override=None, *, root=None, name=None, ext=None,
         inject=None, relpath=None, reduce=False):
    """
    Path manipulation black magic: builds a new, normalized path from an
    optional starting path plus keyword overrides (root, name, ext,
    inject, relpath and reduce)
    """
    path_name, identity, root = _initialize(path_name, override, root, inject)

    new_name = _process_name(path_name, identity, name, ext)
    new_directory = _process_directory(path_name, identity, root, inject)
    full_path = os.path.normpath(os.path.join(new_directory, new_name))

    if APPEND_SEP_TO_DIRS and not new_name and full_path[-1] != os.sep:
        full_path += os.sep

    final_path = _format_path(full_path, root, relpath, reduce)
    return final_path
python
def path(path_name=None, override=None, *, root=None, name=None, ext=None,
         inject=None, relpath=None, reduce=False):
    """
    Path manipulation black magic: builds a new, normalized path from an
    optional starting path plus keyword overrides (root, name, ext,
    inject, relpath and reduce)
    """
    path_name, identity, root = _initialize(path_name, override, root, inject)

    new_name = _process_name(path_name, identity, name, ext)
    new_directory = _process_directory(path_name, identity, root, inject)
    full_path = os.path.normpath(os.path.join(new_directory, new_name))

    if APPEND_SEP_TO_DIRS and not new_name and full_path[-1] != os.sep:
        full_path += os.sep

    final_path = _format_path(full_path, root, relpath, reduce)
    return final_path
[ "def", "path", "(", "path_name", "=", "None", ",", "override", "=", "None", ",", "*", ",", "root", "=", "None", ",", "name", "=", "None", ",", "ext", "=", "None", ",", "inject", "=", "None", ",", "relpath", "=", "None", ",", "reduce", "=", "False", ")", ":", "path_name", ",", "identity", ",", "root", "=", "_initialize", "(", "path_name", ",", "override", ",", "root", ",", "inject", ")", "new_name", "=", "_process_name", "(", "path_name", ",", "identity", ",", "name", ",", "ext", ")", "new_directory", "=", "_process_directory", "(", "path_name", ",", "identity", ",", "root", ",", "inject", ")", "full_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "new_directory", ",", "new_name", ")", ")", "if", "APPEND_SEP_TO_DIRS", "and", "not", "new_name", "and", "full_path", "[", "-", "1", "]", "!=", "os", ".", "sep", ":", "full_path", "+=", "os", ".", "sep", "final_path", "=", "_format_path", "(", "full_path", ",", "root", ",", "relpath", ",", "reduce", ")", "return", "final_path" ]
Path manipulation black magic: builds a new, normalized path from an optional starting path plus keyword overrides (root, name, ext, inject, relpath and reduce)
[ "Path", "manipulation", "black", "magic" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L364-L376
train
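A hedged sketch of the keyword-driven composition 'path' performs. The exact override semantics live in '_initialize', '_process_name', '_process_directory' and '_format_path', none of which appear in these records, so the result shown is only plausible:

built = path('module.pyx', root='/tmp/build', ext='c')
# plausibly '/tmp/build/module.c': same stem, new root and extension
print(built)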
nickpandolfi/Cyther
cyther/pathway.py
File.path
def path(self, **kwargs): """ Returns a different object with the specified changes applied to it. This object is not changed in the process. """ new_path = path(self.getPath(), **kwargs) return File(new_path)
python
def path(self, **kwargs): """ Returns a different object with the specified changes applied to it. This object is not changed in the process. """ new_path = path(self.getPath(), **kwargs) return File(new_path)
[ "def", "path", "(", "self", ",", "*", "*", "kwargs", ")", ":", "new_path", "=", "path", "(", "self", ".", "getPath", "(", ")", ",", "*", "*", "kwargs", ")", "return", "File", "(", "new_path", ")" ]
Returns a different object with the specified changes applied to it. This object is not changed in the process.
[ "Returns", "a", "different", "object", "with", "the", "specified", "changes", "applied", "to", "it", ".", "This", "object", "is", "not", "changed", "in", "the", "process", "." ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L408-L414
train
nickpandolfi/Cyther
cyther/pathway.py
File.isOutDated
def isOutDated(self, output_file):
    """
    Figures out if Cyther should compile the given FileInfo object by
    checking both of the modified times
    """
    if output_file.exists():
        source_time = self.getmtime()
        output_time = output_file.getmtime()
        return source_time > output_time
    else:
        return True
python
def isOutDated(self, output_file):
    """
    Figures out if Cyther should compile the given FileInfo object by
    checking both of the modified times
    """
    if output_file.exists():
        source_time = self.getmtime()
        output_time = output_file.getmtime()
        return source_time > output_time
    else:
        return True
[ "def", "isOutDated", "(", "self", ",", "output_file", ")", ":", "if", "output_file", ".", "exists", "(", ")", ":", "source_time", "=", "self", ".", "getmtime", "(", ")", "output_time", "=", "output_file", ".", "getmtime", "(", ")", "return", "source_time", ">", "output_time", "else", ":", "return", "True" ]
Figures out if Cyther should compile the given FileInfo object by checking both of the modified times
[ "Figures", "out", "if", "Cyther", "should", "compile", "the", "given", "FileInfo", "object", "by", "checking", "the", "both", "of", "the", "modified", "times" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L416-L426
train
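The freshness rule in isolation: recompile when the source is newer than the output, or when the output does not exist yet. File names are illustrative:

source = File('module.pyx')
output = File('module.c')
if source.isOutDated(output):
    print('needs recompiling')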
nickpandolfi/Cyther
cyther/pathway.py
File.isUpdated
def isUpdated(self):
    """
    Figures out if the file has been modified since the recorded
    numerical timestamp, i.e. whether a previously erroring file may
    have been fixed
    """
    modified_time = self.getmtime()
    valid = modified_time > self.__stamp
    return valid
python
def isUpdated(self):
    """
    Figures out if the file has been modified since the recorded
    numerical timestamp, i.e. whether a previously erroring file may
    have been fixed
    """
    modified_time = self.getmtime()
    valid = modified_time > self.__stamp
    return valid
[ "def", "isUpdated", "(", "self", ")", ":", "modified_time", "=", "self", ".", "getmtime", "(", ")", "valid", "=", "modified_time", ">", "self", ".", "__stamp", "return", "valid" ]
Figures out if the file has been modified since the recorded numerical timestamp, i.e. whether a previously erroring file may have been fixed
[ "Figures", "out", "if", "the", "file", "had", "previously", "errored", "and", "hasn", "t", "been", "fixed", "since", "given", "a", "numerical", "time" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L434-L441
train
Unbabel/unbabel-py
unbabel/api.py
UnbabelApi.get_translations
def get_translations(self, status=None): ''' Returns the translations requested by the user ''' if status is not None: result = self.api_call('translation/?status=%s' % status) else: result = self.api_call('translation/') if result.status_code == 200: translations_json = json.loads(result.content)["objects"] translations = [Translation(**tj) for tj in translations_json] else: log.critical( 'Error status when fetching translation from server: {' '}!'.format( result.status_code)) translations = [] return translations
python
def get_translations(self, status=None): ''' Returns the translations requested by the user ''' if status is not None: result = self.api_call('translation/?status=%s' % status) else: result = self.api_call('translation/') if result.status_code == 200: translations_json = json.loads(result.content)["objects"] translations = [Translation(**tj) for tj in translations_json] else: log.critical( 'Error status when fetching translation from server: {' '}!'.format( result.status_code)) translations = [] return translations
[ "def", "get_translations", "(", "self", ",", "status", "=", "None", ")", ":", "if", "status", "is", "not", "None", ":", "result", "=", "self", ".", "api_call", "(", "'translation/?status=%s'", "%", "status", ")", "else", ":", "result", "=", "self", ".", "api_call", "(", "'translation/'", ")", "if", "result", ".", "status_code", "==", "200", ":", "translations_json", "=", "json", ".", "loads", "(", "result", ".", "content", ")", "[", "\"objects\"", "]", "translations", "=", "[", "Translation", "(", "*", "*", "tj", ")", "for", "tj", "in", "translations_json", "]", "else", ":", "log", ".", "critical", "(", "'Error status when fetching translation from server: {'", "'}!'", ".", "format", "(", "result", ".", "status_code", ")", ")", "translations", "=", "[", "]", "return", "translations" ]
Returns the translations requested by the user
[ "Returns", "the", "translations", "requested", "by", "the", "user" ]
3bd6397174e184d89d2a11149d87be5d12570c64
https://github.com/Unbabel/unbabel-py/blob/3bd6397174e184d89d2a11149d87be5d12570c64/unbabel/api.py#L370-L387
train
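A hedged usage sketch for the translation accessors; the constructor arguments and the attributes printed (uid, status) are assumptions, not confirmed by these records:

api = UnbabelApi('username', 'API_KEY')   # hypothetical credentials
for translation in api.get_translations(status='completed'):
    print(translation.uid, translation.status)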
Unbabel/unbabel-py
unbabel/api.py
UnbabelApi.get_translation
def get_translation(self, uid): ''' Returns a translation with the given id ''' result = self.api_call('translation/{}/'.format(uid)) if result.status_code == 200: translation = Translation(**json.loads(result.content)) else: log.critical( 'Error status when fetching translation from server: {' '}!'.format( result.status_code)) raise ValueError(result.content) return translation
python
def get_translation(self, uid): ''' Returns a translation with the given id ''' result = self.api_call('translation/{}/'.format(uid)) if result.status_code == 200: translation = Translation(**json.loads(result.content)) else: log.critical( 'Error status when fetching translation from server: {' '}!'.format( result.status_code)) raise ValueError(result.content) return translation
[ "def", "get_translation", "(", "self", ",", "uid", ")", ":", "result", "=", "self", ".", "api_call", "(", "'translation/{}/'", ".", "format", "(", "uid", ")", ")", "if", "result", ".", "status_code", "==", "200", ":", "translation", "=", "Translation", "(", "*", "*", "json", ".", "loads", "(", "result", ".", "content", ")", ")", "else", ":", "log", ".", "critical", "(", "'Error status when fetching translation from server: {'", "'}!'", ".", "format", "(", "result", ".", "status_code", ")", ")", "raise", "ValueError", "(", "result", ".", "content", ")", "return", "translation" ]
Returns a translation with the given id
[ "Returns", "a", "translation", "with", "the", "given", "id" ]
3bd6397174e184d89d2a11149d87be5d12570c64
https://github.com/Unbabel/unbabel-py/blob/3bd6397174e184d89d2a11149d87be5d12570c64/unbabel/api.py#L389-L402
train
Unbabel/unbabel-py
unbabel/api.py
UnbabelApi.get_language_pairs
def get_language_pairs(self, train_langs=None):
    '''
        Returns the language pairs available on unbabel
    '''
    if train_langs is None:
        result = self.api_call('language_pair/')
    else:
        result = self.api_call(
            'language_pair/?train_langs={}'.format(train_langs))
    try:
        langs_json = json.loads(result.content)
        if 'error' in langs_json:
            return []
        languages = [LangPair(Language(
            shortname=lang_json["lang_pair"]["source_language"][
                "shortname"],
            name=lang_json["lang_pair"]["source_language"]["name"]),
            Language(shortname=lang_json["lang_pair"][
                "target_language"]["shortname"],
                     name=lang_json["lang_pair"][
                         "target_language"]["name"])
        ) for lang_json in langs_json["objects"]]
    except Exception as e:
        log.exception("Error decoding get language pairs")
        raise e
    return languages
python
def get_language_pairs(self, train_langs=None):
    '''
        Returns the language pairs available on unbabel
    '''
    if train_langs is None:
        result = self.api_call('language_pair/')
    else:
        result = self.api_call(
            'language_pair/?train_langs={}'.format(train_langs))
    try:
        langs_json = json.loads(result.content)
        if 'error' in langs_json:
            return []
        languages = [LangPair(Language(
            shortname=lang_json["lang_pair"]["source_language"][
                "shortname"],
            name=lang_json["lang_pair"]["source_language"]["name"]),
            Language(shortname=lang_json["lang_pair"][
                "target_language"]["shortname"],
                     name=lang_json["lang_pair"][
                         "target_language"]["name"])
        ) for lang_json in langs_json["objects"]]
    except Exception as e:
        log.exception("Error decoding get language pairs")
        raise e
    return languages
[ "def", "get_language_pairs", "(", "self", ",", "train_langs", "=", "None", ")", ":", "if", "train_langs", "is", "None", ":", "result", "=", "self", ".", "api_call", "(", "'language_pair/'", ")", "else", ":", "result", "=", "self", ".", "api_call", "(", "'language_pair/?train_langs={}'", ".", "format", "(", "train_langs", ")", ")", "try", ":", "langs_json", "=", "json", ".", "loads", "(", "result", ".", "content", ")", "if", "'error'", "in", "langs_json", ":", "return", "[", "]", "languages", "=", "[", "LangPair", "(", "Language", "(", "shortname", "=", "lang_json", "[", "\"lang_pair\"", "]", "[", "\"source_language\"", "]", "[", "\"shortname\"", "]", ",", "name", "=", "lang_json", "[", "\"lang_pair\"", "]", "[", "\"source_language\"", "]", "[", "\"name\"", "]", ")", ",", "Language", "(", "shortname", "=", "lang_json", "[", "\"lang_pair\"", "]", "[", "\"target_language\"", "]", "[", "\"shortname\"", "]", ",", "name", "=", "lang_json", "[", "\"lang_pair\"", "]", "[", "\"target_language\"", "]", "[", "\"name\"", "]", ")", ")", "for", "lang_json", "in", "langs_json", "[", "\"objects\"", "]", "]", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "\"Error decoding get language pairs\"", ")", "raise", "e", "return", "languages" ]
Returns the language pairs available on unbabel
[ "Returns", "the", "language", "pairs", "available", "on", "unbabel" ]
3bd6397174e184d89d2a11149d87be5d12570c64
https://github.com/Unbabel/unbabel-py/blob/3bd6397174e184d89d2a11149d87be5d12570c64/unbabel/api.py#L451-L476
train
Unbabel/unbabel-py
unbabel/api.py
UnbabelApi.get_tones
def get_tones(self): ''' Returns the tones available on unbabel ''' result = self.api_call('tone/') tones_json = json.loads(result.content) tones = [Tone(name=tone_json["tone"]["name"], description=tone_json["tone"]["description"]) for tone_json in tones_json["objects"]] return tones
python
def get_tones(self): ''' Returns the tones available on unbabel ''' result = self.api_call('tone/') tones_json = json.loads(result.content) tones = [Tone(name=tone_json["tone"]["name"], description=tone_json["tone"]["description"]) for tone_json in tones_json["objects"]] return tones
[ "def", "get_tones", "(", "self", ")", ":", "result", "=", "self", ".", "api_call", "(", "'tone/'", ")", "tones_json", "=", "json", ".", "loads", "(", "result", ".", "content", ")", "tones", "=", "[", "Tone", "(", "name", "=", "tone_json", "[", "\"tone\"", "]", "[", "\"name\"", "]", ",", "description", "=", "tone_json", "[", "\"tone\"", "]", "[", "\"description\"", "]", ")", "for", "tone_json", "in", "tones_json", "[", "\"objects\"", "]", "]", "return", "tones" ]
Returns the tones available on unbabel
[ "Returns", "the", "tones", "available", "on", "unbabel" ]
3bd6397174e184d89d2a11149d87be5d12570c64
https://github.com/Unbabel/unbabel-py/blob/3bd6397174e184d89d2a11149d87be5d12570c64/unbabel/api.py#L478-L487
train
Unbabel/unbabel-py
unbabel/api.py
UnbabelApi.get_topics
def get_topics(self): ''' Returns the topics available on unbabel ''' result = self.api_call('topic/') topics_json = json.loads(result.content) topics = [Topic(name=topic_json["topic"]["name"]) for topic_json in topics_json["objects"]] return topics
python
def get_topics(self): ''' Returns the topics available on unbabel ''' result = self.api_call('topic/') topics_json = json.loads(result.content) topics = [Topic(name=topic_json["topic"]["name"]) for topic_json in topics_json["objects"]] return topics
[ "def", "get_topics", "(", "self", ")", ":", "result", "=", "self", ".", "api_call", "(", "'topic/'", ")", "topics_json", "=", "json", ".", "loads", "(", "result", ".", "content", ")", "topics", "=", "[", "Topic", "(", "name", "=", "topic_json", "[", "\"topic\"", "]", "[", "\"name\"", "]", ")", "for", "topic_json", "in", "topics_json", "[", "\"objects\"", "]", "]", "return", "topics" ]
Returns the topics available on unbabel
[ "Returns", "the", "topics", "available", "on", "unbabel" ]
3bd6397174e184d89d2a11149d87be5d12570c64
https://github.com/Unbabel/unbabel-py/blob/3bd6397174e184d89d2a11149d87be5d12570c64/unbabel/api.py#L489-L497
train
vmonaco/pohmm
pohmm/pohmm.py
Pohmm.rand
def rand(self, unique_pstates, random_state=None): """ Randomize the POHMM parameters """ self._init_pstates(unique_pstates) self._init_random(random_state=random_state) self._compute_marginals() return self
python
def rand(self, unique_pstates, random_state=None): """ Randomize the POHMM parameters """ self._init_pstates(unique_pstates) self._init_random(random_state=random_state) self._compute_marginals() return self
[ "def", "rand", "(", "self", ",", "unique_pstates", ",", "random_state", "=", "None", ")", ":", "self", ".", "_init_pstates", "(", "unique_pstates", ")", "self", ".", "_init_random", "(", "random_state", "=", "random_state", ")", "self", ".", "_compute_marginals", "(", ")", "return", "self" ]
Randomize the POHMM parameters
[ "Randomize", "the", "POHMM", "parameters" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L662-L669
train
vmonaco/pohmm
pohmm/pohmm.py
Pohmm.score_events
def score_events(self, obs, pstates): """ Compute the log probability of each event under the model. """ pstates_idx = np.array([self.e[p] for p in pstates]) framelogprob = self._compute_log_likelihood(obs, pstates_idx) _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx) L = logsumexp(fwdlattice, axis=1) return np.concatenate([L[[0]], np.diff(L)])
python
def score_events(self, obs, pstates): """ Compute the log probability of each event under the model. """ pstates_idx = np.array([self.e[p] for p in pstates]) framelogprob = self._compute_log_likelihood(obs, pstates_idx) _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx) L = logsumexp(fwdlattice, axis=1) return np.concatenate([L[[0]], np.diff(L)])
[ "def", "score_events", "(", "self", ",", "obs", ",", "pstates", ")", ":", "pstates_idx", "=", "np", ".", "array", "(", "[", "self", ".", "e", "[", "p", "]", "for", "p", "in", "pstates", "]", ")", "framelogprob", "=", "self", ".", "_compute_log_likelihood", "(", "obs", ",", "pstates_idx", ")", "_", ",", "fwdlattice", "=", "self", ".", "_do_forward_pass", "(", "framelogprob", ",", "pstates_idx", ")", "L", "=", "logsumexp", "(", "fwdlattice", ",", "axis", "=", "1", ")", "return", "np", ".", "concatenate", "(", "[", "L", "[", "[", "0", "]", "]", ",", "np", ".", "diff", "(", "L", ")", "]", ")" ]
Compute the log probability of each event under the model.
[ "Compute", "the", "log", "probability", "of", "each", "event", "under", "the", "model", "." ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L742-L751
train
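Why the final concatenate/diff in 'score_events' yields per-event scores: logsumexp over the forward lattice gives the cumulative log-likelihood after each event, so the first value followed by successive differences recovers each event's contribution. Toy numbers:

import numpy as np

L = np.array([-1.2, -2.9, -3.4])                  # cumulative log-likelihoods
per_event = np.concatenate([L[[0]], np.diff(L)])  # [-1.2, -1.7, -0.5]
print(np.isclose(per_event.sum(), L[-1]))         # True: they sum to the total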
vmonaco/pohmm
pohmm/pohmm.py
Pohmm.predict
def predict(self, obs, pstates, next_pstate=None): """ Predict the next observation """ assert len(obs) == len(pstates) pstates_idx = np.array([self.e[ei] for ei in pstates]) next_pstate_idx = self.e[next_pstate] if len(obs) == 0: # No history, use the starting probas next_hstate_prob = self.startprob[next_pstate_idx] else: # With a history, determine the hidden state posteriors using # the last posteriors and transition matrix framelogprob = self._compute_log_likelihood(obs, pstates_idx) _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx) next_hstate_prob = np.zeros(self.n_hidden_states) alpha_n = fwdlattice[-1] vmax = alpha_n.max(axis=0) alpha_n = np.exp(alpha_n - vmax) alpha_n = alpha_n / alpha_n.sum() trans = self.transmat[pstates_idx[-1], next_pstate_idx] for i in range(self.n_hidden_states): next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)]) assert next_hstate_prob.sum() - 1 < TOLERANCE # Make the prediction prediction = np.array( [self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in self.emission_name]) # next_hstate = np.argmax(next_hstate_prob) # prediction = np.array( # [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in # self.emission_name]) return prediction
python
def predict(self, obs, pstates, next_pstate=None): """ Predict the next observation """ assert len(obs) == len(pstates) pstates_idx = np.array([self.e[ei] for ei in pstates]) next_pstate_idx = self.e[next_pstate] if len(obs) == 0: # No history, use the starting probas next_hstate_prob = self.startprob[next_pstate_idx] else: # With a history, determine the hidden state posteriors using # the last posteriors and transition matrix framelogprob = self._compute_log_likelihood(obs, pstates_idx) _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx) next_hstate_prob = np.zeros(self.n_hidden_states) alpha_n = fwdlattice[-1] vmax = alpha_n.max(axis=0) alpha_n = np.exp(alpha_n - vmax) alpha_n = alpha_n / alpha_n.sum() trans = self.transmat[pstates_idx[-1], next_pstate_idx] for i in range(self.n_hidden_states): next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)]) assert next_hstate_prob.sum() - 1 < TOLERANCE # Make the prediction prediction = np.array( [self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in self.emission_name]) # next_hstate = np.argmax(next_hstate_prob) # prediction = np.array( # [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in # self.emission_name]) return prediction
[ "def", "predict", "(", "self", ",", "obs", ",", "pstates", ",", "next_pstate", "=", "None", ")", ":", "assert", "len", "(", "obs", ")", "==", "len", "(", "pstates", ")", "pstates_idx", "=", "np", ".", "array", "(", "[", "self", ".", "e", "[", "ei", "]", "for", "ei", "in", "pstates", "]", ")", "next_pstate_idx", "=", "self", ".", "e", "[", "next_pstate", "]", "if", "len", "(", "obs", ")", "==", "0", ":", "# No history, use the starting probas", "next_hstate_prob", "=", "self", ".", "startprob", "[", "next_pstate_idx", "]", "else", ":", "# With a history, determine the hidden state posteriors using", "# the last posteriors and transition matrix", "framelogprob", "=", "self", ".", "_compute_log_likelihood", "(", "obs", ",", "pstates_idx", ")", "_", ",", "fwdlattice", "=", "self", ".", "_do_forward_pass", "(", "framelogprob", ",", "pstates_idx", ")", "next_hstate_prob", "=", "np", ".", "zeros", "(", "self", ".", "n_hidden_states", ")", "alpha_n", "=", "fwdlattice", "[", "-", "1", "]", "vmax", "=", "alpha_n", ".", "max", "(", "axis", "=", "0", ")", "alpha_n", "=", "np", ".", "exp", "(", "alpha_n", "-", "vmax", ")", "alpha_n", "=", "alpha_n", "/", "alpha_n", ".", "sum", "(", ")", "trans", "=", "self", ".", "transmat", "[", "pstates_idx", "[", "-", "1", "]", ",", "next_pstate_idx", "]", "for", "i", "in", "range", "(", "self", ".", "n_hidden_states", ")", ":", "next_hstate_prob", "[", "i", "]", "=", "np", ".", "sum", "(", "[", "alpha_n", "[", "j", "]", "*", "trans", "[", "j", ",", "i", "]", "for", "j", "in", "range", "(", "self", ".", "n_hidden_states", ")", "]", ")", "assert", "next_hstate_prob", ".", "sum", "(", ")", "-", "1", "<", "TOLERANCE", "# Make the prediction", "prediction", "=", "np", ".", "array", "(", "[", "self", ".", "expected_value", "(", "feature", ",", "pstate", "=", "next_pstate", ",", "hstate_prob", "=", "next_hstate_prob", ")", "for", "feature", "in", "self", ".", "emission_name", "]", ")", "# next_hstate = np.argmax(next_hstate_prob)", "# prediction = np.array(", "# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in", "# self.emission_name])", "return", "prediction" ]
Predict the next observation
[ "Predict", "the", "next", "observation" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L759-L800
train
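predict() runs a forward pass over the history, normalizes the last forward variables, pushes them through the transition matrix for the upcoming pstate, and returns the expected value of every emission feature; with an empty history it falls back on the starting probabilities. A sketch under the same assumptions as above (hypothetical fitted model and labels):

import numpy as np

# 'fitted_model' stands in for a trained Pohmm (training omitted).
history_obs = np.array([[0.21], [0.35]])
history_pstates = ['a', 'b']

# Expected next observation, given that the next pstate will be 'a'.
next_obs = fitted_model.predict(history_obs, history_pstates, next_pstate='a')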
vmonaco/pohmm
pohmm/pohmm.py
Pohmm.fit_df
def fit_df(self, dfs, pstate_col=PSTATE_COL): """ Convenience function to fit a model from a list of dataframes """ obs_cols = list(self.emission_name) obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs] pstates = [df[pstate_col].values for df in dfs] return self.fit(obs, pstates)
python
def fit_df(self, dfs, pstate_col=PSTATE_COL): """ Convenience function to fit a model from a list of dataframes """ obs_cols = list(self.emission_name) obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs] pstates = [df[pstate_col].values for df in dfs] return self.fit(obs, pstates)
[ "def", "fit_df", "(", "self", ",", "dfs", ",", "pstate_col", "=", "PSTATE_COL", ")", ":", "obs_cols", "=", "list", "(", "self", ".", "emission_name", ")", "obs", "=", "[", "df", "[", "df", ".", "columns", ".", "difference", "(", "[", "pstate_col", "]", ")", "]", "[", "obs_cols", "]", ".", "values", "for", "df", "in", "dfs", "]", "pstates", "=", "[", "df", "[", "pstate_col", "]", ".", "values", "for", "df", "in", "dfs", "]", "return", "self", ".", "fit", "(", "obs", ",", "pstates", ")" ]
Convenience function to fit a model from a list of dataframes
[ "Convenience", "function", "to", "fit", "a", "model", "from", "a", "list", "of", "dataframes" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L876-L883
train
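fit_df simply slices each dataframe into the emission columns and the pstate column before delegating to fit, so the column names must match the model's emission names plus the pstate column. A sketch given a Pohmm instance model whose single emission feature is assumed to be named 'tau' (construction omitted); 'event' plays the role of PSTATE_COL:

import pandas as pd

df1 = pd.DataFrame({'event': ['a', 'b', 'a'], 'tau': [0.21, 0.35, 0.18]})
df2 = pd.DataFrame({'event': ['b', 'a'], 'tau': [0.40, 0.22]})

# Equivalent to model.fit([obs1, obs2], [pstates1, pstates2]).
model.fit_df([df1, df2], pstate_col='event')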
vmonaco/pohmm
pohmm/pohmm.py
Pohmm.sample_df
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL): """ Convenience function to generate samples a model and create a dataframe """ try: import pandas as pd except Exception as e: raise e obs, pstates, hstates = self.sample(pstates, n_obs, random_state) items = [] if pstate_col is not None: items.append((pstate_col, pstates)) if hstate_col is not None: items.append((hstate_col, hstates)) items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)] df = pd.DataFrame.from_items(items) return df
python
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL): """ Convenience function to generate samples a model and create a dataframe """ try: import pandas as pd except Exception as e: raise e obs, pstates, hstates = self.sample(pstates, n_obs, random_state) items = [] if pstate_col is not None: items.append((pstate_col, pstates)) if hstate_col is not None: items.append((hstate_col, hstates)) items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)] df = pd.DataFrame.from_items(items) return df
[ "def", "sample_df", "(", "self", ",", "pstates", "=", "None", ",", "n_obs", "=", "None", ",", "random_state", "=", "None", ",", "pstate_col", "=", "PSTATE_COL", ",", "hstate_col", "=", "HSTATE_COL", ")", ":", "try", ":", "import", "pandas", "as", "pd", "except", "Exception", "as", "e", ":", "raise", "e", "obs", ",", "pstates", ",", "hstates", "=", "self", ".", "sample", "(", "pstates", ",", "n_obs", ",", "random_state", ")", "items", "=", "[", "]", "if", "pstate_col", "is", "not", "None", ":", "items", ".", "append", "(", "(", "pstate_col", ",", "pstates", ")", ")", "if", "hstate_col", "is", "not", "None", ":", "items", ".", "append", "(", "(", "hstate_col", ",", "hstates", ")", ")", "items", "=", "items", "+", "[", "(", "self", ".", "emission_name", "[", "i", "]", ",", "obs", "[", ":", ",", "i", "]", ")", "for", "i", "in", "range", "(", "self", ".", "n_features", ")", "]", "df", "=", "pd", ".", "DataFrame", ".", "from_items", "(", "items", ")", "return", "df" ]
Convenience function to generate samples from a model and create a dataframe
[ "Convenience", "function", "to", "generate", "samples", "a", "model", "and", "create", "a", "dataframe" ]
c00f8a62d3005a171d424549a55d46c421859ae9
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L919-L939
train
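Note that pd.DataFrame.from_items was deprecated in pandas 0.23 and removed in later releases, so sample_df raises AttributeError on a current pandas. An order-preserving replacement for that one call (dicts keep insertion order on Python 3.7+); the column names and data below are invented to mirror the (pstate, hstate, features) layout:

import pandas as pd

items = [('event', ['a', 'b']), ('hstate', [0, 1]), ('tau', [0.21, 0.35])]
df = pd.DataFrame(dict(items))  # same columns, same order as from_items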
nickpandolfi/Cyther
cyther/__main__.py
main
def main(args=None): """ Entry point for cyther-script, generated by setup.py on installation """ if args is None: args = sys.argv[1:] if not args: args = ['-h'] namespace = parser.parse_args(args) entry_function = namespace.func del namespace.func kwargs = namespace.__dict__ return entry_function(**kwargs)
python
def main(args=None): """ Entry point for cyther-script, generated by setup.py on installation """ if args is None: args = sys.argv[1:] if not args: args = ['-h'] namespace = parser.parse_args(args) entry_function = namespace.func del namespace.func kwargs = namespace.__dict__ return entry_function(**kwargs)
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "if", "not", "args", ":", "args", "=", "[", "'-h'", "]", "namespace", "=", "parser", ".", "parse_args", "(", "args", ")", "entry_function", "=", "namespace", ".", "func", "del", "namespace", ".", "func", "kwargs", "=", "namespace", ".", "__dict__", "return", "entry_function", "(", "*", "*", "kwargs", ")" ]
Entry point for cyther-script, generated by setup.py on installation
[ "Entry", "point", "for", "cyther", "-", "script", "generated", "by", "setup", ".", "py", "on", "installation" ]
9fb0bd77af594008aa6ee8af460aa8c953abf5bc
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/__main__.py#L11-L25
train
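main() relies on every subcommand registering its handler under the func attribute of the parsed namespace, which is the standard argparse set_defaults dispatch pattern. A self-contained reconstruction of that pattern (the greet subcommand and its handler are invented for illustration; Cyther's real parser lives elsewhere in the package):

import argparse

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers()

def greet(name):
    print('hello,', name)

greet_parser = subparsers.add_parser('greet')
greet_parser.add_argument('name')
greet_parser.set_defaults(func=greet)   # handler travels in the namespace

namespace = parser.parse_args(['greet', 'world'])
entry_function = namespace.func
del namespace.func                      # leave only real keyword arguments
entry_function(**namespace.__dict__)    # prints: hello, world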
marchete/django-adldap-sync
adldap_sync/management/commands/syncldap.py
Command.get_ldap_users
def get_ldap_users(self): """Retrieve user data from LDAP server.""" if (not self.conf_LDAP_SYNC_USER): return (None, None) user_keys = set(self.conf_LDAP_SYNC_USER_ATTRIBUTES.keys()) user_keys.update(self.conf_LDAP_SYNC_USER_EXTRA_ATTRIBUTES) uri_users_server, users = self.ldap_search(self.conf_LDAP_SYNC_USER_FILTER, user_keys, self.conf_LDAP_SYNC_USER_INCREMENTAL, self.conf_LDAP_SYNC_USER_FILTER_INCREMENTAL) logger.debug("Retrieved %d users from %s LDAP server" % (len(users), uri_users_server)) return (uri_users_server, users)
python
def get_ldap_users(self): """Retrieve user data from LDAP server.""" if (not self.conf_LDAP_SYNC_USER): return (None, None) user_keys = set(self.conf_LDAP_SYNC_USER_ATTRIBUTES.keys()) user_keys.update(self.conf_LDAP_SYNC_USER_EXTRA_ATTRIBUTES) uri_users_server, users = self.ldap_search(self.conf_LDAP_SYNC_USER_FILTER, user_keys, self.conf_LDAP_SYNC_USER_INCREMENTAL, self.conf_LDAP_SYNC_USER_FILTER_INCREMENTAL) logger.debug("Retrieved %d users from %s LDAP server" % (len(users), uri_users_server)) return (uri_users_server, users)
[ "def", "get_ldap_users", "(", "self", ")", ":", "if", "(", "not", "self", ".", "conf_LDAP_SYNC_USER", ")", ":", "return", "(", "None", ",", "None", ")", "user_keys", "=", "set", "(", "self", ".", "conf_LDAP_SYNC_USER_ATTRIBUTES", ".", "keys", "(", ")", ")", "user_keys", ".", "update", "(", "self", ".", "conf_LDAP_SYNC_USER_EXTRA_ATTRIBUTES", ")", "uri_users_server", ",", "users", "=", "self", ".", "ldap_search", "(", "self", ".", "conf_LDAP_SYNC_USER_FILTER", ",", "user_keys", ",", "self", ".", "conf_LDAP_SYNC_USER_INCREMENTAL", ",", "self", ".", "conf_LDAP_SYNC_USER_FILTER_INCREMENTAL", ")", "logger", ".", "debug", "(", "\"Retrieved %d users from %s LDAP server\"", "%", "(", "len", "(", "users", ")", ",", "uri_users_server", ")", ")", "return", "(", "uri_users_server", ",", "users", ")" ]
Retrieve user data from LDAP server.
[ "Retrieve", "user", "data", "from", "LDAP", "server", "." ]
f6be226a4fb2a433d22e95043bd656ce902f8254
https://github.com/marchete/django-adldap-sync/blob/f6be226a4fb2a433d22e95043bd656ce902f8254/adldap_sync/management/commands/syncldap.py#L290-L298
train
marchete/django-adldap-sync
adldap_sync/management/commands/syncldap.py
Command.get_ldap_groups
def get_ldap_groups(self): """Retrieve groups from LDAP server.""" if (not self.conf_LDAP_SYNC_GROUP): return (None, None) uri_groups_server, groups = self.ldap_search(self.conf_LDAP_SYNC_GROUP_FILTER, self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys(), self.conf_LDAP_SYNC_GROUP_INCREMENTAL, self.conf_LDAP_SYNC_GROUP_FILTER_INCREMENTAL) logger.debug("Retrieved %d groups from %s LDAP server" % (len(groups), uri_groups_server)) return (uri_groups_server, groups)
python
def get_ldap_groups(self): """Retrieve groups from LDAP server.""" if (not self.conf_LDAP_SYNC_GROUP): return (None, None) uri_groups_server, groups = self.ldap_search(self.conf_LDAP_SYNC_GROUP_FILTER, self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys(), self.conf_LDAP_SYNC_GROUP_INCREMENTAL, self.conf_LDAP_SYNC_GROUP_FILTER_INCREMENTAL) logger.debug("Retrieved %d groups from %s LDAP server" % (len(groups), uri_groups_server)) return (uri_groups_server, groups)
[ "def", "get_ldap_groups", "(", "self", ")", ":", "if", "(", "not", "self", ".", "conf_LDAP_SYNC_GROUP", ")", ":", "return", "(", "None", ",", "None", ")", "uri_groups_server", ",", "groups", "=", "self", ".", "ldap_search", "(", "self", ".", "conf_LDAP_SYNC_GROUP_FILTER", ",", "self", ".", "conf_LDAP_SYNC_GROUP_ATTRIBUTES", ".", "keys", "(", ")", ",", "self", ".", "conf_LDAP_SYNC_GROUP_INCREMENTAL", ",", "self", ".", "conf_LDAP_SYNC_GROUP_FILTER_INCREMENTAL", ")", "logger", ".", "debug", "(", "\"Retrieved %d groups from %s LDAP server\"", "%", "(", "len", "(", "groups", ")", ",", "uri_groups_server", ")", ")", "return", "(", "uri_groups_server", ",", "groups", ")" ]
Retrieve groups from LDAP server.
[ "Retrieve", "groups", "from", "LDAP", "server", "." ]
f6be226a4fb2a433d22e95043bd656ce902f8254
https://github.com/marchete/django-adldap-sync/blob/f6be226a4fb2a433d22e95043bd656ce902f8254/adldap_sync/management/commands/syncldap.py#L558-L564
train
marchete/django-adldap-sync
adldap_sync/management/commands/syncldap.py
Command.get_ldap_user_membership
def get_ldap_user_membership(self, user_dn): """Retrieve user membership from LDAP server.""" #Escape parenthesis in DN membership_filter = self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_FILTER.replace('{distinguishedName}', user_dn.replace('(', "\(").replace(')', "\)")) try: uri, groups = self.ldap_search(membership_filter, self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys(), False, membership_filter) except Exception as e: logger.error("Error reading membership: Filter %s, Keys %s" % (membership_filter, str(self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys()))) return None #logger.debug("AD Membership: Retrieved %d groups for user '%s'" % (len(groups), user_dn)) return (uri, groups)
python
def get_ldap_user_membership(self, user_dn): """Retrieve user membership from LDAP server.""" #Escape parenthesis in DN membership_filter = self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_FILTER.replace('{distinguishedName}', user_dn.replace('(', "\(").replace(')', "\)")) try: uri, groups = self.ldap_search(membership_filter, self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys(), False, membership_filter) except Exception as e: logger.error("Error reading membership: Filter %s, Keys %s" % (membership_filter, str(self.conf_LDAP_SYNC_GROUP_ATTRIBUTES.keys()))) return None #logger.debug("AD Membership: Retrieved %d groups for user '%s'" % (len(groups), user_dn)) return (uri, groups)
[ "def", "get_ldap_user_membership", "(", "self", ",", "user_dn", ")", ":", "#Escape parenthesis in DN", "membership_filter", "=", "self", ".", "conf_LDAP_SYNC_GROUP_MEMBERSHIP_FILTER", ".", "replace", "(", "'{distinguishedName}'", ",", "user_dn", ".", "replace", "(", "'('", ",", "\"\\(\"", ")", ".", "replace", "(", "')'", ",", "\"\\)\"", ")", ")", "try", ":", "uri", ",", "groups", "=", "self", ".", "ldap_search", "(", "membership_filter", ",", "self", ".", "conf_LDAP_SYNC_GROUP_ATTRIBUTES", ".", "keys", "(", ")", ",", "False", ",", "membership_filter", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Error reading membership: Filter %s, Keys %s\"", "%", "(", "membership_filter", ",", "str", "(", "self", ".", "conf_LDAP_SYNC_GROUP_ATTRIBUTES", ".", "keys", "(", ")", ")", ")", ")", "return", "None", "#logger.debug(\"AD Membership: Retrieved %d groups for user '%s'\" % (len(groups), user_dn))", "return", "(", "uri", ",", "groups", ")" ]
Retrieve user membership from LDAP server.
[ "Retrieve", "user", "membership", "from", "LDAP", "server", "." ]
f6be226a4fb2a433d22e95043bd656ce902f8254
https://github.com/marchete/django-adldap-sync/blob/f6be226a4fb2a433d22e95043bd656ce902f8254/adldap_sync/management/commands/syncldap.py#L603-L613
train
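The hand-rolled replace() above escapes only parentheses, but RFC 4515 also requires escaping *, backslash and NUL in filter values. python-ldap ships a complete escaper; a sketch of the same substitution using it (the filter template and DN are hypothetical):

from ldap.filter import escape_filter_chars

template = '(member={distinguishedName})'
user_dn = 'CN=Smith (Admin),OU=Users,DC=example,DC=com'

# escape_filter_chars handles (, ), *, \ and NUL per RFC 4515.
membership_filter = template.replace('{distinguishedName}',
                                     escape_filter_chars(user_dn))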
marchete/django-adldap-sync
adldap_sync/management/commands/syncldap.py
Command.sync_ldap_user_membership
def sync_ldap_user_membership(self, user, ldap_groups): """Synchronize LDAP membership to Django membership""" groupname_field = 'name' actualGroups = user.groups.values_list('name', flat=True) user_Membership_total = len(ldap_groups) user_Membership_added = 0 user_Membership_deleted = 0 user_Membership_errors = 0 ldap_groups += self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_ADD_DEFAULT ldap_name_groups = [] for cname, ldap_attributes in ldap_groups: defaults = {} try: for name, attribute in ldap_attributes.items(): defaults[self.conf_LDAP_SYNC_GROUP_ATTRIBUTES[name]] = attribute[0].decode('utf-8') except AttributeError: # In some cases attrs is a list instead of a dict; skip these invalid groups continue try: groupname = defaults[groupname_field] ldap_name_groups.append(groupname) except KeyError: logger.warning("Group is missing a required attribute '%s'" % groupname_field) user_Membership_errors += 1 continue if (groupname not in actualGroups): kwargs = { groupname_field + '__iexact': groupname, 'defaults': defaults, } #Adding Group Membership try: if (self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_CREATE_IF_NOT_EXISTS): group, created = Group.objects.get_or_create(**kwargs) else: group = Group.objects.get(name=groupname) created = False except (ObjectDoesNotExist): #Doesn't exist and not autocreate groups, we pass the error continue except (IntegrityError, DataError) as e: logger.error("Error creating group %s: %s" % (groupname, e)) user_Membership_errors += 1 else: if created: logger.debug("Created group %s" % groupname) #Now well assign the user group.user_set.add(user) user_Membership_added += 1 #Default Primary Group: Temporary is fixed #removing group membership for check_group in actualGroups: if (check_group not in ldap_name_groups): group = Group.objects.get(name=check_group) group.user_set.remove(user) user_Membership_deleted += 1 if ((user_Membership_deleted > 0) or (user_Membership_added > 0)): group.save() logger.info("Group membership for user %s synchronized: %d Added, %d Removed" % (user.username, user_Membership_added, user_Membership_deleted)) #Return statistics self.stats_membership_total += user_Membership_total self.stats_membership_added += user_Membership_added self.stats_membership_deleted += user_Membership_deleted self.stats_membership_errors += user_Membership_errors
python
def sync_ldap_user_membership(self, user, ldap_groups): """Synchronize LDAP membership to Django membership""" groupname_field = 'name' actualGroups = user.groups.values_list('name', flat=True) user_Membership_total = len(ldap_groups) user_Membership_added = 0 user_Membership_deleted = 0 user_Membership_errors = 0 ldap_groups += self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_ADD_DEFAULT ldap_name_groups = [] for cname, ldap_attributes in ldap_groups: defaults = {} try: for name, attribute in ldap_attributes.items(): defaults[self.conf_LDAP_SYNC_GROUP_ATTRIBUTES[name]] = attribute[0].decode('utf-8') except AttributeError: # In some cases attrs is a list instead of a dict; skip these invalid groups continue try: groupname = defaults[groupname_field] ldap_name_groups.append(groupname) except KeyError: logger.warning("Group is missing a required attribute '%s'" % groupname_field) user_Membership_errors += 1 continue if (groupname not in actualGroups): kwargs = { groupname_field + '__iexact': groupname, 'defaults': defaults, } #Adding Group Membership try: if (self.conf_LDAP_SYNC_GROUP_MEMBERSHIP_CREATE_IF_NOT_EXISTS): group, created = Group.objects.get_or_create(**kwargs) else: group = Group.objects.get(name=groupname) created = False except (ObjectDoesNotExist): #Doesn't exist and not autocreate groups, we pass the error continue except (IntegrityError, DataError) as e: logger.error("Error creating group %s: %s" % (groupname, e)) user_Membership_errors += 1 else: if created: logger.debug("Created group %s" % groupname) #Now well assign the user group.user_set.add(user) user_Membership_added += 1 #Default Primary Group: Temporary is fixed #removing group membership for check_group in actualGroups: if (check_group not in ldap_name_groups): group = Group.objects.get(name=check_group) group.user_set.remove(user) user_Membership_deleted += 1 if ((user_Membership_deleted > 0) or (user_Membership_added > 0)): group.save() logger.info("Group membership for user %s synchronized: %d Added, %d Removed" % (user.username, user_Membership_added, user_Membership_deleted)) #Return statistics self.stats_membership_total += user_Membership_total self.stats_membership_added += user_Membership_added self.stats_membership_deleted += user_Membership_deleted self.stats_membership_errors += user_Membership_errors
[ "def", "sync_ldap_user_membership", "(", "self", ",", "user", ",", "ldap_groups", ")", ":", "groupname_field", "=", "'name'", "actualGroups", "=", "user", ".", "groups", ".", "values_list", "(", "'name'", ",", "flat", "=", "True", ")", "user_Membership_total", "=", "len", "(", "ldap_groups", ")", "user_Membership_added", "=", "0", "user_Membership_deleted", "=", "0", "user_Membership_errors", "=", "0", "ldap_groups", "+=", "self", ".", "conf_LDAP_SYNC_GROUP_MEMBERSHIP_ADD_DEFAULT", "ldap_name_groups", "=", "[", "]", "for", "cname", ",", "ldap_attributes", "in", "ldap_groups", ":", "defaults", "=", "{", "}", "try", ":", "for", "name", ",", "attribute", "in", "ldap_attributes", ".", "items", "(", ")", ":", "defaults", "[", "self", ".", "conf_LDAP_SYNC_GROUP_ATTRIBUTES", "[", "name", "]", "]", "=", "attribute", "[", "0", "]", ".", "decode", "(", "'utf-8'", ")", "except", "AttributeError", ":", "# In some cases attrs is a list instead of a dict; skip these invalid groups", "continue", "try", ":", "groupname", "=", "defaults", "[", "groupname_field", "]", "ldap_name_groups", ".", "append", "(", "groupname", ")", "except", "KeyError", ":", "logger", ".", "warning", "(", "\"Group is missing a required attribute '%s'\"", "%", "groupname_field", ")", "user_Membership_errors", "+=", "1", "continue", "if", "(", "groupname", "not", "in", "actualGroups", ")", ":", "kwargs", "=", "{", "groupname_field", "+", "'__iexact'", ":", "groupname", ",", "'defaults'", ":", "defaults", ",", "}", "#Adding Group Membership", "try", ":", "if", "(", "self", ".", "conf_LDAP_SYNC_GROUP_MEMBERSHIP_CREATE_IF_NOT_EXISTS", ")", ":", "group", ",", "created", "=", "Group", ".", "objects", ".", "get_or_create", "(", "*", "*", "kwargs", ")", "else", ":", "group", "=", "Group", ".", "objects", ".", "get", "(", "name", "=", "groupname", ")", "created", "=", "False", "except", "(", "ObjectDoesNotExist", ")", ":", "#Doesn't exist and not autocreate groups, we pass the error", "continue", "except", "(", "IntegrityError", ",", "DataError", ")", "as", "e", ":", "logger", ".", "error", "(", "\"Error creating group %s: %s\"", "%", "(", "groupname", ",", "e", ")", ")", "user_Membership_errors", "+=", "1", "else", ":", "if", "created", ":", "logger", ".", "debug", "(", "\"Created group %s\"", "%", "groupname", ")", "#Now well assign the user", "group", ".", "user_set", ".", "add", "(", "user", ")", "user_Membership_added", "+=", "1", "#Default Primary Group: Temporary is fixed", "#removing group membership", "for", "check_group", "in", "actualGroups", ":", "if", "(", "check_group", "not", "in", "ldap_name_groups", ")", ":", "group", "=", "Group", ".", "objects", ".", "get", "(", "name", "=", "check_group", ")", "group", ".", "user_set", ".", "remove", "(", "user", ")", "user_Membership_deleted", "+=", "1", "if", "(", "(", "user_Membership_deleted", ">", "0", ")", "or", "(", "user_Membership_added", ">", "0", ")", ")", ":", "group", ".", "save", "(", ")", "logger", ".", "info", "(", "\"Group membership for user %s synchronized: %d Added, %d Removed\"", "%", "(", "user", ".", "username", ",", "user_Membership_added", ",", "user_Membership_deleted", ")", ")", "#Return statistics", "self", ".", "stats_membership_total", "+=", "user_Membership_total", "self", ".", "stats_membership_added", "+=", "user_Membership_added", "self", ".", "stats_membership_deleted", "+=", "user_Membership_deleted", "self", ".", "stats_membership_errors", "+=", "user_Membership_errors" ]
Synchronize LDAP membership to Django membership
[ "Synchronize", "LDAP", "membership", "to", "Django", "membership" ]
f6be226a4fb2a433d22e95043bd656ce902f8254
https://github.com/marchete/django-adldap-sync/blob/f6be226a4fb2a433d22e95043bd656ce902f8254/adldap_sync/management/commands/syncldap.py#L615-L686
train
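The add/remove bookkeeping in sync_ldap_user_membership is a pair of set differences in disguise: groups present in LDAP but not in Django get added, and groups present in Django but not in LDAP get removed. A standalone illustration with hypothetical group names:

current = {'staff', 'legacy'}          # user.groups in Django
desired = {'staff', 'ldap-users'}      # names resolved from ldap_groups

to_add = desired - current             # {'ldap-users'}
to_remove = current - desired          # {'legacy'}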
marchete/django-adldap-sync
adldap_sync/management/commands/syncldap.py
Command.ldap_search
def ldap_search(self, filter, attributes, incremental, incremental_filter): """ Query the configured LDAP server with the provided search filter and attribute list. """ for uri in self.conf_LDAP_SYNC_BIND_URI: #Read record of this uri if (self.working_uri == uri): adldap_sync = self.working_adldap_sync created = False else: adldap_sync, created = ADldap_Sync.objects.get_or_create(ldap_sync_uri=uri) if ((adldap_sync.syncs_to_full > 0) and incremental): filter_to_use = incremental_filter.replace('?', self.whenchanged.strftime(self.conf_LDAP_SYNC_INCREMENTAL_TIMESTAMPFORMAT)) logger.debug("Using an incremental search. Filter is:'%s'" % filter_to_use) else: filter_to_use = filter ldap.set_option(ldap.OPT_REFERRALS, 0) #ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10) l = PagedLDAPObject(uri) l.protocol_version = 3 if (uri.startswith('ldaps:')): l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND) l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND) l.set_option(ldap.OPT_X_TLS_DEMAND, True) else: l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_NEVER) l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) l.set_option(ldap.OPT_X_TLS_DEMAND, False) try: l.simple_bind_s(self.conf_LDAP_SYNC_BIND_DN, self.conf_LDAP_SYNC_BIND_PASS) except ldap.LDAPError as e: logger.error("Error connecting to LDAP server %s : %s" % (uri, e)) continue results = l.paged_search_ext_s(self.conf_LDAP_SYNC_BIND_SEARCH, ldap.SCOPE_SUBTREE, filter_to_use, attrlist=attributes, serverctrls=None) l.unbind_s() if (self.working_uri is None): self.working_uri = uri self.conf_LDAP_SYNC_BIND_URI.insert(0, uri) self.working_adldap_sync = adldap_sync return (uri, results) # Return both the LDAP server URI used and the request. This is for incremental sync purposes #if not connected correctly, raise error raise
python
def ldap_search(self, filter, attributes, incremental, incremental_filter): """ Query the configured LDAP server with the provided search filter and attribute list. """ for uri in self.conf_LDAP_SYNC_BIND_URI: #Read record of this uri if (self.working_uri == uri): adldap_sync = self.working_adldap_sync created = False else: adldap_sync, created = ADldap_Sync.objects.get_or_create(ldap_sync_uri=uri) if ((adldap_sync.syncs_to_full > 0) and incremental): filter_to_use = incremental_filter.replace('?', self.whenchanged.strftime(self.conf_LDAP_SYNC_INCREMENTAL_TIMESTAMPFORMAT)) logger.debug("Using an incremental search. Filter is:'%s'" % filter_to_use) else: filter_to_use = filter ldap.set_option(ldap.OPT_REFERRALS, 0) #ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10) l = PagedLDAPObject(uri) l.protocol_version = 3 if (uri.startswith('ldaps:')): l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND) l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND) l.set_option(ldap.OPT_X_TLS_DEMAND, True) else: l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_NEVER) l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) l.set_option(ldap.OPT_X_TLS_DEMAND, False) try: l.simple_bind_s(self.conf_LDAP_SYNC_BIND_DN, self.conf_LDAP_SYNC_BIND_PASS) except ldap.LDAPError as e: logger.error("Error connecting to LDAP server %s : %s" % (uri, e)) continue results = l.paged_search_ext_s(self.conf_LDAP_SYNC_BIND_SEARCH, ldap.SCOPE_SUBTREE, filter_to_use, attrlist=attributes, serverctrls=None) l.unbind_s() if (self.working_uri is None): self.working_uri = uri self.conf_LDAP_SYNC_BIND_URI.insert(0, uri) self.working_adldap_sync = adldap_sync return (uri, results) # Return both the LDAP server URI used and the request. This is for incremental sync purposes #if not connected correctly, raise error raise
[ "def", "ldap_search", "(", "self", ",", "filter", ",", "attributes", ",", "incremental", ",", "incremental_filter", ")", ":", "for", "uri", "in", "self", ".", "conf_LDAP_SYNC_BIND_URI", ":", "#Read record of this uri", "if", "(", "self", ".", "working_uri", "==", "uri", ")", ":", "adldap_sync", "=", "self", ".", "working_adldap_sync", "created", "=", "False", "else", ":", "adldap_sync", ",", "created", "=", "ADldap_Sync", ".", "objects", ".", "get_or_create", "(", "ldap_sync_uri", "=", "uri", ")", "if", "(", "(", "adldap_sync", ".", "syncs_to_full", ">", "0", ")", "and", "incremental", ")", ":", "filter_to_use", "=", "incremental_filter", ".", "replace", "(", "'?'", ",", "self", ".", "whenchanged", ".", "strftime", "(", "self", ".", "conf_LDAP_SYNC_INCREMENTAL_TIMESTAMPFORMAT", ")", ")", "logger", ".", "debug", "(", "\"Using an incremental search. Filter is:'%s'\"", "%", "filter_to_use", ")", "else", ":", "filter_to_use", "=", "filter", "ldap", ".", "set_option", "(", "ldap", ".", "OPT_REFERRALS", ",", "0", ")", "#ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10)", "l", "=", "PagedLDAPObject", "(", "uri", ")", "l", ".", "protocol_version", "=", "3", "if", "(", "uri", ".", "startswith", "(", "'ldaps:'", ")", ")", ":", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS", ",", "ldap", ".", "OPT_X_TLS_DEMAND", ")", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS_REQUIRE_CERT", ",", "ldap", ".", "OPT_X_TLS_DEMAND", ")", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS_DEMAND", ",", "True", ")", "else", ":", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS", ",", "ldap", ".", "OPT_X_TLS_NEVER", ")", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS_REQUIRE_CERT", ",", "ldap", ".", "OPT_X_TLS_NEVER", ")", "l", ".", "set_option", "(", "ldap", ".", "OPT_X_TLS_DEMAND", ",", "False", ")", "try", ":", "l", ".", "simple_bind_s", "(", "self", ".", "conf_LDAP_SYNC_BIND_DN", ",", "self", ".", "conf_LDAP_SYNC_BIND_PASS", ")", "except", "ldap", ".", "LDAPError", "as", "e", ":", "logger", ".", "error", "(", "\"Error connecting to LDAP server %s : %s\"", "%", "(", "uri", ",", "e", ")", ")", "continue", "results", "=", "l", ".", "paged_search_ext_s", "(", "self", ".", "conf_LDAP_SYNC_BIND_SEARCH", ",", "ldap", ".", "SCOPE_SUBTREE", ",", "filter_to_use", ",", "attrlist", "=", "attributes", ",", "serverctrls", "=", "None", ")", "l", ".", "unbind_s", "(", ")", "if", "(", "self", ".", "working_uri", "is", "None", ")", ":", "self", ".", "working_uri", "=", "uri", "self", ".", "conf_LDAP_SYNC_BIND_URI", ".", "insert", "(", "0", ",", "uri", ")", "self", ".", "working_adldap_sync", "=", "adldap_sync", "return", "(", "uri", ",", "results", ")", "# Return both the LDAP server URI used and the request. This is for incremental sync purposes", "#if not connected correctly, raise error", "raise" ]
Query the configured LDAP server with the provided search filter and attribute list.
[ "Query", "the", "configured", "LDAP", "server", "with", "the", "provided", "search", "filter", "and", "attribute", "list", "." ]
f6be226a4fb2a433d22e95043bd656ce902f8254
https://github.com/marchete/django-adldap-sync/blob/f6be226a4fb2a433d22e95043bd656ce902f8254/adldap_sync/management/commands/syncldap.py#L688-L735
train
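The bare raise at the end of ldap_search executes with no active exception (the except block above was already left via continue on each failed URI), so on Python 3 it surfaces as "RuntimeError: No active exception to re-raise". Raising an explicit error would name the failure; a minimal sketch of the failover shape (the exception class and placeholder search are invented for illustration):

class AllServersUnreachable(Exception):
    """No configured LDAP URI accepted the bind."""

def search_with_failover(uris):
    for uri in uris:
        try:
            return uri, []  # placeholder for the real bind-and-search
        except Exception:
            continue        # try the next server in the list
    raise AllServersUnreachable('could not bind to any configured LDAP URI')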
mjirik/imtools
imtools/datasets.py
sliver_reader
def sliver_reader(filename_end_mask="*[0-9].mhd", sliver_reference_dir="~/data/medical/orig/sliver07/training/", read_orig=True, read_seg=False): """ Generator for reading sliver data from directory structure. :param filename_end_mask: file selection can be controlled with this parameter :param sliver_reference_dir: directory with sliver .mhd and .raw files :param read_orig: read image data if is set True :param read_seg: read segmentation data if is set True :return: numeric_label, vs_mm, oname, orig_data, rname, ref_data """ sliver_reference_dir = op.expanduser(sliver_reference_dir) orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask) ref_fnames = glob.glob(sliver_reference_dir + "*seg"+ filename_end_mask) orig_fnames.sort() ref_fnames.sort() output = [] for i in range(0, len(orig_fnames)): oname = orig_fnames[i] rname = ref_fnames[i] vs_mm = None ref_data= None orig_data = None if read_orig: orig_data, metadata = io3d.datareader.read(oname) vs_mm = metadata['voxelsize_mm'] if read_seg: ref_data, metadata = io3d.datareader.read(rname) vs_mm = metadata['voxelsize_mm'] import re numeric_label = re.search(".*g(\d+)", oname).group(1) out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data) yield out
python
def sliver_reader(filename_end_mask="*[0-9].mhd", sliver_reference_dir="~/data/medical/orig/sliver07/training/", read_orig=True, read_seg=False): """ Generator for reading sliver data from directory structure. :param filename_end_mask: file selection can be controlled with this parameter :param sliver_reference_dir: directory with sliver .mhd and .raw files :param read_orig: read image data if is set True :param read_seg: read segmentation data if is set True :return: numeric_label, vs_mm, oname, orig_data, rname, ref_data """ sliver_reference_dir = op.expanduser(sliver_reference_dir) orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask) ref_fnames = glob.glob(sliver_reference_dir + "*seg"+ filename_end_mask) orig_fnames.sort() ref_fnames.sort() output = [] for i in range(0, len(orig_fnames)): oname = orig_fnames[i] rname = ref_fnames[i] vs_mm = None ref_data= None orig_data = None if read_orig: orig_data, metadata = io3d.datareader.read(oname) vs_mm = metadata['voxelsize_mm'] if read_seg: ref_data, metadata = io3d.datareader.read(rname) vs_mm = metadata['voxelsize_mm'] import re numeric_label = re.search(".*g(\d+)", oname).group(1) out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data) yield out
[ "def", "sliver_reader", "(", "filename_end_mask", "=", "\"*[0-9].mhd\"", ",", "sliver_reference_dir", "=", "\"~/data/medical/orig/sliver07/training/\"", ",", "read_orig", "=", "True", ",", "read_seg", "=", "False", ")", ":", "sliver_reference_dir", "=", "op", ".", "expanduser", "(", "sliver_reference_dir", ")", "orig_fnames", "=", "glob", ".", "glob", "(", "sliver_reference_dir", "+", "\"*orig\"", "+", "filename_end_mask", ")", "ref_fnames", "=", "glob", ".", "glob", "(", "sliver_reference_dir", "+", "\"*seg\"", "+", "filename_end_mask", ")", "orig_fnames", ".", "sort", "(", ")", "ref_fnames", ".", "sort", "(", ")", "output", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "orig_fnames", ")", ")", ":", "oname", "=", "orig_fnames", "[", "i", "]", "rname", "=", "ref_fnames", "[", "i", "]", "vs_mm", "=", "None", "ref_data", "=", "None", "orig_data", "=", "None", "if", "read_orig", ":", "orig_data", ",", "metadata", "=", "io3d", ".", "datareader", ".", "read", "(", "oname", ")", "vs_mm", "=", "metadata", "[", "'voxelsize_mm'", "]", "if", "read_seg", ":", "ref_data", ",", "metadata", "=", "io3d", ".", "datareader", ".", "read", "(", "rname", ")", "vs_mm", "=", "metadata", "[", "'voxelsize_mm'", "]", "import", "re", "numeric_label", "=", "re", ".", "search", "(", "\".*g(\\d+)\"", ",", "oname", ")", ".", "group", "(", "1", ")", "out", "=", "(", "numeric_label", ",", "vs_mm", ",", "oname", ",", "orig_data", ",", "rname", ",", "ref_data", ")", "yield", "out" ]
Generator for reading sliver data from directory structure. :param filename_end_mask: file selection can be controlled with this parameter :param sliver_reference_dir: directory with sliver .mhd and .raw files :param read_orig: read image data if set to True :param read_seg: read segmentation data if set to True :return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
[ "Generator", "for", "reading", "sliver", "data", "from", "directory", "structure", "." ]
eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/datasets.py#L14-L47
train
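A usage sketch for the generator; it assumes the SLIVER07 training set is unpacked under the default directory, and here only the segmentations are loaded:

from imtools.datasets import sliver_reader

for label, vs_mm, oname, orig, rname, ref in sliver_reader(read_orig=False,
                                                           read_seg=True):
    # orig is None because read_orig=False; ref is the segmentation volume.
    print(label, vs_mm, rname, None if ref is None else ref.shape)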
EVEprosper/ProsperCommon
prosper/common/flask_utils.py
make_gunicorn_config
def make_gunicorn_config( _gunicorn_config_path='', ): """makes gunicorn.conf file for launching in docker Notes: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ renders gunicorn.config (python) file in running dir looks for GUNICORN_{option} in environment vars Args: _gunicorn_config_path (str): TEST HOOK, path to dump file """ gunicorn_py = '''"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ """ from os import environ for key, value in environ.items(): if key.startswith('GUNICORN_'): gunicorn_key = key.split('_', 1)[1].lower() locals()[gunicorn_key] = value ''' gunicorn_file = 'gunicorn.conf' if _gunicorn_config_path: gunicorn_file = _gunicorn_config_path with open(gunicorn_file, 'w') as gunicorn_cfg: gunicorn_cfg.write(gunicorn_py)
python
def make_gunicorn_config( _gunicorn_config_path='', ): """makes gunicorn.conf file for launching in docker Notes: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ renders gunicorn.config (python) file in running dir looks for GUNICORN_{option} in environment vars Args: _gunicorn_config_path (str): TEST HOOK, path to dump file """ gunicorn_py = '''"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ """ from os import environ for key, value in environ.items(): if key.startswith('GUNICORN_'): gunicorn_key = key.split('_', 1)[1].lower() locals()[gunicorn_key] = value ''' gunicorn_file = 'gunicorn.conf' if _gunicorn_config_path: gunicorn_file = _gunicorn_config_path with open(gunicorn_file, 'w') as gunicorn_cfg: gunicorn_cfg.write(gunicorn_py)
[ "def", "make_gunicorn_config", "(", "_gunicorn_config_path", "=", "''", ",", ")", ":", "gunicorn_py", "=", "'''\"\"\"AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config\nBased off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/\n\"\"\"\nfrom os import environ\n\nfor key, value in environ.items():\n if key.startswith('GUNICORN_'):\n gunicorn_key = key.split('_', 1)[1].lower()\n locals()[gunicorn_key] = value\n\n'''", "gunicorn_file", "=", "'gunicorn.conf'", "if", "_gunicorn_config_path", ":", "gunicorn_file", "=", "_gunicorn_config_path", "with", "open", "(", "gunicorn_file", ",", "'w'", ")", "as", "gunicorn_cfg", ":", "gunicorn_cfg", ".", "write", "(", "gunicorn_py", ")" ]
makes gunicorn.conf file for launching in docker Notes: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ renders gunicorn.config (python) file in running dir looks for GUNICORN_{option} in environment vars Args: _gunicorn_config_path (str): TEST HOOK, path to dump file
[ "makes", "gunicorn", ".", "conf", "file", "for", "launching", "in", "docker" ]
bcada3b25420099e1f204db8d55eb268e7b4dc27
https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/flask_utils.py#L4-L34
train
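The generated gunicorn.conf copies every GUNICORN_* environment variable into a same-named lowercase setting, so configuration travels through the container environment. A sketch of the round trip (the application module name and the values are hypothetical):

import os
from prosper.common.flask_utils import make_gunicorn_config

os.environ['GUNICORN_WORKERS'] = '4'
os.environ['GUNICORN_BIND'] = '0.0.0.0:8000'

make_gunicorn_config()  # writes gunicorn.conf in the working directory
# At startup the config file sets workers = '4' and bind = '0.0.0.0:8000':
#     gunicorn --config gunicorn.conf myapp:app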
polysquare/cmake-ast
cmakeast/printer.py
_print_details
def _print_details(extra=None): """Return a function that prints node details.""" def print_node_handler(name, node, depth): """Standard printer for a node.""" line = "{0}{1} {2} ({3}:{4})".format(depth, (" " * depth), name, node.line, node.col) if extra is not None: line += " [{0}]".format(extra(node)) sys.stdout.write(line + "\n") return print_node_handler
python
def _print_details(extra=None): """Return a function that prints node details.""" def print_node_handler(name, node, depth): """Standard printer for a node.""" line = "{0}{1} {2} ({3}:{4})".format(depth, (" " * depth), name, node.line, node.col) if extra is not None: line += " [{0}]".format(extra(node)) sys.stdout.write(line + "\n") return print_node_handler
[ "def", "_print_details", "(", "extra", "=", "None", ")", ":", "def", "print_node_handler", "(", "name", ",", "node", ",", "depth", ")", ":", "\"\"\"Standard printer for a node.\"\"\"", "line", "=", "\"{0}{1} {2} ({3}:{4})\"", ".", "format", "(", "depth", ",", "(", "\" \"", "*", "depth", ")", ",", "name", ",", "node", ".", "line", ",", "node", ".", "col", ")", "if", "extra", "is", "not", "None", ":", "line", "+=", "\" [{0}]\"", ".", "format", "(", "extra", "(", "node", ")", ")", "sys", ".", "stdout", ".", "write", "(", "line", "+", "\"\\n\"", ")", "return", "print_node_handler" ]
Return a function that prints node details.
[ "Return", "a", "function", "that", "prints", "node", "details", "." ]
431a32d595d76f1f8f993eb6ddcc79effbadff9d
https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/printer.py#L24-L38
train
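_print_details is a handler factory: it closes over the optional extra callback so each node type can append its own detail to a common depth/name/line/column line. A tiny stand-in node shows the output shape (FakeNode is invented for illustration, and the private helper is imported here only to demonstrate it):

from cmakeast.printer import _print_details

class FakeNode:
    line, col, name = 3, 1, 'add_executable'

handler = _print_details(extra=lambda n: n.name)
handler('FunctionCall', FakeNode(), 2)
# -> 2   FunctionCall (3:1) [add_executable]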
polysquare/cmake-ast
cmakeast/printer.py
do_print
def do_print(filename): """Print the AST of filename.""" with open(filename) as cmake_file: body = ast.parse(cmake_file.read()) word_print = _print_details(lambda n: "{0} {1}".format(n.type, n.contents)) ast_visitor.recurse(body, while_stmnt=_print_details(), foreach=_print_details(), function_def=_print_details(), macro_def=_print_details(), if_block=_print_details(), if_stmnt=_print_details(), elseif_stmnt=_print_details(), else_stmnt=_print_details(), function_call=_print_details(lambda n: n.name), word=word_print)
python
def do_print(filename): """Print the AST of filename.""" with open(filename) as cmake_file: body = ast.parse(cmake_file.read()) word_print = _print_details(lambda n: "{0} {1}".format(n.type, n.contents)) ast_visitor.recurse(body, while_stmnt=_print_details(), foreach=_print_details(), function_def=_print_details(), macro_def=_print_details(), if_block=_print_details(), if_stmnt=_print_details(), elseif_stmnt=_print_details(), else_stmnt=_print_details(), function_call=_print_details(lambda n: n.name), word=word_print)
[ "def", "do_print", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "cmake_file", ":", "body", "=", "ast", ".", "parse", "(", "cmake_file", ".", "read", "(", ")", ")", "word_print", "=", "_print_details", "(", "lambda", "n", ":", "\"{0} {1}\"", ".", "format", "(", "n", ".", "type", ",", "n", ".", "contents", ")", ")", "ast_visitor", ".", "recurse", "(", "body", ",", "while_stmnt", "=", "_print_details", "(", ")", ",", "foreach", "=", "_print_details", "(", ")", ",", "function_def", "=", "_print_details", "(", ")", ",", "macro_def", "=", "_print_details", "(", ")", ",", "if_block", "=", "_print_details", "(", ")", ",", "if_stmnt", "=", "_print_details", "(", ")", ",", "elseif_stmnt", "=", "_print_details", "(", ")", ",", "else_stmnt", "=", "_print_details", "(", ")", ",", "function_call", "=", "_print_details", "(", "lambda", "n", ":", "n", ".", "name", ")", ",", "word", "=", "word_print", ")" ]
Print the AST of filename.
[ "Print", "the", "AST", "of", "filename", "." ]
431a32d595d76f1f8f993eb6ddcc79effbadff9d
https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/printer.py#L41-L58
train
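And a usage sketch for do_print, writing a throwaway CMake file first (the path and contents are hypothetical); every recognized construct is echoed with its depth, line and column:

from cmakeast.printer import do_print

with open('CMakeLists.txt', 'w') as cmake_file:
    cmake_file.write('project (Demo)\nadd_executable (demo main.c)\n')

do_print('CMakeLists.txt')  # each node prints as: depth, name, (line:col)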