Dataset schema (field: type, with observed value-length ranges):
repo: string (7 to 55 chars)
path: string (4 to 127 chars)
func_name: string (1 to 88 chars)
original_string: string (75 to 19.8k chars)
language: string (1 distinct value)
code: string (75 to 19.8k chars)
code_tokens: sequence
docstring: string (3 to 17.3k chars)
docstring_tokens: sequence
sha: string (40 chars)
url: string (87 to 242 chars)
partition: string (1 distinct value)
google/budou
budou/parser.py
get_parser
python
def get_parser(segmenter, **options):
  """Gets a parser.

  Args:
    segmenter (str): Segmenter to use.
    options (:obj:`dict`, optional): Optional settings.

  Returns:
    Parser (:obj:`budou.parser.Parser`)

  Raises:
    ValueError: If unsupported segmenter is specified.
  """
  if segmenter == 'nlapi':
    return NLAPIParser(**options)
  elif segmenter == 'mecab':
    return MecabParser()
  elif segmenter == 'tinysegmenter':
    return TinysegmenterParser()
  else:
    raise ValueError('Segmenter {} is not supported.'.format(segmenter))
[ "def", "get_parser", "(", "segmenter", ",", "*", "*", "options", ")", ":", "if", "segmenter", "==", "'nlapi'", ":", "return", "NLAPIParser", "(", "*", "*", "options", ")", "elif", "segmenter", "==", "'mecab'", ":", "return", "MecabParser", "(", ")", "elif", "segmenter", "==", "'tinysegmenter'", ":", "return", "TinysegmenterParser", "(", ")", "else", ":", "raise", "ValueError", "(", "'Segmenter {} is not supported.'", ".", "format", "(", "segmenter", ")", ")" ]
Gets a parser. Args: segmenter (str): Segmenter to use. options (:obj:`dict`, optional): Optional settings. Returns: Parser (:obj:`budou.parser.Parser`) Raises: ValueError: If unsupported segmenter is specified.
[ "Gets", "a", "parser", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L129-L149
train
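A quick usage sketch for the factory above, under the assumption that the module is importable as budou (the deprecation message in authenticate() below refers to budou.get_parser(), and also shows that the 'nlapi' segmenter accepts a credentials_path option); the unsupported segmenter name is invented.

import budou

parser = budou.get_parser('mecab')  # MeCab needs no extra options
try:
    budou.get_parser('kytea')  # hypothetical unsupported name
except ValueError as err:
    print(err)  # Segmenter kytea is not supported.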
google/budou
budou/parser.py
preprocess
python
def preprocess(source):
  """Removes unnecessary break lines and white spaces.

  Args:
    source (str): Input sentence.

  Returns:
    Preprocessed sentence. (str)
  """
  doc = html5lib.parseFragment(source)
  source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
  source = source.replace(u'\n', u'').strip()
  source = re.sub(r'\s\s+', u' ', source)
  return source
[ "def", "preprocess", "(", "source", ")", ":", "doc", "=", "html5lib", ".", "parseFragment", "(", "source", ")", "source", "=", "ET", ".", "tostring", "(", "doc", ",", "encoding", "=", "'utf-8'", ",", "method", "=", "'text'", ")", ".", "decode", "(", "'utf-8'", ")", "source", "=", "source", ".", "replace", "(", "u'\\n'", ",", "u''", ")", ".", "strip", "(", ")", "source", "=", "re", ".", "sub", "(", "r'\\s\\s+'", ",", "u' '", ",", "source", ")", "return", "source" ]
Removes unnecessary break lines and white spaces. Args: source (str): Input sentence. Returns: Preprocessed sentence. (str)
[ "Removes", "unnecessary", "break", "lines", "and", "white", "spaces", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L169-L182
train
google/budou
budou/budou.py
main
python
def main():
  """Budou main method for the command line tool.
  """
  args = docopt(__doc__)
  if args['--version']:
    print(__version__)
    sys.exit()
  result = parse(
      args['<source>'],
      segmenter=args['--segmenter'],
      language=args['--language'],
      classname=args['--classname'])
  print(result['html_code'])
  sys.exit()
[ "def", "main", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ")", "if", "args", "[", "'--version'", "]", ":", "print", "(", "__version__", ")", "sys", ".", "exit", "(", ")", "result", "=", "parse", "(", "args", "[", "'<source>'", "]", ",", "segmenter", "=", "args", "[", "'--segmenter'", "]", ",", "language", "=", "args", "[", "'--language'", "]", ",", "classname", "=", "args", "[", "'--classname'", "]", ")", "print", "(", "result", "[", "'html_code'", "]", ")", "sys", ".", "exit", "(", ")" ]
Budou main method for the command line tool.
[ "Budou", "main", "method", "for", "the", "command", "line", "tool", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L48-L62
train
google/budou
budou/budou.py
parse
python
def parse(source, segmenter='nlapi', language=None, max_length=None,
          classname=None, attributes=None, **kwargs):
  """Parses input source.

  Args:
    source (str): Input source to process.
    segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
    language (:obj:`str`, optional): Language code.
    max_length (:obj:`int`, optional): Maximum length of a chunk.
    classname (:obj:`str`, optional): Class name of output SPAN tags.
    attributes (:obj:`dict`, optional): Attributes for output SPAN tags.

  Returns:
    Results in a dict. :code:`chunks` holds a list of chunks
    (:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output
    HTML code.
  """
  parser = get_parser(segmenter, **kwargs)
  return parser.parse(
      source,
      language=language,
      max_length=max_length,
      classname=classname,
      attributes=attributes)
[ "def", "parse", "(", "source", ",", "segmenter", "=", "'nlapi'", ",", "language", "=", "None", ",", "max_length", "=", "None", ",", "classname", "=", "None", ",", "attributes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "parser", "=", "get_parser", "(", "segmenter", ",", "*", "*", "kwargs", ")", "return", "parser", ".", "parse", "(", "source", ",", "language", "=", "language", ",", "max_length", "=", "max_length", ",", "classname", "=", "classname", ",", "attributes", "=", "attributes", ")" ]
Parses input source. Args: source (str): Input source to process. segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi]. language (:obj:`str`, optional): Language code. max_length (:obj:`int`, optional): Maximum length of a chunk. classname (:obj:`str`, optional): Class name of output SPAN tags. attributes (:obj:`dict`, optional): Attributes for output SPAN tags. Returns: Results in a dict. :code:`chunks` holds a list of chunks (:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML code.
[ "Parses", "input", "source", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L64-L84
train
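A minimal end-to-end sketch of parse(); the sentence and class name are illustrative, and the 'mecab' segmenter is chosen so no API credentials are needed (this assumes MeCab and its Python bindings are installed).

import budou

result = budou.parse(u'今日は良い天気です。', segmenter='mecab',
                     classname='chunk')
print(result['html_code'])  # chunks wrapped in <span class="chunk"> tags
for chunk in result['chunks']:
    print(chunk.word)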
google/budou
budou/budou.py
authenticate
python
def authenticate(json_path=None):
  """Gets a Natural Language API parser by authenticating the API.

  **This method is deprecated.** Please use :obj:`budou.get_parser` to
  obtain a parser instead.

  Args:
    json_path (:obj:`str`, optional): The file path to the service account's
        credentials.

  Returns:
    Parser. (:obj:`budou.parser.NLAPIParser`)
  """
  msg = ('budou.authentication() is deprecated. '
         'Please use budou.get_parser() to obtain a parser instead.')
  warnings.warn(msg, DeprecationWarning)
  parser = get_parser('nlapi', credentials_path=json_path)
  return parser
[ "def", "authenticate", "(", "json_path", "=", "None", ")", ":", "msg", "=", "(", "'budou.authentication() is deprecated. '", "'Please use budou.get_parser() to obtain a parser instead.'", ")", "warnings", ".", "warn", "(", "msg", ",", "DeprecationWarning", ")", "parser", "=", "get_parser", "(", "'nlapi'", ",", "credentials_path", "=", "json_path", ")", "return", "parser" ]
Gets a Natural Language API parser by authenticating the API. **This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a parser instead. Args: json_path (:obj:`str`, optional): The file path to the service account's credentials. Returns: Parser. (:obj:`budou.parser.NLAPIParser`)
[ "Gets", "a", "Natural", "Language", "API", "parser", "by", "authenticating", "the", "API", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L86-L104
train
google/budou
budou/nlapisegmenter.py
_memorize
python
def _memorize(func):
  """Decorator to cache the given function's output.
  """
  def _wrapper(self, *args, **kwargs):
    """Wrapper to cache the function's output.
    """
    if self.use_cache:
      cache = load_cache(self.cache_filename)
      original_key = ':'.join([
          self.__class__.__name__,
          func.__name__,
          '_'.join([str(a) for a in args]),
          '_'.join([str(w) for w in kwargs.values()])])
      cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
      cached_val = cache.get(cache_key)
      if cached_val:
        return cached_val
    val = func(self, *args, **kwargs)
    if self.use_cache:
      cache.set(cache_key, val)
    return val
  return _wrapper
[ "def", "_memorize", "(", "func", ")", ":", "def", "_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper to cache the function's output.\n \"\"\"", "if", "self", ".", "use_cache", ":", "cache", "=", "load_cache", "(", "self", ".", "cache_filename", ")", "original_key", "=", "':'", ".", "join", "(", "[", "self", ".", "__class__", ".", "__name__", ",", "func", ".", "__name__", ",", "'_'", ".", "join", "(", "[", "str", "(", "a", ")", "for", "a", "in", "args", "]", ")", ",", "'_'", ".", "join", "(", "[", "str", "(", "w", ")", "for", "w", "in", "kwargs", ".", "values", "(", ")", "]", ")", "]", ")", "cache_key", "=", "hashlib", ".", "md5", "(", "original_key", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "cached_val", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "cached_val", ":", "return", "cached_val", "val", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "self", ".", "use_cache", ":", "cache", ".", "set", "(", "cache_key", ",", "val", ")", "return", "val", "return", "_wrapper" ]
Decorator to cache the given function's output.
[ "Decorator", "to", "cache", "the", "given", "function", "s", "output", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L62-L84
train
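A self-contained, dict-backed simplification of the decorator above that keeps its key scheme (class name, function name, stringified args and kwarg values, hashed with MD5); the class and method names are hypothetical.

import hashlib

def memorize(func):
    # Simplified stand-in for budou's _memorize: same cache-key scheme,
    # but backed by a plain dict instead of a pickle-file cache.
    _cache = {}
    def _wrapper(self, *args, **kwargs):
        if self.use_cache:
            original_key = ':'.join([
                self.__class__.__name__, func.__name__,
                '_'.join([str(a) for a in args]),
                '_'.join([str(w) for w in kwargs.values()])])
            cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
            if cache_key in _cache:
                return _cache[cache_key]
        val = func(self, *args, **kwargs)
        if self.use_cache:
            _cache[cache_key] = val
        return val
    return _wrapper

class FakeSegmenter:
    use_cache = True

    @memorize
    def segment(self, text):
        print('cache miss, computing...')
        return text.split()

s = FakeSegmenter()
print(s.segment('a b'))  # prints "cache miss..." then ['a', 'b']
print(s.segment('a b'))  # served from the cache, no "cache miss"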
google/budou
budou/nlapisegmenter.py
NLAPISegmenter._get_source_chunks
python
def _get_source_chunks(self, input_text, language=None):
  """Returns a chunk list retrieved from Syntax Analysis results.

  Args:
    input_text (str): Text to annotate.
    language (:obj:`str`, optional): Language of the text.

  Returns:
    A chunk list. (:obj:`budou.chunk.ChunkList`)
  """
  chunks = ChunkList()
  seek = 0
  result = self._get_annotations(input_text, language=language)
  tokens = result['tokens']
  language = result['language']
  for i, token in enumerate(tokens):
    word = token['text']['content']
    begin_offset = token['text']['beginOffset']
    label = token['dependencyEdge']['label']
    pos = token['partOfSpeech']['tag']
    if begin_offset > seek:
      chunks.append(Chunk.space())
      seek = begin_offset
    chunk = Chunk(word, pos, label)
    if chunk.label in _DEPENDENT_LABEL:
      # Determining concatenating direction based on syntax dependency.
      chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
    if chunk.is_punct():
      chunk.dependency = chunk.is_open_punct()
    chunks.append(chunk)
    seek += len(word)
  return chunks, language
[ "def", "_get_source_chunks", "(", "self", ",", "input_text", ",", "language", "=", "None", ")", ":", "chunks", "=", "ChunkList", "(", ")", "seek", "=", "0", "result", "=", "self", ".", "_get_annotations", "(", "input_text", ",", "language", "=", "language", ")", "tokens", "=", "result", "[", "'tokens'", "]", "language", "=", "result", "[", "'language'", "]", "for", "i", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "word", "=", "token", "[", "'text'", "]", "[", "'content'", "]", "begin_offset", "=", "token", "[", "'text'", "]", "[", "'beginOffset'", "]", "label", "=", "token", "[", "'dependencyEdge'", "]", "[", "'label'", "]", "pos", "=", "token", "[", "'partOfSpeech'", "]", "[", "'tag'", "]", "if", "begin_offset", ">", "seek", ":", "chunks", ".", "append", "(", "Chunk", ".", "space", "(", ")", ")", "seek", "=", "begin_offset", "chunk", "=", "Chunk", "(", "word", ",", "pos", ",", "label", ")", "if", "chunk", ".", "label", "in", "_DEPENDENT_LABEL", ":", "# Determining concatenating direction based on syntax dependency.", "chunk", ".", "dependency", "=", "i", "<", "token", "[", "'dependencyEdge'", "]", "[", "'headTokenIndex'", "]", "if", "chunk", ".", "is_punct", "(", ")", ":", "chunk", ".", "dependency", "=", "chunk", ".", "is_open_punct", "(", ")", "chunks", ".", "append", "(", "chunk", ")", "seek", "+=", "len", "(", "word", ")", "return", "chunks", ",", "language" ]
Returns a chunk list retrieved from Syntax Analysis results. Args: input_text (str): Text to annotate. language (:obj:`str`, optional): Language of the text. Returns: A chunk list. (:obj:`budou.chunk.ChunkList`)
[ "Returns", "a", "chunk", "list", "retrieved", "from", "Syntax", "Analysis", "results", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L170-L201
train
google/budou
budou/nlapisegmenter.py
NLAPISegmenter._group_chunks_by_entities
python
def _group_chunks_by_entities(self, chunks, entities):
  """Groups chunks by entities retrieved from NL API Entity Analysis.

  Args:
    chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
    entities (:obj:`list` of :obj:`dict`): List of entities.

  Returns:
    A chunk list. (:obj:`budou.chunk.ChunkList`)
  """
  for entity in entities:
    chunks_to_concat = chunks.get_overlaps(
        entity['beginOffset'], len(entity['content']))
    if not chunks_to_concat:
      continue
    new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
    new_chunk = Chunk(new_chunk_word)
    chunks.swap(chunks_to_concat, new_chunk)
  return chunks
[ "def", "_group_chunks_by_entities", "(", "self", ",", "chunks", ",", "entities", ")", ":", "for", "entity", "in", "entities", ":", "chunks_to_concat", "=", "chunks", ".", "get_overlaps", "(", "entity", "[", "'beginOffset'", "]", ",", "len", "(", "entity", "[", "'content'", "]", ")", ")", "if", "not", "chunks_to_concat", ":", "continue", "new_chunk_word", "=", "u''", ".", "join", "(", "[", "chunk", ".", "word", "for", "chunk", "in", "chunks_to_concat", "]", ")", "new_chunk", "=", "Chunk", "(", "new_chunk_word", ")", "chunks", ".", "swap", "(", "chunks_to_concat", ",", "new_chunk", ")", "return", "chunks" ]
Groups chunks by entities retrieved from NL API Entity Analysis. Args: chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed. entities (:obj:`list` of :obj:`dict`): List of entities. Returns: A chunk list. (:obj:`budou.chunk.ChunkList`)
[ "Groups", "chunks", "by", "entities", "retrieved", "from", "NL", "API", "Entity", "Analysis", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L203-L221
train
google/budou
budou/nlapisegmenter.py
NLAPISegmenter._get_annotations
python
def _get_annotations(self, text, language=''):
  """Returns the list of annotations retrieved from the given text.

  Args:
    text (str): Input text.
    language (:obj:`str`, optional): Language code.

  Returns:
    Results in a dictionary. :code:`tokens` contains the list of annotations
    and :code:`language` contains the inferred language from the input.
  """
  body = {
      'document': {
          'type': 'PLAIN_TEXT',
          'content': text,
      },
      'features': {
          'extract_syntax': True,
      },
      'encodingType': 'UTF32',
  }
  if language:
    body['document']['language'] = language

  request = self.service.documents().annotateText(body=body)
  response = request.execute()
  tokens = response.get('tokens', [])
  language = response.get('language')
  return {'tokens': tokens, 'language': language}
[ "def", "_get_annotations", "(", "self", ",", "text", ",", "language", "=", "''", ")", ":", "body", "=", "{", "'document'", ":", "{", "'type'", ":", "'PLAIN_TEXT'", ",", "'content'", ":", "text", ",", "}", ",", "'features'", ":", "{", "'extract_syntax'", ":", "True", ",", "}", ",", "'encodingType'", ":", "'UTF32'", ",", "}", "if", "language", ":", "body", "[", "'document'", "]", "[", "'language'", "]", "=", "language", "request", "=", "self", ".", "service", ".", "documents", "(", ")", ".", "annotateText", "(", "body", "=", "body", ")", "response", "=", "request", ".", "execute", "(", ")", "tokens", "=", "response", ".", "get", "(", "'tokens'", ",", "[", "]", ")", "language", "=", "response", ".", "get", "(", "'language'", ")", "return", "{", "'tokens'", ":", "tokens", ",", "'language'", ":", "language", "}" ]
Returns the list of annotations retrieved from the given text. Args: text (str): Input text. language (:obj:`str`, optional): Language code. Returns: Results in a dictionary. :code:`tokens` contains the list of annotations and :code:`language` contains the inferred language from the input.
[ "Returns", "the", "list", "of", "annotations", "retrieved", "from", "the", "given", "text", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L224-L253
train
google/budou
budou/nlapisegmenter.py
NLAPISegmenter._get_entities
python
def _get_entities(self, text, language=''):
  """Returns the list of entities retrieved from the given text.

  Args:
    text (str): Input text.
    language (:obj:`str`, optional): Language code.

  Returns:
    List of entities.
  """
  body = {
      'document': {
          'type': 'PLAIN_TEXT',
          'content': text,
      },
      'encodingType': 'UTF32',
  }
  if language:
    body['document']['language'] = language

  request = self.service.documents().analyzeEntities(body=body)
  response = request.execute()
  result = []
  for entity in response.get('entities', []):
    mentions = entity.get('mentions', [])
    if not mentions:
      continue
    entity_text = mentions[0]['text']
    offset = entity_text['beginOffset']
    for word in entity_text['content'].split():
      result.append({'content': word, 'beginOffset': offset})
      offset += len(word)
  return result
[ "def", "_get_entities", "(", "self", ",", "text", ",", "language", "=", "''", ")", ":", "body", "=", "{", "'document'", ":", "{", "'type'", ":", "'PLAIN_TEXT'", ",", "'content'", ":", "text", ",", "}", ",", "'encodingType'", ":", "'UTF32'", ",", "}", "if", "language", ":", "body", "[", "'document'", "]", "[", "'language'", "]", "=", "language", "request", "=", "self", ".", "service", ".", "documents", "(", ")", ".", "analyzeEntities", "(", "body", "=", "body", ")", "response", "=", "request", ".", "execute", "(", ")", "result", "=", "[", "]", "for", "entity", "in", "response", ".", "get", "(", "'entities'", ",", "[", "]", ")", ":", "mentions", "=", "entity", ".", "get", "(", "'mentions'", ",", "[", "]", ")", "if", "not", "mentions", ":", "continue", "entity_text", "=", "mentions", "[", "0", "]", "[", "'text'", "]", "offset", "=", "entity_text", "[", "'beginOffset'", "]", "for", "word", "in", "entity_text", "[", "'content'", "]", ".", "split", "(", ")", ":", "result", ".", "append", "(", "{", "'content'", ":", "word", ",", "'beginOffset'", ":", "offset", "}", ")", "offset", "+=", "len", "(", "word", ")", "return", "result" ]
Returns the list of entities retrieved from the given text. Args: text (str): Input text. language (:obj:`str`, optional): Language code. Returns: List of entities.
[ "Returns", "the", "list", "of", "entities", "retrieved", "from", "the", "given", "text", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L256-L288
train
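The per-word flattening at the end of _get_entities is easy to check in isolation; this snippet replays its offset arithmetic on a fabricated mention. Note that the offset advances by word length only, ignoring the spaces between words, which matches the space handling in ChunkList.get_overlaps below.

entity_text = {'content': 'New York City', 'beginOffset': 10}
result = []
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
    result.append({'content': word, 'beginOffset': offset})
    offset += len(word)
print(result)
# [{'content': 'New', 'beginOffset': 10},
#  {'content': 'York', 'beginOffset': 13},
#  {'content': 'City', 'beginOffset': 17}]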
google/budou
budou/cachefactory.py
PickleCache.get
python
def get(self, key):
  """Gets a value by a key.

  Args:
    key (str): Key to retrieve the value.

  Returns:
    Retrieved value.
  """
  self._create_file_if_none_exists()
  with open(self.filename, 'rb') as file_object:
    cache_pickle = pickle.load(file_object)
  val = cache_pickle.get(key, None)
  return val
[ "def", "get", "(", "self", ",", "key", ")", ":", "self", ".", "_create_file_if_none_exists", "(", ")", "with", "open", "(", "self", ".", "filename", ",", "'rb'", ")", "as", "file_object", ":", "cache_pickle", "=", "pickle", ".", "load", "(", "file_object", ")", "val", "=", "cache_pickle", ".", "get", "(", "key", ",", "None", ")", "return", "val" ]
Gets a value by a key. Args: key (str): Key to retrieve the value. Returns: Retrieved value.
[ "Gets", "a", "value", "by", "a", "key", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L93-L105
train
google/budou
budou/cachefactory.py
PickleCache.set
python
def set(self, key, val):
  """Sets a value in a key.

  Args:
    key (str): Key for the value.
    val: Value to set.

  Returns:
    Retrieved value.
  """
  self._create_file_if_none_exists()
  with open(self.filename, 'r+b') as file_object:
    cache_pickle = pickle.load(file_object)
    cache_pickle[key] = val
    file_object.seek(0)
    pickle.dump(cache_pickle, file_object)
[ "def", "set", "(", "self", ",", "key", ",", "val", ")", ":", "self", ".", "_create_file_if_none_exists", "(", ")", "with", "open", "(", "self", ".", "filename", ",", "'r+b'", ")", "as", "file_object", ":", "cache_pickle", "=", "pickle", ".", "load", "(", "file_object", ")", "cache_pickle", "[", "key", "]", "=", "val", "file_object", ".", "seek", "(", "0", ")", "pickle", ".", "dump", "(", "cache_pickle", ",", "file_object", ")" ]
Sets a value in a key. Args: key (str): Key for the value. val: Value to set. Returns: Retrieved value.
[ "Sets", "a", "value", "in", "a", "key", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L107-L122
train
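A standalone re-creation of the get/set round trip above, so the pattern can be run without the budou package; the file path and function names here are hypothetical. One caveat of the pattern: pickle.dump after seek(0) can leave trailing bytes from a longer earlier payload, but pickle.load only reads the first object in the file, so the round trip still works.

import os
import pickle

FILENAME = '/tmp/demo-cache.pickle'

def create_file_if_none_exists(filename):
    # Seed the cache file with an empty pickled dict.
    if not os.path.exists(filename):
        with open(filename, 'wb') as file_object:
            pickle.dump({}, file_object)

def cache_set(filename, key, val):
    create_file_if_none_exists(filename)
    with open(filename, 'r+b') as file_object:
        cache_pickle = pickle.load(file_object)
        cache_pickle[key] = val
        file_object.seek(0)
        pickle.dump(cache_pickle, file_object)

def cache_get(filename, key):
    create_file_if_none_exists(filename)
    with open(filename, 'rb') as file_object:
        return pickle.load(file_object).get(key, None)

cache_set(FILENAME, 'greeting', 'hello')
print(cache_get(FILENAME, 'greeting'))  # hello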
google/budou
budou/chunk.py
Chunk.serialize
python
def serialize(self):
  """Returns serialized chunk data in dictionary."""
  return {
      'word': self.word,
      'pos': self.pos,
      'label': self.label,
      'dependency': self.dependency,
      'has_cjk': self.has_cjk(),
  }
[ "def", "serialize", "(", "self", ")", ":", "return", "{", "'word'", ":", "self", ".", "word", ",", "'pos'", ":", "self", ".", "pos", ",", "'label'", ":", "self", ".", "label", ",", "'dependency'", ":", "self", ".", "dependency", ",", "'has_cjk'", ":", "self", ".", "has_cjk", "(", ")", ",", "}" ]
Returns serialized chunk data in dictionary.
[ "Returns", "serialized", "chunk", "data", "in", "dictionary", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L79-L87
train
google/budou
budou/chunk.py
Chunk.has_cjk
python
def has_cjk(self):
  """Checks if the word of the chunk contains CJK characters.

  This is using unicode codepoint ranges from
  https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149

  Returns:
    bool: True if the chunk has any CJK character.
  """
  cjk_codepoint_ranges = [
      (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
      (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
  for char in self.word:
    if any([start <= ord(char) <= end
            for start, end in cjk_codepoint_ranges]):
      return True
  return False
[ "def", "has_cjk", "(", "self", ")", ":", "cjk_codepoint_ranges", "=", "[", "(", "4352", ",", "4607", ")", ",", "(", "11904", ",", "42191", ")", ",", "(", "43072", ",", "43135", ")", ",", "(", "44032", ",", "55215", ")", ",", "(", "63744", ",", "64255", ")", ",", "(", "65072", ",", "65103", ")", ",", "(", "65381", ",", "65500", ")", ",", "(", "131072", ",", "196607", ")", "]", "for", "char", "in", "self", ".", "word", ":", "if", "any", "(", "[", "start", "<=", "ord", "(", "char", ")", "<=", "end", "for", "start", ",", "end", "in", "cjk_codepoint_ranges", "]", ")", ":", "return", "True", "return", "False" ]
Checks if the word of the chunk contains CJK characters. This is using unicode codepoint ranges from https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149 Returns: bool: True if the chunk has any CJK character.
[ "Checks", "if", "the", "word", "of", "the", "chunk", "contains", "CJK", "characters", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L119-L135
train
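The codepoint test is self-contained, so it can be exercised as a plain function; this sketch reuses the exact ranges from the method above on a free-standing word.

CJK_CODEPOINT_RANGES = [
    (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
    (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]

def has_cjk(word):
    # True if any character falls inside one of the CJK codepoint ranges.
    return any(start <= ord(char) <= end
               for char in word
               for start, end in CJK_CODEPOINT_RANGES)

print(has_cjk(u'今日は'))  # True
print(has_cjk('hello'))    # False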
google/budou
budou/chunk.py
ChunkList.get_overlaps
python
def get_overlaps(self, offset, length):
  """Returns chunks overlapped with the given range.

  Args:
    offset (int): Begin offset of the range.
    length (int): Length of the range.

  Returns:
    Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
  """
  # In case entity's offset points to a space just before the entity.
  if ''.join([chunk.word for chunk in self])[offset] == ' ':
    offset += 1
  index = 0
  result = ChunkList()
  for chunk in self:
    if offset < index + len(chunk.word) and index < offset + length:
      result.append(chunk)
    index += len(chunk.word)
  return result
[ "def", "get_overlaps", "(", "self", ",", "offset", ",", "length", ")", ":", "# In case entity's offset points to a space just before the entity.", "if", "''", ".", "join", "(", "[", "chunk", ".", "word", "for", "chunk", "in", "self", "]", ")", "[", "offset", "]", "==", "' '", ":", "offset", "+=", "1", "index", "=", "0", "result", "=", "ChunkList", "(", ")", "for", "chunk", "in", "self", ":", "if", "offset", "<", "index", "+", "len", "(", "chunk", ".", "word", ")", "and", "index", "<", "offset", "+", "length", ":", "result", ".", "append", "(", "chunk", ")", "index", "+=", "len", "(", "chunk", ".", "word", ")", "return", "result" ]
Returns chunks overlapped with the given range. Args: offset (int): Begin offset of the range. length (int): Length of the range. Returns: Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
[ "Returns", "chunks", "overlapped", "with", "the", "given", "range", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L189-L208
train
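A worked trace of the overlap test, modeling chunks as plain strings with invented words: a chunk is kept exactly when it starts before the range ends and ends after the range starts.

# Chunk spans: 'Tokyo' covers 0-5, ' ' covers 5-6, 'Tower' covers 6-11.
chunks = ['Tokyo', ' ', 'Tower']
offset, length = 6, 5  # entity range [6, 11)
index = 0
result = []
for word in chunks:
    if offset < index + len(word) and index < offset + length:
        result.append(word)
    index += len(word)
print(result)  # ['Tower']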
google/budou
budou/chunk.py
ChunkList.swap
python
def swap(self, old_chunks, new_chunk):
  """Swaps old consecutive chunks with new chunk.

  Args:
    old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks
        to be removed.
    new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
  """
  indexes = [self.index(chunk) for chunk in old_chunks]
  del self[indexes[0]:indexes[-1] + 1]
  self.insert(indexes[0], new_chunk)
[ "def", "swap", "(", "self", ",", "old_chunks", ",", "new_chunk", ")", ":", "indexes", "=", "[", "self", ".", "index", "(", "chunk", ")", "for", "chunk", "in", "old_chunks", "]", "del", "self", "[", "indexes", "[", "0", "]", ":", "indexes", "[", "-", "1", "]", "+", "1", "]", "self", ".", "insert", "(", "indexes", "[", "0", "]", ",", "new_chunk", ")" ]
Swaps old consecutive chunks with new chunk. Args: old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to be removed. new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
[ "Swaps", "old", "consecutive", "chunks", "with", "new", "chunk", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L210-L220
train
google/budou
budou/chunk.py
ChunkList.resolve_dependencies
python
def resolve_dependencies(self):
  """Resolves chunk dependency by concatenating them.
  """
  self._concatenate_inner(True)
  self._concatenate_inner(False)
  self._insert_breaklines()
[ "def", "resolve_dependencies", "(", "self", ")", ":", "self", ".", "_concatenate_inner", "(", "True", ")", "self", ".", "_concatenate_inner", "(", "False", ")", "self", ".", "_insert_breaklines", "(", ")" ]
Resolves chunk dependency by concatenating them.
[ "Resolves", "chunk", "dependency", "by", "concatenating", "them", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L222-L227
train
google/budou
budou/chunk.py
ChunkList._concatenate_inner
python
def _concatenate_inner(self, direction):
  """Concatenates chunks based on each chunk's dependency.

  Args:
    direction (bool): Direction of concatenation process. True for forward.
  """
  tmp_bucket = []
  source_chunks = self if direction else self[::-1]
  target_chunks = ChunkList()
  for chunk in source_chunks:
    if (
        # if the chunk has matched dependency, do concatenation.
        chunk.dependency == direction or
        # if the chunk is SPACE, concatenate to the previous chunk.
        (direction is False and chunk.is_space())
    ):
      tmp_bucket.append(chunk)
      continue
    tmp_bucket.append(chunk)
    if not direction:
      tmp_bucket = tmp_bucket[::-1]
    new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
    new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
                      dependency=chunk.dependency)
    target_chunks.append(new_chunk)
    tmp_bucket = ChunkList()
  if tmp_bucket:
    target_chunks += tmp_bucket
  if not direction:
    target_chunks = target_chunks[::-1]
  self.list = target_chunks
[ "def", "_concatenate_inner", "(", "self", ",", "direction", ")", ":", "tmp_bucket", "=", "[", "]", "source_chunks", "=", "self", "if", "direction", "else", "self", "[", ":", ":", "-", "1", "]", "target_chunks", "=", "ChunkList", "(", ")", "for", "chunk", "in", "source_chunks", ":", "if", "(", "# if the chunk has matched dependency, do concatenation.", "chunk", ".", "dependency", "==", "direction", "or", "# if the chunk is SPACE, concatenate to the previous chunk.", "(", "direction", "is", "False", "and", "chunk", ".", "is_space", "(", ")", ")", ")", ":", "tmp_bucket", ".", "append", "(", "chunk", ")", "continue", "tmp_bucket", ".", "append", "(", "chunk", ")", "if", "not", "direction", ":", "tmp_bucket", "=", "tmp_bucket", "[", ":", ":", "-", "1", "]", "new_word", "=", "''", ".", "join", "(", "[", "tmp_chunk", ".", "word", "for", "tmp_chunk", "in", "tmp_bucket", "]", ")", "new_chunk", "=", "Chunk", "(", "new_word", ",", "pos", "=", "chunk", ".", "pos", ",", "label", "=", "chunk", ".", "label", ",", "dependency", "=", "chunk", ".", "dependency", ")", "target_chunks", ".", "append", "(", "new_chunk", ")", "tmp_bucket", "=", "ChunkList", "(", ")", "if", "tmp_bucket", ":", "target_chunks", "+=", "tmp_bucket", "if", "not", "direction", ":", "target_chunks", "=", "target_chunks", "[", ":", ":", "-", "1", "]", "self", ".", "list", "=", "target_chunks" ]
Concatenates chunks based on each chunk's dependency. Args: direction (bool): Direction of concatenation process. True for forward.
[ "Concatenates", "chunks", "based", "on", "each", "chunk", "s", "dependency", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L229-L259
train
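A simplified standalone version of the forward pass, modeling chunks as (word, dependency) pairs; a chunk whose dependency matches the direction is buffered until a non-dependent chunk closes the group. The example words are illustrative.

def concat_forward(chunks):
    # chunks: list of (word, dependency) pairs; forward direction only.
    bucket, out = [], []
    for word, dependency in chunks:
        bucket.append(word)
        if dependency is True:  # still depends on the next chunk
            continue
        out.append(''.join(bucket))  # flush the buffered group
        bucket = []
    out.extend(bucket)  # trailing chunks with unresolved dependency
    return out

print(concat_forward([(u'今日', True), (u'は', False), (u'良い', False)]))
# ['今日は', '良い']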
google/budou
budou/chunk.py
ChunkList._insert_breaklines
python
def _insert_breaklines(self):
  """Inserts a breakline instead of a trailing space if the chunk is in CJK.
  """
  target_chunks = ChunkList()
  for chunk in self:
    if chunk.word[-1] == ' ' and chunk.has_cjk():
      chunk.word = chunk.word[:-1]
      target_chunks.append(chunk)
      target_chunks.append(chunk.breakline())
    else:
      target_chunks.append(chunk)
  self.list = target_chunks
[ "def", "_insert_breaklines", "(", "self", ")", ":", "target_chunks", "=", "ChunkList", "(", ")", "for", "chunk", "in", "self", ":", "if", "chunk", ".", "word", "[", "-", "1", "]", "==", "' '", "and", "chunk", ".", "has_cjk", "(", ")", ":", "chunk", ".", "word", "=", "chunk", ".", "word", "[", ":", "-", "1", "]", "target_chunks", ".", "append", "(", "chunk", ")", "target_chunks", ".", "append", "(", "chunk", ".", "breakline", "(", ")", ")", "else", ":", "target_chunks", ".", "append", "(", "chunk", ")", "self", ".", "list", "=", "target_chunks" ]
Inserts a breakline instead of a trailing space if the chunk is in CJK.
[ "Inserts", "a", "breakline", "instead", "of", "a", "trailing", "space", "if", "the", "chunk", "is", "in", "CJK", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L261-L272
train
google/budou
budou/chunk.py
ChunkList.html_serialize
python
def html_serialize(self, attributes, max_length=None):
  """Returns concatenated HTML code with SPAN tag.

  Args:
    attributes (dict): A map of name-value pairs for attributes of output
        SPAN tags.
    max_length (:obj:`int`, optional): Maximum length of span enclosed
        chunk.

  Returns:
    The organized HTML code. (str)
  """
  doc = ET.Element('span')
  for chunk in self:
    if (chunk.has_cjk() and
        not (max_length and len(chunk.word) > max_length)):
      ele = ET.Element('span')
      ele.text = chunk.word
      for key, val in attributes.items():
        ele.attrib[key] = val
      doc.append(ele)
    else:
      # add word without span tag for non-CJK text (e.g. English)
      # by appending it after the last element
      if doc.getchildren():
        if doc.getchildren()[-1].tail is None:
          doc.getchildren()[-1].tail = chunk.word
        else:
          doc.getchildren()[-1].tail += chunk.word
      else:
        if doc.text is None:
          doc.text = chunk.word
        else:
          doc.text += chunk.word
  result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
  result = html5lib.serialize(
      html5lib.parseFragment(result), sanitize=True,
      quote_attr_values='always')
  return result
[ "def", "html_serialize", "(", "self", ",", "attributes", ",", "max_length", "=", "None", ")", ":", "doc", "=", "ET", ".", "Element", "(", "'span'", ")", "for", "chunk", "in", "self", ":", "if", "(", "chunk", ".", "has_cjk", "(", ")", "and", "not", "(", "max_length", "and", "len", "(", "chunk", ".", "word", ")", ">", "max_length", ")", ")", ":", "ele", "=", "ET", ".", "Element", "(", "'span'", ")", "ele", ".", "text", "=", "chunk", ".", "word", "for", "key", ",", "val", "in", "attributes", ".", "items", "(", ")", ":", "ele", ".", "attrib", "[", "key", "]", "=", "val", "doc", ".", "append", "(", "ele", ")", "else", ":", "# add word without span tag for non-CJK text (e.g. English)", "# by appending it after the last element", "if", "doc", ".", "getchildren", "(", ")", ":", "if", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "is", "None", ":", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "=", "chunk", ".", "word", "else", ":", "doc", ".", "getchildren", "(", ")", "[", "-", "1", "]", ".", "tail", "+=", "chunk", ".", "word", "else", ":", "if", "doc", ".", "text", "is", "None", ":", "doc", ".", "text", "=", "chunk", ".", "word", "else", ":", "doc", ".", "text", "+=", "chunk", ".", "word", "result", "=", "ET", ".", "tostring", "(", "doc", ",", "encoding", "=", "'utf-8'", ")", ".", "decode", "(", "'utf-8'", ")", "result", "=", "html5lib", ".", "serialize", "(", "html5lib", ".", "parseFragment", "(", "result", ")", ",", "sanitize", "=", "True", ",", "quote_attr_values", "=", "'always'", ")", "return", "result" ]
Returns concatenated HTML code with SPAN tag. Args: attributes (dict): A map of name-value pairs for attributes of output SPAN tags. max_length (:obj:`int`, optional): Maximum length of span enclosed chunk. Returns: The organized HTML code. (str)
[ "Returns", "concatenated", "HTML", "code", "with", "SPAN", "tag", "." ]
101224e6523186851f38ee57a6b2e7bdbd826de2
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L274-L311
train
c-w/gutenberg
gutenberg/acquire/text.py
_etextno_to_uri_subdirectory
python
def _etextno_to_uri_subdirectory(etextno):
    """Returns the subdirectory that an etextno will be found in a gutenberg
    mirror. Generally, one finds the subdirectory by separating out each
    digit of the etext number, and uses it for a directory. The exception
    here is for etext numbers less than 10, which are prepended with a 0 for
    the directory traversal.

    >>> _etextno_to_uri_subdirectory(1)
    '0/1'
    >>> _etextno_to_uri_subdirectory(19)
    '1/19'
    >>> _etextno_to_uri_subdirectory(15453)
    '1/5/4/5/15453'
    """
    str_etextno = str(etextno).zfill(2)
    all_but_last_digit = list(str_etextno[:-1])
    subdir_part = "/".join(all_but_last_digit)
    subdir = "{}/{}".format(subdir_part, etextno)  # etextno not zfilled
    return subdir
[ "def", "_etextno_to_uri_subdirectory", "(", "etextno", ")", ":", "str_etextno", "=", "str", "(", "etextno", ")", ".", "zfill", "(", "2", ")", "all_but_last_digit", "=", "list", "(", "str_etextno", "[", ":", "-", "1", "]", ")", "subdir_part", "=", "\"/\"", ".", "join", "(", "all_but_last_digit", ")", "subdir", "=", "\"{}/{}\"", ".", "format", "(", "subdir_part", ",", "etextno", ")", "# etextno not zfilled", "return", "subdir" ]
Returns the subdirectory that an etextno will be found in a gutenberg mirror. Generally, one finds the subdirectory by separating out each digit of the etext number, and uses it for a directory. The exception here is for etext numbers less than 10, which are prepended with a 0 for the directory traversal. >>> _etextno_to_uri_subdirectory(1) '0/1' >>> _etextno_to_uri_subdirectory(19) '1/19' >>> _etextno_to_uri_subdirectory(15453) '1/5/4/5/15453'
[ "Returns", "the", "subdirectory", "that", "an", "etextno", "will", "be", "found", "in", "a", "gutenberg", "mirror", ".", "Generally", "one", "finds", "the", "subdirectory", "by", "separating", "out", "each", "digit", "of", "the", "etext", "number", "and", "uses", "it", "for", "a", "directory", ".", "The", "exception", "here", "is", "for", "etext", "numbers", "less", "than", "10", "which", "are", "prepended", "with", "a", "0", "for", "the", "directory", "traversal", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L30-L48
train
c-w/gutenberg
gutenberg/acquire/text.py
_format_download_uri_for_extension
python
def _format_download_uri_for_extension(etextno, extension, mirror=None):
    """Returns the download location on the Project Gutenberg servers for a
    given text and extension.

    The list of available extensions for a given text can be found via the
    formaturi metadata extractor.
    """
    mirror = mirror or _GUTENBERG_MIRROR
    root = mirror.strip().rstrip('/')
    path = _etextno_to_uri_subdirectory(etextno)
    uri = '{root}/{path}/{etextno}{extension}'.format(
        root=root,
        path=path,
        etextno=etextno,
        extension=extension)
    return uri
[ "def", "_format_download_uri_for_extension", "(", "etextno", ",", "extension", ",", "mirror", "=", "None", ")", ":", "mirror", "=", "mirror", "or", "_GUTENBERG_MIRROR", "root", "=", "mirror", ".", "strip", "(", ")", ".", "rstrip", "(", "'/'", ")", "path", "=", "_etextno_to_uri_subdirectory", "(", "etextno", ")", "uri", "=", "'{root}/{path}/{etextno}{extension}'", ".", "format", "(", "root", "=", "root", ",", "path", "=", "path", ",", "etextno", "=", "etextno", ",", "extension", "=", "extension", ")", "return", "uri" ]
Returns the download location on the Project Gutenberg servers for a given text and extension. The list of available extensions for a given text can be found via the formaturi metadata extractor.
[ "Returns", "the", "download", "location", "on", "the", "Project", "Gutenberg", "servers", "for", "a", "given", "text", "and", "extension", ".", "The", "list", "of", "available", "extensions", "for", "a", "given", "text", "can", "be", "found", "via", "the", "formaturi", "metadata", "extractor", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L64-L80
train
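Putting the two helpers together by hand: the mirror value below is an assumption (aleph.gutenberg.org is a commonly used Gutenberg mirror), so treat the resulting URI as illustrative rather than authoritative.

subdir = _etextno_to_uri_subdirectory(15453)  # '1/5/4/5/15453'
uri = '{root}/{path}/{etextno}{extension}'.format(
    root='http://aleph.gutenberg.org', path=subdir,
    etextno=15453, extension='-0.txt')
print(uri)  # http://aleph.gutenberg.org/1/5/4/5/15453/15453-0.txt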
c-w/gutenberg
gutenberg/acquire/text.py
_format_download_uri
python
def _format_download_uri(etextno, mirror=None, prefer_ascii=False):
    """Returns the download location on the Project Gutenberg servers for a
    given text.

    Use prefer_ascii to control whether you want to fetch plaintext us-ascii
    file first (default old behavior) or if you prefer UTF-8 then 8-bits
    then plaintext.

    Raises:
        UnknownDownloadUri: If no download location can be found for the
            text.
    """
    mirror = mirror or _GUTENBERG_MIRROR
    if not _does_mirror_exist(mirror):
        raise UnknownDownloadUriException(
            'Could not reach Gutenberg mirror "{:s}". Try setting a '
            'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '
            '--mirror flag or GUTENBERG_MIRROR environment variable.'
            .format(mirror))

    # Check https://www.gutenberg.org/files/ for details about available
    # extensions ;
    # - .txt is plaintext us-ascii
    # - -8.txt is 8-bit plaintext, multiple encodings
    # - -0.txt is UTF-8
    ascii_first = ('.txt', '-0.txt', '-8.txt')
    utf8_first = ('-0.txt', '-8.txt', '.txt')
    extensions = ascii_first if prefer_ascii else utf8_first
    for extension in extensions:
        uri = _format_download_uri_for_extension(etextno, extension, mirror)
        if _does_uri_exist(uri):
            return uri

    raise UnknownDownloadUriException(
        'Failed to find a textual download candidate for {} on {}. '
        'Either the book does not exist or it is only available in '
        'non-textual formats.'
        .format(etextno, mirror))
[ "def", "_format_download_uri", "(", "etextno", ",", "mirror", "=", "None", ",", "prefer_ascii", "=", "False", ")", ":", "mirror", "=", "mirror", "or", "_GUTENBERG_MIRROR", "if", "not", "_does_mirror_exist", "(", "mirror", ")", ":", "raise", "UnknownDownloadUriException", "(", "'Could not reach Gutenberg mirror \"{:s}\". Try setting a '", "'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '", "'--mirror flag or GUTENBERG_MIRROR environment variable.'", ".", "format", "(", "mirror", ")", ")", "# Check https://www.gutenberg.org/files/ for details about available", "# extensions ;", "# - .txt is plaintext us-ascii", "# - -8.txt is 8-bit plaintext, multiple encodings", "# - -0.txt is UTF-8", "ascii_first", "=", "(", "'.txt'", ",", "'-0.txt'", ",", "'-8.txt'", ")", "utf8_first", "=", "(", "'-0.txt'", ",", "'-8.txt'", ",", "'.txt'", ")", "extensions", "=", "ascii_first", "if", "prefer_ascii", "else", "utf8_first", "for", "extension", "in", "extensions", ":", "uri", "=", "_format_download_uri_for_extension", "(", "etextno", ",", "extension", ",", "mirror", ")", "if", "_does_uri_exist", "(", "uri", ")", ":", "return", "uri", "raise", "UnknownDownloadUriException", "(", "'Failed to find a textual download candidate for {} on {}. '", "'Either the book does not exist or it is only available in '", "'non-textual formats.'", ".", "format", "(", "etextno", ",", "mirror", ")", ")" ]
Returns the download location on the Project Gutenberg servers for a given text. Use prefer_ascii to control whether you want to fetch plaintext us-ascii file first (default old behavior) or if you prefer UTF-8 then 8-bits then plaintext. Raises: UnknownDownloadUri: If no download location can be found for the text.
[ "Returns", "the", "download", "location", "on", "the", "Project", "Gutenberg", "servers", "for", "a", "given", "text", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L83-L119
train
c-w/gutenberg
gutenberg/acquire/text.py
load_etext
python
def load_etext(etextno, refresh_cache=False, mirror=None,
               prefer_ascii=False):
    """Returns a unicode representation of the full body of a Project
    Gutenberg text. After making an initial remote call to Project
    Gutenberg's servers, the text is persisted locally.
    """
    etextno = validate_etextno(etextno)
    cached = os.path.join(_TEXT_CACHE, '{}.txt.gz'.format(etextno))

    if refresh_cache:
        remove(cached)
    if not os.path.exists(cached):
        makedirs(os.path.dirname(cached))
        download_uri = _format_download_uri(etextno, mirror, prefer_ascii)
        response = requests.get(download_uri)
        # Ensure proper UTF-8 saving. There might be instances of ebooks or
        # mirrors which advertise a broken encoding, and this will break
        # downstream usages. For example, #55517 from aleph.gutenberg.org:
        #
        #   from gutenberg.acquire import load_etext
        #   print(load_etext(55517, refresh_cache=True)[0:1000])
        #
        # response.encoding will be 'ISO-8859-1' while the file is UTF-8
        if response.encoding != response.apparent_encoding:
            response.encoding = response.apparent_encoding
        text = response.text
        with closing(gzip.open(cached, 'w')) as cache:
            cache.write(text.encode('utf-8'))

    with closing(gzip.open(cached, 'r')) as cache:
        text = cache.read().decode('utf-8')
    return text
[ "def", "load_etext", "(", "etextno", ",", "refresh_cache", "=", "False", ",", "mirror", "=", "None", ",", "prefer_ascii", "=", "False", ")", ":", "etextno", "=", "validate_etextno", "(", "etextno", ")", "cached", "=", "os", ".", "path", ".", "join", "(", "_TEXT_CACHE", ",", "'{}.txt.gz'", ".", "format", "(", "etextno", ")", ")", "if", "refresh_cache", ":", "remove", "(", "cached", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "cached", ")", ":", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "cached", ")", ")", "download_uri", "=", "_format_download_uri", "(", "etextno", ",", "mirror", ",", "prefer_ascii", ")", "response", "=", "requests", ".", "get", "(", "download_uri", ")", "# Ensure proper UTF-8 saving. There might be instances of ebooks or", "# mirrors which advertise a broken encoding, and this will break", "# downstream usages. For example, #55517 from aleph.gutenberg.org:", "#", "# from gutenberg.acquire import load_etext", "# print(load_etext(55517, refresh_cache=True)[0:1000])", "#", "# response.encoding will be 'ISO-8859-1' while the file is UTF-8", "if", "response", ".", "encoding", "!=", "response", ".", "apparent_encoding", ":", "response", ".", "encoding", "=", "response", ".", "apparent_encoding", "text", "=", "response", ".", "text", "with", "closing", "(", "gzip", ".", "open", "(", "cached", ",", "'w'", ")", ")", "as", "cache", ":", "cache", ".", "write", "(", "text", ".", "encode", "(", "'utf-8'", ")", ")", "with", "closing", "(", "gzip", ".", "open", "(", "cached", ",", "'r'", ")", ")", "as", "cache", ":", "text", "=", "cache", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "text" ]
Returns a unicode representation of the full body of a Project Gutenberg text. After making an initial remote call to Project Gutenberg's servers, the text is persisted locally.
[ "Returns", "a", "unicode", "representation", "of", "the", "full", "body", "of", "a", "Project", "Gutenberg", "text", ".", "After", "making", "an", "initial", "remote", "call", "to", "Project", "Gutenberg", "s", "servers", "the", "text", "is", "persisted", "locally", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L122-L153
train
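A minimal usage sketch for load_etext, assuming the gutenberg package is installed and the default mirror is reachable; the text number (2701, Moby Dick) is illustrative:

# First call downloads and caches the text; later calls are served locally.
from gutenberg.acquire import load_etext

text = load_etext(2701)          # remote call, writes 2701.txt.gz to the cache
print(text[:80])                 # peek at the opening characters
assert load_etext(2701) == text  # second call reads back from the local gzip cache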
c-w/gutenberg
gutenberg/_util/logging.py
disable_logging
def disable_logging(logger=None): """Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified. """ logger = logger or logging.getLogger() disabled = logger.disabled logger.disabled = True yield logger.disabled = disabled
python
def disable_logging(logger=None): """Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified. """ logger = logger or logging.getLogger() disabled = logger.disabled logger.disabled = True yield logger.disabled = disabled
[ "def", "disable_logging", "(", "logger", "=", "None", ")", ":", "logger", "=", "logger", "or", "logging", ".", "getLogger", "(", ")", "disabled", "=", "logger", ".", "disabled", "logger", ".", "disabled", "=", "True", "yield", "logger", ".", "disabled", "=", "disabled" ]
Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified.
[ "Context", "manager", "to", "temporarily", "suppress", "all", "logging", "for", "a", "given", "logger", "or", "the", "root", "logger", "if", "no", "particular", "logger", "is", "specified", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/logging.py#L11-L20
train
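The generator above yields exactly once, so it is presumably wrapped with contextlib.contextmanager at its definition site; a hedged usage sketch under that assumption:

# Suppress all root-logger output for the duration of the block.
import logging
from gutenberg._util.logging import disable_logging  # assumes the package is installed

logging.basicConfig(level=logging.INFO)
with disable_logging():
    logging.info('this message is suppressed')
logging.info('logging is restored after the block')

Note that the original restores the previous state without a try/finally, so an exception raised inside the block would leave the logger disabled.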
c-w/gutenberg
gutenberg/_util/os.py
makedirs
def makedirs(*args, **kwargs): """Wrapper around os.makedirs that doesn't raise an exception if the directory already exists. """ try: os.makedirs(*args, **kwargs) except OSError as ex: if ex.errno != errno.EEXIST: raise
python
def makedirs(*args, **kwargs): """Wrapper around os.makedirs that doesn't raise an exception if the directory already exists. """ try: os.makedirs(*args, **kwargs) except OSError as ex: if ex.errno != errno.EEXIST: raise
[ "def", "makedirs", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "os", ".", "makedirs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "OSError", "as", "ex", ":", "if", "ex", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise" ]
Wrapper around os.makedirs that doesn't raise an exception if the directory already exists.
[ "Wrapper", "around", "os", ".", "makedirs", "that", "doesn", "t", "raise", "an", "exception", "if", "the", "directory", "already", "exists", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L12-L21
train
c-w/gutenberg
gutenberg/_util/os.py
remove
def remove(path): """Wrapper that switches between os.remove and shutil.rmtree depending on whether the provided path is a file or directory. """ if not os.path.exists(path): return if os.path.isdir(path): return shutil.rmtree(path) if os.path.isfile(path): return os.remove(path)
python
def remove(path): """Wrapper that switches between os.remove and shutil.rmtree depending on whether the provided path is a file or directory. """ if not os.path.exists(path): return if os.path.isdir(path): return shutil.rmtree(path) if os.path.isfile(path): return os.remove(path)
[ "def", "remove", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "shutil", ".", "rmtree", "(", "path", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "os", ".", "remove", "(", "path", ")" ]
Wrapper that switches between os.remove and shutil.rmtree depending on whether the provided path is a file or directory.
[ "Wrapper", "that", "switches", "between", "os", ".", "remove", "and", "shutil", ".", "rmtree", "depending", "on", "whether", "the", "provided", "path", "is", "a", "file", "or", "directory", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L24-L36
train
c-w/gutenberg
gutenberg/_util/os.py
determine_encoding
def determine_encoding(path, default=None): """Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file. """ byte_order_marks = ( ('utf-8-sig', (codecs.BOM_UTF8, )), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) try: with open(path, 'rb') as infile: raw = infile.read(4) except IOError: return default for encoding, boms in byte_order_marks: if any(raw.startswith(bom) for bom in boms): return encoding return default
python
def determine_encoding(path, default=None): """Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file. """ byte_order_marks = ( ('utf-8-sig', (codecs.BOM_UTF8, )), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) try: with open(path, 'rb') as infile: raw = infile.read(4) except IOError: return default for encoding, boms in byte_order_marks: if any(raw.startswith(bom) for bom in boms): return encoding return default
[ "def", "determine_encoding", "(", "path", ",", "default", "=", "None", ")", ":", "byte_order_marks", "=", "(", "(", "'utf-8-sig'", ",", "(", "codecs", ".", "BOM_UTF8", ",", ")", ")", ",", "(", "'utf-16'", ",", "(", "codecs", ".", "BOM_UTF16_LE", ",", "codecs", ".", "BOM_UTF16_BE", ")", ")", ",", "(", "'utf-32'", ",", "(", "codecs", ".", "BOM_UTF32_LE", ",", "codecs", ".", "BOM_UTF32_BE", ")", ")", ",", ")", "try", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "infile", ":", "raw", "=", "infile", ".", "read", "(", "4", ")", "except", "IOError", ":", "return", "default", "for", "encoding", ",", "boms", "in", "byte_order_marks", ":", "if", "any", "(", "raw", ".", "startswith", "(", "bom", ")", "for", "bom", "in", "boms", ")", ":", "return", "encoding", "return", "default" ]
Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file.
[ "Determines", "the", "encoding", "of", "a", "file", "based", "on", "byte", "order", "marks", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L39-L67
train
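A runnable sketch of the byte-order-mark lookup, assuming the package is installed; the temporary file stands in for a real text:

# A UTF-8 BOM at the start of the file is enough for detection.
import codecs
import tempfile
from gutenberg._util.os import determine_encoding

with tempfile.NamedTemporaryFile(delete=False, suffix='.txt') as tmp:
    tmp.write(codecs.BOM_UTF8 + b'hello')

print(determine_encoding(tmp.name))                  # 'utf-8-sig'
print(determine_encoding('/no/such/file', 'utf-8'))  # IOError path: returns the default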
c-w/gutenberg
gutenberg/_util/os.py
reopen_encoded
def reopen_encoded(fileobj, mode='r', fallback_encoding=None): """Makes sure that a file was opened with some valid encoding. Arguments: fileobj (file): The file-object. mode (str, optional): The mode in which to re-open the file. fallback_encoding (str, optional): The encoding in which to re-open the file if it does not specify an encoding itself. Returns: file: The re-opened file. """ encoding = determine_encoding(fileobj.name, fallback_encoding) fileobj.close() return open(fileobj.name, mode, encoding=encoding)
python
def reopen_encoded(fileobj, mode='r', fallback_encoding=None): """Makes sure that a file was opened with some valid encoding. Arguments: fileobj (file): The file-object. mode (str, optional): The mode in which to re-open the file. fallback_encoding (str, optional): The encoding in which to re-open the file if it does not specify an encoding itself. Returns: file: The re-opened file. """ encoding = determine_encoding(fileobj.name, fallback_encoding) fileobj.close() return open(fileobj.name, mode, encoding=encoding)
[ "def", "reopen_encoded", "(", "fileobj", ",", "mode", "=", "'r'", ",", "fallback_encoding", "=", "None", ")", ":", "encoding", "=", "determine_encoding", "(", "fileobj", ".", "name", ",", "fallback_encoding", ")", "fileobj", ".", "close", "(", ")", "return", "open", "(", "fileobj", ".", "name", ",", "mode", ",", "encoding", "=", "encoding", ")" ]
Makes sure that a file was opened with some valid encoding. Arguments: fileobj (file): The file-object. mode (str, optional): The mode in which to re-open the file. fallback_encoding (str, optional): The encoding in which to re-open the file if it does not specify an encoding itself. Returns: file: The re-opened file.
[ "Makes", "sure", "that", "a", "file", "was", "opened", "with", "some", "valid", "encoding", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L70-L85
train
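A companion sketch for the helper above, under the same assumptions; the file name is illustrative:

# Re-open a BOM-prefixed file with the encoding detected from its first bytes.
import codecs
from gutenberg._util.os import reopen_encoded

with open('bom-example.txt', 'wb') as out:
    out.write(codecs.BOM_UTF8 + u'caf\u00e9'.encode('utf-8'))

fileobj = open('bom-example.txt', 'rb')
reopened = reopen_encoded(fileobj, mode='r', fallback_encoding='ascii')
print(reopened.read())  # 'café' -- read as utf-8-sig, so the BOM is stripped
reopened.close()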
c-w/gutenberg
gutenberg/query/api.py
get_metadata
def get_metadata(feature_name, etextno): """Looks up the value of a meta-data feature for a given text. Arguments: feature_name (str): The name of the meta-data to look up. etextno (int): The identifier of the Gutenberg text for which to look up the meta-data. Returns: frozenset: The values of the meta-data for the text or an empty set if the text does not have meta-data associated with the feature. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name. """ metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno) return frozenset(metadata_values)
python
def get_metadata(feature_name, etextno): """Looks up the value of a meta-data feature for a given text. Arguments: feature_name (str): The name of the meta-data to look up. etextno (int): The identifier of the Gutenberg text for which to look up the meta-data. Returns: frozenset: The values of the meta-data for the text or an empty set if the text does not have meta-data associated with the feature. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name. """ metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno) return frozenset(metadata_values)
[ "def", "get_metadata", "(", "feature_name", ",", "etextno", ")", ":", "metadata_values", "=", "MetadataExtractor", ".", "get", "(", "feature_name", ")", ".", "get_metadata", "(", "etextno", ")", "return", "frozenset", "(", "metadata_values", ")" ]
Looks up the value of a meta-data feature for a given text. Arguments: feature_name (str): The name of the meta-data to look up. etextno (int): The identifier of the Gutenberg text for which to look up the meta-data. Returns: frozenset: The values of the meta-data for the text or an empty set if the text does not have meta-data associated with the feature. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name.
[ "Looks", "up", "the", "value", "of", "a", "meta", "-", "data", "feature", "for", "a", "given", "text", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L20-L38
train
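A usage sketch, assuming the metadata cache has already been populated (a slow one-off step); the text number and expected values are illustrative:

from gutenberg.query import get_metadata

print(get_metadata('title', 2701))   # e.g. frozenset({'Moby Dick; Or, The Whale'})
print(get_metadata('author', 2701))  # e.g. frozenset({'Melville, Herman'})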
c-w/gutenberg
gutenberg/query/api.py
get_etexts
def get_etexts(feature_name, value): """Looks up all the texts that have meta-data matching some criterion. Arguments: feature_name (str): The meta-data on which to select the texts. value (str): The value of the meta-data on which to filter the texts. Returns: frozenset: The set of all the Project Gutenberg text identifiers that match the provided query. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name. """ matching_etexts = MetadataExtractor.get(feature_name).get_etexts(value) return frozenset(matching_etexts)
python
def get_etexts(feature_name, value): """Looks up all the texts that have meta-data matching some criterion. Arguments: feature_name (str): The meta-data on which to select the texts. value (str): The value of the meta-data on which to filter the texts. Returns: frozenset: The set of all the Project Gutenberg text identifiers that match the provided query. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name. """ matching_etexts = MetadataExtractor.get(feature_name).get_etexts(value) return frozenset(matching_etexts)
[ "def", "get_etexts", "(", "feature_name", ",", "value", ")", ":", "matching_etexts", "=", "MetadataExtractor", ".", "get", "(", "feature_name", ")", ".", "get_etexts", "(", "value", ")", "return", "frozenset", "(", "matching_etexts", ")" ]
Looks up all the texts that have meta-data matching some criterion. Arguments: feature_name (str): The meta-data on which to select the texts. value (str): The value of the meta-data on which to filter the texts. Returns: frozenset: The set of all the Project Gutenberg text identifiers that match the provided query. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name.
[ "Looks", "up", "all", "the", "texts", "that", "have", "meta", "-", "data", "matching", "some", "criterion", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L41-L58
train
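The inverse lookup, under the same populated-cache assumption:

from gutenberg.query import get_etexts

melville = get_etexts('author', 'Melville, Herman')  # frozenset of matching text numbers
print(2701 in melville)                              # Moby Dick should be among them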
c-w/gutenberg
gutenberg/query/api.py
MetadataExtractor._uri_to_etext
def _uri_to_etext(cls, uri_ref): """Converts the representation used to identify a text in the meta-data RDF graph to a human-friendly integer text identifier. """ try: return validate_etextno(int(os.path.basename(uri_ref.toPython()))) except InvalidEtextIdException: return None
python
def _uri_to_etext(cls, uri_ref): """Converts the representation used to identify a text in the meta-data RDF graph to a human-friendly integer text identifier. """ try: return validate_etextno(int(os.path.basename(uri_ref.toPython()))) except InvalidEtextIdException: return None
[ "def", "_uri_to_etext", "(", "cls", ",", "uri_ref", ")", ":", "try", ":", "return", "validate_etextno", "(", "int", "(", "os", ".", "path", ".", "basename", "(", "uri_ref", ".", "toPython", "(", ")", ")", ")", ")", "except", "InvalidEtextIdException", ":", "return", "None" ]
Converts the representation used to identify a text in the meta-data RDF graph to a human-friendly integer text identifier.
[ "Converts", "the", "representation", "used", "to", "identify", "a", "text", "in", "the", "meta", "-", "data", "RDF", "graph", "to", "a", "human", "-", "friendly", "integer", "text", "identifier", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L127-L135
train
c-w/gutenberg
gutenberg/query/api.py
MetadataExtractor._implementations
def _implementations(cls): """Returns all the concrete subclasses of MetadataExtractor. """ if cls.__implementations: return cls.__implementations cls.__implementations = {} for implementation in all_subclasses(MetadataExtractor): try: feature_name = implementation.feature_name() cls.__implementations[feature_name] = implementation except NotImplementedError: pass return cls.__implementations
python
def _implementations(cls): """Returns all the concrete subclasses of MetadataExtractor. """ if cls.__implementations: return cls.__implementations cls.__implementations = {} for implementation in all_subclasses(MetadataExtractor): try: feature_name = implementation.feature_name() cls.__implementations[feature_name] = implementation except NotImplementedError: pass return cls.__implementations
[ "def", "_implementations", "(", "cls", ")", ":", "if", "cls", ".", "__implementations", ":", "return", "cls", ".", "__implementations", "cls", ".", "__implementations", "=", "{", "}", "for", "implementation", "in", "all_subclasses", "(", "MetadataExtractor", ")", ":", "try", ":", "feature_name", "=", "implementation", ".", "feature_name", "(", ")", "cls", ".", "__implementations", "[", "feature_name", "]", "=", "implementation", "except", "NotImplementedError", ":", "pass", "return", "cls", ".", "__implementations" ]
Returns all the concrete subclasses of MetadataExtractor.
[ "Returns", "all", "the", "concrete", "subclasses", "of", "MetadataExtractor", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L138-L152
train
c-w/gutenberg
gutenberg/query/api.py
MetadataExtractor.get
def get(feature_name): """Returns the MetadataExtractor that can extract information about the provided feature name. Raises: UnsupportedFeature: If no extractor exists for the feature name. """ implementations = MetadataExtractor._implementations() try: return implementations[feature_name] except KeyError: raise UnsupportedFeatureException( 'no MetadataExtractor registered for feature "{feature_name}" ' '(try any of the following: {supported_features})' .format(feature_name=feature_name, supported_features=', '.join(sorted(implementations))))
python
def get(feature_name): """Returns the MetadataExtractor that can extract information about the provided feature name. Raises: UnsupportedFeature: If no extractor exists for the feature name. """ implementations = MetadataExtractor._implementations() try: return implementations[feature_name] except KeyError: raise UnsupportedFeatureException( 'no MetadataExtractor registered for feature "{feature_name}" ' '(try any of the following: {supported_features})' .format(feature_name=feature_name, supported_features=', '.join(sorted(implementations))))
[ "def", "get", "(", "feature_name", ")", ":", "implementations", "=", "MetadataExtractor", ".", "_implementations", "(", ")", "try", ":", "return", "implementations", "[", "feature_name", "]", "except", "KeyError", ":", "raise", "UnsupportedFeatureException", "(", "'no MetadataExtractor registered for feature \"{feature_name}\" '", "'(try any of the following: {supported_features})'", ".", "format", "(", "feature_name", "=", "feature_name", ",", "supported_features", "=", "', '", ".", "join", "(", "sorted", "(", "implementations", ")", ")", ")", ")" ]
Returns the MetadataExtractor that can extract information about the provided feature name. Raises: UnsupportedFeature: If no extractor exists for the feature name.
[ "Returns", "the", "MetadataExtractor", "that", "can", "extract", "information", "about", "the", "provided", "feature", "name", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L155-L171
train
c-w/gutenberg
gutenberg/acquire/metadata.py
set_metadata_cache
def set_metadata_cache(cache): """Sets the metadata cache object to use. """ global _METADATA_CACHE if _METADATA_CACHE and _METADATA_CACHE.is_open: _METADATA_CACHE.close() _METADATA_CACHE = cache
python
def set_metadata_cache(cache): """Sets the metadata cache object to use. """ global _METADATA_CACHE if _METADATA_CACHE and _METADATA_CACHE.is_open: _METADATA_CACHE.close() _METADATA_CACHE = cache
[ "def", "set_metadata_cache", "(", "cache", ")", ":", "global", "_METADATA_CACHE", "if", "_METADATA_CACHE", "and", "_METADATA_CACHE", ".", "is_open", ":", "_METADATA_CACHE", ".", "close", "(", ")", "_METADATA_CACHE", "=", "cache" ]
Sets the metadata cache object to use.
[ "Sets", "the", "metadata", "cache", "object", "to", "use", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L323-L332
train
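A sketch of swapping in an explicit cache backend before querying, using the SQLite backend named elsewhere in this file; the path is illustrative, and populate() downloads and parses the full RDF catalog, so it is a slow one-off operation:

from gutenberg.acquire import set_metadata_cache
from gutenberg.acquire.metadata import SqliteMetadataCache

cache = SqliteMetadataCache('/my/custom/location/cache.sqlite')
cache.populate()           # one-off: fetch and load the whole catalog
set_metadata_cache(cache)  # subsequent gutenberg.query calls use this backend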
c-w/gutenberg
gutenberg/acquire/metadata.py
_create_metadata_cache
def _create_metadata_cache(cache_location): """Creates a new metadata cache instance appropriate for this platform. """ cache_url = os.getenv('GUTENBERG_FUSEKI_URL') if cache_url: return FusekiMetadataCache(cache_location, cache_url) try: return SleepycatMetadataCache(cache_location) except InvalidCacheException: logging.warning('Unable to create cache based on BSD-DB. ' 'Falling back to SQLite backend. ' 'Performance may be degraded significantly.') return SqliteMetadataCache(cache_location)
python
def _create_metadata_cache(cache_location): """Creates a new metadata cache instance appropriate for this platform. """ cache_url = os.getenv('GUTENBERG_FUSEKI_URL') if cache_url: return FusekiMetadataCache(cache_location, cache_url) try: return SleepycatMetadataCache(cache_location) except InvalidCacheException: logging.warning('Unable to create cache based on BSD-DB. ' 'Falling back to SQLite backend. ' 'Performance may be degraded significantly.') return SqliteMetadataCache(cache_location)
[ "def", "_create_metadata_cache", "(", "cache_location", ")", ":", "cache_url", "=", "os", ".", "getenv", "(", "'GUTENBERG_FUSEKI_URL'", ")", "if", "cache_url", ":", "return", "FusekiMetadataCache", "(", "cache_location", ",", "cache_url", ")", "try", ":", "return", "SleepycatMetadataCache", "(", "cache_location", ")", "except", "InvalidCacheException", ":", "logging", ".", "warning", "(", "'Unable to create cache based on BSD-DB. '", "'Falling back to SQLite backend. '", "'Performance may be degraded significantly.'", ")", "return", "SqliteMetadataCache", "(", "cache_location", ")" ]
Creates a new metadata cache instance appropriate for this platform.
[ "Creates", "a", "new", "metadata", "cache", "instance", "appropriate", "for", "this", "platform", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L347-L362
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache.open
def open(self): """Opens an existing cache. """ try: self.graph.open(self.cache_uri, create=False) self._add_namespaces(self.graph) self.is_open = True except Exception: raise InvalidCacheException('The cache is invalid or not created')
python
def open(self): """Opens an existing cache. """ try: self.graph.open(self.cache_uri, create=False) self._add_namespaces(self.graph) self.is_open = True except Exception: raise InvalidCacheException('The cache is invalid or not created')
[ "def", "open", "(", "self", ")", ":", "try", ":", "self", ".", "graph", ".", "open", "(", "self", ".", "cache_uri", ",", "create", "=", "False", ")", "self", ".", "_add_namespaces", "(", "self", ".", "graph", ")", "self", ".", "is_open", "=", "True", "except", "Exception", ":", "raise", "InvalidCacheException", "(", "'The cache is invalid or not created'", ")" ]
Opens an existing cache.
[ "Opens", "an", "existing", "cache", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L61-L70
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache.populate
def populate(self): """Populates a new cache. """ if self.exists: raise CacheAlreadyExistsException('location: %s' % self.cache_uri) self._populate_setup() with closing(self.graph): with self._download_metadata_archive() as metadata_archive: for fact in self._iter_metadata_triples(metadata_archive): self._add_to_graph(fact)
python
def populate(self): """Populates a new cache. """ if self.exists: raise CacheAlreadyExistsException('location: %s' % self.cache_uri) self._populate_setup() with closing(self.graph): with self._download_metadata_archive() as metadata_archive: for fact in self._iter_metadata_triples(metadata_archive): self._add_to_graph(fact)
[ "def", "populate", "(", "self", ")", ":", "if", "self", ".", "exists", ":", "raise", "CacheAlreadyExistsException", "(", "'location: %s'", "%", "self", ".", "cache_uri", ")", "self", ".", "_populate_setup", "(", ")", "with", "closing", "(", "self", ".", "graph", ")", ":", "with", "self", ".", "_download_metadata_archive", "(", ")", "as", "metadata_archive", ":", "for", "fact", "in", "self", ".", "_iter_metadata_triples", "(", "metadata_archive", ")", ":", "self", ".", "_add_to_graph", "(", "fact", ")" ]
Populates a new cache.
[ "Populates", "a", "new", "cache", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L86-L98
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache.refresh
def refresh(self): """Refresh the cache by deleting the old one and creating a new one. """ if self.exists: self.delete() self.populate() self.open()
python
def refresh(self): """Refresh the cache by deleting the old one and creating a new one. """ if self.exists: self.delete() self.populate() self.open()
[ "def", "refresh", "(", "self", ")", ":", "if", "self", ".", "exists", ":", "self", ".", "delete", "(", ")", "self", ".", "populate", "(", ")", "self", ".", "open", "(", ")" ]
Refresh the cache by deleting the old one and creating a new one.
[ "Refresh", "the", "cache", "by", "deleting", "the", "old", "one", "and", "creating", "a", "new", "one", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L112-L119
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache._download_metadata_archive
def _download_metadata_archive(self):
    """Makes a remote call to the Project Gutenberg servers and downloads
    the entire Project Gutenberg meta-data catalog. The catalog describes
    the texts on Project Gutenberg in RDF. The function yields the path to
    a local copy of the catalog.

    """
    with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
        shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
    yield metadata_archive.name
    remove(metadata_archive.name)
python
def _download_metadata_archive(self):
    """Makes a remote call to the Project Gutenberg servers and downloads
    the entire Project Gutenberg meta-data catalog. The catalog describes
    the texts on Project Gutenberg in RDF. The function yields the path to
    a local copy of the catalog.

    """
    with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
        shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
    yield metadata_archive.name
    remove(metadata_archive.name)
[ "def", "_download_metadata_archive", "(", "self", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "metadata_archive", ":", "shutil", ".", "copyfileobj", "(", "urlopen", "(", "self", ".", "catalog_source", ")", ",", "metadata_archive", ")", "yield", "metadata_archive", ".", "name", "remove", "(", "metadata_archive", ".", "name", ")" ]
Makes a remote call to the Project Gutenberg servers and downloads the entire Project Gutenberg meta-data catalog. The catalog describes the texts on Project Gutenberg in RDF. The function yields the path to a local copy of the catalog.
[ "Makes", "a", "remote", "call", "to", "the", "Project", "Gutenberg", "servers", "and", "downloads", "the", "entire", "Project", "Gutenberg", "meta", "-", "data", "catalog", ".", "The", "catalog", "describes", "the", "texts", "on", "Project", "Gutenberg", "in", "RDF", ".", "The", "function", "yields", "the", "path", "to", "a", "local", "copy", "of", "the", "catalog", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L138-L148
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache._metadata_is_invalid
def _metadata_is_invalid(cls, fact): """Determines if the fact is not well formed. """ return any(isinstance(token, URIRef) and ' ' in token for token in fact)
python
def _metadata_is_invalid(cls, fact): """Determines if the fact is not well formed. """ return any(isinstance(token, URIRef) and ' ' in token for token in fact)
[ "def", "_metadata_is_invalid", "(", "cls", ",", "fact", ")", ":", "return", "any", "(", "isinstance", "(", "token", ",", "URIRef", ")", "and", "' '", "in", "token", "for", "token", "in", "fact", ")" ]
Determines if the fact is not well formed.
[ "Determines", "if", "the", "fact", "is", "not", "well", "formed", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L151-L156
train
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache._iter_metadata_triples
def _iter_metadata_triples(cls, metadata_archive_path): """Yields all meta-data of Project Gutenberg texts contained in the catalog dump. """ pg_rdf_regex = re.compile(r'pg\d+.rdf$') with closing(tarfile.open(metadata_archive_path)) as metadata_archive: for item in metadata_archive: if pg_rdf_regex.search(item.name): with disable_logging(): extracted = metadata_archive.extractfile(item) graph = Graph().parse(extracted) for fact in graph: if cls._metadata_is_invalid(fact): logging.info('skipping invalid triple %s', fact) else: yield fact
python
def _iter_metadata_triples(cls, metadata_archive_path): """Yields all meta-data of Project Gutenberg texts contained in the catalog dump. """ pg_rdf_regex = re.compile(r'pg\d+.rdf$') with closing(tarfile.open(metadata_archive_path)) as metadata_archive: for item in metadata_archive: if pg_rdf_regex.search(item.name): with disable_logging(): extracted = metadata_archive.extractfile(item) graph = Graph().parse(extracted) for fact in graph: if cls._metadata_is_invalid(fact): logging.info('skipping invalid triple %s', fact) else: yield fact
[ "def", "_iter_metadata_triples", "(", "cls", ",", "metadata_archive_path", ")", ":", "pg_rdf_regex", "=", "re", ".", "compile", "(", "r'pg\\d+.rdf$'", ")", "with", "closing", "(", "tarfile", ".", "open", "(", "metadata_archive_path", ")", ")", "as", "metadata_archive", ":", "for", "item", "in", "metadata_archive", ":", "if", "pg_rdf_regex", ".", "search", "(", "item", ".", "name", ")", ":", "with", "disable_logging", "(", ")", ":", "extracted", "=", "metadata_archive", ".", "extractfile", "(", "item", ")", "graph", "=", "Graph", "(", ")", ".", "parse", "(", "extracted", ")", "for", "fact", "in", "graph", ":", "if", "cls", ".", "_metadata_is_invalid", "(", "fact", ")", ":", "logging", ".", "info", "(", "'skipping invalid triple %s'", ",", "fact", ")", "else", ":", "yield", "fact" ]
Yields all meta-data of Project Gutenberg texts contained in the catalog dump.
[ "Yields", "all", "meta", "-", "data", "of", "Project", "Gutenberg", "texts", "contained", "in", "the", "catalog", "dump", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L159-L175
train
c-w/gutenberg
gutenberg/acquire/metadata.py
FusekiMetadataCache._populate_setup
def _populate_setup(self): """Just create a local marker file since the actual database should already be created on the Fuseki server. """ makedirs(os.path.dirname(self._cache_marker)) with codecs.open(self._cache_marker, 'w', encoding='utf-8') as fobj: fobj.write(self.cache_uri) self.graph.open(self.cache_uri)
python
def _populate_setup(self): """Just create a local marker file since the actual database should already be created on the Fuseki server. """ makedirs(os.path.dirname(self._cache_marker)) with codecs.open(self._cache_marker, 'w', encoding='utf-8') as fobj: fobj.write(self.cache_uri) self.graph.open(self.cache_uri)
[ "def", "_populate_setup", "(", "self", ")", ":", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "_cache_marker", ")", ")", "with", "codecs", ".", "open", "(", "self", ".", "_cache_marker", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "fobj", ":", "fobj", ".", "write", "(", "self", ".", "cache_uri", ")", "self", ".", "graph", ".", "open", "(", "self", ".", "cache_uri", ")" ]
Just create a local marker file since the actual database should already be created on the Fuseki server.
[ "Just", "create", "a", "local", "marker", "file", "since", "the", "actual", "database", "should", "already", "be", "created", "on", "the", "Fuseki", "server", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L219-L227
train
c-w/gutenberg
gutenberg/acquire/metadata.py
FusekiMetadataCache.delete
def delete(self): """Deletes the local marker file and also any data in the Fuseki server. """ MetadataCache.delete(self) try: self.graph.query('DELETE WHERE { ?s ?p ?o . }') except ResultException: # this is often just a false positive since Jena Fuseki does not # return tuples for a deletion query, so swallowing the exception # here is fine logging.exception('error when deleting graph')
python
def delete(self): """Deletes the local marker file and also any data in the Fuseki server. """ MetadataCache.delete(self) try: self.graph.query('DELETE WHERE { ?s ?p ?o . }') except ResultException: # this is often just a false positive since Jena Fuseki does not # return tuples for a deletion query, so swallowing the exception # here is fine logging.exception('error when deleting graph')
[ "def", "delete", "(", "self", ")", ":", "MetadataCache", ".", "delete", "(", "self", ")", "try", ":", "self", ".", "graph", ".", "query", "(", "'DELETE WHERE { ?s ?p ?o . }'", ")", "except", "ResultException", ":", "# this is often just a false positive since Jena Fuseki does not", "# return tuples for a deletion query, so swallowing the exception", "# here is fine", "logging", ".", "exception", "(", "'error when deleting graph'", ")" ]
Deletes the local marker file and also any data in the Fuseki server.
[ "Deletes", "the", "local", "marker", "file", "and", "also", "any", "data", "in", "the", "Fuseki", "server", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L229-L241
train
c-w/gutenberg
gutenberg/acquire/metadata.py
FusekiMetadataCache._metadata_is_invalid
def _metadata_is_invalid(cls, fact): """Filters out blank nodes since the SPARQLUpdateStore does not support them. """ return (MetadataCache._metadata_is_invalid(fact) or any(isinstance(token, BNode) for token in fact))
python
def _metadata_is_invalid(cls, fact): """Filters out blank nodes since the SPARQLUpdateStore does not support them. """ return (MetadataCache._metadata_is_invalid(fact) or any(isinstance(token, BNode) for token in fact))
[ "def", "_metadata_is_invalid", "(", "cls", ",", "fact", ")", ":", "return", "(", "MetadataCache", ".", "_metadata_is_invalid", "(", "fact", ")", "or", "any", "(", "isinstance", "(", "token", ",", "BNode", ")", "for", "token", "in", "fact", ")", ")" ]
Filters out blank nodes since the SPARQLUpdateStore does not support them.
[ "Filters", "out", "blank", "nodes", "since", "the", "SPARQLUpdateStore", "does", "not", "support", "them", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L269-L275
train
c-w/gutenberg
gutenberg/_util/objects.py
all_subclasses
def all_subclasses(cls): """Recursively returns all the subclasses of the provided class. """ subclasses = cls.__subclasses__() descendants = (descendant for subclass in subclasses for descendant in all_subclasses(subclass)) return set(subclasses) | set(descendants)
python
def all_subclasses(cls): """Recursively returns all the subclasses of the provided class. """ subclasses = cls.__subclasses__() descendants = (descendant for subclass in subclasses for descendant in all_subclasses(subclass)) return set(subclasses) | set(descendants)
[ "def", "all_subclasses", "(", "cls", ")", ":", "subclasses", "=", "cls", ".", "__subclasses__", "(", ")", "descendants", "=", "(", "descendant", "for", "subclass", "in", "subclasses", "for", "descendant", "in", "all_subclasses", "(", "subclass", ")", ")", "return", "set", "(", "subclasses", ")", "|", "set", "(", "descendants", ")" ]
Recursively returns all the subclasses of the provided class.
[ "Recursively", "returns", "all", "the", "subclasses", "of", "the", "provided", "class", "." ]
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/objects.py#L4-L11
train
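A self-contained demonstration that the recursion collects grandchildren as well as direct subclasses:

from gutenberg._util.objects import all_subclasses  # assumes the package is installed

class Base(object): pass
class Child(Base): pass
class Grandchild(Child): pass

print(all_subclasses(Base) == {Child, Grandchild})  # True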
ralphbean/ansi2html
ansi2html/converter.py
Ansi2HTMLConverter._collapse_cursor
def _collapse_cursor(self, parts): """ Act on any CursorMoveUp commands by deleting preceding tokens """ final_parts = [] for part in parts: # Throw out empty string tokens ("") if not part: continue # Go back, deleting every token in the last 'line' if part == CursorMoveUp: if final_parts: final_parts.pop() while final_parts and '\n' not in final_parts[-1]: final_parts.pop() continue # Otherwise, just pass this token forward final_parts.append(part) return final_parts
python
def _collapse_cursor(self, parts): """ Act on any CursorMoveUp commands by deleting preceding tokens """ final_parts = [] for part in parts: # Throw out empty string tokens ("") if not part: continue # Go back, deleting every token in the last 'line' if part == CursorMoveUp: if final_parts: final_parts.pop() while final_parts and '\n' not in final_parts[-1]: final_parts.pop() continue # Otherwise, just pass this token forward final_parts.append(part) return final_parts
[ "def", "_collapse_cursor", "(", "self", ",", "parts", ")", ":", "final_parts", "=", "[", "]", "for", "part", "in", "parts", ":", "# Throw out empty string tokens (\"\")", "if", "not", "part", ":", "continue", "# Go back, deleting every token in the last 'line'", "if", "part", "==", "CursorMoveUp", ":", "if", "final_parts", ":", "final_parts", ".", "pop", "(", ")", "while", "final_parts", "and", "'\\n'", "not", "in", "final_parts", "[", "-", "1", "]", ":", "final_parts", ".", "pop", "(", ")", "continue", "# Otherwise, just pass this token forward", "final_parts", ".", "append", "(", "part", ")", "return", "final_parts" ]
Act on any CursorMoveUp commands by deleting preceding tokens
[ "Act", "on", "any", "CursorMoveUp", "commands", "by", "deleting", "preceding", "tokens" ]
ac3b230f29d3ab180d29efd98c14ffef29707e2b
https://github.com/ralphbean/ansi2html/blob/ac3b230f29d3ab180d29efd98c14ffef29707e2b/ansi2html/converter.py#L413-L436
train
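The collapsing behavior is easiest to see on a small token stream; a self-contained sketch with a stand-in sentinel (the real converter defines its own CursorMoveUp marker):

CursorMoveUp = object()  # stand-in marker, for illustration only

def collapse_cursor(parts):
    final_parts = []
    for part in parts:
        if not part:
            continue
        if part is CursorMoveUp:
            if final_parts:
                final_parts.pop()
            # Keep popping until the previous newline is reached.
            while final_parts and '\n' not in final_parts[-1]:
                final_parts.pop()
            continue
        final_parts.append(part)
    return final_parts

tokens = ['line one\n', 'part', 'ial', CursorMoveUp, 'rewritten']
print(collapse_cursor(tokens))  # ['line one\n', 'rewritten']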
ralphbean/ansi2html
ansi2html/converter.py
Ansi2HTMLConverter.prepare
def prepare(self, ansi='', ensure_trailing_newline=False): """ Load the contents of 'ansi' into this object """ body, styles = self.apply_regex(ansi) if ensure_trailing_newline and _needs_extra_newline(body): body += '\n' self._attrs = { 'dark_bg': self.dark_bg, 'line_wrap': self.line_wrap, 'font_size': self.font_size, 'body': body, 'styles': styles, } return self._attrs
python
def prepare(self, ansi='', ensure_trailing_newline=False): """ Load the contents of 'ansi' into this object """ body, styles = self.apply_regex(ansi) if ensure_trailing_newline and _needs_extra_newline(body): body += '\n' self._attrs = { 'dark_bg': self.dark_bg, 'line_wrap': self.line_wrap, 'font_size': self.font_size, 'body': body, 'styles': styles, } return self._attrs
[ "def", "prepare", "(", "self", ",", "ansi", "=", "''", ",", "ensure_trailing_newline", "=", "False", ")", ":", "body", ",", "styles", "=", "self", ".", "apply_regex", "(", "ansi", ")", "if", "ensure_trailing_newline", "and", "_needs_extra_newline", "(", "body", ")", ":", "body", "+=", "'\\n'", "self", ".", "_attrs", "=", "{", "'dark_bg'", ":", "self", ".", "dark_bg", ",", "'line_wrap'", ":", "self", ".", "line_wrap", ",", "'font_size'", ":", "self", ".", "font_size", ",", "'body'", ":", "body", ",", "'styles'", ":", "styles", ",", "}", "return", "self", ".", "_attrs" ]
Load the contents of 'ansi' into this object
[ "Load", "the", "contents", "of", "ansi", "into", "this", "object" ]
ac3b230f29d3ab180d29efd98c14ffef29707e2b
https://github.com/ralphbean/ansi2html/blob/ac3b230f29d3ab180d29efd98c14ffef29707e2b/ansi2html/converter.py#L438-L454
train
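prepare is typically driven indirectly through the converter's public convert method; a usage sketch assuming the ansi2html package is installed:

from ansi2html import Ansi2HTMLConverter

conv = Ansi2HTMLConverter()
print(conv.convert('\x1b[31mred text\x1b[0m', full=False))  # HTML fragment only
html_page = conv.convert('\x1b[31mred text\x1b[0m')         # full document with styles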
PyO3/setuptools-rust
setuptools_rust/build_ext.py
build_ext.run
def run(self): """Run build_rust sub command """ if self.has_rust_extensions(): log.info("running build_rust") build_rust = self.get_finalized_command("build_rust") build_rust.inplace = self.inplace build_rust.run() _build_ext.run(self)
python
def run(self): """Run build_rust sub command """ if self.has_rust_extensions(): log.info("running build_rust") build_rust = self.get_finalized_command("build_rust") build_rust.inplace = self.inplace build_rust.run() _build_ext.run(self)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "has_rust_extensions", "(", ")", ":", "log", ".", "info", "(", "\"running build_rust\"", ")", "build_rust", "=", "self", ".", "get_finalized_command", "(", "\"build_rust\"", ")", "build_rust", ".", "inplace", "=", "self", ".", "inplace", "build_rust", ".", "run", "(", ")", "_build_ext", ".", "run", "(", "self", ")" ]
Run build_rust sub command
[ "Run", "build_rust", "sub", "command" ]
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/build_ext.py#L20-L28
train
PyO3/setuptools-rust
setuptools_rust/extension.py
RustExtension.get_lib_name
def get_lib_name(self):
    """ Parse Cargo.toml to get the name of the shared library. """
    # We import in here to make sure the setup_requires are already installed
    import toml

    cfg = toml.load(self.path)
    name = cfg.get("lib", {}).get("name")
    if name is None:
        name = cfg.get("package", {}).get("name")
    if name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section"
        )
    name = re.sub(r"[./\\-]", "_", name)
    return name
python
def get_lib_name(self):
    """ Parse Cargo.toml to get the name of the shared library. """
    # We import in here to make sure the setup_requires are already installed
    import toml

    cfg = toml.load(self.path)
    name = cfg.get("lib", {}).get("name")
    if name is None:
        name = cfg.get("package", {}).get("name")
    if name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section"
        )
    name = re.sub(r"[./\\-]", "_", name)
    return name
[ "def", "get_lib_name", "(", "self", ")", ":", "# We import in here to make sure the setup_requires are already installed", "import", "toml", "cfg", "=", "toml", ".", "load", "(", "self", ".", "path", ")", "name", "=", "cfg", ".", "get", "(", "\"lib\"", ",", "{", "}", ")", ".", "get", "(", "\"name\"", ")", "if", "name", "is", "None", ":", "name", "=", "cfg", ".", "get", "(", "\"package\"", ",", "{", "}", ")", ".", "get", "(", "\"name\"", ")", "if", "name", "is", "None", ":", "raise", "Exception", "(", "\"Can not parse library name from Cargo.toml. \"", "\"Cargo.toml missing value for 'name' key \"", "\"in both the [package] section and the [lib] section\"", ")", "name", "=", "re", ".", "sub", "(", "r\"[./\\\\-]\"", ",", "\"_\"", ",", "name", ")", "return", "name" ]
Parse Cargo.toml to get the name of the shared library.
[ "Parse", "Cargo", ".", "toml", "to", "get", "the", "name", "of", "the", "shared", "library", "." ]
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/extension.py#L106-L122
train
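A sketch of the [lib] / [package] fallback on an inline manifest, re-implemented so it runs without a project checkout (uses the same toml package the method imports lazily):

import re
import toml

cfg = toml.loads('[package]\nname = "my-crate"\n')  # no [lib] section here
name = cfg.get("lib", {}).get("name") or cfg.get("package", {}).get("name")
print(re.sub(r"[./\\-]", "_", name))  # my_crate -- separators normalized to underscores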
PyO3/setuptools-rust
setuptools_rust/tomlgen.py
find_rust_extensions
def find_rust_extensions(*directories, **kwargs):
    """Attempt to find Rust extensions in given directories.

    This function will recurse through the given directories to find a
    file whose name is ``libfile``. When such a file is found, an extension
    is created, expecting the cargo manifest file (``Cargo.toml``) to be
    next to that file. The extension destination will be deduced from the
    name of the directory where that ``libfile`` is contained.

    Arguments:
        directories (list, *optional*): a list of directories to walk
            through recursively to find extensions. If none are given, then
            the current directory will be used instead.

    Keyword Arguments:
        libfile (str): the name of the file to look for when searching for
            Rust extensions. Defaults to ``lib.rs``, but might be changed
            to allow defining more *Pythonic* filenames (like
            ``__init__.rs``)!

    Note:
        All other keyword arguments will be directly passed to the
        `RustExtension` instance created when an extension is found. One
        may be interested in passing ``binding`` and ``strip`` options::

            >>> import setuptools_rust as rust
            >>> rust.find_rust_extensions(binding=rust.Binding.PyO3)

    Example:
        Consider the following project::

            lib/
            └ mylib/
              └ rustext/
                ├ lib.rs
                ├ ...
                └ Cargo.toml
            setup.py

        There is only one extension that can be found in the ``lib``
        module::

            >>> import setuptools_rust as rust
            >>> for ext in rust.find_rust_extensions("lib"):
            ...    print(ext.name, "=>", ext.path)
            lib.mylib.rustext => lib/mylib/rustext/Cargo.toml

    """
    # Get the file used to mark a Rust extension
    libfile = kwargs.get("libfile", "lib.rs")

    # Get the directories to explore
    directories = directories or [os.getcwd()]

    extensions = []
    for directory in directories:
        for base, dirs, files in os.walk(directory):
            if libfile in files:
                dotpath = os.path.relpath(base).replace(os.path.sep, ".")
                tomlpath = os.path.join(base, "Cargo.toml")
                ext = RustExtension(dotpath, tomlpath, **kwargs)
                ext.libfile = os.path.join(base, libfile)
                extensions.append(ext)

    return extensions
python
def find_rust_extensions(*directories, **kwargs):
    """Attempt to find Rust extensions in given directories.

    This function will recurse through the given directories to find a
    file whose name is ``libfile``. When such a file is found, an extension
    is created, expecting the cargo manifest file (``Cargo.toml``) to be
    next to that file. The extension destination will be deduced from the
    name of the directory where that ``libfile`` is contained.

    Arguments:
        directories (list, *optional*): a list of directories to walk
            through recursively to find extensions. If none are given, then
            the current directory will be used instead.

    Keyword Arguments:
        libfile (str): the name of the file to look for when searching for
            Rust extensions. Defaults to ``lib.rs``, but might be changed
            to allow defining more *Pythonic* filenames (like
            ``__init__.rs``)!

    Note:
        All other keyword arguments will be directly passed to the
        `RustExtension` instance created when an extension is found. One
        may be interested in passing ``binding`` and ``strip`` options::

            >>> import setuptools_rust as rust
            >>> rust.find_rust_extensions(binding=rust.Binding.PyO3)

    Example:
        Consider the following project::

            lib/
            └ mylib/
              └ rustext/
                ├ lib.rs
                ├ ...
                └ Cargo.toml
            setup.py

        There is only one extension that can be found in the ``lib``
        module::

            >>> import setuptools_rust as rust
            >>> for ext in rust.find_rust_extensions("lib"):
            ...    print(ext.name, "=>", ext.path)
            lib.mylib.rustext => lib/mylib/rustext/Cargo.toml

    """
    # Get the file used to mark a Rust extension
    libfile = kwargs.get("libfile", "lib.rs")

    # Get the directories to explore
    directories = directories or [os.getcwd()]

    extensions = []
    for directory in directories:
        for base, dirs, files in os.walk(directory):
            if libfile in files:
                dotpath = os.path.relpath(base).replace(os.path.sep, ".")
                tomlpath = os.path.join(base, "Cargo.toml")
                ext = RustExtension(dotpath, tomlpath, **kwargs)
                ext.libfile = os.path.join(base, libfile)
                extensions.append(ext)

    return extensions
[ "def", "find_rust_extensions", "(", "*", "directories", ",", "*", "*", "kwargs", ")", ":", "# Get the file used to mark a Rust extension", "libfile", "=", "kwargs", ".", "get", "(", "\"libfile\"", ",", "\"lib.rs\"", ")", "# Get the directories to explore", "directories", "=", "directories", "or", "[", "os", ".", "getcwd", "(", ")", "]", "extensions", "=", "[", "]", "for", "directory", "in", "directories", ":", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "directory", ")", ":", "if", "libfile", "in", "files", ":", "dotpath", "=", "os", ".", "path", ".", "relpath", "(", "base", ")", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "\".\"", ")", "tomlpath", "=", "os", ".", "path", ".", "join", "(", "base", ",", "\"Cargo.toml\"", ")", "ext", "=", "RustExtension", "(", "dotpath", ",", "tomlpath", ",", "*", "*", "kwargs", ")", "ext", ".", "libfile", "=", "os", ".", "path", ".", "join", "(", "base", ",", "libfile", ")", "extensions", ".", "append", "(", "ext", ")", "return", "extensions" ]
Attempt to find Rust extensions in given directories.

    This function will recurse through the given directories to find a
    file whose name is ``libfile``. When such a file is found, an extension
    is created, expecting the cargo manifest file (``Cargo.toml``) to be
    next to that file. The extension destination will be deduced from the
    name of the directory where that ``libfile`` is contained.

    Arguments:
        directories (list, *optional*): a list of directories to walk
            through recursively to find extensions. If none are given, then
            the current directory will be used instead.

    Keyword Arguments:
        libfile (str): the name of the file to look for when searching for
            Rust extensions. Defaults to ``lib.rs``, but might be changed
            to allow defining more *Pythonic* filenames (like
            ``__init__.rs``)!

    Note:
        All other keyword arguments will be directly passed to the
        `RustExtension` instance created when an extension is found. One
        may be interested in passing ``binding`` and ``strip`` options::

            >>> import setuptools_rust as rust
            >>> rust.find_rust_extensions(binding=rust.Binding.PyO3)

    Example:
        Consider the following project::

            lib/
            └ mylib/
              └ rustext/
                ├ lib.rs
                ├ ...
                └ Cargo.toml
            setup.py

        There is only one extension that can be found in the ``lib``
        module::

            >>> import setuptools_rust as rust
            >>> for ext in rust.find_rust_extensions("lib"):
            ...    print(ext.name, "=>", ext.path)
            lib.mylib.rustext => lib/mylib/rustext/Cargo.toml
[ "Attempt", "to", "find", "Rust", "extensions", "in", "given", "directories", "." ]
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/tomlgen.py#L207-L274
train
gamechanger/mongothon
mongothon/events.py
EventHandlerRegistrar.register
def register(self, event, fn):
    """
    Registers the given function as a handler to be applied in
    response to the given event.
    """
    # TODO: Can we check the method signature?
    self._handler_dict.setdefault(event, [])
    if fn not in self._handler_dict[event]:
        self._handler_dict[event].append(fn)
python
def register(self, event, fn):
    """
    Registers the given function as a handler to be applied in
    response to the given event.
    """
    # TODO: Can we check the method signature?
    self._handler_dict.setdefault(event, [])
    if fn not in self._handler_dict[event]:
        self._handler_dict[event].append(fn)
[ "def", "register", "(", "self", ",", "event", ",", "fn", ")", ":", "# TODO: Can we check the method signature?", "self", ".", "_handler_dict", ".", "setdefault", "(", "event", ",", "[", "]", ")", "if", "fn", "not", "in", "self", ".", "_handler_dict", "[", "event", "]", ":", "self", ".", "_handler_dict", "[", "event", "]", ".", "append", "(", "fn", ")" ]
Registers the given function as a handler to be applied in response to the given event.
[ "Registers", "the", "given", "function", "as", "a", "handler", "to", "be", "applied", "in", "response", "to", "the", "given", "event", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L15-L24
train
gamechanger/mongothon
mongothon/events.py
EventHandlerRegistrar.apply
def apply(self, event, document, *args, **kwargs):
    """
    Applies all middleware functions registered against the given event,
    in order, to the given document.
    """
    for fn in self._handler_dict.get(event, []):
        fn(document, *args, **kwargs)
python
def apply(self, event, document, *args, **kwargs):
    """
    Applies all middleware functions registered against the given event,
    in order, to the given document.
    """
    for fn in self._handler_dict.get(event, []):
        fn(document, *args, **kwargs)
[ "def", "apply", "(", "self", ",", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "fn", "in", "self", ".", "_handler_dict", ".", "get", "(", "event", ",", "[", "]", ")", ":", "fn", "(", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Applies all middleware functions registered against the given event, in order, to the given document.
[ "Applies", "all", "middleware", "functions", "registered", "against", "the", "given", "event", "in", "order", "to", "the", "given", "document", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L26-L32
train
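A self-contained round trip through register/apply/deregister (handler and event name are illustrative; assumes the registrar's constructor takes no arguments):

from mongothon.events import EventHandlerRegistrar

def on_save(document):
    print('saving', document)

registrar = EventHandlerRegistrar()
registrar.register('did_save', on_save)
registrar.apply('did_save', {'_id': 1})   # prints: saving {'_id': 1}
registrar.deregister('did_save', on_save)
registrar.apply('did_save', {'_id': 1})   # no handlers registered, nothing happens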
gamechanger/mongothon
mongothon/events.py
EventHandlerRegistrar.deregister
def deregister(self, event, fn): """ Deregister the handler function from the given event. """ if event in self._handler_dict and fn in self._handler_dict[event]: self._handler_dict[event].remove(fn)
python
def deregister(self, event, fn): """ Deregister the handler function from the given event. """ if event in self._handler_dict and fn in self._handler_dict[event]: self._handler_dict[event].remove(fn)
[ "def", "deregister", "(", "self", ",", "event", ",", "fn", ")", ":", "if", "event", "in", "self", ".", "_handler_dict", "and", "fn", "in", "self", ".", "_handler_dict", "[", "event", "]", ":", "self", ".", "_handler_dict", "[", "event", "]", ".", "remove", "(", "fn", ")" ]
Deregister the handler function from the given event.
[ "Deregister", "the", "handler", "function", "from", "the", "given", "event", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L34-L39
train
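Taken together, the register, apply, and deregister records above describe a simple per-event handler dictionary. A minimal round-trip sketch, assuming EventHandlerRegistrar.__init__ initializes _handler_dict to an empty dict (its constructor is not among these records):

from mongothon.events import EventHandlerRegistrar

registrar = EventHandlerRegistrar()

def on_save(document):
    print("saved", document)

registrar.register('did_save', on_save)
registrar.register('did_save', on_save)       # duplicate registration is ignored
registrar.apply('did_save', {'name': 'bob'})  # prints: saved {'name': 'bob'}
registrar.deregister('did_save', on_save)
registrar.apply('did_save', {'name': 'bob'})  # no handlers left; nothing runs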
gamechanger/mongothon
mongothon/queries.py
ScopeBuilder.unpack_scope
def unpack_scope(cls, scope): """Unpacks the response from a scope function. The function should return either a query, a query and a projection, or a query, a projection, and a query options hash.""" query = {} projection = {} options = {} if isinstance(scope, tuple): if len(scope) > 3: raise ValueError("Invalid scope") if len(scope) >= 1: query = scope[0] if len(scope) >= 2: projection = scope[1] if len(scope) == 3: options = scope[2] elif isinstance(scope, dict): query = scope else: raise ValueError("Invalid scope") return query, projection, options
python
def unpack_scope(cls, scope): """Unpacks the response from a scope function. The function should return either a query, a query and a projection, or a query, a projection, and a query options hash.""" query = {} projection = {} options = {} if isinstance(scope, tuple): if len(scope) > 3: raise ValueError("Invalid scope") if len(scope) >= 1: query = scope[0] if len(scope) >= 2: projection = scope[1] if len(scope) == 3: options = scope[2] elif isinstance(scope, dict): query = scope else: raise ValueError("Invalid scope") return query, projection, options
[ "def", "unpack_scope", "(", "cls", ",", "scope", ")", ":", "query", "=", "{", "}", "projection", "=", "{", "}", "options", "=", "{", "}", "if", "isinstance", "(", "scope", ",", "tuple", ")", ":", "if", "len", "(", "scope", ")", ">", "3", ":", "raise", "ValueError", "(", "\"Invalid scope\"", ")", "if", "len", "(", "scope", ")", ">=", "1", ":", "query", "=", "scope", "[", "0", "]", "if", "len", "(", "scope", ")", ">=", "2", ":", "projection", "=", "scope", "[", "1", "]", "if", "len", "(", "scope", ")", "==", "3", ":", "options", "=", "scope", "[", "2", "]", "elif", "isinstance", "(", "scope", ",", "dict", ")", ":", "query", "=", "scope", "else", ":", "raise", "ValueError", "(", "\"Invalid scope\"", ")", "return", "query", ",", "projection", ",", "options" ]
Unpacks the response from a scope function. The function should return either a query, a query and a projection, or a query, a projection, and a query options hash.
[ "Unpacks", "the", "response", "from", "a", "scope", "function", ".", "The", "function", "should", "return", "either", "a", "query", "a", "query", "and", "a", "projection", "or", "a", "query", "a", "projection", "and", "an", "query", "options", "hash", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L24-L46
train
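The three accepted scope shapes are easiest to see side by side. A hedged illustration, assuming the decorator (not shown in this record) makes unpack_scope a classmethod:

ScopeBuilder.unpack_scope({'active': True})
# -> ({'active': True}, {}, {})
ScopeBuilder.unpack_scope(({'active': True}, {'name': 1}))
# -> ({'active': True}, {'name': 1}, {})
ScopeBuilder.unpack_scope(({'active': True}, {'name': 1}, {'limit': 10}))
# -> ({'active': True}, {'name': 1}, {'limit': 10})
ScopeBuilder.unpack_scope("bad")  # raises ValueError("Invalid scope")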
gamechanger/mongothon
mongothon/queries.py
ScopeBuilder.register_fn
def register_fn(cls, f): """Registers a scope function on this builder.""" def inner(self, *args, **kwargs): try: query, projection, options = cls.unpack_scope(f(*args, **kwargs)) new_query = deepcopy(self.query) new_projection = deepcopy(self.projection) new_options = deepcopy(self.options) deep_merge(query, new_query) new_projection.update(projection) new_options.update(options) return ScopeBuilder(self.model, self.fns, new_query, new_projection, new_options) except ValueError: raise ValueError("Scope function \"{}\" returns an invalid scope".format(f.__name__)) setattr(cls, f.__name__, inner)
python
def register_fn(cls, f): """Registers a scope function on this builder.""" def inner(self, *args, **kwargs): try: query, projection, options = cls.unpack_scope(f(*args, **kwargs)) new_query = deepcopy(self.query) new_projection = deepcopy(self.projection) new_options = deepcopy(self.options) deep_merge(query, new_query) new_projection.update(projection) new_options.update(options) return ScopeBuilder(self.model, self.fns, new_query, new_projection, new_options) except ValueError: raise ValueError("Scope function \"{}\" returns an invalid scope".format(f.__name__)) setattr(cls, f.__name__, inner)
[ "def", "register_fn", "(", "cls", ",", "f", ")", ":", "def", "inner", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "query", ",", "projection", ",", "options", "=", "cls", ".", "unpack_scope", "(", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "new_query", "=", "deepcopy", "(", "self", ".", "query", ")", "new_projection", "=", "deepcopy", "(", "self", ".", "projection", ")", "new_options", "=", "deepcopy", "(", "self", ".", "options", ")", "deep_merge", "(", "query", ",", "new_query", ")", "new_projection", ".", "update", "(", "projection", ")", "new_options", ".", "update", "(", "options", ")", "return", "ScopeBuilder", "(", "self", ".", "model", ",", "self", ".", "fns", ",", "new_query", ",", "new_projection", ",", "new_options", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Scope function \\\"{}\\ returns an invalid scope\"", ".", "format", "(", "f", ".", "__name__", ")", ")", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "inner", ")" ]
Registers a scope function on this builder.
[ "Registers", "a", "scope", "function", "on", "this", "builder", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L50-L66
train
gamechanger/mongothon
mongothon/queries.py
ScopeBuilder.cursor
def cursor(self): """ Returns a cursor for the currently assembled query, creating it if it doesn't already exist. """ if not self._active_cursor: self._active_cursor = self.model.find(self.query, self.projection or None, **self.options) return self._active_cursor
python
def cursor(self): """ Returns a cursor for the currently assembled query, creating it if it doesn't already exist. """ if not self._active_cursor: self._active_cursor = self.model.find(self.query, self.projection or None, **self.options) return self._active_cursor
[ "def", "cursor", "(", "self", ")", ":", "if", "not", "self", ".", "_active_cursor", ":", "self", ".", "_active_cursor", "=", "self", ".", "model", ".", "find", "(", "self", ".", "query", ",", "self", ".", "projection", "or", "None", ",", "*", "*", "self", ".", "options", ")", "return", "self", ".", "_active_cursor" ]
Returns a cursor for the currently assembled query, creating it if it doesn't already exist.
[ "Returns", "a", "cursor", "for", "the", "currently", "assembled", "query", "creating", "it", "if", "it", "doesn", "t", "already", "exist", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L80-L89
train
gamechanger/mongothon
mongothon/model.py
Model._ensure_object_id
def _ensure_object_id(cls, id): """Checks whether the given id is an ObjectId instance, and if not wraps it.""" if isinstance(id, ObjectId): return id if isinstance(id, basestring) and OBJECTIDEXPR.match(id): return ObjectId(id) return id
python
def _ensure_object_id(cls, id): """Checks whether the given id is an ObjectId instance, and if not wraps it.""" if isinstance(id, ObjectId): return id if isinstance(id, basestring) and OBJECTIDEXPR.match(id): return ObjectId(id) return id
[ "def", "_ensure_object_id", "(", "cls", ",", "id", ")", ":", "if", "isinstance", "(", "id", ",", "ObjectId", ")", ":", "return", "id", "if", "isinstance", "(", "id", ",", "basestring", ")", "and", "OBJECTIDEXPR", ".", "match", "(", "id", ")", ":", "return", "ObjectId", "(", "id", ")", "return", "id" ]
Checks whether the given id is an ObjectId instance, and if not wraps it.
[ "Checks", "whether", "the", "given", "id", "is", "an", "ObjectId", "instance", "and", "if", "not", "wraps", "it", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L69-L77
train
gamechanger/mongothon
mongothon/model.py
Model.apply_defaults
def apply_defaults(self): """Apply schema defaults to this document.""" self.emit('will_apply_defaults') self.schema.apply_defaults(self) self.emit('did_apply_defaults')
python
def apply_defaults(self): """Apply schema defaults to this document.""" self.emit('will_apply_defaults') self.schema.apply_defaults(self) self.emit('did_apply_defaults')
[ "def", "apply_defaults", "(", "self", ")", ":", "self", ".", "emit", "(", "'will_apply_defaults'", ")", "self", ".", "schema", ".", "apply_defaults", "(", "self", ")", "self", ".", "emit", "(", "'did_apply_defaults'", ")" ]
Apply schema defaults to this document.
[ "Apply", "schema", "defaults", "to", "this", "document", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L106-L110
train
gamechanger/mongothon
mongothon/model.py
Model.reload
def reload(self): """Reloads the current model's data from the underlying database record, updating it in-place.""" self.emit('will_reload') self.populate(self.collection.find_one(type(self)._id_spec(self['_id']))) self.emit('did_reload')
python
def reload(self): """Reloads the current model's data from the underlying database record, updating it in-place.""" self.emit('will_reload') self.populate(self.collection.find_one(type(self)._id_spec(self['_id']))) self.emit('did_reload')
[ "def", "reload", "(", "self", ")", ":", "self", ".", "emit", "(", "'will_reload'", ")", "self", ".", "populate", "(", "self", ".", "collection", ".", "find_one", "(", "type", "(", "self", ")", ".", "_id_spec", "(", "self", "[", "'_id'", "]", ")", ")", ")", "self", ".", "emit", "(", "'did_reload'", ")" ]
Reloads the current model's data from the underlying database record, updating it in-place.
[ "Reloads", "the", "current", "model", "s", "data", "from", "the", "underlying", "database", "record", "updating", "it", "in", "-", "place", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L195-L200
train
gamechanger/mongothon
mongothon/model.py
Model.on
def on(cls, event, handler_func=None): """ Registers a handler function to be invoked whenever an instance of the model emits the given event. This method can either be called directly, passing a function reference: MyModel.on('did_save', my_function) ...or used as a decorator of the function to be registered. @MyModel.on('did_save') def myfunction(my_model): pass """ if handler_func: cls.handler_registrar().register(event, handler_func) return def register(fn): cls.handler_registrar().register(event, fn) return fn return register
python
def on(cls, event, handler_func=None): """ Registers a handler function to be invoked whenever an instance of the model emits the given event. This method can either be called directly, passing a function reference: MyModel.on('did_save', my_function) ...or used as a decorator of the function to be registered. @MyModel.on('did_save') def myfunction(my_model): pass """ if handler_func: cls.handler_registrar().register(event, handler_func) return def register(fn): cls.handler_registrar().register(event, fn) return fn return register
[ "def", "on", "(", "cls", ",", "event", ",", "handler_func", "=", "None", ")", ":", "if", "handler_func", ":", "cls", ".", "handler_registrar", "(", ")", ".", "register", "(", "event", ",", "handler_func", ")", "return", "def", "register", "(", "fn", ")", ":", "cls", ".", "handler_registrar", "(", ")", ".", "register", "(", "event", ",", "fn", ")", "return", "fn", "return", "register" ]
Registers a handler function to be invoked whenever an instance of the model emits the given event. This method can either be called directly, passing a function reference: MyModel.on('did_save', my_function) ...or used as a decorator of the function to be registered. @MyModel.on('did_save') def myfunction(my_model): pass
[ "Registers", "a", "handler", "function", "whenever", "an", "instance", "of", "the", "model", "emits", "the", "given", "event", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L203-L227
train
gamechanger/mongothon
mongothon/model.py
Model._emit
def _emit(self, event, document, *args, **kwargs): """ Inner version of emit which passes the given document as the primary argument to handler functions. """ self.handler_registrar().apply(event, document, *args, **kwargs)
python
def _emit(self, event, document, *args, **kwargs): """ Inner version of emit which passes the given document as the primary argument to handler functions. """ self.handler_registrar().apply(event, document, *args, **kwargs)
[ "def", "_emit", "(", "self", ",", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "handler_registrar", "(", ")", ".", "apply", "(", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Inner version of emit which passes the given document as the primary argument to handler functions.
[ "Inner", "version", "of", "emit", "which", "passes", "the", "given", "document", "as", "the", "primary", "argument", "to", "handler", "functions", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L229-L234
train
gamechanger/mongothon
mongothon/model.py
Model.emit
def emit(self, event, *args, **kwargs): """ Emits an event call to all handler functions registered against this model's class and the given event type. """ self._emit(event, self, *args, **kwargs)
python
def emit(self, event, *args, **kwargs): """ Emits an event call to all handler functions registered against this model's class and the given event type. """ self._emit(event, self, *args, **kwargs)
[ "def", "emit", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_emit", "(", "event", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Emits an event call to all handler functions registered against this model's class and the given event type.
[ "Emits", "an", "event", "call", "to", "all", "handler", "functions", "registered", "against", "this", "model", "s", "class", "and", "the", "given", "event", "type", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L237-L242
train
gamechanger/mongothon
mongothon/model.py
Model.static_method
def static_method(cls, f): """Decorator which dynamically binds static methods to the model for later use.""" setattr(cls, f.__name__, staticmethod(f)) return f
python
def static_method(cls, f): """Decorator which dynamically binds static methods to the model for later use.""" setattr(cls, f.__name__, staticmethod(f)) return f
[ "def", "static_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "staticmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds static methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "static", "methods", "to", "the", "model", "for", "later", "use", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L267-L270
train
gamechanger/mongothon
mongothon/model.py
Model.class_method
def class_method(cls, f): """Decorator which dynamically binds class methods to the model for later use.""" setattr(cls, f.__name__, classmethod(f)) return f
python
def class_method(cls, f): """Decorator which dynamically binds class methods to the model for later use.""" setattr(cls, f.__name__, classmethod(f)) return f
[ "def", "class_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "classmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds class methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "class", "methods", "to", "the", "model", "for", "later", "use", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L273-L276
train
gamechanger/mongothon
mongothon/model.py
Model.scope
def scope(cls, f): """Decorator which can dynamically attach a query scope to the model.""" if not hasattr(cls, "scopes"): cls.scopes = copy(STANDARD_SCOPES) cls.scopes.append(f) def create_builder(self, *args, **kwargs): bldr = ScopeBuilder(cls, cls.scopes) return getattr(bldr, f.__name__)(*args, **kwargs) setattr(cls, f.__name__, classmethod(create_builder)) return f
python
def scope(cls, f): """Decorator which can dynamically attach a query scope to the model.""" if not hasattr(cls, "scopes"): cls.scopes = copy(STANDARD_SCOPES) cls.scopes.append(f) def create_builder(self, *args, **kwargs): bldr = ScopeBuilder(cls, cls.scopes) return getattr(bldr, f.__name__)(*args, **kwargs) setattr(cls, f.__name__, classmethod(create_builder)) return f
[ "def", "scope", "(", "cls", ",", "f", ")", ":", "if", "not", "hasattr", "(", "cls", ",", "\"scopes\"", ")", ":", "cls", ".", "scopes", "=", "copy", "(", "STANDARD_SCOPES", ")", "cls", ".", "scopes", ".", "append", "(", "f", ")", "def", "create_builder", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bldr", "=", "ScopeBuilder", "(", "cls", ",", "cls", ".", "scopes", ")", "return", "getattr", "(", "bldr", ",", "f", ".", "__name__", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "classmethod", "(", "create_builder", ")", ")", "return", "f" ]
Decorator which can dynamically attach a query scope to the model.
[ "Decorator", "which", "can", "dynamically", "attach", "a", "query", "scope", "to", "the", "model", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L285-L297
train
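A sketch of how the scope decorator composes with register_fn to support chaining; the model and scope names here are hypothetical:

@MyModel.scope
def active():
    return {'active': True}   # a bare dict is a valid scope

@MyModel.scope
def named(name):
    return {'name': name}

# Each call returns a fresh ScopeBuilder with the queries deep-merged,
# so scopes chain without mutating one another:
builder = MyModel.active().named('bob')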
gamechanger/mongothon
mongothon/__init__.py
_module_name_from_previous_frame
def _module_name_from_previous_frame(num_frames_back): """ Returns the module name associated with a frame `num_frames_back` in the call stack. This function adds 1 to account for itself, so `num_frames_back` should be given relative to the caller. """ frm = inspect.stack()[num_frames_back + 1] return inspect.getmodule(frm[0]).__name__
python
def _module_name_from_previous_frame(num_frames_back): """ Returns the module name associated with a frame `num_frames_back` in the call stack. This function adds 1 to account for itself, so `num_frames_back` should be given relative to the caller. """ frm = inspect.stack()[num_frames_back + 1] return inspect.getmodule(frm[0]).__name__
[ "def", "_module_name_from_previous_frame", "(", "num_frames_back", ")", ":", "frm", "=", "inspect", ".", "stack", "(", ")", "[", "num_frames_back", "+", "1", "]", "return", "inspect", ".", "getmodule", "(", "frm", "[", "0", "]", ")", ".", "__name__" ]
Returns the module name associated with a frame `num_frames_back` in the call stack. This function adds 1 to account for itself, so `num_frames_back` should be given relative to the caller.
[ "Returns", "the", "module", "name", "associated", "with", "a", "frame", "num_frames_back", "in", "the", "call", "stack", ".", "This", "function", "adds", "1", "to", "account", "for", "itself", "so", "num_frames_back", "should", "be", "given", "relative", "to", "the", "caller", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L9-L16
train
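The frame arithmetic is easiest to see from a caller's perspective; a small illustration (the function and module names are hypothetical, and the sketch assumes the helper is visible from the calling module):

def who_called_me():
    # Relative to this function, one frame back is whoever invoked it,
    # so this returns the module name of who_called_me()'s caller.
    return _module_name_from_previous_frame(1)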
gamechanger/mongothon
mongothon/__init__.py
create_model
def create_model(schema, collection, class_name=None): """ Main entry point to creating a new mongothon model. Both schema and Pymongo collection objects must be provided. Returns a new class which can be used as a model class. The class name of the model class by default is inferred from the provided collection (converted to camel case). Optionally, a class_name argument can be provided to override this. """ if not class_name: class_name = camelize(str(collection.name)) model_class = type(class_name, (Model,), dict(schema=schema, _collection_factory=staticmethod(lambda: collection))) # Since we are dynamically creating this class here, we modify __module__ on the # created class to point back to the module from which `create_model` was called model_class.__module__ = _module_name_from_previous_frame(1) return model_class
python
def create_model(schema, collection, class_name=None): """ Main entry point to creating a new mongothon model. Both schema and Pymongo collection objects must be provided. Returns a new class which can be used as a model class. The class name of the model class by default is inferred from the provided collection (converted to camel case). Optionally, a class_name argument can be provided to override this. """ if not class_name: class_name = camelize(str(collection.name)) model_class = type(class_name, (Model,), dict(schema=schema, _collection_factory=staticmethod(lambda: collection))) # Since we are dynamically creating this class here, we modify __module__ on the # created class to point back to the module from which `create_model` was called model_class.__module__ = _module_name_from_previous_frame(1) return model_class
[ "def", "create_model", "(", "schema", ",", "collection", ",", "class_name", "=", "None", ")", ":", "if", "not", "class_name", ":", "class_name", "=", "camelize", "(", "str", "(", "collection", ".", "name", ")", ")", "model_class", "=", "type", "(", "class_name", ",", "(", "Model", ",", ")", ",", "dict", "(", "schema", "=", "schema", ",", "_collection_factory", "=", "staticmethod", "(", "lambda", ":", "collection", ")", ")", ")", "# Since we are dynamically creating this class here, we modify __module__ on the", "# created class to point back to the module from which `create_model` was called", "model_class", ".", "__module__", "=", "_module_name_from_previous_frame", "(", "1", ")", "return", "model_class" ]
Main entry point to creating a new mongothon model. Both schema and Pymongo collection objects must be provided. Returns a new class which can be used as a model class. The class name of the model class by default is inferred from the provided collection (converted to camel case). Optionally, a class_name argument can be provided to override this.
[ "Main", "entry", "point", "to", "creating", "a", "new", "mongothon", "model", ".", "Both", "schema", "and", "Pymongo", "collection", "objects", "must", "be", "provided", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L19-L42
train
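A hedged usage sketch; the database, collection, and schema object are placeholders, and the final line assumes Model, like Document, accepts an initial dict:

from pymongo import MongoClient
from mongothon import create_model

db = MongoClient()['mydb']
Car = create_model(car_schema, db['cars'])  # class name inferred from the
                                            # collection name via camelize
car = Car({'make': 'Peugeot'})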
gamechanger/mongothon
mongothon/__init__.py
create_model_offline
def create_model_offline(schema, collection_factory, class_name): """ Entry point for creating a new Mongothon model without instantiating a database connection. The collection is instead provided through a closure that is resolved upon the model's first database access. """ model_class = type(class_name, (Model,), dict(schema=schema, _collection_factory=staticmethod(collection_factory))) # Since we are dynamically creating this class here, we modify __module__ on the # created class to point back to the module from which `create_model_offline` was called model_class.__module__ = _module_name_from_previous_frame(1) return model_class
python
def create_model_offline(schema, collection_factory, class_name): """ Entry point for creating a new Mongothon model without instantiating a database connection. The collection is instead provided through a closure that is resolved upon the model's first database access. """ model_class = type(class_name, (Model,), dict(schema=schema, _collection_factory=staticmethod(collection_factory))) # Since we are dynamically creating this class here, we modify __module__ on the # created class to point back to the module from which `create_model_offline` was called model_class.__module__ = _module_name_from_previous_frame(1) return model_class
[ "def", "create_model_offline", "(", "schema", ",", "collection_factory", ",", "class_name", ")", ":", "model_class", "=", "type", "(", "class_name", ",", "(", "Model", ",", ")", ",", "dict", "(", "schema", "=", "schema", ",", "_collection_factory", "=", "staticmethod", "(", "collection_factory", ")", ")", ")", "# Since we are dynamically creating this class here, we modify __module__ on the", "# created class to point back to the module from which `create_model_offline` was called", "model_class", ".", "__module__", "=", "_module_name_from_previous_frame", "(", "1", ")", "return", "model_class" ]
Entry point for creating a new Mongothon model without instantiating a database connection. The collection is instead provided through a closure that is resolved upon the model's first database access.
[ "Entry", "point", "for", "creating", "a", "new", "Mongothon", "model", "without", "instantiating", "a", "database", "connection", ".", "The", "collection", "is", "instead", "provided", "through", "a", "closure", "that", "is", "resolved", "upon", "the", "model", "s", "first", "database", "access", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L45-L59
train
gamechanger/mongothon
mongothon/document.py
wrap
def wrap(value): """ Wraps the given value in a Document or DocumentList as applicable. """ if isinstance(value, Document) or isinstance(value, DocumentList): return value elif isinstance(value, dict): return Document(value) elif isinstance(value, list): return DocumentList(value) else: return value
python
def wrap(value): """ Wraps the given value in a Document or DocumentList as applicable. """ if isinstance(value, Document) or isinstance(value, DocumentList): return value elif isinstance(value, dict): return Document(value) elif isinstance(value, list): return DocumentList(value) else: return value
[ "def", "wrap", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", "or", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "Document", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "return", "DocumentList", "(", "value", ")", "else", ":", "return", "value" ]
Wraps the given value in a Document or DocumentList as applicable.
[ "Wraps", "the", "given", "value", "in", "a", "Document", "or", "DocumentList", "as", "applicable", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L3-L14
train
gamechanger/mongothon
mongothon/document.py
unwrap
def unwrap(value): """ Unwraps the given Document or DocumentList as applicable. """ if isinstance(value, Document): return value.to_dict() elif isinstance(value, DocumentList): return value.to_list() else: return value
python
def unwrap(value): """ Unwraps the given Document or DocumentList as applicable. """ if isinstance(value, Document): return value.to_dict() elif isinstance(value, DocumentList): return value.to_list() else: return value
[ "def", "unwrap", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", ":", "return", "value", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "return", "value", ".", "to_list", "(", ")", "else", ":", "return", "value" ]
Unwraps the given Document or DocumentList as applicable.
[ "Unwraps", "the", "given", "Document", "or", "DocumentList", "as", "applicable", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L17-L26
train
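A quick round trip through the two helpers above (whether nested values are wrapped recursively depends on Document/DocumentList internals not shown here):

doc = wrap({'name': 'bob'})  # -> Document
lst = wrap([1, 2, 3])        # -> DocumentList
wrap(42)                     # -> 42; non-container values pass through

unwrap(doc)                  # -> plain dict again, via to_dict()
unwrap(lst)                  # -> plain list again, via to_list()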
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_change
def note_change(self, key, value): """ Updates change state to reflect a change to a field. Takes care of ignoring no-ops and reversions, and takes appropriate steps if the field was previously deleted or added, to ensure the change state purely reflects the diff since the last reset. """ # If we're changing the value and we haven't done so already, note it. if value != self._instance[key] and key not in self._previous and key not in self._added: self._previous[key] = self._instance[key] # If we're setting the value back to the original value, discard the change note if key in self._previous and value == self._previous[key]: del self._previous[key]
python
def note_change(self, key, value): """ Updates change state to reflect a change to a field. Takes care of ignoring no-ops and reversions, and takes appropriate steps if the field was previously deleted or added, to ensure the change state purely reflects the diff since the last reset. """ # If we're changing the value and we haven't done so already, note it. if value != self._instance[key] and key not in self._previous and key not in self._added: self._previous[key] = self._instance[key] # If we're setting the value back to the original value, discard the change note if key in self._previous and value == self._previous[key]: del self._previous[key]
[ "def", "note_change", "(", "self", ",", "key", ",", "value", ")", ":", "# If we're changing the value and we haven't done so already, note it.", "if", "value", "!=", "self", ".", "_instance", "[", "key", "]", "and", "key", "not", "in", "self", ".", "_previous", "and", "key", "not", "in", "self", ".", "_added", ":", "self", ".", "_previous", "[", "key", "]", "=", "self", ".", "_instance", "[", "key", "]", "# If we're setting the value back to the original value, discard the change note", "if", "key", "in", "self", ".", "_previous", "and", "value", "==", "self", ".", "_previous", "[", "key", "]", ":", "del", "self", ".", "_previous", "[", "key", "]" ]
Updates change state to reflect a change to a field. Takes care of ignoring no-ops and reversions, and takes appropriate steps if the field was previously deleted or added, to ensure the change state purely reflects the diff since the last reset.
[ "Updates", "change", "state", "to", "reflect", "a", "change", "to", "a", "field", ".", "Takes", "care", "of", "ignoring", "no", "-", "ops", "reversions", "and", "takes", "appropriate", "steps", "if", "the", "field", "was", "previously", "deleted", "or", "added", "to", "ensure", "the", "change", "state", "purely", "reflects", "the", "diff", "since", "last", "reset", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L49-L62
train
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_addition
def note_addition(self, key, value): """ Updates the change state to reflect the addition of a field. Detects previous changes and deletions of the field and acts accordingly. """ # If we're adding a field we previously deleted, remove the deleted note. if key in self._deleted: # If the key we're adding back has a different value, then it's a change if value != self._deleted[key]: self._previous[key] = self._deleted[key] del self._deleted[key] else: self._added.append(key)
python
def note_addition(self, key, value): """ Updates the change state to reflect the addition of a field. Detects previous changes and deletions of the field and acts accordingly. """ # If we're adding a field we previously deleted, remove the deleted note. if key in self._deleted: # If the key we're adding back has a different value, then it's a change if value != self._deleted[key]: self._previous[key] = self._deleted[key] del self._deleted[key] else: self._added.append(key)
[ "def", "note_addition", "(", "self", ",", "key", ",", "value", ")", ":", "# If we're adding a field we previously deleted, remove the deleted note.", "if", "key", "in", "self", ".", "_deleted", ":", "# If the key we're adding back has a different value, then it's a change", "if", "value", "!=", "self", ".", "_deleted", "[", "key", "]", ":", "self", ".", "_previous", "[", "key", "]", "=", "self", ".", "_deleted", "[", "key", "]", "del", "self", ".", "_deleted", "[", "key", "]", "else", ":", "self", ".", "_added", ".", "append", "(", "key", ")" ]
Updates the change state to reflect the addition of a field. Detects previous changes and deletions of the field and acts accordingly.
[ "Updates", "the", "change", "state", "to", "reflect", "the", "addition", "of", "a", "field", ".", "Detects", "previous", "changes", "and", "deletions", "of", "the", "field", "and", "acts", "accordingly", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L64-L76
train
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_deletion
def note_deletion(self, key): """ Notes the deletion of a field. """ # If we're deleting a key we previously added, then there is no diff if key in self._added: self._added.remove(key) else: # If the deleted key was previously changed, use the original value if key in self._previous: self._deleted[key] = self._previous[key] del self._previous[key] else: self._deleted[key] = self._instance[key]
python
def note_deletion(self, key): """ Notes the deletion of a field. """ # If we're deleting a key we previously added, then there is no diff if key in self._added: self._added.remove(key) else: # If the deleted key was previously changed, use the original value if key in self._previous: self._deleted[key] = self._previous[key] del self._previous[key] else: self._deleted[key] = self._instance[key]
[ "def", "note_deletion", "(", "self", ",", "key", ")", ":", "# If we'rew deleting a key we previously added, then there is no diff", "if", "key", "in", "self", ".", "_added", ":", "self", ".", "_added", ".", "remove", "(", "key", ")", "else", ":", "# If the deleted key was previously changed, use the original value", "if", "key", "in", "self", ".", "_previous", ":", "self", ".", "_deleted", "[", "key", "]", "=", "self", ".", "_previous", "[", "key", "]", "del", "self", ".", "_previous", "[", "key", "]", "else", ":", "self", ".", "_deleted", "[", "key", "]", "=", "self", ".", "_instance", "[", "key", "]" ]
Notes the deletion of a field.
[ "Notes", "the", "deletion", "of", "a", "field", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L78-L91
train
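A walk-through of the three note_* rules against a tracker whose instance is {'name': 'bob'} (a sketch; it assumes empty initial _previous/_added/_deleted state, and note that these methods only record changes; the owning Document performs the actual mutation):

tracker.note_change('name', 'clive')  # _previous = {'name': 'bob'}
tracker.note_change('name', 'bob')    # reverted -> _previous = {}

tracker.note_addition('age', 30)      # _added = ['age']
tracker.note_deletion('age')          # added-then-deleted -> no diff at all

tracker.note_deletion('name')         # _deleted = {'name': 'bob'}
tracker.note_addition('name', 'sue')  # delete-then-re-add with a new value
                                      # becomes a change: _previous = {'name': 'bob'}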
gamechanger/mongothon
mongothon/document.py
ChangeTracker.changes
def changes(self): """ Returns a dict containing just the fields which have changed on this Document since it was created or last saved, together with both their previous and current values doc['name'] # => 'bob' doc['name'] = 'clive' doc.changes # => {'name': ('bob', 'clive')} """ return {key: (self._previous[key], self._instance[key]) for key in self._previous}
python
def changes(self): """ Returns a dict containing just the fields which have changed on this Document since it was created or last saved, together with both their previous and current values doc['name'] # => 'bob' doc['name'] = 'clive' doc.changes # => {'name': ('bob', 'clive')} """ return {key: (self._previous[key], self._instance[key]) for key in self._previous}
[ "def", "changes", "(", "self", ")", ":", "return", "{", "key", ":", "(", "self", ".", "_previous", "[", "key", "]", ",", "self", ".", "_instance", "[", "key", "]", ")", "for", "key", "in", "self", ".", "_previous", "}" ]
Returns a dict containing just the fields which have changed on this Document since it was created or last saved, together with both their previous and current values doc['name'] # => 'bob' doc['name'] = 'clive' doc.changes # => {'name': ('bob', 'clive')}
[ "Returns", "a", "dict", "containing", "just", "the", "fields", "which", "have", "changed", "on", "this", "Document", "since", "it", "was", "created", "or", "last", "saved", "together", "with", "both", "their", "previous", "and", "current", "values" ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L107-L118
train
gamechanger/mongothon
mongothon/document.py
Document.reset_all_changes
def reset_all_changes(self): """ Resets change tracking in this document, recursing into child Documents and DocumentLists. """ self.reset_changes() for value in self.values(): if isinstance(value, Document) or isinstance(value, DocumentList): value.reset_all_changes()
python
def reset_all_changes(self): """ Resets change tracking in this document, recursing into child Documents and DocumentLists. """ self.reset_changes() for value in self.values(): if isinstance(value, Document) or isinstance(value, DocumentList): value.reset_all_changes()
[ "def", "reset_all_changes", "(", "self", ")", ":", "self", ".", "reset_changes", "(", ")", "for", "value", "in", "self", ".", "values", "(", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", "or", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "value", ".", "reset_all_changes", "(", ")" ]
Resets change tracking in this document, recursing into child Documents and DocumentLists.
[ "Resets", "change", "tracking", "in", "this", "document", "recursing", "into", "child", "Documents", "and", "DocumentLists", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L161-L169
train
gamechanger/mongothon
mongothon/document.py
Document.populate
def populate(self, other): """Like update, but clears the contents first.""" self.clear() self.update(other) self.reset_all_changes()
python
def populate(self, other): """Like update, but clears the contents first.""" self.clear() self.update(other) self.reset_all_changes()
[ "def", "populate", "(", "self", ",", "other", ")", ":", "self", ".", "clear", "(", ")", "self", ".", "update", "(", "other", ")", "self", ".", "reset_all_changes", "(", ")" ]
Like update, but clears the contents first.
[ "Like", "update", "but", "clears", "the", "contents", "first", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L215-L219
train
msoulier/tftpy
tftpy/TftpPacketFactory.py
TftpPacketFactory.parse
def parse(self, buffer): """This method is used to parse an existing datagram into its corresponding TftpPacket object. The buffer is the raw bytes off of the network.""" log.debug("parsing a %d byte packet" % len(buffer)) (opcode,) = struct.unpack(str("!H"), buffer[:2]) log.debug("opcode is %d" % opcode) packet = self.__create(opcode) packet.buffer = buffer return packet.decode()
python
def parse(self, buffer): """This method is used to parse an existing datagram into its corresponding TftpPacket object. The buffer is the raw bytes off of the network.""" log.debug("parsing a %d byte packet" % len(buffer)) (opcode,) = struct.unpack(str("!H"), buffer[:2]) log.debug("opcode is %d" % opcode) packet = self.__create(opcode) packet.buffer = buffer return packet.decode()
[ "def", "parse", "(", "self", ",", "buffer", ")", ":", "log", ".", "debug", "(", "\"parsing a %d byte packet\"", "%", "len", "(", "buffer", ")", ")", "(", "opcode", ",", ")", "=", "struct", ".", "unpack", "(", "str", "(", "\"!H\"", ")", ",", "buffer", "[", ":", "2", "]", ")", "log", ".", "debug", "(", "\"opcode is %d\"", "%", "opcode", ")", "packet", "=", "self", ".", "__create", "(", "opcode", ")", "packet", ".", "buffer", "=", "buffer", "return", "packet", ".", "decode", "(", ")" ]
This method is used to parse an existing datagram into its corresponding TftpPacket object. The buffer is the raw bytes off of the network.
[ "This", "method", "is", "used", "to", "parse", "an", "existing", "datagram", "into", "its", "corresponding", "TftpPacket", "object", ".", "The", "buffer", "is", "the", "raw", "bytes", "off", "of", "the", "network", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketFactory.py#L28-L37
train
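The dispatch hinges on the first two bytes of every TFTP datagram being a network-order unsigned short opcode (RFC 1350). A standalone sketch of that header handling, building an ACK datagram by hand:

import struct

buffer = struct.pack('!HH', 4, 1)            # opcode 4 (ACK), block number 1
(opcode,) = struct.unpack('!H', buffer[:2])  # -> 4
# The factory would now instantiate self.classes[4] and decode the rest.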
msoulier/tftpy
tftpy/TftpPacketFactory.py
TftpPacketFactory.__create
def __create(self, opcode): """This method returns the appropriate class object corresponding to the passed opcode.""" tftpassert(opcode in self.classes, "Unsupported opcode: %d" % opcode) packet = self.classes[opcode]() return packet
python
def __create(self, opcode): """This method returns the appropriate class object corresponding to the passed opcode.""" tftpassert(opcode in self.classes, "Unsupported opcode: %d" % opcode) packet = self.classes[opcode]() return packet
[ "def", "__create", "(", "self", ",", "opcode", ")", ":", "tftpassert", "(", "opcode", "in", "self", ".", "classes", ",", "\"Unsupported opcode: %d\"", "%", "opcode", ")", "packet", "=", "self", ".", "classes", "[", "opcode", "]", "(", ")", "return", "packet" ]
This method returns the appropriate class object corresponding to the passed opcode.
[ "This", "method", "returns", "the", "appropriate", "class", "object", "corresponding", "to", "the", "passed", "opcode", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketFactory.py#L39-L47
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpMetrics.add_dup
def add_dup(self, pkt): """This method adds a dup for a packet to the metrics.""" log.debug("Recording a dup of %s", pkt) s = str(pkt) if s in self.dups: self.dups[s] += 1 else: self.dups[s] = 1 tftpassert(self.dups[s] < MAX_DUPS, "Max duplicates reached")
python
def add_dup(self, pkt): """This method adds a dup for a packet to the metrics.""" log.debug("Recording a dup of %s", pkt) s = str(pkt) if s in self.dups: self.dups[s] += 1 else: self.dups[s] = 1 tftpassert(self.dups[s] < MAX_DUPS, "Max duplicates reached")
[ "def", "add_dup", "(", "self", ",", "pkt", ")", ":", "log", ".", "debug", "(", "\"Recording a dup of %s\"", ",", "pkt", ")", "s", "=", "str", "(", "pkt", ")", "if", "s", "in", "self", ".", "dups", ":", "self", ".", "dups", "[", "s", "]", "+=", "1", "else", ":", "self", ".", "dups", "[", "s", "]", "=", "1", "tftpassert", "(", "self", ".", "dups", "[", "s", "]", "<", "MAX_DUPS", ",", "\"Max duplicates reached\"", ")" ]
This method adds a dup for a packet to the metrics.
[ "This", "method", "adds", "a", "dup", "for", "a", "packet", "to", "the", "metrics", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L62-L70
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.checkTimeout
def checkTimeout(self, now): """Compare current time with last_update time, and raise an exception if we're over the timeout time.""" log.debug("checking for timeout on session %s", self) if now - self.last_update > self.timeout: raise TftpTimeout("Timeout waiting for traffic")
python
def checkTimeout(self, now): """Compare current time with last_update time, and raise an exception if we're over the timeout time.""" log.debug("checking for timeout on session %s", self) if now - self.last_update > self.timeout: raise TftpTimeout("Timeout waiting for traffic")
[ "def", "checkTimeout", "(", "self", ",", "now", ")", ":", "log", ".", "debug", "(", "\"checking for timeout on session %s\"", ",", "self", ")", "if", "now", "-", "self", ".", "last_update", ">", "self", ".", "timeout", ":", "raise", "TftpTimeout", "(", "\"Timeout waiting for traffic\"", ")" ]
Compare current time with last_update time, and raise an exception if we're over the timeout time.
[ "Compare", "current", "time", "with", "last_update", "time", "and", "raise", "an", "exception", "if", "we", "re", "over", "the", "timeout", "time", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L121-L126
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.end
def end(self, close_fileobj=True): """Perform session cleanup; since the end method should always be called explicitly by the calling code, this works better than the destructor. Set close_fileobj to False so fileobj can be returned open.""" log.debug("in TftpContext.end - closing socket") self.sock.close() if close_fileobj and self.fileobj is not None and not self.fileobj.closed: log.debug("self.fileobj is open - closing") self.fileobj.close()
python
def end(self, close_fileobj=True): """Perform session cleanup; since the end method should always be called explicitly by the calling code, this works better than the destructor. Set close_fileobj to False so fileobj can be returned open.""" log.debug("in TftpContext.end - closing socket") self.sock.close() if close_fileobj and self.fileobj is not None and not self.fileobj.closed: log.debug("self.fileobj is open - closing") self.fileobj.close()
[ "def", "end", "(", "self", ",", "close_fileobj", "=", "True", ")", ":", "log", ".", "debug", "(", "\"in TftpContext.end - closing socket\"", ")", "self", ".", "sock", ".", "close", "(", ")", "if", "close_fileobj", "and", "self", ".", "fileobj", "is", "not", "None", "and", "not", "self", ".", "fileobj", ".", "closed", ":", "log", ".", "debug", "(", "\"self.fileobj is open - closing\"", ")", "self", ".", "fileobj", ".", "close", "(", ")" ]
Perform session cleanup; since the end method should always be called explicitly by the calling code, this works better than the destructor. Set close_fileobj to False so fileobj can be returned open.
[ "Perform", "session", "cleanup", "since", "the", "end", "method", "should", "always", "be", "called", "explicitely", "by", "the", "calling", "code", "this", "works", "better", "than", "the", "destructor", ".", "Set", "close_fileobj", "to", "False", "so", "fileobj", "can", "be", "returned", "open", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L131-L140
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.sethost
def sethost(self, host): """Setter method that also sets the address property as a result of the host that is set.""" self.__host = host self.address = socket.gethostbyname(host)
python
def sethost(self, host): """Setter method that also sets the address property as a result of the host that is set.""" self.__host = host self.address = socket.gethostbyname(host)
[ "def", "sethost", "(", "self", ",", "host", ")", ":", "self", ".", "__host", "=", "host", "self", ".", "address", "=", "socket", ".", "gethostbyname", "(", "host", ")" ]
Setter method that also sets the address property as a result of the host that is set.
[ "Setter", "method", "that", "also", "sets", "the", "address", "property", "as", "a", "result", "of", "the", "host", "that", "is", "set", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L146-L150
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.cycle
def cycle(self): """Here we wait for a response from the server after sending it something, and dispatch appropriate action to that response.""" try: (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE) except socket.timeout: log.warning("Timeout waiting for traffic, retrying...") raise TftpTimeout("Timed-out waiting for traffic") # Ok, we've received a packet. Log it. log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport) # And update our last updated time. self.last_update = time.time() # Decode it. recvpkt = self.factory.parse(buffer) # Check for known "connection". if raddress != self.address: log.warning("Received traffic from %s, expected host %s. Discarding" % (raddress, self.host)) if self.tidport and self.tidport != rport: log.warning("Received traffic from %s:%s but we're " "connected to %s:%s. Discarding." % (raddress, rport, self.host, self.tidport)) # If there is a packethook defined, call it. We unconditionally # pass all packets, it's up to the client to screen out different # kinds of packets. This way, the client is privy to things like # negotiated options. if self.packethook: self.packethook(recvpkt) # And handle it, possibly changing state. self.state = self.state.handle(recvpkt, raddress, rport) # If we didn't throw any exceptions here, reset the retry_count to # zero. self.retry_count = 0
python
def cycle(self): """Here we wait for a response from the server after sending it something, and dispatch appropriate action to that response.""" try: (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE) except socket.timeout: log.warning("Timeout waiting for traffic, retrying...") raise TftpTimeout("Timed-out waiting for traffic") # Ok, we've received a packet. Log it. log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport) # And update our last updated time. self.last_update = time.time() # Decode it. recvpkt = self.factory.parse(buffer) # Check for known "connection". if raddress != self.address: log.warning("Received traffic from %s, expected host %s. Discarding" % (raddress, self.host)) if self.tidport and self.tidport != rport: log.warning("Received traffic from %s:%s but we're " "connected to %s:%s. Discarding." % (raddress, rport, self.host, self.tidport)) # If there is a packethook defined, call it. We unconditionally # pass all packets, it's up to the client to screen out different # kinds of packets. This way, the client is privy to things like # negotiated options. if self.packethook: self.packethook(recvpkt) # And handle it, possibly changing state. self.state = self.state.handle(recvpkt, raddress, rport) # If we didn't throw any exceptions here, reset the retry_count to # zero. self.retry_count = 0
[ "def", "cycle", "(", "self", ")", ":", "try", ":", "(", "buffer", ",", "(", "raddress", ",", "rport", ")", ")", "=", "self", ".", "sock", ".", "recvfrom", "(", "MAX_BLKSIZE", ")", "except", "socket", ".", "timeout", ":", "log", ".", "warning", "(", "\"Timeout waiting for traffic, retrying...\"", ")", "raise", "TftpTimeout", "(", "\"Timed-out waiting for traffic\"", ")", "# Ok, we've received a packet. Log it.", "log", ".", "debug", "(", "\"Received %d bytes from %s:%s\"", ",", "len", "(", "buffer", ")", ",", "raddress", ",", "rport", ")", "# And update our last updated time.", "self", ".", "last_update", "=", "time", ".", "time", "(", ")", "# Decode it.", "recvpkt", "=", "self", ".", "factory", ".", "parse", "(", "buffer", ")", "# Check for known \"connection\".", "if", "raddress", "!=", "self", ".", "address", ":", "log", ".", "warning", "(", "\"Received traffic from %s, expected host %s. Discarding\"", "%", "(", "raddress", ",", "self", ".", "host", ")", ")", "if", "self", ".", "tidport", "and", "self", ".", "tidport", "!=", "rport", ":", "log", ".", "warning", "(", "\"Received traffic from %s:%s but we're \"", "\"connected to %s:%s. Discarding.\"", "%", "(", "raddress", ",", "rport", ",", "self", ".", "host", ",", "self", ".", "tidport", ")", ")", "# If there is a packethook defined, call it. We unconditionally", "# pass all packets, it's up to the client to screen out different", "# kinds of packets. This way, the client is privy to things like", "# negotiated options.", "if", "self", ".", "packethook", ":", "self", ".", "packethook", "(", "recvpkt", ")", "# And handle it, possibly changing state.", "self", ".", "state", "=", "self", ".", "state", ".", "handle", "(", "recvpkt", ",", "raddress", ",", "rport", ")", "# If we didn't throw any exceptions here, reset the retry_count to", "# zero.", "self", ".", "retry_count", "=", "0" ]
Here we wait for a response from the server after sending it something, and dispatch appropriate action to that response.
[ "Here", "we", "wait", "for", "a", "response", "from", "the", "server", "after", "sending", "it", "something", "and", "dispatch", "appropriate", "action", "to", "that", "response", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L165-L205
train
msoulier/tftpy
tftpy/TftpContexts.py
TftpContextClientDownload.start
def start(self): """Initiate the download.""" log.info("Sending tftp download request to %s" % self.host) log.info(" filename -> %s" % self.file_to_transfer) log.info(" options -> %s" % self.options) self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # FIXME: put this in a sendRRQ method? pkt = TftpPacketRRQ() pkt.filename = self.file_to_transfer pkt.mode = "octet" # FIXME - shouldn't hardcode this pkt.options = self.options self.sock.sendto(pkt.encode().buffer, (self.host, self.port)) self.next_block = 1 self.last_pkt = pkt self.state = TftpStateSentRRQ(self) while self.state: try: log.debug("State is %s" % self.state) self.cycle() except TftpTimeout as err: log.error(str(err)) self.retry_count += 1 if self.retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries, giving up") raise else: log.warning("resending last packet") self.state.resendLast() except TftpFileNotFoundError as err: # If we received file not found, then we should not save the open # output file or we'll be left with a size zero file. Delete it, # if it exists. log.error("Received File not found error") if self.fileobj is not None and not self.filelike_fileobj: if os.path.exists(self.fileobj.name): log.debug("unlinking output file of %s", self.fileobj.name) os.unlink(self.fileobj.name) raise
python
def start(self): """Initiate the download.""" log.info("Sending tftp download request to %s" % self.host) log.info(" filename -> %s" % self.file_to_transfer) log.info(" options -> %s" % self.options) self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # FIXME: put this in a sendRRQ method? pkt = TftpPacketRRQ() pkt.filename = self.file_to_transfer pkt.mode = "octet" # FIXME - shouldn't hardcode this pkt.options = self.options self.sock.sendto(pkt.encode().buffer, (self.host, self.port)) self.next_block = 1 self.last_pkt = pkt self.state = TftpStateSentRRQ(self) while self.state: try: log.debug("State is %s" % self.state) self.cycle() except TftpTimeout as err: log.error(str(err)) self.retry_count += 1 if self.retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries, giving up") raise else: log.warning("resending last packet") self.state.resendLast() except TftpFileNotFoundError as err: # If we received file not found, then we should not save the open # output file or we'll be left with a size zero file. Delete it, # if it exists. log.error("Received File not found error") if self.fileobj is not None and not self.filelike_fileobj: if os.path.exists(self.fileobj.name): log.debug("unlinking output file of %s", self.fileobj.name) os.unlink(self.fileobj.name) raise
[ "def", "start", "(", "self", ")", ":", "log", ".", "info", "(", "\"Sending tftp download request to %s\"", "%", "self", ".", "host", ")", "log", ".", "info", "(", "\" filename -> %s\"", "%", "self", ".", "file_to_transfer", ")", "log", ".", "info", "(", "\" options -> %s\"", "%", "self", ".", "options", ")", "self", ".", "metrics", ".", "start_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Set metrics.start_time to %s\"", "%", "self", ".", "metrics", ".", "start_time", ")", "# FIXME: put this in a sendRRQ method?", "pkt", "=", "TftpPacketRRQ", "(", ")", "pkt", ".", "filename", "=", "self", ".", "file_to_transfer", "pkt", ".", "mode", "=", "\"octet\"", "# FIXME - shouldn't hardcode this", "pkt", ".", "options", "=", "self", ".", "options", "self", ".", "sock", ".", "sendto", "(", "pkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "self", ".", "next_block", "=", "1", "self", ".", "last_pkt", "=", "pkt", "self", ".", "state", "=", "TftpStateSentRRQ", "(", "self", ")", "while", "self", ".", "state", ":", "try", ":", "log", ".", "debug", "(", "\"State is %s\"", "%", "self", ".", "state", ")", "self", ".", "cycle", "(", ")", "except", "TftpTimeout", "as", "err", ":", "log", ".", "error", "(", "str", "(", "err", ")", ")", "self", ".", "retry_count", "+=", "1", "if", "self", ".", "retry_count", ">=", "TIMEOUT_RETRIES", ":", "log", ".", "debug", "(", "\"hit max retries, giving up\"", ")", "raise", "else", ":", "log", ".", "warning", "(", "\"resending last packet\"", ")", "self", ".", "state", ".", "resendLast", "(", ")", "except", "TftpFileNotFoundError", "as", "err", ":", "# If we received file not found, then we should not save the open", "# output file or we'll be left with a size zero file. Delete it,", "# if it exists.", "log", ".", "error", "(", "\"Received File not found error\"", ")", "if", "self", ".", "fileobj", "is", "not", "None", "and", "not", "self", ".", "filelike_fileobj", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "fileobj", ".", "name", ")", ":", "log", ".", "debug", "(", "\"unlinking output file of %s\"", ",", "self", ".", "fileobj", ".", "name", ")", "os", ".", "unlink", "(", "self", ".", "fileobj", ".", "name", ")", "raise" ]
Initiate the download.
[ "Initiate", "the", "download", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L379-L422
train
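The record above documents the client download state machine. As a minimal sketch of driving it directly — the host, port, and filenames below are hypothetical, and the argument order is taken from the TftpClient.download record further down:

# Minimal sketch: driving the download context directly. Assumes the
# constructor signature shown in the TftpClient.download record below,
# and that localip has a sensible default.
from tftpy.TftpContexts import TftpContextClientDownload

context = TftpContextClientDownload(
    '192.168.0.10',        # hypothetical server address
    69,                    # well-known TFTP port
    'firmware.bin',        # hypothetical remote filename to request
    '/tmp/firmware.bin',   # local output path (or a file-like object)
    {},                    # no RFC 2347 options requested
    None,                  # no packethook
    5)                     # socket timeout in seconds
context.start()            # sends the RRQ and runs the state machine
context.end()              # closes the file and computes transfer metrics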
msoulier/tftpy
tftpy/TftpContexts.py
TftpContextClientDownload.end
def end(self): """Finish up the context.""" TftpContext.end(self, not self.filelike_fileobj) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute()
python
def end(self): """Finish up the context.""" TftpContext.end(self, not self.filelike_fileobj) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute()
[ "def", "end", "(", "self", ")", ":", "TftpContext", ".", "end", "(", "self", ",", "not", "self", ".", "filelike_fileobj", ")", "self", ".", "metrics", ".", "end_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Set metrics.end_time to %s\"", "%", "self", ".", "metrics", ".", "end_time", ")", "self", ".", "metrics", ".", "compute", "(", ")" ]
Finish up the context.
[ "Finish", "up", "the", "context", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L424-L429
train
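A small helper, sketched here, showing what can be read from the metrics object once end() has run; the attribute names (bytes, duration, kbps, dupcount) are those logged by the TftpClient records below:

def report(context):
    # context: a finished TftpContext whose end() has already been called
    m = context.metrics
    if m.duration == 0:
        print("transfer too short to compute a rate")
    else:
        print("%d bytes in %.2f s (%.2f kbps), %d duplicate packets"
              % (m.bytes, m.duration, m.kbps, m.dupcount))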
msoulier/tftpy
tftpy/TftpClient.py
TftpClient.download
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp download from the configured remote host, requesting the filename passed. It writes the file to output, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet received in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a receive packet to arrive. Note: If output is a hyphen, stdout is used.""" # We're downloading. log.debug("Creating download context with the following params:") log.debug("host = %s, port = %s, filename = %s" % (self.host, self.iport, filename)) log.debug("options = %s, packethook = %s, timeout = %s" % (self.options, packethook, timeout)) self.context = TftpContextClientDownload(self.host, self.iport, filename, output, self.options, packethook, timeout, localip = self.localip) self.context.start() # Download happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Download complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Received %d duplicate packets" % metrics.dupcount)
python
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp download from the configured remote host, requesting the filename passed. It writes the file to output, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet received in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a receive packet to arrive. Note: If output is a hyphen, stdout is used.""" # We're downloading. log.debug("Creating download context with the following params:") log.debug("host = %s, port = %s, filename = %s" % (self.host, self.iport, filename)) log.debug("options = %s, packethook = %s, timeout = %s" % (self.options, packethook, timeout)) self.context = TftpContextClientDownload(self.host, self.iport, filename, output, self.options, packethook, timeout, localip = self.localip) self.context.start() # Download happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Download complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Received %d duplicate packets" % metrics.dupcount)
[ "def", "download", "(", "self", ",", "filename", ",", "output", ",", "packethook", "=", "None", ",", "timeout", "=", "SOCK_TIMEOUT", ")", ":", "# We're downloading.", "log", ".", "debug", "(", "\"Creating download context with the following params:\"", ")", "log", ".", "debug", "(", "\"host = %s, port = %s, filename = %s\"", "%", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ")", ")", "log", ".", "debug", "(", "\"options = %s, packethook = %s, timeout = %s\"", "%", "(", "self", ".", "options", ",", "packethook", ",", "timeout", ")", ")", "self", ".", "context", "=", "TftpContextClientDownload", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ",", "output", ",", "self", ".", "options", ",", "packethook", ",", "timeout", ",", "localip", "=", "self", ".", "localip", ")", "self", ".", "context", ".", "start", "(", ")", "# Download happens here", "self", ".", "context", ".", "end", "(", ")", "metrics", "=", "self", ".", "context", ".", "metrics", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "\"Download complete.\"", ")", "if", "metrics", ".", "duration", "==", "0", ":", "log", ".", "info", "(", "\"Duration too short, rate undetermined\"", ")", "else", ":", "log", ".", "info", "(", "\"Downloaded %.2f bytes in %.2f seconds\"", "%", "(", "metrics", ".", "bytes", ",", "metrics", ".", "duration", ")", ")", "log", ".", "info", "(", "\"Average rate: %.2f kbps\"", "%", "metrics", ".", "kbps", ")", "log", ".", "info", "(", "\"%.2f bytes in resent data\"", "%", "metrics", ".", "resent_bytes", ")", "log", ".", "info", "(", "\"Received %d duplicate packets\"", "%", "metrics", ".", "dupcount", ")" ]
This method initiates a tftp download from the configured remote host, requesting the filename passed. It writes the file to output, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet received in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a receive packet to arrive. Note: If output is a hyphen, stdout is used.
[ "This", "method", "initiates", "a", "tftp", "download", "from", "the", "configured", "remote", "host", "requesting", "the", "filename", "passed", ".", "It", "writes", "the", "file", "to", "output", "which", "can", "be", "a", "file", "-", "like", "object", "or", "a", "path", "to", "a", "local", "file", ".", "If", "a", "packethook", "is", "provided", "it", "must", "be", "a", "function", "that", "takes", "a", "single", "parameter", "which", "will", "be", "a", "copy", "of", "each", "DAT", "packet", "received", "in", "the", "form", "of", "a", "TftpPacketDAT", "object", ".", "The", "timeout", "parameter", "may", "be", "used", "to", "override", "the", "default", "SOCK_TIMEOUT", "setting", "which", "is", "the", "amount", "of", "time", "that", "the", "client", "will", "wait", "for", "a", "receive", "packet", "to", "arrive", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpClient.py#L35-L72
train
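Typical use goes through the high-level client rather than the context. A minimal sketch, assuming the usual tftpy.TftpClient(host, port, options=...) constructor; the address, filename, and option value are hypothetical:

import tftpy

client = tftpy.TftpClient('192.168.0.10', 69, options={'blksize': 1024})

def on_packet(pkt):
    # called once per received DAT packet
    print("got block %d (%d bytes)" % (pkt.blocknumber, len(pkt.data)))

client.download('remote.bin', '/tmp/remote.bin', packethook=on_packet)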
msoulier/tftpy
tftpy/TftpClient.py
TftpClient.upload
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.""" self.context = TftpContextClientUpload(self.host, self.iport, filename, input, self.options, packethook, timeout, localip = self.localip) self.context.start() # Upload happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Upload complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Resent %d packets" % metrics.dupcount)
python
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.""" self.context = TftpContextClientUpload(self.host, self.iport, filename, input, self.options, packethook, timeout, localip = self.localip) self.context.start() # Upload happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Upload complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Resent %d packets" % metrics.dupcount)
[ "def", "upload", "(", "self", ",", "filename", ",", "input", ",", "packethook", "=", "None", ",", "timeout", "=", "SOCK_TIMEOUT", ")", ":", "self", ".", "context", "=", "TftpContextClientUpload", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ",", "input", ",", "self", ".", "options", ",", "packethook", ",", "timeout", ",", "localip", "=", "self", ".", "localip", ")", "self", ".", "context", ".", "start", "(", ")", "# Upload happens here", "self", ".", "context", ".", "end", "(", ")", "metrics", "=", "self", ".", "context", ".", "metrics", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "\"Upload complete.\"", ")", "if", "metrics", ".", "duration", "==", "0", ":", "log", ".", "info", "(", "\"Duration too short, rate undetermined\"", ")", "else", ":", "log", ".", "info", "(", "\"Uploaded %d bytes in %.2f seconds\"", "%", "(", "metrics", ".", "bytes", ",", "metrics", ".", "duration", ")", ")", "log", ".", "info", "(", "\"Average rate: %.2f kbps\"", "%", "metrics", ".", "kbps", ")", "log", ".", "info", "(", "\"%.2f bytes in resent data\"", "%", "metrics", ".", "resent_bytes", ")", "log", ".", "info", "(", "\"Resent %d packets\"", "%", "metrics", ".", "dupcount", ")" ]
This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.
[ "This", "method", "initiates", "a", "tftp", "upload", "to", "the", "configured", "remote", "host", "uploading", "the", "filename", "passed", ".", "It", "reads", "the", "file", "from", "input", "which", "can", "be", "a", "file", "-", "like", "object", "or", "a", "path", "to", "a", "local", "file", ".", "If", "a", "packethook", "is", "provided", "it", "must", "be", "a", "function", "that", "takes", "a", "single", "parameter", "which", "will", "be", "a", "copy", "of", "each", "DAT", "packet", "sent", "in", "the", "form", "of", "a", "TftpPacketDAT", "object", ".", "The", "timeout", "parameter", "may", "be", "used", "to", "override", "the", "default", "SOCK_TIMEOUT", "setting", "which", "is", "the", "amount", "of", "time", "that", "the", "client", "will", "wait", "for", "a", "DAT", "packet", "to", "be", "ACKd", "by", "the", "server", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpClient.py#L74-L107
train
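The upload side mirrors download. A brief sketch with hypothetical host and paths; per the docstring, passing '-' as input would read from stdin instead:

import tftpy

client = tftpy.TftpClient('192.168.0.10', 69)
# Upload a local file to the server under the name 'remote.bin'.
client.upload('remote.bin', '/tmp/local.bin')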
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketWithOptions.decode_options
def decode_options(self, buffer): """This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.""" fmt = b"!" options = {} log.debug("decode_options: buffer is: %s", repr(buffer)) log.debug("size of buffer is %d bytes", len(buffer)) if len(buffer) == 0: log.debug("size of buffer is zero, returning empty hash") return {} # Count the nulls in the buffer. Each one terminates a string. log.debug("about to iterate options buffer counting nulls") length = 0 for i in range(len(buffer)): if ord(buffer[i:i+1]) == 0: log.debug("found a null at length %d", length) if length > 0: fmt += b"%dsx" % length length = -1 else: raise TftpException("Invalid options in buffer") length += 1 log.debug("about to unpack, fmt is: %s", fmt) mystruct = struct.unpack(fmt, buffer) tftpassert(len(mystruct) % 2 == 0, "packet with odd number of option/value pairs") for i in range(0, len(mystruct), 2): key = mystruct[i].decode('ascii') val = mystruct[i+1].decode('ascii') log.debug("setting option %s to %s", key, val) log.debug("types are %s and %s", type(key), type(val)) options[key] = val return options
python
def decode_options(self, buffer): """This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.""" fmt = b"!" options = {} log.debug("decode_options: buffer is: %s", repr(buffer)) log.debug("size of buffer is %d bytes", len(buffer)) if len(buffer) == 0: log.debug("size of buffer is zero, returning empty hash") return {} # Count the nulls in the buffer. Each one terminates a string. log.debug("about to iterate options buffer counting nulls") length = 0 for i in range(len(buffer)): if ord(buffer[i:i+1]) == 0: log.debug("found a null at length %d", length) if length > 0: fmt += b"%dsx" % length length = -1 else: raise TftpException("Invalid options in buffer") length += 1 log.debug("about to unpack, fmt is: %s", fmt) mystruct = struct.unpack(fmt, buffer) tftpassert(len(mystruct) % 2 == 0, "packet with odd number of option/value pairs") for i in range(0, len(mystruct), 2): key = mystruct[i].decode('ascii') val = mystruct[i+1].decode('ascii') log.debug("setting option %s to %s", key, val) log.debug("types are %s and %s", type(key), type(val)) options[key] = val return options
[ "def", "decode_options", "(", "self", ",", "buffer", ")", ":", "fmt", "=", "b\"!\"", "options", "=", "{", "}", "log", ".", "debug", "(", "\"decode_options: buffer is: %s\"", ",", "repr", "(", "buffer", ")", ")", "log", ".", "debug", "(", "\"size of buffer is %d bytes\"", ",", "len", "(", "buffer", ")", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "log", ".", "debug", "(", "\"size of buffer is zero, returning empty hash\"", ")", "return", "{", "}", "# Count the nulls in the buffer. Each one terminates a string.", "log", ".", "debug", "(", "\"about to iterate options buffer counting nulls\"", ")", "length", "=", "0", "for", "i", "in", "range", "(", "len", "(", "buffer", ")", ")", ":", "if", "ord", "(", "buffer", "[", "i", ":", "i", "+", "1", "]", ")", "==", "0", ":", "log", ".", "debug", "(", "\"found a null at length %d\"", ",", "length", ")", "if", "length", ">", "0", ":", "fmt", "+=", "b\"%dsx\"", "%", "length", "length", "=", "-", "1", "else", ":", "raise", "TftpException", "(", "\"Invalid options in buffer\"", ")", "length", "+=", "1", "log", ".", "debug", "(", "\"about to unpack, fmt is: %s\"", ",", "fmt", ")", "mystruct", "=", "struct", ".", "unpack", "(", "fmt", ",", "buffer", ")", "tftpassert", "(", "len", "(", "mystruct", ")", "%", "2", "==", "0", ",", "\"packet with odd number of option/value pairs\"", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "mystruct", ")", ",", "2", ")", ":", "key", "=", "mystruct", "[", "i", "]", ".", "decode", "(", "'ascii'", ")", "val", "=", "mystruct", "[", "i", "+", "1", "]", ".", "decode", "(", "'ascii'", ")", "log", ".", "debug", "(", "\"setting option %s to %s\"", ",", "key", ",", "val", ")", "log", ".", "debug", "(", "\"types are %s and %s\"", ",", "type", "(", "key", ")", ",", "type", "(", "val", ")", ")", "options", "[", "key", "]", "=", "val", "return", "options" ]
This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.
[ "This", "method", "decodes", "the", "section", "of", "the", "buffer", "that", "contains", "an", "unknown", "number", "of", "options", ".", "It", "returns", "a", "dictionary", "of", "option", "names", "and", "values", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L56-L95
train
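On the wire, options are NUL-terminated name/value string pairs. A round-trip sketch, assuming TftpPacketRRQ inherits this mixin (its class appears in the encode record below):

from tftpy.TftpPacketTypes import TftpPacketRRQ

# Options are encoded as NUL-terminated name/value pairs.
buf = b'blksize\x001024\x00tsize\x000\x00'
opts = TftpPacketRRQ().decode_options(buf)
assert opts == {'blksize': '1024', 'tsize': '0'}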
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketInitial.encode
def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") # Make sure filename and mode are bytestrings. filename = self.filename mode = self.mode if not isinstance(filename, bytes): filename = filename.encode('ascii') if not isinstance(self.mode, bytes): mode = mode.encode('ascii') ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s", ptype, filename, mode) for key in self.options: log.debug(" Option %s = %s", key, self.options[key]) fmt = b"!H" fmt += b"%dsx" % len(filename) if mode == b"octet": fmt += b"5sx" else: raise AssertionError("Unsupported mode: %s" % mode) # Add options. Note that the options list must be bytes. options_list = [] if len(list(self.options.keys())) > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name name = key if not isinstance(name, bytes): name = name.encode('ascii') options_list.append(name) fmt += b"%dsx" % len(name) # Populate the option value value = self.options[key] # Work with all strings. if isinstance(value, int): value = str(value) if not isinstance(value, bytes): value = value.encode('ascii') options_list.append(value) fmt += b"%dsx" % len(value) log.debug("fmt is %s", fmt) log.debug("options_list is %s", options_list) log.debug("size of struct is %d", struct.calcsize(fmt)) self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list) log.debug("buffer is %s", repr(self.buffer)) return self
python
def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") # Make sure filename and mode are bytestrings. filename = self.filename mode = self.mode if not isinstance(filename, bytes): filename = filename.encode('ascii') if not isinstance(self.mode, bytes): mode = mode.encode('ascii') ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s", ptype, filename, mode) for key in self.options: log.debug(" Option %s = %s", key, self.options[key]) fmt = b"!H" fmt += b"%dsx" % len(filename) if mode == b"octet": fmt += b"5sx" else: raise AssertionError("Unsupported mode: %s" % mode) # Add options. Note that the options list must be bytes. options_list = [] if len(list(self.options.keys())) > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name name = key if not isinstance(name, bytes): name = name.encode('ascii') options_list.append(name) fmt += b"%dsx" % len(name) # Populate the option value value = self.options[key] # Work with all strings. if isinstance(value, int): value = str(value) if not isinstance(value, bytes): value = value.encode('ascii') options_list.append(value) fmt += b"%dsx" % len(value) log.debug("fmt is %s", fmt) log.debug("options_list is %s", options_list) log.debug("size of struct is %d", struct.calcsize(fmt)) self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list) log.debug("buffer is %s", repr(self.buffer)) return self
[ "def", "encode", "(", "self", ")", ":", "tftpassert", "(", "self", ".", "filename", ",", "\"filename required in initial packet\"", ")", "tftpassert", "(", "self", ".", "mode", ",", "\"mode required in initial packet\"", ")", "# Make sure filename and mode are bytestrings.", "filename", "=", "self", ".", "filename", "mode", "=", "self", ".", "mode", "if", "not", "isinstance", "(", "filename", ",", "bytes", ")", ":", "filename", "=", "filename", ".", "encode", "(", "'ascii'", ")", "if", "not", "isinstance", "(", "self", ".", "mode", ",", "bytes", ")", ":", "mode", "=", "mode", ".", "encode", "(", "'ascii'", ")", "ptype", "=", "None", "if", "self", ".", "opcode", "==", "1", ":", "ptype", "=", "\"RRQ\"", "else", ":", "ptype", "=", "\"WRQ\"", "log", ".", "debug", "(", "\"Encoding %s packet, filename = %s, mode = %s\"", ",", "ptype", ",", "filename", ",", "mode", ")", "for", "key", "in", "self", ".", "options", ":", "log", ".", "debug", "(", "\" Option %s = %s\"", ",", "key", ",", "self", ".", "options", "[", "key", "]", ")", "fmt", "=", "b\"!H\"", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "filename", ")", "if", "mode", "==", "b\"octet\"", ":", "fmt", "+=", "b\"5sx\"", "else", ":", "raise", "AssertionError", "(", "\"Unsupported mode: %s\"", "%", "mode", ")", "# Add options. Note that the options list must be bytes.", "options_list", "=", "[", "]", "if", "len", "(", "list", "(", "self", ".", "options", ".", "keys", "(", ")", ")", ")", ">", "0", ":", "log", ".", "debug", "(", "\"there are options to encode\"", ")", "for", "key", "in", "self", ".", "options", ":", "# Populate the option name", "name", "=", "key", "if", "not", "isinstance", "(", "name", ",", "bytes", ")", ":", "name", "=", "name", ".", "encode", "(", "'ascii'", ")", "options_list", ".", "append", "(", "name", ")", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "name", ")", "# Populate the option value", "value", "=", "self", ".", "options", "[", "key", "]", "# Work with all strings.", "if", "isinstance", "(", "value", ",", "int", ")", ":", "value", "=", "str", "(", "value", ")", "if", "not", "isinstance", "(", "value", ",", "bytes", ")", ":", "value", "=", "value", ".", "encode", "(", "'ascii'", ")", "options_list", ".", "append", "(", "value", ")", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "value", ")", "log", ".", "debug", "(", "\"fmt is %s\"", ",", "fmt", ")", "log", ".", "debug", "(", "\"options_list is %s\"", ",", "options_list", ")", "log", ".", "debug", "(", "\"size of struct is %d\"", ",", "struct", ".", "calcsize", "(", "fmt", ")", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "filename", ",", "mode", ",", "*", "options_list", ")", "log", ".", "debug", "(", "\"buffer is %s\"", ",", "repr", "(", "self", ".", "buffer", ")", ")", "return", "self" ]
Encode the packet's buffer from the instance variables.
[ "Encode", "the", "packet", "s", "buffer", "from", "the", "instance", "variables", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L132-L190
train
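A sketch of the resulting wire layout for an RRQ with one option; the filename and option value are hypothetical:

from tftpy.TftpPacketTypes import TftpPacketRRQ

pkt = TftpPacketRRQ()
pkt.filename = 'remote.bin'
pkt.mode = 'octet'              # the only mode this encoder supports
pkt.options = {'blksize': 1024}
wire = pkt.encode().buffer
# opcode 1, then NUL-terminated filename, mode, and option pairs
assert wire == b'\x00\x01remote.bin\x00octet\x00blksize\x001024\x00'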
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketDAT.encode
def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") data = self.data if not isinstance(self.data, bytes): data = self.data.encode('ascii') fmt = b"!HH%ds" % len(data) self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data) return self
python
def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") data = self.data if not isinstance(self.data, bytes): data = self.data.encode('ascii') fmt = b"!HH%ds" % len(data) self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data) return self
[ "def", "encode", "(", "self", ")", ":", "if", "len", "(", "self", ".", "data", ")", "==", "0", ":", "log", ".", "debug", "(", "\"Encoding an empty DAT packet\"", ")", "data", "=", "self", ".", "data", "if", "not", "isinstance", "(", "self", ".", "data", ",", "bytes", ")", ":", "data", "=", "self", ".", "data", ".", "encode", "(", "'ascii'", ")", "fmt", "=", "b\"!HH%ds\"", "%", "len", "(", "data", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "self", ".", "blocknumber", ",", "data", ")", "return", "self" ]
Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.
[ "Encode", "the", "DAT", "packet", ".", "This", "method", "populates", "self", ".", "buffer", "and", "returns", "self", "for", "easy", "method", "chaining", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L292-L305
train
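The DAT wire layout this produces is opcode 3, a big-endian block number, then the raw payload. A small sketch:

from tftpy.TftpPacketTypes import TftpPacketDAT

dat = TftpPacketDAT()
dat.blocknumber = 1
dat.data = b'hello'             # hypothetical payload
assert dat.encode().buffer == b'\x00\x03\x00\x01hello'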
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketDAT.decode
def decode(self): """Decode self.buffer into instance variables. It returns self for easy method chaining.""" # We know the first 2 bytes are the opcode. The second two are the # block number. (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4]) log.debug("decoding DAT packet, block number %d", self.blocknumber) log.debug("should be %d bytes in the packet total", len(self.buffer)) # Everything else is data. self.data = self.buffer[4:] log.debug("found %d bytes of data", len(self.data)) return self
python
def decode(self): """Decode self.buffer into instance variables. It returns self for easy method chaining.""" # We know the first 2 bytes are the opcode. The second two are the # block number. (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4]) log.debug("decoding DAT packet, block number %d", self.blocknumber) log.debug("should be %d bytes in the packet total", len(self.buffer)) # Everything else is data. self.data = self.buffer[4:] log.debug("found %d bytes of data", len(self.data)) return self
[ "def", "decode", "(", "self", ")", ":", "# We know the first 2 bytes are the opcode. The second two are the", "# block number.", "(", "self", ".", "blocknumber", ",", ")", "=", "struct", ".", "unpack", "(", "str", "(", "\"!H\"", ")", ",", "self", ".", "buffer", "[", "2", ":", "4", "]", ")", "log", ".", "debug", "(", "\"decoding DAT packet, block number %d\"", ",", "self", ".", "blocknumber", ")", "log", ".", "debug", "(", "\"should be %d bytes in the packet total\"", ",", "len", "(", "self", ".", "buffer", ")", ")", "# Everything else is data.", "self", ".", "data", "=", "self", ".", "buffer", "[", "4", ":", "]", "log", ".", "debug", "(", "\"found %d bytes of data\"", ",", "len", "(", "self", ".", "data", ")", ")", "return", "self" ]
Decode self.buffer into instance variables. It returns self for easy method chaining.
[ "Decode", "self", ".", "buffer", "into", "instance", "variables", ".", "It", "returns", "self", "for", "easy", "method", "chaining", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L307-L318
train
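And the inverse direction, decoding a hand-built DAT buffer (hypothetical payload):

from tftpy.TftpPacketTypes import TftpPacketDAT

dat = TftpPacketDAT()
dat.buffer = b'\x00\x03\x00\x07payload'
dat.decode()
assert dat.blocknumber == 7
assert dat.data == b'payload'   # everything after the 4-byte header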
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketERR.encode
def encode(self):
        """Encode the ERR packet based on instance variables, populating
        self.buffer, returning self."""
        fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode])
        log.debug("encoding ERR packet with fmt %s", fmt)
        self.buffer = struct.pack(fmt,
                                  self.opcode,
                                  self.errorcode,
                                  self.errmsgs[self.errorcode])
        return self
python
def encode(self):
        """Encode the ERR packet based on instance variables, populating
        self.buffer, returning self."""
        fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode])
        log.debug("encoding ERR packet with fmt %s", fmt)
        self.buffer = struct.pack(fmt,
                                  self.opcode,
                                  self.errorcode,
                                  self.errmsgs[self.errorcode])
        return self
[ "def", "encode", "(", "self", ")", ":", "fmt", "=", "b\"!HH%dsx\"", "%", "len", "(", "self", ".", "errmsgs", "[", "self", ".", "errorcode", "]", ")", "log", ".", "debug", "(", "\"encoding ERR packet with fmt %s\"", ",", "fmt", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsgs", "[", "self", ".", "errorcode", "]", ")", "return", "self" ]
Encode the ERR packet based on instance variables, populating self.buffer, returning self.
[ "Encode", "the", "ERR", "packet", "based", "on", "instance", "variables", "populating", "self", ".", "buffer", "returning", "self", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L399-L408
train
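A sketch of encoding an ERR packet; the errorcode-to-message mapping lives in the errmsgs table, so only the fixed header bytes are asserted here:

from tftpy.TftpPacketTypes import TftpPacketERR

err = TftpPacketERR()
err.errorcode = 1               # "File not found" in the errmsgs table
wire = err.encode().buffer
# opcode 5, error code, then a NUL-terminated canned message
assert wire[:4] == b'\x00\x05\x00\x01'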
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketERR.decode
def decode(self): "Decode self.buffer, populating instance variables and return self." buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes", buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") fmt = b"!HH" log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode = struct.unpack(fmt, self.buffer) else: log.debug("Good ERR packet > 4 bytes") fmt = b"!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self
python
def decode(self): "Decode self.buffer, populating instance variables and return self." buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes", buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") fmt = b"!HH" log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode = struct.unpack(fmt, self.buffer) else: log.debug("Good ERR packet > 4 bytes") fmt = b"!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self
[ "def", "decode", "(", "self", ")", ":", "buflen", "=", "len", "(", "self", ".", "buffer", ")", "tftpassert", "(", "buflen", ">=", "4", ",", "\"malformed ERR packet, too short\"", ")", "log", ".", "debug", "(", "\"Decoding ERR packet, length %s bytes\"", ",", "buflen", ")", "if", "buflen", "==", "4", ":", "log", ".", "debug", "(", "\"Allowing this affront to the RFC of a 4-byte packet\"", ")", "fmt", "=", "b\"!HH\"", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "else", ":", "log", ".", "debug", "(", "\"Good ERR packet > 4 bytes\"", ")", "fmt", "=", "b\"!HH%dsx\"", "%", "(", "len", "(", "self", ".", "buffer", ")", "-", "5", ")", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsg", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "log", ".", "error", "(", "\"ERR packet - errorcode: %d, message: %s\"", "%", "(", "self", ".", "errorcode", ",", "self", ".", "errmsg", ")", ")", "return", "self" ]
Decode self.buffer, populating instance variables and return self.
[ "Decode", "self", ".", "buffer", "populating", "instance", "variables", "and", "return", "self", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L410-L429
train
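A sketch of the tolerated 4-byte case the decoder special-cases (a bare opcode plus error code, with no message):

from tftpy.TftpPacketTypes import TftpPacketERR

err = TftpPacketERR()
err.buffer = b'\x00\x05\x00\x02'  # bare 4-byte ERR, tolerated despite the RFC
err.decode()
assert err.errorcode == 2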
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketOACK.match_options
def match_options(self, options):
        """This method takes a set of options, and tries to match them with
        its own. It can accept some changes in those options from the server as
        part of a negotiation. Accepted values are written back into the passed
        options dict so that the session can update itself to the negotiated
        options; the method returns True on success and raises TftpException
        otherwise."""
        for name in self.options:
            if name in options:
                if name == 'blksize':
                    # We can accept anything between the min and max values.
                    size = int(self.options[name])
                    if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                        log.debug("negotiated blksize of %d bytes", size)
                        options['blksize'] = size
                    else:
                        raise TftpException("blksize %s option outside allowed range" % size)
                elif name == 'tsize':
                    size = int(self.options[name])
                    if size < 0:
                        raise TftpException("Negative file sizes not supported")
                else:
                    raise TftpException("Unsupported option: %s" % name)
        return True
python
def match_options(self, options):
        """This method takes a set of options, and tries to match them with
        its own. It can accept some changes in those options from the server as
        part of a negotiation. Accepted values are written back into the passed
        options dict so that the session can update itself to the negotiated
        options; the method returns True on success and raises TftpException
        otherwise."""
        for name in self.options:
            if name in options:
                if name == 'blksize':
                    # We can accept anything between the min and max values.
                    size = int(self.options[name])
                    if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                        log.debug("negotiated blksize of %d bytes", size)
                        options['blksize'] = size
                    else:
                        raise TftpException("blksize %s option outside allowed range" % size)
                elif name == 'tsize':
                    size = int(self.options[name])
                    if size < 0:
                        raise TftpException("Negative file sizes not supported")
                else:
                    raise TftpException("Unsupported option: %s" % name)
        return True
[ "def", "match_options", "(", "self", ",", "options", ")", ":", "for", "name", "in", "self", ".", "options", ":", "if", "name", "in", "options", ":", "if", "name", "==", "'blksize'", ":", "# We can accept anything between the min and max values.", "size", "=", "int", "(", "self", ".", "options", "[", "name", "]", ")", "if", "size", ">=", "MIN_BLKSIZE", "and", "size", "<=", "MAX_BLKSIZE", ":", "log", ".", "debug", "(", "\"negotiated blksize of %d bytes\"", ",", "size", ")", "options", "[", "'blksize'", "]", "=", "size", "else", ":", "raise", "TftpException", "(", "\"blksize %s option outside allowed range\"", "%", "size", ")", "elif", "name", "==", "'tsize'", ":", "size", "=", "int", "(", "self", ".", "options", "[", "name", "]", ")", "if", "size", "<", "0", ":", "raise", "TftpException", "(", "\"Negative file sizes not supported\"", ")", "else", ":", "raise", "TftpException", "(", "\"Unsupported option: %s\"", "%", "name", ")", "return", "True" ]
This method takes a set of options, and tries to match them with its own. It can accept some changes in those options from the server as part of a negotiation. Accepted values are written back into the passed options dict so that the session can update itself to the negotiated options; the method returns True on success and raises TftpException otherwise.
[ "This", "method", "takes", "a", "set", "of", "options", "and", "tries", "to", "match", "them", "with", "its", "own", ".", "It", "can", "accept", "some", "changes", "in", "those", "options", "from", "the", "server", "as", "part", "of", "a", "negotiation", ".", "Accepted", "values", "are", "written", "back", "into", "the", "passed", "options", "dict", "so", "that", "the", "session", "can", "update", "itself", "to", "the", "negotiated", "options", ";", "the", "method", "returns", "True", "on", "success", "and", "raises", "TftpException", "otherwise", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L472-L494
train
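A sketch of negotiation from the client's perspective; note the accepted blksize is written back into the dict that was passed in (values below are hypothetical):

from tftpy.TftpPacketTypes import TftpPacketOACK

oack = TftpPacketOACK()
oack.options = {'blksize': '1024'}    # as decoded from the server's OACK
negotiated = {'blksize': 512}         # what the client originally asked for
oack.match_options(negotiated)        # raises TftpException on a bad value
assert negotiated['blksize'] == 1024  # updated in place to the server's value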
msoulier/tftpy
tftpy/TftpStates.py
TftpState.handleOACK
def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
python
def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
[ "def", "handleOACK", "(", "self", ",", "pkt", ")", ":", "if", "len", "(", "pkt", ".", "options", ".", "keys", "(", ")", ")", ">", "0", ":", "if", "pkt", ".", "match_options", "(", "self", ".", "context", ".", "options", ")", ":", "log", ".", "info", "(", "\"Successful negotiation of options\"", ")", "# Set options to OACK options", "self", ".", "context", ".", "options", "=", "pkt", ".", "options", "for", "key", "in", "self", ".", "context", ".", "options", ":", "log", ".", "info", "(", "\" %s = %s\"", "%", "(", "key", ",", "self", ".", "context", ".", "options", "[", "key", "]", ")", ")", "else", ":", "log", ".", "error", "(", "\"Failed to negotiate options\"", ")", "raise", "TftpException", "(", "\"Failed to negotiate options\"", ")", "else", ":", "raise", "TftpException", "(", "\"No options found in OACK\"", ")" ]
This method handles an OACK from the server, syncing any accepted options.
[ "This", "method", "handles", "an", "OACK", "from", "the", "server", "syncing", "any", "accepted", "options", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L39-L53
train
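A sketch with a hypothetical stub context, assuming TftpState.__init__ simply stores the context it is given:

from types import SimpleNamespace
from tftpy.TftpPacketTypes import TftpPacketOACK
from tftpy.TftpStates import TftpState

# Hypothetical stub standing in for a real transfer context.
ctx = SimpleNamespace(options={'blksize': 512})
oack = TftpPacketOACK()
oack.options = {'blksize': '512'}
TftpState(ctx).handleOACK(oack)   # on success the context adopts the OACK options
assert ctx.options is oack.options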
msoulier/tftpy
tftpy/TftpStates.py
TftpState.returnSupportedOptions
def returnSupportedOptions(self, options): """This method takes a requested options list from a client, and returns the ones that are supported.""" # We support the options blksize and tsize right now. # FIXME - put this somewhere else? accepted_options = {} for option in options: if option == 'blksize': # Make sure it's valid. if int(options[option]) > MAX_BLKSIZE: log.info("Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE) accepted_options[option] = MAX_BLKSIZE elif int(options[option]) < MIN_BLKSIZE: log.info("Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE) accepted_options[option] = MIN_BLKSIZE else: accepted_options[option] = options[option] elif option == 'tsize': log.debug("tsize option is set") accepted_options['tsize'] = 0 else: log.info("Dropping unsupported option '%s'" % option) log.debug("Returning these accepted options: %s", accepted_options) return accepted_options
python
def returnSupportedOptions(self, options): """This method takes a requested options list from a client, and returns the ones that are supported.""" # We support the options blksize and tsize right now. # FIXME - put this somewhere else? accepted_options = {} for option in options: if option == 'blksize': # Make sure it's valid. if int(options[option]) > MAX_BLKSIZE: log.info("Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE) accepted_options[option] = MAX_BLKSIZE elif int(options[option]) < MIN_BLKSIZE: log.info("Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE) accepted_options[option] = MIN_BLKSIZE else: accepted_options[option] = options[option] elif option == 'tsize': log.debug("tsize option is set") accepted_options['tsize'] = 0 else: log.info("Dropping unsupported option '%s'" % option) log.debug("Returning these accepted options: %s", accepted_options) return accepted_options
[ "def", "returnSupportedOptions", "(", "self", ",", "options", ")", ":", "# We support the options blksize and tsize right now.", "# FIXME - put this somewhere else?", "accepted_options", "=", "{", "}", "for", "option", "in", "options", ":", "if", "option", "==", "'blksize'", ":", "# Make sure it's valid.", "if", "int", "(", "options", "[", "option", "]", ")", ">", "MAX_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize greater than %d \"", "\"setting to maximum\"", "%", "MAX_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MAX_BLKSIZE", "elif", "int", "(", "options", "[", "option", "]", ")", "<", "MIN_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize less than %d \"", "\"setting to minimum\"", "%", "MIN_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MIN_BLKSIZE", "else", ":", "accepted_options", "[", "option", "]", "=", "options", "[", "option", "]", "elif", "option", "==", "'tsize'", ":", "log", ".", "debug", "(", "\"tsize option is set\"", ")", "accepted_options", "[", "'tsize'", "]", "=", "0", "else", ":", "log", ".", "info", "(", "\"Dropping unsupported option '%s'\"", "%", "option", ")", "log", ".", "debug", "(", "\"Returning these accepted options: %s\"", ",", "accepted_options", ")", "return", "accepted_options" ]
This method takes a requested options list from a client, and returns the ones that are supported.
[ "This", "method", "takes", "a", "requested", "options", "list", "from", "a", "client", "and", "returns", "the", "ones", "that", "are", "supported", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L55-L80
train
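Since this helper only reads its argument and module constants, it can be sketched with a context-less state (assuming, as above, that TftpState.__init__ just stores its argument); the requested values are hypothetical:

from tftpy.TftpStates import TftpState

state = TftpState(None)
accepted = state.returnSupportedOptions(
    {'blksize': 99999, 'tsize': 0, 'timeout': 3})
# blksize is clamped to MAX_BLKSIZE, tsize echoed as 0, timeout dropped
assert 'timeout' not in accepted and accepted['tsize'] == 0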
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendDAT
def sendDAT(self): """This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.""" finished = False blocknumber = self.context.next_block # Test hook if DELAY_BLOCK and DELAY_BLOCK == blocknumber: import time log.debug("Deliberately delaying 10 seconds...") time.sleep(10) dat = None blksize = self.context.getBlocksize() buffer = self.context.fileobj.read(blksize) log.debug("Read %d bytes into buffer", len(buffer)) if len(buffer) < blksize: log.info("Reached EOF on file %s" % self.context.file_to_transfer) finished = True dat = TftpPacketDAT() dat.data = buffer dat.blocknumber = blocknumber self.context.metrics.bytes += len(dat.data) log.debug("Sending DAT packet %d", dat.blocknumber) self.context.sock.sendto(dat.encode().buffer, (self.context.host, self.context.tidport)) if self.context.packethook: self.context.packethook(dat) self.context.last_pkt = dat return finished
python
def sendDAT(self): """This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.""" finished = False blocknumber = self.context.next_block # Test hook if DELAY_BLOCK and DELAY_BLOCK == blocknumber: import time log.debug("Deliberately delaying 10 seconds...") time.sleep(10) dat = None blksize = self.context.getBlocksize() buffer = self.context.fileobj.read(blksize) log.debug("Read %d bytes into buffer", len(buffer)) if len(buffer) < blksize: log.info("Reached EOF on file %s" % self.context.file_to_transfer) finished = True dat = TftpPacketDAT() dat.data = buffer dat.blocknumber = blocknumber self.context.metrics.bytes += len(dat.data) log.debug("Sending DAT packet %d", dat.blocknumber) self.context.sock.sendto(dat.encode().buffer, (self.context.host, self.context.tidport)) if self.context.packethook: self.context.packethook(dat) self.context.last_pkt = dat return finished
[ "def", "sendDAT", "(", "self", ")", ":", "finished", "=", "False", "blocknumber", "=", "self", ".", "context", ".", "next_block", "# Test hook", "if", "DELAY_BLOCK", "and", "DELAY_BLOCK", "==", "blocknumber", ":", "import", "time", "log", ".", "debug", "(", "\"Deliberately delaying 10 seconds...\"", ")", "time", ".", "sleep", "(", "10", ")", "dat", "=", "None", "blksize", "=", "self", ".", "context", ".", "getBlocksize", "(", ")", "buffer", "=", "self", ".", "context", ".", "fileobj", ".", "read", "(", "blksize", ")", "log", ".", "debug", "(", "\"Read %d bytes into buffer\"", ",", "len", "(", "buffer", ")", ")", "if", "len", "(", "buffer", ")", "<", "blksize", ":", "log", ".", "info", "(", "\"Reached EOF on file %s\"", "%", "self", ".", "context", ".", "file_to_transfer", ")", "finished", "=", "True", "dat", "=", "TftpPacketDAT", "(", ")", "dat", ".", "data", "=", "buffer", "dat", ".", "blocknumber", "=", "blocknumber", "self", ".", "context", ".", "metrics", ".", "bytes", "+=", "len", "(", "dat", ".", "data", ")", "log", ".", "debug", "(", "\"Sending DAT packet %d\"", ",", "dat", ".", "blocknumber", ")", "self", ".", "context", ".", "sock", ".", "sendto", "(", "dat", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "if", "self", ".", "context", ".", "packethook", ":", "self", ".", "context", ".", "packethook", "(", "dat", ")", "self", ".", "context", ".", "last_pkt", "=", "dat", "return", "finished" ]
This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.
[ "This", "method", "sends", "the", "next", "DAT", "packet", "based", "on", "the", "data", "in", "the", "context", ".", "It", "returns", "a", "boolean", "indicating", "whether", "the", "transfer", "is", "finished", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L82-L111
train
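The EOF convention sendDAT relies on can be shown without a live transfer: a block shorter than blksize ends the transfer (and a file that is an exact multiple of blksize forces a final zero-byte DAT). A pure-Python illustration with a hypothetical 1000-byte payload:

import io

blksize = 512
fileobj = io.BytesIO(b'x' * 1000)    # hypothetical 1000-byte payload
blocks, finished = 0, False
while not finished:
    chunk = fileobj.read(blksize)
    blocks += 1
    finished = len(chunk) < blksize  # 512 bytes, then 488: block 2 ends it
assert blocks == 2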
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendACK
def sendACK(self, blocknumber=None): """This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.""" log.debug("In sendACK, passed blocknumber is %s", blocknumber) if blocknumber is None: blocknumber = self.context.next_block log.info("Sending ack to block %d" % blocknumber) ackpkt = TftpPacketACK() ackpkt.blocknumber = blocknumber self.context.sock.sendto(ackpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = ackpkt
python
def sendACK(self, blocknumber=None): """This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.""" log.debug("In sendACK, passed blocknumber is %s", blocknumber) if blocknumber is None: blocknumber = self.context.next_block log.info("Sending ack to block %d" % blocknumber) ackpkt = TftpPacketACK() ackpkt.blocknumber = blocknumber self.context.sock.sendto(ackpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = ackpkt
[ "def", "sendACK", "(", "self", ",", "blocknumber", "=", "None", ")", ":", "log", ".", "debug", "(", "\"In sendACK, passed blocknumber is %s\"", ",", "blocknumber", ")", "if", "blocknumber", "is", "None", ":", "blocknumber", "=", "self", ".", "context", ".", "next_block", "log", ".", "info", "(", "\"Sending ack to block %d\"", "%", "blocknumber", ")", "ackpkt", "=", "TftpPacketACK", "(", ")", "ackpkt", ".", "blocknumber", "=", "blocknumber", "self", ".", "context", ".", "sock", ".", "sendto", "(", "ackpkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "self", ".", "context", ".", "last_pkt", "=", "ackpkt" ]
This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.
[ "This", "method", "sends", "an", "ack", "packet", "to", "the", "block", "number", "specified", ".", "If", "none", "is", "specified", "it", "defaults", "to", "the", "next_block", "property", "in", "the", "parent", "context", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L113-L126
train
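The ACK wire layout is just opcode 4 plus the big-endian block number, as a short sketch shows:

from tftpy.TftpPacketTypes import TftpPacketACK

ack = TftpPacketACK()
ack.blocknumber = 5
assert ack.encode().buffer == b'\x00\x04\x00\x05'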
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendError
def sendError(self, errorcode):
        """This method uses the context's socket and the given errorcode to
        compose and send an error packet."""
        log.debug("In sendError, being asked to send error %d", errorcode)
        errpkt = TftpPacketERR()
        errpkt.errorcode = errorcode
        if self.context.tidport is None:
            log.debug("Error packet received outside session. Discarding")
        else:
            self.context.sock.sendto(errpkt.encode().buffer,
                                     (self.context.host,
                                      self.context.tidport))
        self.context.last_pkt = errpkt
python
def sendError(self, errorcode):
        """This method uses the context's socket and the given errorcode to
        compose and send an error packet."""
        log.debug("In sendError, being asked to send error %d", errorcode)
        errpkt = TftpPacketERR()
        errpkt.errorcode = errorcode
        if self.context.tidport is None:
            log.debug("Error packet received outside session. Discarding")
        else:
            self.context.sock.sendto(errpkt.encode().buffer,
                                     (self.context.host,
                                      self.context.tidport))
        self.context.last_pkt = errpkt
[ "def", "sendError", "(", "self", ",", "errorcode", ")", ":", "log", ".", "debug", "(", "\"In sendError, being asked to send error %d\"", ",", "errorcode", ")", "errpkt", "=", "TftpPacketERR", "(", ")", "errpkt", ".", "errorcode", "=", "errorcode", "if", "self", ".", "context", ".", "tidport", "==", "None", ":", "log", ".", "debug", "(", "\"Error packet received outside session. Discarding\"", ")", "else", ":", "self", ".", "context", ".", "sock", ".", "sendto", "(", "errpkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "self", ".", "context", ".", "last_pkt", "=", "errpkt" ]
This method uses the context's socket and the given errorcode to compose and send an error packet.
[ "This", "method", "uses", "the", "context", "s", "socket", "and", "the", "given", "errorcode", "to", "compose", "and", "send", "an", "error", "packet", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L128-L140
train
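When no TID port has been negotiated there is nowhere to send, so the packet is only recorded on the context. A sketch with a hypothetical stub context (same TftpState.__init__ assumption as above):

from types import SimpleNamespace
from tftpy.TftpStates import TftpState

# Hypothetical stub: no negotiated TID port, so nothing is transmitted.
ctx = SimpleNamespace(tidport=None, host='192.168.0.10', sock=None)
state = TftpState(ctx)
state.sendError(2)                   # 2 = "Access violation" in the errmsgs table
assert ctx.last_pkt.errorcode == 2   # packet is still recorded as last_pkt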