Dataset schema (column name, type, and value statistics):

    repo              string   lengths 7 to 55
    path              string   lengths 4 to 127
    func_name         string   lengths 1 to 88
    original_string   string   lengths 75 to 19.8k
    language          string   1 distinct value
    code              string   lengths 75 to 19.8k
    code_tokens       list
    docstring         string   lengths 3 to 17.3k
    docstring_tokens  list
    sha               string   length 40 (fixed)
    url               string   lengths 87 to 242
    partition         string   1 distinct value
DeV1doR/aioethereum
aioethereum/utils.py
add_0x
def add_0x(string):
    """Add 0x to string at start.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x' + str(string)
python
def add_0x(string):
    """Add 0x to string at start.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x' + str(string)
[ "def", "add_0x", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "string", "=", "string", ".", "decode", "(", "'utf-8'", ")", "return", "'0x'", "+", "str", "(", "string", ")" ]
Add 0x to string at start.
[ "Add", "0x", "to", "string", "at", "start", "." ]
85eb46550d862b3ccc309914ea871ca1c7b42157
https://github.com/DeV1doR/aioethereum/blob/85eb46550d862b3ccc309914ea871ca1c7b42157/aioethereum/utils.py#L4-L9
train
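A quick usage sketch for add_0x (assuming the aioethereum package above is importable); both bytes and str inputs come back as a 0x-prefixed str:

    from aioethereum.utils import add_0x

    assert add_0x('deadbeef') == '0xdeadbeef'
    assert add_0x(b'deadbeef') == '0xdeadbeef'  # bytes are decoded to utf-8 first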
Genida/dependenpy
src/dependenpy/helpers.py
guess_depth
def guess_depth(packages):
    """
    Guess the optimal depth to use for the given list of arguments.

    Args:
        packages (list of str): list of packages.

    Returns:
        int: guessed depth to use.
    """
    if len(packages) == 1:
        return packages[0].count('.') + 2
    return min(p.count('.') for p in packages) + 1
python
def guess_depth(packages):
    """
    Guess the optimal depth to use for the given list of arguments.

    Args:
        packages (list of str): list of packages.

    Returns:
        int: guessed depth to use.
    """
    if len(packages) == 1:
        return packages[0].count('.') + 2
    return min(p.count('.') for p in packages) + 1
[ "def", "guess_depth", "(", "packages", ")", ":", "if", "len", "(", "packages", ")", "==", "1", ":", "return", "packages", "[", "0", "]", ".", "count", "(", "'.'", ")", "+", "2", "return", "min", "(", "p", ".", "count", "(", "'.'", ")", "for", "p", "in", "packages", ")", "+", "1" ]
Guess the optimal depth to use for the given list of arguments.

Args:
    packages (list of str): list of packages.

Returns:
    int: guessed depth to use.
[ "Guess", "the", "optimal", "depth", "to", "use", "for", "the", "given", "list", "of", "arguments", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/helpers.py#L45-L57
train
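Worked examples of the depth rule above (pure arithmetic on dot counts, assuming dependenpy.helpers is importable):

    from dependenpy.helpers import guess_depth

    assert guess_depth(['django']) == 2                         # 0 dots + 2
    assert guess_depth(['django.db.models']) == 4               # 2 dots + 2
    assert guess_depth(['django.db', 'django.db.models']) == 2  # min(1, 2) + 1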
Genida/dependenpy
src/dependenpy/helpers.py
PrintMixin.print
def print(self, format=TEXT, output=sys.stdout, **kwargs):
    """
    Print the object in a file or on standard output by default.

    Args:
        format (str): output format (csv, json or text).
        output (file): descriptor to an opened file
            (default to standard output).
        **kwargs (): additional arguments.
    """
    if format is None:
        format = TEXT

    if format == TEXT:
        print(self._to_text(**kwargs), file=output)
    elif format == CSV:
        print(self._to_csv(**kwargs), file=output)
    elif format == JSON:
        print(self._to_json(**kwargs), file=output)
python
def print(self, format=TEXT, output=sys.stdout, **kwargs):
    """
    Print the object in a file or on standard output by default.

    Args:
        format (str): output format (csv, json or text).
        output (file): descriptor to an opened file
            (default to standard output).
        **kwargs (): additional arguments.
    """
    if format is None:
        format = TEXT

    if format == TEXT:
        print(self._to_text(**kwargs), file=output)
    elif format == CSV:
        print(self._to_csv(**kwargs), file=output)
    elif format == JSON:
        print(self._to_json(**kwargs), file=output)
[ "def", "print", "(", "self", ",", "format", "=", "TEXT", ",", "output", "=", "sys", ".", "stdout", ",", "*", "*", "kwargs", ")", ":", "if", "format", "is", "None", ":", "format", "=", "TEXT", "if", "format", "==", "TEXT", ":", "print", "(", "self", ".", "_to_text", "(", "*", "*", "kwargs", ")", ",", "file", "=", "output", ")", "elif", "format", "==", "CSV", ":", "print", "(", "self", ".", "_to_csv", "(", "*", "*", "kwargs", ")", ",", "file", "=", "output", ")", "elif", "format", "==", "JSON", ":", "print", "(", "self", ".", "_to_json", "(", "*", "*", "kwargs", ")", ",", "file", "=", "output", ")" ]
Print the object in a file or on standard output by default.

Args:
    format (str): output format (csv, json or text).
    output (file): descriptor to an opened file (default to standard output).
    **kwargs (): additional arguments.
[ "Print", "the", "object", "in", "a", "file", "or", "on", "standard", "output", "by", "default", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/helpers.py#L16-L33
train
sengupta/twss
twss/twsslib.py
TWSS.import_training_data
def import_training_data(
        self,
        positive_corpus_file=os.path.join(os.path.dirname(__file__), "positive.txt"),
        negative_corpus_file=os.path.join(os.path.dirname(__file__), "negative.txt")
        ):
    """
    This method imports the positive and negative training data from the
    two corpus files and creates the training data list.
    """
    positive_corpus = open(positive_corpus_file)
    negative_corpus = open(negative_corpus_file)
    # for line in positive_corpus:
    #     self.training_data.append((line, True))
    # for line in negative_corpus:
    #     self.training_data.append((line, False))
    # The following code works. Need to profile this to see if this is an
    # improvement over the code above.
    positive_training_data = list(map(lambda x: (x, True), positive_corpus))
    negative_training_data = list(map(lambda x: (x, False), negative_corpus))
    self.training_data = positive_training_data + negative_training_data
python
def import_training_data(
        self,
        positive_corpus_file=os.path.join(os.path.dirname(__file__), "positive.txt"),
        negative_corpus_file=os.path.join(os.path.dirname(__file__), "negative.txt")
        ):
    """
    This method imports the positive and negative training data from the
    two corpus files and creates the training data list.
    """
    positive_corpus = open(positive_corpus_file)
    negative_corpus = open(negative_corpus_file)
    # for line in positive_corpus:
    #     self.training_data.append((line, True))
    # for line in negative_corpus:
    #     self.training_data.append((line, False))
    # The following code works. Need to profile this to see if this is an
    # improvement over the code above.
    positive_training_data = list(map(lambda x: (x, True), positive_corpus))
    negative_training_data = list(map(lambda x: (x, False), negative_corpus))
    self.training_data = positive_training_data + negative_training_data
[ "def", "import_training_data", "(", "self", ",", "positive_corpus_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"positive.txt\"", ")", ",", "negative_corpus_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"negative.txt\"", ")", ")", ":", "positive_corpus", "=", "open", "(", "positive_corpus_file", ")", "negative_corpus", "=", "open", "(", "negative_corpus_file", ")", "# for line in positive_corpus: ", "# self.training_data.append((line, True))", "# for line in negative_corpus: ", "# self.training_data.append((line, False))", "# The following code works. Need to profile this to see if this is an", "# improvement over the code above. ", "positive_training_data", "=", "list", "(", "map", "(", "lambda", "x", ":", "(", "x", ",", "True", ")", ",", "positive_corpus", ")", ")", "negative_training_data", "=", "list", "(", "map", "(", "lambda", "x", ":", "(", "x", ",", "False", ")", ",", "negative_corpus", ")", ")", "self", ".", "training_data", "=", "positive_training_data", "+", "negative_training_data" ]
This method imports the positive and negative training data from the two corpus files and creates the training data list.
[ "This", "method", "imports", "the", "positive", "and", "negative", "training", "data", "from", "the", "two", "corpus", "files", "and", "creates", "the", "training", "data", "list", "." ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L24-L48
train
sengupta/twss
twss/twsslib.py
TWSS.train
def train(self):
    """
    This method generates the classifier. This method assumes that the
    training data has been loaded
    """
    if not self.training_data:
        self.import_training_data()
    training_feature_set = [(self.extract_features(line), label)
                            for (line, label) in self.training_data]
    self.classifier = nltk.NaiveBayesClassifier.train(training_feature_set)
python
def train(self):
    """
    This method generates the classifier. This method assumes that the
    training data has been loaded
    """
    if not self.training_data:
        self.import_training_data()
    training_feature_set = [(self.extract_features(line), label)
                            for (line, label) in self.training_data]
    self.classifier = nltk.NaiveBayesClassifier.train(training_feature_set)
[ "def", "train", "(", "self", ")", ":", "if", "not", "self", ".", "training_data", ":", "self", ".", "import_training_data", "(", ")", "training_feature_set", "=", "[", "(", "self", ".", "extract_features", "(", "line", ")", ",", "label", ")", "for", "(", "line", ",", "label", ")", "in", "self", ".", "training_data", "]", "self", ".", "classifier", "=", "nltk", ".", "NaiveBayesClassifier", ".", "train", "(", "training_feature_set", ")" ]
This method generates the classifier. This method assumes that the training data has been loaded
[ "This", "method", "generates", "the", "classifier", ".", "This", "method", "assumes", "that", "the", "training", "data", "has", "been", "loaded" ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L50-L59
train
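For context, nltk.NaiveBayesClassifier.train consumes exactly the shape built above: a list of (feature_dict, label) pairs. A minimal sketch, assuming nltk is installed (toy data, not the project's corpora):

    import nltk

    # Toy training pairs in the same shape as training_feature_set above.
    toy_set = [
        ({'contains(hard)': True}, True),
        ({'contains(boring)': True}, False),
    ]
    classifier = nltk.NaiveBayesClassifier.train(toy_set)
    print(classifier.classify({'contains(hard)': True}))  # expected: True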
sengupta/twss
twss/twsslib.py
TWSS.extract_features
def extract_features(self, phrase):
    """
    This function will extract features from the phrase being used.
    Currently, the feature we are extracting are unigrams of the text
    corpus.
    """
    words = nltk.word_tokenize(phrase)
    features = {}
    for word in words:
        features['contains(%s)' % word] = (word in words)
    return features
python
def extract_features(self, phrase):
    """
    This function will extract features from the phrase being used.
    Currently, the feature we are extracting are unigrams of the text
    corpus.
    """
    words = nltk.word_tokenize(phrase)
    features = {}
    for word in words:
        features['contains(%s)' % word] = (word in words)
    return features
[ "def", "extract_features", "(", "self", ",", "phrase", ")", ":", "words", "=", "nltk", ".", "word_tokenize", "(", "phrase", ")", "features", "=", "{", "}", "for", "word", "in", "words", ":", "features", "[", "'contains(%s)'", "%", "word", "]", "=", "(", "word", "in", "words", ")", "return", "features" ]
This function will extract features from the phrase being used. Currently, the feature we are extracting are unigrams of the text corpus.
[ "This", "function", "will", "extract", "features", "from", "the", "phrase", "being", "used", ".", "Currently", "the", "feature", "we", "are", "extracting", "are", "unigrams", "of", "the", "text", "corpus", "." ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L61-L71
train
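One quirk worth noting in extract_features: since each word is drawn from words, the expression (word in words) is always True, so every emitted feature has the value True. That is harmless for this bag-of-words use (absent words simply produce no feature), and a hypothetical rewrite that makes the intent explicit (not the project's code; assumes nltk and its punkt tokenizer data are available) would be:

    import nltk

    def extract_features_v2(phrase):
        # Hypothetical variant: one always-True presence feature per token.
        # Equivalent to the original, since (word in words) is True for
        # every word iterated out of words.
        words = nltk.word_tokenize(phrase)
        return {'contains(%s)' % word: True for word in words}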
sengupta/twss
twss/twsslib.py
TWSS.is_twss
def is_twss(self, phrase):
    """
    The magic function- this accepts a phrase and tells you if it
    classifies as an entendre
    """
    featureset = self.extract_features(phrase)
    return self.classifier.classify(featureset)
python
def is_twss(self, phrase):
    """
    The magic function- this accepts a phrase and tells you if it
    classifies as an entendre
    """
    featureset = self.extract_features(phrase)
    return self.classifier.classify(featureset)
[ "def", "is_twss", "(", "self", ",", "phrase", ")", ":", "featureset", "=", "self", ".", "extract_features", "(", "phrase", ")", "return", "self", ".", "classifier", ".", "classify", "(", "featureset", ")" ]
The magic function- this accepts a phrase and tells you if it classifies as an entendre
[ "The", "magic", "function", "-", "this", "accepts", "a", "phrase", "and", "tells", "you", "if", "it", "classifies", "as", "an", "entendre" ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L73-L79
train
sengupta/twss
twss/twsslib.py
TWSS.save
def save(self, filename='classifier.dump'):
    """
    Pickles the classifier and dumps it into a file
    """
    ofile = open(filename, 'w+')
    pickle.dump(self.classifier, ofile)
    ofile.close()
python
def save(self, filename='classifier.dump'):
    """
    Pickles the classifier and dumps it into a file
    """
    ofile = open(filename, 'w+')
    pickle.dump(self.classifier, ofile)
    ofile.close()
[ "def", "save", "(", "self", ",", "filename", "=", "'classifier.dump'", ")", ":", "ofile", "=", "open", "(", "filename", ",", "'w+'", ")", "pickle", ".", "dump", "(", "self", ".", "classifier", ",", "ofile", ")", "ofile", ".", "close", "(", ")" ]
Pickles the classifier and dumps it into a file
[ "Pickles", "the", "classifier", "and", "dumps", "it", "into", "a", "file" ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L81-L87
train
sengupta/twss
twss/twsslib.py
TWSS.load
def load(self, filename='classifier.dump'):
    """
    Unpickles the classifier used
    """
    ifile = open(filename, 'r+')
    self.classifier = pickle.load(ifile)
    ifile.close()
python
def load(self, filename='classifier.dump'):
    """
    Unpickles the classifier used
    """
    ifile = open(filename, 'r+')
    self.classifier = pickle.load(ifile)
    ifile.close()
[ "def", "load", "(", "self", ",", "filename", "=", "'classifier.dump'", ")", ":", "ifile", "=", "open", "(", "filename", ",", "'r+'", ")", "self", ".", "classifier", "=", "pickle", ".", "load", "(", "ifile", ")", "ifile", ".", "close", "(", ")" ]
Unpickles the classifier used
[ "Unpickles", "the", "classifier", "used" ]
69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L89-L95
train
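One portability caveat for the save/load pair above: on Python 3, pickle requires binary file modes, so the text-mode 'w+'/'r+' opens raise TypeError. A minimal sketch of the binary-mode equivalent (a hypothetical variant, not the repository's code):

    import pickle

    def save(classifier, filename='classifier.dump'):
        # 'wb': pickle writes bytes on Python 3
        with open(filename, 'wb') as ofile:
            pickle.dump(classifier, ofile)

    def load(filename='classifier.dump'):
        # 'rb': pickle reads bytes on Python 3
        with open(filename, 'rb') as ifile:
            return pickle.load(ifile)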
catch22/pw
pw/__main__.py
pw
def pw(
    ctx,
    key_pattern,
    user_pattern,
    mode,
    strict_flag,
    user_flag,
    file,
    edit_subcommand,
    gen_subcommand,
):
    """Search for USER and KEY in GPG-encrypted password file."""
    # install silent Ctrl-C handler
    def handle_sigint(*_):
        click.echo()
        ctx.exit(1)

    signal.signal(signal.SIGINT, handle_sigint)

    # invoke a subcommand?
    if gen_subcommand:
        length = int(key_pattern) if key_pattern else None
        generate_password(mode, length)
        return
    elif edit_subcommand:
        launch_editor(ctx, file)
        return

    # verify that database file is present
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    # load database
    store = Store.load(file)

    # if no user query provided, split key query according to right-most "@" sign
    # (since usernames are typically email addresses)
    if not user_pattern:
        user_pattern, _, key_pattern = key_pattern.rpartition("@")

    # search database
    results = store.search(key_pattern, user_pattern)
    results = list(results)

    # if strict flag is enabled, check that precisely a single record was found
    if strict_flag and len(results) != 1:
        click.echo(
            "error: multiple or no records found (but using --strict flag)", err=True
        )
        ctx.exit(2)

    # raw mode?
    if mode == Mode.RAW:
        for entry in results:
            click.echo(entry.user if user_flag else entry.password)
        return

    # print results
    for idx, entry in enumerate(results):
        # start with key and user
        line = highlight_match(key_pattern, entry.key)
        if entry.user:
            line += ": " + highlight_match(user_pattern, entry.user)

        # add password or copy&paste success message
        if mode == Mode.ECHO and not user_flag:
            line += " | " + style_password(entry.password)
        elif mode == Mode.COPY and idx == 0:
            try:
                import pyperclip

                pyperclip.copy(entry.user if user_flag else entry.password)
                result = style_success(
                    "*** %s COPIED TO CLIPBOARD ***"
                    % ("USERNAME" if user_flag else "PASSWORD")
                )
            except ImportError:
                result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
            line += " | " + result

        # add notes
        if entry.notes:
            if idx == 0:
                line += "\n"
                line += "\n".join(" " + line for line in entry.notes.splitlines())
            else:
                lines = entry.notes.splitlines()
                line += " | " + lines[0]
                if len(lines) > 1:
                    line += " (...)"

        click.echo(line)
python
def pw(
    ctx,
    key_pattern,
    user_pattern,
    mode,
    strict_flag,
    user_flag,
    file,
    edit_subcommand,
    gen_subcommand,
):
    """Search for USER and KEY in GPG-encrypted password file."""
    # install silent Ctrl-C handler
    def handle_sigint(*_):
        click.echo()
        ctx.exit(1)

    signal.signal(signal.SIGINT, handle_sigint)

    # invoke a subcommand?
    if gen_subcommand:
        length = int(key_pattern) if key_pattern else None
        generate_password(mode, length)
        return
    elif edit_subcommand:
        launch_editor(ctx, file)
        return

    # verify that database file is present
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    # load database
    store = Store.load(file)

    # if no user query provided, split key query according to right-most "@" sign
    # (since usernames are typically email addresses)
    if not user_pattern:
        user_pattern, _, key_pattern = key_pattern.rpartition("@")

    # search database
    results = store.search(key_pattern, user_pattern)
    results = list(results)

    # if strict flag is enabled, check that precisely a single record was found
    if strict_flag and len(results) != 1:
        click.echo(
            "error: multiple or no records found (but using --strict flag)", err=True
        )
        ctx.exit(2)

    # raw mode?
    if mode == Mode.RAW:
        for entry in results:
            click.echo(entry.user if user_flag else entry.password)
        return

    # print results
    for idx, entry in enumerate(results):
        # start with key and user
        line = highlight_match(key_pattern, entry.key)
        if entry.user:
            line += ": " + highlight_match(user_pattern, entry.user)

        # add password or copy&paste success message
        if mode == Mode.ECHO and not user_flag:
            line += " | " + style_password(entry.password)
        elif mode == Mode.COPY and idx == 0:
            try:
                import pyperclip

                pyperclip.copy(entry.user if user_flag else entry.password)
                result = style_success(
                    "*** %s COPIED TO CLIPBOARD ***"
                    % ("USERNAME" if user_flag else "PASSWORD")
                )
            except ImportError:
                result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
            line += " | " + result

        # add notes
        if entry.notes:
            if idx == 0:
                line += "\n"
                line += "\n".join(" " + line for line in entry.notes.splitlines())
            else:
                lines = entry.notes.splitlines()
                line += " | " + lines[0]
                if len(lines) > 1:
                    line += " (...)"

        click.echo(line)
[ "def", "pw", "(", "ctx", ",", "key_pattern", ",", "user_pattern", ",", "mode", ",", "strict_flag", ",", "user_flag", ",", "file", ",", "edit_subcommand", ",", "gen_subcommand", ",", ")", ":", "# install silent Ctrl-C handler", "def", "handle_sigint", "(", "*", "_", ")", ":", "click", ".", "echo", "(", ")", "ctx", ".", "exit", "(", "1", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "handle_sigint", ")", "# invoke a subcommand?", "if", "gen_subcommand", ":", "length", "=", "int", "(", "key_pattern", ")", "if", "key_pattern", "else", "None", "generate_password", "(", "mode", ",", "length", ")", "return", "elif", "edit_subcommand", ":", "launch_editor", "(", "ctx", ",", "file", ")", "return", "# verify that database file is present", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "click", ".", "echo", "(", "\"error: password store not found at '%s'\"", "%", "file", ",", "err", "=", "True", ")", "ctx", ".", "exit", "(", "1", ")", "# load database", "store", "=", "Store", ".", "load", "(", "file", ")", "# if no user query provided, split key query according to right-most \"@\" sign (since usernames are typically email addresses)", "if", "not", "user_pattern", ":", "user_pattern", ",", "_", ",", "key_pattern", "=", "key_pattern", ".", "rpartition", "(", "\"@\"", ")", "# search database", "results", "=", "store", ".", "search", "(", "key_pattern", ",", "user_pattern", ")", "results", "=", "list", "(", "results", ")", "# if strict flag is enabled, check that precisely a single record was found", "if", "strict_flag", "and", "len", "(", "results", ")", "!=", "1", ":", "click", ".", "echo", "(", "\"error: multiple or no records found (but using --strict flag)\"", ",", "err", "=", "True", ")", "ctx", ".", "exit", "(", "2", ")", "# raw mode?", "if", "mode", "==", "Mode", ".", "RAW", ":", "for", "entry", "in", "results", ":", "click", ".", "echo", "(", "entry", ".", "user", "if", "user_flag", "else", "entry", ".", "password", ")", "return", "# print results", "for", "idx", ",", "entry", "in", "enumerate", "(", "results", ")", ":", "# start with key and user", "line", "=", "highlight_match", "(", "key_pattern", ",", "entry", ".", "key", ")", "if", "entry", ".", "user", ":", "line", "+=", "\": \"", "+", "highlight_match", "(", "user_pattern", ",", "entry", ".", "user", ")", "# add password or copy&paste sucess message", "if", "mode", "==", "Mode", ".", "ECHO", "and", "not", "user_flag", ":", "line", "+=", "\" | \"", "+", "style_password", "(", "entry", ".", "password", ")", "elif", "mode", "==", "Mode", ".", "COPY", "and", "idx", "==", "0", ":", "try", ":", "import", "pyperclip", "pyperclip", ".", "copy", "(", "entry", ".", "user", "if", "user_flag", "else", "entry", ".", "password", ")", "result", "=", "style_success", "(", "\"*** %s COPIED TO CLIPBOARD ***\"", "%", "(", "\"USERNAME\"", "if", "user_flag", "else", "\"PASSWORD\"", ")", ")", "except", "ImportError", ":", "result", "=", "style_error", "(", "'*** PYTHON PACKAGE \"PYPERCLIP\" NOT FOUND ***'", ")", "line", "+=", "\" | \"", "+", "result", "# add notes", "if", "entry", ".", "notes", ":", "if", "idx", "==", "0", ":", "line", "+=", "\"\\n\"", "line", "+=", "\"\\n\"", ".", "join", "(", "\" \"", "+", "line", "for", "line", "in", "entry", ".", "notes", ".", "splitlines", "(", ")", ")", "else", ":", "lines", "=", "entry", ".", "notes", ".", "splitlines", "(", ")", "line", "+=", "\" | \"", "+", "lines", "[", "0", "]", "if", "len", "(", "lines", ")", ">", "1", ":", "line", "+=", "\" (...)\"", "click", ".", 
"echo", "(", "line", ")" ]
Search for USER and KEY in GPG-encrypted password file.
[ "Search", "for", "USER", "and", "KEY", "in", "GPG", "-", "encrypted", "password", "file", "." ]
2452924bbdccad28b21290b6ce062809c3d1c5f2
https://github.com/catch22/pw/blob/2452924bbdccad28b21290b6ce062809c3d1c5f2/pw/__main__.py#L91-L182
train
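The right-most "@" split in pw relies on str.rpartition, which returns a (head, sep, tail) triple and leaves head and sep empty when no separator is present, so a query without "@" keeps the whole string as the key pattern:

    assert "alice@example.com@github".rpartition("@") == ("alice@example.com", "@", "github")
    assert "github".rpartition("@") == ("", "", "github")  # user_pattern stays ""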
catch22/pw
pw/__main__.py
launch_editor
def launch_editor(ctx, file):
    """launch editor with decrypted password database"""
    # do not use EDITOR environment variable (rather force user to make a conscious choice)
    editor = os.environ.get("PW_EDITOR")
    if not editor:
        click.echo("error: no editor set in PW_EDITOR environment variables")
        ctx.exit(1)

    # verify that database file is present
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    # load source (decrypting if necessary)
    is_encrypted = _gpg.is_encrypted(file)
    if is_encrypted:
        original = _gpg.decrypt(file)
    else:
        original = open(file, "rb").read()

    # if encrypted, determine recipient
    if is_encrypted:
        recipient = os.environ.get("PW_GPG_RECIPIENT")
        if not recipient:
            click.echo(
                "error: no recipient set in PW_GPG_RECIPIENT environment variables"
            )
            ctx.exit(1)

    # launch the editor
    ext = _gpg.unencrypted_ext(file)
    modified = click.edit(
        original.decode("utf-8"), editor=editor, require_save=True, extension=ext
    )
    if modified is None:
        click.echo("not modified")
        return
    modified = modified.encode("utf-8")

    # not encrypted? simply overwrite file
    if not is_encrypted:
        with open(file, "wb") as fp:
            fp.write(modified)
        return

    # otherwise, the process is somewhat more complicated
    _gpg.encrypt(recipient=recipient, dest_path=file, content=modified)
python
def launch_editor(ctx, file):
    """launch editor with decrypted password database"""
    # do not use EDITOR environment variable (rather force user to make a conscious choice)
    editor = os.environ.get("PW_EDITOR")
    if not editor:
        click.echo("error: no editor set in PW_EDITOR environment variables")
        ctx.exit(1)

    # verify that database file is present
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    # load source (decrypting if necessary)
    is_encrypted = _gpg.is_encrypted(file)
    if is_encrypted:
        original = _gpg.decrypt(file)
    else:
        original = open(file, "rb").read()

    # if encrypted, determine recipient
    if is_encrypted:
        recipient = os.environ.get("PW_GPG_RECIPIENT")
        if not recipient:
            click.echo(
                "error: no recipient set in PW_GPG_RECIPIENT environment variables"
            )
            ctx.exit(1)

    # launch the editor
    ext = _gpg.unencrypted_ext(file)
    modified = click.edit(
        original.decode("utf-8"), editor=editor, require_save=True, extension=ext
    )
    if modified is None:
        click.echo("not modified")
        return
    modified = modified.encode("utf-8")

    # not encrypted? simply overwrite file
    if not is_encrypted:
        with open(file, "wb") as fp:
            fp.write(modified)
        return

    # otherwise, the process is somewhat more complicated
    _gpg.encrypt(recipient=recipient, dest_path=file, content=modified)
[ "def", "launch_editor", "(", "ctx", ",", "file", ")", ":", "# do not use EDITOR environment variable (rather force user to make a concious choice)", "editor", "=", "os", ".", "environ", ".", "get", "(", "\"PW_EDITOR\"", ")", "if", "not", "editor", ":", "click", ".", "echo", "(", "\"error: no editor set in PW_EDITOR environment variables\"", ")", "ctx", ".", "exit", "(", "1", ")", "# verify that database file is present", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "click", ".", "echo", "(", "\"error: password store not found at '%s'\"", "%", "file", ",", "err", "=", "True", ")", "ctx", ".", "exit", "(", "1", ")", "# load source (decrypting if necessary)", "is_encrypted", "=", "_gpg", ".", "is_encrypted", "(", "file", ")", "if", "is_encrypted", ":", "original", "=", "_gpg", ".", "decrypt", "(", "file", ")", "else", ":", "original", "=", "open", "(", "file", ",", "\"rb\"", ")", ".", "read", "(", ")", "# if encrypted, determine recipient", "if", "is_encrypted", ":", "recipient", "=", "os", ".", "environ", ".", "get", "(", "\"PW_GPG_RECIPIENT\"", ")", "if", "not", "recipient", ":", "click", ".", "echo", "(", "\"error: no recipient set in PW_GPG_RECIPIENT environment variables\"", ")", "ctx", ".", "exit", "(", "1", ")", "# launch the editor", "ext", "=", "_gpg", ".", "unencrypted_ext", "(", "file", ")", "modified", "=", "click", ".", "edit", "(", "original", ".", "decode", "(", "\"utf-8\"", ")", ",", "editor", "=", "editor", ",", "require_save", "=", "True", ",", "extension", "=", "ext", ")", "if", "modified", "is", "None", ":", "click", ".", "echo", "(", "\"not modified\"", ")", "return", "modified", "=", "modified", ".", "encode", "(", "\"utf-8\"", ")", "# not encrypted? simply overwrite file", "if", "not", "is_encrypted", ":", "with", "open", "(", "file", ",", "\"wb\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "modified", ")", "return", "# otherwise, the process is somewhat more complicated", "_gpg", ".", "encrypt", "(", "recipient", "=", "recipient", ",", "dest_path", "=", "file", ",", "content", "=", "modified", ")" ]
launch editor with decrypted password database
[ "launch", "editor", "with", "decrypted", "password", "database" ]
2452924bbdccad28b21290b6ce062809c3d1c5f2
https://github.com/catch22/pw/blob/2452924bbdccad28b21290b6ce062809c3d1c5f2/pw/__main__.py#L185-L231
train
catch22/pw
pw/__main__.py
generate_password
def generate_password(mode, length):
    """generate a random password"""
    # generate random password
    r = random.SystemRandom()
    length = length or RANDOM_PASSWORD_DEFAULT_LENGTH
    password = "".join(r.choice(RANDOM_PASSWORD_ALPHABET) for _ in range(length))

    # copy or echo generated password
    if mode == Mode.ECHO:
        click.echo(style_password(password))
    elif mode == Mode.COPY:
        try:
            import pyperclip

            pyperclip.copy(password)
            result = style_success("*** PASSWORD COPIED TO CLIPBOARD ***")
        except ImportError:
            result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
        click.echo(result)
    elif mode == Mode.RAW:
        click.echo(password)
python
def generate_password(mode, length):
    """generate a random password"""
    # generate random password
    r = random.SystemRandom()
    length = length or RANDOM_PASSWORD_DEFAULT_LENGTH
    password = "".join(r.choice(RANDOM_PASSWORD_ALPHABET) for _ in range(length))

    # copy or echo generated password
    if mode == Mode.ECHO:
        click.echo(style_password(password))
    elif mode == Mode.COPY:
        try:
            import pyperclip

            pyperclip.copy(password)
            result = style_success("*** PASSWORD COPIED TO CLIPBOARD ***")
        except ImportError:
            result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
        click.echo(result)
    elif mode == Mode.RAW:
        click.echo(password)
[ "def", "generate_password", "(", "mode", ",", "length", ")", ":", "# generate random password", "r", "=", "random", ".", "SystemRandom", "(", ")", "length", "=", "length", "or", "RANDOM_PASSWORD_DEFAULT_LENGTH", "password", "=", "\"\"", ".", "join", "(", "r", ".", "choice", "(", "RANDOM_PASSWORD_ALPHABET", ")", "for", "_", "in", "range", "(", "length", ")", ")", "# copy or echo generated password", "if", "mode", "==", "Mode", ".", "ECHO", ":", "click", ".", "echo", "(", "style_password", "(", "password", ")", ")", "elif", "mode", "==", "Mode", ".", "COPY", ":", "try", ":", "import", "pyperclip", "pyperclip", ".", "copy", "(", "password", ")", "result", "=", "style_success", "(", "\"*** PASSWORD COPIED TO CLIPBOARD ***\"", ")", "except", "ImportError", ":", "result", "=", "style_error", "(", "'*** PYTHON PACKAGE \"PYPERCLIP\" NOT FOUND ***'", ")", "click", ".", "echo", "(", "result", ")", "elif", "mode", "==", "Mode", ".", "RAW", ":", "click", ".", "echo", "(", "password", ")" ]
generate a random password
[ "generate", "a", "random", "password" ]
2452924bbdccad28b21290b6ce062809c3d1c5f2
https://github.com/catch22/pw/blob/2452924bbdccad28b21290b6ce062809c3d1c5f2/pw/__main__.py#L234-L254
train
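generate_password draws from random.SystemRandom, which is backed by os.urandom and therefore appropriate for secrets, unlike the default Mersenne Twister. A self-contained sketch of the same idea (the alphabet and length constants here are placeholders, not the package's values):

    import random
    import string

    ALPHABET = string.ascii_letters + string.digits  # placeholder alphabet
    DEFAULT_LENGTH = 16                              # placeholder default

    def generate(length=DEFAULT_LENGTH):
        r = random.SystemRandom()  # os.urandom-backed CSPRNG
        return "".join(r.choice(ALPHABET) for _ in range(length))

    print(generate())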
striglia/stockfighter
stockfighter/gm.py
GM._load_data
def _load_data(self):
    """Internal method for querying the GM api for currently running
    levels and storing that state."""
    url = urljoin(self.base_url, 'levels')
    resp = requests.get(url, headers=self.headers)
    # TODO: Confirm/deny that this is a real API for the levels currently running...
    if resp.content:
        return resp.json()
    else:
        return None
python
def _load_data(self):
    """Internal method for querying the GM api for currently running
    levels and storing that state."""
    url = urljoin(self.base_url, 'levels')
    resp = requests.get(url, headers=self.headers)
    # TODO: Confirm/deny that this is a real API for the levels currently running...
    if resp.content:
        return resp.json()
    else:
        return None
[ "def", "_load_data", "(", "self", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'levels'", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "headers", ")", "# TOOD: Confirm/deny that this is a real API for the levels currenlty running...", "if", "resp", ".", "content", ":", "return", "resp", ".", "json", "(", ")", "else", ":", "return", "None" ]
Internal method for querying the GM api for currently running levels and storing that state.
[ "Internal", "method", "for", "querying", "the", "GM", "api", "for", "currently", "running", "levels", "and", "storing", "that", "state", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/gm.py#L16-L25
train
mkoura/dump2polarion
dump2polarion/csv2sqlite_cli.py
dump2sqlite
def dump2sqlite(records, output_file):
    """Dumps tests results to database."""
    results_keys = list(records.results[0].keys())
    pad_data = []
    for key in REQUIRED_KEYS:
        if key not in results_keys:
            results_keys.append(key)
            pad_data.append("")

    conn = sqlite3.connect(os.path.expanduser(output_file), detect_types=sqlite3.PARSE_DECLTYPES)

    # in each row there needs to be data for every column
    # last column is current time
    pad_data.append(datetime.datetime.utcnow())

    to_db = [list(row.values()) + pad_data for row in records.results]

    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE testcases ({},sqltime TIMESTAMP)".format(
            ",".join("{} TEXT".format(key) for key in results_keys)
        )
    )
    cur.executemany(
        "INSERT INTO testcases VALUES ({},?)".format(",".join(["?"] * len(results_keys))), to_db
    )
    if records.testrun:
        cur.execute("CREATE TABLE testrun (testrun TEXT)")
        cur.execute("INSERT INTO testrun VALUES (?)", (records.testrun,))
    conn.commit()
    conn.close()

    logger.info("Data written to '%s'", output_file)
python
def dump2sqlite(records, output_file):
    """Dumps tests results to database."""
    results_keys = list(records.results[0].keys())
    pad_data = []
    for key in REQUIRED_KEYS:
        if key not in results_keys:
            results_keys.append(key)
            pad_data.append("")

    conn = sqlite3.connect(os.path.expanduser(output_file), detect_types=sqlite3.PARSE_DECLTYPES)

    # in each row there needs to be data for every column
    # last column is current time
    pad_data.append(datetime.datetime.utcnow())

    to_db = [list(row.values()) + pad_data for row in records.results]

    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE testcases ({},sqltime TIMESTAMP)".format(
            ",".join("{} TEXT".format(key) for key in results_keys)
        )
    )
    cur.executemany(
        "INSERT INTO testcases VALUES ({},?)".format(",".join(["?"] * len(results_keys))), to_db
    )
    if records.testrun:
        cur.execute("CREATE TABLE testrun (testrun TEXT)")
        cur.execute("INSERT INTO testrun VALUES (?)", (records.testrun,))
    conn.commit()
    conn.close()

    logger.info("Data written to '%s'", output_file)
[ "def", "dump2sqlite", "(", "records", ",", "output_file", ")", ":", "results_keys", "=", "list", "(", "records", ".", "results", "[", "0", "]", ".", "keys", "(", ")", ")", "pad_data", "=", "[", "]", "for", "key", "in", "REQUIRED_KEYS", ":", "if", "key", "not", "in", "results_keys", ":", "results_keys", ".", "append", "(", "key", ")", "pad_data", ".", "append", "(", "\"\"", ")", "conn", "=", "sqlite3", ".", "connect", "(", "os", ".", "path", ".", "expanduser", "(", "output_file", ")", ",", "detect_types", "=", "sqlite3", ".", "PARSE_DECLTYPES", ")", "# in each row there needs to be data for every column", "# last column is current time", "pad_data", ".", "append", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ")", "to_db", "=", "[", "list", "(", "row", ".", "values", "(", ")", ")", "+", "pad_data", "for", "row", "in", "records", ".", "results", "]", "cur", "=", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"CREATE TABLE testcases ({},sqltime TIMESTAMP)\"", ".", "format", "(", "\",\"", ".", "join", "(", "\"{} TEXT\"", ".", "format", "(", "key", ")", "for", "key", "in", "results_keys", ")", ")", ")", "cur", ".", "executemany", "(", "\"INSERT INTO testcases VALUES ({},?)\"", ".", "format", "(", "\",\"", ".", "join", "(", "[", "\"?\"", "]", "*", "len", "(", "results_keys", ")", ")", ")", ",", "to_db", ")", "if", "records", ".", "testrun", ":", "cur", ".", "execute", "(", "\"CREATE TABLE testrun (testrun TEXT)\"", ")", "cur", ".", "execute", "(", "\"INSERT INTO testrun VALUES (?)\"", ",", "(", "records", ".", "testrun", ",", ")", ")", "conn", ".", "commit", "(", ")", "conn", ".", "close", "(", ")", "logger", ".", "info", "(", "\"Data written to '%s'\"", ",", "output_file", ")" ]
Dumps tests results to database.
[ "Dumps", "tests", "results", "to", "database", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/csv2sqlite_cli.py#L42-L77
train
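To sanity-check a file produced by dump2sqlite, the table it creates can be read back with the standard sqlite3 module ('output.db' is a placeholder path):

    import sqlite3

    conn = sqlite3.connect('output.db')  # placeholder path
    cur = conn.cursor()
    for row in cur.execute('SELECT * FROM testcases LIMIT 5'):
        print(row)
    conn.close()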
romanorac/discomll
discomll/ensemble/core/k_medoids.py
fit
def fit(sim_mat, D_len, cidx):
    """
    Algorithm maximizes energy between clusters, which is distinction in this algorithm.
    Distance matrix contains mostly 0, which are overlooked due to search of maximal distances.
    Algorithm does not try to retain k clusters.

    D: numpy array - Symmetric distance matrix
    k: int - number of clusters
    """
    min_energy = np.inf
    for j in range(3):
        # select indices in each sample that maximizes its dimension
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]

        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # find indices for every cluster

            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    # value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)

                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers

        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx

    return inds_min, cidx_min
python
def fit(sim_mat, D_len, cidx):
    """
    Algorithm maximizes energy between clusters, which is distinction in this algorithm.
    Distance matrix contains mostly 0, which are overlooked due to search of maximal distances.
    Algorithm does not try to retain k clusters.

    D: numpy array - Symmetric distance matrix
    k: int - number of clusters
    """
    min_energy = np.inf
    for j in range(3):
        # select indices in each sample that maximizes its dimension
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]

        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # find indices for every cluster

            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    # value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)

                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers

        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx

    return inds_min, cidx_min
[ "def", "fit", "(", "sim_mat", ",", "D_len", ",", "cidx", ")", ":", "min_energy", "=", "np", ".", "inf", "for", "j", "in", "range", "(", "3", ")", ":", "# select indices in each sample that maximizes its dimension", "inds", "=", "[", "np", ".", "argmin", "(", "[", "sim_mat", "[", "idy", "]", ".", "get", "(", "idx", ",", "0", ")", "for", "idx", "in", "cidx", "]", ")", "for", "idy", "in", "range", "(", "D_len", ")", "if", "idy", "in", "sim_mat", "]", "cidx", "=", "[", "]", "energy", "=", "0", "# current enengy", "for", "i", "in", "np", ".", "unique", "(", "inds", ")", ":", "indsi", "=", "np", ".", "where", "(", "inds", "==", "i", ")", "[", "0", "]", "# find indices for every cluster", "minind", ",", "min_value", "=", "0", ",", "0", "for", "index", ",", "idy", "in", "enumerate", "(", "indsi", ")", ":", "if", "idy", "in", "sim_mat", ":", "# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])", "value", "=", "0", "for", "idx", "in", "indsi", ":", "value", "+=", "sim_mat", "[", "idy", "]", ".", "get", "(", "idx", ",", "0", ")", "if", "value", "<", "min_value", ":", "minind", ",", "min_value", "=", "index", ",", "value", "energy", "+=", "min_value", "cidx", ".", "append", "(", "indsi", "[", "minind", "]", ")", "# new centers", "if", "energy", "<", "min_energy", ":", "min_energy", ",", "inds_min", ",", "cidx_min", "=", "energy", ",", "inds", ",", "cidx", "return", "inds_min", ",", "cidx_min" ]
Algorithm maximizes energy between clusters, which is distinction in this algorithm.
Distance matrix contains mostly 0, which are overlooked due to search of maximal distances.
Algorithm does not try to retain k clusters.

D: numpy array - Symmetric distance matrix
k: int - number of clusters
[ "Algorithm", "maximizes", "energy", "between", "clusters", "which", "is", "distinction", "in", "this", "algorithm", ".", "Distance", "matrix", "contains", "mostly", "0", "which", "are", "overlooked", "due", "to", "search", "of", "maximal", "distances", ".", "Algorithm", "does", "not", "try", "to", "retain", "k", "clusters", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/k_medoids.py#L8-L41
train
ngmarchant/oasis
oasis/oasis.py
BetaBernoulliModel._calc_theta
def _calc_theta(self):
    """Calculate an estimate of theta"""
    if self.decaying_prior:
        n_sampled = np.clip(self.alpha_ + self.beta_, 1, np.inf)
        prior_weight = 1/n_sampled
        alpha = self.alpha_ + prior_weight * self.alpha_0
        beta = self.beta_ + prior_weight * self.beta_0
    else:
        alpha = self.alpha_ + self.alpha_0
        beta = self.beta_ + self.beta_0

    # Mean of Beta-distributed rv
    self.theta_ = alpha / (alpha + beta)

    # NEW: calculate theta assuming weak prior
    if self.store_wp:
        alpha = self.alpha_ + self._wp_weight * self.alpha_0
        beta = self.beta_ + self._wp_weight * self.beta_0
        self.theta_wp_ = alpha / (alpha + beta)
python
def _calc_theta(self):
    """Calculate an estimate of theta"""
    if self.decaying_prior:
        n_sampled = np.clip(self.alpha_ + self.beta_, 1, np.inf)
        prior_weight = 1/n_sampled
        alpha = self.alpha_ + prior_weight * self.alpha_0
        beta = self.beta_ + prior_weight * self.beta_0
    else:
        alpha = self.alpha_ + self.alpha_0
        beta = self.beta_ + self.beta_0

    # Mean of Beta-distributed rv
    self.theta_ = alpha / (alpha + beta)

    # NEW: calculate theta assuming weak prior
    if self.store_wp:
        alpha = self.alpha_ + self._wp_weight * self.alpha_0
        beta = self.beta_ + self._wp_weight * self.beta_0
        self.theta_wp_ = alpha / (alpha + beta)
[ "def", "_calc_theta", "(", "self", ")", ":", "if", "self", ".", "decaying_prior", ":", "n_sampled", "=", "np", ".", "clip", "(", "self", ".", "alpha_", "+", "self", ".", "beta_", ",", "1", ",", "np", ".", "inf", ")", "prior_weight", "=", "1", "/", "n_sampled", "alpha", "=", "self", ".", "alpha_", "+", "prior_weight", "*", "self", ".", "alpha_0", "beta", "=", "self", ".", "beta_", "+", "prior_weight", "*", "self", ".", "beta_0", "else", ":", "alpha", "=", "self", ".", "alpha_", "+", "self", ".", "alpha_0", "beta", "=", "self", ".", "beta_", "+", "self", ".", "beta_0", "# Mean of Beta-distributed rv", "self", ".", "theta_", "=", "alpha", "/", "(", "alpha", "+", "beta", ")", "# NEW: calculate theta assuming weak prior", "if", "self", ".", "store_wp", ":", "alpha", "=", "self", ".", "alpha_", "+", "self", ".", "_wp_weight", "*", "self", ".", "alpha_0", "beta", "=", "self", ".", "beta_", "+", "self", ".", "_wp_weight", "*", "self", ".", "beta_0", "self", ".", "theta_wp_", "=", "alpha", "/", "(", "alpha", "+", "beta", ")" ]
Calculate an estimate of theta
[ "Calculate", "an", "estimate", "of", "theta" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/oasis.py#L81-L99
train
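Numerically, theta_ is the posterior mean of a Beta(alpha_ + alpha_0, beta_ + beta_0) variable, and the decaying prior shrinks the pseudo-counts as labels accumulate. A worked sketch with made-up counts:

    import numpy as np

    alpha_, beta_ = np.array([3.0]), np.array([1.0])    # 3 ones, 1 zero observed
    alpha_0, beta_0 = np.array([0.5]), np.array([0.5])  # prior pseudo-counts

    n_sampled = np.clip(alpha_ + beta_, 1, np.inf)      # 4 labels sampled
    prior_weight = 1 / n_sampled                        # prior decays to 1/4
    alpha = alpha_ + prior_weight * alpha_0             # 3.125
    beta = beta_ + prior_weight * beta_0                # 1.125
    print(alpha / (alpha + beta))                       # ~0.735 (raw mean: 0.75)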
ngmarchant/oasis
oasis/oasis.py
BetaBernoulliModel.update
def update(self, ell, k):
    """Update the posterior and estimates after a label is sampled

    Parameters
    ----------
    ell : int
        sampled label: 0 or 1

    k : int
        index of stratum where label was sampled
    """
    self.alpha_[k] += ell
    self.beta_[k] += 1 - ell
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
python
def update(self, ell, k):
    """Update the posterior and estimates after a label is sampled

    Parameters
    ----------
    ell : int
        sampled label: 0 or 1

    k : int
        index of stratum where label was sampled
    """
    self.alpha_[k] += ell
    self.beta_[k] += 1 - ell
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
[ "def", "update", "(", "self", ",", "ell", ",", "k", ")", ":", "self", ".", "alpha_", "[", "k", "]", "+=", "ell", "self", ".", "beta_", "[", "k", "]", "+=", "1", "-", "ell", "self", ".", "_calc_theta", "(", ")", "if", "self", ".", "store_variance", ":", "self", ".", "_calc_var_theta", "(", ")" ]
Update the posterior and estimates after a label is sampled

Parameters
----------
ell : int
    sampled label: 0 or 1

k : int
    index of stratum where label was sampled
[ "Update", "the", "posterior", "and", "estimates", "after", "a", "label", "is", "sampled" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/oasis.py#L115-L131
train
ngmarchant/oasis
oasis/oasis.py
BetaBernoulliModel.reset
def reset(self):
    """Reset the instance to its initial state"""
    self.alpha_ = np.zeros(self._size, dtype=int)
    self.beta_ = np.zeros(self._size, dtype=int)
    self.theta_ = np.empty(self._size, dtype=float)
    if self.store_variance:
        self.var_theta_ = np.empty(self._size, dtype=float)
    if self.store_wp:
        self.theta_wp_ = np.empty(self._size, dtype=float)

    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
python
def reset(self):
    """Reset the instance to its initial state"""
    self.alpha_ = np.zeros(self._size, dtype=int)
    self.beta_ = np.zeros(self._size, dtype=int)
    self.theta_ = np.empty(self._size, dtype=float)
    if self.store_variance:
        self.var_theta_ = np.empty(self._size, dtype=float)
    if self.store_wp:
        self.theta_wp_ = np.empty(self._size, dtype=float)

    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
[ "def", "reset", "(", "self", ")", ":", "self", ".", "alpha_", "=", "np", ".", "zeros", "(", "self", ".", "_size", ",", "dtype", "=", "int", ")", "self", ".", "beta_", "=", "np", ".", "zeros", "(", "self", ".", "_size", ",", "dtype", "=", "int", ")", "self", ".", "theta_", "=", "np", ".", "empty", "(", "self", ".", "_size", ",", "dtype", "=", "float", ")", "if", "self", ".", "store_variance", ":", "self", ".", "var_theta_", "=", "np", ".", "empty", "(", "self", ".", "_size", ",", "dtype", "=", "float", ")", "if", "self", ".", "store_wp", ":", "self", ".", "theta_wp_", "=", "np", ".", "empty", "(", "self", ".", "_size", ",", "dtype", "=", "float", ")", "self", ".", "_calc_theta", "(", ")", "if", "self", ".", "store_variance", ":", "self", ".", "_calc_var_theta", "(", ")" ]
Reset the instance to its initial state
[ "Reset", "the", "instance", "to", "its", "initial", "state" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/oasis.py#L133-L145
train
ngmarchant/oasis
oasis/oasis.py
OASISSampler._calc_BB_prior
def _calc_BB_prior(self, theta_0):
    """Generate a prior for the BB model

    Parameters
    ----------
    theta_0 : array-like, shape=(n_strata,)
        array of oracle probabilities (probability of a "1" label)
        for each stratum. This is just a guess.

    Returns
    -------
    alpha_0 : numpy.ndarray, shape=(n_strata,)
        "alpha" hyperparameters for an ensemble of Beta-distributed rvs

    beta_0 : numpy.ndarray, shape=(n_strata,)
        "beta" hyperparameters for an ensemble of Beta-distributed rvs
    """
    #: Easy vars
    prior_strength = self.prior_strength

    #weighted_strength = self.weights * strength
    n_strata = len(theta_0)
    weighted_strength = prior_strength / n_strata
    alpha_0 = theta_0 * weighted_strength
    beta_0 = (1 - theta_0) * weighted_strength
    return alpha_0, beta_0
python
def _calc_BB_prior(self, theta_0):
    """Generate a prior for the BB model

    Parameters
    ----------
    theta_0 : array-like, shape=(n_strata,)
        array of oracle probabilities (probability of a "1" label)
        for each stratum. This is just a guess.

    Returns
    -------
    alpha_0 : numpy.ndarray, shape=(n_strata,)
        "alpha" hyperparameters for an ensemble of Beta-distributed rvs

    beta_0 : numpy.ndarray, shape=(n_strata,)
        "beta" hyperparameters for an ensemble of Beta-distributed rvs
    """
    #: Easy vars
    prior_strength = self.prior_strength

    #weighted_strength = self.weights * strength
    n_strata = len(theta_0)
    weighted_strength = prior_strength / n_strata
    alpha_0 = theta_0 * weighted_strength
    beta_0 = (1 - theta_0) * weighted_strength
    return alpha_0, beta_0
[ "def", "_calc_BB_prior", "(", "self", ",", "theta_0", ")", ":", "#: Easy vars", "prior_strength", "=", "self", ".", "prior_strength", "#weighted_strength = self.weights * strength", "n_strata", "=", "len", "(", "theta_0", ")", "weighted_strength", "=", "prior_strength", "/", "n_strata", "alpha_0", "=", "theta_0", "*", "weighted_strength", "beta_0", "=", "(", "1", "-", "theta_0", ")", "*", "weighted_strength", "return", "alpha_0", ",", "beta_0" ]
Generate a prior for the BB model

Parameters
----------
theta_0 : array-like, shape=(n_strata,)
    array of oracle probabilities (probability of a "1" label)
    for each stratum. This is just a guess.

Returns
-------
alpha_0 : numpy.ndarray, shape=(n_strata,)
    "alpha" hyperparameters for an ensemble of Beta-distributed rvs

beta_0 : numpy.ndarray, shape=(n_strata,)
    "beta" hyperparameters for an ensemble of Beta-distributed rvs
[ "Generate", "a", "prior", "for", "the", "BB", "model" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/oasis.py#L379-L404
train
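A quick numeric check of the prior construction: the total pseudo-count alpha_0 + beta_0 in each stratum equals prior_strength / n_strata (values below are illustrative):

    import numpy as np

    theta_0 = np.array([0.1, 0.5])  # guessed oracle probabilities per stratum
    prior_strength = 2.0            # illustrative setting

    weighted_strength = prior_strength / len(theta_0)  # 1.0 per stratum
    alpha_0 = theta_0 * weighted_strength              # [0.1, 0.5]
    beta_0 = (1 - theta_0) * weighted_strength         # [0.9, 0.5]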
djaodjin/djaodjin-deployutils
deployutils/apps/django/backends/auth.py
ProxyUserBackend.authenticate
def authenticate(self, request, remote_user=None):
    #pylint:disable=arguments-differ
    # Django <=1.8 and >=1.9 have different signatures.
    """
    The ``username`` passed here is considered trusted. This
    method simply returns the ``User`` object with the given username.

    In order to support older Django versions
    (before commit 4b9330ccc04575f9e5126529ec355a450d12e77c), if username
    is None, we will assume request is the ``remote_user`` parameter.
    """
    if not remote_user:
        remote_user = request
    if not remote_user:
        return None
    user = None
    username = self.clean_username(remote_user)
    try:
        #pylint:disable=protected-access
        if self.create_unknown_user:
            defaults = {}
            if isinstance(request, dict):
                session_data = request
                if 'full_name' in session_data:
                    first_name, _, last_name = full_name_natural_split(
                        session_data['full_name'])
                    defaults.update({
                        'first_name': first_name,
                        'last_name': last_name
                    })
                for key in ('email', 'first_name', 'last_name'):
                    if key in session_data:
                        defaults.update({key: session_data[key]})
            user, created = UserModel._default_manager.get_or_create(**{
                UserModel.USERNAME_FIELD: username,
                'defaults': defaults,
            })
            if created:
                LOGGER.debug("created user '%s' in database.", username)
                user = self.configure_user(user)
        else:
            try:
                user = UserModel._default_manager.get_by_natural_key(
                    username)
            except UserModel.DoesNotExist:
                pass
    except DatabaseError as err:
        LOGGER.debug("User table missing from database? (err:%s)", err)
        # We don't have an auth_user table, so let's build a hash in memory.
        for user in six.itervalues(self.users):
            LOGGER.debug("match %s with User(id=%d, username=%s)",
                username, user.id, user.username)
            if user.username == username:
                LOGGER.debug("found %d %s", user.id, user.username)
                return user
        # Not found in memory dict
        user = UserModel(
            id=random.randint(1, (1 << 32) - 1), username=username)
        LOGGER.debug("add User(id=%d, username=%s) to cache.",
            user.id, user.username)
        self.users[user.id] = user
    return user if self.user_can_authenticate(user) else None
python
def authenticate(self, request, remote_user=None):
    #pylint:disable=arguments-differ
    # Django <=1.8 and >=1.9 have different signatures.
    """
    The ``username`` passed here is considered trusted. This
    method simply returns the ``User`` object with the given username.

    In order to support older Django versions
    (before commit 4b9330ccc04575f9e5126529ec355a450d12e77c), if username
    is None, we will assume request is the ``remote_user`` parameter.
    """
    if not remote_user:
        remote_user = request
    if not remote_user:
        return None
    user = None
    username = self.clean_username(remote_user)
    try:
        #pylint:disable=protected-access
        if self.create_unknown_user:
            defaults = {}
            if isinstance(request, dict):
                session_data = request
                if 'full_name' in session_data:
                    first_name, _, last_name = full_name_natural_split(
                        session_data['full_name'])
                    defaults.update({
                        'first_name': first_name,
                        'last_name': last_name
                    })
                for key in ('email', 'first_name', 'last_name'):
                    if key in session_data:
                        defaults.update({key: session_data[key]})
            user, created = UserModel._default_manager.get_or_create(**{
                UserModel.USERNAME_FIELD: username,
                'defaults': defaults,
            })
            if created:
                LOGGER.debug("created user '%s' in database.", username)
                user = self.configure_user(user)
        else:
            try:
                user = UserModel._default_manager.get_by_natural_key(
                    username)
            except UserModel.DoesNotExist:
                pass
    except DatabaseError as err:
        LOGGER.debug("User table missing from database? (err:%s)", err)
        # We don't have an auth_user table, so let's build a hash in memory.
        for user in six.itervalues(self.users):
            LOGGER.debug("match %s with User(id=%d, username=%s)",
                username, user.id, user.username)
            if user.username == username:
                LOGGER.debug("found %d %s", user.id, user.username)
                return user
        # Not found in memory dict
        user = UserModel(
            id=random.randint(1, (1 << 32) - 1), username=username)
        LOGGER.debug("add User(id=%d, username=%s) to cache.",
            user.id, user.username)
        self.users[user.id] = user
    return user if self.user_can_authenticate(user) else None
[ "def", "authenticate", "(", "self", ",", "request", ",", "remote_user", "=", "None", ")", ":", "#pylint:disable=arguments-differ", "# Django <=1.8 and >=1.9 have different signatures.", "if", "not", "remote_user", ":", "remote_user", "=", "request", "if", "not", "remote_user", ":", "return", "None", "user", "=", "None", "username", "=", "self", ".", "clean_username", "(", "remote_user", ")", "try", ":", "#pylint:disable=protected-access", "if", "self", ".", "create_unknown_user", ":", "defaults", "=", "{", "}", "if", "isinstance", "(", "request", ",", "dict", ")", ":", "session_data", "=", "request", "if", "'full_name'", "in", "session_data", ":", "first_name", ",", "_", ",", "last_name", "=", "full_name_natural_split", "(", "session_data", "[", "'full_name'", "]", ")", "defaults", ".", "update", "(", "{", "'first_name'", ":", "first_name", ",", "'last_name'", ":", "last_name", "}", ")", "for", "key", "in", "(", "'email'", ",", "'first_name'", ",", "'last_name'", ")", ":", "if", "key", "in", "session_data", ":", "defaults", ".", "update", "(", "{", "key", ":", "session_data", "[", "key", "]", "}", ")", "user", ",", "created", "=", "UserModel", ".", "_default_manager", ".", "get_or_create", "(", "*", "*", "{", "UserModel", ".", "USERNAME_FIELD", ":", "username", ",", "'defaults'", ":", "defaults", ",", "}", ")", "if", "created", ":", "LOGGER", ".", "debug", "(", "\"created user '%s' in database.\"", ",", "username", ")", "user", "=", "self", ".", "configure_user", "(", "user", ")", "else", ":", "try", ":", "user", "=", "UserModel", ".", "_default_manager", ".", "get_by_natural_key", "(", "username", ")", "except", "UserModel", ".", "DoesNotExist", ":", "pass", "except", "DatabaseError", "as", "err", ":", "LOGGER", ".", "debug", "(", "\"User table missing from database? (err:%s)\"", ",", "err", ")", "# We don't have a auth_user table, so let's build a hash in memory.", "for", "user", "in", "six", ".", "itervalues", "(", "self", ".", "users", ")", ":", "LOGGER", ".", "debug", "(", "\"match %s with User(id=%d, username=%s)\"", ",", "username", ",", "user", ".", "id", ",", "user", ".", "username", ")", "if", "user", ".", "username", "==", "username", ":", "LOGGER", ".", "debug", "(", "\"found %d %s\"", ",", "user", ".", "id", ",", "user", ".", "username", ")", "return", "user", "# Not found in memory dict", "user", "=", "UserModel", "(", "id", "=", "random", ".", "randint", "(", "1", ",", "(", "1", "<<", "32", ")", "-", "1", ")", ",", "username", "=", "username", ")", "LOGGER", ".", "debug", "(", "\"add User(id=%d, username=%s) to cache.\"", ",", "user", ".", "id", ",", "user", ".", "username", ")", "self", ".", "users", "[", "user", ".", "id", "]", "=", "user", "return", "user", "if", "self", ".", "user_can_authenticate", "(", "user", ")", "else", "None" ]
The ``username`` passed here is considered trusted. This method simply returns the ``User`` object with the given username. In order to support older Django versions (before commit 4b9330ccc04575f9e5126529ec355a450d12e77c), if username is None, we will assume request is the ``remote_user`` parameter.
[ "The", "username", "passed", "here", "is", "considered", "trusted", ".", "This", "method", "simply", "returns", "the", "User", "object", "with", "the", "given", "username", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/backends/auth.py#L55-L117
train
DavidMStraub/pylha
pylha/parse.py
numval
def numval(token):
    """Return the numerical value of token.value if it is a number"""
    if token.type == 'INTEGER':
        return int(token.value)
    elif token.type == 'FLOAT':
        return float(token.value)
    else:
        return token.value
python
def numval(token):
    """Return the numerical value of token.value if it is a number"""
    if token.type == 'INTEGER':
        return int(token.value)
    elif token.type == 'FLOAT':
        return float(token.value)
    else:
        return token.value
[ "def", "numval", "(", "token", ")", ":", "if", "token", ".", "type", "==", "'INTEGER'", ":", "return", "int", "(", "token", ".", "value", ")", "elif", "token", ".", "type", "==", "'FLOAT'", ":", "return", "float", "(", "token", ".", "value", ")", "else", ":", "return", "token", ".", "value" ]
Return the numerical value of token.value if it is a number
[ "Return", "the", "numerical", "value", "of", "token", ".", "value", "if", "it", "is", "a", "number" ]
8d65074609321e5eaf97fe962c56f6d79a3ad2b6
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L6-L13
train
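A minimal usage sketch for numval; it assumes Token is the (type, value, line, column) namedtuple implied by the tokenize and parse records below, which is not shown in this record:

from collections import namedtuple

# Assumed token shape; the real definition lives elsewhere in pylha/parse.py.
Token = namedtuple('Token', ['type', 'value', 'line', 'column'])

def numval(token):
    """Return the numerical value of token.value if it is a number"""
    if token.type == 'INTEGER':
        return int(token.value)
    elif token.type == 'FLOAT':
        return float(token.value)
    else:
        return token.value

print(numval(Token('INTEGER', '25', 1, 0)))    # 25 (int)
print(numval(Token('FLOAT', '1.25e2', 1, 4)))  # 125.0 (float)
print(numval(Token('STRING', 'MASS', 1, 0)))   # 'MASS' (passed through unchanged)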
DavidMStraub/pylha
pylha/parse.py
tokenize
def tokenize(code): """Tokenize the string `code`""" tok_regex = '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens) tok_regex = re.compile(tok_regex, re.IGNORECASE|re.M) line_num = 1 line_start = 0 for mo in re.finditer(tok_regex, code): kind = mo.lastgroup value = mo.group(kind) if kind == 'NEWLINE': line_start = mo.end() line_num += 1 elif kind == 'SKIP' or value=='': pass else: column = mo.start() - line_start yield Token(kind, value, line_num, column)
python
def tokenize(code): """Tokenize the string `code`""" tok_regex = '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens) tok_regex = re.compile(tok_regex, re.IGNORECASE|re.M) line_num = 1 line_start = 0 for mo in re.finditer(tok_regex, code): kind = mo.lastgroup value = mo.group(kind) if kind == 'NEWLINE': line_start = mo.end() line_num += 1 elif kind == 'SKIP' or value=='': pass else: column = mo.start() - line_start yield Token(kind, value, line_num, column)
[ "def", "tokenize", "(", "code", ")", ":", "tok_regex", "=", "'|'", ".", "join", "(", "'(?P<{}>{})'", ".", "format", "(", "*", "pair", ")", "for", "pair", "in", "_tokens", ")", "tok_regex", "=", "re", ".", "compile", "(", "tok_regex", ",", "re", ".", "IGNORECASE", "|", "re", ".", "M", ")", "line_num", "=", "1", "line_start", "=", "0", "for", "mo", "in", "re", ".", "finditer", "(", "tok_regex", ",", "code", ")", ":", "kind", "=", "mo", ".", "lastgroup", "value", "=", "mo", ".", "group", "(", "kind", ")", "if", "kind", "==", "'NEWLINE'", ":", "line_start", "=", "mo", ".", "end", "(", ")", "line_num", "+=", "1", "elif", "kind", "==", "'SKIP'", "or", "value", "==", "''", ":", "pass", "else", ":", "column", "=", "mo", ".", "start", "(", ")", "-", "line_start", "yield", "Token", "(", "kind", ",", "value", ",", "line_num", ",", "column", ")" ]
Tokenize the string `code`
[ "Tokenize", "the", "string", "code" ]
8d65074609321e5eaf97fe962c56f6d79a3ad2b6
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L26-L42
train
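tokenize follows the named-group regex dispatch pattern from the Python re module docs. Below is a self-contained sketch with a simplified, assumed _tokens table; the real table in pylha is not shown in this record and certainly differs (e.g. it must also define a BLOCK kind used by parse):

import re
from collections import namedtuple

Token = namedtuple('Token', ['type', 'value', 'line', 'column'])

# Hypothetical stand-in for pylha's _tokens; order matters (FLOAT before INTEGER).
_tokens = [
    ('COMMENT', r'#[^\n]*'),
    ('FLOAT',   r'[+-]?\d+\.\d*(?:e[+-]?\d+)?|[+-]?\d+e[+-]?\d+'),
    ('INTEGER', r'[+-]?\d+'),
    ('STRING',  r'[^\s#]+'),
    ('NEWLINE', r'\n'),
    ('SKIP',    r'[ \t]+'),
]

def tokenize(code):
    """Tokenize the string `code`"""
    tok_regex = '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens)
    tok_regex = re.compile(tok_regex, re.IGNORECASE | re.M)
    line_num, line_start = 1, 0
    for mo in re.finditer(tok_regex, code):
        kind = mo.lastgroup
        value = mo.group(kind)
        if kind == 'NEWLINE':
            line_start = mo.end()  # column offsets restart at each newline
            line_num += 1
        elif kind == 'SKIP' or value == '':
            pass
        else:
            yield Token(kind, value, line_num, mo.start() - line_start)

print(list(tokenize('25 1.25e2 # h0 mass')))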
DavidMStraub/pylha
pylha/parse.py
parse
def parse(tokens): """Parse the token list into a hierarchical data structure""" d = collections.OrderedDict() prev_line = 0 blockname = None blockline = None for token in tokens: if token.type == 'COMMENT': continue elif token.type == 'BLOCK': block = token blockline = token.line blocktype = token.value.upper() blockname = None if blocktype not in d: d[blocktype] = collections.OrderedDict() elif token.line == blockline: if blockname is None: blockname = token.value d[blocktype][blockname] = collections.defaultdict(list) else: d[blocktype][blockname]['info'].append(numval(token)) elif token.line != prev_line: if blockname is None: raise ParseError("Found value outside block!") d[blocktype][blockname]['values'].append([numval(token)]) else: if blockname is None: raise ParseError("Found value outside block!") d[blocktype][blockname]['values'][-1].append(numval(token)) prev_line = token.line return d
python
def parse(tokens): """Parse the token list into a hierarchical data structure""" d = collections.OrderedDict() prev_line = 0 blockname = None blockline = None for token in tokens: if token.type == 'COMMENT': continue elif token.type == 'BLOCK': block = token blockline = token.line blocktype = token.value.upper() blockname = None if blocktype not in d: d[blocktype] = collections.OrderedDict() elif token.line == blockline: if blockname is None: blockname = token.value d[blocktype][blockname] = collections.defaultdict(list) else: d[blocktype][blockname]['info'].append(numval(token)) elif token.line != prev_line: if blockname is None: raise ParseError("Found value outside block!") d[blocktype][blockname]['values'].append([numval(token)]) else: if blockname is None: raise ParseError("Found value outside block!") d[blocktype][blockname]['values'][-1].append(numval(token)) prev_line = token.line return d
[ "def", "parse", "(", "tokens", ")", ":", "d", "=", "collections", ".", "OrderedDict", "(", ")", "prev_line", "=", "0", "blockname", "=", "None", "blockline", "=", "None", "for", "token", "in", "tokens", ":", "if", "token", ".", "type", "==", "'COMMENT'", ":", "continue", "elif", "token", ".", "type", "==", "'BLOCK'", ":", "block", "=", "token", "blockline", "=", "token", ".", "line", "blocktype", "=", "token", ".", "value", ".", "upper", "(", ")", "blockname", "=", "None", "if", "blocktype", "not", "in", "d", ":", "d", "[", "blocktype", "]", "=", "collections", ".", "OrderedDict", "(", ")", "elif", "token", ".", "line", "==", "blockline", ":", "if", "blockname", "is", "None", ":", "blockname", "=", "token", ".", "value", "d", "[", "blocktype", "]", "[", "blockname", "]", "=", "collections", ".", "defaultdict", "(", "list", ")", "else", ":", "d", "[", "blocktype", "]", "[", "blockname", "]", "[", "'info'", "]", ".", "append", "(", "numval", "(", "token", ")", ")", "elif", "token", ".", "line", "!=", "prev_line", ":", "if", "blockname", "is", "None", ":", "raise", "ParseError", "(", "\"Found value outside block!\"", ")", "d", "[", "blocktype", "]", "[", "blockname", "]", "[", "'values'", "]", ".", "append", "(", "[", "numval", "(", "token", ")", "]", ")", "else", ":", "if", "blockname", "is", "None", ":", "raise", "ParseError", "(", "\"Found value outside block!\"", ")", "d", "[", "blocktype", "]", "[", "blockname", "]", "[", "'values'", "]", "[", "-", "1", "]", ".", "append", "(", "numval", "(", "token", ")", ")", "prev_line", "=", "token", ".", "line", "return", "d" ]
Parse the token list into a hierarchical data structure
[ "Parse", "the", "token", "list", "into", "a", "hierarchical", "data", "structure" ]
8d65074609321e5eaf97fe962c56f6d79a3ad2b6
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L47-L78
train
DavidMStraub/pylha
pylha/parse.py
load
def load(stream): """Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.""" if isinstance(stream, str): string = stream else: string = stream.read() tokens = tokenize(string) return parse(tokens)
python
def load(stream): """Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.""" if isinstance(stream, str): string = stream else: string = stream.read() tokens = tokenize(string) return parse(tokens)
[ "def", "load", "(", "stream", ")", ":", "if", "isinstance", "(", "stream", ",", "str", ")", ":", "string", "=", "stream", "else", ":", "string", "=", "stream", ".", "read", "(", ")", "tokens", "=", "tokenize", "(", "string", ")", "return", "parse", "(", "tokens", ")" ]
Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.
[ "Parse", "the", "LHA", "document", "and", "produce", "the", "corresponding", "Python", "object", ".", "Accepts", "a", "string", "or", "a", "file", "-", "like", "object", "." ]
8d65074609321e5eaf97fe962c56f6d79a3ad2b6
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L80-L88
train
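A usage sketch for load on a small SLHA-style fragment; it assumes load is re-exported at the pylha package level, and the expected nesting follows parse above:

import pylha  # assumes the package re-exports parse.load

doc = """BLOCK MASS
    25  1.25e2  # h0 mass
     6  1.73e2  # top mass
"""
data = pylha.load(doc)
# Following parse() above, values are grouped per input line:
# data['BLOCK']['MASS']['values'] == [[25, 125.0], [6, 173.0]]
print(data['BLOCK']['MASS']['values'])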
coopernurse/barrister
barrister/parser.py
IdlScanner.get_checksum
def get_checksum(self): """ Returns a checksum based on the IDL that ignores comments and ordering, but detects changes to types, parameter order, and enum values. """ arr = [ ] for elem in self.parsed: s = elem_checksum(elem) if s: arr.append(s) arr.sort() #print arr return md5(json.dumps(arr))
python
def get_checksum(self): """ Returns a checksum based on the IDL that ignores comments and ordering, but detects changes to types, parameter order, and enum values. """ arr = [ ] for elem in self.parsed: s = elem_checksum(elem) if s: arr.append(s) arr.sort() #print arr return md5(json.dumps(arr))
[ "def", "get_checksum", "(", "self", ")", ":", "arr", "=", "[", "]", "for", "elem", "in", "self", ".", "parsed", ":", "s", "=", "elem_checksum", "(", "elem", ")", "if", "s", ":", "arr", ".", "append", "(", "s", ")", "arr", ".", "sort", "(", ")", "#print arr", "return", "md5", "(", "json", ".", "dumps", "(", "arr", ")", ")" ]
Returns a checksum based on the IDL that ignores comments and ordering, but detects changes to types, parameter order, and enum values.
[ "Returns", "a", "checksum", "based", "on", "the", "IDL", "that", "ignores", "comments", "and", "ordering", "but", "detects", "changes", "to", "types", "parameter", "order", "and", "enum", "values", "." ]
0471b1d98d3327ba381684db496ec94c79c20848
https://github.com/coopernurse/barrister/blob/0471b1d98d3327ba381684db496ec94c79c20848/barrister/parser.py#L204-L217
train
ngmarchant/oasis
oasis/kad.py
KadaneSampler._update_estimate_and_sampler
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
python
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
[ "def", "_update_estimate_and_sampler", "(", "self", ",", "ell", ",", "ell_hat", ",", "weight", ",", "extra_info", ",", "*", "*", "kwargs", ")", ":", "stratum_idx", "=", "extra_info", "[", "'stratum'", "]", "self", ".", "_BB_TP", ".", "update", "(", "ell", "*", "ell_hat", ",", "stratum_idx", ")", "self", ".", "_BB_PP", ".", "update", "(", "ell_hat", ",", "stratum_idx", ")", "self", ".", "_BB_P", ".", "update", "(", "ell", ",", "stratum_idx", ")", "# Update model covariance matrix for stratum_idx", "self", ".", "_update_cov_model", "(", "strata_to_update", "=", "[", "stratum_idx", "]", ")", "# Update F-measure estimate, estimator variance, exp. variance decrease", "self", ".", "_update_estimates", "(", ")" ]
Update the BB models and the estimates
[ "Update", "the", "BB", "models", "and", "the", "estimates" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/kad.py#L133-L145
train
mkoura/dump2polarion
dump2polarion/svn_polarion.py
WorkItemCache.get_path
def get_path(num): """Gets a path from the workitem number. For example: 31942 will return 30000-39999/31000-31999/31900-31999 """ num = int(num) dig_len = len(str(num)) paths = [] for i in range(dig_len - 2): divisor = 10 ** (dig_len - i - 1) paths.append( "{}-{}".format((num // divisor) * divisor, (((num // divisor) + 1) * divisor) - 1) ) return "/".join(paths)
python
def get_path(num): """Gets a path from the workitem number. For example: 31942 will return 30000-39999/31000-31999/31900-31999 """ num = int(num) dig_len = len(str(num)) paths = [] for i in range(dig_len - 2): divisor = 10 ** (dig_len - i - 1) paths.append( "{}-{}".format((num // divisor) * divisor, (((num // divisor) + 1) * divisor) - 1) ) return "/".join(paths)
[ "def", "get_path", "(", "num", ")", ":", "num", "=", "int", "(", "num", ")", "dig_len", "=", "len", "(", "str", "(", "num", ")", ")", "paths", "=", "[", "]", "for", "i", "in", "range", "(", "dig_len", "-", "2", ")", ":", "divisor", "=", "10", "**", "(", "dig_len", "-", "i", "-", "1", ")", "paths", ".", "append", "(", "\"{}-{}\"", ".", "format", "(", "(", "num", "//", "divisor", ")", "*", "divisor", ",", "(", "(", "(", "num", "//", "divisor", ")", "+", "1", ")", "*", "divisor", ")", "-", "1", ")", ")", "return", "\"/\"", ".", "join", "(", "paths", ")" ]
Gets a path from the workitem number. For example: 31942 will return 30000-39999/31000-31999/31900-31999
[ "Gets", "a", "path", "from", "the", "workitem", "number", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/svn_polarion.py#L31-L44
train
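A standalone sketch of the same bucketing arithmetic, reproducing the docstring's example:

def get_path(num):
    """Map a workitem number onto nested range directories."""
    num = int(num)
    dig_len = len(str(num))
    paths = []
    for i in range(dig_len - 2):
        divisor = 10 ** (dig_len - i - 1)
        low = (num // divisor) * divisor  # floor to the start of the bucket
        paths.append("{}-{}".format(low, low + divisor - 1))
    return "/".join(paths)

print(get_path(31942))  # 30000-39999/31000-31999/31900-31999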
mkoura/dump2polarion
dump2polarion/svn_polarion.py
WorkItemCache.get_tree
def get_tree(self, work_item_id): """Gets XML tree of the workitem.""" try: __, tcid = work_item_id.split("-") except ValueError: logger.warning("Couldn't load workitem %s, bad format", work_item_id) self._cache[work_item_id] = InvalidObject() return None path = os.path.join(self.test_case_dir, self.get_path(tcid), work_item_id, "workitem.xml") try: tree = etree.parse(path) # pylint: disable=broad-except except Exception: logger.warning("Couldn't load workitem %s", work_item_id) self._cache[work_item_id] = InvalidObject() return None return tree
python
def get_tree(self, work_item_id): """Gets XML tree of the workitem.""" try: __, tcid = work_item_id.split("-") except ValueError: logger.warning("Couldn't load workitem %s, bad format", work_item_id) self._cache[work_item_id] = InvalidObject() return None path = os.path.join(self.test_case_dir, self.get_path(tcid), work_item_id, "workitem.xml") try: tree = etree.parse(path) # pylint: disable=broad-except except Exception: logger.warning("Couldn't load workitem %s", work_item_id) self._cache[work_item_id] = InvalidObject() return None return tree
[ "def", "get_tree", "(", "self", ",", "work_item_id", ")", ":", "try", ":", "__", ",", "tcid", "=", "work_item_id", ".", "split", "(", "\"-\"", ")", "except", "ValueError", ":", "logger", ".", "warning", "(", "\"Couldn't load workitem %s, bad format\"", ",", "work_item_id", ")", "self", ".", "_cache", "[", "work_item_id", "]", "=", "InvalidObject", "(", ")", "return", "None", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "test_case_dir", ",", "self", ".", "get_path", "(", "tcid", ")", ",", "work_item_id", ",", "\"workitem.xml\"", ")", "try", ":", "tree", "=", "etree", ".", "parse", "(", "path", ")", "# pylint: disable=broad-except", "except", "Exception", ":", "logger", ".", "warning", "(", "\"Couldn't load workitem %s\"", ",", "work_item_id", ")", "self", ".", "_cache", "[", "work_item_id", "]", "=", "InvalidObject", "(", ")", "return", "None", "return", "tree" ]
Gets XML tree of the workitem.
[ "Gets", "XML", "tree", "of", "the", "workitem", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/svn_polarion.py#L46-L63
train
mkoura/dump2polarion
dump2polarion/svn_polarion.py
WorkItemCache.get_all_items
def get_all_items(self): """Walks the repo and returns work items.""" for item in os.walk(self.test_case_dir): if "workitem.xml" not in item[2]: continue case_id = os.path.split(item[0])[-1] if not (case_id and "*" not in case_id): continue item_cache = self[case_id] if not item_cache: continue if not item_cache.get("title"): continue yield item_cache
python
def get_all_items(self): """Walks the repo and returns work items.""" for item in os.walk(self.test_case_dir): if "workitem.xml" not in item[2]: continue case_id = os.path.split(item[0])[-1] if not (case_id and "*" not in case_id): continue item_cache = self[case_id] if not item_cache: continue if not item_cache.get("title"): continue yield item_cache
[ "def", "get_all_items", "(", "self", ")", ":", "for", "item", "in", "os", ".", "walk", "(", "self", ".", "test_case_dir", ")", ":", "if", "\"workitem.xml\"", "not", "in", "item", "[", "2", "]", ":", "continue", "case_id", "=", "os", ".", "path", ".", "split", "(", "item", "[", "0", "]", ")", "[", "-", "1", "]", "if", "not", "(", "case_id", "and", "\"*\"", "not", "in", "case_id", ")", ":", "continue", "item_cache", "=", "self", "[", "case_id", "]", "if", "not", "item_cache", ":", "continue", "if", "not", "item_cache", ".", "get", "(", "\"title\"", ")", ":", "continue", "yield", "item_cache" ]
Walks the repo and returns work items.
[ "Walks", "the", "repo", "and", "returns", "work", "items", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/svn_polarion.py#L135-L148
train
ckcollab/polished
polished/backends/base.py
BaseBackend._remove_files
def _remove_files(self, directory, pattern): ''' Removes all files matching the given pattern Arguments: directory -- the directory to walk recursively pattern -- the filename pattern to match, can contain wildcards Example: self._remove_files("output", "*.html") ''' for root, dirnames, file_names in os.walk(directory): for file_name in fnmatch.filter(file_names, pattern): os.remove(os.path.join(root, file_name))
python
def _remove_files(self, directory, pattern): ''' Removes all files matching the given pattern Arguments: directory -- the directory to walk recursively pattern -- the filename pattern to match, can contain wildcards Example: self._remove_files("output", "*.html") ''' for root, dirnames, file_names in os.walk(directory): for file_name in fnmatch.filter(file_names, pattern): os.remove(os.path.join(root, file_name))
[ "def", "_remove_files", "(", "self", ",", "directory", ",", "pattern", ")", ":", "for", "root", ",", "dirnames", ",", "file_names", "in", "os", ".", "walk", "(", "directory", ")", ":", "for", "file_name", "in", "fnmatch", ".", "filter", "(", "file_names", ",", "pattern", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file_name", ")", ")" ]
Removes all files matching the given pattern Arguments: directory -- the directory to walk recursively pattern -- the filename pattern to match, can contain wildcards Example: self._remove_files("output", "*.html")
[ "Removes", "all", "files", "matching", "the", "search", "path" ]
5a00b2fbe569bc957d1647c0849fd344db29b644
https://github.com/ckcollab/polished/blob/5a00b2fbe569bc957d1647c0849fd344db29b644/polished/backends/base.py#L52-L64
train
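A self-contained sketch of the walk-and-filter pattern the method relies on (standard library only):

import fnmatch
import os

def remove_files(directory, pattern):
    """Delete every file under directory whose basename matches pattern."""
    for root, _dirnames, file_names in os.walk(directory):
        for file_name in fnmatch.filter(file_names, pattern):
            os.remove(os.path.join(root, file_name))

# e.g. remove_files("output", "*.html") removes all rendered HTML pages.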
praekeltfoundation/seed-message-sender
message_sender/views.py
EventListener.post
def post(self, request, *args, **kwargs): """ Checks for expected event types before continuing """ serializer = EventSerializer(data=request.data) if not serializer.is_valid(): return Response( {"accepted": False, "reason": serializer.errors}, status=400 ) data = serializer.validated_data event_type = { "ack": "ack", "nack": "nack", "delivery_report": "delivery_succeeded", }.get(data["event_type"]) accepted, reason = process_event( data["user_message_id"], event_type, data["nack_reason"], data["timestamp"] ) return Response( {"accepted": accepted, "reason": reason}, status=200 if accepted else 400 )
python
def post(self, request, *args, **kwargs): """ Checks for expected event types before continuing """ serializer = EventSerializer(data=request.data) if not serializer.is_valid(): return Response( {"accepted": False, "reason": serializer.errors}, status=400 ) data = serializer.validated_data event_type = { "ack": "ack", "nack": "nack", "delivery_report": "delivery_succeeded", }.get(data["event_type"]) accepted, reason = process_event( data["user_message_id"], event_type, data["nack_reason"], data["timestamp"] ) return Response( {"accepted": accepted, "reason": reason}, status=200 if accepted else 400 )
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serializer", "=", "EventSerializer", "(", "data", "=", "request", ".", "data", ")", "if", "not", "serializer", ".", "is_valid", "(", ")", ":", "return", "Response", "(", "{", "\"accepted\"", ":", "False", ",", "\"reason\"", ":", "serializer", ".", "errors", "}", ",", "status", "=", "400", ")", "data", "=", "serializer", ".", "validated_data", "event_type", "=", "{", "\"ack\"", ":", "\"ack\"", ",", "\"nack\"", ":", "\"nack\"", ",", "\"delivery_report\"", ":", "\"delivery_succeeded\"", ",", "}", ".", "get", "(", "data", "[", "\"event_type\"", "]", ")", "accepted", ",", "reason", "=", "process_event", "(", "data", "[", "\"user_message_id\"", "]", ",", "event_type", ",", "data", "[", "\"nack_reason\"", "]", ",", "data", "[", "\"timestamp\"", "]", ")", "return", "Response", "(", "{", "\"accepted\"", ":", "accepted", ",", "\"reason\"", ":", "reason", "}", ",", "status", "=", "200", "if", "accepted", "else", "400", ")" ]
Checks for expected event types before continuing
[ "Checks", "for", "expect", "event", "types", "before", "continuing" ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/views.py#L384-L409
train
carta/ldap_tools
src/ldap_tools/group.py
API.create
def create(self, group, grouptype): """ Create an LDAP Group. Raises: ldap3.core.exceptions.LDAPNoSuchObjectResult: an object involved with the request is missing ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: the entity being created already exists """ try: self.client.add( self.__distinguished_name(group), API.__object_class(), self.__ldap_attr(group, grouptype)) except ldap3.core.exceptions.LDAPNoSuchObjectResult: # pragma: no cover print( "Error creating LDAP Group.\nRequest: ", self.__ldap_attr(group, grouptype), "\nDistinguished Name: ", self.__distinguished_name(group), file=sys.stderr) except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: # pragma: no cover print( "Error creating LDAP Group. Group already exists. \nRequest: ", self.__ldap_attr(group, grouptype), "\nDistinguished Name: ", self.__distinguished_name(group), file=sys.stderr)
python
def create(self, group, grouptype): """ Create an LDAP Group. Raises: ldap3.core.exceptions.LDAPNoSuchObjectResult: an object involved with the request is missing ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: the entity being created already exists """ try: self.client.add( self.__distinguished_name(group), API.__object_class(), self.__ldap_attr(group, grouptype)) except ldap3.core.exceptions.LDAPNoSuchObjectResult: # pragma: no cover print( "Error creating LDAP Group.\nRequest: ", self.__ldap_attr(group, grouptype), "\nDistinguished Name: ", self.__distinguished_name(group), file=sys.stderr) except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: # pragma: no cover print( "Error creating LDAP Group. Group already exists. \nRequest: ", self.__ldap_attr(group, grouptype), "\nDistinguished Name: ", self.__distinguished_name(group), file=sys.stderr)
[ "def", "create", "(", "self", ",", "group", ",", "grouptype", ")", ":", "try", ":", "self", ".", "client", ".", "add", "(", "self", ".", "__distinguished_name", "(", "group", ")", ",", "API", ".", "__object_class", "(", ")", ",", "self", ".", "__ldap_attr", "(", "group", ",", "grouptype", ")", ")", "except", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPNoSuchObjectResult", ":", "# pragma: no cover", "print", "(", "\"Error creating LDAP Group.\\nRequest: \"", ",", "self", ".", "__ldap_attr", "(", "group", ",", "grouptype", ")", ",", "\"\\nDistinguished Name: \"", ",", "self", ".", "__distinguished_name", "(", "group", ")", ",", "file", "=", "sys", ".", "stderr", ")", "except", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPEntryAlreadyExistsResult", ":", "# pragma: no cover", "print", "(", "\"Error creating LDAP Group. Group already exists. \\nRequest: \"", ",", "self", ".", "__ldap_attr", "(", "group", ",", "grouptype", ")", ",", "\"\\nDistinguished Name: \"", ",", "self", ".", "__distinguished_name", "(", "group", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Create an LDAP Group. Raises: ldap3.core.exceptions.LDAPNoSuchObjectResult: an object involved with the request is missing ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: the entity being created already exists
[ "Create", "an", "LDAP", "Group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L18-L47
train
carta/ldap_tools
src/ldap_tools/group.py
API.add_user
def add_user(self, group, username): """ Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info. """ try: self.lookup_id(group) except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover raise err from None operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]} self.client.modify(self.__distinguished_name(group), operation)
python
def add_user(self, group, username): """ Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info. """ try: self.lookup_id(group) except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover raise err from None operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]} self.client.modify(self.__distinguished_name(group), operation)
[ "def", "add_user", "(", "self", ",", "group", ",", "username", ")", ":", "try", ":", "self", ".", "lookup_id", "(", "group", ")", "except", "ldap_tools", ".", "exceptions", ".", "InvalidResult", "as", "err", ":", "# pragma: no cover", "raise", "err", "from", "None", "operation", "=", "{", "'memberUid'", ":", "[", "(", "ldap3", ".", "MODIFY_ADD", ",", "[", "username", "]", ")", "]", "}", "self", ".", "client", ".", "modify", "(", "self", ".", "__distinguished_name", "(", "group", ")", ",", "operation", ")" ]
Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
[ "Add", "a", "user", "to", "the", "specified", "LDAP", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L53-L73
train
carta/ldap_tools
src/ldap_tools/group.py
API.remove_user
def remove_user(self, group, username): """ Remove a user from the specified LDAP group. Args: group: Name of group to update username: Username of user to remove Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info. """ try: self.lookup_id(group) except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover raise err from None operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]} self.client.modify(self.__distinguished_name(group), operation)
python
def remove_user(self, group, username): """ Remove a user from the specified LDAP group. Args: group: Name of group to update username: Username of user to remove Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info. """ try: self.lookup_id(group) except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover raise err from None operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]} self.client.modify(self.__distinguished_name(group), operation)
[ "def", "remove_user", "(", "self", ",", "group", ",", "username", ")", ":", "try", ":", "self", ".", "lookup_id", "(", "group", ")", "except", "ldap_tools", ".", "exceptions", ".", "InvalidResult", "as", "err", ":", "# pragma: no cover", "raise", "err", "from", "None", "operation", "=", "{", "'memberUid'", ":", "[", "(", "ldap3", ".", "MODIFY_DELETE", ",", "[", "username", "]", ")", "]", "}", "self", ".", "client", ".", "modify", "(", "self", ".", "__distinguished_name", "(", "group", ")", ",", "operation", ")" ]
Remove a user from the specified LDAP group. Args: group: Name of group to update username: Username of user to remove Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
[ "Remove", "a", "user", "from", "the", "specified", "LDAP", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L75-L95
train
carta/ldap_tools
src/ldap_tools/group.py
API.lookup_id
def lookup_id(self, group): """ Lookup GID for the given group. Args: group: Name of group whose ID needs to be looked up Returns: A bytestring representation of the group ID (gid) for the group specified Raises: ldap_tools.exceptions.NoGroupsFound: No Groups were returned by LDAP ldap_tools.exceptions.TooManyResults: More than one group was returned by LDAP """ filter = ["(cn={})".format(group), "(objectclass=posixGroup)"] results = self.client.search(filter, ['gidNumber']) if len(results) < 1: raise ldap_tools.exceptions.NoGroupsFound( 'No Groups Returned by LDAP') elif len(results) > 1: raise ldap_tools.exceptions.TooManyResults( 'Multiple groups found. Please narrow your search.') else: return results[0].gidNumber.value
python
def lookup_id(self, group): """ Lookup GID for the given group. Args: group: Name of group whose ID needs to be looked up Returns: A bytestring representation of the group ID (gid) for the group specified Raises: ldap_tools.exceptions.NoGroupsFound: No Groups were returned by LDAP ldap_tools.exceptions.TooManyResults: More than one group was returned by LDAP """ filter = ["(cn={})".format(group), "(objectclass=posixGroup)"] results = self.client.search(filter, ['gidNumber']) if len(results) < 1: raise ldap_tools.exceptions.NoGroupsFound( 'No Groups Returned by LDAP') elif len(results) > 1: raise ldap_tools.exceptions.TooManyResults( 'Multiple groups found. Please narrow your search.') else: return results[0].gidNumber.value
[ "def", "lookup_id", "(", "self", ",", "group", ")", ":", "filter", "=", "[", "\"(cn={})\"", ".", "format", "(", "group", ")", ",", "\"(objectclass=posixGroup)\"", "]", "results", "=", "self", ".", "client", ".", "search", "(", "filter", ",", "[", "'gidNumber'", "]", ")", "if", "len", "(", "results", ")", "<", "1", ":", "raise", "ldap_tools", ".", "exceptions", ".", "NoGroupsFound", "(", "'No Groups Returned by LDAP'", ")", "elif", "len", "(", "results", ")", ">", "1", ":", "raise", "ldap_tools", ".", "exceptions", ".", "TooManyResults", "(", "'Multiple groups found. Please narrow your search.'", ")", "else", ":", "return", "results", "[", "0", "]", ".", "gidNumber", ".", "value" ]
Lookup GID for the given group. Args: group: Name of group whose ID needs to be looked up Returns: A bytestring representation of the group ID (gid) for the group specified Raises: ldap_tools.exceptions.NoGroupsFound: No Groups were returned by LDAP ldap_tools.exceptions.TooManyResults: More than one group was returned by LDAP
[ "Lookup", "GID", "for", "the", "given", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L101-L130
train
carta/ldap_tools
src/ldap_tools/group.py
CLI.create
def create(config, group, type): """Create an LDAP group.""" if type not in ('user', 'service'): raise click.BadOptionUsage( # pragma: no cover "--grouptype must be 'user' or 'service'") client = Client() client.prepare_connection() group_api = API(client) group_api.create(group, type)
python
def create(config, group, type): """Create an LDAP group.""" if type not in ('user', 'service'): raise click.BadOptionUsage( # pragma: no cover "--grouptype must be 'user' or 'service'") client = Client() client.prepare_connection() group_api = API(client) group_api.create(group, type)
[ "def", "create", "(", "config", ",", "group", ",", "type", ")", ":", "if", "type", "not", "in", "(", "'user'", ",", "'service'", ")", ":", "raise", "click", ".", "BadOptionUsage", "(", "# pragma: no cover", "\"--grouptype must be 'user' or 'service'\"", ")", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "group_api", ".", "create", "(", "group", ",", "type", ")" ]
Create an LDAP group.
[ "Create", "an", "LDAP", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L165-L174
train
carta/ldap_tools
src/ldap_tools/group.py
CLI.delete
def delete(config, group, force): """Delete an LDAP group.""" if not force: if not click.confirm( 'Confirm that you want to delete group {}'.format(group)): sys.exit("Deletion of {} aborted".format(group)) client = Client() client.prepare_connection() group_api = API(client) group_api.delete(group)
python
def delete(config, group, force): """Delete an LDAP group.""" if not force: if not click.confirm( 'Confirm that you want to delete group {}'.format(group)): sys.exit("Deletion of {} aborted".format(group)) client = Client() client.prepare_connection() group_api = API(client) group_api.delete(group)
[ "def", "delete", "(", "config", ",", "group", ",", "force", ")", ":", "if", "not", "force", ":", "if", "not", "click", ".", "confirm", "(", "'Confirm that you want to delete group {}'", ".", "format", "(", "group", ")", ")", ":", "sys", ".", "exit", "(", "\"Deletion of {} aborted\"", ".", "format", "(", "group", ")", ")", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "group_api", ".", "delete", "(", "group", ")" ]
Delete an LDAP group.
[ "Delete", "an", "LDAP", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L180-L190
train
carta/ldap_tools
src/ldap_tools/group.py
CLI.add_user
def add_user(config, group, username): """Add specified user to specified group.""" client = Client() client.prepare_connection() group_api = API(client) try: group_api.add_user(group, username) except ldap_tools.exceptions.NoGroupsFound: # pragma: no cover print("Group ({}) not found".format(group)) except ldap_tools.exceptions.TooManyResults: # pragma: no cover print("Query for group ({}) returned multiple results.".format( group)) except ldap3.TYPE_OR_VALUE_EXISTS: # pragma: no cover print("{} already exists in {}".format(username, group))
python
def add_user(config, group, username): """Add specified user to specified group.""" client = Client() client.prepare_connection() group_api = API(client) try: group_api.add_user(group, username) except ldap_tools.exceptions.NoGroupsFound: # pragma: no cover print("Group ({}) not found".format(group)) except ldap_tools.exceptions.TooManyResults: # pragma: no cover print("Query for group ({}) returned multiple results.".format( group)) except ldap3.TYPE_OR_VALUE_EXISTS: # pragma: no cover print("{} already exists in {}".format(username, group))
[ "def", "add_user", "(", "config", ",", "group", ",", "username", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "try", ":", "group_api", ".", "add_user", "(", "group", ",", "username", ")", "except", "ldap_tools", ".", "exceptions", ".", "NoGroupsFound", ":", "# pragma: no cover", "print", "(", "\"Group ({}) not found\"", ".", "format", "(", "group", ")", ")", "except", "ldap_tools", ".", "exceptions", ".", "TooManyResults", ":", "# pragma: no cover", "print", "(", "\"Query for group ({}) returned multiple results.\"", ".", "format", "(", "group", ")", ")", "except", "ldap3", ".", "TYPE_OR_VALUE_EXISTS", ":", "# pragma: no cover", "print", "(", "\"{} already exists in {}\"", ".", "format", "(", "username", ",", "group", ")", ")" ]
Add specified user to specified group.
[ "Add", "specified", "user", "to", "specified", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L201-L214
train
carta/ldap_tools
src/ldap_tools/group.py
CLI.remove_user
def remove_user(config, group, username): """Remove specified user from specified group.""" client = Client() client.prepare_connection() group_api = API(client) try: group_api.remove_user(group, username) except ldap_tools.exceptions.NoGroupsFound: # pragma: no cover print("Group ({}) not found".format(group)) except ldap_tools.exceptions.TooManyResults: # pragma: no cover print("Query for group ({}) returned multiple results.".format( group)) except ldap3.NO_SUCH_ATTRIBUTE: # pragma: no cover print("{} does not exist in {}".format(username, group))
python
def remove_user(config, group, username): """Remove specified user from specified group.""" client = Client() client.prepare_connection() group_api = API(client) try: group_api.remove_user(group, username) except ldap_tools.exceptions.NoGroupsFound: # pragma: no cover print("Group ({}) not found".format(group)) except ldap_tools.exceptions.TooManyResults: # pragma: no cover print("Query for group ({}) returned multiple results.".format( group)) except ldap3.NO_SUCH_ATTRIBUTE: # pragma: no cover print("{} does not exist in {}".format(username, group))
[ "def", "remove_user", "(", "config", ",", "group", ",", "username", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "try", ":", "group_api", ".", "remove_user", "(", "group", ",", "username", ")", "except", "ldap_tools", ".", "exceptions", ".", "NoGroupsFound", ":", "# pragma: no cover", "print", "(", "\"Group ({}) not found\"", ".", "format", "(", "group", ")", ")", "except", "ldap_tools", ".", "exceptions", ".", "TooManyResults", ":", "# pragma: no cover", "print", "(", "\"Query for group ({}) returned multiple results.\"", ".", "format", "(", "group", ")", ")", "except", "ldap3", ".", "NO_SUCH_ATTRIBUTE", ":", "# pragma: no cover", "print", "(", "\"{} does not exist in {}\"", ".", "format", "(", "username", ",", "group", ")", ")" ]
Remove specified user from specified group.
[ "Remove", "specified", "user", "from", "specified", "group", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L228-L241
train
carta/ldap_tools
src/ldap_tools/group.py
CLI.index
def index(config): # pragma: no cover """Display group info in raw format.""" client = Client() client.prepare_connection() group_api = API(client) print(group_api.index())
python
def index(config): # pragma: no cover """Display group info in raw format.""" client = Client() client.prepare_connection() group_api = API(client) print(group_api.index())
[ "def", "index", "(", "config", ")", ":", "# pragma: no cover", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "print", "(", "group_api", ".", "index", "(", ")", ")" ]
Display group info in raw format.
[ "Display", "group", "info", "in", "raw", "format", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L245-L250
train
mkoura/dump2polarion
dump2polarion/results/importer.py
_get_importer
def _get_importer(input_file): """Selects importer based on input file type.""" __, ext = os.path.splitext(input_file) ext = ext.lower() if "ostriz" in input_file: from dump2polarion.results import ostriztools importer = ostriztools.import_ostriz elif ext == ".xml": # expect junit-report from pytest from dump2polarion.results import junittools importer = junittools.import_junit elif ext == ".csv": from dump2polarion.results import csvtools importer = csvtools.import_csv elif ext in dbtools.SQLITE_EXT: importer = dbtools.import_sqlite elif ext == ".json": from dump2polarion.results import jsontools importer = jsontools.import_json else: raise Dump2PolarionException("Cannot recognize type of input data, add file extension.") return importer
python
def _get_importer(input_file): """Selects importer based on input file type.""" __, ext = os.path.splitext(input_file) ext = ext.lower() if "ostriz" in input_file: from dump2polarion.results import ostriztools importer = ostriztools.import_ostriz elif ext == ".xml": # expect junit-report from pytest from dump2polarion.results import junittools importer = junittools.import_junit elif ext == ".csv": from dump2polarion.results import csvtools importer = csvtools.import_csv elif ext in dbtools.SQLITE_EXT: importer = dbtools.import_sqlite elif ext == ".json": from dump2polarion.results import jsontools importer = jsontools.import_json else: raise Dump2PolarionException("Cannot recognize type of input data, add file extension.") return importer
[ "def", "_get_importer", "(", "input_file", ")", ":", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "input_file", ")", "ext", "=", "ext", ".", "lower", "(", ")", "if", "\"ostriz\"", "in", "input_file", ":", "from", "dump2polarion", ".", "results", "import", "ostriztools", "importer", "=", "ostriztools", ".", "import_ostriz", "elif", "ext", "==", "\".xml\"", ":", "# expect junit-report from pytest", "from", "dump2polarion", ".", "results", "import", "junittools", "importer", "=", "junittools", ".", "import_junit", "elif", "ext", "==", "\".csv\"", ":", "from", "dump2polarion", ".", "results", "import", "csvtools", "importer", "=", "csvtools", ".", "import_csv", "elif", "ext", "in", "dbtools", ".", "SQLITE_EXT", ":", "importer", "=", "dbtools", ".", "import_sqlite", "elif", "ext", "==", "\".json\"", ":", "from", "dump2polarion", ".", "results", "import", "jsontools", "importer", "=", "jsontools", ".", "import_json", "else", ":", "raise", "Dump2PolarionException", "(", "\"Cannot recognize type of input data, add file extension.\"", ")", "return", "importer" ]
Selects importer based on input file type.
[ "Selects", "importer", "based", "on", "input", "file", "type", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/importer.py#L14-L41
train
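The selection logic is plain dispatch on file extension; a minimal sketch of the same pattern using a lookup table (the handler names here are hypothetical, not dump2polarion's API):

import os

def import_junit(path): ...   # hypothetical handlers
def import_csv(path): ...
def import_json(path): ...

_IMPORTERS = {".xml": import_junit, ".csv": import_csv, ".json": import_json}

def get_importer(input_file):
    """Pick an importer callable based on the file extension."""
    _, ext = os.path.splitext(input_file)
    importer = _IMPORTERS.get(ext.lower())
    if importer is None:
        raise ValueError("Cannot recognize type of input data, add file extension.")
    return importer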
smnorris/bcdata
bcdata/cli.py
parse_db_url
def parse_db_url(db_url): """provided a db url, return a dict with connection properties """ u = urlparse(db_url) db = {} db["database"] = u.path[1:] db["user"] = u.username db["password"] = u.password db["host"] = u.hostname db["port"] = u.port return db
python
def parse_db_url(db_url): """provided a db url, return a dict with connection properties """ u = urlparse(db_url) db = {} db["database"] = u.path[1:] db["user"] = u.username db["password"] = u.password db["host"] = u.hostname db["port"] = u.port return db
[ "def", "parse_db_url", "(", "db_url", ")", ":", "u", "=", "urlparse", "(", "db_url", ")", "db", "=", "{", "}", "db", "[", "\"database\"", "]", "=", "u", ".", "path", "[", "1", ":", "]", "db", "[", "\"user\"", "]", "=", "u", ".", "username", "db", "[", "\"password\"", "]", "=", "u", ".", "password", "db", "[", "\"host\"", "]", "=", "u", ".", "hostname", "db", "[", "\"port\"", "]", "=", "u", ".", "port", "return", "db" ]
provided a db url, return a dict with connection properties
[ "provided", "a", "db", "url", "return", "a", "dict", "with", "connection", "properties" ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L27-L37
train
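A quick check of the URL-splitting behaviour (standard library only, same fields as above):

from urllib.parse import urlparse

def parse_db_url(db_url):
    """Split an SQLAlchemy-style URL into libpq-style connection keys."""
    u = urlparse(db_url)
    return {
        "database": u.path[1:],  # drop the leading slash
        "user": u.username,
        "password": u.password,
        "host": u.hostname,
        "port": u.port,
    }

print(parse_db_url("postgresql://postgres:postgres@localhost:5432/postgis"))
# {'database': 'postgis', 'user': 'postgres', 'password': 'postgres',
#  'host': 'localhost', 'port': 5432}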
smnorris/bcdata
bcdata/cli.py
bounds_handler
def bounds_handler(ctx, param, value): """Handle different forms of bounds.""" retval = from_like_context(ctx, param, value) if retval is None and value is not None: try: value = value.strip(", []") retval = tuple(float(x) for x in re.split(r"[,\s]+", value)) assert len(retval) == 4 return retval except Exception: raise click.BadParameter( "{0!r} is not a valid bounding box representation".format(value) ) else: # pragma: no cover return retval
python
def bounds_handler(ctx, param, value): """Handle different forms of bounds.""" retval = from_like_context(ctx, param, value) if retval is None and value is not None: try: value = value.strip(", []") retval = tuple(float(x) for x in re.split(r"[,\s]+", value)) assert len(retval) == 4 return retval except Exception: raise click.BadParameter( "{0!r} is not a valid bounding box representation".format(value) ) else: # pragma: no cover return retval
[ "def", "bounds_handler", "(", "ctx", ",", "param", ",", "value", ")", ":", "retval", "=", "from_like_context", "(", "ctx", ",", "param", ",", "value", ")", "if", "retval", "is", "None", "and", "value", "is", "not", "None", ":", "try", ":", "value", "=", "value", ".", "strip", "(", "\", []\"", ")", "retval", "=", "tuple", "(", "float", "(", "x", ")", "for", "x", "in", "re", ".", "split", "(", "r\"[,\\s]+\"", ",", "value", ")", ")", "assert", "len", "(", "retval", ")", "==", "4", "return", "retval", "except", "Exception", ":", "raise", "click", ".", "BadParameter", "(", "\"{0!r} is not a valid bounding box representation\"", ".", "format", "(", "value", ")", ")", "else", ":", "# pragma: no cover", "return", "retval" ]
Handle different forms of bounds.
[ "Handle", "different", "forms", "of", "bounds", "." ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L58-L72
train
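The core parsing of bounds_handler, extracted into a standalone sketch without the click context plumbing:

import re

def parse_bounds(value):
    """Accept 'xmin ymin xmax ymax' separated by commas and/or spaces, with optional brackets."""
    value = value.strip(", []")
    bounds = tuple(float(x) for x in re.split(r"[,\s]+", value))
    if len(bounds) != 4:
        raise ValueError("{0!r} is not a valid bounding box representation".format(value))
    return bounds

print(parse_bounds("[1273134, 477040, 1276029, 479314]"))
# (1273134.0, 477040.0, 1276029.0, 479314.0)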
smnorris/bcdata
bcdata/cli.py
info
def info(dataset, indent, meta_member): """Print basic metadata about a DataBC WFS layer as JSON. Optionally print a single metadata item as a string. """ table = bcdata.validate_name(dataset) wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") info = {} info["name"] = table info["count"] = bcdata.get_count(table) info["schema"] = wfs.get_schema("pub:" + table) if meta_member: click.echo(info[meta_member]) else: click.echo(json.dumps(info, indent=indent))
python
def info(dataset, indent, meta_member): """Print basic metadata about a DataBC WFS layer as JSON. Optionally print a single metadata item as a string. """ table = bcdata.validate_name(dataset) wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") info = {} info["name"] = table info["count"] = bcdata.get_count(table) info["schema"] = wfs.get_schema("pub:" + table) if meta_member: click.echo(info[meta_member]) else: click.echo(json.dumps(info, indent=indent))
[ "def", "info", "(", "dataset", ",", "indent", ",", "meta_member", ")", ":", "table", "=", "bcdata", ".", "validate_name", "(", "dataset", ")", "wfs", "=", "WebFeatureService", "(", "url", "=", "bcdata", ".", "OWS_URL", ",", "version", "=", "\"2.0.0\"", ")", "info", "=", "{", "}", "info", "[", "\"name\"", "]", "=", "table", "info", "[", "\"count\"", "]", "=", "bcdata", ".", "get_count", "(", "table", ")", "info", "[", "\"schema\"", "]", "=", "wfs", ".", "get_schema", "(", "\"pub:\"", "+", "table", ")", "if", "meta_member", ":", "click", ".", "echo", "(", "info", "[", "meta_member", "]", ")", "else", ":", "click", ".", "echo", "(", "json", ".", "dumps", "(", "info", ",", "indent", "=", "indent", ")", ")" ]
Print basic metadata about a DataBC WFS layer as JSON. Optionally print a single metadata item as a string.
[ "Print", "basic", "metadata", "about", "a", "DataBC", "WFS", "layer", "as", "JSON", "." ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L120-L134
train
smnorris/bcdata
bcdata/cli.py
dem
def dem(bounds, src_crs, dst_crs, out_file, resolution): """Dump BC DEM to TIFF """ if not dst_crs: dst_crs = "EPSG:3005" bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
python
def dem(bounds, src_crs, dst_crs, out_file, resolution): """Dump BC DEM to TIFF """ if not dst_crs: dst_crs = "EPSG:3005" bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
[ "def", "dem", "(", "bounds", ",", "src_crs", ",", "dst_crs", ",", "out_file", ",", "resolution", ")", ":", "if", "not", "dst_crs", ":", "dst_crs", "=", "\"EPSG:3005\"", "bcdata", ".", "get_dem", "(", "bounds", ",", "out_file", "=", "out_file", ",", "src_crs", "=", "src_crs", ",", "dst_crs", "=", "dst_crs", ",", "resolution", "=", "resolution", ")" ]
Dump BC DEM to TIFF
[ "Dump", "BC", "DEM", "to", "TIFF" ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L143-L148
train
smnorris/bcdata
bcdata/cli.py
dump
def dump(dataset, query, out_file, bounds): """Write DataBC features to stdout as GeoJSON feature collection. \b $ bcdata dump bc-airports $ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'" $ bcdata dump bc-airports --bounds xmin ymin xmax ymax The values of --bounds must be in BC Albers. It can also be combined to read bounds of a feature dataset using Fiona: \b $ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds) """ table = bcdata.validate_name(dataset) data = bcdata.get_data(table, query=query, bounds=bounds) if out_file: with open(out_file, "w") as f: json.dump(data, f) else: sink = click.get_text_stream("stdout") sink.write(json.dumps(data))
python
def dump(dataset, query, out_file, bounds): """Write DataBC features to stdout as GeoJSON feature collection. \b $ bcdata dump bc-airports $ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'" $ bcdata dump bc-airports --bounds xmin ymin xmax ymax The values of --bounds must be in BC Albers. It can also be combined to read bounds of a feature dataset using Fiona: \b $ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds) """ table = bcdata.validate_name(dataset) data = bcdata.get_data(table, query=query, bounds=bounds) if out_file: with open(out_file, "w") as f: json.dump(data, f) else: sink = click.get_text_stream("stdout") sink.write(json.dumps(data))
[ "def", "dump", "(", "dataset", ",", "query", ",", "out_file", ",", "bounds", ")", ":", "table", "=", "bcdata", ".", "validate_name", "(", "dataset", ")", "data", "=", "bcdata", ".", "get_data", "(", "table", ",", "query", "=", "query", ",", "bounds", "=", "bounds", ")", "if", "out_file", ":", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "data", ".", "json", "(", ")", ",", "f", ")", "else", ":", "sink", "=", "click", ".", "get_text_stream", "(", "\"stdout\"", ")", "sink", ".", "write", "(", "json", ".", "dumps", "(", "data", ")", ")" ]
Write DataBC features to stdout as GeoJSON feature collection. \b $ bcdata dump bc-airports $ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'" $ bcdata dump bc-airports --bounds xmin ymin xmax ymax The values of --bounds must be in BC Albers. It can also be combined to read bounds of a feature dataset using Fiona: \b $ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
[ "Write", "DataBC", "features", "to", "stdout", "as", "GeoJSON", "feature", "collection", "." ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L159-L181
train
smnorris/bcdata
bcdata/cli.py
cat
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby): """Write DataBC features to stdout as GeoJSON feature objects. """ # Note that cat does not concatenate! dump_kwds = {"sort_keys": True} if indent: dump_kwds["indent"] = indent if compact: dump_kwds["separators"] = (",", ":") table = bcdata.validate_name(dataset) for feat in bcdata.get_features( table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs ): click.echo(json.dumps(feat, **dump_kwds))
python
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby): """Write DataBC features to stdout as GeoJSON feature objects. """ # Note that cat does not concatenate! dump_kwds = {"sort_keys": True} if indent: dump_kwds["indent"] = indent if compact: dump_kwds["separators"] = (",", ":") table = bcdata.validate_name(dataset) for feat in bcdata.get_features( table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs ): click.echo(json.dumps(feat, **dump_kwds))
[ "def", "cat", "(", "dataset", ",", "query", ",", "bounds", ",", "indent", ",", "compact", ",", "dst_crs", ",", "pagesize", ",", "sortby", ")", ":", "# Note that cat does not concatenate!", "dump_kwds", "=", "{", "\"sort_keys\"", ":", "True", "}", "if", "indent", ":", "dump_kwds", "[", "\"indent\"", "]", "=", "indent", "if", "compact", ":", "dump_kwds", "[", "\"separators\"", "]", "=", "(", "\",\"", ",", "\":\"", ")", "table", "=", "bcdata", ".", "validate_name", "(", "dataset", ")", "for", "feat", "in", "bcdata", ".", "get_features", "(", "table", ",", "query", "=", "query", ",", "bounds", "=", "bounds", ",", "sortby", "=", "sortby", ",", "crs", "=", "dst_crs", ")", ":", "click", ".", "echo", "(", "json", ".", "dumps", "(", "feat", ",", "*", "*", "dump_kwds", ")", ")" ]
Write DataBC features to stdout as GeoJSON feature objects.
[ "Write", "DataBC", "features", "to", "stdout", "as", "GeoJSON", "feature", "objects", "." ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L198-L211
train
smnorris/bcdata
bcdata/cli.py
bc2pg
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers): """Download a DataBC WFS layer to postgres - an ogr2ogr wrapper. \b $ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis The default target database can be specified by setting the $DATABASE_URL environment variable. https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls """ src = bcdata.validate_name(dataset) src_schema, src_table = [i.lower() for i in src.split(".")] if not schema: schema = src_schema if not table: table = src_table # create schema if it does not exist conn = pgdata.connect(db_url) if schema not in conn.schemas: click.echo("Schema {} does not exist, creating it".format(schema)) conn.create_schema(schema) # build parameters for each required request param_dicts = bcdata.define_request( dataset, query=query, sortby=sortby, pagesize=pagesize ) try: # run the first request / load payload = urlencode(param_dicts[0], doseq=True) url = bcdata.WFS_URL + "?" + payload db = parse_db_url(db_url) db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format( h=db["host"], u=db["user"], db=db["database"], pwd=db["password"] ) # create the table if not append: command = [ "ogr2ogr", "-lco", "OVERWRITE=YES", "-lco", "SCHEMA={}".format(schema), "-lco", "GEOMETRY_NAME=geom", "-f", "PostgreSQL", db_string, "-t_srs", "EPSG:3005", "-nln", table, url, ] click.echo(" ".join(command)) subprocess.run(command) # append to table when append specified or processing many chunks if len(param_dicts) > 1 or append: # define starting index in list of requests if append: idx = 0 else: idx = 1 commands = [] for chunk, paramdict in enumerate(param_dicts[idx:]): payload = urlencode(paramdict, doseq=True) url = bcdata.WFS_URL + "?" + payload command = [ "ogr2ogr", "-update", "-append", "-f", "PostgreSQL", db_string + " active_schema=" + schema, "-t_srs", "EPSG:3005", "-nln", table, url, ] commands.append(command) # https://stackoverflow.com/questions/14533458 pool = Pool(max_workers) with click.progressbar( pool.imap(partial(call), commands), length=len(param_dicts) ) as bar: for returncode in bar: if returncode != 0: click.echo("Command failed: {}".format(returncode)) click.echo( "Load of {} to {} in {} complete".format(src, schema + "." + table, db_url) ) except Exception: click.echo("Data load failed") raise click.Abort()
python
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers): """Download a DataBC WFS layer to postgres - an ogr2ogr wrapper. \b $ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis The default target database can be specified by setting the $DATABASE_URL environment variable. https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls """ src = bcdata.validate_name(dataset) src_schema, src_table = [i.lower() for i in src.split(".")] if not schema: schema = src_schema if not table: table = src_table # create schema if it does not exist conn = pgdata.connect(db_url) if schema not in conn.schemas: click.echo("Schema {} does not exist, creating it".format(schema)) conn.create_schema(schema) # build parameters for each required request param_dicts = bcdata.define_request( dataset, query=query, sortby=sortby, pagesize=pagesize ) try: # run the first request / load payload = urlencode(param_dicts[0], doseq=True) url = bcdata.WFS_URL + "?" + payload db = parse_db_url(db_url) db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format( h=db["host"], u=db["user"], db=db["database"], pwd=db["password"] ) # create the table if not append: command = [ "ogr2ogr", "-lco", "OVERWRITE=YES", "-lco", "SCHEMA={}".format(schema), "-lco", "GEOMETRY_NAME=geom", "-f", "PostgreSQL", db_string, "-t_srs", "EPSG:3005", "-nln", table, url, ] click.echo(" ".join(command)) subprocess.run(command) # append to table when append specified or processing many chunks if len(param_dicts) > 1 or append: # define starting index in list of requests if append: idx = 0 else: idx = 1 commands = [] for chunk, paramdict in enumerate(param_dicts[idx:]): payload = urlencode(paramdict, doseq=True) url = bcdata.WFS_URL + "?" + payload command = [ "ogr2ogr", "-update", "-append", "-f", "PostgreSQL", db_string + " active_schema=" + schema, "-t_srs", "EPSG:3005", "-nln", table, url, ] commands.append(command) # https://stackoverflow.com/questions/14533458 pool = Pool(max_workers) with click.progressbar( pool.imap(partial(call), commands), length=len(param_dicts) ) as bar: for returncode in bar: if returncode != 0: click.echo("Command failed: {}".format(returncode)) click.echo( "Load of {} to {} in {} complete".format(src, schema + "." + table, db_url) ) except Exception: click.echo("Data load failed") raise click.Abort()
[ "def", "bc2pg", "(", "dataset", ",", "db_url", ",", "table", ",", "schema", ",", "query", ",", "append", ",", "pagesize", ",", "sortby", ",", "max_workers", ")", ":", "src", "=", "bcdata", ".", "validate_name", "(", "dataset", ")", "src_schema", ",", "src_table", "=", "[", "i", ".", "lower", "(", ")", "for", "i", "in", "src", ".", "split", "(", "\".\"", ")", "]", "if", "not", "schema", ":", "schema", "=", "src_schema", "if", "not", "table", ":", "table", "=", "src_table", "# create schema if it does not exist", "conn", "=", "pgdata", ".", "connect", "(", "db_url", ")", "if", "schema", "not", "in", "conn", ".", "schemas", ":", "click", ".", "echo", "(", "\"Schema {} does not exist, creating it\"", ".", "format", "(", "schema", ")", ")", "conn", ".", "create_schema", "(", "schema", ")", "# build parameters for each required request", "param_dicts", "=", "bcdata", ".", "define_request", "(", "dataset", ",", "query", "=", "query", ",", "sortby", "=", "sortby", ",", "pagesize", "=", "pagesize", ")", "try", ":", "# run the first request / load", "payload", "=", "urlencode", "(", "param_dicts", "[", "0", "]", ",", "doseq", "=", "True", ")", "url", "=", "bcdata", ".", "WFS_URL", "+", "\"?\"", "+", "payload", "db", "=", "parse_db_url", "(", "db_url", ")", "db_string", "=", "\"PG:host={h} user={u} dbname={db} password={pwd}\"", ".", "format", "(", "h", "=", "db", "[", "\"host\"", "]", ",", "u", "=", "db", "[", "\"user\"", "]", ",", "db", "=", "db", "[", "\"database\"", "]", ",", "pwd", "=", "db", "[", "\"password\"", "]", ")", "# create the table", "if", "not", "append", ":", "command", "=", "[", "\"ogr2ogr\"", ",", "\"-lco\"", ",", "\"OVERWRITE=YES\"", ",", "\"-lco\"", ",", "\"SCHEMA={}\"", ".", "format", "(", "schema", ")", ",", "\"-lco\"", ",", "\"GEOMETRY_NAME=geom\"", ",", "\"-f\"", ",", "\"PostgreSQL\"", ",", "db_string", ",", "\"-t_srs\"", ",", "\"EPSG:3005\"", ",", "\"-nln\"", ",", "table", ",", "url", ",", "]", "click", ".", "echo", "(", "\" \"", ".", "join", "(", "command", ")", ")", "subprocess", ".", "run", "(", "command", ")", "# append to table when append specified or processing many chunks", "if", "len", "(", "param_dicts", ")", ">", "1", "or", "append", ":", "# define starting index in list of requests", "if", "append", ":", "idx", "=", "0", "else", ":", "idx", "=", "1", "commands", "=", "[", "]", "for", "chunk", ",", "paramdict", "in", "enumerate", "(", "param_dicts", "[", "idx", ":", "]", ")", ":", "payload", "=", "urlencode", "(", "paramdict", ",", "doseq", "=", "True", ")", "url", "=", "bcdata", ".", "WFS_URL", "+", "\"?\"", "+", "payload", "command", "=", "[", "\"ogr2ogr\"", ",", "\"-update\"", ",", "\"-append\"", ",", "\"-f\"", ",", "\"PostgreSQL\"", ",", "db_string", "+", "\" active_schema=\"", "+", "schema", ",", "\"-t_srs\"", ",", "\"EPSG:3005\"", ",", "\"-nln\"", ",", "table", ",", "url", ",", "]", "commands", ".", "append", "(", "command", ")", "# https://stackoverflow.com/questions/14533458", "pool", "=", "Pool", "(", "max_workers", ")", "with", "click", ".", "progressbar", "(", "pool", ".", "imap", "(", "partial", "(", "call", ")", ",", "commands", ")", ",", "length", "=", "len", "(", "param_dicts", ")", ")", "as", "bar", ":", "for", "returncode", "in", "bar", ":", "if", "returncode", "!=", "0", ":", "click", ".", "echo", "(", "\"Command failed: {}\"", ".", "format", "(", "returncode", ")", ")", "click", ".", "echo", "(", "\"Load of {} to {} in {} complete\"", ".", "format", "(", "src", ",", "schema", "+", "\".\"", "+", "table", ",", "db_url", ")", ")", "except", 
"Exception", ":", "click", ".", "echo", "(", "\"Data load failed\"", ")", "raise", "click", ".", "Abort", "(", ")" ]
Download a DataBC WFS layer to postgres - an ogr2ogr wrapper. \b $ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis The default target database can be specified by setting the $DATABASE_URL environment variable. https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
[ "Download", "a", "DataBC", "WFS", "layer", "to", "postgres", "-", "an", "ogr2ogr", "wrapper", "." ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L235-L333
train
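To make the paging logic in bc2pg concrete, here is a minimal sketch of how each parameter dict becomes one request URL; the endpoint and dicts below are invented stand-ins for what bcdata.WFS_URL and bcdata.define_request would actually provide:

from urllib.parse import urlencode

WFS_URL = "https://example.gov.bc.ca/geo/pub/wfs"  # placeholder, not the real endpoint
param_dicts = [  # hypothetical output of bcdata.define_request, one dict per page
    {"service": "WFS", "request": "GetFeature", "startIndex": 0, "count": 10000},
    {"service": "WFS", "request": "GetFeature", "startIndex": 10000, "count": 10000},
]
# same pattern as bc2pg: each page becomes one GET url handed to ogr2ogr
urls = [WFS_URL + "?" + urlencode(p, doseq=True) for p in param_dicts]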
mgoral/subconvert
src/subconvert/parsing/Core.py
SubParser.__parseFormat
def __parseFormat(self, fmt, content, fps = 25): '''Actual parser. Please note that time_to is not required to process as not all subtitles provide it.''' headerFound = False subSection = '' for lineNo, line in enumerate(content): line = self._initialLinePrepare(line, lineNo) if not fmt.WITH_HEADER and not self._formatFound and lineNo > self._maxFmtSearch: return subSection = ''.join([subSection, line]) if fmt.WITH_HEADER and not headerFound: if lineNo > self._maxHeaderLen: return headerFound = fmt.addHeaderInfo(subSection, self._subtitles.header()) if headerFound: self._formatFound = True subSection = '' elif fmt.subtitleEnds(line) or (lineNo + 1) == len(content): subtitle = fmt.createSubtitle(fps, subSection) if subtitle is None: if subSection in ('\n', '\r\n', '\r'): subSection = '' continue elif self._subtitles.size() > 0: raise SubParsingError(_("Parsing error"), lineNo) else: return # store parsing result if new end marker occurred, then clear results if subtitle.start and subtitle.text: self._formatFound = True try: self._subtitles.append(subtitle) except SubException as msg: raise SubParsingError(msg, lineNo) elif subtitle.start and not subtitle.text: pass else: return subSection = ''
python
def __parseFormat(self, fmt, content, fps = 25): '''Actual parser. Please note that time_to is not required to process as not all subtitles provide it.''' headerFound = False subSection = '' for lineNo, line in enumerate(content): line = self._initialLinePrepare(line, lineNo) if not fmt.WITH_HEADER and not self._formatFound and lineNo > self._maxFmtSearch: return subSection = ''.join([subSection, line]) if fmt.WITH_HEADER and not headerFound: if lineNo > self._maxHeaderLen: return headerFound = fmt.addHeaderInfo(subSection, self._subtitles.header()) if headerFound: self._formatFound = True subSection = '' elif fmt.subtitleEnds(line) or (lineNo + 1) == len(content): subtitle = fmt.createSubtitle(fps, subSection) if subtitle is None: if subSection in ('\n', '\r\n', '\r'): subSection = '' continue elif self._subtitles.size() > 0: raise SubParsingError(_("Parsing error"), lineNo) else: return # store parsing result if new end marker occurred, then clear results if subtitle.start and subtitle.text: self._formatFound = True try: self._subtitles.append(subtitle) except SubException as msg: raise SubParsingError(msg, lineNo) elif subtitle.start and not subtitle.text: pass else: return subSection = ''
[ "def", "__parseFormat", "(", "self", ",", "fmt", ",", "content", ",", "fps", "=", "25", ")", ":", "headerFound", "=", "False", "subSection", "=", "''", "for", "lineNo", ",", "line", "in", "enumerate", "(", "content", ")", ":", "line", "=", "self", ".", "_initialLinePrepare", "(", "line", ",", "lineNo", ")", "if", "not", "fmt", ".", "WITH_HEADER", "and", "not", "self", ".", "_formatFound", "and", "lineNo", ">", "self", ".", "_maxFmtSearch", ":", "return", "subSection", "=", "''", ".", "join", "(", "[", "subSection", ",", "line", "]", ")", "if", "fmt", ".", "WITH_HEADER", "and", "not", "headerFound", ":", "if", "lineNo", ">", "self", ".", "_maxHeaderLen", ":", "return", "headerFound", "=", "fmt", ".", "addHeaderInfo", "(", "subSection", ",", "self", ".", "_subtitles", ".", "header", "(", ")", ")", "if", "headerFound", ":", "self", ".", "_formatFound", "=", "True", "subSection", "=", "''", "elif", "fmt", ".", "subtitleEnds", "(", "line", ")", "or", "(", "lineNo", "+", "1", ")", "==", "len", "(", "content", ")", ":", "subtitle", "=", "fmt", ".", "createSubtitle", "(", "fps", ",", "subSection", ")", "if", "subtitle", "is", "None", ":", "if", "subSection", "in", "(", "'\\n'", ",", "'\\r\\n'", ",", "'\\r'", ")", ":", "subSection", "=", "''", "continue", "elif", "self", ".", "_subtitles", ".", "size", "(", ")", ">", "0", ":", "raise", "SubParsingError", "(", "_", "(", "\"Parsing error\"", ")", ",", "lineNo", ")", "else", ":", "return", "# store parsing result if new end marker occurred, then clear results", "if", "subtitle", ".", "start", "and", "subtitle", ".", "text", ":", "self", ".", "_formatFound", "=", "True", "try", ":", "self", ".", "_subtitles", ".", "append", "(", "subtitle", ")", "except", "SubException", "as", "msg", ":", "raise", "SubParsingError", "(", "msg", ",", "lineNo", ")", "elif", "subtitle", ".", "start", "and", "not", "subtitle", ".", "text", ":", "pass", "else", ":", "return", "subSection", "=", "''" ]
Actual parser. Please note that time_to is not required for processing, as not all subtitles provide it.
[ "Actual", "parser", ".", "Please", "note", "that", "time_to", "is", "not", "required", "to", "process", "as", "not", "all", "subtitles", "provide", "it", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Core.py#L312-L353
train
mkoura/dump2polarion
dump2polarion/dumper_cli.py
get_args
def get_args(args=None): """Get command line arguments.""" parser = argparse.ArgumentParser(description="dump2polarion") parser.add_argument( "-i", "--input_file", required=True, help="Path to CSV, SQLite or JUnit reports file or importers XML file", ) parser.add_argument( "-o", "--output_file", help="Where to save the XML output file (default: not saved)" ) parser.add_argument("-t", "--testrun-id", help="Polarion test run id") parser.add_argument("-c", "--config-file", help="Path to config YAML") parser.add_argument( "-n", "--no-submit", action="store_true", help="Don't submit results to Polarion" ) parser.add_argument("--user", help="Username to use to submit results to Polarion") parser.add_argument("--password", help="Password to use to submit results to Polarion") parser.add_argument("--polarion-url", help="Base Polarion URL") parser.add_argument("-f", "--force", action="store_true", help="Don't validate test run id") parser.add_argument("--dry-run", action="store_true", help="Dry run, don't update anything") parser.add_argument("--no-verify", action="store_true", help="Don't verify import success") parser.add_argument( "--verify-timeout", type=int, default=300, metavar="SEC", help="How long to wait (in seconds) for verification of results submission" " (default: %(default)s)", ) parser.add_argument( "--job-log", help="Where to save the log file produced by the Importer (default: not saved)" ) parser.add_argument("--log-level", help="Set logging to specified level") return parser.parse_args(args)
python
def get_args(args=None): """Get command line arguments.""" parser = argparse.ArgumentParser(description="dump2polarion") parser.add_argument( "-i", "--input_file", required=True, help="Path to CSV, SQLite or JUnit reports file or importers XML file", ) parser.add_argument( "-o", "--output_file", help="Where to save the XML output file (default: not saved)" ) parser.add_argument("-t", "--testrun-id", help="Polarion test run id") parser.add_argument("-c", "--config-file", help="Path to config YAML") parser.add_argument( "-n", "--no-submit", action="store_true", help="Don't submit results to Polarion" ) parser.add_argument("--user", help="Username to use to submit results to Polarion") parser.add_argument("--password", help="Password to use to submit results to Polarion") parser.add_argument("--polarion-url", help="Base Polarion URL") parser.add_argument("-f", "--force", action="store_true", help="Don't validate test run id") parser.add_argument("--dry-run", action="store_true", help="Dry run, don't update anything") parser.add_argument("--no-verify", action="store_true", help="Don't verify import success") parser.add_argument( "--verify-timeout", type=int, default=300, metavar="SEC", help="How long to wait (in seconds) for verification of results submission" " (default: %(default)s)", ) parser.add_argument( "--job-log", help="Where to save the log file produced by the Importer (default: not saved)" ) parser.add_argument("--log-level", help="Set logging to specified level") return parser.parse_args(args)
[ "def", "get_args", "(", "args", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"dump2polarion\"", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--input_file\"", ",", "required", "=", "True", ",", "help", "=", "\"Path to CSV, SQLite or JUnit reports file or importers XML file\"", ",", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "\"--output_file\"", ",", "help", "=", "\"Where to save the XML output file (default: not saved)\"", ")", "parser", ".", "add_argument", "(", "\"-t\"", ",", "\"--testrun-id\"", ",", "help", "=", "\"Polarion test run id\"", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--config-file\"", ",", "help", "=", "\"Path to config YAML\"", ")", "parser", ".", "add_argument", "(", "\"-n\"", ",", "\"--no-submit\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't submit results to Polarion\"", ")", "parser", ".", "add_argument", "(", "\"--user\"", ",", "help", "=", "\"Username to use to submit results to Polarion\"", ")", "parser", ".", "add_argument", "(", "\"--password\"", ",", "help", "=", "\"Password to use to submit results to Polarion\"", ")", "parser", ".", "add_argument", "(", "\"--polarion-url\"", ",", "help", "=", "\"Base Polarion URL\"", ")", "parser", ".", "add_argument", "(", "\"-f\"", ",", "\"--force\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't validate test run id\"", ")", "parser", ".", "add_argument", "(", "\"--dry-run\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Dry run, don't update anything\"", ")", "parser", ".", "add_argument", "(", "\"--no-verify\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't verify import success\"", ")", "parser", ".", "add_argument", "(", "\"--verify-timeout\"", ",", "type", "=", "int", ",", "default", "=", "300", ",", "metavar", "=", "\"SEC\"", ",", "help", "=", "\"How long to wait (in seconds) for verification of results submission\"", "\" (default: %(default)s)\"", ",", ")", "parser", ".", "add_argument", "(", "\"--job-log\"", ",", "help", "=", "\"Where to save the log file produced by the Importer (default: not saved)\"", ")", "parser", ".", "add_argument", "(", "\"--log-level\"", ",", "help", "=", "\"Set logging to specified level\"", ")", "return", "parser", ".", "parse_args", "(", "args", ")" ]
Get command line arguments.
[ "Get", "command", "line", "arguments", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L26-L61
train
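Because get_args takes an optional args list, it can be exercised without touching sys.argv; a small usage sketch (the file and run names are invented):

args = get_args(["-i", "results.csv", "-t", "RUN-42", "--no-submit"])
assert args.input_file == "results.csv"
assert args.testrun_id == "RUN-42"
assert args.no_submit is True
assert args.verify_timeout == 300  # the declared default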
mkoura/dump2polarion
dump2polarion/dumper_cli.py
get_submit_args
def get_submit_args(args): """Gets arguments for the `submit_and_verify` method.""" submit_args = dict( testrun_id=args.testrun_id, user=args.user, password=args.password, no_verify=args.no_verify, verify_timeout=args.verify_timeout, log_file=args.job_log, dry_run=args.dry_run, ) submit_args = {k: v for k, v in submit_args.items() if v is not None} return Box(submit_args, frozen_box=True, default_box=True)
python
def get_submit_args(args): """Gets arguments for the `submit_and_verify` method.""" submit_args = dict( testrun_id=args.testrun_id, user=args.user, password=args.password, no_verify=args.no_verify, verify_timeout=args.verify_timeout, log_file=args.job_log, dry_run=args.dry_run, ) submit_args = {k: v for k, v in submit_args.items() if v is not None} return Box(submit_args, frozen_box=True, default_box=True)
[ "def", "get_submit_args", "(", "args", ")", ":", "submit_args", "=", "dict", "(", "testrun_id", "=", "args", ".", "testrun_id", ",", "user", "=", "args", ".", "user", ",", "password", "=", "args", ".", "password", ",", "no_verify", "=", "args", ".", "no_verify", ",", "verify_timeout", "=", "args", ".", "verify_timeout", ",", "log_file", "=", "args", ".", "job_log", ",", "dry_run", "=", "args", ".", "dry_run", ",", ")", "submit_args", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "submit_args", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "return", "Box", "(", "submit_args", ",", "frozen_box", "=", "True", ",", "default_box", "=", "True", ")" ]
Gets arguments for the `submit_and_verify` method.
[ "Gets", "arguments", "for", "the", "submit_and_verify", "method", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L64-L76
train
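The None-filter in get_submit_args matters because the Box is built with default_box=True: dropped keys read back as an empty, falsy Box rather than None. A sketch assuming python-box semantics:

from box import Box

raw = {"testrun_id": "RUN-42", "user": None}
filtered = {k: v for k, v in raw.items() if v is not None}
submit_args = Box(filtered, frozen_box=True, default_box=True)
submit_args.testrun_id  # 'RUN-42'
bool(submit_args.user)  # False -- the key was filtered out, so an empty Box comes back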
mkoura/dump2polarion
dump2polarion/dumper_cli.py
process_args
def process_args(args): """Processes passed arguments.""" passed_args = args if isinstance(args, argparse.Namespace): passed_args = vars(passed_args) elif hasattr(args, "to_dict"): passed_args = passed_args.to_dict() return Box(passed_args, frozen_box=True, default_box=True)
python
def process_args(args): """Processes passed arguments.""" passed_args = args if isinstance(args, argparse.Namespace): passed_args = vars(passed_args) elif hasattr(args, "to_dict"): passed_args = passed_args.to_dict() return Box(passed_args, frozen_box=True, default_box=True)
[ "def", "process_args", "(", "args", ")", ":", "passed_args", "=", "args", "if", "isinstance", "(", "args", ",", "argparse", ".", "Namespace", ")", ":", "passed_args", "=", "vars", "(", "passed_args", ")", "elif", "hasattr", "(", "args", ",", "\"to_dict\"", ")", ":", "passed_args", "=", "passed_args", ".", "to_dict", "(", ")", "return", "Box", "(", "passed_args", ",", "frozen_box", "=", "True", ",", "default_box", "=", "True", ")" ]
Processes passed arguments.
[ "Processes", "passed", "arguments", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L79-L87
train
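A sketch of the argparse.Namespace branch of process_args (attribute values invented):

import argparse

ns = argparse.Namespace(input_file="results.xml", no_submit=False)
boxed = process_args(ns)
boxed.input_file   # 'results.xml'
boxed.missing_key  # empty Box via default_box=True, instead of an AttributeError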
mkoura/dump2polarion
dump2polarion/dumper_cli.py
submit_if_ready
def submit_if_ready(args, submit_args, config): """Submits the input XML file if it's already in the expected format.""" __, ext = os.path.splitext(args.input_file) if ext.lower() != ".xml": return None with io.open(args.input_file, encoding="utf-8") as input_file: xml = input_file.read(1024) if not ("<testsuites" in xml or "<testcases" in xml or "<requirements" in xml): return None if args.no_submit: logger.info("Nothing to do") return 0 # expect importer xml and just submit it response = dump2polarion.submit_and_verify( xml_file=args.input_file, config=config, **submit_args ) return 0 if response else 2
python
def submit_if_ready(args, submit_args, config): """Submits the input XML file if it's already in the expected format.""" __, ext = os.path.splitext(args.input_file) if ext.lower() != ".xml": return None with io.open(args.input_file, encoding="utf-8") as input_file: xml = input_file.read(1024) if not ("<testsuites" in xml or "<testcases" in xml or "<requirements" in xml): return None if args.no_submit: logger.info("Nothing to do") return 0 # expect importer xml and just submit it response = dump2polarion.submit_and_verify( xml_file=args.input_file, config=config, **submit_args ) return 0 if response else 2
[ "def", "submit_if_ready", "(", "args", ",", "submit_args", ",", "config", ")", ":", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "args", ".", "input_file", ")", "if", "ext", ".", "lower", "(", ")", "!=", "\".xml\"", ":", "return", "None", "with", "io", ".", "open", "(", "args", ".", "input_file", ",", "encoding", "=", "\"utf-8\"", ")", "as", "input_file", ":", "xml", "=", "input_file", ".", "read", "(", "1024", ")", "if", "not", "(", "\"<testsuites\"", "in", "xml", "or", "\"<testcases\"", "in", "xml", "or", "\"<requirements\"", "in", "xml", ")", ":", "return", "None", "if", "args", ".", "no_submit", ":", "logger", ".", "info", "(", "\"Nothing to do\"", ")", "return", "0", "# expect importer xml and just submit it", "response", "=", "dump2polarion", ".", "submit_and_verify", "(", "xml_file", "=", "args", ".", "input_file", ",", "config", "=", "config", ",", "*", "*", "submit_args", ")", "return", "0", "if", "response", "else", "2" ]
Submits the input XML file if it's already in the expected format.
[ "Submits", "the", "input", "XML", "file", "if", "it", "s", "already", "in", "the", "expected", "format", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L119-L139
train
mkoura/dump2polarion
dump2polarion/dumper_cli.py
dumper
def dumper(args, config, transform_func=None): """Dumper main function.""" args = process_args(args) submit_args = get_submit_args(args) submit_outcome = submit_if_ready(args, submit_args, config) if submit_outcome is not None: # submitted, nothing more to do return submit_outcome import_time = datetime.datetime.utcnow() try: records = dump2polarion.import_results(args.input_file, older_than=import_time) testrun_id = get_testrun_id(args, config, records.testrun) exporter = dump2polarion.XunitExport( testrun_id, records, config, transform_func=transform_func ) output = exporter.export() except NothingToDoException as info: logger.info(info) return 0 except (EnvironmentError, Dump2PolarionException) as err: logger.fatal(err) return 1 if args.output_file or args.no_submit: # when no output file is specified, the 'testrun_TESTRUN_ID-TIMESTAMP' # file will be created in current directory exporter.write_xml(output, args.output_file) if not args.no_submit: response = dump2polarion.submit_and_verify(output, config=config, **submit_args) __, ext = os.path.splitext(args.input_file) if ext.lower() in dbtools.SQLITE_EXT and response: dbtools.mark_exported_sqlite(args.input_file, import_time) return 0 if response else 2 return 0
python
def dumper(args, config, transform_func=None): """Dumper main function.""" args = process_args(args) submit_args = get_submit_args(args) submit_outcome = submit_if_ready(args, submit_args, config) if submit_outcome is not None: # submitted, nothing more to do return submit_outcome import_time = datetime.datetime.utcnow() try: records = dump2polarion.import_results(args.input_file, older_than=import_time) testrun_id = get_testrun_id(args, config, records.testrun) exporter = dump2polarion.XunitExport( testrun_id, records, config, transform_func=transform_func ) output = exporter.export() except NothingToDoException as info: logger.info(info) return 0 except (EnvironmentError, Dump2PolarionException) as err: logger.fatal(err) return 1 if args.output_file or args.no_submit: # when no output file is specified, the 'testrun_TESTRUN_ID-TIMESTAMP' # file will be created in current directory exporter.write_xml(output, args.output_file) if not args.no_submit: response = dump2polarion.submit_and_verify(output, config=config, **submit_args) __, ext = os.path.splitext(args.input_file) if ext.lower() in dbtools.SQLITE_EXT and response: dbtools.mark_exported_sqlite(args.input_file, import_time) return 0 if response else 2 return 0
[ "def", "dumper", "(", "args", ",", "config", ",", "transform_func", "=", "None", ")", ":", "args", "=", "process_args", "(", "args", ")", "submit_args", "=", "get_submit_args", "(", "args", ")", "submit_outcome", "=", "submit_if_ready", "(", "args", ",", "submit_args", ",", "config", ")", "if", "submit_outcome", "is", "not", "None", ":", "# submitted, nothing more to do", "return", "submit_outcome", "import_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "try", ":", "records", "=", "dump2polarion", ".", "import_results", "(", "args", ".", "input_file", ",", "older_than", "=", "import_time", ")", "testrun_id", "=", "get_testrun_id", "(", "args", ",", "config", ",", "records", ".", "testrun", ")", "exporter", "=", "dump2polarion", ".", "XunitExport", "(", "testrun_id", ",", "records", ",", "config", ",", "transform_func", "=", "transform_func", ")", "output", "=", "exporter", ".", "export", "(", ")", "except", "NothingToDoException", "as", "info", ":", "logger", ".", "info", "(", "info", ")", "return", "0", "except", "(", "EnvironmentError", ",", "Dump2PolarionException", ")", "as", "err", ":", "logger", ".", "fatal", "(", "err", ")", "return", "1", "if", "args", ".", "output_file", "or", "args", ".", "no_submit", ":", "# when no output file is specified, the 'testrun_TESTRUN_ID-TIMESTAMP'", "# file will be created in current directory", "exporter", ".", "write_xml", "(", "output", ",", "args", ".", "output_file", ")", "if", "not", "args", ".", "no_submit", ":", "response", "=", "dump2polarion", ".", "submit_and_verify", "(", "output", ",", "config", "=", "config", ",", "*", "*", "submit_args", ")", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "args", ".", "input_file", ")", "if", "ext", ".", "lower", "(", ")", "in", "dbtools", ".", "SQLITE_EXT", "and", "response", ":", "dbtools", ".", "mark_exported_sqlite", "(", "args", ".", "input_file", ",", "import_time", ")", "return", "0", "if", "response", "else", "2", "return", "0" ]
Dumper main function.
[ "Dumper", "main", "function", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L150-L190
train
carta/ldap_tools
src/ldap_tools/client.py
Client.load_ldap_config
def load_ldap_config(self): # pragma: no cover """Configure LDAP Client settings.""" try: with open('{}/ldap_info.yaml'.format(self.config_dir), 'r') as FILE: config = yaml.load(FILE) self.host = config['server'] self.user_dn = config['user_dn'] self.port = config['port'] self.basedn = config['basedn'] self.mail_domain = config['mail_domain'] self.service_ou = config['service_ou'] except OSError as err: print('{}: Config file ({}/ldap_info.yaml) not found'.format( type(err), self.config_dir))
python
def load_ldap_config(self): # pragma: no cover """Configure LDAP Client settings.""" try: with open('{}/ldap_info.yaml'.format(self.config_dir), 'r') as FILE: config = yaml.load(FILE) self.host = config['server'] self.user_dn = config['user_dn'] self.port = config['port'] self.basedn = config['basedn'] self.mail_domain = config['mail_domain'] self.service_ou = config['service_ou'] except OSError as err: print('{}: Config file ({}/ldap_info.yaml) not found'.format( type(err), self.config_dir))
[ "def", "load_ldap_config", "(", "self", ")", ":", "# pragma: no cover", "try", ":", "with", "open", "(", "'{}/ldap_info.yaml'", ".", "format", "(", "self", ".", "config_dir", ")", ",", "'r'", ")", "as", "FILE", ":", "config", "=", "yaml", ".", "load", "(", "FILE", ")", "self", ".", "host", "=", "config", "[", "'server'", "]", "self", ".", "user_dn", "=", "config", "[", "'user_dn'", "]", "self", ".", "port", "=", "config", "[", "'port'", "]", "self", ".", "basedn", "=", "config", "[", "'basedn'", "]", "self", ".", "mail_domain", "=", "config", "[", "'mail_domain'", "]", "self", ".", "service_ou", "=", "config", "[", "'service_ou'", "]", "except", "OSError", "as", "err", ":", "print", "(", "'{}: Config file ({}/ldap_info.yaml) not found'", ".", "format", "(", "type", "(", "err", ")", ",", "self", ".", "config_dir", ")", ")" ]
Configure LDAP Client settings.
[ "Configure", "LDAP", "Client", "settings", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L24-L38
train
carta/ldap_tools
src/ldap_tools/client.py
Client.load_ldap_password
def load_ldap_password(self): # pragma: no cover """Import LDAP password from file.""" with open('{}/ldap.secret'.format(self.config_dir), 'r') as FILE: secure_config = FILE.read() self.user_pw = base64.b64decode(secure_config.encode())
python
def load_ldap_password(self): # pragma: no cover """Import LDAP password from file.""" with open('{}/ldap.secret'.format(self.config_dir), 'r') as FILE: secure_config = FILE.read() self.user_pw = base64.b64decode(secure_config.encode())
[ "def", "load_ldap_password", "(", "self", ")", ":", "# pragma: no cover", "with", "open", "(", "'{}/ldap.secret'", ".", "format", "(", "self", ".", "config_dir", ")", ",", "'r'", ")", "as", "FILE", ":", "secure_config", "=", "FILE", ".", "read", "(", ")", "self", ".", "user_pw", "=", "base64", ".", "b64decode", "(", "secure_config", ".", "encode", "(", ")", ")" ]
Import LDAP password from file.
[ "Import", "LDAP", "password", "from", "file", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L40-L44
train
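load_ldap_password expects ldap.secret to hold a base64-encoded password and leaves user_pw as bytes; a round-trip sketch of the encoding it assumes (the password is invented):

import base64

encoded = base64.b64encode(b"s3cret").decode()  # what ldap.secret should contain
base64.b64decode(encoded.encode())              # b's3cret' -- the resulting user_pw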
carta/ldap_tools
src/ldap_tools/client.py
Client.connection
def connection(self): # pragma: no cover """Establish LDAP connection.""" # self.server allows us to fetch server info # (including LDAP schema list) if we wish to # add this feature later self.server = ldap3.Server(self.host, port=self.port, get_info=ldap3.ALL) self.conn = ldap3.Connection( self.server, user=self.user_dn, password=self.user_pw, auto_bind=True, lazy=True, receive_timeout=1)
python
def connection(self): # pragma: no cover """Establish LDAP connection.""" # self.server allows us to fetch server info # (including LDAP schema list) if we wish to # add this feature later self.server = ldap3.Server(self.host, port=self.port, get_info=ldap3.ALL) self.conn = ldap3.Connection( self.server, user=self.user_dn, password=self.user_pw, auto_bind=True, lazy=True, receive_timeout=1)
[ "def", "connection", "(", "self", ")", ":", "# pragma: no cover", "# self.server allows us to fetch server info", "# (including LDAP schema list) if we wish to", "# add this feature later", "self", ".", "server", "=", "ldap3", ".", "Server", "(", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "get_info", "=", "ldap3", ".", "ALL", ")", "self", ".", "conn", "=", "ldap3", ".", "Connection", "(", "self", ".", "server", ",", "user", "=", "self", ".", "user_dn", ",", "password", "=", "self", ".", "user_pw", ",", "auto_bind", "=", "True", ",", "lazy", "=", "True", ",", "receive_timeout", "=", "1", ")" ]
Establish LDAP connection.
[ "Establish", "LDAP", "connection", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L46-L58
train
carta/ldap_tools
src/ldap_tools/client.py
Client.add
def add(self, distinguished_name, object_class, attributes): """ Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr """ self.conn.add(distinguished_name, object_class, attributes)
python
def add(self, distinguished_name, object_class, attributes): """ Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr """ self.conn.add(distinguished_name, object_class, attributes)
[ "def", "add", "(", "self", ",", "distinguished_name", ",", "object_class", ",", "attributes", ")", ":", "self", ".", "conn", ".", "add", "(", "distinguished_name", ",", "object_class", ",", "attributes", ")" ]
Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr
[ "Add", "object", "to", "LDAP", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L60-L72
train
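A usage sketch for Client.add; the DN, object classes and attributes are invented, and client stands for an already-connected Client instance:

client.add(
    "cn=sre,ou=Group,dc=example,dc=org",   # distinguished_name
    ["top", "posixGroup"],                 # object_class, a list of length >= 1
    {"cn": "sre", "gidNumber": "10042"},   # attributes dict
)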
carta/ldap_tools
src/ldap_tools/client.py
Client.search
def search(self, filter, attributes=None): """Search LDAP for records.""" if attributes is None: attributes = ['*'] if filter is None: filter = ["(objectclass=*)"] # Convert filter list into an LDAP-consumable format filterstr = "(&{})".format(''.join(filter)) self.conn.search( search_base=self.basedn, search_filter=filterstr, search_scope=ldap3.SUBTREE, attributes=attributes) return self.conn.entries
python
def search(self, filter, attributes=None): """Search LDAP for records.""" if attributes is None: attributes = ['*'] if filter is None: filter = ["(objectclass=*)"] # Convert filter list into an LDAP-consumable format filterstr = "(&{})".format(''.join(filter)) self.conn.search( search_base=self.basedn, search_filter=filterstr, search_scope=ldap3.SUBTREE, attributes=attributes) return self.conn.entries
[ "def", "search", "(", "self", ",", "filter", ",", "attributes", "=", "None", ")", ":", "if", "attributes", "is", "None", ":", "attributes", "=", "[", "'*'", "]", "if", "filter", "is", "None", ":", "filter", "=", "[", "\"(objectclass=*)\"", "]", "# Convert filter list into an LDAP-consumable format", "filterstr", "=", "\"(&{})\"", ".", "format", "(", "''", ".", "join", "(", "filter", ")", ")", "self", ".", "conn", ".", "search", "(", "search_base", "=", "self", ".", "basedn", ",", "search_filter", "=", "filterstr", ",", "search_scope", "=", "ldap3", ".", "SUBTREE", ",", "attributes", "=", "attributes", ")", "return", "self", ".", "conn", ".", "entries" ]
Search LDAP for records.
[ "Search", "LDAP", "for", "records", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L93-L108
train
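Each entry in the filter list passed to Client.search is a complete parenthesized expression; the method ANDs them together, so the call below sends the filter string (&(objectclass=posixAccount)(uid=jdoe)). Names are invented and client is an already-connected instance:

entries = client.search(
    ["(objectclass=posixAccount)", "(uid=jdoe)"],
    attributes=["uid", "uidNumber"],
)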
carta/ldap_tools
src/ldap_tools/client.py
Client.get_max_id
def get_max_id(self, object_type, role): """Get the highest used ID.""" if object_type == 'user': objectclass = 'posixAccount' ldap_attr = 'uidNumber' elif object_type == 'group': # pragma: no cover objectclass = 'posixGroup' ldap_attr = 'gidNumber' else: raise ldap_tools.exceptions.InvalidResult('Unknown object type') minID, maxID = Client.__set_id_boundary(role) filter = [ "(objectclass={})".format(objectclass), "({}>={})".format(ldap_attr, minID) ] if maxID is not None: filter.append("({}<={})".format(ldap_attr, maxID)) id_list = self.search(filter, [ldap_attr]) if id_list == []: id = minID else: if object_type == 'user': id = max([i.uidNumber.value for i in id_list]) + 1 elif object_type == 'group': id = max([i.gidNumber.value for i in id_list]) + 1 else: raise ldap_tools.exceptions.InvalidResult('Unknown object') return id
python
def get_max_id(self, object_type, role): """Get the highest used ID.""" if object_type == 'user': objectclass = 'posixAccount' ldap_attr = 'uidNumber' elif object_type == 'group': # pragma: no cover objectclass = 'posixGroup' ldap_attr = 'gidNumber' else: raise ldap_tools.exceptions.InvalidResult('Unknown object type') minID, maxID = Client.__set_id_boundary(role) filter = [ "(objectclass={})".format(objectclass), "({}>={})".format(ldap_attr, minID) ] if maxID is not None: filter.append("({}<={})".format(ldap_attr, maxID)) id_list = self.search(filter, [ldap_attr]) if id_list == []: id = minID else: if object_type == 'user': id = max([i.uidNumber.value for i in id_list]) + 1 elif object_type == 'group': id = max([i.gidNumber.value for i in id_list]) + 1 else: raise ldap_tools.exceptions.InvalidResult('Unknown object') return id
[ "def", "get_max_id", "(", "self", ",", "object_type", ",", "role", ")", ":", "if", "object_type", "==", "'user'", ":", "objectclass", "=", "'posixAccount'", "ldap_attr", "=", "'uidNumber'", "elif", "object_type", "==", "'group'", ":", "# pragma: no cover", "objectclass", "=", "'posixGroup'", "ldap_attr", "=", "'gidNumber'", "else", ":", "raise", "ldap_tools", ".", "exceptions", ".", "InvalidResult", "(", "'Unknown object type'", ")", "minID", ",", "maxID", "=", "Client", ".", "__set_id_boundary", "(", "role", ")", "filter", "=", "[", "\"(objectclass={})\"", ".", "format", "(", "objectclass", ")", ",", "\"({}>={})\"", ".", "format", "(", "ldap_attr", ",", "minID", ")", "]", "if", "maxID", "is", "not", "None", ":", "filter", ".", "append", "(", "\"({}<={})\"", ".", "format", "(", "ldap_attr", ",", "maxID", ")", ")", "id_list", "=", "self", ".", "search", "(", "filter", ",", "[", "ldap_attr", "]", ")", "if", "id_list", "==", "[", "]", ":", "id", "=", "minID", "else", ":", "if", "object_type", "==", "'user'", ":", "id", "=", "max", "(", "[", "i", ".", "uidNumber", ".", "value", "for", "i", "in", "id_list", "]", ")", "+", "1", "elif", "object_type", "==", "'group'", ":", "id", "=", "max", "(", "[", "i", ".", "gidNumber", ".", "value", "for", "i", "in", "id_list", "]", ")", "+", "1", "else", ":", "raise", "ldap_tools", ".", "exceptions", ".", "InvalidResult", "(", "'Unknown object'", ")", "return", "id" ]
Get the next ID to use (highest used ID + 1).
[ "Get", "the", "highest", "used", "ID", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L110-L142
train
smnorris/bcdata
bcdata/wcs.py
get_dem
def get_dem(bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25): """Get 25m DEM for provided bounds, write to GeoTIFF """ bbox = ",".join([str(b) for b in bounds]) # todo: validate resolution units are equivalent to src_crs units # build request payload = { "service": "WCS", "version": "1.0.0", "request": "GetCoverage", "coverage": "pub:bc_elevation_25m_bcalb", "Format": "GeoTIFF", "bbox": bbox, "CRS": src_crs, "RESPONSE_CRS": dst_crs, "resx": str(resolution), "resy": str(resolution), } # request data from WCS r = requests.get(bcdata.WCS_URL, params=payload) # save to tiff if r.status_code == 200: with open(out_file, "wb") as file: file.write(r.content) return out_file else: raise RuntimeError( "WCS request failed with status code {}".format(str(r.status_code)) )
python
def get_dem(bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25): """Get 25m DEM for provided bounds, write to GeoTIFF """ bbox = ",".join([str(b) for b in bounds]) # todo: validate resolution units are equivalent to src_crs units # build request payload = { "service": "WCS", "version": "1.0.0", "request": "GetCoverage", "coverage": "pub:bc_elevation_25m_bcalb", "Format": "GeoTIFF", "bbox": bbox, "CRS": src_crs, "RESPONSE_CRS": dst_crs, "resx": str(resolution), "resy": str(resolution), } # request data from WCS r = requests.get(bcdata.WCS_URL, params=payload) # save to tiff if r.status_code == 200: with open(out_file, "wb") as file: file.write(r.content) return out_file else: raise RuntimeError( "WCS request failed with status code {}".format(str(r.status_code)) )
[ "def", "get_dem", "(", "bounds", ",", "out_file", "=", "\"dem.tif\"", ",", "src_crs", "=", "\"EPSG:3005\"", ",", "dst_crs", "=", "\"EPSG:3005\"", ",", "resolution", "=", "25", ")", ":", "bbox", "=", "\",\"", ".", "join", "(", "[", "str", "(", "b", ")", "for", "b", "in", "bounds", "]", ")", "# todo: validate resolution units are equivalent to src_crs units", "# build request", "payload", "=", "{", "\"service\"", ":", "\"WCS\"", ",", "\"version\"", ":", "\"1.0.0\"", ",", "\"request\"", ":", "\"GetCoverage\"", ",", "\"coverage\"", ":", "\"pub:bc_elevation_25m_bcalb\"", ",", "\"Format\"", ":", "\"GeoTIFF\"", ",", "\"bbox\"", ":", "bbox", ",", "\"CRS\"", ":", "src_crs", ",", "\"RESPONSE_CRS\"", ":", "dst_crs", ",", "\"resx\"", ":", "str", "(", "resolution", ")", ",", "\"resy\"", ":", "str", "(", "resolution", ")", ",", "}", "# request data from WCS", "r", "=", "requests", ".", "get", "(", "bcdata", ".", "WCS_URL", ",", "params", "=", "payload", ")", "# save to tiff", "if", "r", ".", "status_code", "==", "200", ":", "with", "open", "(", "out_file", ",", "\"wb\"", ")", "as", "file", ":", "file", ".", "write", "(", "r", ".", "content", ")", "return", "out_file", "else", ":", "raise", "RuntimeError", "(", "\"WCS request failed with status code {}\"", ".", "format", "(", "str", "(", "r", ".", "status_code", ")", ")", ")" ]
Get 25m DEM for provided bounds, write to GeoTIFF
[ "Get", "25m", "DEM", "for", "provided", "bounds", "write", "to", "GeoTIFF" ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wcs.py#L10-L38
train
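A usage sketch for get_dem; the bounds are placeholder EPSG:3005 coordinates in the (xmin, ymin, xmax, ymax) order the bbox parameter implies:

out = get_dem(
    [1046891, 704778, 1055345, 709629],  # placeholder BC Albers extent
    out_file="dem_25m.tif",
    resolution=25,
)  # writes dem_25m.tif and returns the path, or raises RuntimeError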
carta/ldap_tools
src/ldap_tools/commands.py
main
def main(): # pragma: no cover """Enter main function.""" entry_point.add_command(CLI.version) entry_point.add_command(UserCLI.user) entry_point.add_command(GroupCLI.group) entry_point.add_command(AuditCLI.audit) entry_point.add_command(KeyCLI.key) entry_point()
python
def main(): # pragma: no cover """Enter main function.""" entry_point.add_command(CLI.version) entry_point.add_command(UserCLI.user) entry_point.add_command(GroupCLI.group) entry_point.add_command(AuditCLI.audit) entry_point.add_command(KeyCLI.key) entry_point()
[ "def", "main", "(", ")", ":", "# pragma: no cover", "entry_point", ".", "add_command", "(", "CLI", ".", "version", ")", "entry_point", ".", "add_command", "(", "UserCLI", ".", "user", ")", "entry_point", ".", "add_command", "(", "GroupCLI", ".", "group", ")", "entry_point", ".", "add_command", "(", "AuditCLI", ".", "audit", ")", "entry_point", ".", "add_command", "(", "KeyCLI", ".", "key", ")", "entry_point", "(", ")" ]
Enter main function.
[ "Enter", "main", "function", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/commands.py#L29-L37
train
rgmining/common
common/decorator.py
print_args
def print_args(output=sys.stdout):
    """Decorate a function so that its arguments are printed before it is called.

    Args:
      output: writable to print args. (Default: sys.stdout)
    """
    def decorator(func):
        """The decorator function.
        """
        @wraps(func)
        def _(*args, **kwargs):
            """The decorated function.
            """
            output.write(
                "Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return _
    return decorator
python
def print_args(output=sys.stdout):
    """Decorate a function so that its arguments are printed before it is called.

    Args:
      output: writable to print args. (Default: sys.stdout)
    """
    def decorator(func):
        """The decorator function.
        """
        @wraps(func)
        def _(*args, **kwargs):
            """The decorated function.
            """
            output.write(
                "Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return _
    return decorator
[ "def", "print_args", "(", "output", "=", "sys", ".", "stdout", ")", ":", "def", "decorator", "(", "func", ")", ":", "\"\"\"The decorator function.\n \"\"\"", "@", "wraps", "(", "func", ")", "def", "_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"The decorated function.\n \"\"\"", "output", ".", "write", "(", "\"Args: {0}, KwArgs: {1}\\n\"", ".", "format", "(", "str", "(", "args", ")", ",", "str", "(", "kwargs", ")", ")", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_", "return", "decorator" ]
Decorate a function so that its arguments are printed before it is called. Args: output: writable to print args. (Default: sys.stdout)
[ "Decorate", "a", "function", "so", "that", "print", "arguments", "before", "calling", "it", "." ]
2462a4d54f32a82eadd7b1e28675b3c8bcd172b2
https://github.com/rgmining/common/blob/2462a4d54f32a82eadd7b1e28675b3c8bcd172b2/common/decorator.py#L30-L47
train
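A quick demonstration of print_args (the wrapped function is invented); note the factory must be called, even when keeping the default output:

@print_args()
def add(a, b):
    return a + b

add(1, b=2)
# writes "Args: (1,), KwArgs: {'b': 2}" to stdout, then returns 3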
rgmining/common
common/decorator.py
constant
def constant(func):
    """Decorate a function so that the result is a constant value.

    Functions wrapped by this decorator will be run just one time.
    The computational result will be stored and reused for any other
    input. To store each result for each input, use :func:`memoized`
    instead.
    """
    @wraps(func)
    def _(*args, **kwargs):
        """The decorated function.
        """
        if not _.res:
            _.res = func(*args, **kwargs)
        return _.res
    _.res = None
    return _
python
def constant(func):
    """Decorate a function so that the result is a constant value.

    Functions wrapped by this decorator will be run just one time.
    The computational result will be stored and reused for any other
    input. To store each result for each input, use :func:`memoized`
    instead.
    """
    @wraps(func)
    def _(*args, **kwargs):
        """The decorated function.
        """
        if not _.res:
            _.res = func(*args, **kwargs)
        return _.res
    _.res = None
    return _
[ "def", "constant", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"The decorated function.\n \"\"\"", "if", "not", "_", ".", "res", ":", "_", ".", "res", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_", ".", "res", "_", ".", "res", "=", "None", "return", "_" ]
Decorate a function so that the result is a constant value. Functions wrapped by this decorator will be run just one time. The computational result will be stored and reused for any other input. To store each result for each input, use :func:`memoized` instead.
[ "Decorate", "a", "function", "so", "that", "the", "result", "is", "a", "constant", "value", "." ]
2462a4d54f32a82eadd7b1e28675b3c8bcd172b2
https://github.com/rgmining/common/blob/2462a4d54f32a82eadd7b1e28675b3c8bcd172b2/common/decorator.py#L70-L86
train
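The cache in constant ignores later arguments, and because the guard is `if not _.res`, a falsy first result (0, '', None) would be recomputed; a sketch:

@constant
def answer(n):
    return n * 2

answer(21)   # 42, computed once
answer(100)  # still 42 -- the stored result wins over the new argument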
rgmining/common
common/decorator.py
memoized
def memoized(func):
    """Decorate a function to memoize results.

    Functions wrapped by this decorator won't compute twice for each input.
    Any results will be stored. This decorator might increase memory usage
    in order to shorten computational time.
    """
    cache = {}

    @wraps(func)
    def memoized_function(*args):
        """The decorated function.
        """
        try:
            return cache[args]
        except KeyError:
            value = func(*args)
            try:
                cache[args] = value
            except MemoryError:
                cache.clear()
                gc.collect()
            return value

    return memoized_function
python
def memoized(func):
    """Decorate a function to memoize results.

    Functions wrapped by this decorator won't compute twice for each input.
    Any results will be stored. This decorator might increase memory usage
    in order to shorten computational time.
    """
    cache = {}

    @wraps(func)
    def memoized_function(*args):
        """The decorated function.
        """
        try:
            return cache[args]
        except KeyError:
            value = func(*args)
            try:
                cache[args] = value
            except MemoryError:
                cache.clear()
                gc.collect()
            return value

    return memoized_function
[ "def", "memoized", "(", "func", ")", ":", "cache", "=", "{", "}", "@", "wraps", "(", "func", ")", "def", "memoized_function", "(", "*", "args", ")", ":", "\"\"\"The decorated function.\n \"\"\"", "try", ":", "return", "cache", "[", "args", "]", "except", "KeyError", ":", "value", "=", "func", "(", "*", "args", ")", "try", ":", "cache", "[", "args", "]", "=", "value", "except", "MemoryError", ":", "cache", ".", "clear", "(", ")", "gc", ".", "collect", "(", ")", "return", "value", "return", "memoized_function" ]
Decorate a function to memoize results. Functions wrapped by this decorator won't compute twice for each input. Any results will be stored. This decorator might increase memory usage in order to shorten computational time.
[ "Decorate", "a", "function", "to", "memoize", "results", "." ]
2462a4d54f32a82eadd7b1e28675b3c8bcd172b2
https://github.com/rgmining/common/blob/2462a4d54f32a82eadd7b1e28675b3c8bcd172b2/common/decorator.py#L89-L111
train
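Since memoized keys its cache on the positional args tuple, arguments must be hashable; the classic fit is a recursive numeric function:

@memoized
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(80)  # fast: each n is computed once, then served from the cache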
mkoura/dump2polarion
dump2polarion/results/dbtools.py
_open_sqlite
def _open_sqlite(db_file): """Opens database connection.""" db_file = os.path.expanduser(db_file) try: with open(db_file): # test that the file can be accessed pass return sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES) except (IOError, sqlite3.Error) as err: raise Dump2PolarionException("{}".format(err))
python
def _open_sqlite(db_file): """Opens database connection.""" db_file = os.path.expanduser(db_file) try: with open(db_file): # test that the file can be accessed pass return sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES) except (IOError, sqlite3.Error) as err: raise Dump2PolarionException("{}".format(err))
[ "def", "_open_sqlite", "(", "db_file", ")", ":", "db_file", "=", "os", ".", "path", ".", "expanduser", "(", "db_file", ")", "try", ":", "with", "open", "(", "db_file", ")", ":", "# test that the file can be accessed", "pass", "return", "sqlite3", ".", "connect", "(", "db_file", ",", "detect_types", "=", "sqlite3", ".", "PARSE_DECLTYPES", ")", "except", "(", "IOError", ",", "sqlite3", ".", "Error", ")", "as", "err", ":", "raise", "Dump2PolarionException", "(", "\"{}\"", ".", "format", "(", "err", ")", ")" ]
Opens database connection.
[ "Opens", "database", "connection", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/dbtools.py#L33-L42
train
mkoura/dump2polarion
dump2polarion/results/dbtools.py
import_sqlite
def import_sqlite(db_file, older_than=None, **kwargs): """Reads the content of the database file and returns imported data.""" conn = _open_sqlite(db_file) cur = conn.cursor() # get rows that were not exported yet select = "SELECT * FROM testcases WHERE exported != 'yes'" if older_than: cur.execute(" ".join((select, "AND sqltime < ?")), (older_than,)) else: cur.execute(select) columns = [description[0] for description in cur.description] rows = cur.fetchall() # map data to columns results = [] for row in rows: record = OrderedDict(list(zip(columns, row))) results.append(record) testrun = _get_testrun_from_sqlite(conn) conn.close() return xunit_exporter.ImportedData(results=results, testrun=testrun)
python
def import_sqlite(db_file, older_than=None, **kwargs): """Reads the content of the database file and returns imported data.""" conn = _open_sqlite(db_file) cur = conn.cursor() # get rows that were not exported yet select = "SELECT * FROM testcases WHERE exported != 'yes'" if older_than: cur.execute(" ".join((select, "AND sqltime < ?")), (older_than,)) else: cur.execute(select) columns = [description[0] for description in cur.description] rows = cur.fetchall() # map data to columns results = [] for row in rows: record = OrderedDict(list(zip(columns, row))) results.append(record) testrun = _get_testrun_from_sqlite(conn) conn.close() return xunit_exporter.ImportedData(results=results, testrun=testrun)
[ "def", "import_sqlite", "(", "db_file", ",", "older_than", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "_open_sqlite", "(", "db_file", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "# get rows that were not exported yet", "select", "=", "\"SELECT * FROM testcases WHERE exported != 'yes'\"", "if", "older_than", ":", "cur", ".", "execute", "(", "\" \"", ".", "join", "(", "(", "select", ",", "\"AND sqltime < ?\"", ")", ")", ",", "(", "older_than", ",", ")", ")", "else", ":", "cur", ".", "execute", "(", "select", ")", "columns", "=", "[", "description", "[", "0", "]", "for", "description", "in", "cur", ".", "description", "]", "rows", "=", "cur", ".", "fetchall", "(", ")", "# map data to columns", "results", "=", "[", "]", "for", "row", "in", "rows", ":", "record", "=", "OrderedDict", "(", "list", "(", "zip", "(", "columns", ",", "row", ")", ")", ")", "results", ".", "append", "(", "record", ")", "testrun", "=", "_get_testrun_from_sqlite", "(", "conn", ")", "conn", ".", "close", "(", ")", "return", "xunit_exporter", ".", "ImportedData", "(", "results", "=", "results", ",", "testrun", "=", "testrun", ")" ]
Reads the content of the database file and returns imported data.
[ "Reads", "the", "content", "of", "the", "database", "file", "and", "returns", "imported", "data", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/dbtools.py#L46-L69
train
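Rows from import_sqlite come back as OrderedDicts keyed by column name, inside an ImportedData namedtuple; a consuming sketch (the path is invented, and 'title' is an assumed column -- only 'verdict', 'exported' and 'sqltime' are referenced by this module):

import datetime

data = import_sqlite("~/test_results.db3", older_than=datetime.datetime.utcnow())
for record in data.results:
    print(record.get("title"), record.get("verdict"))  # 'title' is hypothetical
print(data.testrun)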
mkoura/dump2polarion
dump2polarion/results/dbtools.py
mark_exported_sqlite
def mark_exported_sqlite(db_file, older_than=None): """Marks rows with verdict as exported.""" logger.debug("Marking rows in database as exported") conn = _open_sqlite(db_file) cur = conn.cursor() update = "UPDATE testcases SET exported = 'yes' WHERE verdict IS NOT null AND verdict != ''" if older_than: cur.execute(" ".join((update, "AND sqltime < ?")), (older_than,)) else: cur.execute(update) conn.commit() conn.close()
python
def mark_exported_sqlite(db_file, older_than=None): """Marks rows with verdict as exported.""" logger.debug("Marking rows in database as exported") conn = _open_sqlite(db_file) cur = conn.cursor() update = "UPDATE testcases SET exported = 'yes' WHERE verdict IS NOT null AND verdict != ''" if older_than: cur.execute(" ".join((update, "AND sqltime < ?")), (older_than,)) else: cur.execute(update) conn.commit() conn.close()
[ "def", "mark_exported_sqlite", "(", "db_file", ",", "older_than", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Marking rows in database as exported\"", ")", "conn", "=", "_open_sqlite", "(", "db_file", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "update", "=", "\"UPDATE testcases SET exported = 'yes' WHERE verdict IS NOT null AND verdict != ''\"", "if", "older_than", ":", "cur", ".", "execute", "(", "\" \"", ".", "join", "(", "(", "update", ",", "\"AND sqltime < ?\"", ")", ")", ",", "(", "older_than", ",", ")", ")", "else", ":", "cur", ".", "execute", "(", "update", ")", "conn", ".", "commit", "(", ")", "conn", ".", "close", "(", ")" ]
Marks rows with verdict as exported.
[ "Marks", "rows", "with", "verdict", "as", "exported", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/dbtools.py#L72-L83
train
mgoral/subconvert
src/subconvert/parsing/Formats.py
SubFormat.createSubtitle
def createSubtitle(self, fps, section): """Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot be parsed, None is returned. By default 'section' is checked against 'subPattern' regular expression.""" matched = self._pattern.search(section) if matched is not None: matchedDict = matched.groupdict() return Subtitle( self.frametime(fps, matchedDict.get("time_from")), self.frametime(fps, matchedDict.get("time_to")), self.formatSub(matchedDict.get("text")) ) return None
python
def createSubtitle(self, fps, section): """Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot be parsed, None is returned. By default 'section' is checked against 'subPattern' regular expression.""" matched = self._pattern.search(section) if matched is not None: matchedDict = matched.groupdict() return Subtitle( self.frametime(fps, matchedDict.get("time_from")), self.frametime(fps, matchedDict.get("time_to")), self.formatSub(matchedDict.get("text")) ) return None
[ "def", "createSubtitle", "(", "self", ",", "fps", ",", "section", ")", ":", "matched", "=", "self", ".", "_pattern", ".", "search", "(", "section", ")", "if", "matched", "is", "not", "None", ":", "matchedDict", "=", "matched", ".", "groupdict", "(", ")", "return", "Subtitle", "(", "self", ".", "frametime", "(", "fps", ",", "matchedDict", ".", "get", "(", "\"time_from\"", ")", ")", ",", "self", ".", "frametime", "(", "fps", ",", "matchedDict", ".", "get", "(", "\"time_to\"", ")", ")", ",", "self", ".", "formatSub", "(", "matchedDict", ".", "get", "(", "\"text\"", ")", ")", ")", "return", "None" ]
Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot be parsed, None is returned. By default 'section' is checked against 'subPattern' regular expression.
[ "Returns", "a", "correct", "Subtitle", "object", "from", "a", "text", "given", "in", "section", ".", "If", "section", "cannot", "be", "parsed", "None", "is", "returned", ".", "By", "default", "section", "is", "checked", "against", "subPattern", "regular", "expression", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Formats.py#L130-L142
train
mgoral/subconvert
src/subconvert/parsing/Formats.py
SubFormat.convertTime
def convertTime(self, frametime, which): """Convert FrameTime object to properly formatted string that describes subtitle start or end time.""" SubAssert(frametime.frame >= 0, _("Negative time present.")) return frametime.frame
python
def convertTime(self, frametime, which): """Convert FrameTime object to properly formatted string that describes subtitle start or end time.""" SubAssert(frametime.frame >= 0, _("Negative time present.")) return frametime.frame
[ "def", "convertTime", "(", "self", ",", "frametime", ",", "which", ")", ":", "SubAssert", "(", "frametime", ".", "frame", ">=", "0", ",", "_", "(", "\"Negative time present.\"", ")", ")", "return", "frametime", ".", "frame" ]
Convert FrameTime object to properly formatted string that describes subtitle start or end time.
[ "Convert", "FrameTime", "object", "to", "properly", "formatted", "string", "that", "describes", "subtitle", "start", "or", "end", "time", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Formats.py#L181-L185
train
pneff/wsgiservice
wsgiservice/status.py
_set_location
def _set_location(instance, location):
    """Sets a ``Location`` response header. If the location does not start
    with a slash, the path of the current request is prepended.

    :param instance: Resource instance (used to access the request and
                     response)
    :type instance: :class:`webob.resource.Resource`
    """
    location = str(location)
    if not location.startswith('/'):
        location = urljoin(instance.request_path.rstrip('/') + '/', location)
    instance.response.location = location
python
def _set_location(instance, location):
    """Sets a ``Location`` response header. If the location does not start
    with a slash, the path of the current request is prepended.

    :param instance: Resource instance (used to access the request and
                     response)
    :type instance: :class:`webob.resource.Resource`
    """
    location = str(location)
    if not location.startswith('/'):
        location = urljoin(instance.request_path.rstrip('/') + '/', location)
    instance.response.location = location
[ "def", "_set_location", "(", "instance", ",", "location", ")", ":", "location", "=", "str", "(", "location", ")", "if", "not", "location", ".", "startswith", "(", "'/'", ")", ":", "location", "=", "urljoin", "(", "instance", ".", "request_path", ".", "rstrip", "(", "'/'", ")", "+", "'/'", ",", "location", ")", "instance", ".", "response", ".", "location", "=", "location" ]
Sets a ``Location`` response header. If the location does not start with a
slash, the path of the current request is prepended.

:param instance: Resource instance (used to access the request and response)
:type instance: :class:`webob.resource.Resource`
[ "Sets", "a", "Location", "response", "header", ".", "If", "the", "location", "does", "not", "start", "with", "a", "slash", "the", "path", "of", "the", "current", "request", "is", "prepended", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/status.py#L367-L378
train
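A quick illustration of the urljoin behaviour _set_location depends on, shown with Python 3's urllib.parse (the actual import source in status.py is not shown in this slice):

from urllib.parse import urljoin  # status.py's own urljoin import is not shown here

request_path = '/users/5'
# Relative locations are resolved against the request path plus a trailing slash:
print(urljoin(request_path.rstrip('/') + '/', 'documents'))  # /users/5/documents
# Absolute locations (leading slash) skip the join entirely in _set_location.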
djaodjin/djaodjin-deployutils
deployutils/apps/django/templatetags/deployutils_extratags.py
no_cache
def no_cache(asset_url):
    """
    Removes query parameters
    """
    pos = asset_url.rfind('?')
    if pos > 0:
        asset_url = asset_url[:pos]
    return asset_url
python
def no_cache(asset_url):
    """
    Removes query parameters
    """
    pos = asset_url.rfind('?')
    if pos > 0:
        asset_url = asset_url[:pos]
    return asset_url
[ "def", "no_cache", "(", "asset_url", ")", ":", "pos", "=", "asset_url", ".", "rfind", "(", "'?'", ")", "if", "pos", ">", "0", ":", "asset_url", "=", "asset_url", "[", ":", "pos", "]", "return", "asset_url" ]
Removes query parameters
[ "Removes", "query", "parameters" ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/templatetags/deployutils_extratags.py#L61-L68
train
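Two quick checks of the rfind-based truncation; note the pos > 0 guard means a URL whose very first character is '?' passes through unchanged:

print(no_cache('/static/app.js?v=2024'))  # /static/app.js
print(no_cache('?v=2024'))                # ?v=2024 -- rfind returns 0, so the guard fails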
SignalN/language
language/ngrams.py
__ngrams
def __ngrams(s, n=3):
    """
    Raw n-grams from a sequence

    If the sequence is a string, it will return char-level n-grams.
    If the sequence is a list of words, it will return word-level n-grams.

    Note: it treats space (' ') and punctuation like any other character.

    >>> ngrams('This is not a test!')
    [('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'i'),
     (' ', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'n'), (' ', 'n', 'o'),
     ('n', 'o', 't'), ('o', 't', ' '), ('t', ' ', 'a'), (' ', 'a', ' '),
     ('a', ' ', 't'), (' ', 't', 'e'), ('t', 'e', 's'), ('e', 's', 't'),
     ('s', 't', '!')]

    >>> ngrams(["This", "is", "not", "a", "test!"])
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    Args:
        s: a string or a list of strings
        n: an int for the n in n-gram

    Returns:
        list: tuples of char-level or word-level n-grams
    """
    return list(zip(*[s[i:] for i in range(n)]))
python
def __ngrams(s, n=3):
    """
    Raw n-grams from a sequence

    If the sequence is a string, it will return char-level n-grams.
    If the sequence is a list of words, it will return word-level n-grams.

    Note: it treats space (' ') and punctuation like any other character.

    >>> ngrams('This is not a test!')
    [('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'i'),
     (' ', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'n'), (' ', 'n', 'o'),
     ('n', 'o', 't'), ('o', 't', ' '), ('t', ' ', 'a'), (' ', 'a', ' '),
     ('a', ' ', 't'), (' ', 't', 'e'), ('t', 'e', 's'), ('e', 's', 't'),
     ('s', 't', '!')]

    >>> ngrams(["This", "is", "not", "a", "test!"])
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    Args:
        s: a string or a list of strings
        n: an int for the n in n-gram

    Returns:
        list: tuples of char-level or word-level n-grams
    """
    return list(zip(*[s[i:] for i in range(n)]))
[ "def", "__ngrams", "(", "s", ",", "n", "=", "3", ")", ":", "return", "list", "(", "zip", "(", "*", "[", "s", "[", "i", ":", "]", "for", "i", "in", "range", "(", "n", ")", "]", ")", ")" ]
Raw n-grams from a sequence

If the sequence is a string, it will return char-level n-grams.
If the sequence is a list of words, it will return word-level n-grams.

Note: it treats space (' ') and punctuation like any other character.

>>> ngrams('This is not a test!')
[('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'i'), (' ', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'n'), (' ', 'n', 'o'), ('n', 'o', 't'), ('o', 't', ' '), ('t', ' ', 'a'), (' ', 'a', ' '), ('a', ' ', 't'), (' ', 't', 'e'), ('t', 'e', 's'), ('e', 's', 't'), ('s', 't', '!')]

>>> ngrams(["This", "is", "not", "a", "test!"])
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

Args:
    s: a string or a list of strings
    n: an int for the n in n-gram

Returns:
    list: tuples of char-level or word-level n-grams
[ "Raw", "n", "-", "grams", "from", "a", "sequence" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L3-L27
train
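The one-liner works by zipping staggered slices of the sequence; a small trace of the mechanism:

s = 'abcde'
n = 3
print([s[i:] for i in range(n)])               # ['abcde', 'bcde', 'cde']
print(list(zip(*[s[i:] for i in range(n)])))   # [('a', 'b', 'c'), ('b', 'c', 'd'), ('c', 'd', 'e')]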
SignalN/language
language/ngrams.py
word_ngrams
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the n-grams are
    simply the sequence itself.

    >>> ng.word_ngrams('Test!')
    [('Test!')]

    Args:
        s: a string

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    return __ngrams(tokens, n=min(len(tokens), n))
python
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the n-grams are
    simply the sequence itself.

    >>> ng.word_ngrams('Test!')
    [('Test!')]

    Args:
        s: a string

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    return __ngrams(tokens, n=min(len(tokens), n))
[ "def", "word_ngrams", "(", "s", ",", "n", "=", "3", ",", "token_fn", "=", "tokens", ".", "on_whitespace", ")", ":", "tokens", "=", "token_fn", "(", "s", ")", "return", "__ngrams", "(", "tokens", ",", "n", "=", "min", "(", "len", "(", "tokens", ")", ",", "n", ")", ")" ]
Word-level n-grams in a string

By default, whitespace is assumed to be a word boundary.

>>> ng.word_ngrams('This is not a test!')
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

If the sequence's length is less than or equal to n, the n-grams are simply
the sequence itself.

>>> ng.word_ngrams('Test!')
[('Test!')]

Args:
    s: a string

Returns:
    list: tuples of word-level n-grams
[ "Word", "-", "level", "n", "-", "grams", "in", "a", "string" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L29-L51
train
SignalN/language
language/ngrams.py
char_ngrams
def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Character-level n-grams from within the words in a string.

    By default, the word boundary is assumed to be whitespace.

    n-grams are not taken across word boundaries, only within words.

    If a word's length is less than or equal to n, the n-grams are simply a
    list with the word itself.

    >>> ng.char_ngrams('This is not a test!')
    ['Thi', 'his', 'is', 'not', 'a', 'tes', 'est', 'st!']

    Therefore some n-grams may have a length less than n, like 'is' and 'a'
    in this example.

    Args:
        s: a string
        n: an int for the n in n-gram
        token_fn: a function that splits a string into a list of strings

    Returns:
        list: strings of char-level n-grams
    """
    tokens = token_fn(s)
    ngram_tuples = [__ngrams(t, n=min(len(t), n)) for t in tokens]

    def unpack(l):
        return sum(l, [])

    def untuple(l):
        return [''.join(t) for t in l]

    return untuple(unpack(ngram_tuples))
python
def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Character-level n-grams from within the words in a string.

    By default, the word boundary is assumed to be whitespace.

    n-grams are not taken across word boundaries, only within words.

    If a word's length is less than or equal to n, the n-grams are simply a
    list with the word itself.

    >>> ng.char_ngrams('This is not a test!')
    ['Thi', 'his', 'is', 'not', 'a', 'tes', 'est', 'st!']

    Therefore some n-grams may have a length less than n, like 'is' and 'a'
    in this example.

    Args:
        s: a string
        n: an int for the n in n-gram
        token_fn: a function that splits a string into a list of strings

    Returns:
        list: strings of char-level n-grams
    """
    tokens = token_fn(s)
    ngram_tuples = [__ngrams(t, n=min(len(t), n)) for t in tokens]

    def unpack(l):
        return sum(l, [])

    def untuple(l):
        return [''.join(t) for t in l]

    return untuple(unpack(ngram_tuples))
[ "def", "char_ngrams", "(", "s", ",", "n", "=", "3", ",", "token_fn", "=", "tokens", ".", "on_whitespace", ")", ":", "tokens", "=", "token_fn", "(", "s", ")", "ngram_tuples", "=", "[", "__ngrams", "(", "t", ",", "n", "=", "min", "(", "len", "(", "t", ")", ",", "n", ")", ")", "for", "t", "in", "tokens", "]", "def", "unpack", "(", "l", ")", ":", "return", "sum", "(", "l", ",", "[", "]", ")", "def", "untuple", "(", "l", ")", ":", "return", "[", "''", ".", "join", "(", "t", ")", "for", "t", "in", "l", "]", "return", "untuple", "(", "unpack", "(", "ngram_tuples", ")", ")" ]
Character-level n-grams from within the words in a string.

By default, the word boundary is assumed to be whitespace.

n-grams are not taken across word boundaries, only within words.

If a word's length is less than or equal to n, the n-grams are simply a list
with the word itself.

>>> ng.char_ngrams('This is not a test!')
['Thi', 'his', 'is', 'not', 'a', 'tes', 'est', 'st!']

Therefore some n-grams may have a length less than n, like 'is' and 'a' in
this example.

Args:
    s: a string
    n: an int for the n in n-gram
    token_fn: a function that splits a string into a list of strings

Returns:
    list: strings of char-level n-grams
[ "Character", "-", "level", "n", "-", "grams", "from", "within", "the", "words", "in", "a", "string", "." ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L53-L83
train
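The two inner helpers are small idioms worth noting: sum(list_of_lists, []) flattens one level (quadratic in the worst case, but fine at these sizes), and ''.join turns each tuple back into a string:

ngram_tuples = [[('T', 'h', 'i'), ('h', 'i', 's')], [('i', 's')]]
flat = sum(ngram_tuples, [])          # [('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's')]
print([''.join(t) for t in flat])     # ['Thi', 'his', 'is']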
SignalN/language
language/ngrams.py
__matches
def __matches(s1, s2, ngrams_fn, n=3):
    """
    Returns the n-grams that match between two sequences

    See also: SequenceMatcher.get_matching_blocks

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set:
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)
python
def __matches(s1, s2, ngrams_fn, n=3):
    """
    Returns the n-grams that match between two sequences

    See also: SequenceMatcher.get_matching_blocks

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set:
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)
[ "def", "__matches", "(", "s1", ",", "s2", ",", "ngrams_fn", ",", "n", "=", "3", ")", ":", "ngrams1", ",", "ngrams2", "=", "set", "(", "ngrams_fn", "(", "s1", ",", "n", "=", "n", ")", ")", ",", "set", "(", "ngrams_fn", "(", "s2", ",", "n", "=", "n", ")", ")", "return", "ngrams1", ".", "intersection", "(", "ngrams2", ")" ]
Returns the n-grams that match between two sequences

See also: SequenceMatcher.get_matching_blocks

Args:
    s1: a string
    s2: another string
    n: an int for the n in n-gram

Returns:
    set:
[ "Returns", "the", "n", "-", "grams", "that", "match", "between", "two", "sequences" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L86-L101
train
SignalN/language
language/ngrams.py
char_matches
def char_matches(s1, s2, n=3):
    """
    Character-level n-grams that match between two strings

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set: the n-grams found in both strings
    """
    return __matches(s1, s2, char_ngrams, n=n)
python
def char_matches(s1, s2, n=3):
    """
    Character-level n-grams that match between two strings

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set: the n-grams found in both strings
    """
    return __matches(s1, s2, char_ngrams, n=n)
[ "def", "char_matches", "(", "s1", ",", "s2", ",", "n", "=", "3", ")", ":", "return", "__matches", "(", "s1", ",", "s2", ",", "char_ngrams", ",", "n", "=", "n", ")" ]
Character-level n-grams that match between two strings

Args:
    s1: a string
    s2: another string
    n: an int for the n in n-gram

Returns:
    set: the n-grams found in both strings
[ "Character", "-", "level", "n", "-", "grams", "that", "match", "between", "two", "strings" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L103-L115
train
SignalN/language
language/ngrams.py
word_matches
def word_matches(s1, s2, n=3):
    """
    Word-level n-grams that match between two strings

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set: the n-grams found in both strings
    """
    return __matches(s1, s2, word_ngrams, n=n)
python
def word_matches(s1, s2, n=3):
    """
    Word-level n-grams that match between two strings

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set: the n-grams found in both strings
    """
    return __matches(s1, s2, word_ngrams, n=n)
[ "def", "word_matches", "(", "s1", ",", "s2", ",", "n", "=", "3", ")", ":", "return", "__matches", "(", "s1", ",", "s2", ",", "word_ngrams", ",", "n", "=", "n", ")" ]
Word-level n-grams that match between two strings

Args:
    s1: a string
    s2: another string
    n: an int for the n in n-gram

Returns:
    set: the n-grams found in both strings
[ "Word", "-", "level", "n", "-", "grams", "that", "match", "between", "two", "strings" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L117-L129
train
SignalN/language
language/ngrams.py
__similarity
def __similarity(s1, s2, ngrams_fn, n=3):
    """
    The fraction of n-grams matching between two sequences

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        float: the fraction of n-grams matching
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
python
def __similarity(s1, s2, ngrams_fn, n=3):
    """
    The fraction of n-grams matching between two sequences

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        float: the fraction of n-grams matching
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
[ "def", "__similarity", "(", "s1", ",", "s2", ",", "ngrams_fn", ",", "n", "=", "3", ")", ":", "ngrams1", ",", "ngrams2", "=", "set", "(", "ngrams_fn", "(", "s1", ",", "n", "=", "n", ")", ")", ",", "set", "(", "ngrams_fn", "(", "s2", ",", "n", "=", "n", ")", ")", "matches", "=", "ngrams1", ".", "intersection", "(", "ngrams2", ")", "return", "2", "*", "len", "(", "matches", ")", "/", "(", "len", "(", "ngrams1", ")", "+", "len", "(", "ngrams2", ")", ")" ]
The fraction of n-grams matching between two sequences

Args:
    s1: a string
    s2: another string
    n: an int for the n in n-gram

Returns:
    float: the fraction of n-grams matching
[ "The", "fraction", "of", "n", "-", "grams", "matching", "between", "two", "sequences" ]
5c50c78f65bcc2c999b44d530e7412185248352d
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L131-L145
train
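The returned value is the Sørensen–Dice coefficient over the two n-gram sets, 2|A∩B| / (|A| + |B|); a worked example with character trigrams:

ngrams1 = {'nig', 'igh', 'ght'}           # char trigrams of 'night'
ngrams2 = {'nig', 'igh', 'ght', 'hts'}    # char trigrams of 'nights'
matches = ngrams1 & ngrams2               # 3 shared trigrams
print(2 * len(matches) / (len(ngrams1) + len(ngrams2)))  # 2*3/(3+4) ~= 0.857

Identical inputs score 1.0 and disjoint inputs 0.0; note that on Python 2 this expression would need from __future__ import division to avoid integer flooring.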
markfinger/assembla
assembla/api.py
API._get_json
def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None):
    """
    Base level method for fetching data from the API
    """
    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._get_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    # Handle pagination for requests carrying large amounts of data
    extra_params['page'] = extra_params.get('page', 1)

    # Generate the url to hit
    url = '{0}/{1}/{2}.json?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )

    # If the cache is being used and the url has been hit already
    if self.cache_responses and url in self.cache:
        response = self.cache[url]
    else:
        # Fetch the data
        headers = {
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
        }
        response = self.session.get(url=url, headers=headers)
        # If the cache is being used, update it
        if self.cache_responses:
            self.cache[url] = response

    if response.status_code == 200:  # OK
        results = []
        json_response = response.json()
        for obj in json_response:
            instance = model(data=obj)
            instance.api = self
            if space:
                instance.space = space
            results.append(instance)
        # If it looks like there are more pages to fetch,
        # try and fetch the next one
        per_page = extra_params.get('per_page', None)
        if (
            get_all and per_page and len(json_response)
            and per_page == len(json_response)
        ):
            extra_params['page'] += 1
            results = results + self._get_json(
                model, space, rel_path, extra_params, get_all=get_all)
        return results
    elif response.status_code == 204:  # No Content
        return []
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
python
def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None):
    """
    Base level method for fetching data from the API
    """
    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._get_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    # Handle pagination for requests carrying large amounts of data
    extra_params['page'] = extra_params.get('page', 1)

    # Generate the url to hit
    url = '{0}/{1}/{2}.json?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )

    # If the cache is being used and the url has been hit already
    if self.cache_responses and url in self.cache:
        response = self.cache[url]
    else:
        # Fetch the data
        headers = {
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
        }
        response = self.session.get(url=url, headers=headers)
        # If the cache is being used, update it
        if self.cache_responses:
            self.cache[url] = response

    if response.status_code == 200:  # OK
        results = []
        json_response = response.json()
        for obj in json_response:
            instance = model(data=obj)
            instance.api = self
            if space:
                instance.space = space
            results.append(instance)
        # If it looks like there are more pages to fetch,
        # try and fetch the next one
        per_page = extra_params.get('per_page', None)
        if (
            get_all and per_page and len(json_response)
            and per_page == len(json_response)
        ):
            extra_params['page'] += 1
            results = results + self._get_json(
                model, space, rel_path, extra_params, get_all=get_all)
        return results
    elif response.status_code == 204:  # No Content
        return []
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
[ "def", "_get_json", "(", "self", ",", "model", ",", "space", "=", "None", ",", "rel_path", "=", "None", ",", "extra_params", "=", "None", ",", "get_all", "=", "None", ")", ":", "# Only API.spaces and API.event should not provide", "# the `space argument", "if", "space", "is", "None", "and", "model", "not", "in", "(", "Space", ",", "Event", ")", ":", "raise", "Exception", "(", "'In general, `API._get_json` should always '", "'be called with a `space` argument.'", ")", "if", "not", "extra_params", ":", "extra_params", "=", "{", "}", "# Handle pagination for requests carrying large amounts of data", "extra_params", "[", "'page'", "]", "=", "extra_params", ".", "get", "(", "'page'", ",", "1", ")", "# Generate the url to hit", "url", "=", "'{0}/{1}/{2}.json?{3}'", ".", "format", "(", "settings", ".", "API_ROOT_PATH", ",", "settings", ".", "API_VERSION", ",", "rel_path", "or", "model", ".", "rel_path", ",", "urllib", ".", "urlencode", "(", "extra_params", ")", ",", ")", "# If the cache is being used and the url has been hit already", "if", "self", ".", "cache_responses", "and", "url", "in", "self", ".", "cache", ":", "response", "=", "self", ".", "cache", "[", "url", "]", "else", ":", "# Fetch the data", "headers", "=", "{", "'X-Api-Key'", ":", "self", ".", "key", ",", "'X-Api-Secret'", ":", "self", ".", "secret", ",", "}", "response", "=", "self", ".", "session", ".", "get", "(", "url", "=", "url", ",", "headers", "=", "headers", ")", "# If the cache is being used, update it", "if", "self", ".", "cache_responses", ":", "self", ".", "cache", "[", "url", "]", "=", "response", "if", "response", ".", "status_code", "==", "200", ":", "# OK", "results", "=", "[", "]", "json_response", "=", "response", ".", "json", "(", ")", "for", "obj", "in", "json_response", ":", "instance", "=", "model", "(", "data", "=", "obj", ")", "instance", ".", "api", "=", "self", "if", "space", ":", "instance", ".", "space", "=", "space", "results", ".", "append", "(", "instance", ")", "# If it looks like there are more pages to fetch,", "# try and fetch the next one", "per_page", "=", "extra_params", ".", "get", "(", "'per_page'", ",", "None", ")", "if", "(", "get_all", "and", "per_page", "and", "len", "(", "json_response", ")", "and", "per_page", "==", "len", "(", "json_response", ")", ")", ":", "extra_params", "[", "'page'", "]", "+=", "1", "results", "=", "results", "+", "self", ".", "_get_json", "(", "model", ",", "space", ",", "rel_path", ",", "extra_params", ",", "get_all", "=", "get_all", ")", "return", "results", "elif", "response", ".", "status_code", "==", "204", ":", "# No Content", "return", "[", "]", "else", ":", "# Most likely a 404 Not Found", "raise", "Exception", "(", "'Code {0} returned from `{1}`. Response text: \"{2}\".'", ".", "format", "(", "response", ".", "status_code", ",", "url", ",", "response", ".", "text", ")", ")" ]
Base level method for fetching data from the API
[ "Base", "level", "method", "for", "fetching", "data", "from", "the", "API" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L42-L112
train
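A minimal sketch of the pagination stop condition, with a stub fetch_page in place of the real HTTP call: recursion continues only while a full page comes back, so a total count that is an exact multiple of per_page costs one extra (empty) request:

def fetch_all(fetch_page, per_page, page=1):
    results = fetch_page(page)
    if len(results) == per_page:      # a full page hints at more data
        results = results + fetch_all(fetch_page, per_page, page + 1)
    return results

pages = {1: ['a', 'b', 'c'], 2: ['d']}
print(fetch_all(lambda p: pages.get(p, []), per_page=3))  # ['a', 'b', 'c', 'd']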
markfinger/assembla
assembla/api.py
API._post_json
def _post_json(self, instance, space=None, rel_path=None, extra_params=None):
    """
    Base level method for updating data via the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._post_json` should always '
            'be called with a `space` argument.'
        )

    if 'number' in instance.data:
        raise AttributeError(
            'You cannot create a ticket which already has a number'
        )

    if not extra_params:
        extra_params = {}

    # Generate the url to hit
    url = '{0}/{1}/{2}?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.post(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 201:  # OK
        instance = model(data=response.json())
        instance.api = self
        if space:
            instance.space = space
        return instance
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
python
def _post_json(self, instance, space=None, rel_path=None, extra_params=None):
    """
    Base level method for updating data via the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._post_json` should always '
            'be called with a `space` argument.'
        )

    if 'number' in instance.data:
        raise AttributeError(
            'You cannot create a ticket which already has a number'
        )

    if not extra_params:
        extra_params = {}

    # Generate the url to hit
    url = '{0}/{1}/{2}?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.post(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 201:  # OK
        instance = model(data=response.json())
        instance.api = self
        if space:
            instance.space = space
        return instance
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
[ "def", "_post_json", "(", "self", ",", "instance", ",", "space", "=", "None", ",", "rel_path", "=", "None", ",", "extra_params", "=", "None", ")", ":", "model", "=", "type", "(", "instance", ")", "# Only API.spaces and API.event should not provide", "# the `space argument", "if", "space", "is", "None", "and", "model", "not", "in", "(", "Space", ",", "Event", ")", ":", "raise", "Exception", "(", "'In general, `API._post_json` should always '", "'be called with a `space` argument.'", ")", "if", "'number'", "in", "instance", ".", "data", ":", "raise", "AttributeError", "(", "'You cannot create a ticket which already has a number'", ")", "if", "not", "extra_params", ":", "extra_params", "=", "{", "}", "# Generate the url to hit", "url", "=", "'{0}/{1}/{2}?{3}'", ".", "format", "(", "settings", ".", "API_ROOT_PATH", ",", "settings", ".", "API_VERSION", ",", "rel_path", "or", "model", ".", "rel_path", ",", "urllib", ".", "urlencode", "(", "extra_params", ")", ",", ")", "# Fetch the data", "response", "=", "requests", ".", "post", "(", "url", "=", "url", ",", "data", "=", "json", ".", "dumps", "(", "instance", ".", "data", ")", ",", "headers", "=", "{", "'X-Api-Key'", ":", "self", ".", "key", ",", "'X-Api-Secret'", ":", "self", ".", "secret", ",", "'Content-type'", ":", "\"application/json\"", ",", "}", ",", ")", "if", "response", ".", "status_code", "==", "201", ":", "# OK", "instance", "=", "model", "(", "data", "=", "response", ".", "json", "(", ")", ")", "instance", ".", "api", "=", "self", "if", "space", ":", "instance", ".", "space", "=", "space", "return", "instance", "else", ":", "# Most likely a 404 Not Found", "raise", "Exception", "(", "'Code {0} returned from `{1}`. Response text: \"{2}\".'", ".", "format", "(", "response", ".", "status_code", ",", "url", ",", "response", ".", "text", ")", ")" ]
Base level method for updating data via the API
[ "Base", "level", "method", "for", "updating", "data", "via", "the", "API" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L114-L169
train
markfinger/assembla
assembla/api.py
API._put_json
def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None):
    """
    Base level method for adding new data to the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._put_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    if not id_field:
        id_field = 'number'

    # Generate the url to hit
    url = '{0}/{1}/{2}/{3}.json?{4}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.put(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 204:  # OK
        return instance
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
python
def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None):
    """
    Base level method for adding new data to the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._put_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    if not id_field:
        id_field = 'number'

    # Generate the url to hit
    url = '{0}/{1}/{2}/{3}.json?{4}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.put(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 204:  # OK
        return instance
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
[ "def", "_put_json", "(", "self", ",", "instance", ",", "space", "=", "None", ",", "rel_path", "=", "None", ",", "extra_params", "=", "None", ",", "id_field", "=", "None", ")", ":", "model", "=", "type", "(", "instance", ")", "# Only API.spaces and API.event should not provide", "# the `space argument", "if", "space", "is", "None", "and", "model", "not", "in", "(", "Space", ",", "Event", ")", ":", "raise", "Exception", "(", "'In general, `API._put_json` should always '", "'be called with a `space` argument.'", ")", "if", "not", "extra_params", ":", "extra_params", "=", "{", "}", "if", "not", "id_field", ":", "id_field", "=", "'number'", "# Generate the url to hit", "url", "=", "'{0}/{1}/{2}/{3}.json?{4}'", ".", "format", "(", "settings", ".", "API_ROOT_PATH", ",", "settings", ".", "API_VERSION", ",", "rel_path", "or", "model", ".", "rel_path", ",", "instance", "[", "id_field", "]", ",", "urllib", ".", "urlencode", "(", "extra_params", ")", ",", ")", "# Fetch the data", "response", "=", "requests", ".", "put", "(", "url", "=", "url", ",", "data", "=", "json", ".", "dumps", "(", "instance", ".", "data", ")", ",", "headers", "=", "{", "'X-Api-Key'", ":", "self", ".", "key", ",", "'X-Api-Secret'", ":", "self", ".", "secret", ",", "'Content-type'", ":", "\"application/json\"", ",", "}", ",", ")", "if", "response", ".", "status_code", "==", "204", ":", "# OK", "return", "instance", "else", ":", "# Most likely a 404 Not Found", "raise", "Exception", "(", "'Code {0} returned from `{1}`. Response text: \"{2}\".'", ".", "format", "(", "response", ".", "status_code", ",", "url", ",", "response", ".", "text", ")", ")" ]
Base level method for adding new data to the API
[ "Base", "level", "method", "for", "adding", "new", "data", "to", "the", "API" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L171-L221
train
markfinger/assembla
assembla/api.py
API._delete_json
def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None):
    """
    Base level method for removing data from the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._delete_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    if not id_field:
        id_field = 'number'

    if not instance.get(id_field, None):
        raise AttributeError(
            '%s does not have a value for the id field \'%s\'' % (
                instance.__class__.__name__,
                id_field
            )
        )

    # Generate the url to hit
    url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        append_to_path or '',
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.delete(
        url=url,
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 204:  # OK
        return True
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
python
def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None):
    """
    Base level method for removing data from the API
    """
    model = type(instance)

    # Only API.spaces and API.event should not provide
    # the `space argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._delete_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    if not id_field:
        id_field = 'number'

    if not instance.get(id_field, None):
        raise AttributeError(
            '%s does not have a value for the id field \'%s\'' % (
                instance.__class__.__name__,
                id_field
            )
        )

    # Generate the url to hit
    url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        append_to_path or '',
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.delete(
        url=url,
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 204:  # OK
        return True
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
[ "def", "_delete_json", "(", "self", ",", "instance", ",", "space", "=", "None", ",", "rel_path", "=", "None", ",", "extra_params", "=", "None", ",", "id_field", "=", "None", ",", "append_to_path", "=", "None", ")", ":", "model", "=", "type", "(", "instance", ")", "# Only API.spaces and API.event should not provide", "# the `space argument", "if", "space", "is", "None", "and", "model", "not", "in", "(", "Space", ",", "Event", ")", ":", "raise", "Exception", "(", "'In general, `API._delete_json` should always '", "'be called with a `space` argument.'", ")", "if", "not", "extra_params", ":", "extra_params", "=", "{", "}", "if", "not", "id_field", ":", "id_field", "=", "'number'", "if", "not", "instance", ".", "get", "(", "id_field", ",", "None", ")", ":", "raise", "AttributeError", "(", "'%s does not have a value for the id field \\'%s\\''", "%", "(", "instance", ".", "__class__", ".", "__name__", ",", "id_field", ")", ")", "# Generate the url to hit", "url", "=", "'{0}/{1}/{2}/{3}{4}.json?{5}'", ".", "format", "(", "settings", ".", "API_ROOT_PATH", ",", "settings", ".", "API_VERSION", ",", "rel_path", "or", "model", ".", "rel_path", ",", "instance", "[", "id_field", "]", ",", "append_to_path", "or", "''", ",", "urllib", ".", "urlencode", "(", "extra_params", ")", ",", ")", "# Fetch the data", "response", "=", "requests", ".", "delete", "(", "url", "=", "url", ",", "headers", "=", "{", "'X-Api-Key'", ":", "self", ".", "key", ",", "'X-Api-Secret'", ":", "self", ".", "secret", ",", "'Content-type'", ":", "\"application/json\"", ",", "}", ",", ")", "if", "response", ".", "status_code", "==", "204", ":", "# OK", "return", "True", "else", ":", "# Most likely a 404 Not Found", "raise", "Exception", "(", "'Code {0} returned from `{1}`. Response text: \"{2}\".'", ".", "format", "(", "response", ".", "status_code", ",", "url", ",", "response", ".", "text", ")", ")" ]
Base level method for removing data from the API
[ "Base", "level", "method", "for", "removing", "data", "from", "the", "API" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L223-L281
train
markfinger/assembla
assembla/api.py
API._bind_variables
def _bind_variables(self, instance, space):
    """
    Bind related variables to the instance
    """
    instance.api = self
    if space:
        instance.space = space
    return instance
python
def _bind_variables(self, instance, space):
    """
    Bind related variables to the instance
    """
    instance.api = self
    if space:
        instance.space = space
    return instance
[ "def", "_bind_variables", "(", "self", ",", "instance", ",", "space", ")", ":", "instance", ".", "api", "=", "self", "if", "space", ":", "instance", ".", "space", "=", "space", "return", "instance" ]
Bind related variables to the instance
[ "Bind", "related", "variables", "to", "the", "instance" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L283-L290
train
markfinger/assembla
assembla/api.py
Space.tickets
def tickets(self, extra_params=None):
    """
    All Tickets in this Space
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
        'report': 0,  # Report 0 is all tickets
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Ticket,
        space=self,
        rel_path=self._build_rel_path('tickets'),
        extra_params=params,
        get_all=True,  # Retrieve all tickets in the space
    )
python
def tickets(self, extra_params=None):
    """
    All Tickets in this Space
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
        'report': 0,  # Report 0 is all tickets
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Ticket,
        space=self,
        rel_path=self._build_rel_path('tickets'),
        extra_params=params,
        get_all=True,  # Retrieve all tickets in the space
    )
[ "def", "tickets", "(", "self", ",", "extra_params", "=", "None", ")", ":", "# Default params", "params", "=", "{", "'per_page'", ":", "settings", ".", "MAX_PER_PAGE", ",", "'report'", ":", "0", ",", "# Report 0 is all tickets", "}", "if", "extra_params", ":", "params", ".", "update", "(", "extra_params", ")", "return", "self", ".", "api", ".", "_get_json", "(", "Ticket", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'tickets'", ")", ",", "extra_params", "=", "params", ",", "get_all", "=", "True", ",", "# Retrieve all tickets in the space", ")" ]
All Tickets in this Space
[ "All", "Tickets", "in", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L301-L321
train
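Hypothetical usage sketch, assuming the client is constructed with an API key and secret (the constructor signature is not shown in this slice); extra_params merges over the defaults, so page size or the ticket report can be overridden per call:

api = API(key='...', secret='...')   # constructor arguments here are assumed
space = api.spaces()[0]

all_tickets = space.tickets()                       # report 0: every ticket
paged = space.tickets(extra_params={'per_page': 25})  # smaller pages, still exhaustive via get_all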
markfinger/assembla
assembla/api.py
Space.milestones
def milestones(self, extra_params=None):
    """
    All Milestones in this Space
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Milestone,
        space=self,
        rel_path=self._build_rel_path('milestones/all'),
        extra_params=params,
        get_all=True,  # Retrieve all milestones in the space
    )
python
def milestones(self, extra_params=None):
    """
    All Milestones in this Space
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Milestone,
        space=self,
        rel_path=self._build_rel_path('milestones/all'),
        extra_params=params,
        get_all=True,  # Retrieve all milestones in the space
    )
[ "def", "milestones", "(", "self", ",", "extra_params", "=", "None", ")", ":", "# Default params", "params", "=", "{", "'per_page'", ":", "settings", ".", "MAX_PER_PAGE", ",", "}", "if", "extra_params", ":", "params", ".", "update", "(", "extra_params", ")", "return", "self", ".", "api", ".", "_get_json", "(", "Milestone", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'milestones/all'", ")", ",", "extra_params", "=", "params", ",", "get_all", "=", "True", ",", "# Retrieve all milestones in the space", ")" ]
All Milestones in this Space
[ "All", "Milestones", "in", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L324-L343
train
markfinger/assembla
assembla/api.py
Space.tools
def tools(self, extra_params=None):
    """"
    All Tools in this Space
    """
    return self.api._get_json(
        SpaceTool,
        space=self,
        rel_path=self._build_rel_path('space_tools'),
        extra_params=extra_params,
    )
python
def tools(self, extra_params=None):
    """"
    All Tools in this Space
    """
    return self.api._get_json(
        SpaceTool,
        space=self,
        rel_path=self._build_rel_path('space_tools'),
        extra_params=extra_params,
    )
[ "def", "tools", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "SpaceTool", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'space_tools'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
All Tools in this Space
[ "All", "Tools", "in", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L346-L355
train
markfinger/assembla
assembla/api.py
Space.components
def components(self, extra_params=None):
    """"
    All components in this Space
    """
    return self.api._get_json(
        Component,
        space=self,
        rel_path=self._build_rel_path('ticket_components'),
        extra_params=extra_params,
    )
python
def components(self, extra_params=None):
    """"
    All components in this Space
    """
    return self.api._get_json(
        Component,
        space=self,
        rel_path=self._build_rel_path('ticket_components'),
        extra_params=extra_params,
    )
[ "def", "components", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "Component", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'ticket_components'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
All components in this Space
[ "All", "components", "in", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L358-L367
train
markfinger/assembla
assembla/api.py
Space.users
def users(self, extra_params=None):
    """
    All Users with access to this Space
    """
    return self.api._get_json(
        User,
        space=self,
        rel_path=self._build_rel_path('users'),
        extra_params=extra_params,
    )
python
def users(self, extra_params=None):
    """
    All Users with access to this Space
    """
    return self.api._get_json(
        User,
        space=self,
        rel_path=self._build_rel_path('users'),
        extra_params=extra_params,
    )
[ "def", "users", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "User", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'users'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
All Users with access to this Space
[ "All", "Users", "with", "access", "to", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L370-L379
train
markfinger/assembla
assembla/api.py
Space.tags
def tags(self, extra_params=None):
    """"
    All Tags in this Space
    """
    return self.api._get_json(
        Tag,
        space=self,
        rel_path=self._build_rel_path('tags'),
        extra_params=extra_params,
    )
python
def tags(self, extra_params=None):
    """"
    All Tags in this Space
    """
    return self.api._get_json(
        Tag,
        space=self,
        rel_path=self._build_rel_path('tags'),
        extra_params=extra_params,
    )
[ "def", "tags", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "Tag", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'tags'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
All Tags in this Space
[ "All", "Tags", "in", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L382-L391
train
markfinger/assembla
assembla/api.py
Space.wiki_pages
def wiki_pages(self, extra_params=None):
    """
    All Wiki Pages with access to this Space
    """
    return self.api._get_json(
        WikiPage,
        space=self,
        rel_path=self._build_rel_path('wiki_pages'),
        extra_params=extra_params,
    )
python
def wiki_pages(self, extra_params=None):
    """
    All Wiki Pages with access to this Space
    """
    return self.api._get_json(
        WikiPage,
        space=self,
        rel_path=self._build_rel_path('wiki_pages'),
        extra_params=extra_params,
    )
[ "def", "wiki_pages", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "WikiPage", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'wiki_pages'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
All Wiki Pages with access to this Space
[ "All", "Wiki", "Pages", "with", "access", "to", "this", "Space" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L394-L403
train
markfinger/assembla
assembla/api.py
Milestone.tickets
def tickets(self, extra_params=None):
    """
    All Tickets which are a part of this Milestone
    """
    return filter(
        lambda ticket: ticket.get('milestone_id', None) == self['id'],
        self.space.tickets(extra_params=extra_params)
    )
python
def tickets(self, extra_params=None):
    """
    All Tickets which are a part of this Milestone
    """
    return filter(
        lambda ticket: ticket.get('milestone_id', None) == self['id'],
        self.space.tickets(extra_params=extra_params)
    )
[ "def", "tickets", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "filter", "(", "lambda", "ticket", ":", "ticket", ".", "get", "(", "'milestone_id'", ",", "None", ")", "==", "self", "[", "'id'", "]", ",", "self", ".", "space", ".", "tickets", "(", "extra_params", "=", "extra_params", ")", ")" ]
All Tickets which are a part of this Milestone
[ "All", "Tickets", "which", "are", "a", "part", "of", "this", "Milestone" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L426-L433
train
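One caveat worth noting: on Python 3 filter() returns a lazy iterator, so materialise the result if you need to traverse it more than once. A short sketch, assuming a milestone fetched earlier:

milestone_tickets = list(milestone.tickets())  # `milestone` is assumed in scope
print(len(milestone_tickets))                  # safe to reuse after list()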
markfinger/assembla
assembla/api.py
Ticket.tags
def tags(self, extra_params=None):
    """
    All Tags in this Ticket
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Tag,
        space=self,
        rel_path=self.space._build_rel_path(
            'tickets/%s/tags' % self['number']
        ),
        extra_params=params,
        get_all=True,  # Retrieve all tags in the ticket
    )
python
def tags(self, extra_params=None):
    """
    All Tags in this Ticket
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        Tag,
        space=self,
        rel_path=self.space._build_rel_path(
            'tickets/%s/tags' % self['number']
        ),
        extra_params=params,
        get_all=True,  # Retrieve all tags in the ticket
    )
[ "def", "tags", "(", "self", ",", "extra_params", "=", "None", ")", ":", "# Default params", "params", "=", "{", "'per_page'", ":", "settings", ".", "MAX_PER_PAGE", ",", "}", "if", "extra_params", ":", "params", ".", "update", "(", "extra_params", ")", "return", "self", ".", "api", ".", "_get_json", "(", "Tag", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "space", ".", "_build_rel_path", "(", "'tickets/%s/tags'", "%", "self", "[", "'number'", "]", ")", ",", "extra_params", "=", "params", ",", "get_all", "=", "True", ",", "# Retrieve all tags in the ticket", ")" ]
All Tags in this Ticket
[ "All", "Tags", "in", "this", "Ticket" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L438-L459
train
markfinger/assembla
assembla/api.py
Ticket.milestone
def milestone(self, extra_params=None):
    """
    The Milestone that the Ticket is a part of
    """
    if self.get('milestone_id', None):
        milestones = self.space.milestones(id=self['milestone_id'],
                                           extra_params=extra_params)
        if milestones:
            return milestones[0]
python
def milestone(self, extra_params=None):
    """
    The Milestone that the Ticket is a part of
    """
    if self.get('milestone_id', None):
        milestones = self.space.milestones(id=self['milestone_id'],
                                           extra_params=extra_params)
        if milestones:
            return milestones[0]
[ "def", "milestone", "(", "self", ",", "extra_params", "=", "None", ")", ":", "if", "self", ".", "get", "(", "'milestone_id'", ",", "None", ")", ":", "milestones", "=", "self", ".", "space", ".", "milestones", "(", "id", "=", "self", "[", "'milestone_id'", "]", ",", "extra_params", "=", "extra_params", ")", "if", "milestones", ":", "return", "milestones", "[", "0", "]" ]
The Milestone that the Ticket is a part of
[ "The", "Milestone", "that", "the", "Ticket", "is", "a", "part", "of" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L462-L469
train
markfinger/assembla
assembla/api.py
Ticket.user
def user(self, extra_params=None):
    """
    The User currently assigned to the Ticket
    """
    if self.get('assigned_to_id', None):
        users = self.space.users(
            id=self['assigned_to_id'],
            extra_params=extra_params
        )
        if users:
            return users[0]
python
def user(self, extra_params=None):
    """
    The User currently assigned to the Ticket
    """
    if self.get('assigned_to_id', None):
        users = self.space.users(
            id=self['assigned_to_id'],
            extra_params=extra_params
        )
        if users:
            return users[0]
[ "def", "user", "(", "self", ",", "extra_params", "=", "None", ")", ":", "if", "self", ".", "get", "(", "'assigned_to_id'", ",", "None", ")", ":", "users", "=", "self", ".", "space", ".", "users", "(", "id", "=", "self", "[", "'assigned_to_id'", "]", ",", "extra_params", "=", "extra_params", ")", "if", "users", ":", "return", "users", "[", "0", "]" ]
The User currently assigned to the Ticket
[ "The", "User", "currently", "assigned", "to", "the", "Ticket" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L471-L481
train
markfinger/assembla
assembla/api.py
Ticket.component
def component(self, extra_params=None):
    """
    The Component currently assigned to the Ticket
    """
    if self.get('component_id', None):
        components = self.space.components(id=self['component_id'],
                                           extra_params=extra_params)
        if components:
            return components[0]
python
def component(self, extra_params=None):
    """
    The Component currently assigned to the Ticket
    """
    if self.get('component_id', None):
        components = self.space.components(id=self['component_id'],
                                           extra_params=extra_params)
        if components:
            return components[0]
[ "def", "component", "(", "self", ",", "extra_params", "=", "None", ")", ":", "if", "self", ".", "get", "(", "'component_id'", ",", "None", ")", ":", "components", "=", "self", ".", "space", ".", "components", "(", "id", "=", "self", "[", "'component_id'", "]", ",", "extra_params", "=", "extra_params", ")", "if", "components", ":", "return", "components", "[", "0", "]" ]
The Component currently assigned to the Ticket
[ "The", "Component", "currently", "assigned", "to", "the", "Ticket" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L483-L490
train
markfinger/assembla
assembla/api.py
Ticket.comments
def comments(self, extra_params=None):
    """
    All Comments in this Ticket
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        TicketComment,
        space=self,
        rel_path=self.space._build_rel_path(
            'tickets/%s/ticket_comments' % self['number']
        ),
        extra_params=params,
        get_all=True,  # Retrieve all comments in the ticket
    )
python
def comments(self, extra_params=None):
    """
    All Comments in this Ticket
    """
    # Default params
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }

    if extra_params:
        params.update(extra_params)

    return self.api._get_json(
        TicketComment,
        space=self,
        rel_path=self.space._build_rel_path(
            'tickets/%s/ticket_comments' % self['number']
        ),
        extra_params=params,
        get_all=True,  # Retrieve all comments in the ticket
    )
[ "def", "comments", "(", "self", ",", "extra_params", "=", "None", ")", ":", "# Default params", "params", "=", "{", "'per_page'", ":", "settings", ".", "MAX_PER_PAGE", ",", "}", "if", "extra_params", ":", "params", ".", "update", "(", "extra_params", ")", "return", "self", ".", "api", ".", "_get_json", "(", "TicketComment", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "space", ".", "_build_rel_path", "(", "'tickets/%s/ticket_comments'", "%", "self", "[", "'number'", "]", ")", ",", "extra_params", "=", "params", ",", "get_all", "=", "True", ",", "# Retrieve all comments in the ticket", ")" ]
All Comments in this Ticket
[ "All", "Comments", "in", "this", "Ticket" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L493-L514
train
markfinger/assembla
assembla/api.py
Ticket.write
def write(self):
    """
    Create or update the Ticket on Assembla
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.")

    if self.get('number'):
        # Modifying an existing ticket
        method = self.space.api._put_json
    else:
        # Creating a new ticket
        method = self.space.api._post_json

    return method(
        self,
        space=self.space,
        rel_path=self.space._build_rel_path('tickets'),
    )
python
def write(self):
    """
    Create or update the Ticket on Assembla
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.")

    if self.get('number'):
        # Modifying an existing ticket
        method = self.space.api._put_json
    else:
        # Creating a new ticket
        method = self.space.api._post_json

    return method(
        self,
        space=self.space,
        rel_path=self.space._build_rel_path('tickets'),
    )
[ "def", "write", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'space'", ")", ":", "raise", "AttributeError", "(", "\"A ticket must have a 'space' attribute before you can write it to Assembla.\"", ")", "if", "self", ".", "get", "(", "'number'", ")", ":", "# Modifying an existing ticket", "method", "=", "self", ".", "space", ".", "api", ".", "_put_json", "else", ":", "# Creating a new ticket", "method", "=", "self", ".", "space", ".", "api", ".", "_post_json", "return", "method", "(", "self", ",", "space", "=", "self", ".", "space", ",", "rel_path", "=", "self", ".", "space", ".", "_build_rel_path", "(", "'tickets'", ")", ",", ")" ]
Create or update the Ticket on Assembla
[ "Create", "or", "update", "the", "Ticket", "on", "Assembla" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L517-L533
train
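A hypothetical create-then-update round trip, assuming Ticket and an authenticated space object are in scope, and that a Ticket can be built from a plain data dict the way _get_json builds instances:

ticket = Ticket(data={'summary': 'Fix the build'})
ticket.space = space            # write() refuses to run without a bound space
created = ticket.write()        # no 'number' yet, so this dispatches to _post_json

created.data['priority'] = 1    # mutate the underlying data dict
created.write()                 # 'number' is set now, so this dispatches to _put_json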
markfinger/assembla
assembla/api.py
Ticket.delete
def delete(self):
    """
    Remove the Ticket from Assembla
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.")

    return self.space.api._delete_json(
        self,
        space=self.space,
        rel_path=self.space._build_rel_path('tickets'),
    )
python
def delete(self):
    """
    Remove the Ticket from Assembla
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.")

    return self.space.api._delete_json(
        self,
        space=self.space,
        rel_path=self.space._build_rel_path('tickets'),
    )
[ "def", "delete", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'space'", ")", ":", "raise", "AttributeError", "(", "\"A ticket must have a 'space' attribute before you can remove it from Assembla.\"", ")", "return", "self", ".", "space", ".", "api", ".", "_delete_json", "(", "self", ",", "space", "=", "self", ".", "space", ",", "rel_path", "=", "self", ".", "space", ".", "_build_rel_path", "(", "'tickets'", ")", ",", ")" ]
Remove the Ticket from Assembla
[ "Remove", "the", "Ticket", "from", "Assembla" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L535-L546
train
markfinger/assembla
assembla/api.py
User.tickets
def tickets(self, extra_params=None):
    """
    A User's tickets across all available spaces
    """
    tickets = []
    for space in self.api.spaces():
        tickets += filter(
            lambda ticket: ticket.get('assigned_to_id', None) == self['id'],
            space.tickets(extra_params=extra_params)
        )
    return tickets
python
def tickets(self, extra_params=None):
    """
    A User's tickets across all available spaces
    """
    tickets = []
    for space in self.api.spaces():
        tickets += filter(
            lambda ticket: ticket.get('assigned_to_id', None) == self['id'],
            space.tickets(extra_params=extra_params)
        )
    return tickets
[ "def", "tickets", "(", "self", ",", "extra_params", "=", "None", ")", ":", "tickets", "=", "[", "]", "for", "space", "in", "self", ".", "api", ".", "spaces", "(", ")", ":", "tickets", "+=", "filter", "(", "lambda", "ticket", ":", "ticket", ".", "get", "(", "'assigned_to_id'", ",", "None", ")", "==", "self", "[", "'id'", "]", ",", "space", ".", "tickets", "(", "extra_params", "=", "extra_params", ")", ")", "return", "tickets" ]
A User's tickets across all available spaces
[ "A", "User", "s", "tickets", "across", "all", "available", "spaces" ]
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L559-L569
train
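User.tickets walks every space and re-fetches each space's full ticket list, so repeated calls can be expensive. A hedged sketch of leaning on the response cache that _get_json consults (attribute names read from _get_json above; how the client initialises self.cache is not shown here):

api.cache_responses = True      # _get_json reuses self.cache entries when this is set
user = api.spaces()[0].users()[0]
assigned = user.tickets()       # each space's ticket list now comes from cache on re-walks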