Dataset columns (one record per function):

- repo: string (7 to 55 chars)
- path: string (4 to 127 chars)
- func_name: string (1 to 88 chars)
- original_string: string (75 to 19.8k chars)
- language: string (1 class: "python")
- code: string (75 to 19.8k chars)
- code_tokens: list
- docstring: string (3 to 17.3k chars)
- docstring_tokens: list
- sha: string (40 chars)
- url: string (87 to 242 chars)
- partition: string (1 class: "train")

repo: HumanCellAtlas/dcp-cli
path: hca/upload/lib/api_client.py
func_name: ApiClient.validation_statuses
language: python
code:

def validation_statuses(self, area_uuid):
    """
    Get count of validation statuses for all files in upload_area

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :return: a dict with a key for each state and the value being the count of files in that state
    :rtype: dict
    :raises UploadApiException: if information could not be obtained
    """
    path = "/area/{uuid}/validations".format(uuid=area_uuid)
    result = self._make_request('get', path)
    return result.json()
"""
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | [
"def",
"validation_statuses",
"(",
"self",
",",
"area_uuid",
")",
":",
"path",
"=",
"\"/area/{uuid}/validations\"",
".",
"format",
"(",
"uuid",
"=",
"area_uuid",
")",
"result",
"=",
"self",
".",
"_make_request",
"(",
"'get'",
",",
"path",
")",
"return",
"result",
".",
"json",
"(",
")"
]
| Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained | [
"Get",
"count",
"of",
"validation",
"statuses",
"for",
"all",
"files",
"in",
"upload_area"
]

sha: cc70817bc4e50944c709eaae160de0bf7a19f0f3
url: https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L214-L225
partition: train
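
A minimal usage sketch for the method above. How ApiClient is constructed and the example UUID are assumptions for illustration, not details taken from this record.

# Hypothetical usage -- ApiClient construction details are assumed
from hca.upload.lib.api_client import ApiClient

client = ApiClient()
counts = client.validation_statuses('4be1edc2-9176-4b6c-893f-e867c6dcae15')
# e.g. {'SCHEDULED': 1, 'VALIDATING': 2, 'VALIDATED': 40}
for state, count in sorted(counts.items()):
    print(state, count)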

repo: yoeo/guesslang
path: guesslang/guesser.py
func_name: Guess.language_name
language: python
code:

def language_name(self, text: str) -> str:
    """Predict the programming language name of the given source code.

    :param text: source code.
    :return: language name
    """
    values = extract(text)
    input_fn = _to_func(([values], []))

    pos: int = next(self._classifier.predict_classes(input_fn=input_fn))
    LOGGER.debug("Predicted language position %s", pos)
    return sorted(self.languages)[pos]
"""Predict the programming language name of the given source code.
:param text: source code.
:return: language name
"""
values = extract(text)
input_fn = _to_func(([values], []))
pos: int = next(self._classifier.predict_classes(input_fn=input_fn))
LOGGER.debug("Predicted language position %s", pos)
return sorted(self.languages)[pos] | [
"def",
"language_name",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"str",
":",
"values",
"=",
"extract",
"(",
"text",
")",
"input_fn",
"=",
"_to_func",
"(",
"(",
"[",
"values",
"]",
",",
"[",
"]",
")",
")",
"pos",
":",
"int",
"=",
"next",
"(",
"self",
".",
"_classifier",
".",
"predict_classes",
"(",
"input_fn",
"=",
"input_fn",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"Predicted language position %s\"",
",",
"pos",
")",
"return",
"sorted",
"(",
"self",
".",
"languages",
")",
"[",
"pos",
"]"
]
| Predict the programming language name of the given source code.
:param text: source code.
:return: language name | [
"Predict",
"the",
"programming",
"language",
"name",
"of",
"the",
"given",
"source",
"code",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/guesser.py#L61-L72
partition: train
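
A usage sketch, assuming the class is instantiated with its default pre-trained model:

from guesslang import Guess

guess = Guess()  # assumed: default read-only model
snippet = 'def greet(name):\n    print("Hello %s" % name)\n'
print(guess.language_name(snippet))  # expected: something like 'Python'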

repo: yoeo/guesslang
path: guesslang/guesser.py
func_name: Guess.scores
language: python
code:

def scores(self, text: str) -> Dict[str, float]:
    """A score for each language corresponding to the probability that
    the text is written in the given language.
    The score is a `float` value between 0.0 and 1.0

    :param text: source code.
    :return: language to score dictionary
    """
    values = extract(text)
    input_fn = _to_func(([values], []))

    prediction = self._classifier.predict_proba(input_fn=input_fn)
    probabilities = next(prediction).tolist()
    sorted_languages = sorted(self.languages)
    return dict(zip(sorted_languages, probabilities))
"""A score for each language corresponding to the probability that
the text is written in the given language.
The score is a `float` value between 0.0 and 1.0
:param text: source code.
:return: language to score dictionary
"""
values = extract(text)
input_fn = _to_func(([values], []))
prediction = self._classifier.predict_proba(input_fn=input_fn)
probabilities = next(prediction).tolist()
sorted_languages = sorted(self.languages)
return dict(zip(sorted_languages, probabilities)) | [
"def",
"scores",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"float",
"]",
":",
"values",
"=",
"extract",
"(",
"text",
")",
"input_fn",
"=",
"_to_func",
"(",
"(",
"[",
"values",
"]",
",",
"[",
"]",
")",
")",
"prediction",
"=",
"self",
".",
"_classifier",
".",
"predict_proba",
"(",
"input_fn",
"=",
"input_fn",
")",
"probabilities",
"=",
"next",
"(",
"prediction",
")",
".",
"tolist",
"(",
")",
"sorted_languages",
"=",
"sorted",
"(",
"self",
".",
"languages",
")",
"return",
"dict",
"(",
"zip",
"(",
"sorted_languages",
",",
"probabilities",
")",
")"
]
| A score for each language corresponding to the probability that
the text is written in the given language.
The score is a `float` value between 0.0 and 1.0
:param text: source code.
:return: language to score dictionary | [
"A",
"score",
"for",
"each",
"language",
"corresponding",
"to",
"the",
"probability",
"that",
"the",
"text",
"is",
"written",
"in",
"the",
"given",
"language",
".",
"The",
"score",
"is",
"a",
"float",
"value",
"between",
"0",
".",
"0",
"and",
"1",
".",
"0"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/guesser.py#L74-L87
partition: train
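
A sketch of how the returned mapping might be used; the snippet is made up:

from guesslang import Guess

guess = Guess()
scores = guess.scores('#include <stdio.h>\nint main(void) { return 0; }\n')
best = max(scores, key=scores.get)  # top-scoring language
print(best, round(scores[best], 3))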

repo: yoeo/guesslang
path: guesslang/guesser.py
func_name: Guess.probable_languages
language: python
code:

def probable_languages(
        self,
        text: str,
        max_languages: int = 3) -> Tuple[str, ...]:
    """List of the most probable programming languages,
    ordered from the most probable to the least probable one.

    :param text: source code.
    :param max_languages: maximum number of listed languages.
    :return: languages list
    """
    scores = self.scores(text)

    # Sorted from the most probable language to the least probable
    sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
    languages, probabilities = list(zip(*sorted_scores))

    # Find the most distant consecutive languages.
    # A logarithmic scale is used here because the probabilities
    # are most of the time really close to zero
    rescaled_probabilities = [log(proba) for proba in probabilities]
    distances = [
        rescaled_probabilities[pos] - rescaled_probabilities[pos+1]
        for pos in range(len(rescaled_probabilities)-1)]
    max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]

    limit = min(max_distance_pos, max_languages)
    return languages[:limit]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/guesser.py#L89-L116
partition: train
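
The cut-off logic can be checked in isolation: rank the scores, find the largest gap between consecutive log-probabilities, and truncate the list there. A self-contained sketch with made-up scores:

from math import log
from operator import itemgetter

scores = {'Python': 0.7, 'Ruby': 0.25, 'C': 1e-5, 'Go': 1e-6}  # made-up values
ranked = sorted(scores.items(), key=itemgetter(1), reverse=True)
languages, probabilities = zip(*ranked)
log_probs = [log(p) for p in probabilities]
gaps = [log_probs[i] - log_probs[i + 1] for i in range(len(log_probs) - 1)]
cut = max(enumerate(gaps, 1), key=itemgetter(1))[0]
print(languages[:min(cut, 3)])  # ('Python', 'Ruby'): the biggest log-gap is after Ruby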

repo: yoeo/guesslang
path: guesslang/guesser.py
func_name: Guess.learn
language: python
code:

def learn(self, input_dir: str) -> float:
    """Learn language features from source files.

    :raise GuesslangError: when the default model is used for learning
    :param input_dir: source code files directory.
    :return: learning accuracy
    """
    if self.is_default:
        LOGGER.error("Cannot learn using default model")
        raise GuesslangError('Cannot learn using default "readonly" model')

    languages = self.languages

    LOGGER.info("Extract training data")
    extensions = [ext for exts in languages.values() for ext in exts]
    files = search_files(input_dir, extensions)
    nb_files = len(files)
    chunk_size = min(int(CHUNK_PROPORTION * nb_files), CHUNK_SIZE)

    LOGGER.debug("Evaluation files count: %d", chunk_size)
    LOGGER.debug("Training files count: %d", nb_files - chunk_size)
    batches = _pop_many(files, chunk_size)

    LOGGER.debug("Prepare evaluation data")
    evaluation_data = extract_from_files(next(batches), languages)
    LOGGER.debug("Evaluation data count: %d", len(evaluation_data[0]))

    accuracy = 0
    total = ceil(nb_files / chunk_size) - 1
    LOGGER.info("Start learning")
    for pos, training_files in enumerate(batches, 1):
        LOGGER.info("Step %.2f%%", 100 * pos / total)

        LOGGER.debug("Training data extraction")
        training_data = extract_from_files(training_files, languages)
        LOGGER.debug("Training data count: %d", len(training_data[0]))

        steps = int(FITTING_FACTOR * len(training_data[0]) / 100)
        LOGGER.debug("Fitting, steps count: %d", steps)
        self._classifier.fit(input_fn=_to_func(training_data), steps=steps)

        LOGGER.debug("Evaluation")
        accuracy = self._classifier.evaluate(
            input_fn=_to_func(evaluation_data), steps=1)['accuracy']
        _comment(accuracy)

    return accuracy
"""Learn languages features from source files.
:raise GuesslangError: when the default model is used for learning
:param input_dir: source code files directory.
:return: learning accuracy
"""
if self.is_default:
LOGGER.error("Cannot learn using default model")
raise GuesslangError('Cannot learn using default "readonly" model')
languages = self.languages
LOGGER.info("Extract training data")
extensions = [ext for exts in languages.values() for ext in exts]
files = search_files(input_dir, extensions)
nb_files = len(files)
chunk_size = min(int(CHUNK_PROPORTION * nb_files), CHUNK_SIZE)
LOGGER.debug("Evaluation files count: %d", chunk_size)
LOGGER.debug("Training files count: %d", nb_files - chunk_size)
batches = _pop_many(files, chunk_size)
LOGGER.debug("Prepare evaluation data")
evaluation_data = extract_from_files(next(batches), languages)
LOGGER.debug("Evaluation data count: %d", len(evaluation_data[0]))
accuracy = 0
total = ceil(nb_files / chunk_size) - 1
LOGGER.info("Start learning")
for pos, training_files in enumerate(batches, 1):
LOGGER.info("Step %.2f%%", 100 * pos / total)
LOGGER.debug("Training data extraction")
training_data = extract_from_files(training_files, languages)
LOGGER.debug("Training data count: %d", len(training_data[0]))
steps = int(FITTING_FACTOR * len(training_data[0]) / 100)
LOGGER.debug("Fitting, steps count: %d", steps)
self._classifier.fit(input_fn=_to_func(training_data), steps=steps)
LOGGER.debug("Evaluation")
accuracy = self._classifier.evaluate(
input_fn=_to_func(evaluation_data), steps=1)['accuracy']
_comment(accuracy)
return accuracy | [
"def",
"learn",
"(",
"self",
",",
"input_dir",
":",
"str",
")",
"->",
"float",
":",
"if",
"self",
".",
"is_default",
":",
"LOGGER",
".",
"error",
"(",
"\"Cannot learn using default model\"",
")",
"raise",
"GuesslangError",
"(",
"'Cannot learn using default \"readonly\" model'",
")",
"languages",
"=",
"self",
".",
"languages",
"LOGGER",
".",
"info",
"(",
"\"Extract training data\"",
")",
"extensions",
"=",
"[",
"ext",
"for",
"exts",
"in",
"languages",
".",
"values",
"(",
")",
"for",
"ext",
"in",
"exts",
"]",
"files",
"=",
"search_files",
"(",
"input_dir",
",",
"extensions",
")",
"nb_files",
"=",
"len",
"(",
"files",
")",
"chunk_size",
"=",
"min",
"(",
"int",
"(",
"CHUNK_PROPORTION",
"*",
"nb_files",
")",
",",
"CHUNK_SIZE",
")",
"LOGGER",
".",
"debug",
"(",
"\"Evaluation files count: %d\"",
",",
"chunk_size",
")",
"LOGGER",
".",
"debug",
"(",
"\"Training files count: %d\"",
",",
"nb_files",
"-",
"chunk_size",
")",
"batches",
"=",
"_pop_many",
"(",
"files",
",",
"chunk_size",
")",
"LOGGER",
".",
"debug",
"(",
"\"Prepare evaluation data\"",
")",
"evaluation_data",
"=",
"extract_from_files",
"(",
"next",
"(",
"batches",
")",
",",
"languages",
")",
"LOGGER",
".",
"debug",
"(",
"\"Evaluation data count: %d\"",
",",
"len",
"(",
"evaluation_data",
"[",
"0",
"]",
")",
")",
"accuracy",
"=",
"0",
"total",
"=",
"ceil",
"(",
"nb_files",
"/",
"chunk_size",
")",
"-",
"1",
"LOGGER",
".",
"info",
"(",
"\"Start learning\"",
")",
"for",
"pos",
",",
"training_files",
"in",
"enumerate",
"(",
"batches",
",",
"1",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"Step %.2f%%\"",
",",
"100",
"*",
"pos",
"/",
"total",
")",
"LOGGER",
".",
"debug",
"(",
"\"Training data extraction\"",
")",
"training_data",
"=",
"extract_from_files",
"(",
"training_files",
",",
"languages",
")",
"LOGGER",
".",
"debug",
"(",
"\"Training data count: %d\"",
",",
"len",
"(",
"training_data",
"[",
"0",
"]",
")",
")",
"steps",
"=",
"int",
"(",
"FITTING_FACTOR",
"*",
"len",
"(",
"training_data",
"[",
"0",
"]",
")",
"/",
"100",
")",
"LOGGER",
".",
"debug",
"(",
"\"Fitting, steps count: %d\"",
",",
"steps",
")",
"self",
".",
"_classifier",
".",
"fit",
"(",
"input_fn",
"=",
"_to_func",
"(",
"training_data",
")",
",",
"steps",
"=",
"steps",
")",
"LOGGER",
".",
"debug",
"(",
"\"Evaluation\"",
")",
"accuracy",
"=",
"self",
".",
"_classifier",
".",
"evaluate",
"(",
"input_fn",
"=",
"_to_func",
"(",
"evaluation_data",
")",
",",
"steps",
"=",
"1",
")",
"[",
"'accuracy'",
"]",
"_comment",
"(",
"accuracy",
")",
"return",
"accuracy"
]
| Learn languages features from source files.
:raise GuesslangError: when the default model is used for learning
:param input_dir: source code files directory.
:return: learning accuracy | [
"Learn",
"languages",
"features",
"from",
"source",
"files",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/guesser.py#L118-L164
partition: train
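
A hypothetical training run; the writable model directory name, the way it is passed to the constructor, and the 'sources/' corpus path are all assumptions:

from guesslang import Guess

guess = Guess('fresh-model')  # assumed: a writable, non-default model directory
accuracy = guess.learn('sources/')  # 'sources/' holds per-language source files
print('Learning accuracy: {:.2%}'.format(accuracy))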

repo: yoeo/guesslang
path: tools/report_graph.py
func_name: main
language: python
code:

def main():
    """Report graph creator command line"""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'reportfile', type=argparse.FileType('r'),
        help="test report file generated by `guesslang --test TESTDIR`")
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)

    report = json.load(args.reportfile)
    graph_data = _build_graph(report)
    index_path = _prepare_resources(graph_data)
    webbrowser.open(str(index_path))
"""Report graph creator command line"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'reportfile', type=argparse.FileType('r'),
help="test report file generated by `guesslang --test TESTDIR`")
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
report = json.load(args.reportfile)
graph_data = _build_graph(report)
index_path = _prepare_resources(graph_data)
webbrowser.open(str(index_path)) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'reportfile'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
",",
"help",
"=",
"\"test report file generated by `guesslang --test TESTDIR`\"",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"show debug messages\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"config_logging",
"(",
"args",
".",
"debug",
")",
"report",
"=",
"json",
".",
"load",
"(",
"args",
".",
"reportfile",
")",
"graph_data",
"=",
"_build_graph",
"(",
"report",
")",
"index_path",
"=",
"_prepare_resources",
"(",
"graph_data",
")",
"webbrowser",
".",
"open",
"(",
"str",
"(",
"index_path",
")",
")"
]
| Report graph creator command line | [
"Report",
"graph",
"creator",
"command",
"line"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/tools/report_graph.py#L25-L42
partition: train

repo: yoeo/guesslang
path: guesslang/utils.py
func_name: search_files
language: python
code:

def search_files(source: str, extensions: List[str]) -> List[Path]:
    """Retrieve files located in the source directory and its subdirectories,
    whose extensions match one of the listed extensions.

    :raise GuesslangError: when there are not enough files in the directory
    :param source: directory name
    :param extensions: list of file extensions
    :return: filenames
    """
    files = [
        path for path in Path(source).glob('**/*')
        if path.is_file() and path.suffix.lstrip('.') in extensions]
    nb_files = len(files)
    LOGGER.debug("Total files found: %d", nb_files)

    if nb_files < NB_FILES_MIN:
        LOGGER.error("Too few source files")
        raise GuesslangError(
            '{} source files found in {}. {} files minimum is required'.format(
                nb_files, source, NB_FILES_MIN))

    random.shuffle(files)
    return files
"""Retrieve files located the source directory and its subdirectories,
whose extension match one of the listed extensions.
:raise GuesslangError: when there is not enough files in the directory
:param source: directory name
:param extensions: list of file extensions
:return: filenames
"""
files = [
path for path in Path(source).glob('**/*')
if path.is_file() and path.suffix.lstrip('.') in extensions]
nb_files = len(files)
LOGGER.debug("Total files found: %d", nb_files)
if nb_files < NB_FILES_MIN:
LOGGER.error("Too few source files")
raise GuesslangError(
'{} source files found in {}. {} files minimum is required'.format(
nb_files, source, NB_FILES_MIN))
random.shuffle(files)
return files | [
"def",
"search_files",
"(",
"source",
":",
"str",
",",
"extensions",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"Path",
"]",
":",
"files",
"=",
"[",
"path",
"for",
"path",
"in",
"Path",
"(",
"source",
")",
".",
"glob",
"(",
"'**/*'",
")",
"if",
"path",
".",
"is_file",
"(",
")",
"and",
"path",
".",
"suffix",
".",
"lstrip",
"(",
"'.'",
")",
"in",
"extensions",
"]",
"nb_files",
"=",
"len",
"(",
"files",
")",
"LOGGER",
".",
"debug",
"(",
"\"Total files found: %d\"",
",",
"nb_files",
")",
"if",
"nb_files",
"<",
"NB_FILES_MIN",
":",
"LOGGER",
".",
"error",
"(",
"\"Too few source files\"",
")",
"raise",
"GuesslangError",
"(",
"'{} source files found in {}. {} files minimum is required'",
".",
"format",
"(",
"nb_files",
",",
"source",
",",
"NB_FILES_MIN",
")",
")",
"random",
".",
"shuffle",
"(",
"files",
")",
"return",
"files"
]
| Retrieve files located the source directory and its subdirectories,
whose extension match one of the listed extensions.
:raise GuesslangError: when there is not enough files in the directory
:param source: directory name
:param extensions: list of file extensions
:return: filenames | [
"Retrieve",
"files",
"located",
"the",
"source",
"directory",
"and",
"its",
"subdirectories",
"whose",
"extension",
"match",
"one",
"of",
"the",
"listed",
"extensions",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/utils.py#L30-L52
partition: train
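
A short call sketch; 'corpus/' is a made-up directory:

from guesslang.utils import search_files

files = search_files('corpus/', ['py', 'rb', 'js'])  # shuffled list of Path objects
print(len(files), files[:3])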

repo: yoeo/guesslang
path: guesslang/utils.py
func_name: extract_from_files
language: python
code:

def extract_from_files(
        files: List[Path],
        languages: Dict[str, List[str]]) -> DataSet:
    """Extract arrays of features from the given files.

    :param files: list of paths
    :param languages: language name => associated file extension list
    :return: features
    """
    enumerator = enumerate(sorted(languages.items()))
    rank_map = {ext: rank for rank, (_, exts) in enumerator for ext in exts}

    with multiprocessing.Pool(initializer=_process_init) as pool:
        file_iterator = ((path, rank_map) for path in files)
        arrays = _to_arrays(pool.starmap(_extract_features, file_iterator))

    LOGGER.debug("Extracted arrays count: %d", len(arrays[0]))
    return arrays

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/utils.py#L55-L73
partition: train
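
The rank_map comprehension maps every file extension to the rank of its language in sorted order; a standalone check with made-up languages:

languages = {'Python': ['py'], 'C': ['c', 'h'], 'Ruby': ['rb']}
enumerator = enumerate(sorted(languages.items()))
rank_map = {ext: rank for rank, (_, exts) in enumerator for ext in exts}
print(rank_map)  # {'c': 0, 'h': 0, 'py': 1, 'rb': 2}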

repo: yoeo/guesslang
path: guesslang/utils.py
func_name: safe_read_file
language: python
code:

def safe_read_file(file_path: Path) -> str:
    """Read a text file. Several text encodings are tried until
    the file content is correctly decoded.

    :raise GuesslangError: when the file encoding is not supported
    :param file_path: path to the input file
    :return: text file content
    """
    for encoding in FILE_ENCODINGS:
        try:
            return file_path.read_text(encoding=encoding)
        except UnicodeError:
            pass  # Ignore encoding error

    raise GuesslangError('Encoding not supported for {!s}'.format(file_path))
"""Read a text file. Several text encodings are tried until
the file content is correctly decoded.
:raise GuesslangError: when the file encoding is not supported
:param file_path: path to the input file
:return: text file content
"""
for encoding in FILE_ENCODINGS:
try:
return file_path.read_text(encoding=encoding)
except UnicodeError:
pass # Ignore encoding error
raise GuesslangError('Encoding not supported for {!s}'.format(file_path)) | [
"def",
"safe_read_file",
"(",
"file_path",
":",
"Path",
")",
"->",
"str",
":",
"for",
"encoding",
"in",
"FILE_ENCODINGS",
":",
"try",
":",
"return",
"file_path",
".",
"read_text",
"(",
"encoding",
"=",
"encoding",
")",
"except",
"UnicodeError",
":",
"pass",
"# Ignore encoding error",
"raise",
"GuesslangError",
"(",
"'Encoding not supported for {!s}'",
".",
"format",
"(",
"file_path",
")",
")"
]
| Read a text file. Several text encodings are tried until
the file content is correctly decoded.
:raise GuesslangError: when the file encoding is not supported
:param file_path: path to the input file
:return: text file content | [
"Read",
"a",
"text",
"file",
".",
"Several",
"text",
"encodings",
"are",
"tried",
"until",
"the",
"file",
"content",
"is",
"correctly",
"decoded",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/utils.py#L106-L120
partition: train
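
A call sketch; 'script.py' is a made-up path, and FILE_ENCODINGS is the module-level list of encodings the loop above walks through:

from pathlib import Path
from guesslang.utils import safe_read_file

text = safe_read_file(Path('script.py'))
print(text.splitlines()[0])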

repo: yoeo/guesslang
path: guesslang/config.py
func_name: config_logging
language: python
code:

def config_logging(debug: bool = False) -> None:
    """Set up application and `tensorflow` logging.

    :param debug: show or hide debug messages
    """
    if debug:
        level = 'DEBUG'
        tf_level = tf.logging.INFO
    else:
        level = 'INFO'
        tf_level = tf.logging.ERROR

    logging_config = config_dict('logging.json')
    for logger in logging_config['loggers'].values():
        logger['level'] = level

    logging.config.dictConfig(logging_config)
    tf.logging.set_verbosity(tf_level)
"""Set-up application and `tensorflow` logging.
:param debug: show or hide debug messages
"""
if debug:
level = 'DEBUG'
tf_level = tf.logging.INFO
else:
level = 'INFO'
tf_level = tf.logging.ERROR
logging_config = config_dict('logging.json')
for logger in logging_config['loggers'].values():
logger['level'] = level
logging.config.dictConfig(logging_config)
tf.logging.set_verbosity(tf_level) | [
"def",
"config_logging",
"(",
"debug",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"if",
"debug",
":",
"level",
"=",
"'DEBUG'",
"tf_level",
"=",
"tf",
".",
"logging",
".",
"INFO",
"else",
":",
"level",
"=",
"'INFO'",
"tf_level",
"=",
"tf",
".",
"logging",
".",
"ERROR",
"logging_config",
"=",
"config_dict",
"(",
"'logging.json'",
")",
"for",
"logger",
"in",
"logging_config",
"[",
"'loggers'",
"]",
".",
"values",
"(",
")",
":",
"logger",
"[",
"'level'",
"]",
"=",
"level",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"logging_config",
")",
"tf",
".",
"logging",
".",
"set_verbosity",
"(",
"tf_level",
")"
]
| Set-up application and `tensorflow` logging.
:param debug: show or hide debug messages | [
"Set",
"-",
"up",
"application",
"and",
"tensorflow",
"logging",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/config.py#L53-L70
partition: train
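
A minimal call sketch, mirroring how the tool entry points in this dataset invoke it:

from guesslang.config import config_logging

config_logging(debug=True)  # DEBUG-level app logs, INFO-level tensorflow logs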

repo: yoeo/guesslang
path: guesslang/config.py
func_name: config_dict
language: python
code:

def config_dict(name: str) -> Dict[str, Any]:
    """Load a JSON configuration dict from Guesslang config directory.

    :param name: the JSON file name.
    :return: configuration
    """
    try:
        content = resource_string(PACKAGE, DATADIR.format(name)).decode()
    except DistributionNotFound as error:
        LOGGER.warning("Cannot load %s from packages: %s", name, error)
        content = DATA_FALLBACK.joinpath(name).read_text()

    return cast(Dict[str, Any], json.loads(content))
"""Load a JSON configuration dict from Guesslang config directory.
:param name: the JSON file name.
:return: configuration
"""
try:
content = resource_string(PACKAGE, DATADIR.format(name)).decode()
except DistributionNotFound as error:
LOGGER.warning("Cannot load %s from packages: %s", name, error)
content = DATA_FALLBACK.joinpath(name).read_text()
return cast(Dict[str, Any], json.loads(content)) | [
"def",
"config_dict",
"(",
"name",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"try",
":",
"content",
"=",
"resource_string",
"(",
"PACKAGE",
",",
"DATADIR",
".",
"format",
"(",
"name",
")",
")",
".",
"decode",
"(",
")",
"except",
"DistributionNotFound",
"as",
"error",
":",
"LOGGER",
".",
"warning",
"(",
"\"Cannot load %s from packages: %s\"",
",",
"name",
",",
"error",
")",
"content",
"=",
"DATA_FALLBACK",
".",
"joinpath",
"(",
"name",
")",
".",
"read_text",
"(",
")",
"return",
"cast",
"(",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"json",
".",
"loads",
"(",
"content",
")",
")"
]
| Load a JSON configuration dict from Guesslang config directory.
:param name: the JSON file name.
:return: configuration | [
"Load",
"a",
"JSON",
"configuration",
"dict",
"from",
"Guesslang",
"config",
"directory",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/config.py#L73-L85
partition: train
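
A call sketch based on the languages.json file referenced elsewhere in this dataset; the example value is illustrative:

from guesslang.config import config_dict

languages = config_dict('languages.json')  # e.g. {'Python': ['py'], ...}
print(sorted(languages)[:5])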

repo: yoeo/guesslang
path: guesslang/config.py
func_name: model_info
language: python
code:

def model_info(model_dir: Optional[str] = None) -> Tuple[str, bool]:
    """Retrieve Guesslang model directory name,
    and tell whether it is the default model.

    :param model_dir: model location, if `None` default model is selected
    :return: selected model directory with an indication
        that the model is the default or not
    """
    if model_dir is None:
        try:
            model_dir = resource_filename(PACKAGE, DATADIR.format('model'))
        except DistributionNotFound as error:
            LOGGER.warning("Cannot load model from packages: %s", error)
            model_dir = str(DATA_FALLBACK.joinpath('model').absolute())
        is_default_model = True
    else:
        is_default_model = False

    model_path = Path(model_dir)
    model_path.mkdir(exist_ok=True)

    LOGGER.debug("Using model: %s, default: %s", model_path, is_default_model)
    return (model_dir, is_default_model)
"""Retrieve Guesslang model directory name,
and tells if it is the default model.
:param model_dir: model location, if `None` default model is selected
:return: selected model directory with an indication
that the model is the default or not
"""
if model_dir is None:
try:
model_dir = resource_filename(PACKAGE, DATADIR.format('model'))
except DistributionNotFound as error:
LOGGER.warning("Cannot load model from packages: %s", error)
model_dir = str(DATA_FALLBACK.joinpath('model').absolute())
is_default_model = True
else:
is_default_model = False
model_path = Path(model_dir)
model_path.mkdir(exist_ok=True)
LOGGER.debug("Using model: %s, default: %s", model_path, is_default_model)
return (model_dir, is_default_model) | [
"def",
"model_info",
"(",
"model_dir",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"str",
",",
"bool",
"]",
":",
"if",
"model_dir",
"is",
"None",
":",
"try",
":",
"model_dir",
"=",
"resource_filename",
"(",
"PACKAGE",
",",
"DATADIR",
".",
"format",
"(",
"'model'",
")",
")",
"except",
"DistributionNotFound",
"as",
"error",
":",
"LOGGER",
".",
"warning",
"(",
"\"Cannot load model from packages: %s\"",
",",
"error",
")",
"model_dir",
"=",
"str",
"(",
"DATA_FALLBACK",
".",
"joinpath",
"(",
"'model'",
")",
".",
"absolute",
"(",
")",
")",
"is_default_model",
"=",
"True",
"else",
":",
"is_default_model",
"=",
"False",
"model_path",
"=",
"Path",
"(",
"model_dir",
")",
"model_path",
".",
"mkdir",
"(",
"exist_ok",
"=",
"True",
")",
"LOGGER",
".",
"debug",
"(",
"\"Using model: %s, default: %s\"",
",",
"model_path",
",",
"is_default_model",
")",
"return",
"(",
"model_dir",
",",
"is_default_model",
")"
]
| Retrieve Guesslang model directory name,
and tells if it is the default model.
:param model_dir: model location, if `None` default model is selected
:return: selected model directory with an indication
that the model is the default or not | [
"Retrieve",
"Guesslang",
"model",
"directory",
"name",
"and",
"tells",
"if",
"it",
"is",
"the",
"default",
"model",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/config.py#L88-L110
partition: train
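
A call sketch; 'my_model' is a made-up directory name that the function creates if missing:

from guesslang.config import model_info

default_dir, is_default = model_info()                  # packaged model, is_default is True
custom_dir, is_custom_default = model_info('my_model')  # is_custom_default is False
print(default_dir, is_default, custom_dir, is_custom_default)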

repo: yoeo/guesslang
path: guesslang/config.py
func_name: ColorLogFormatter.format
language: python
code:

def format(self, record: logging.LogRecord) -> str:
    """Format log records to produce colored messages.

    :param record: log record
    :return: log message
    """
    if platform.system() != 'Linux':  # Avoid funny logs on Windows & MacOS
        return super().format(record)

    record.msg = (
        self.STYLE[record.levelname] + record.msg + self.STYLE['END'])
    record.levelname = (
        self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])
    return super().format(record)
"""Format log records to produce colored messages.
:param record: log record
:return: log message
"""
if platform.system() != 'Linux': # Avoid funny logs on Windows & MacOS
return super().format(record)
record.msg = (
self.STYLE[record.levelname] + record.msg + self.STYLE['END'])
record.levelname = (
self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])
return super().format(record) | [
"def",
"format",
"(",
"self",
",",
"record",
":",
"logging",
".",
"LogRecord",
")",
"->",
"str",
":",
"if",
"platform",
".",
"system",
"(",
")",
"!=",
"'Linux'",
":",
"# Avoid funny logs on Windows & MacOS",
"return",
"super",
"(",
")",
".",
"format",
"(",
"record",
")",
"record",
".",
"msg",
"=",
"(",
"self",
".",
"STYLE",
"[",
"record",
".",
"levelname",
"]",
"+",
"record",
".",
"msg",
"+",
"self",
".",
"STYLE",
"[",
"'END'",
"]",
")",
"record",
".",
"levelname",
"=",
"(",
"self",
".",
"STYLE",
"[",
"'LEVEL'",
"]",
"+",
"record",
".",
"levelname",
"+",
"self",
".",
"STYLE",
"[",
"'END'",
"]",
")",
"return",
"super",
"(",
")",
".",
"format",
"(",
"record",
")"
]
| Format log records to produce colored messages.
:param record: log record
:return: log message | [
"Format",
"log",
"records",
"to",
"produce",
"colored",
"messages",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/config.py#L37-L50
partition: train
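
A wiring sketch, assuming ColorLogFormatter is importable from guesslang.config and its STYLE table covers the standard level names:

import logging
from guesslang.config import ColorLogFormatter

handler = logging.StreamHandler()
handler.setFormatter(ColorLogFormatter('%(levelname)s %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.propagate = False  # keep the root logger from printing a duplicate
logger.warning('colored on Linux, plain elsewhere')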

repo: yoeo/guesslang
path: tools/download_github_repo.py
func_name: main
language: python
code:

def main():
    """Github repositories downloader command line"""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'githubtoken',
        help="Github OAuth token, see https://developer.github.com/v3/oauth/")
    parser.add_argument('destination', help="location of the downloaded repos")
    parser.add_argument(
        '-n', '--nbrepo', help="number of repositories per language",
        type=int, default=1000)
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)

    destination = Path(args.destination)
    nb_repos = args.nbrepo
    token = args.githubtoken
    languages = config_dict('languages.json')
    destination.mkdir(exist_ok=True)

    for pos, language in enumerate(sorted(languages), 1):
        LOGGER.info("Step %.2f%%, %s", 100 * pos / len(languages), language)
        LOGGER.info("Fetch %d repos infos for language %s", nb_repos, language)
        repos = _retrieve_repo_details(language, nb_repos, token)
        LOGGER.info("%d repos details kept. Downloading", len(repos))
        _download_repos(language, repos, destination)
        LOGGER.info("Language %s repos downloaded", language)

    LOGGER.debug("Exit OK")
"""Github repositories downloaded command line"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'githubtoken',
help="Github OAuth token, see https://developer.github.com/v3/oauth/")
parser.add_argument('destination', help="location of the downloaded repos")
parser.add_argument(
'-n', '--nbrepo', help="number of repositories per language",
type=int, default=1000)
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
destination = Path(args.destination)
nb_repos = args.nbrepo
token = args.githubtoken
languages = config_dict('languages.json')
destination.mkdir(exist_ok=True)
for pos, language in enumerate(sorted(languages), 1):
LOGGER.info("Step %.2f%%, %s", 100 * pos / len(languages), language)
LOGGER.info("Fetch %d repos infos for language %s", nb_repos, language)
repos = _retrieve_repo_details(language, nb_repos, token)
LOGGER.info("%d repos details kept. Downloading", len(repos))
_download_repos(language, repos, destination)
LOGGER.info("Language %s repos downloaded", language)
LOGGER.debug("Exit OK") | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'githubtoken'",
",",
"help",
"=",
"\"Github OAuth token, see https://developer.github.com/v3/oauth/\"",
")",
"parser",
".",
"add_argument",
"(",
"'destination'",
",",
"help",
"=",
"\"location of the downloaded repos\"",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--nbrepo'",
",",
"help",
"=",
"\"number of repositories per language\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1000",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"show debug messages\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"config_logging",
"(",
"args",
".",
"debug",
")",
"destination",
"=",
"Path",
"(",
"args",
".",
"destination",
")",
"nb_repos",
"=",
"args",
".",
"nbrepo",
"token",
"=",
"args",
".",
"githubtoken",
"languages",
"=",
"config_dict",
"(",
"'languages.json'",
")",
"destination",
".",
"mkdir",
"(",
"exist_ok",
"=",
"True",
")",
"for",
"pos",
",",
"language",
"in",
"enumerate",
"(",
"sorted",
"(",
"languages",
")",
",",
"1",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"Step %.2f%%, %s\"",
",",
"100",
"*",
"pos",
"/",
"len",
"(",
"languages",
")",
",",
"language",
")",
"LOGGER",
".",
"info",
"(",
"\"Fetch %d repos infos for language %s\"",
",",
"nb_repos",
",",
"language",
")",
"repos",
"=",
"_retrieve_repo_details",
"(",
"language",
",",
"nb_repos",
",",
"token",
")",
"LOGGER",
".",
"info",
"(",
"\"%d repos details kept. Downloading\"",
",",
"len",
"(",
"repos",
")",
")",
"_download_repos",
"(",
"language",
",",
"repos",
",",
"destination",
")",
"LOGGER",
".",
"info",
"(",
"\"Language %s repos downloaded\"",
",",
"language",
")",
"LOGGER",
".",
"debug",
"(",
"\"Exit OK\"",
")"
]
| Github repositories downloaded command line | [
"Github",
"repositories",
"downloaded",
"command",
"line"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/tools/download_github_repo.py#L54-L87
partition: train

repo: yoeo/guesslang
path: tools/download_github_repo.py
func_name: retry
language: python
code:

def retry(default=None):
    """Retry functions after failures"""
    def decorator(func):
        """Retry decorator"""
        @functools.wraps(func)
        def _wrapper(*args, **kw):
            for pos in range(1, MAX_RETRIES):
                try:
                    return func(*args, **kw)
                except (RuntimeError, requests.ConnectionError) as error:
                    LOGGER.warning("Failed: %s, %s", type(error), error)
                    # Wait a bit before retrying
                    for _ in range(pos):
                        _rest()

            LOGGER.warning("Request Aborted")
            return default
        return _wrapper
    return decorator
"""Retry functions after failures"""
def decorator(func):
"""Retry decorator"""
@functools.wraps(func)
def _wrapper(*args, **kw):
for pos in range(1, MAX_RETRIES):
try:
return func(*args, **kw)
except (RuntimeError, requests.ConnectionError) as error:
LOGGER.warning("Failed: %s, %s", type(error), error)
# Wait a bit before retrying
for _ in range(pos):
_rest()
LOGGER.warning("Request Aborted")
return default
return _wrapper
return decorator | [
"def",
"retry",
"(",
"default",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\"Retry decorator\"\"\"",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"for",
"pos",
"in",
"range",
"(",
"1",
",",
"MAX_RETRIES",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"except",
"(",
"RuntimeError",
",",
"requests",
".",
"ConnectionError",
")",
"as",
"error",
":",
"LOGGER",
".",
"warning",
"(",
"\"Failed: %s, %s\"",
",",
"type",
"(",
"error",
")",
",",
"error",
")",
"# Wait a bit before retrying",
"for",
"_",
"in",
"range",
"(",
"pos",
")",
":",
"_rest",
"(",
")",
"LOGGER",
".",
"warning",
"(",
"\"Request Aborted\"",
")",
"return",
"default",
"return",
"_wrapper",
"return",
"decorator"
]
| Retry functions after failures | [
"Retry",
"functions",
"after",
"failures"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/tools/download_github_repo.py#L117-L140
partition: train
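
A usage sketch for the decorator, written as if inside the same module (it relies on the module-level MAX_RETRIES, _rest() and requests import that the code above references); the URL is illustrative:

@retry(default={})
def fetch_rate_limit():
    response = requests.get('https://api.github.com/rate_limit', timeout=10)
    if response.status_code != 200:
        raise RuntimeError('HTTP {}'.format(response.status_code))
    return response.json()

data = fetch_rate_limit()  # {} if every attempt raised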

repo: yoeo/guesslang
path: tools/make_keywords.py
func_name: main
language: python
code:

def main():
    """Keywords generator command line"""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('learn', help="learning source codes directory")
    parser.add_argument('keywords', help="output keywords file, JSON")
    parser.add_argument(
        '-n', '--nbkeywords', type=int, default=10000,
        help="the number of keywords to keep")
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)

    learn_path = Path(args.learn)
    keywords_path = Path(args.keywords)
    nb_keywords = args.nbkeywords

    languages = config_dict('languages.json')
    exts = {ext: lang for lang, exts in languages.items() for ext in exts}

    term_count = Counter()
    document_count = Counter()
    pos = 0

    LOGGER.info("Reading files from %s", learn_path)
    for pos, path in enumerate(Path(learn_path).glob('**/*'), 1):
        if pos % STEP == 0:
            LOGGER.debug("Processed %d", pos)
            gc.collect()  # Cleanup dirt

        if not path.is_file() or not exts.get(path.suffix.lstrip('.')):
            continue

        counter = _extract(path)
        term_count.update(counter)
        document_count.update(counter.keys())

    nb_terms = sum(term_count.values())
    nb_documents = pos - 1
    if not nb_documents:
        LOGGER.error("No source files found in %s", learn_path)
        raise RuntimeError('No source files in {}'.format(learn_path))

    LOGGER.info("%d unique terms found", len(term_count))

    terms = _most_frequent(
        (term_count, nb_terms), (document_count, nb_documents), nb_keywords)
    keywords = {
        token: int(hashlib.sha1(token.encode()).hexdigest(), 16)
        for token in terms
    }

    with keywords_path.open('w') as keywords_file:
        json.dump(keywords, keywords_file, indent=2, sort_keys=True)

    LOGGER.info("%d keywords written into %s", len(keywords), keywords_path)
    LOGGER.debug("Exit OK")
"""Keywords generator command line"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('learn', help="learning source codes directory")
parser.add_argument('keywords', help="output keywords file, JSON")
parser.add_argument(
'-n', '--nbkeywords', type=int, default=10000,
help="the number of keywords to keep")
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
learn_path = Path(args.learn)
keywords_path = Path(args.keywords)
nb_keywords = args.nbkeywords
languages = config_dict('languages.json')
exts = {ext: lang for lang, exts in languages.items() for ext in exts}
term_count = Counter()
document_count = Counter()
pos = 0
LOGGER.info("Reading files form %s", learn_path)
for pos, path in enumerate(Path(learn_path).glob('**/*'), 1):
if pos % STEP == 0:
LOGGER.debug("Processed %d", pos)
gc.collect() # Cleanup dirt
if not path.is_file() or not exts.get(path.suffix.lstrip('.')):
continue
counter = _extract(path)
term_count.update(counter)
document_count.update(counter.keys())
nb_terms = sum(term_count.values())
nb_documents = pos - 1
if not nb_documents:
LOGGER.error("No source files found in %s", learn_path)
raise RuntimeError('No source files in {}'.format(learn_path))
LOGGER.info("%d unique terms found", len(term_count))
terms = _most_frequent(
(term_count, nb_terms), (document_count, nb_documents), nb_keywords)
keywords = {
token: int(hashlib.sha1(token.encode()).hexdigest(), 16)
for token in terms
}
with keywords_path.open('w') as keywords_file:
json.dump(keywords, keywords_file, indent=2, sort_keys=True)
LOGGER.info("%d keywords written into %s", len(keywords), keywords_path)
LOGGER.debug("Exit OK") | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'learn'",
",",
"help",
"=",
"\"learning source codes directory\"",
")",
"parser",
".",
"add_argument",
"(",
"'keywords'",
",",
"help",
"=",
"\"output keywords file, JSON\"",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--nbkeywords'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
",",
"help",
"=",
"\"the number of keywords to keep\"",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"show debug messages\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"config_logging",
"(",
"args",
".",
"debug",
")",
"learn_path",
"=",
"Path",
"(",
"args",
".",
"learn",
")",
"keywords_path",
"=",
"Path",
"(",
"args",
".",
"keywords",
")",
"nb_keywords",
"=",
"args",
".",
"nbkeywords",
"languages",
"=",
"config_dict",
"(",
"'languages.json'",
")",
"exts",
"=",
"{",
"ext",
":",
"lang",
"for",
"lang",
",",
"exts",
"in",
"languages",
".",
"items",
"(",
")",
"for",
"ext",
"in",
"exts",
"}",
"term_count",
"=",
"Counter",
"(",
")",
"document_count",
"=",
"Counter",
"(",
")",
"pos",
"=",
"0",
"LOGGER",
".",
"info",
"(",
"\"Reading files form %s\"",
",",
"learn_path",
")",
"for",
"pos",
",",
"path",
"in",
"enumerate",
"(",
"Path",
"(",
"learn_path",
")",
".",
"glob",
"(",
"'**/*'",
")",
",",
"1",
")",
":",
"if",
"pos",
"%",
"STEP",
"==",
"0",
":",
"LOGGER",
".",
"debug",
"(",
"\"Processed %d\"",
",",
"pos",
")",
"gc",
".",
"collect",
"(",
")",
"# Cleanup dirt",
"if",
"not",
"path",
".",
"is_file",
"(",
")",
"or",
"not",
"exts",
".",
"get",
"(",
"path",
".",
"suffix",
".",
"lstrip",
"(",
"'.'",
")",
")",
":",
"continue",
"counter",
"=",
"_extract",
"(",
"path",
")",
"term_count",
".",
"update",
"(",
"counter",
")",
"document_count",
".",
"update",
"(",
"counter",
".",
"keys",
"(",
")",
")",
"nb_terms",
"=",
"sum",
"(",
"term_count",
".",
"values",
"(",
")",
")",
"nb_documents",
"=",
"pos",
"-",
"1",
"if",
"not",
"nb_documents",
":",
"LOGGER",
".",
"error",
"(",
"\"No source files found in %s\"",
",",
"learn_path",
")",
"raise",
"RuntimeError",
"(",
"'No source files in {}'",
".",
"format",
"(",
"learn_path",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"%d unique terms found\"",
",",
"len",
"(",
"term_count",
")",
")",
"terms",
"=",
"_most_frequent",
"(",
"(",
"term_count",
",",
"nb_terms",
")",
",",
"(",
"document_count",
",",
"nb_documents",
")",
",",
"nb_keywords",
")",
"keywords",
"=",
"{",
"token",
":",
"int",
"(",
"hashlib",
".",
"sha1",
"(",
"token",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
",",
"16",
")",
"for",
"token",
"in",
"terms",
"}",
"with",
"keywords_path",
".",
"open",
"(",
"'w'",
")",
"as",
"keywords_file",
":",
"json",
".",
"dump",
"(",
"keywords",
",",
"keywords_file",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
"LOGGER",
".",
"info",
"(",
"\"%d keywords written into %s\"",
",",
"len",
"(",
"keywords",
")",
",",
"keywords_path",
")",
"LOGGER",
".",
"debug",
"(",
"\"Exit OK\"",
")"
]
| Keywords generator command line | [
"Keywords",
"generator",
"command",
"line"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/tools/make_keywords.py#L29-L87
partition: train

repo: yoeo/guesslang
path: guesslang/__main__.py
func_name: main
language: python
code:

def main() -> None:
    """Run command line"""
    try:
        _real_main()
    except GuesslangError as error:
        LOGGER.critical("Failed: %s", error)
        sys.exit(-1)
    except KeyboardInterrupt:
        LOGGER.critical("Cancelled!")
        sys.exit(-2)
"""Run command line"""
try:
_real_main()
except GuesslangError as error:
LOGGER.critical("Failed: %s", error)
sys.exit(-1)
except KeyboardInterrupt:
LOGGER.critical("Cancelled!")
sys.exit(-2) | [
"def",
"main",
"(",
")",
"->",
"None",
":",
"try",
":",
"_real_main",
"(",
")",
"except",
"GuesslangError",
"as",
"error",
":",
"LOGGER",
".",
"critical",
"(",
"\"Failed: %s\"",
",",
"error",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"LOGGER",
".",
"critical",
"(",
"\"Cancelled!\"",
")",
"sys",
".",
"exit",
"(",
"-",
"2",
")"
]
| Run command line | [
"Run",
"command",
"line"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/__main__.py#L19-L28
partition: train

repo: yoeo/guesslang
path: guesslang/extractor.py
func_name: split
language: python
code:

def split(text: str) -> List[str]:
    """Split a text into a list of tokens.

    :param text: the text to split
    :return: tokens
    """
    return [word for word in SEPARATOR.split(text) if word.strip(' \t')]
"""Split a text into a list of tokens.
:param text: the text to split
:return: tokens
"""
return [word for word in SEPARATOR.split(text) if word.strip(' \t')] | [
"def",
"split",
"(",
"text",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"[",
"word",
"for",
"word",
"in",
"SEPARATOR",
".",
"split",
"(",
"text",
")",
"if",
"word",
".",
"strip",
"(",
"' \\t'",
")",
"]"
]
| Split a text into a list of tokens.
:param text: the text to split
:return: tokens | [
"Split",
"a",
"text",
"into",
"a",
"list",
"of",
"tokens",
"."
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/guesslang/extractor.py#L34-L40
partition: train
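
A call sketch; the exact token boundaries depend on the SEPARATOR regex, which this record does not show:

from guesslang.extractor import split

print(split('result = my_func(x, y)'))
# plausibly ['result', '=', 'my_func', '(', 'x', ',', 'y', ')']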

repo: yoeo/guesslang
path: tools/unzip_repos.py
func_name: main
language: python
code:

def main():
    """Files extractor command line"""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('source', help="location of the downloaded repos")
    parser.add_argument('destination', help="location of the extracted files")
    parser.add_argument(
        '-t', '--nb-test-files', help="number of testing files per language",
        type=int, default=5000)
    parser.add_argument(
        '-l', '--nb-learn-files', help="number of learning files per language",
        type=int, default=10000)
    parser.add_argument(
        '-r', '--remove', help="remove repos that cannot be read",
        action='store_true', default=False)
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)

    source = Path(args.source)
    destination = Path(args.destination)
    nb_test = args.nb_test_files
    nb_learn = args.nb_learn_files
    remove = args.remove

    repos = _find_repos(source)
    split_repos = _split_repos(repos, nb_test, nb_learn)
    split_files = _find_files(*split_repos, nb_test, nb_learn, remove)
    _unzip_all(*split_files, destination)

    LOGGER.info("Files saved into %s", destination)
    LOGGER.debug("Exit OK")
"""Files extractor command line"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('source', help="location of the downloaded repos")
parser.add_argument('destination', help="location of the extracted files")
parser.add_argument(
'-t', '--nb-test-files', help="number of testing files per language",
type=int, default=5000)
parser.add_argument(
'-l', '--nb-learn-files', help="number of learning files per language",
type=int, default=10000)
parser.add_argument(
'-r', '--remove', help="remove repos that cannot be read",
action='store_true', default=False)
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
source = Path(args.source)
destination = Path(args.destination)
nb_test = args.nb_test_files
nb_learn = args.nb_learn_files
remove = args.remove
repos = _find_repos(source)
split_repos = _split_repos(repos, nb_test, nb_learn)
split_files = _find_files(*split_repos, nb_test, nb_learn, remove)
_unzip_all(*split_files, destination)
LOGGER.info("Files saved into %s", destination)
LOGGER.debug("Exit OK") | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'source'",
",",
"help",
"=",
"\"location of the downloaded repos\"",
")",
"parser",
".",
"add_argument",
"(",
"'destination'",
",",
"help",
"=",
"\"location of the extracted files\"",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--nb-test-files'",
",",
"help",
"=",
"\"number of testing files per language\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5000",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--nb-learn-files'",
",",
"help",
"=",
"\"number of learning files per language\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--remove'",
",",
"help",
"=",
"\"remove repos that cannot be read\"",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"show debug messages\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"config_logging",
"(",
"args",
".",
"debug",
")",
"source",
"=",
"Path",
"(",
"args",
".",
"source",
")",
"destination",
"=",
"Path",
"(",
"args",
".",
"destination",
")",
"nb_test",
"=",
"args",
".",
"nb_test_files",
"nb_learn",
"=",
"args",
".",
"nb_learn_files",
"remove",
"=",
"args",
".",
"remove",
"repos",
"=",
"_find_repos",
"(",
"source",
")",
"split_repos",
"=",
"_split_repos",
"(",
"repos",
",",
"nb_test",
",",
"nb_learn",
")",
"split_files",
"=",
"_find_files",
"(",
"*",
"split_repos",
",",
"nb_test",
",",
"nb_learn",
",",
"remove",
")",
"_unzip_all",
"(",
"*",
"split_files",
",",
"destination",
")",
"LOGGER",
".",
"info",
"(",
"\"Files saved into %s\"",
",",
"destination",
")",
"LOGGER",
".",
"debug",
"(",
"\"Exit OK\"",
")"
]
| Files extractor command line | [
"Files",
"extractor",
"command",
"line"
]

sha: 03e33b77c73238c0fe4600147e8c926515a2887f
url: https://github.com/yoeo/guesslang/blob/03e33b77c73238c0fe4600147e8c926515a2887f/tools/unzip_repos.py#L30-L65
partition: train
innolitics/dicom-numpy | dicom_numpy/combine_slices.py | combine_slices | def combine_slices(slice_datasets, rescale=None):
'''
Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in the same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slices must approximately form a grid. This means there cannot
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
'''
if len(slice_datasets) == 0:
raise DicomImportException("Must provide at least one DICOM dataset")
_validate_slices_form_uniform_grid(slice_datasets)
voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
return voxels, transform | python | def combine_slices(slice_datasets, rescale=None):
'''
Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True`, the voxels will be cast to `float32`; if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in the same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slices must approximately form a grid. This means there cannot
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
'''
if len(slice_datasets) == 0:
raise DicomImportException("Must provide at least one DICOM dataset")
_validate_slices_form_uniform_grid(slice_datasets)
voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
return voxels, transform | [
"def",
"combine_slices",
"(",
"slice_datasets",
",",
"rescale",
"=",
"None",
")",
":",
"if",
"len",
"(",
"slice_datasets",
")",
"==",
"0",
":",
"raise",
"DicomImportException",
"(",
"\"Must provide at least one DICOM dataset\"",
")",
"_validate_slices_form_uniform_grid",
"(",
"slice_datasets",
")",
"voxels",
"=",
"_merge_slice_pixel_arrays",
"(",
"slice_datasets",
",",
"rescale",
")",
"transform",
"=",
"_ijk_to_patient_xyz_transform_matrix",
"(",
"slice_datasets",
")",
"return",
"voxels",
",",
"transform"
]
| Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True`, the voxels will be cast to `float32`; if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in the same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slices must approximately form a grid. This means there cannot
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised. | [
"Given",
"a",
"list",
"of",
"pydicom",
"datasets",
"for",
"an",
"image",
"series",
"stitch",
"them",
"together",
"into",
"a",
"three",
"-",
"dimensional",
"numpy",
"array",
".",
"Also",
"calculate",
"a",
"4x4",
"affine",
"transformation",
"matrix",
"that",
"converts",
"the",
"ijk",
"-",
"pixel",
"-",
"indices",
"into",
"the",
"xyz",
"-",
"coordinates",
"in",
"the",
"DICOM",
"patient",
"s",
"coordinate",
"system",
"."
]
| c870f0302276e7eaa0b66e641bacee19fe090296 | https://github.com/innolitics/dicom-numpy/blob/c870f0302276e7eaa0b66e641bacee19fe090296/dicom_numpy/combine_slices.py#L12-L74 | train |
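A hedged usage sketch for the function above: the series directory and glob pattern are hypothetical, and it assumes pydicom and dicom_numpy are installed; `DicomImportException` is the exception type named in the record's docstring.

```python
# Hypothetical call site for dicom_numpy.combine_slices; 'series/*.dcm'
# is an illustrative path, not taken from the source record.
import glob

import pydicom
import dicom_numpy

datasets = [pydicom.dcmread(path) for path in sorted(glob.glob('series/*.dcm'))]
try:
    voxels, ijk_to_xyz = dicom_numpy.combine_slices(datasets)
except dicom_numpy.DicomImportException:
    raise  # the slices did not satisfy the grid requirements listed above

print(voxels.shape)   # 3D ndarray, column-major byte order
print(ijk_to_xyz)     # 4x4 affine: pixel indices -> patient xyz coordinates
```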
innolitics/dicom-numpy | dicom_numpy/combine_slices.py | _validate_slices_form_uniform_grid | def _validate_slices_form_uniform_grid(slice_datasets):
'''
Perform various data checks to ensure that the list of slices forms an
evenly-spaced grid of data.
Some of these checks are probably not required if the data follows the
DICOM specification, however it seems pertinent to check anyway.
'''
invariant_properties = [
'Modality',
'SOPClassUID',
'SeriesInstanceUID',
'Rows',
'Columns',
'PixelSpacing',
'PixelRepresentation',
'BitsAllocated',
'BitsStored',
'HighBit',
]
for property_name in invariant_properties:
_slice_attribute_equal(slice_datasets, property_name)
_validate_image_orientation(slice_datasets[0].ImageOrientationPatient)
_slice_ndarray_attribute_almost_equal(slice_datasets, 'ImageOrientationPatient', 1e-5)
slice_positions = _slice_positions(slice_datasets)
_check_for_missing_slices(slice_positions) | python | def _validate_slices_form_uniform_grid(slice_datasets):
'''
Perform various data checks to ensure that the list of slices forms an
evenly-spaced grid of data.
Some of these checks are probably not required if the data follows the
DICOM specification, however it seems pertinent to check anyway.
'''
invariant_properties = [
'Modality',
'SOPClassUID',
'SeriesInstanceUID',
'Rows',
'Columns',
'PixelSpacing',
'PixelRepresentation',
'BitsAllocated',
'BitsStored',
'HighBit',
]
for property_name in invariant_properties:
_slice_attribute_equal(slice_datasets, property_name)
_validate_image_orientation(slice_datasets[0].ImageOrientationPatient)
_slice_ndarray_attribute_almost_equal(slice_datasets, 'ImageOrientationPatient', 1e-5)
slice_positions = _slice_positions(slice_datasets)
_check_for_missing_slices(slice_positions) | [
"def",
"_validate_slices_form_uniform_grid",
"(",
"slice_datasets",
")",
":",
"invariant_properties",
"=",
"[",
"'Modality'",
",",
"'SOPClassUID'",
",",
"'SeriesInstanceUID'",
",",
"'Rows'",
",",
"'Columns'",
",",
"'PixelSpacing'",
",",
"'PixelRepresentation'",
",",
"'BitsAllocated'",
",",
"'BitsStored'",
",",
"'HighBit'",
",",
"]",
"for",
"property_name",
"in",
"invariant_properties",
":",
"_slice_attribute_equal",
"(",
"slice_datasets",
",",
"property_name",
")",
"_validate_image_orientation",
"(",
"slice_datasets",
"[",
"0",
"]",
".",
"ImageOrientationPatient",
")",
"_slice_ndarray_attribute_almost_equal",
"(",
"slice_datasets",
",",
"'ImageOrientationPatient'",
",",
"1e-5",
")",
"slice_positions",
"=",
"_slice_positions",
"(",
"slice_datasets",
")",
"_check_for_missing_slices",
"(",
"slice_positions",
")"
]
| Perform various data checks to ensure that the list of slices forms an
evenly-spaced grid of data.
Some of these checks are probably not required if the data follows the
DICOM specification, however it seems pertinent to check anyway. | [
"Perform",
"various",
"data",
"checks",
"to",
"ensure",
"that",
"the",
"list",
"of",
"slices",
"form",
"a",
"evenly",
"-",
"spaced",
"grid",
"of",
"data",
".",
"Some",
"of",
"these",
"checks",
"are",
"probably",
"not",
"required",
"if",
"the",
"data",
"follows",
"the",
"DICOM",
"specification",
"however",
"it",
"seems",
"pertinent",
"to",
"check",
"anyway",
"."
]
| c870f0302276e7eaa0b66e641bacee19fe090296 | https://github.com/innolitics/dicom-numpy/blob/c870f0302276e7eaa0b66e641bacee19fe090296/dicom_numpy/combine_slices.py#L126-L153 | train |
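The validator above delegates to helpers such as `_slice_attribute_equal`. The following is an assumed re-implementation of that invariant check, using `SimpleNamespace` objects in place of pydicom datasets.

```python
# Assumed shape of the invariant-attribute check performed by the validator;
# SimpleNamespace objects stand in for pydicom datasets here.
from types import SimpleNamespace

def slice_attribute_equal(slice_datasets, name):
    first = getattr(slice_datasets[0], name)
    if any(getattr(d, name) != first for d in slice_datasets[1:]):
        raise ValueError('All slices must share the same {}'.format(name))

slices = [SimpleNamespace(Rows=512, Columns=512) for _ in range(3)]
slice_attribute_equal(slices, 'Rows')        # passes: all equal
slices[1].Columns = 256
# slice_attribute_equal(slices, 'Columns')   # would raise ValueError
```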
edx/opaque-keys | opaque_keys/edx/locator.py | BlockLocatorBase.parse_url | def parse_url(cls, string): # pylint: disable=redefined-outer-name
"""
If it can be parsed as a version_guid with no preceding org + offering, returns a dict
with key 'version_guid' and the value.
If it can be parsed as an org + offering, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
Raises:
InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
"""
match = cls.URL_RE.match(string)
if not match:
raise InvalidKeyError(cls, string)
return match.groupdict() | python | def parse_url(cls, string): # pylint: disable=redefined-outer-name
"""
If it can be parsed as a version_guid with no preceding org + offering, returns a dict
with key 'version_guid' and the value.
If it can be parsed as an org + offering, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
Raises:
InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
"""
match = cls.URL_RE.match(string)
if not match:
raise InvalidKeyError(cls, string)
return match.groupdict() | [
"def",
"parse_url",
"(",
"cls",
",",
"string",
")",
":",
"# pylint: disable=redefined-outer-name",
"match",
"=",
"cls",
".",
"URL_RE",
".",
"match",
"(",
"string",
")",
"if",
"not",
"match",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"string",
")",
"return",
"match",
".",
"groupdict",
"(",
")"
]
| If it can be parsed as a version_guid with no preceding org + offering, returns a dict
with key 'version_guid' and the value.
If it can be parsed as an org + offering, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
Raises:
InvalidKeyError: if string cannot be parsed -or- string ends with a newline. | [
"If",
"it",
"can",
"be",
"parsed",
"as",
"a",
"version_guid",
"with",
"no",
"preceding",
"org",
"+",
"offering",
"returns",
"a",
"dict",
"with",
"key",
"version_guid",
"and",
"the",
"value"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L110-L124 | train |
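The parse-or-raise pattern in `parse_url` reduces to a regex plus `groupdict()`. The sketch below uses a toy grammar, not the real locator regex, to show how optional groups surface as `None` keys.

```python
# Toy version of the parse_url pattern; URL_RE here is illustrative and
# far simpler than the real locator grammar.
import re

URL_RE = re.compile(r'(?P<org>[^+]+)\+(?P<offering>[^+]+)(\+(?P<branch>\w+))?$')

def parse_url(string):
    match = URL_RE.match(string)
    if not match:
        raise ValueError('invalid key: {!r}'.format(string))
    return match.groupdict()

print(parse_url('edX+DemoX'))        # {'org': 'edX', 'offering': 'DemoX', 'branch': None}
print(parse_url('edX+DemoX+draft'))  # branch captured as 'draft'
```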
edx/opaque-keys | opaque_keys/edx/locator.py | CourseLocator.offering | def offering(self):
"""
Deprecated. Use course and run independently.
"""
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.course and not self.run:
return None
elif not self.run and self.course:
return self.course
return "/".join([self.course, self.run]) | python | def offering(self):
"""
Deprecated. Use course and run independently.
"""
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.course and not self.run:
return None
elif not self.run and self.course:
return self.course
return "/".join([self.course, self.run]) | [
"def",
"offering",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Offering is no longer a supported property of Locator. Please use the course and run properties.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"if",
"not",
"self",
".",
"course",
"and",
"not",
"self",
".",
"run",
":",
"return",
"None",
"elif",
"not",
"self",
".",
"run",
"and",
"self",
".",
"course",
":",
"return",
"self",
".",
"course",
"return",
"\"/\"",
".",
"join",
"(",
"[",
"self",
".",
"course",
",",
"self",
".",
"run",
"]",
")"
]
| Deprecated. Use course and run independently. | [
"Deprecated",
".",
"Use",
"course",
"and",
"run",
"independently",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L234-L247 | train |
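Several records in this file share the same deprecation idiom: warn with `stacklevel=2` so the caller's line is reported, then compute the legacy value. A runnable distillation, with made-up field values:

```python
# Distilled deprecation idiom from the locator records; the Locator class
# and its field values are invented for the demonstration.
import warnings

class Locator:
    course, run = 'DemoX', '2024'

    @property
    def offering(self):
        warnings.warn('offering is deprecated; use course and run',
                      DeprecationWarning, stacklevel=2)
        return '/'.join(filter(None, [self.course, self.run]))

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    assert Locator().offering == 'DemoX/2024'
    assert caught[0].category is DeprecationWarning
```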
edx/opaque-keys | opaque_keys/edx/locator.py | CourseLocator.make_usage_key_from_deprecated_string | def make_usage_key_from_deprecated_string(self, location_url):
"""
Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.
NOTE: this prejudicially takes the tag, org, and course from the url, not self.
Raises:
InvalidKeyError: if the url does not parse
"""
warnings.warn(
"make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key",
DeprecationWarning,
stacklevel=2
)
return BlockUsageLocator.from_string(location_url).replace(run=self.run) | python | def make_usage_key_from_deprecated_string(self, location_url):
"""
Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.
NOTE: this prejudicially takes the tag, org, and course from the url, not self.
Raises:
InvalidKeyError: if the url does not parse
"""
warnings.warn(
"make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key",
DeprecationWarning,
stacklevel=2
)
return BlockUsageLocator.from_string(location_url).replace(run=self.run) | [
"def",
"make_usage_key_from_deprecated_string",
"(",
"self",
",",
"location_url",
")",
":",
"warnings",
".",
"warn",
"(",
"\"make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"BlockUsageLocator",
".",
"from_string",
"(",
"location_url",
")",
".",
"replace",
"(",
"run",
"=",
"self",
".",
"run",
")"
]
| Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.
NOTE: this prejudicially takes the tag, org, and course from the url, not self.
Raises:
InvalidKeyError: if the url does not parse | [
"Deprecated",
"mechanism",
"for",
"creating",
"a",
"UsageKey",
"given",
"a",
"CourseKey",
"and",
"a",
"serialized",
"Location",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L283-L297 | train |
edx/opaque-keys | opaque_keys/edx/locator.py | BlockUsageLocator._from_string | def _from_string(cls, serialized):
"""
Requests CourseLocator to deserialize its part and then adds the local deserialization of block
"""
# Allow access to _from_string protected method
course_key = CourseLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = cls.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
return cls(course_key, parsed_parts.get('block_type'), block_id) | python | def _from_string(cls, serialized):
"""
Requests CourseLocator to deserialize its part and then adds the local deserialization of block
"""
# Allow access to _from_string protected method
course_key = CourseLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = cls.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
return cls(course_key, parsed_parts.get('block_type'), block_id) | [
"def",
"_from_string",
"(",
"cls",
",",
"serialized",
")",
":",
"# Allow access to _from_string protected method",
"course_key",
"=",
"CourseLocator",
".",
"_from_string",
"(",
"serialized",
")",
"# pylint: disable=protected-access",
"parsed_parts",
"=",
"cls",
".",
"parse_url",
"(",
"serialized",
")",
"block_id",
"=",
"parsed_parts",
".",
"get",
"(",
"'block_id'",
",",
"None",
")",
"if",
"block_id",
"is",
"None",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"serialized",
")",
"return",
"cls",
"(",
"course_key",
",",
"parsed_parts",
".",
"get",
"(",
"'block_type'",
")",
",",
"block_id",
")"
]
| Requests CourseLocator to deserialize its part and then adds the local deserialization of block | [
"Requests",
"CourseLocator",
"to",
"deserialize",
"its",
"part",
"and",
"then",
"adds",
"the",
"local",
"deserialization",
"of",
"block"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L720-L730 | train |
edx/opaque-keys | opaque_keys/edx/locator.py | BlockUsageLocator._parse_block_ref | def _parse_block_ref(cls, block_ref, deprecated=False):
"""
Given `block_ref`, tries to parse it into a valid block reference.
Returns `block_ref` if it is valid.
Raises:
InvalidKeyError: if `block_ref` is invalid.
"""
if deprecated and block_ref is None:
return None
if isinstance(block_ref, LocalId):
return block_ref
is_valid_deprecated = deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref)
is_valid = cls.ALLOWED_ID_RE.match(block_ref)
if is_valid or is_valid_deprecated:
return block_ref
else:
raise InvalidKeyError(cls, block_ref) | python | def _parse_block_ref(cls, block_ref, deprecated=False):
"""
Given `block_ref`, tries to parse it into a valid block reference.
Returns `block_ref` if it is valid.
Raises:
InvalidKeyError: if `block_ref` is invalid.
"""
if deprecated and block_ref is None:
return None
if isinstance(block_ref, LocalId):
return block_ref
is_valid_deprecated = deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref)
is_valid = cls.ALLOWED_ID_RE.match(block_ref)
if is_valid or is_valid_deprecated:
return block_ref
else:
raise InvalidKeyError(cls, block_ref) | [
"def",
"_parse_block_ref",
"(",
"cls",
",",
"block_ref",
",",
"deprecated",
"=",
"False",
")",
":",
"if",
"deprecated",
"and",
"block_ref",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"block_ref",
",",
"LocalId",
")",
":",
"return",
"block_ref",
"is_valid_deprecated",
"=",
"deprecated",
"and",
"cls",
".",
"DEPRECATED_ALLOWED_ID_RE",
".",
"match",
"(",
"block_ref",
")",
"is_valid",
"=",
"cls",
".",
"ALLOWED_ID_RE",
".",
"match",
"(",
"block_ref",
")",
"if",
"is_valid",
"or",
"is_valid_deprecated",
":",
"return",
"block_ref",
"else",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"block_ref",
")"
]
| Given `block_ref`, tries to parse it into a valid block reference.
Returns `block_ref` if it is valid.
Raises:
InvalidKeyError: if `block_ref` is invalid. | [
"Given",
"block_ref",
"tries",
"to",
"parse",
"it",
"into",
"a",
"valid",
"block",
"reference",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L766-L788 | train |
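The two-tier check in `_parse_block_ref` (a strict modern pattern plus a looser deprecated one) can be modelled as below; both regexes are illustrative stand-ins for the class attributes referenced in the record.

```python
# Illustrative stand-ins for ALLOWED_ID_RE / DEPRECATED_ALLOWED_ID_RE;
# the real patterns live on the locator class.
import re

ALLOWED_ID_RE = re.compile(r'^[\w\-~.:]+$', re.UNICODE)
DEPRECATED_ALLOWED_ID_RE = re.compile(r'^[\w\-~.:%]+$', re.UNICODE)

def parse_block_ref(block_ref, deprecated=False):
    if deprecated and block_ref is None:
        return None
    if ALLOWED_ID_RE.match(block_ref):
        return block_ref
    if deprecated and DEPRECATED_ALLOWED_ID_RE.match(block_ref):
        return block_ref
    raise ValueError('invalid block reference: {!r}'.format(block_ref))

print(parse_block_ref('html_1'))                    # valid under both rule sets
print(parse_block_ref('name%20', deprecated=True))  # accepted only by the old rules
```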
edx/opaque-keys | opaque_keys/edx/locator.py | BlockUsageLocator.html_id | def html_id(self):
"""
Return an id which can be used on an html page as an id attr of an html element. It is currently also
persisted by some clients to identify blocks.
To make compatible with old Location object functionality. I don't believe this behavior fits at this
place, but I have no way to override. We should clearly define the purpose and restrictions of this
(e.g., I'm assuming periods are fine).
"""
if self.deprecated:
id_fields = [self.DEPRECATED_TAG, self.org, self.course, self.block_type, self.block_id, self.version_guid]
id_string = u"-".join([v for v in id_fields if v is not None])
return self.clean_for_html(id_string)
else:
return self.block_id | python | def html_id(self):
"""
Return an id which can be used on an html page as an id attr of an html element. It is currently also
persisted by some clients to identify blocks.
To make compatible with old Location object functionality. I don't believe this behavior fits at this
place, but I have no way to override. We should clearly define the purpose and restrictions of this
(e.g., I'm assuming periods are fine).
"""
if self.deprecated:
id_fields = [self.DEPRECATED_TAG, self.org, self.course, self.block_type, self.block_id, self.version_guid]
id_string = u"-".join([v for v in id_fields if v is not None])
return self.clean_for_html(id_string)
else:
return self.block_id | [
"def",
"html_id",
"(",
"self",
")",
":",
"if",
"self",
".",
"deprecated",
":",
"id_fields",
"=",
"[",
"self",
".",
"DEPRECATED_TAG",
",",
"self",
".",
"org",
",",
"self",
".",
"course",
",",
"self",
".",
"block_type",
",",
"self",
".",
"block_id",
",",
"self",
".",
"version_guid",
"]",
"id_string",
"=",
"u\"-\"",
".",
"join",
"(",
"[",
"v",
"for",
"v",
"in",
"id_fields",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"return",
"self",
".",
"clean_for_html",
"(",
"id_string",
")",
"else",
":",
"return",
"self",
".",
"block_id"
]
| Return an id which can be used on an html page as an id attr of an html element. It is currently also
persisted by some clients to identify blocks.
To make compatible with old Location object functionality. I don't believe this behavior fits at this
place, but I have no way to override. We should clearly define the purpose and restrictions of this
(e.g., I'm assuming periods are fine). | [
"Return",
"an",
"id",
"which",
"can",
"be",
"used",
"on",
"an",
"html",
"page",
"as",
"an",
"id",
"attr",
"of",
"an",
"html",
"element",
".",
"It",
"is",
"currently",
"also",
"persisted",
"by",
"some",
"clients",
"to",
"identify",
"blocks",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L930-L944 | train |
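The deprecated branch of `html_id` is just a None-filtering join, reduced here to a runnable line with hypothetical field values:

```python
# The None-filtering join from html_id, with made-up field values.
id_fields = ['i4x', 'edX', 'DemoX', 'html', 'intro_block', None]
html_id = '-'.join(v for v in id_fields if v is not None)
assert html_id == 'i4x-edX-DemoX-html-intro_block'
```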
edx/opaque-keys | opaque_keys/edx/locator.py | BlockUsageLocator.to_deprecated_son | def to_deprecated_son(self, prefix='', tag='i4x'):
"""
Returns a SON object that represents this location
"""
# This preserves the old SON keys ('tag', 'org', 'course', 'category', 'name', 'revision'),
# because that format was used to store data historically in mongo
# adding tag b/c deprecated form used it
son = SON({prefix + 'tag': tag})
for field_name in ('org', 'course'):
# Temporary filtering of run field because deprecated form left it out
son[prefix + field_name] = getattr(self.course_key, field_name)
for (dep_field_name, field_name) in [('category', 'block_type'), ('name', 'block_id')]:
son[prefix + dep_field_name] = getattr(self, field_name)
son[prefix + 'revision'] = self.course_key.branch
return son | python | def to_deprecated_son(self, prefix='', tag='i4x'):
"""
Returns a SON object that represents this location
"""
# This preserves the old SON keys ('tag', 'org', 'course', 'category', 'name', 'revision'),
# because that format was used to store data historically in mongo
# adding tag b/c deprecated form used it
son = SON({prefix + 'tag': tag})
for field_name in ('org', 'course'):
# Temporary filtering of run field because deprecated form left it out
son[prefix + field_name] = getattr(self.course_key, field_name)
for (dep_field_name, field_name) in [('category', 'block_type'), ('name', 'block_id')]:
son[prefix + dep_field_name] = getattr(self, field_name)
son[prefix + 'revision'] = self.course_key.branch
return son | [
"def",
"to_deprecated_son",
"(",
"self",
",",
"prefix",
"=",
"''",
",",
"tag",
"=",
"'i4x'",
")",
":",
"# This preserves the old SON keys ('tag', 'org', 'course', 'category', 'name', 'revision'),",
"# because that format was used to store data historically in mongo",
"# adding tag b/c deprecated form used it",
"son",
"=",
"SON",
"(",
"{",
"prefix",
"+",
"'tag'",
":",
"tag",
"}",
")",
"for",
"field_name",
"in",
"(",
"'org'",
",",
"'course'",
")",
":",
"# Temporary filtering of run field because deprecated form left it out",
"son",
"[",
"prefix",
"+",
"field_name",
"]",
"=",
"getattr",
"(",
"self",
".",
"course_key",
",",
"field_name",
")",
"for",
"(",
"dep_field_name",
",",
"field_name",
")",
"in",
"[",
"(",
"'category'",
",",
"'block_type'",
")",
",",
"(",
"'name'",
",",
"'block_id'",
")",
"]",
":",
"son",
"[",
"prefix",
"+",
"dep_field_name",
"]",
"=",
"getattr",
"(",
"self",
",",
"field_name",
")",
"son",
"[",
"prefix",
"+",
"'revision'",
"]",
"=",
"self",
".",
"course_key",
".",
"branch",
"return",
"son"
]
| Returns a SON object that represents this location | [
"Returns",
"a",
"SON",
"object",
"that",
"represents",
"this",
"location"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L996-L1012 | train |
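`SON` comes from the `bson` package bundled with pymongo; it is a dict subclass that preserves key order, which is why the record builds it field by field. A sketch, assuming bson is installed and with placeholder values:

```python
# SON usage sketch; assumes the bson package (shipped with pymongo) is
# installed, and the field values are placeholders.
from bson.son import SON

son = SON({'tag': 'i4x'})
son['org'] = 'edX'
son['course'] = 'DemoX'
son['category'] = 'html'
son['name'] = 'intro'
son['revision'] = None
print(list(son.keys()))  # insertion order preserved: tag, org, course, ...
```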
edx/opaque-keys | opaque_keys/edx/locator.py | BlockUsageLocator._from_deprecated_son | def _from_deprecated_son(cls, id_dict, run):
"""
Return the Location decoding this id_dict and run
"""
course_key = CourseLocator(
id_dict['org'],
id_dict['course'],
run,
id_dict['revision'],
deprecated=True,
)
return cls(course_key, id_dict['category'], id_dict['name'], deprecated=True) | python | def _from_deprecated_son(cls, id_dict, run):
"""
Return the Location decoding this id_dict and run
"""
course_key = CourseLocator(
id_dict['org'],
id_dict['course'],
run,
id_dict['revision'],
deprecated=True,
)
return cls(course_key, id_dict['category'], id_dict['name'], deprecated=True) | [
"def",
"_from_deprecated_son",
"(",
"cls",
",",
"id_dict",
",",
"run",
")",
":",
"course_key",
"=",
"CourseLocator",
"(",
"id_dict",
"[",
"'org'",
"]",
",",
"id_dict",
"[",
"'course'",
"]",
",",
"run",
",",
"id_dict",
"[",
"'revision'",
"]",
",",
"deprecated",
"=",
"True",
",",
")",
"return",
"cls",
"(",
"course_key",
",",
"id_dict",
"[",
"'category'",
"]",
",",
"id_dict",
"[",
"'name'",
"]",
",",
"deprecated",
"=",
"True",
")"
]
| Return the Location decoding this id_dict and run | [
"Return",
"the",
"Location",
"decoding",
"this",
"id_dict",
"and",
"run"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L1015-L1026 | train |
edx/opaque-keys | opaque_keys/edx/locator.py | LibraryUsageLocator._from_string | def _from_string(cls, serialized):
"""
Requests LibraryLocator to deserialize its part and then adds the local deserialization of block
"""
# Allow access to _from_string protected method
library_key = LibraryLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = LibraryLocator.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
block_type = parsed_parts.get('block_type')
if block_type is None:
raise InvalidKeyError(cls, serialized)
return cls(library_key, parsed_parts.get('block_type'), block_id) | python | def _from_string(cls, serialized):
"""
Requests LibraryLocator to deserialize its part and then adds the local deserialization of block
"""
# Allow access to _from_string protected method
library_key = LibraryLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = LibraryLocator.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
block_type = parsed_parts.get('block_type')
if block_type is None:
raise InvalidKeyError(cls, serialized)
return cls(library_key, parsed_parts.get('block_type'), block_id) | [
"def",
"_from_string",
"(",
"cls",
",",
"serialized",
")",
":",
"# Allow access to _from_string protected method",
"library_key",
"=",
"LibraryLocator",
".",
"_from_string",
"(",
"serialized",
")",
"# pylint: disable=protected-access",
"parsed_parts",
"=",
"LibraryLocator",
".",
"parse_url",
"(",
"serialized",
")",
"block_id",
"=",
"parsed_parts",
".",
"get",
"(",
"'block_id'",
",",
"None",
")",
"if",
"block_id",
"is",
"None",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"serialized",
")",
"block_type",
"=",
"parsed_parts",
".",
"get",
"(",
"'block_type'",
")",
"if",
"block_type",
"is",
"None",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"serialized",
")",
"return",
"cls",
"(",
"library_key",
",",
"parsed_parts",
".",
"get",
"(",
"'block_type'",
")",
",",
"block_id",
")"
]
| Requests LibraryLocator to deserialize its part and then adds the local deserialization of block | [
"Requests",
"LibraryLocator",
"to",
"deserialize",
"its",
"part",
"and",
"then",
"adds",
"the",
"local",
"deserialization",
"of",
"block"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L1087-L1103 | train |
edx/opaque-keys | opaque_keys/edx/locator.py | LibraryUsageLocator.for_branch | def for_branch(self, branch):
"""
Return a UsageLocator for the same block in a different branch of the library.
"""
return self.replace(library_key=self.library_key.for_branch(branch)) | python | def for_branch(self, branch):
"""
Return a UsageLocator for the same block in a different branch of the library.
"""
return self.replace(library_key=self.library_key.for_branch(branch)) | [
"def",
"for_branch",
"(",
"self",
",",
"branch",
")",
":",
"return",
"self",
".",
"replace",
"(",
"library_key",
"=",
"self",
".",
"library_key",
".",
"for_branch",
"(",
"branch",
")",
")"
]
| Return a UsageLocator for the same block in a different branch of the library. | [
"Return",
"a",
"UsageLocator",
"for",
"the",
"same",
"block",
"in",
"a",
"different",
"branch",
"of",
"the",
"library",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L1116-L1120 | train |
edx/opaque-keys | opaque_keys/edx/locator.py | LibraryUsageLocator.for_version | def for_version(self, version_guid):
"""
Return a UsageLocator for the same block in a different version of the library.
"""
return self.replace(library_key=self.library_key.for_version(version_guid)) | python | def for_version(self, version_guid):
"""
Return a UsageLocator for the same block in a different version of the library.
"""
return self.replace(library_key=self.library_key.for_version(version_guid)) | [
"def",
"for_version",
"(",
"self",
",",
"version_guid",
")",
":",
"return",
"self",
".",
"replace",
"(",
"library_key",
"=",
"self",
".",
"library_key",
".",
"for_version",
"(",
"version_guid",
")",
")"
]
| Return a UsageLocator for the same block in a different version of the library. | [
"Return",
"a",
"UsageLocator",
"for",
"the",
"same",
"block",
"in",
"a",
"different",
"version",
"of",
"the",
"library",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L1122-L1126 | train |
edx/opaque-keys | opaque_keys/edx/django/models.py | _strip_object | def _strip_object(key):
"""
Strips branch and version info if the given key supports those attributes.
"""
if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
return key.for_branch(None).version_agnostic()
else:
return key | python | def _strip_object(key):
"""
Strips branch and version info if the given key supports those attributes.
"""
if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
return key.for_branch(None).version_agnostic()
else:
return key | [
"def",
"_strip_object",
"(",
"key",
")",
":",
"if",
"hasattr",
"(",
"key",
",",
"'version_agnostic'",
")",
"and",
"hasattr",
"(",
"key",
",",
"'for_branch'",
")",
":",
"return",
"key",
".",
"for_branch",
"(",
"None",
")",
".",
"version_agnostic",
"(",
")",
"else",
":",
"return",
"key"
]
| Strips branch and version info if the given key supports those attributes. | [
"Strips",
"branch",
"and",
"version",
"info",
"if",
"the",
"given",
"key",
"supports",
"those",
"attributes",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/django/models.py#L58-L65 | train |
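`_strip_object` duck-types on the presence of two methods rather than on a key class. A self-contained model with an invented key type:

```python
# Duck-typed model of _strip_object; VersionedKey is invented for the demo.
class VersionedKey:
    def for_branch(self, branch):
        return self            # pretend the branch info was dropped
    def version_agnostic(self):
        return 'stripped-key'

def strip_object(key):
    if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
        return key.for_branch(None).version_agnostic()
    return key

assert strip_object(VersionedKey()) == 'stripped-key'
assert strip_object('plain-key') == 'plain-key'   # non-versioned keys pass through
```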
edx/opaque-keys | opaque_keys/edx/django/models.py | _strip_value | def _strip_value(value, lookup='exact'):
"""
Helper function to remove the branch and version information from the given value,
which could be a single object or a list.
"""
if lookup == 'in':
stripped_value = [_strip_object(el) for el in value]
else:
stripped_value = _strip_object(value)
return stripped_value | python | def _strip_value(value, lookup='exact'):
"""
Helper function to remove the branch and version information from the given value,
which could be a single object or a list.
"""
if lookup == 'in':
stripped_value = [_strip_object(el) for el in value]
else:
stripped_value = _strip_object(value)
return stripped_value | [
"def",
"_strip_value",
"(",
"value",
",",
"lookup",
"=",
"'exact'",
")",
":",
"if",
"lookup",
"==",
"'in'",
":",
"stripped_value",
"=",
"[",
"_strip_object",
"(",
"el",
")",
"for",
"el",
"in",
"value",
"]",
"else",
":",
"stripped_value",
"=",
"_strip_object",
"(",
"value",
")",
"return",
"stripped_value"
]
| Helper function to remove the branch and version information from the given value,
which could be a single object or a list. | [
"Helper",
"function",
"to",
"remove",
"the",
"branch",
"and",
"version",
"information",
"from",
"the",
"given",
"value",
"which",
"could",
"be",
"a",
"single",
"object",
"or",
"a",
"list",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/django/models.py#L68-L77 | train |
edx/opaque-keys | opaque_keys/edx/locations.py | LocationBase._deprecation_warning | def _deprecation_warning(cls):
"""Display a deprecation warning for the given cls"""
if issubclass(cls, Location):
warnings.warn(
"Location is deprecated! Please use locator.BlockUsageLocator",
DeprecationWarning,
stacklevel=3
)
elif issubclass(cls, AssetLocation):
warnings.warn(
"AssetLocation is deprecated! Please use locator.AssetLocator",
DeprecationWarning,
stacklevel=3
)
else:
warnings.warn(
"{} is deprecated!".format(cls),
DeprecationWarning,
stacklevel=3
) | python | def _deprecation_warning(cls):
"""Display a deprecation warning for the given cls"""
if issubclass(cls, Location):
warnings.warn(
"Location is deprecated! Please use locator.BlockUsageLocator",
DeprecationWarning,
stacklevel=3
)
elif issubclass(cls, AssetLocation):
warnings.warn(
"AssetLocation is deprecated! Please use locator.AssetLocator",
DeprecationWarning,
stacklevel=3
)
else:
warnings.warn(
"{} is deprecated!".format(cls),
DeprecationWarning,
stacklevel=3
) | [
"def",
"_deprecation_warning",
"(",
"cls",
")",
":",
"if",
"issubclass",
"(",
"cls",
",",
"Location",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Location is deprecated! Please use locator.BlockUsageLocator\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"3",
")",
"elif",
"issubclass",
"(",
"cls",
",",
"AssetLocation",
")",
":",
"warnings",
".",
"warn",
"(",
"\"AssetLocation is deprecated! Please use locator.AssetLocator\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"3",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"{} is deprecated!\"",
".",
"format",
"(",
"cls",
")",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"3",
")"
]
| Display a deprecation warning for the given cls | [
"Display",
"a",
"deprecation",
"warning",
"for",
"the",
"given",
"cls"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locations.py#L84-L103 | train |
edx/opaque-keys | opaque_keys/edx/locations.py | LocationBase._check_location_part | def _check_location_part(cls, val, regexp):
"""Deprecated. See CourseLocator._check_location_part"""
cls._deprecation_warning()
return CourseLocator._check_location_part(val, regexp) | python | def _check_location_part(cls, val, regexp):
"""Deprecated. See CourseLocator._check_location_part"""
cls._deprecation_warning()
return CourseLocator._check_location_part(val, regexp) | [
"def",
"_check_location_part",
"(",
"cls",
",",
"val",
",",
"regexp",
")",
":",
"cls",
".",
"_deprecation_warning",
"(",
")",
"return",
"CourseLocator",
".",
"_check_location_part",
"(",
"val",
",",
"regexp",
")"
]
| Deprecated. See CourseLocator._check_location_part | [
"Deprecated",
".",
"See",
"CourseLocator",
".",
"_check_location_part"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locations.py#L116-L119 | train |
edx/opaque-keys | opaque_keys/edx/locations.py | LocationBase._clean | def _clean(cls, value, invalid):
"""Deprecated. See BlockUsageLocator._clean"""
cls._deprecation_warning()
return BlockUsageLocator._clean(value, invalid) | python | def _clean(cls, value, invalid):
"""Deprecated. See BlockUsageLocator._clean"""
cls._deprecation_warning()
return BlockUsageLocator._clean(value, invalid) | [
"def",
"_clean",
"(",
"cls",
",",
"value",
",",
"invalid",
")",
":",
"cls",
".",
"_deprecation_warning",
"(",
")",
"return",
"BlockUsageLocator",
".",
"_clean",
"(",
"value",
",",
"invalid",
")"
]
| Deprecated. See BlockUsageLocator._clean | [
"Deprecated",
".",
"See",
"BlockUsageLocator",
".",
"_clean"
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locations.py#L122-L125 | train |
edx/opaque-keys | opaque_keys/edx/asides.py | _join_keys_v1 | def _join_keys_v1(left, right):
"""
Join two keys into a format separable by using _split_keys_v1.
"""
if left.endswith(':') or '::' in left:
raise ValueError("Can't join a left string ending in ':' or containing '::'")
return u"{}::{}".format(_encode_v1(left), _encode_v1(right)) | python | def _join_keys_v1(left, right):
"""
Join two keys into a format separable by using _split_keys_v1.
"""
if left.endswith(':') or '::' in left:
raise ValueError("Can't join a left string ending in ':' or containing '::'")
return u"{}::{}".format(_encode_v1(left), _encode_v1(right)) | [
"def",
"_join_keys_v1",
"(",
"left",
",",
"right",
")",
":",
"if",
"left",
".",
"endswith",
"(",
"':'",
")",
"or",
"'::'",
"in",
"left",
":",
"raise",
"ValueError",
"(",
"\"Can't join a left string ending in ':' or containing '::'\"",
")",
"return",
"u\"{}::{}\"",
".",
"format",
"(",
"_encode_v1",
"(",
"left",
")",
",",
"_encode_v1",
"(",
"right",
")",
")"
]
| Join two keys into a format separable by using _split_keys_v1. | [
"Join",
"two",
"keys",
"into",
"a",
"format",
"separable",
"by",
"using",
"_split_keys_v1",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/asides.py#L49-L55 | train |
edx/opaque-keys | opaque_keys/edx/asides.py | _split_keys_v1 | def _split_keys_v1(joined):
"""
Split two keys out a string created by _join_keys_v1.
"""
left, _, right = joined.partition('::')
return _decode_v1(left), _decode_v1(right) | python | def _split_keys_v1(joined):
"""
Split two keys out a string created by _join_keys_v1.
"""
left, _, right = joined.partition('::')
return _decode_v1(left), _decode_v1(right) | [
"def",
"_split_keys_v1",
"(",
"joined",
")",
":",
"left",
",",
"_",
",",
"right",
"=",
"joined",
".",
"partition",
"(",
"'::'",
")",
"return",
"_decode_v1",
"(",
"left",
")",
",",
"_decode_v1",
"(",
"right",
")"
]
| Split two keys out a string created by _join_keys_v1. | [
"Split",
"two",
"keys",
"out",
"a",
"string",
"created",
"by",
"_join_keys_v1",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/asides.py#L58-L63 | train |
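The two records above form a round-trip pair. Modelling `_encode_v1`/`_decode_v1` as the identity (an assumption; the real codecs escape special characters), the guard plus `partition` behave like this:

```python
# Round-trip sketch of the v1 join/split pair; _encode_v1/_decode_v1 are
# modelled as the identity here, which is an assumption.
def join_keys_v1(left, right):
    if left.endswith(':') or '::' in left:
        raise ValueError("Can't join a left string ending in ':' or containing '::'")
    return '{}::{}'.format(left, right)

def split_keys_v1(joined):
    left, _, right = joined.partition('::')
    return left, right

joined = join_keys_v1('block-v1:edX+DemoX', 'aside-usage-v1')
assert split_keys_v1(joined) == ('block-v1:edX+DemoX', 'aside-usage-v1')
```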
edx/opaque-keys | opaque_keys/edx/asides.py | _split_keys_v2 | def _split_keys_v2(joined):
"""
Split two keys out a string created by _join_keys_v2.
"""
left, _, right = joined.rpartition('::')
return _decode_v2(left), _decode_v2(right) | python | def _split_keys_v2(joined):
"""
Split two keys out a string created by _join_keys_v2.
"""
left, _, right = joined.rpartition('::')
return _decode_v2(left), _decode_v2(right) | [
"def",
"_split_keys_v2",
"(",
"joined",
")",
":",
"left",
",",
"_",
",",
"right",
"=",
"joined",
".",
"rpartition",
"(",
"'::'",
")",
"return",
"_decode_v2",
"(",
"left",
")",
",",
"_decode_v2",
"(",
"right",
")"
]
| Split two keys out a string created by _join_keys_v2. | [
"Split",
"two",
"keys",
"out",
"a",
"string",
"created",
"by",
"_join_keys_v2",
"."
]
| 9807168660c12e0551c8fdd58fd1bc6b0bcb0a54 | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/asides.py#L97-L102 | train |
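The only difference from the v1 splitter is `rpartition`, so the last `::` wins; that matters when the left half may itself contain `::`:

```python
# partition vs rpartition on the same joined string.
joined = 'left::with::colons::right'
print(joined.partition('::'))   # v1 split point: ('left', '::', 'with::colons::right')
print(joined.rpartition('::'))  # v2 split point: ('left::with::colons', '::', 'right')
```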
dbcli/athenacli | athenacli/completion_refresher.py | refresher | def refresher(name, refreshers=CompletionRefresher.refreshers):
"""Decorator to add the decorated function to the dictionary of
refreshers. Any function decorated with a @refresher will be executed as
part of the completion refresh routine."""
def wrapper(wrapped):
refreshers[name] = wrapped
return wrapped
return wrapper | python | def refresher(name, refreshers=CompletionRefresher.refreshers):
"""Decorator to add the decorated function to the dictionary of
refreshers. Any function decorated with a @refresher will be executed as
part of the completion refresh routine."""
def wrapper(wrapped):
refreshers[name] = wrapped
return wrapped
return wrapper | [
"def",
"refresher",
"(",
"name",
",",
"refreshers",
"=",
"CompletionRefresher",
".",
"refreshers",
")",
":",
"def",
"wrapper",
"(",
"wrapped",
")",
":",
"refreshers",
"[",
"name",
"]",
"=",
"wrapped",
"return",
"wrapped",
"return",
"wrapper"
]
| Decorator to add the decorated function to the dictionary of
refreshers. Any function decorated with a @refresher will be executed as
part of the completion refresh routine. | [
"Decorator",
"to",
"add",
"the",
"decorated",
"function",
"to",
"the",
"dictionary",
"of",
"refreshers",
".",
"Any",
"function",
"decorated",
"with",
"a"
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/completion_refresher.py#L86-L93 | train |
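The decorator-as-registry idiom above, reduced to a runnable sketch with a throwaway registry and a hypothetical refresher function:

```python
# Decorator-as-registry sketch; REFRESHERS and refresh_databases are
# throwaway names for the demonstration.
REFRESHERS = {}

def refresher(name, refreshers=REFRESHERS):
    def wrapper(wrapped):
        refreshers[name] = wrapped
        return wrapped
    return wrapper

@refresher('databases')
def refresh_databases(completer):
    completer.extend(['default', 'sampledb'])

assert REFRESHERS['databases'] is refresh_databases  # registered at import time
```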
dbcli/athenacli | athenacli/completion_refresher.py | CompletionRefresher.refresh | def refresh(self, executor, callbacks, completer_options=None):
"""Creates a SQLCompleter object and populates it with the relevant
completion suggestions in a background thread.
executor - SQLExecute object, used to extract the credentials to connect
to the database.
callbacks - A function or a list of functions to call after the thread
has completed the refresh. The newly created completion
object will be passed in as an argument to each callback.
completer_options - dict of options to pass to SQLCompleter.
"""
if completer_options is None:
completer_options = {}
if self.is_refreshing():
self._restart_refresh.set()
return [(None, None, None, 'Auto-completion refresh restarted.')]
else:
self._completer_thread = threading.Thread(
target=self._bg_refresh,
args=(executor, callbacks, completer_options),
name='completion_refresh')
self._completer_thread.setDaemon(True)
self._completer_thread.start()
return [(None, None, None,
'Auto-completion refresh started in the background.')] | python | def refresh(self, executor, callbacks, completer_options=None):
"""Creates a SQLCompleter object and populates it with the relevant
completion suggestions in a background thread.
executor - SQLExecute object, used to extract the credentials to connect
to the database.
callbacks - A function or a list of functions to call after the thread
has completed the refresh. The newly created completion
object will be passed in as an argument to each callback.
completer_options - dict of options to pass to SQLCompleter.
"""
if completer_options is None:
completer_options = {}
if self.is_refreshing():
self._restart_refresh.set()
return [(None, None, None, 'Auto-completion refresh restarted.')]
else:
self._completer_thread = threading.Thread(
target=self._bg_refresh,
args=(executor, callbacks, completer_options),
name='completion_refresh')
self._completer_thread.setDaemon(True)
self._completer_thread.start()
return [(None, None, None,
'Auto-completion refresh started in the background.')] | [
"def",
"refresh",
"(",
"self",
",",
"executor",
",",
"callbacks",
",",
"completer_options",
"=",
"None",
")",
":",
"if",
"completer_options",
"is",
"None",
":",
"completer_options",
"=",
"{",
"}",
"if",
"self",
".",
"is_refreshing",
"(",
")",
":",
"self",
".",
"_restart_refresh",
".",
"set",
"(",
")",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"'Auto-completion refresh restarted.'",
")",
"]",
"else",
":",
"self",
".",
"_completer_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_bg_refresh",
",",
"args",
"=",
"(",
"executor",
",",
"callbacks",
",",
"completer_options",
")",
",",
"name",
"=",
"'completion_refresh'",
")",
"self",
".",
"_completer_thread",
".",
"setDaemon",
"(",
"True",
")",
"self",
".",
"_completer_thread",
".",
"start",
"(",
")",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"'Auto-completion refresh started in the background.'",
")",
"]"
]
| Creates a SQLCompleter object and populates it with the relevant
completion suggestions in a background thread.
executor - SQLExecute object, used to extract the credentials to connect
to the database.
callbacks - A function or a list of functions to call after the thread
has completed the refresh. The newly created completion
object will be passed in as an argument to each callback.
completer_options - dict of options to pass to SQLCompleter. | [
"Creates",
"a",
"SQLCompleter",
"object",
"and",
"populates",
"it",
"with",
"the",
"relevant",
"completion",
"suggestions",
"in",
"a",
"background",
"thread",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/completion_refresher.py#L20-L45 | train |
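The start-or-restart decision in `refresh` hinges on whether the worker thread is alive. A stripped-down, timing-dependent model of just that branch:

```python
# Stripped-down model of the start-vs-restart branch; timing-dependent,
# so treat the second print as the likely outcome, not a guarantee.
import threading

restart = threading.Event()
worker = None

def refresh(target):
    global worker
    if worker is not None and worker.is_alive():
        restart.set()
        return 'refresh restarted'
    worker = threading.Thread(target=target, name='completion_refresh', daemon=True)
    worker.start()
    return 'refresh started'

print(refresh(lambda: restart.wait(1.0)))  # 'refresh started'
print(refresh(lambda: None))               # 'refresh restarted' while the first runs
```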
dbcli/athenacli | athenacli/packages/special/utils.py | handle_cd_command | def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1] if len(tokens) > 1 else None
if not directory:
return False, "No folder name was provided."
try:
os.chdir(directory)
subprocess.call(['pwd'])
return True, None
except OSError as e:
return False, e.strerror | python | def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1] if len(tokens) > 1 else None
if not directory:
return False, "No folder name was provided."
try:
os.chdir(directory)
subprocess.call(['pwd'])
return True, None
except OSError as e:
return False, e.strerror | [
"def",
"handle_cd_command",
"(",
"arg",
")",
":",
"CD_CMD",
"=",
"'cd'",
"tokens",
"=",
"arg",
".",
"split",
"(",
"CD_CMD",
"+",
"' '",
")",
"directory",
"=",
"tokens",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"tokens",
")",
">",
"1",
"else",
"None",
"if",
"not",
"directory",
":",
"return",
"False",
",",
"\"No folder name was provided.\"",
"try",
":",
"os",
".",
"chdir",
"(",
"directory",
")",
"subprocess",
".",
"call",
"(",
"[",
"'pwd'",
"]",
")",
"return",
"True",
",",
"None",
"except",
"OSError",
"as",
"e",
":",
"return",
"False",
",",
"e",
".",
"strerror"
]
| Handles a `cd` shell command by calling python's os.chdir. | [
"Handles",
"a",
"cd",
"shell",
"command",
"by",
"calling",
"python",
"s",
"os",
".",
"chdir",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/utils.py#L5-L17 | train |
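The success/failure contract of the cd handler, condensed; the directory names are placeholders:

```python
# Condensed cd handler; '.' and '/no/such/dir' are placeholder paths.
import os

def handle_cd(directory):
    try:
        os.chdir(directory)
        return True, None
    except OSError as exc:
        return False, exc.strerror

ok, err = handle_cd('.')
assert ok and err is None
ok, err = handle_cd('/no/such/dir')
assert not ok and err          # e.g. 'No such file or directory'
```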
dbcli/athenacli | athenacli/packages/special/iocommands.py | get_editor_query | def get_editor_query(sql):
"""Get the query part of an editor command."""
sql = sql.strip()
# The reason we can't simply do .strip('\e') is that it strips characters,
# not a substring. So it'll strip "e" in the end of the sql also!
# Ex: "select * from style\e" -> "select * from styl".
pattern = re.compile('(^\\\e|\\\e$)')
while pattern.search(sql):
sql = pattern.sub('', sql)
return sql | python | def get_editor_query(sql):
"""Get the query part of an editor command."""
sql = sql.strip()
# The reason we can't simply do .strip('\e') is that it strips characters,
# not a substring. So it'll strip "e" in the end of the sql also!
# Ex: "select * from style\e" -> "select * from styl".
pattern = re.compile('(^\\\e|\\\e$)')
while pattern.search(sql):
sql = pattern.sub('', sql)
return sql | [
"def",
"get_editor_query",
"(",
"sql",
")",
":",
"sql",
"=",
"sql",
".",
"strip",
"(",
")",
"# The reason we can't simply do .strip('\\e') is that it strips characters,",
"# not a substring. So it'll strip \"e\" in the end of the sql also!",
"# Ex: \"select * from style\\e\" -> \"select * from styl\".",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'(^\\\\\\e|\\\\\\e$)'",
")",
"while",
"pattern",
".",
"search",
"(",
"sql",
")",
":",
"sql",
"=",
"pattern",
".",
"sub",
"(",
"''",
",",
"sql",
")",
"return",
"sql"
]
| Get the query part of an editor command. | [
"Get",
"the",
"query",
"part",
"of",
"an",
"editor",
"command",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/iocommands.py#L108-L119 | train |
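The comment in the record explains why a regex loop is used instead of `str.strip('\\e')`; the sketch below makes both behaviours observable:

```python
# Demonstrates the strip() pitfall the record's comment warns about.
import re

pattern = re.compile(r'(^\\e|\\e$)')

def strip_editor_marker(sql):
    while pattern.search(sql):
        sql = pattern.sub('', sql)
    return sql

assert strip_editor_marker(r'select * from style\e') == 'select * from style'
# str.strip removes *characters*, so it also eats the trailing 'e' of 'style':
assert r'select * from style\e'.strip('\\e') == 'select * from styl'
```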
dbcli/athenacli | athenacli/packages/special/iocommands.py | delete_favorite_query | def delete_favorite_query(arg, **_):
"""Delete an existing favorite query.
"""
usage = 'Syntax: \\fd name.\n\n' + favoritequeries.usage
if not arg:
return [(None, None, None, usage)]
status = favoritequeries.delete(arg)
return [(None, None, None, status)] | python | def delete_favorite_query(arg, **_):
"""Delete an existing favorite query.
"""
usage = 'Syntax: \\fd name.\n\n' + favoritequeries.usage
if not arg:
return [(None, None, None, usage)]
status = favoritequeries.delete(arg)
return [(None, None, None, status)] | [
"def",
"delete_favorite_query",
"(",
"arg",
",",
"*",
"*",
"_",
")",
":",
"usage",
"=",
"'Syntax: \\\\fd name.\\n\\n'",
"+",
"favoritequeries",
".",
"usage",
"if",
"not",
"arg",
":",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"usage",
")",
"]",
"status",
"=",
"favoritequeries",
".",
"delete",
"(",
"arg",
")",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"status",
")",
"]"
]
| Delete an existing favorite query. | [
"Delete",
"an",
"existing",
"favorite",
"query",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/iocommands.py#L239-L248 | train |
dbcli/athenacli | athenacli/packages/special/iocommands.py | execute_system_command | def execute_system_command(arg, **_):
"""Execute a system shell command."""
usage = "Syntax: system [command].\n"
if not arg:
return [(None, None, None, usage)]
try:
command = arg.strip()
if command.startswith('cd'):
ok, error_message = handle_cd_command(arg)
if not ok:
return [(None, None, None, error_message)]
return [(None, None, None, '')]
args = arg.split(' ')
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
response = output if not error else error
# Python 3 returns bytes. This needs to be decoded to a string.
if isinstance(response, bytes):
encoding = locale.getpreferredencoding(False)
response = response.decode(encoding)
return [(None, None, None, response)]
except OSError as e:
return [(None, None, None, 'OSError: %s' % e.strerror)] | python | def execute_system_command(arg, **_):
"""Execute a system shell command."""
usage = "Syntax: system [command].\n"
if not arg:
return [(None, None, None, usage)]
try:
command = arg.strip()
if command.startswith('cd'):
ok, error_message = handle_cd_command(arg)
if not ok:
return [(None, None, None, error_message)]
return [(None, None, None, '')]
args = arg.split(' ')
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
response = output if not error else error
# Python 3 returns bytes. This needs to be decoded to a string.
if isinstance(response, bytes):
encoding = locale.getpreferredencoding(False)
response = response.decode(encoding)
return [(None, None, None, response)]
except OSError as e:
return [(None, None, None, 'OSError: %s' % e.strerror)] | [
"def",
"execute_system_command",
"(",
"arg",
",",
"*",
"*",
"_",
")",
":",
"usage",
"=",
"\"Syntax: system [command].\\n\"",
"if",
"not",
"arg",
":",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"usage",
")",
"]",
"try",
":",
"command",
"=",
"arg",
".",
"strip",
"(",
")",
"if",
"command",
".",
"startswith",
"(",
"'cd'",
")",
":",
"ok",
",",
"error_message",
"=",
"handle_cd_command",
"(",
"arg",
")",
"if",
"not",
"ok",
":",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"error_message",
")",
"]",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"''",
")",
"]",
"args",
"=",
"arg",
".",
"split",
"(",
"' '",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"output",
",",
"error",
"=",
"process",
".",
"communicate",
"(",
")",
"response",
"=",
"output",
"if",
"not",
"error",
"else",
"error",
"# Python 3 returns bytes. This needs to be decoded to a string.",
"if",
"isinstance",
"(",
"response",
",",
"bytes",
")",
":",
"encoding",
"=",
"locale",
".",
"getpreferredencoding",
"(",
"False",
")",
"response",
"=",
"response",
".",
"decode",
"(",
"encoding",
")",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"response",
")",
"]",
"except",
"OSError",
"as",
"e",
":",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"'OSError: %s'",
"%",
"e",
".",
"strerror",
")",
"]"
]
| Execute a system shell command. | [
"Execute",
"a",
"system",
"shell",
"command",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/iocommands.py#L253-L280 | train |
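Usage sketch (illustrative, not part of the record above): the bytes-vs-str decode step is the easy part to get wrong, so here it is reproduced standalone with only the standard library; 'echo' is just a sample command.
import locale
import subprocess

# Popen returns bytes on Python 3; decode with the locale's preferred
# encoding, exactly as execute_system_command() does.
proc = subprocess.Popen(['echo', 'hello'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = proc.communicate()
response = output if not error else error
if isinstance(response, bytes):
    response = response.decode(locale.getpreferredencoding(False))
print(response)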
dbcli/athenacli | athenacli/main.py | need_completion_refresh | def need_completion_refresh(queries):
"""Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db."""
tokens = {
'use', '\\u',
'create',
'drop'
}
for query in sqlparse.split(queries):
try:
first_token = query.split()[0]
if first_token.lower() in tokens:
return True
except Exception:
return False | python | def need_completion_refresh(queries):
"""Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db."""
tokens = {
'use', '\\u',
'create',
'drop'
}
for query in sqlparse.split(queries):
try:
first_token = query.split()[0]
if first_token.lower() in tokens:
return True
except Exception:
return False | [
"def",
"need_completion_refresh",
"(",
"queries",
")",
":",
"tokens",
"=",
"{",
"'use'",
",",
"'\\\\u'",
",",
"'create'",
",",
"'drop'",
"}",
"for",
"query",
"in",
"sqlparse",
".",
"split",
"(",
"queries",
")",
":",
"try",
":",
"first_token",
"=",
"query",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"first_token",
".",
"lower",
"(",
")",
"in",
"tokens",
":",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
]
| Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db. | [
"Determines",
"if",
"the",
"completion",
"needs",
"a",
"refresh",
"by",
"checking",
"if",
"the",
"sql",
"statement",
"is",
"an",
"alter",
"create",
"drop",
"or",
"change",
"db",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L607-L622 | train |
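A standalone sketch of the same keyword check (assumes the third-party sqlparse package; illustrative only):
import sqlparse  # third-party, assumed installed

REFRESH_TOKENS = {'use', '\\u', 'create', 'drop'}

def needs_refresh(queries):
    # A completer refresh is needed when any statement begins with a
    # keyword that changes the schema or the current database.
    for query in sqlparse.split(queries):
        words = query.split()
        if words and words[0].lower() in REFRESH_TOKENS:
            return True
    return False

print(needs_refresh('create table t (a int); select 1'))  # True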
dbcli/athenacli | athenacli/main.py | is_mutating | def is_mutating(status):
"""Determines if the statement is mutating based on the status."""
if not status:
return False
mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
'replace', 'truncate', 'load'])
return status.split(None, 1)[0].lower() in mutating | python | def is_mutating(status):
"""Determines if the statement is mutating based on the status."""
if not status:
return False
mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
'replace', 'truncate', 'load'])
return status.split(None, 1)[0].lower() in mutating | [
"def",
"is_mutating",
"(",
"status",
")",
":",
"if",
"not",
"status",
":",
"return",
"False",
"mutating",
"=",
"set",
"(",
"[",
"'insert'",
",",
"'update'",
",",
"'delete'",
",",
"'alter'",
",",
"'create'",
",",
"'drop'",
",",
"'replace'",
",",
"'truncate'",
",",
"'load'",
"]",
")",
"return",
"status",
".",
"split",
"(",
"None",
",",
"1",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"mutating"
]
| Determines if the statement is mutating based on the status. | [
"Determines",
"if",
"the",
"statement",
"is",
"mutating",
"based",
"on",
"the",
"status",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L625-L632 | train |
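The same first-word test, isolated as a self-contained snippet (stdlib only; the sample statuses are invented):
MUTATING = {'insert', 'update', 'delete', 'alter', 'create', 'drop',
            'replace', 'truncate', 'load'}

def is_mutating(status):
    # Only the first word of the status string matters; None/'' is safe.
    if not status:
        return False
    return status.split(None, 1)[0].lower() in MUTATING

assert is_mutating('INSERT INTO t VALUES (1)')
assert not is_mutating('select * from t')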
dbcli/athenacli | athenacli/main.py | AthenaCli.change_prompt_format | def change_prompt_format(self, arg, **_):
"""
Change the prompt format.
"""
if not arg:
message = 'Missing required argument, format.'
return [(None, None, None, message)]
self.prompt = self.get_prompt(arg)
return [(None, None, None, "Changed prompt format to %s" % arg)] | python | def change_prompt_format(self, arg, **_):
"""
Change the prompt format.
"""
if not arg:
message = 'Missing required argument, format.'
return [(None, None, None, message)]
self.prompt = self.get_prompt(arg)
return [(None, None, None, "Changed prompt format to %s" % arg)] | [
"def",
"change_prompt_format",
"(",
"self",
",",
"arg",
",",
"*",
"*",
"_",
")",
":",
"if",
"not",
"arg",
":",
"message",
"=",
"'Missing required argument, format.'",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"message",
")",
"]",
"self",
".",
"prompt",
"=",
"self",
".",
"get_prompt",
"(",
"arg",
")",
"return",
"[",
"(",
"None",
",",
"None",
",",
"None",
",",
"\"Changed prompt format to %s\"",
"%",
"arg",
")",
"]"
]
| Change the prompt format. | [
"Change",
"the",
"prompt",
"format",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L184-L193 | train |
dbcli/athenacli | athenacli/main.py | AthenaCli.get_output_margin | def get_output_margin(self, status=None):
"""Get the output margin (number of rows for the prompt, footer and
timing message)."""
margin = self.get_reserved_space() + self.get_prompt(self.prompt).count('\n') + 1
if special.is_timing_enabled():
margin += 1
if status:
margin += 1 + status.count('\n')
return margin | python | def get_output_margin(self, status=None):
"""Get the output margin (number of rows for the prompt, footer and
timing message)."""
margin = self.get_reserved_space() + self.get_prompt(self.prompt).count('\n') + 1
if special.is_timing_enabled():
margin += 1
if status:
margin += 1 + status.count('\n')
return margin | [
"def",
"get_output_margin",
"(",
"self",
",",
"status",
"=",
"None",
")",
":",
"margin",
"=",
"self",
".",
"get_reserved_space",
"(",
")",
"+",
"self",
".",
"get_prompt",
"(",
"self",
".",
"prompt",
")",
".",
"count",
"(",
"'\\n'",
")",
"+",
"1",
"if",
"special",
".",
"is_timing_enabled",
"(",
")",
":",
"margin",
"+=",
"1",
"if",
"status",
":",
"margin",
"+=",
"1",
"+",
"status",
".",
"count",
"(",
"'\\n'",
")",
"return",
"margin"
]
| Get the output margin (number of rows for the prompt, footer and
timing message). | [
"Get",
"the",
"output",
"margin",
"(",
"number",
"of",
"rows",
"for",
"the",
"prompt",
"footer",
"and",
"timing",
"message",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L362-L371 | train |
dbcli/athenacli | athenacli/main.py | AthenaCli.output | def output(self, output, status=None):
"""Output text to stdout or a pager command.
The status text is not outputted to pager or files.
The message will be logged in the audit log, if enabled. The
message will be written to the tee file, if enabled. The
message will be written to the output file, if enabled.
"""
if output:
size = self.cli.output.get_size()
margin = self.get_output_margin(status)
fits = True
buf = []
output_via_pager = self.explicit_pager and special.is_pager_enabled()
for i, line in enumerate(output, 1):
special.write_tee(line)
special.write_once(line)
if fits or output_via_pager:
# buffering
buf.append(line)
if len(line) > size.columns or i > (size.rows - margin):
fits = False
if not self.explicit_pager and special.is_pager_enabled():
# doesn't fit, use pager
output_via_pager = True
if not output_via_pager:
# doesn't fit, flush buffer
for line in buf:
click.secho(line)
buf = []
else:
click.secho(line)
if buf:
if output_via_pager:
# sadly click.echo_via_pager doesn't accept generators
click.echo_via_pager("\n".join(buf))
else:
for line in buf:
click.secho(line)
if status:
click.secho(status) | python | def output(self, output, status=None):
"""Output text to stdout or a pager command.
The status text is not outputted to pager or files.
The message will be logged in the audit log, if enabled. The
message will be written to the tee file, if enabled. The
message will be written to the output file, if enabled.
"""
if output:
size = self.cli.output.get_size()
margin = self.get_output_margin(status)
fits = True
buf = []
output_via_pager = self.explicit_pager and special.is_pager_enabled()
for i, line in enumerate(output, 1):
special.write_tee(line)
special.write_once(line)
if fits or output_via_pager:
# buffering
buf.append(line)
if len(line) > size.columns or i > (size.rows - margin):
fits = False
if not self.explicit_pager and special.is_pager_enabled():
# doesn't fit, use pager
output_via_pager = True
if not output_via_pager:
# doesn't fit, flush buffer
for line in buf:
click.secho(line)
buf = []
else:
click.secho(line)
if buf:
if output_via_pager:
# sadly click.echo_via_pager doesn't accept generators
click.echo_via_pager("\n".join(buf))
else:
for line in buf:
click.secho(line)
if status:
click.secho(status) | [
"def",
"output",
"(",
"self",
",",
"output",
",",
"status",
"=",
"None",
")",
":",
"if",
"output",
":",
"size",
"=",
"self",
".",
"cli",
".",
"output",
".",
"get_size",
"(",
")",
"margin",
"=",
"self",
".",
"get_output_margin",
"(",
"status",
")",
"fits",
"=",
"True",
"buf",
"=",
"[",
"]",
"output_via_pager",
"=",
"self",
".",
"explicit_pager",
"and",
"special",
".",
"is_pager_enabled",
"(",
")",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"output",
",",
"1",
")",
":",
"special",
".",
"write_tee",
"(",
"line",
")",
"special",
".",
"write_once",
"(",
"line",
")",
"if",
"fits",
"or",
"output_via_pager",
":",
"# buffering",
"buf",
".",
"append",
"(",
"line",
")",
"if",
"len",
"(",
"line",
")",
">",
"size",
".",
"columns",
"or",
"i",
">",
"(",
"size",
".",
"rows",
"-",
"margin",
")",
":",
"fits",
"=",
"False",
"if",
"not",
"self",
".",
"explicit_pager",
"and",
"special",
".",
"is_pager_enabled",
"(",
")",
":",
"# doesn't fit, use pager",
"output_via_pager",
"=",
"True",
"if",
"not",
"output_via_pager",
":",
"# doesn't fit, flush buffer",
"for",
"line",
"in",
"buf",
":",
"click",
".",
"secho",
"(",
"line",
")",
"buf",
"=",
"[",
"]",
"else",
":",
"click",
".",
"secho",
"(",
"line",
")",
"if",
"buf",
":",
"if",
"output_via_pager",
":",
"# sadly click.echo_via_pager doesn't accept generators",
"click",
".",
"echo_via_pager",
"(",
"\"\\n\"",
".",
"join",
"(",
"buf",
")",
")",
"else",
":",
"for",
"line",
"in",
"buf",
":",
"click",
".",
"secho",
"(",
"line",
")",
"if",
"status",
":",
"click",
".",
"secho",
"(",
"status",
")"
]
| Output text to stdout or a pager command.
The status text is not outputted to pager or files.
The message will be logged in the audit log, if enabled. The
message will be written to the tee file, if enabled. The
message will be written to the output file, if enabled. | [
"Output",
"text",
"to",
"stdout",
"or",
"a",
"pager",
"command",
".",
"The",
"status",
"text",
"is",
"not",
"outputted",
"to",
"pager",
"or",
"files",
".",
"The",
"message",
"will",
"be",
"logged",
"in",
"the",
"audit",
"log",
"if",
"enabled",
".",
"The",
"message",
"will",
"be",
"written",
"to",
"the",
"tee",
"file",
"if",
"enabled",
".",
"The",
"message",
"will",
"be",
"written",
"to",
"the",
"output",
"file",
"if",
"enabled",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L373-L418 | train |
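The fits-on-screen condition buried in output() can be reproduced standalone; a rough sketch using the standard library (the margin value here is an assumption):
import shutil

def fits_on_screen(lines, margin=3):
    # A result "fits" when no line is wider than the terminal and the
    # line count stays within the rows left after the reserved margin,
    # mirroring the buffering condition in output().
    size = shutil.get_terminal_size()
    return (all(len(line) <= size.columns for line in lines)
            and len(lines) <= size.lines - margin)

print(fits_on_screen(['a', 'b', 'c']))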
dbcli/athenacli | athenacli/main.py | AthenaCli._on_completions_refreshed | def _on_completions_refreshed(self, new_completer):
"""Swap the completer object in cli with the newly created completer.
"""
with self._completer_lock:
self.completer = new_completer
# When cli is first launched we call refresh_completions before
# instantiating the cli object. So it is necessary to check if cli
# exists before trying the replace the completer object in cli.
if self.cli:
self.cli.current_buffer.completer = new_completer
if self.cli:
# After refreshing, redraw the CLI to clear the statusbar
# "Refreshing completions..." indicator
self.cli.request_redraw() | python | def _on_completions_refreshed(self, new_completer):
"""Swap the completer object in cli with the newly created completer.
"""
with self._completer_lock:
self.completer = new_completer
# When cli is first launched we call refresh_completions before
# instantiating the cli object. So it is necessary to check if cli
# exists before trying the replace the completer object in cli.
if self.cli:
self.cli.current_buffer.completer = new_completer
if self.cli:
# After refreshing, redraw the CLI to clear the statusbar
# "Refreshing completions..." indicator
self.cli.request_redraw() | [
"def",
"_on_completions_refreshed",
"(",
"self",
",",
"new_completer",
")",
":",
"with",
"self",
".",
"_completer_lock",
":",
"self",
".",
"completer",
"=",
"new_completer",
"# When cli is first launched we call refresh_completions before",
"# instantiating the cli object. So it is necessary to check if cli",
"# exists before trying the replace the completer object in cli.",
"if",
"self",
".",
"cli",
":",
"self",
".",
"cli",
".",
"current_buffer",
".",
"completer",
"=",
"new_completer",
"if",
"self",
".",
"cli",
":",
"# After refreshing, redraw the CLI to clear the statusbar",
"# \"Refreshing completions...\" indicator",
"self",
".",
"cli",
".",
"request_redraw",
"(",
")"
]
| Swap the completer object in cli with the newly created completer. | [
"Swap",
"the",
"completer",
"object",
"in",
"cli",
"with",
"the",
"newly",
"created",
"completer",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L497-L511 | train |
dbcli/athenacli | athenacli/main.py | AthenaCli.get_reserved_space | def get_reserved_space(self):
"""Get the number of lines to reserve for the completion menu."""
reserved_space_ratio = .45
max_reserved_space = 8
_, height = click.get_terminal_size()
return min(int(round(height * reserved_space_ratio)), max_reserved_space) | python | def get_reserved_space(self):
"""Get the number of lines to reserve for the completion menu."""
reserved_space_ratio = .45
max_reserved_space = 8
_, height = click.get_terminal_size()
return min(int(round(height * reserved_space_ratio)), max_reserved_space) | [
"def",
"get_reserved_space",
"(",
"self",
")",
":",
"reserved_space_ratio",
"=",
".45",
"max_reserved_space",
"=",
"8",
"_",
",",
"height",
"=",
"click",
".",
"get_terminal_size",
"(",
")",
"return",
"min",
"(",
"int",
"(",
"round",
"(",
"height",
"*",
"reserved_space_ratio",
")",
")",
",",
"max_reserved_space",
")"
]
| Get the number of lines to reserve for the completion menu. | [
"Get",
"the",
"number",
"of",
"lines",
"to",
"reserve",
"for",
"the",
"completion",
"menu",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L595-L600 | train |
dbcli/athenacli | athenacli/completer.py | AthenaCompleter.find_matches | def find_matches(text, collection, start_only=False, fuzzy=True, casing=None):
"""Find completion matches for the given text.
Given the user's input text and a collection of available
completions, find completions matching the last word of the
text.
If `start_only` is True, the text will match an available
completion only at the beginning. Otherwise, a completion is
considered a match if the text appears anywhere within it.
yields prompt_toolkit Completion instances for any matches found
in the collection of available completions.
"""
last = last_word(text, include='most_punctuations')
text = last.lower()
completions = []
if fuzzy:
regex = '.*?'.join(map(escape, text))
pat = compile('(%s)' % regex)
for item in sorted(collection):
r = pat.search(item.lower())
if r:
completions.append((len(r.group()), r.start(), item))
else:
match_end_limit = len(text) if start_only else None
for item in sorted(collection):
match_point = item.lower().find(text, 0, match_end_limit)
if match_point >= 0:
completions.append((len(text), match_point, item))
if casing == 'auto':
casing = 'lower' if last and last[-1].islower() else 'upper'
def apply_case(kw):
if casing == 'upper':
return kw.upper()
return kw.lower()
return (Completion(z if casing is None else apply_case(z), -len(text))
for x, y, z in sorted(completions)) | python | def find_matches(text, collection, start_only=False, fuzzy=True, casing=None):
"""Find completion matches for the given text.
Given the user's input text and a collection of available
completions, find completions matching the last word of the
text.
If `start_only` is True, the text will match an available
completion only at the beginning. Otherwise, a completion is
considered a match if the text appears anywhere within it.
yields prompt_toolkit Completion instances for any matches found
in the collection of available completions.
"""
last = last_word(text, include='most_punctuations')
text = last.lower()
completions = []
if fuzzy:
regex = '.*?'.join(map(escape, text))
pat = compile('(%s)' % regex)
for item in sorted(collection):
r = pat.search(item.lower())
if r:
completions.append((len(r.group()), r.start(), item))
else:
match_end_limit = len(text) if start_only else None
for item in sorted(collection):
match_point = item.lower().find(text, 0, match_end_limit)
if match_point >= 0:
completions.append((len(text), match_point, item))
if casing == 'auto':
casing = 'lower' if last and last[-1].islower() else 'upper'
def apply_case(kw):
if casing == 'upper':
return kw.upper()
return kw.lower()
return (Completion(z if casing is None else apply_case(z), -len(text))
for x, y, z in sorted(completions)) | [
"def",
"find_matches",
"(",
"text",
",",
"collection",
",",
"start_only",
"=",
"False",
",",
"fuzzy",
"=",
"True",
",",
"casing",
"=",
"None",
")",
":",
"last",
"=",
"last_word",
"(",
"text",
",",
"include",
"=",
"'most_punctuations'",
")",
"text",
"=",
"last",
".",
"lower",
"(",
")",
"completions",
"=",
"[",
"]",
"if",
"fuzzy",
":",
"regex",
"=",
"'.*?'",
".",
"join",
"(",
"map",
"(",
"escape",
",",
"text",
")",
")",
"pat",
"=",
"compile",
"(",
"'(%s)'",
"%",
"regex",
")",
"for",
"item",
"in",
"sorted",
"(",
"collection",
")",
":",
"r",
"=",
"pat",
".",
"search",
"(",
"item",
".",
"lower",
"(",
")",
")",
"if",
"r",
":",
"completions",
".",
"append",
"(",
"(",
"len",
"(",
"r",
".",
"group",
"(",
")",
")",
",",
"r",
".",
"start",
"(",
")",
",",
"item",
")",
")",
"else",
":",
"match_end_limit",
"=",
"len",
"(",
"text",
")",
"if",
"start_only",
"else",
"None",
"for",
"item",
"in",
"sorted",
"(",
"collection",
")",
":",
"match_point",
"=",
"item",
".",
"lower",
"(",
")",
".",
"find",
"(",
"text",
",",
"0",
",",
"match_end_limit",
")",
"if",
"match_point",
">=",
"0",
":",
"completions",
".",
"append",
"(",
"(",
"len",
"(",
"text",
")",
",",
"match_point",
",",
"item",
")",
")",
"if",
"casing",
"==",
"'auto'",
":",
"casing",
"=",
"'lower'",
"if",
"last",
"and",
"last",
"[",
"-",
"1",
"]",
".",
"islower",
"(",
")",
"else",
"'upper'",
"def",
"apply_case",
"(",
"kw",
")",
":",
"if",
"casing",
"==",
"'upper'",
":",
"return",
"kw",
".",
"upper",
"(",
")",
"return",
"kw",
".",
"lower",
"(",
")",
"return",
"(",
"Completion",
"(",
"z",
"if",
"casing",
"is",
"None",
"else",
"apply_case",
"(",
"z",
")",
",",
"-",
"len",
"(",
"text",
")",
")",
"for",
"x",
",",
"y",
",",
"z",
"in",
"sorted",
"(",
"completions",
")",
")"
]
| Find completion matches for the given text.
Given the user's input text and a collection of available
completions, find completions matching the last word of the
text.
If `start_only` is True, the text will match an available
completion only at the beginning. Otherwise, a completion is
considered a match if the text appears anywhere within it.
yields prompt_toolkit Completion instances for any matches found
in the collection of available completions. | [
"Find",
"completion",
"matches",
"for",
"the",
"given",
"text",
".",
"Given",
"the",
"user",
"s",
"input",
"text",
"and",
"a",
"collection",
"of",
"available",
"completions",
"find",
"completions",
"matching",
"the",
"last",
"word",
"of",
"the",
"text",
".",
"If",
"start_only",
"is",
"True",
"the",
"text",
"will",
"match",
"an",
"available",
"completion",
"only",
"at",
"the",
"beginning",
".",
"Otherwise",
"a",
"completion",
"is",
"considered",
"a",
"match",
"if",
"the",
"text",
"appears",
"anywhere",
"within",
"it",
".",
"yields",
"prompt_toolkit",
"Completion",
"instances",
"for",
"any",
"matches",
"found",
"in",
"the",
"collection",
"of",
"available",
"completions",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/completer.py#L157-L196 | train |
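The fuzzy branch of find_matches() boils down to a lazy regex built from the escaped input characters; a minimal stdlib-only sketch of that ranking:
from re import compile, escape

def fuzzy_rank(text, collection):
    # 'sl' becomes the pattern '(s.*?l)'; shorter and earlier matches
    # sort first, like the (match length, start, item) tuples above.
    pat = compile('(%s)' % '.*?'.join(map(escape, text.lower())))
    scored = []
    for item in sorted(collection):
        m = pat.search(item.lower())
        if m:
            scored.append((len(m.group()), m.start(), item))
    return [item for _, _, item in sorted(scored)]

print(fuzzy_rank('sl', ['select', 'insert', 'isnull']))  # ['select', 'isnull']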
dbcli/athenacli | athenacli/config.py | log | def log(logger, level, message):
"""Logs message to stderr if logging isn't initialized."""
if logger.parent.name != 'root':
logger.log(level, message)
else:
print(message, file=sys.stderr) | python | def log(logger, level, message):
"""Logs message to stderr if logging isn't initialized."""
if logger.parent.name != 'root':
logger.log(level, message)
else:
print(message, file=sys.stderr) | [
"def",
"log",
"(",
"logger",
",",
"level",
",",
"message",
")",
":",
"if",
"logger",
".",
"parent",
".",
"name",
"!=",
"'root'",
":",
"logger",
".",
"log",
"(",
"level",
",",
"message",
")",
"else",
":",
"print",
"(",
"message",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
]
| Logs message to stderr if logging isn't initialized. | [
"Logs",
"message",
"to",
"stderr",
"if",
"logging",
"isn",
"t",
"initialized",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/config.py#L42-L48 | train |
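Why the parent-name check works: until a named ancestor logger has been configured, logger.parent resolves to the root logger. A small sketch (the logger name is an assumption):
import logging
import sys

def log(logger, level, message):
    # No configured ancestor yet -> parent is the root logger, so fall
    # back to stderr instead of a not-yet-initialized handler chain.
    if logger.parent.name != 'root':
        logger.log(level, message)
    else:
        print(message, file=sys.stderr)

log(logging.getLogger('athenacli.config'), logging.WARNING, 'no config found')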
dbcli/athenacli | athenacli/config.py | read_config_file | def read_config_file(f):
"""Read a config file."""
if isinstance(f, basestring):
f = os.path.expanduser(f)
try:
config = ConfigObj(f, interpolation=False, encoding='utf8')
except ConfigObjError as e:
log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file "
"'{1}'.".format(e.line_number, f))
log(LOGGER, logging.ERROR, "Using successfully parsed config values.")
return e.config
except (IOError, OSError) as e:
log(LOGGER, logging.WARNING, "You don't have permission to read "
"config file '{0}'.".format(e.filename))
return None
return config | python | def read_config_file(f):
"""Read a config file."""
if isinstance(f, basestring):
f = os.path.expanduser(f)
try:
config = ConfigObj(f, interpolation=False, encoding='utf8')
except ConfigObjError as e:
log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file "
"'{1}'.".format(e.line_number, f))
log(LOGGER, logging.ERROR, "Using successfully parsed config values.")
return e.config
except (IOError, OSError) as e:
log(LOGGER, logging.WARNING, "You don't have permission to read "
"config file '{0}'.".format(e.filename))
return None
return config | [
"def",
"read_config_file",
"(",
"f",
")",
":",
"if",
"isinstance",
"(",
"f",
",",
"basestring",
")",
":",
"f",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"f",
")",
"try",
":",
"config",
"=",
"ConfigObj",
"(",
"f",
",",
"interpolation",
"=",
"False",
",",
"encoding",
"=",
"'utf8'",
")",
"except",
"ConfigObjError",
"as",
"e",
":",
"log",
"(",
"LOGGER",
",",
"logging",
".",
"ERROR",
",",
"\"Unable to parse line {0} of config file \"",
"\"'{1}'.\"",
".",
"format",
"(",
"e",
".",
"line_number",
",",
"f",
")",
")",
"log",
"(",
"LOGGER",
",",
"logging",
".",
"ERROR",
",",
"\"Using successfully parsed config values.\"",
")",
"return",
"e",
".",
"config",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"e",
":",
"log",
"(",
"LOGGER",
",",
"logging",
".",
"WARNING",
",",
"\"You don't have permission to read \"",
"\"config file '{0}'.\"",
".",
"format",
"(",
"e",
".",
"filename",
")",
")",
"return",
"None",
"return",
"config"
]
| Read a config file. | [
"Read",
"a",
"config",
"file",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/config.py#L51-L69 | train |
dbcli/athenacli | athenacli/config.py | read_config_files | def read_config_files(files):
"""Read and merge a list of config files."""
config = ConfigObj()
for _file in files:
_config = read_config_file(_file)
if bool(_config) is True:
config.merge(_config)
config.filename = _config.filename
return config | python | def read_config_files(files):
"""Read and merge a list of config files."""
config = ConfigObj()
for _file in files:
_config = read_config_file(_file)
if bool(_config) is True:
config.merge(_config)
config.filename = _config.filename
return config | [
"def",
"read_config_files",
"(",
"files",
")",
":",
"config",
"=",
"ConfigObj",
"(",
")",
"for",
"_file",
"in",
"files",
":",
"_config",
"=",
"read_config_file",
"(",
"_file",
")",
"if",
"bool",
"(",
"_config",
")",
"is",
"True",
":",
"config",
".",
"merge",
"(",
"_config",
")",
"config",
".",
"filename",
"=",
"_config",
".",
"filename",
"return",
"config"
]
| Read and merge a list of config files. | [
"Read",
"and",
"merge",
"a",
"list",
"of",
"config",
"files",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/config.py#L72-L83 | train |
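Merge order matters: later files override earlier ones because ConfigObj.merge() updates keys in place. A hedged sketch (assumes the third-party configobj package; the keys are invented):
from configobj import ConfigObj  # third-party, assumed installed

defaults = ConfigObj({'main': {'pager': 'less', 'timing': 'True'}})
user = ConfigObj({'main': {'pager': 'more'}})
defaults.merge(user)              # user settings win for overlapping keys
print(defaults['main']['pager'])  # 'more'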
dbcli/athenacli | athenacli/key_bindings.py | cli_bindings | def cli_bindings():
"""
Custom key bindings for cli.
"""
key_binding_manager = KeyBindingManager(
enable_open_in_editor=True,
enable_system_bindings=True,
enable_auto_suggest_bindings=True,
enable_search=True,
enable_abort_and_exit_bindings=True)
@key_binding_manager.registry.add_binding(Keys.F2)
def _(event):
"""
Enable/Disable SmartCompletion Mode.
"""
_logger.debug('Detected F2 key.')
buf = event.cli.current_buffer
buf.completer.smart_completion = not buf.completer.smart_completion
@key_binding_manager.registry.add_binding(Keys.F3)
def _(event):
"""
Enable/Disable Multiline Mode.
"""
_logger.debug('Detected F3 key.')
buf = event.cli.current_buffer
buf.always_multiline = not buf.always_multiline
@key_binding_manager.registry.add_binding(Keys.F4)
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
_logger.debug('Detected F4 key.')
if event.cli.editing_mode == EditingMode.VI:
event.cli.editing_mode = EditingMode.EMACS
else:
event.cli.editing_mode = EditingMode.VI
@key_binding_manager.registry.add_binding(Keys.Tab)
def _(event):
"""
Force autocompletion at cursor.
"""
_logger.debug('Detected <Tab> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=True)
@key_binding_manager.registry.add_binding(Keys.ControlSpace)
def _(event):
"""
Initialize autocompletion at cursor.
If the autocompletion menu is not showing, display it with the
appropriate completions for the context.
If the menu is showing, select the next completion.
"""
_logger.debug('Detected <C-Space> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=False)
@key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion())
def _(event):
"""
Makes the enter key work as the tab key only when showing the menu.
"""
_logger.debug('Detected <C-J> key.')
event.current_buffer.complete_state = None
b = event.cli.current_buffer
b.complete_state = None
return key_binding_manager | python | def cli_bindings():
"""
Custom key bindings for cli.
"""
key_binding_manager = KeyBindingManager(
enable_open_in_editor=True,
enable_system_bindings=True,
enable_auto_suggest_bindings=True,
enable_search=True,
enable_abort_and_exit_bindings=True)
@key_binding_manager.registry.add_binding(Keys.F2)
def _(event):
"""
Enable/Disable SmartCompletion Mode.
"""
_logger.debug('Detected F2 key.')
buf = event.cli.current_buffer
buf.completer.smart_completion = not buf.completer.smart_completion
@key_binding_manager.registry.add_binding(Keys.F3)
def _(event):
"""
Enable/Disable Multiline Mode.
"""
_logger.debug('Detected F3 key.')
buf = event.cli.current_buffer
buf.always_multiline = not buf.always_multiline
@key_binding_manager.registry.add_binding(Keys.F4)
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
_logger.debug('Detected F4 key.')
if event.cli.editing_mode == EditingMode.VI:
event.cli.editing_mode = EditingMode.EMACS
else:
event.cli.editing_mode = EditingMode.VI
@key_binding_manager.registry.add_binding(Keys.Tab)
def _(event):
"""
Force autocompletion at cursor.
"""
_logger.debug('Detected <Tab> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=True)
@key_binding_manager.registry.add_binding(Keys.ControlSpace)
def _(event):
"""
Initialize autocompletion at cursor.
If the autocompletion menu is not showing, display it with the
appropriate completions for the context.
If the menu is showing, select the next completion.
"""
_logger.debug('Detected <C-Space> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=False)
@key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion())
def _(event):
"""
Makes the enter key work as the tab key only when showing the menu.
"""
_logger.debug('Detected <C-J> key.')
event.current_buffer.complete_state = None
b = event.cli.current_buffer
b.complete_state = None
return key_binding_manager | [
"def",
"cli_bindings",
"(",
")",
":",
"key_binding_manager",
"=",
"KeyBindingManager",
"(",
"enable_open_in_editor",
"=",
"True",
",",
"enable_system_bindings",
"=",
"True",
",",
"enable_auto_suggest_bindings",
"=",
"True",
",",
"enable_search",
"=",
"True",
",",
"enable_abort_and_exit_bindings",
"=",
"True",
")",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"F2",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Enable/Disable SmartCompletion Mode.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected F2 key.'",
")",
"buf",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"buf",
".",
"completer",
".",
"smart_completion",
"=",
"not",
"buf",
".",
"completer",
".",
"smart_completion",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"F3",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Enable/Disable Multiline Mode.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected F3 key.'",
")",
"buf",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"buf",
".",
"always_multiline",
"=",
"not",
"buf",
".",
"always_multiline",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"F4",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Toggle between Vi and Emacs mode.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected F4 key.'",
")",
"if",
"event",
".",
"cli",
".",
"editing_mode",
"==",
"EditingMode",
".",
"VI",
":",
"event",
".",
"cli",
".",
"editing_mode",
"=",
"EditingMode",
".",
"EMACS",
"else",
":",
"event",
".",
"cli",
".",
"editing_mode",
"=",
"EditingMode",
".",
"VI",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"Tab",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Force autocompletion at cursor.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected <Tab> key.'",
")",
"b",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"if",
"b",
".",
"complete_state",
":",
"b",
".",
"complete_next",
"(",
")",
"else",
":",
"event",
".",
"cli",
".",
"start_completion",
"(",
"select_first",
"=",
"True",
")",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"ControlSpace",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Initialize autocompletion at cursor.\n If the autocompletion menu is not showing, display it with the\n appropriate completions for the context.\n If the menu is showing, select the next completion.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected <C-Space> key.'",
")",
"b",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"if",
"b",
".",
"complete_state",
":",
"b",
".",
"complete_next",
"(",
")",
"else",
":",
"event",
".",
"cli",
".",
"start_completion",
"(",
"select_first",
"=",
"False",
")",
"@",
"key_binding_manager",
".",
"registry",
".",
"add_binding",
"(",
"Keys",
".",
"ControlJ",
",",
"filter",
"=",
"HasSelectedCompletion",
"(",
")",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Makes the enter key work as the tab key only when showing the menu.\n \"\"\"",
"_logger",
".",
"debug",
"(",
"'Detected <C-J> key.'",
")",
"event",
".",
"current_buffer",
".",
"complete_state",
"=",
"None",
"b",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"b",
".",
"complete_state",
"=",
"None",
"return",
"key_binding_manager"
]
| Custom key bindings for cli. | [
"Custom",
"key",
"bindings",
"for",
"cli",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/key_bindings.py#L10-L89 | train |
dbcli/athenacli | athenacli/packages/prompt_utils.py | prompt | def prompt(*args, **kwargs):
"""Prompt the user for input and handle any abort exceptions."""
try:
return click.prompt(*args, **kwargs)
except click.Abort:
return False | python | def prompt(*args, **kwargs):
"""Prompt the user for input and handle any abort exceptions."""
try:
return click.prompt(*args, **kwargs)
except click.Abort:
return False | [
"def",
"prompt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"click",
".",
"prompt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"click",
".",
"Abort",
":",
"return",
"False"
]
| Prompt the user for input and handle any abort exceptions. | [
"Prompt",
"the",
"user",
"for",
"input",
"and",
"handle",
"any",
"abort",
"exceptions",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/prompt_utils.py#L30-L35 | train |
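A hypothetical caller showing why the click.Abort guard is useful: Ctrl-C or Ctrl-D during the prompt degrades to a plain False instead of a traceback (the function name and wording are invented):
import click  # third-party, assumed installed

def confirm_destructive_query():
    try:
        answer = click.prompt('About to drop a table. Proceed? (y/n)',
                              default='n')
    except click.Abort:
        return False  # Ctrl-C / Ctrl-D counts as "no"
    return answer.lower() == 'y'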
dbcli/athenacli | athenacli/sqlexecute.py | SQLExecute.run | def run(self, statement):
'''Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status).
'''
# Remove spaces and EOL
statement = statement.strip()
if not statement: # Empty string
yield (None, None, None, None)
# Split the sql into separate queries and run each one.
components = sqlparse.split(statement)
for sql in components:
# Remove spaces, eol and semi-colons.
sql = sql.rstrip(';')
# \G is treated specially since we have to set the expanded output.
if sql.endswith('\\G'):
special.set_expanded_output(True)
sql = sql[:-2].strip()
cur = self.conn.cursor()
try:
for result in special.execute(cur, sql):
yield result
except special.CommandNotFound: # Regular SQL
cur.execute(sql)
yield self.get_result(cur) | python | def run(self, statement):
'''Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status).
'''
# Remove spaces and EOL
statement = statement.strip()
if not statement: # Empty string
yield (None, None, None, None)
# Split the sql into separate queries and run each one.
components = sqlparse.split(statement)
for sql in components:
# Remove spaces, eol and semi-colons.
sql = sql.rstrip(';')
# \G is treated specially since we have to set the expanded output.
if sql.endswith('\\G'):
special.set_expanded_output(True)
sql = sql[:-2].strip()
cur = self.conn.cursor()
try:
for result in special.execute(cur, sql):
yield result
except special.CommandNotFound: # Regular SQL
cur.execute(sql)
yield self.get_result(cur) | [
"def",
"run",
"(",
"self",
",",
"statement",
")",
":",
"# Remove spaces and EOL",
"statement",
"=",
"statement",
".",
"strip",
"(",
")",
"if",
"not",
"statement",
":",
"# Empty string",
"yield",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"# Split the sql into separate queries and run each one.",
"components",
"=",
"sqlparse",
".",
"split",
"(",
"statement",
")",
"for",
"sql",
"in",
"components",
":",
"# Remove spaces, eol and semi-colons.",
"sql",
"=",
"sql",
".",
"rstrip",
"(",
"';'",
")",
"# \\G is treated specially since we have to set the expanded output.",
"if",
"sql",
".",
"endswith",
"(",
"'\\\\G'",
")",
":",
"special",
".",
"set_expanded_output",
"(",
"True",
")",
"sql",
"=",
"sql",
"[",
":",
"-",
"2",
"]",
".",
"strip",
"(",
")",
"cur",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"try",
":",
"for",
"result",
"in",
"special",
".",
"execute",
"(",
"cur",
",",
"sql",
")",
":",
"yield",
"result",
"except",
"special",
".",
"CommandNotFound",
":",
"# Regular SQL",
"cur",
".",
"execute",
"(",
"sql",
")",
"yield",
"self",
".",
"get_result",
"(",
"cur",
")"
]
| Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status). | [
"Execute",
"the",
"sql",
"in",
"the",
"database",
"and",
"return",
"the",
"results",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L52-L82 | train |
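The statement preprocessing in run() can be isolated; a sketch of just the splitting and the \G expanded-output handling (assumes sqlparse):
import sqlparse  # third-party, assumed installed

def split_statements(statement):
    # Yield (sql, expanded) pairs: trailing semicolons are stripped and
    # a trailing \G flags vertical/expanded output, as in run() above.
    for sql in sqlparse.split(statement.strip()):
        sql = sql.rstrip(';')
        expanded = sql.endswith('\\G')
        if expanded:
            sql = sql[:-2].strip()
        yield sql, expanded

print(list(split_statements('select 1; show tables\\G')))
# [('select 1', False), ('show tables', True)]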
dbcli/athenacli | athenacli/sqlexecute.py | SQLExecute.get_result | def get_result(self, cursor):
'''Get the current result's data from the cursor.'''
title = headers = None
# cursor.description is not None for queries that return result sets,
# e.g. SELECT or SHOW.
if cursor.description is not None:
headers = [x[0] for x in cursor.description]
rows = cursor.fetchall()
status = '%d row%s in set' % (len(rows), '' if len(rows) == 1 else 's')
else:
logger.debug('No rows in result.')
rows = None
status = 'Query OK'
return (title, rows, headers, status) | python | def get_result(self, cursor):
'''Get the current result's data from the cursor.'''
title = headers = None
# cursor.description is not None for queries that return result sets,
# e.g. SELECT or SHOW.
if cursor.description is not None:
headers = [x[0] for x in cursor.description]
rows = cursor.fetchall()
status = '%d row%s in set' % (len(rows), '' if len(rows) == 1 else 's')
else:
logger.debug('No rows in result.')
rows = None
status = 'Query OK'
return (title, rows, headers, status) | [
"def",
"get_result",
"(",
"self",
",",
"cursor",
")",
":",
"title",
"=",
"headers",
"=",
"None",
"# cursor.description is not None for queries that return result sets,",
"# e.g. SELECT or SHOW.",
"if",
"cursor",
".",
"description",
"is",
"not",
"None",
":",
"headers",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"cursor",
".",
"description",
"]",
"rows",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"status",
"=",
"'%d row%s in set'",
"%",
"(",
"len",
"(",
"rows",
")",
",",
"''",
"if",
"len",
"(",
"rows",
")",
"==",
"1",
"else",
"'s'",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'No rows in result.'",
")",
"rows",
"=",
"None",
"status",
"=",
"'Query OK'",
"return",
"(",
"title",
",",
"rows",
",",
"headers",
",",
"status",
")"
]
| Get the current result's data from the cursor. | [
"Get",
"the",
"current",
"result",
"s",
"data",
"from",
"the",
"cursor",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L84-L98 | train |
dbcli/athenacli | athenacli/sqlexecute.py | SQLExecute.tables | def tables(self):
'''Yields table names.'''
with self.conn.cursor() as cur:
cur.execute(self.TABLES_QUERY)
for row in cur:
yield row | python | def tables(self):
'''Yields table names.'''
with self.conn.cursor() as cur:
cur.execute(self.TABLES_QUERY)
for row in cur:
yield row | [
"def",
"tables",
"(",
"self",
")",
":",
"with",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"as",
"cur",
":",
"cur",
".",
"execute",
"(",
"self",
".",
"TABLES_QUERY",
")",
"for",
"row",
"in",
"cur",
":",
"yield",
"row"
]
| Yields table names. | [
"Yields",
"table",
"names",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L100-L105 | train |
dbcli/athenacli | athenacli/sqlexecute.py | SQLExecute.table_columns | def table_columns(self):
'''Yields column names.'''
with self.conn.cursor() as cur:
cur.execute(self.TABLE_COLUMNS_QUERY % self.database)
for row in cur:
yield row | python | def table_columns(self):
'''Yields column names.'''
with self.conn.cursor() as cur:
cur.execute(self.TABLE_COLUMNS_QUERY % self.database)
for row in cur:
yield row | [
"def",
"table_columns",
"(",
"self",
")",
":",
"with",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"as",
"cur",
":",
"cur",
".",
"execute",
"(",
"self",
".",
"TABLE_COLUMNS_QUERY",
"%",
"self",
".",
"database",
")",
"for",
"row",
"in",
"cur",
":",
"yield",
"row"
]
| Yields column names. | [
"Yields",
"column",
"names",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L107-L112 | train |
dbcli/athenacli | athenacli/clitoolbar.py | create_toolbar_tokens_func | def create_toolbar_tokens_func(get_is_refreshing, show_fish_help):
"""
Return a function that generates the toolbar tokens.
"""
token = Token.Toolbar
def get_toolbar_tokens(cli):
result = []
result.append((token, ' '))
if cli.buffers[DEFAULT_BUFFER].always_multiline:
result.append((token.On, '[F3] Multiline: ON '))
else:
result.append((token.Off, '[F3] Multiline: OFF '))
if cli.buffers[DEFAULT_BUFFER].always_multiline:
result.append((token,
' (Semi-colon [;] will end the line)'))
if cli.editing_mode == EditingMode.VI:
result.append((
token.On,
'Vi-mode ({})'.format(_get_vi_mode(cli))
))
if show_fish_help():
result.append((token, ' Right-arrow to complete suggestion'))
if get_is_refreshing():
result.append((token, ' Refreshing completions...'))
return result
return get_toolbar_tokens | python | def create_toolbar_tokens_func(get_is_refreshing, show_fish_help):
"""
Return a function that generates the toolbar tokens.
"""
token = Token.Toolbar
def get_toolbar_tokens(cli):
result = []
result.append((token, ' '))
if cli.buffers[DEFAULT_BUFFER].always_multiline:
result.append((token.On, '[F3] Multiline: ON '))
else:
result.append((token.Off, '[F3] Multiline: OFF '))
if cli.buffers[DEFAULT_BUFFER].always_multiline:
result.append((token,
' (Semi-colon [;] will end the line)'))
if cli.editing_mode == EditingMode.VI:
result.append((
token.On,
'Vi-mode ({})'.format(_get_vi_mode(cli))
))
if show_fish_help():
result.append((token, ' Right-arrow to complete suggestion'))
if get_is_refreshing():
result.append((token, ' Refreshing completions...'))
return result
return get_toolbar_tokens | [
"def",
"create_toolbar_tokens_func",
"(",
"get_is_refreshing",
",",
"show_fish_help",
")",
":",
"token",
"=",
"Token",
".",
"Toolbar",
"def",
"get_toolbar_tokens",
"(",
"cli",
")",
":",
"result",
"=",
"[",
"]",
"result",
".",
"append",
"(",
"(",
"token",
",",
"' '",
")",
")",
"if",
"cli",
".",
"buffers",
"[",
"DEFAULT_BUFFER",
"]",
".",
"always_multiline",
":",
"result",
".",
"append",
"(",
"(",
"token",
".",
"On",
",",
"'[F3] Multiline: ON '",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"(",
"token",
".",
"Off",
",",
"'[F3] Multiline: OFF '",
")",
")",
"if",
"cli",
".",
"buffers",
"[",
"DEFAULT_BUFFER",
"]",
".",
"always_multiline",
":",
"result",
".",
"append",
"(",
"(",
"token",
",",
"' (Semi-colon [;] will end the line)'",
")",
")",
"if",
"cli",
".",
"editing_mode",
"==",
"EditingMode",
".",
"VI",
":",
"result",
".",
"append",
"(",
"(",
"token",
".",
"On",
",",
"'Vi-mode ({})'",
".",
"format",
"(",
"_get_vi_mode",
"(",
"cli",
")",
")",
")",
")",
"if",
"show_fish_help",
"(",
")",
":",
"result",
".",
"append",
"(",
"(",
"token",
",",
"' Right-arrow to complete suggestion'",
")",
")",
"if",
"get_is_refreshing",
"(",
")",
":",
"result",
".",
"append",
"(",
"(",
"token",
",",
"' Refreshing completions...'",
")",
")",
"return",
"result",
"return",
"get_toolbar_tokens"
]
| Return a function that generates the toolbar tokens. | [
"Return",
"a",
"function",
"that",
"generates",
"the",
"toolbar",
"tokens",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/clitoolbar.py#L6-L38 | train |
dbcli/athenacli | athenacli/clitoolbar.py | _get_vi_mode | def _get_vi_mode(cli):
"""Get the current vi mode for display."""
return {
InputMode.INSERT: 'I',
InputMode.NAVIGATION: 'N',
InputMode.REPLACE: 'R',
InputMode.INSERT_MULTIPLE: 'M'
}[cli.vi_state.input_mode] | python | def _get_vi_mode(cli):
"""Get the current vi mode for display."""
return {
InputMode.INSERT: 'I',
InputMode.NAVIGATION: 'N',
InputMode.REPLACE: 'R',
InputMode.INSERT_MULTIPLE: 'M'
}[cli.vi_state.input_mode] | [
"def",
"_get_vi_mode",
"(",
"cli",
")",
":",
"return",
"{",
"InputMode",
".",
"INSERT",
":",
"'I'",
",",
"InputMode",
".",
"NAVIGATION",
":",
"'N'",
",",
"InputMode",
".",
"REPLACE",
":",
"'R'",
",",
"InputMode",
".",
"INSERT_MULTIPLE",
":",
"'M'",
"}",
"[",
"cli",
".",
"vi_state",
".",
"input_mode",
"]"
]
| Get the current vi mode for display. | [
"Get",
"the",
"current",
"vi",
"mode",
"for",
"display",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/clitoolbar.py#L41-L48 | train |
dbcli/athenacli | athenacli/packages/special/__init__.py | export | def export(defn):
"""Decorator to explicitly mark functions that are exposed in a lib."""
globals()[defn.__name__] = defn
__all__.append(defn.__name__)
return defn | python | def export(defn):
"""Decorator to explicitly mark functions that are exposed in a lib."""
globals()[defn.__name__] = defn
__all__.append(defn.__name__)
return defn | [
"def",
"export",
"(",
"defn",
")",
":",
"globals",
"(",
")",
"[",
"defn",
".",
"__name__",
"]",
"=",
"defn",
"__all__",
".",
"append",
"(",
"defn",
".",
"__name__",
")",
"return",
"defn"
]
| Decorator to explicitly mark functions that are exposed in a lib. | [
"Decorator",
"to",
"explicitly",
"mark",
"functions",
"that",
"are",
"exposed",
"in",
"a",
"lib",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/__init__.py#L5-L9 | train |
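Usage sketch for the decorator: applying @export defines the name at module level and records it in __all__, so star-imports expose only decorated objects (the decorated function here is invented):
__all__ = []

def export(defn):
    globals()[defn.__name__] = defn
    __all__.append(defn.__name__)
    return defn

@export
def set_timing_enabled(val):
    return bool(val)

print(__all__)  # ['set_timing_enabled']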
dbcli/athenacli | athenacli/packages/special/main.py | execute | def execute(cur, sql):
"""Execute a special command and return the results. If the special command
is not supported a KeyError will be raised.
"""
command, verbose, arg = parse_special_command(sql)
if (command not in COMMANDS) and (command.lower() not in COMMANDS):
raise CommandNotFound
try:
special_cmd = COMMANDS[command]
except KeyError:
special_cmd = COMMANDS[command.lower()]
if special_cmd.case_sensitive:
raise CommandNotFound('Command not found: %s' % command)
# "help <SQL KEYWORD> is a special case.
if command == 'help' and arg:
return show_keyword_help(cur=cur, arg=arg)
if special_cmd.arg_type == NO_QUERY:
return special_cmd.handler()
elif special_cmd.arg_type == PARSED_QUERY:
return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
elif special_cmd.arg_type == RAW_QUERY:
return special_cmd.handler(cur=cur, query=sql) | python | def execute(cur, sql):
"""Execute a special command and return the results. If the special command
is not supported a CommandNotFound will be raised.
"""
command, verbose, arg = parse_special_command(sql)
if (command not in COMMANDS) and (command.lower() not in COMMANDS):
raise CommandNotFound
try:
special_cmd = COMMANDS[command]
except KeyError:
special_cmd = COMMANDS[command.lower()]
if special_cmd.case_sensitive:
raise CommandNotFound('Command not found: %s' % command)
# "help <SQL KEYWORD> is a special case.
if command == 'help' and arg:
return show_keyword_help(cur=cur, arg=arg)
if special_cmd.arg_type == NO_QUERY:
return special_cmd.handler()
elif special_cmd.arg_type == PARSED_QUERY:
return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
elif special_cmd.arg_type == RAW_QUERY:
return special_cmd.handler(cur=cur, query=sql) | [
"def",
"execute",
"(",
"cur",
",",
"sql",
")",
":",
"command",
",",
"verbose",
",",
"arg",
"=",
"parse_special_command",
"(",
"sql",
")",
"if",
"(",
"command",
"not",
"in",
"COMMANDS",
")",
"and",
"(",
"command",
".",
"lower",
"(",
")",
"not",
"in",
"COMMANDS",
")",
":",
"raise",
"CommandNotFound",
"try",
":",
"special_cmd",
"=",
"COMMANDS",
"[",
"command",
"]",
"except",
"KeyError",
":",
"special_cmd",
"=",
"COMMANDS",
"[",
"command",
".",
"lower",
"(",
")",
"]",
"if",
"special_cmd",
".",
"case_sensitive",
":",
"raise",
"CommandNotFound",
"(",
"'Command not found: %s'",
"%",
"command",
")",
"# \"help <SQL KEYWORD> is a special case.",
"if",
"command",
"==",
"'help'",
"and",
"arg",
":",
"return",
"show_keyword_help",
"(",
"cur",
"=",
"cur",
",",
"arg",
"=",
"arg",
")",
"if",
"special_cmd",
".",
"arg_type",
"==",
"NO_QUERY",
":",
"return",
"special_cmd",
".",
"handler",
"(",
")",
"elif",
"special_cmd",
".",
"arg_type",
"==",
"PARSED_QUERY",
":",
"return",
"special_cmd",
".",
"handler",
"(",
"cur",
"=",
"cur",
",",
"arg",
"=",
"arg",
",",
"verbose",
"=",
"verbose",
")",
"elif",
"special_cmd",
".",
"arg_type",
"==",
"RAW_QUERY",
":",
"return",
"special_cmd",
".",
"handler",
"(",
"cur",
"=",
"cur",
",",
"query",
"=",
"sql",
")"
]
| Execute a special command and return the results. If the special command
is not supported a CommandNotFound will be raised. | [
"Execute",
"a",
"special",
"command",
"and",
"return",
"the",
"results",
".",
"If",
"the",
"special",
"command",
"is",
"not",
"supported",
"a",
"KeyError",
"will",
"be",
"raised",
"."
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/main.py#L51-L76 | train |
dbcli/athenacli | athenacli/packages/parseutils.py | find_prev_keyword | def find_prev_keyword(sql):
""" Find the last sql keyword in an SQL statement
Returns the value of the last keyword, and the text of the query with
everything after the last keyword stripped
"""
if not sql.strip():
return None, ''
parsed = sqlparse.parse(sql)[0]
flattened = list(parsed.flatten())
logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')
for t in reversed(flattened):
if t.value == '(' or (t.is_keyword and (
t.value.upper() not in logical_operators)):
# Find the location of token t in the original parsed statement
# We can't use parsed.token_index(t) because t may be a child token
# inside a TokenList, in which case token_index throws an error
# Minimal example:
# p = sqlparse.parse('select * from foo where bar')
# t = list(p.flatten())[-3] # The "Where" token
# p.token_index(t) # Throws ValueError: not in list
idx = flattened.index(t)
# Combine the string values of all tokens in the original list
# up to and including the target keyword token t, to produce a
# query string with everything after the keyword token removed
text = ''.join(tok.value for tok in flattened[:idx+1])
return t, text
return None, '' | python | def find_prev_keyword(sql):
""" Find the last sql keyword in an SQL statement
Returns the value of the last keyword, and the text of the query with
everything after the last keyword stripped
"""
if not sql.strip():
return None, ''
parsed = sqlparse.parse(sql)[0]
flattened = list(parsed.flatten())
logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')
for t in reversed(flattened):
if t.value == '(' or (t.is_keyword and (
t.value.upper() not in logical_operators)):
# Find the location of token t in the original parsed statement
# We can't use parsed.token_index(t) because t may be a child token
# inside a TokenList, in which case token_index throws an error
# Minimal example:
# p = sqlparse.parse('select * from foo where bar')
# t = list(p.flatten())[-3] # The "Where" token
# p.token_index(t) # Throws ValueError: not in list
idx = flattened.index(t)
# Combine the string values of all tokens in the original list
# up to and including the target keyword token t, to produce a
# query string with everything after the keyword token removed
text = ''.join(tok.value for tok in flattened[:idx+1])
return t, text
return None, '' | [
"def",
"find_prev_keyword",
"(",
"sql",
")",
":",
"if",
"not",
"sql",
".",
"strip",
"(",
")",
":",
"return",
"None",
",",
"''",
"parsed",
"=",
"sqlparse",
".",
"parse",
"(",
"sql",
")",
"[",
"0",
"]",
"flattened",
"=",
"list",
"(",
"parsed",
".",
"flatten",
"(",
")",
")",
"logical_operators",
"=",
"(",
"'AND'",
",",
"'OR'",
",",
"'NOT'",
",",
"'BETWEEN'",
")",
"for",
"t",
"in",
"reversed",
"(",
"flattened",
")",
":",
"if",
"t",
".",
"value",
"==",
"'('",
"or",
"(",
"t",
".",
"is_keyword",
"and",
"(",
"t",
".",
"value",
".",
"upper",
"(",
")",
"not",
"in",
"logical_operators",
")",
")",
":",
"# Find the location of token t in the original parsed statement",
"# We can't use parsed.token_index(t) because t may be a child token",
"# inside a TokenList, in which case token_index thows an error",
"# Minimal example:",
"# p = sqlparse.parse('select * from foo where bar')",
"# t = list(p.flatten())[-3] # The \"Where\" token",
"# p.token_index(t) # Throws ValueError: not in list",
"idx",
"=",
"flattened",
".",
"index",
"(",
"t",
")",
"# Combine the string values of all tokens in the original list",
"# up to and including the target keyword token t, to produce a",
"# query string with everything after the keyword token removed",
"text",
"=",
"''",
".",
"join",
"(",
"tok",
".",
"value",
"for",
"tok",
"in",
"flattened",
"[",
":",
"idx",
"+",
"1",
"]",
")",
"return",
"t",
",",
"text",
"return",
"None",
",",
"''"
]
| Find the last sql keyword in an SQL statement
Returns the value of the last keyword, and the text of the query with
everything after the last keyword stripped | [
"Find",
"the",
"last",
"sql",
"keyword",
"in",
"an",
"SQL",
"statement",
"Returns",
"the",
"value",
"of",
"the",
"last",
"keyword",
"and",
"the",
"text",
"of",
"the",
"query",
"with",
"everything",
"after",
"the",
"last",
"keyword",
"stripped"
]
| bcab59e4953145866430083e902ed4d042d4ebba | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/parseutils.py#L153-L184 | train |
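The flatten-and-scan trick is easiest to see with the minimal example from the comments above; a hedged sketch (assumes sqlparse):
import sqlparse  # third-party, assumed installed

parsed = sqlparse.parse('select * from foo where bar')[0]
flattened = list(parsed.flatten())
# Scanning the flat token list from the end, the first non-logical
# keyword is 'where'; find_prev_keyword() returns that token together
# with the query text up to and including it.
last_kw = [t for t in flattened if t.is_keyword][-1]
print(last_kw.value)  # 'where'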
divio/cmsplugin-filer | cmsplugin_filer_teaser/cms_plugins.py | FilerTeaserPlugin._get_thumbnail_options | def _get_thumbnail_options(self, context, instance):
"""
Return the size and options of the thumbnail that should be inserted
"""
width, height = None, None
subject_location = False
placeholder_width = context.get('width', None)
placeholder_height = context.get('height', None)
if instance.use_autoscale and placeholder_width:
# use the placeholder width as a hint for sizing
width = int(placeholder_width)
if instance.use_autoscale and placeholder_height:
height = int(placeholder_height)
elif instance.width:
width = instance.width
if instance.height:
height = instance.height
if instance.image:
if instance.image.subject_location:
subject_location = instance.image.subject_location
if not height and width:
# height was not externally defined: use ratio to scale it by the width
height = int(float(width) * float(instance.image.height) / float(instance.image.width))
if not width and height:
# width was not externally defined: use ratio to scale it by the height
width = int(float(height) * float(instance.image.width) / float(instance.image.height))
if not width:
# width is still not defined. fall back to the actual image width
width = instance.image.width
if not height:
# height is still not defined. fall back to the actual image height
height = instance.image.height
return {'size': (width, height),
'subject_location': subject_location} | python | def _get_thumbnail_options(self, context, instance):
"""
Return the size and options of the thumbnail that should be inserted
"""
width, height = None, None
subject_location = False
placeholder_width = context.get('width', None)
placeholder_height = context.get('height', None)
if instance.use_autoscale and placeholder_width:
# use the placeholder width as a hint for sizing
width = int(placeholder_width)
if instance.use_autoscale and placeholder_height:
height = int(placeholder_height)
elif instance.width:
width = instance.width
if instance.height:
height = instance.height
if instance.image:
if instance.image.subject_location:
subject_location = instance.image.subject_location
if not height and width:
# height was not externally defined: use ratio to scale it by the width
height = int(float(width) * float(instance.image.height) / float(instance.image.width))
if not width and height:
# width was not externally defined: use ratio to scale it by the height
width = int(float(height) * float(instance.image.width) / float(instance.image.height))
if not width:
# width is still not defined. fall back to the actual image width
width = instance.image.width
if not height:
# height is still not defined. fall back to the actual image height
height = instance.image.height
return {'size': (width, height),
'subject_location': subject_location} | [
"def",
"_get_thumbnail_options",
"(",
"self",
",",
"context",
",",
"instance",
")",
":",
"width",
",",
"height",
"=",
"None",
",",
"None",
"subject_location",
"=",
"False",
"placeholder_width",
"=",
"context",
".",
"get",
"(",
"'width'",
",",
"None",
")",
"placeholder_height",
"=",
"context",
".",
"get",
"(",
"'height'",
",",
"None",
")",
"if",
"instance",
".",
"use_autoscale",
"and",
"placeholder_width",
":",
"# use the placeholder width as a hint for sizing",
"width",
"=",
"int",
"(",
"placeholder_width",
")",
"if",
"instance",
".",
"use_autoscale",
"and",
"placeholder_height",
":",
"height",
"=",
"int",
"(",
"placeholder_height",
")",
"elif",
"instance",
".",
"width",
":",
"width",
"=",
"instance",
".",
"width",
"if",
"instance",
".",
"height",
":",
"height",
"=",
"instance",
".",
"height",
"if",
"instance",
".",
"image",
":",
"if",
"instance",
".",
"image",
".",
"subject_location",
":",
"subject_location",
"=",
"instance",
".",
"image",
".",
"subject_location",
"if",
"not",
"height",
"and",
"width",
":",
"# height was not externally defined: use ratio to scale it by the width",
"height",
"=",
"int",
"(",
"float",
"(",
"width",
")",
"*",
"float",
"(",
"instance",
".",
"image",
".",
"height",
")",
"/",
"float",
"(",
"instance",
".",
"image",
".",
"width",
")",
")",
"if",
"not",
"width",
"and",
"height",
":",
"# width was not externally defined: use ratio to scale it by the height",
"width",
"=",
"int",
"(",
"float",
"(",
"height",
")",
"*",
"float",
"(",
"instance",
".",
"image",
".",
"width",
")",
"/",
"float",
"(",
"instance",
".",
"image",
".",
"height",
")",
")",
"if",
"not",
"width",
":",
"# width is still not defined. fallback the actual image width",
"width",
"=",
"instance",
".",
"image",
".",
"width",
"if",
"not",
"height",
":",
"# height is still not defined. fallback the actual image height",
"height",
"=",
"instance",
".",
"image",
".",
"height",
"return",
"{",
"'size'",
":",
"(",
"width",
",",
"height",
")",
",",
"'subject_location'",
":",
"subject_location",
"}"
]
| Return the size and options of the thumbnail that should be inserted | [
"Return",
"the",
"size",
"and",
"options",
"of",
"the",
"thumbnail",
"that",
"should",
"be",
"inserted"
]
| 4f9b0307dd768852ead64e651b743a165b3efccb | https://github.com/divio/cmsplugin-filer/blob/4f9b0307dd768852ead64e651b743a165b3efccb/cmsplugin_filer_teaser/cms_plugins.py#L42-L75 | train |
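The sizing logic in `_get_thumbnail_options` reduces to an aspect-ratio fallback chain. A framework-free sketch of just that arithmetic (the helper name is hypothetical, not part of the plugin):

```python
def scale_to_image(image_w, image_h, width=None, height=None):
    # Derive the missing dimension from the image's aspect ratio,
    # then fall back to the image's own size, mirroring the chain above.
    if not height and width:
        height = int(float(width) * float(image_h) / float(image_w))
    if not width and height:
        width = int(float(height) * float(image_w) / float(image_h))
    return (width or image_w, height or image_h)

assert scale_to_image(800, 600, width=400) == (400, 300)
```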
divio/cmsplugin-filer | cmsplugin_filer_image/integrations/ckeditor.py | create_image_plugin | def create_image_plugin(filename, image, parent_plugin, **kwargs):
"""
Used for drag-n-drop image insertion with djangocms-text-ckeditor.
Set TEXT_SAVE_IMAGE_FUNCTION='cmsplugin_filer_image.integrations.ckeditor.create_image_plugin' to enable.
"""
from cmsplugin_filer_image.models import FilerImage
from filer.models import Image
image_plugin = FilerImage()
image_plugin.placeholder = parent_plugin.placeholder
image_plugin.parent = CMSPlugin.objects.get(pk=parent_plugin.id)
image_plugin.position = CMSPlugin.objects.filter(parent=parent_plugin).count()
image_plugin.language = parent_plugin.language
image_plugin.plugin_type = 'FilerImagePlugin'
image.seek(0)
image_model = Image.objects.create(file=SimpleUploadedFile(name=filename, content=image.read()))
image_plugin.image = image_model
image_plugin.save()
return image_plugin | python | def create_image_plugin(filename, image, parent_plugin, **kwargs):
"""
Used for drag-n-drop image insertion with djangocms-text-ckeditor.
Set TEXT_SAVE_IMAGE_FUNCTION='cmsplugin_filer_image.integrations.ckeditor.create_image_plugin' to enable.
"""
from cmsplugin_filer_image.models import FilerImage
from filer.models import Image
image_plugin = FilerImage()
image_plugin.placeholder = parent_plugin.placeholder
image_plugin.parent = CMSPlugin.objects.get(pk=parent_plugin.id)
image_plugin.position = CMSPlugin.objects.filter(parent=parent_plugin).count()
image_plugin.language = parent_plugin.language
image_plugin.plugin_type = 'FilerImagePlugin'
image.seek(0)
image_model = Image.objects.create(file=SimpleUploadedFile(name=filename, content=image.read()))
image_plugin.image = image_model
image_plugin.save()
return image_plugin | [
"def",
"create_image_plugin",
"(",
"filename",
",",
"image",
",",
"parent_plugin",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"cmsplugin_filer_image",
".",
"models",
"import",
"FilerImage",
"from",
"filer",
".",
"models",
"import",
"Image",
"image_plugin",
"=",
"FilerImage",
"(",
")",
"image_plugin",
".",
"placeholder",
"=",
"parent_plugin",
".",
"placeholder",
"image_plugin",
".",
"parent",
"=",
"CMSPlugin",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"parent_plugin",
".",
"id",
")",
"image_plugin",
".",
"position",
"=",
"CMSPlugin",
".",
"objects",
".",
"filter",
"(",
"parent",
"=",
"parent_plugin",
")",
".",
"count",
"(",
")",
"image_plugin",
".",
"language",
"=",
"parent_plugin",
".",
"language",
"image_plugin",
".",
"plugin_type",
"=",
"'FilerImagePlugin'",
"image",
".",
"seek",
"(",
"0",
")",
"image_model",
"=",
"Image",
".",
"objects",
".",
"create",
"(",
"file",
"=",
"SimpleUploadedFile",
"(",
"name",
"=",
"filename",
",",
"content",
"=",
"image",
".",
"read",
"(",
")",
")",
")",
"image_plugin",
".",
"image",
"=",
"image_model",
"image_plugin",
".",
"save",
"(",
")",
"return",
"image_plugin"
]
| Used for drag-n-drop image insertion with djangocms-text-ckeditor.
Set TEXT_SAVE_IMAGE_FUNCTION='cmsplugin_filer_image.integrations.ckeditor.create_image_plugin' to enable. | [
"Used",
"for",
"drag",
"-",
"n",
"-",
"drop",
"image",
"insertion",
"with",
"djangocms",
"-",
"text",
"-",
"ckeditor",
".",
"Set",
"TEXT_SAVE_IMAGE_FUNCTION",
"=",
"cmsplugin_filer_image",
".",
"integrations",
".",
"ckeditor",
".",
"create_image_plugin",
"to",
"enable",
"."
]
| 4f9b0307dd768852ead64e651b743a165b3efccb | https://github.com/divio/cmsplugin-filer/blob/4f9b0307dd768852ead64e651b743a165b3efccb/cmsplugin_filer_image/integrations/ckeditor.py#L6-L23 | train |
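Per the record's docstring, enabling this hook is a one-line Django setting; the snippet below simply restates it:

```python
# settings.py -- enable drag-n-drop image insertion for
# djangocms-text-ckeditor, exactly as the docstring instructs.
TEXT_SAVE_IMAGE_FUNCTION = \
    'cmsplugin_filer_image.integrations.ckeditor.create_image_plugin'
```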
divio/cmsplugin-filer | cmsplugin_filer_utils/migration.py | rename_tables | def rename_tables(db, table_mapping, reverse=False):
"""
renames tables from source to destination name, if the source exists and the destination does
not exist yet.
"""
from django.db import connection
if reverse:
table_mapping = [(dst, src) for src, dst in table_mapping]
table_names = connection.introspection.table_names()
for source, destination in table_mapping:
if source in table_names and destination in table_names:
print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination))
elif source in table_names and destination not in table_names:
print(u" - renaming {0} to {1}".format(source, destination))
db.rename_table(source, destination) | python | def rename_tables(db, table_mapping, reverse=False):
"""
renames tables from source to destination name, if the source exists and the destination does
not exist yet.
"""
from django.db import connection
if reverse:
table_mapping = [(dst, src) for src, dst in table_mapping]
table_names = connection.introspection.table_names()
for source, destination in table_mapping:
if source in table_names and destination in table_names:
print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination))
elif source in table_names and destination not in table_names:
print(u" - renaming {0} to {1}".format(source, destination))
db.rename_table(source, destination) | [
"def",
"rename_tables",
"(",
"db",
",",
"table_mapping",
",",
"reverse",
"=",
"False",
")",
":",
"from",
"django",
".",
"db",
"import",
"connection",
"if",
"reverse",
":",
"table_mapping",
"=",
"[",
"(",
"dst",
",",
"src",
")",
"for",
"src",
",",
"dst",
"in",
"table_mapping",
"]",
"table_names",
"=",
"connection",
".",
"introspection",
".",
"table_names",
"(",
")",
"for",
"source",
",",
"destination",
"in",
"table_mapping",
":",
"if",
"source",
"in",
"table_names",
"and",
"destination",
"in",
"table_names",
":",
"print",
"(",
"u\" WARNING: not renaming {0} to {1}, because both tables already exist.\"",
".",
"format",
"(",
"source",
",",
"destination",
")",
")",
"elif",
"source",
"in",
"table_names",
"and",
"destination",
"not",
"in",
"table_names",
":",
"print",
"(",
"u\" - renaming {0} to {1}\"",
".",
"format",
"(",
"source",
",",
"destination",
")",
")",
"db",
".",
"rename_table",
"(",
"source",
",",
"destination",
")"
]
| renames tables from source to destination name, if the source exists and the destination does
not exist yet. | [
"renames",
"tables",
"from",
"source",
"to",
"destination",
"name",
"if",
"the",
"source",
"exists",
"and",
"the",
"destination",
"does",
"not",
"exist",
"yet",
"."
]
| 4f9b0307dd768852ead64e651b743a165b3efccb | https://github.com/divio/cmsplugin-filer/blob/4f9b0307dd768852ead64e651b743a165b3efccb/cmsplugin_filer_utils/migration.py#L4-L18 | train |
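A hedged sketch of how `rename_tables` would be called from a South schema migration; the table names and the migration skeleton are illustrative, not taken from the record:

```python
from south.db import db
from south.v2 import SchemaMigration

from cmsplugin_filer_utils.migration import rename_tables

# Hypothetical mapping; real migrations in this repo define their own.
TABLE_MAPPING = [
    ('cmsplugin_filerimage', 'cmsplugin_filer_image_filerimage'),
]

class Migration(SchemaMigration):
    def forwards(self, orm):
        rename_tables(db, TABLE_MAPPING)

    def backwards(self, orm):
        # Same mapping, applied in the opposite direction.
        rename_tables(db, TABLE_MAPPING, reverse=True)
```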
sorgerlab/indra | indra/util/statement_presentation.py | group_and_sort_statements | def group_and_sort_statements(stmt_list, ev_totals=None):
"""Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type.
"""
def _count(stmt):
if ev_totals is None:
return len(stmt.evidence)
else:
return ev_totals[stmt.get_hash()]
stmt_rows = defaultdict(list)
stmt_counts = defaultdict(lambda: 0)
arg_counts = defaultdict(lambda: 0)
for key, s in _get_keyed_stmts(stmt_list):
# Update the counts, and add key if needed.
stmt_rows[key].append(s)
# Keep track of the total evidence counts for this statement and the
# arguments.
stmt_counts[key] += _count(s)
# Add up the counts for the arguments, pairwise for Complexes and
# Conversions. This allows, for example, a complex between MEK, ERK,
# and something else to lend weight to the interactions between MEK
# and ERK.
if key[0] == 'Conversion':
subj = key[1]
for obj in key[2] + key[3]:
arg_counts[(subj, obj)] += _count(s)
else:
arg_counts[key[1:]] += _count(s)
# Sort the rows by count and agent names.
def process_rows(stmt_rows):
for key, stmts in stmt_rows.items():
verb = key[0]
inps = key[1:]
sub_count = stmt_counts[key]
arg_count = arg_counts[inps]
if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
if all([len(set(ag.name for ag in s.agent_list())) > 2
for s in stmts]):
continue
new_key = (arg_count, inps, sub_count, verb)
stmts = sorted(stmts,
key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
reverse=True)
yield new_key, verb, stmts
sorted_groups = sorted(process_rows(stmt_rows),
key=lambda tpl: tpl[0], reverse=True)
return sorted_groups | python | def group_and_sort_statements(stmt_list, ev_totals=None):
"""Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type.
"""
def _count(stmt):
if ev_totals is None:
return len(stmt.evidence)
else:
return ev_totals[stmt.get_hash()]
stmt_rows = defaultdict(list)
stmt_counts = defaultdict(lambda: 0)
arg_counts = defaultdict(lambda: 0)
for key, s in _get_keyed_stmts(stmt_list):
# Update the counts, and add key if needed.
stmt_rows[key].append(s)
# Keep track of the total evidence counts for this statement and the
# arguments.
stmt_counts[key] += _count(s)
# Add up the counts for the arguments, pairwise for Complexes and
# Conversions. This allows, for example, a complex between MEK, ERK,
# and something else to lend weight to the interactions between MEK
# and ERK.
if key[0] == 'Conversion':
subj = key[1]
for obj in key[2] + key[3]:
arg_counts[(subj, obj)] += _count(s)
else:
arg_counts[key[1:]] += _count(s)
# Sort the rows by count and agent names.
def process_rows(stmt_rows):
for key, stmts in stmt_rows.items():
verb = key[0]
inps = key[1:]
sub_count = stmt_counts[key]
arg_count = arg_counts[inps]
if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
if all([len(set(ag.name for ag in s.agent_list())) > 2
for s in stmts]):
continue
new_key = (arg_count, inps, sub_count, verb)
stmts = sorted(stmts,
key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
reverse=True)
yield new_key, verb, stmts
sorted_groups = sorted(process_rows(stmt_rows),
key=lambda tpl: tpl[0], reverse=True)
return sorted_groups | [
"def",
"group_and_sort_statements",
"(",
"stmt_list",
",",
"ev_totals",
"=",
"None",
")",
":",
"def",
"_count",
"(",
"stmt",
")",
":",
"if",
"ev_totals",
"is",
"None",
":",
"return",
"len",
"(",
"stmt",
".",
"evidence",
")",
"else",
":",
"return",
"ev_totals",
"[",
"stmt",
".",
"get_hash",
"(",
")",
"]",
"stmt_rows",
"=",
"defaultdict",
"(",
"list",
")",
"stmt_counts",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"arg_counts",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"for",
"key",
",",
"s",
"in",
"_get_keyed_stmts",
"(",
"stmt_list",
")",
":",
"# Update the counts, and add key if needed.",
"stmt_rows",
"[",
"key",
"]",
".",
"append",
"(",
"s",
")",
"# Keep track of the total evidence counts for this statement and the",
"# arguments.",
"stmt_counts",
"[",
"key",
"]",
"+=",
"_count",
"(",
"s",
")",
"# Add up the counts for the arguments, pairwise for Complexes and",
"# Conversions. This allows, for example, a complex between MEK, ERK,",
"# and something else to lend weight to the interactions between MEK",
"# and ERK.",
"if",
"key",
"[",
"0",
"]",
"==",
"'Conversion'",
":",
"subj",
"=",
"key",
"[",
"1",
"]",
"for",
"obj",
"in",
"key",
"[",
"2",
"]",
"+",
"key",
"[",
"3",
"]",
":",
"arg_counts",
"[",
"(",
"subj",
",",
"obj",
")",
"]",
"+=",
"_count",
"(",
"s",
")",
"else",
":",
"arg_counts",
"[",
"key",
"[",
"1",
":",
"]",
"]",
"+=",
"_count",
"(",
"s",
")",
"# Sort the rows by count and agent names.",
"def",
"process_rows",
"(",
"stmt_rows",
")",
":",
"for",
"key",
",",
"stmts",
"in",
"stmt_rows",
".",
"items",
"(",
")",
":",
"verb",
"=",
"key",
"[",
"0",
"]",
"inps",
"=",
"key",
"[",
"1",
":",
"]",
"sub_count",
"=",
"stmt_counts",
"[",
"key",
"]",
"arg_count",
"=",
"arg_counts",
"[",
"inps",
"]",
"if",
"verb",
"==",
"'Complex'",
"and",
"sub_count",
"==",
"arg_count",
"and",
"len",
"(",
"inps",
")",
"<=",
"2",
":",
"if",
"all",
"(",
"[",
"len",
"(",
"set",
"(",
"ag",
".",
"name",
"for",
"ag",
"in",
"s",
".",
"agent_list",
"(",
")",
")",
")",
">",
"2",
"for",
"s",
"in",
"stmts",
"]",
")",
":",
"continue",
"new_key",
"=",
"(",
"arg_count",
",",
"inps",
",",
"sub_count",
",",
"verb",
")",
"stmts",
"=",
"sorted",
"(",
"stmts",
",",
"key",
"=",
"lambda",
"s",
":",
"_count",
"(",
"s",
")",
"+",
"1",
"/",
"(",
"1",
"+",
"len",
"(",
"s",
".",
"agent_list",
"(",
")",
")",
")",
",",
"reverse",
"=",
"True",
")",
"yield",
"new_key",
",",
"verb",
",",
"stmts",
"sorted_groups",
"=",
"sorted",
"(",
"process_rows",
"(",
"stmt_rows",
")",
",",
"key",
"=",
"lambda",
"tpl",
":",
"tpl",
"[",
"0",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"sorted_groups"
]
| Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type. | [
"Group",
"statements",
"by",
"type",
"and",
"arguments",
"and",
"sort",
"by",
"prevalence",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/statement_presentation.py#L40-L109 | train |
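Illustrative usage with a toy statement (not from the record); each yielded tuple is `(sort_key, verb, statements)`, with `sort_key = (arg_count, args, stmt_count, verb)` as built in the code above:

```python
from indra.statements import Agent, Evidence, Phosphorylation
from indra.util.statement_presentation import group_and_sort_statements

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'),
                         evidence=[Evidence(text='MEK phosphorylates ERK')])]
for sort_key, verb, grouped in group_and_sort_statements(stmts):
    arg_count, args, stmt_count, _verb = sort_key
    print(verb, args, stmt_count, len(grouped))
```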
sorgerlab/indra | indra/util/statement_presentation.py | make_stmt_from_sort_key | def make_stmt_from_sort_key(key, verb):
"""Make a Statement from the sort key.
Specifically, the sort key used by `group_and_sort_statements`.
"""
def make_agent(name):
if name == 'None' or name is None:
return None
return Agent(name)
StmtClass = get_statement_by_name(verb)
inps = list(key[1])
if verb == 'Complex':
stmt = StmtClass([make_agent(name) for name in inps])
elif verb == 'Conversion':
stmt = StmtClass(make_agent(inps[0]),
[make_agent(name) for name in inps[1]],
[make_agent(name) for name in inps[2]])
elif verb == 'ActiveForm' or verb == 'HasActivity':
stmt = StmtClass(make_agent(inps[0]), inps[1], inps[2])
else:
stmt = StmtClass(*[make_agent(name) for name in inps])
return stmt | python | def make_stmt_from_sort_key(key, verb):
"""Make a Statement from the sort key.
Specifically, the sort key used by `group_and_sort_statements`.
"""
def make_agent(name):
if name == 'None' or name is None:
return None
return Agent(name)
StmtClass = get_statement_by_name(verb)
inps = list(key[1])
if verb == 'Complex':
stmt = StmtClass([make_agent(name) for name in inps])
elif verb == 'Conversion':
stmt = StmtClass(make_agent(inps[0]),
[make_agent(name) for name in inps[1]],
[make_agent(name) for name in inps[2]])
elif verb == 'ActiveForm' or verb == 'HasActivity':
stmt = StmtClass(make_agent(inps[0]), inps[1], inps[2])
else:
stmt = StmtClass(*[make_agent(name) for name in inps])
return stmt | [
"def",
"make_stmt_from_sort_key",
"(",
"key",
",",
"verb",
")",
":",
"def",
"make_agent",
"(",
"name",
")",
":",
"if",
"name",
"==",
"'None'",
"or",
"name",
"is",
"None",
":",
"return",
"None",
"return",
"Agent",
"(",
"name",
")",
"StmtClass",
"=",
"get_statement_by_name",
"(",
"verb",
")",
"inps",
"=",
"list",
"(",
"key",
"[",
"1",
"]",
")",
"if",
"verb",
"==",
"'Complex'",
":",
"stmt",
"=",
"StmtClass",
"(",
"[",
"make_agent",
"(",
"name",
")",
"for",
"name",
"in",
"inps",
"]",
")",
"elif",
"verb",
"==",
"'Conversion'",
":",
"stmt",
"=",
"StmtClass",
"(",
"make_agent",
"(",
"inps",
"[",
"0",
"]",
")",
",",
"[",
"make_agent",
"(",
"name",
")",
"for",
"name",
"in",
"inps",
"[",
"1",
"]",
"]",
",",
"[",
"make_agent",
"(",
"name",
")",
"for",
"name",
"in",
"inps",
"[",
"2",
"]",
"]",
")",
"elif",
"verb",
"==",
"'ActiveForm'",
"or",
"verb",
"==",
"'HasActivity'",
":",
"stmt",
"=",
"StmtClass",
"(",
"make_agent",
"(",
"inps",
"[",
"0",
"]",
")",
",",
"inps",
"[",
"1",
"]",
",",
"inps",
"[",
"2",
"]",
")",
"else",
":",
"stmt",
"=",
"StmtClass",
"(",
"*",
"[",
"make_agent",
"(",
"name",
")",
"for",
"name",
"in",
"inps",
"]",
")",
"return",
"stmt"
]
| Make a Statement from the sort key.
Specifically, the sort key used by `group_and_sort_statements`. | [
"Make",
"a",
"Statement",
"from",
"the",
"sort",
"key",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/statement_presentation.py#L112-L134 | train |
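The keys produced by `group_and_sort_statements` feed straight into this function; a small sketch, assuming (as the assembly above indicates) that the key's argument tuple holds agent names. The rebuilt statements are skeletons: agents only, no evidence.

```python
from indra.statements import Agent, Phosphorylation
from indra.util.statement_presentation import (
    group_and_sort_statements, make_stmt_from_sort_key)

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
for sort_key, verb, _grouped in group_and_sort_statements(stmts):
    # Turn the group key back into a displayable Statement.
    print(make_stmt_from_sort_key(sort_key, verb))
```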
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | get_ecs_cluster_for_queue | def get_ecs_cluster_for_queue(queue_name, batch_client=None):
"""Get the name of the ecs cluster using the batch client."""
if batch_client is None:
batch_client = boto3.client('batch')
queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
if len(queue_resp['jobQueues']) == 1:
queue = queue_resp['jobQueues'][0]
else:
raise BatchReadingError('Error finding queue with name %s.'
% queue_name)
compute_env_names = queue['computeEnvironmentOrder']
if len(compute_env_names) == 1:
compute_env_name = compute_env_names[0]['computeEnvironment']
else:
raise BatchReadingError('Error finding the compute environment name '
'for %s.' % queue_name)
compute_envs = batch_client.describe_compute_environments(
computeEnvironments=[compute_env_name]
)['computeEnvironments']
if len(compute_envs) == 1:
compute_env = compute_envs[0]
else:
raise BatchReadingError("Error getting compute environment %s for %s. "
"Got %d environments instead of 1."
% (compute_env_name, queue_name,
len(compute_envs)))
ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])
return ecs_cluster_name | python | def get_ecs_cluster_for_queue(queue_name, batch_client=None):
"""Get the name of the ecs cluster using the batch client."""
if batch_client is None:
batch_client = boto3.client('batch')
queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
if len(queue_resp['jobQueues']) == 1:
queue = queue_resp['jobQueues'][0]
else:
raise BatchReadingError('Error finding queue with name %s.'
% queue_name)
compute_env_names = queue['computeEnvironmentOrder']
if len(compute_env_names) == 1:
compute_env_name = compute_env_names[0]['computeEnvironment']
else:
raise BatchReadingError('Error finding the compute environment name '
'for %s.' % queue_name)
compute_envs = batch_client.describe_compute_environments(
computeEnvironments=[compute_env_name]
)['computeEnvironments']
if len(compute_envs) == 1:
compute_env = compute_envs[0]
else:
raise BatchReadingError("Error getting compute environment %s for %s. "
"Got %d environments instead of 1."
% (compute_env_name, queue_name,
len(compute_envs)))
ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])
return ecs_cluster_name | [
"def",
"get_ecs_cluster_for_queue",
"(",
"queue_name",
",",
"batch_client",
"=",
"None",
")",
":",
"if",
"batch_client",
"is",
"None",
":",
"batch_client",
"=",
"boto3",
".",
"client",
"(",
"'batch'",
")",
"queue_resp",
"=",
"batch_client",
".",
"describe_job_queues",
"(",
"jobQueues",
"=",
"[",
"queue_name",
"]",
")",
"if",
"len",
"(",
"queue_resp",
"[",
"'jobQueues'",
"]",
")",
"==",
"1",
":",
"queue",
"=",
"queue_resp",
"[",
"'jobQueues'",
"]",
"[",
"0",
"]",
"else",
":",
"raise",
"BatchReadingError",
"(",
"'Error finding queue with name %s.'",
"%",
"queue_name",
")",
"compute_env_names",
"=",
"queue",
"[",
"'computeEnvironmentOrder'",
"]",
"if",
"len",
"(",
"compute_env_names",
")",
"==",
"1",
":",
"compute_env_name",
"=",
"compute_env_names",
"[",
"0",
"]",
"[",
"'computeEnvironment'",
"]",
"else",
":",
"raise",
"BatchReadingError",
"(",
"'Error finding the compute environment name '",
"'for %s.'",
"%",
"queue_name",
")",
"compute_envs",
"=",
"batch_client",
".",
"describe_compute_environments",
"(",
"computeEnvironments",
"=",
"[",
"compute_env_name",
"]",
")",
"[",
"'computeEnvironments'",
"]",
"if",
"len",
"(",
"compute_envs",
")",
"==",
"1",
":",
"compute_env",
"=",
"compute_envs",
"[",
"0",
"]",
"else",
":",
"raise",
"BatchReadingError",
"(",
"\"Error getting compute environment %s for %s. \"",
"\"Got %d environments instead of 1.\"",
"%",
"(",
"compute_env_name",
",",
"queue_name",
",",
"len",
"(",
"compute_envs",
")",
")",
")",
"ecs_cluster_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"compute_env",
"[",
"'ecsClusterArn'",
"]",
")",
"return",
"ecs_cluster_name"
]
| Get the name of the ecs cluster using the batch client. | [
"Get",
"the",
"name",
"of",
"the",
"ecs",
"cluster",
"using",
"the",
"batch",
"client",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L275-L306 | train |
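Illustrative call; the queue name is made up and working AWS credentials are assumed:

```python
import boto3

from indra.tools.reading.submit_reading_pipeline import \
    get_ecs_cluster_for_queue

batch = boto3.client('batch')
cluster_name = get_ecs_cluster_for_queue('run_reach_queue',
                                         batch_client=batch)
print(cluster_name)
```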
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | tag_instances_on_cluster | def tag_instances_on_cluster(cluster_name, project='cwc'):
"""Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
"""
# Get the relevant instance ids from the ecs cluster
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if not task_arns:
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(
cluster=cluster_name,
containerInstances=[task['containerInstanceArn'] for task in tasks]
)['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
# Instantiate each instance to tag as a resource and create project tag
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return | python | def tag_instances_on_cluster(cluster_name, project='cwc'):
"""Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
"""
# Get the relevant instance ids from the ecs cluster
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if not task_arns:
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(
cluster=cluster_name,
containerInstances=[task['containerInstanceArn'] for task in tasks]
)['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
# Instantiate each instance to tag as a resource and create project tag
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return | [
"def",
"tag_instances_on_cluster",
"(",
"cluster_name",
",",
"project",
"=",
"'cwc'",
")",
":",
"# Get the relevant instance ids from the ecs cluster",
"ecs",
"=",
"boto3",
".",
"client",
"(",
"'ecs'",
")",
"task_arns",
"=",
"ecs",
".",
"list_tasks",
"(",
"cluster",
"=",
"cluster_name",
")",
"[",
"'taskArns'",
"]",
"if",
"not",
"task_arns",
":",
"return",
"tasks",
"=",
"ecs",
".",
"describe_tasks",
"(",
"cluster",
"=",
"cluster_name",
",",
"tasks",
"=",
"task_arns",
")",
"[",
"'tasks'",
"]",
"container_instances",
"=",
"ecs",
".",
"describe_container_instances",
"(",
"cluster",
"=",
"cluster_name",
",",
"containerInstances",
"=",
"[",
"task",
"[",
"'containerInstanceArn'",
"]",
"for",
"task",
"in",
"tasks",
"]",
")",
"[",
"'containerInstances'",
"]",
"ec2_instance_ids",
"=",
"[",
"ci",
"[",
"'ec2InstanceId'",
"]",
"for",
"ci",
"in",
"container_instances",
"]",
"# Instantiate each instance to tag as a resource and create project tag",
"for",
"instance_id",
"in",
"ec2_instance_ids",
":",
"tag_instance",
"(",
"instance_id",
",",
"project",
"=",
"project",
")",
"return"
]
| Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with. | [
"Adds",
"project",
"tag",
"to",
"untagged",
"instances",
"in",
"a",
"given",
"cluster",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L309-L335 | train |
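The two helpers compose naturally: resolve the cluster behind a queue, then tag its running instances (queue and project names are illustrative):

```python
from indra.tools.reading.submit_reading_pipeline import (
    get_ecs_cluster_for_queue, tag_instances_on_cluster)

cluster_name = get_ecs_cluster_for_queue('run_reach_queue')
tag_instances_on_cluster(cluster_name, project='cwc')
```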
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | submit_reading | def submit_reading(basename, pmid_list_filename, readers, start_ix=None,
end_ix=None, pmids_per_job=3000, num_tries=2,
force_read=False, force_fulltext=False, project_name=None):
"""Submit an old-style pmid-centered no-database s3 only reading job.
This function is provided for the sake of backward compatibility. It is
preferred that you use the object-oriented PmidSubmitter and the
submit_reading method going forward.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.set_options(force_read, force_fulltext)
sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job,
num_tries)
return sub.job_list | python | def submit_reading(basename, pmid_list_filename, readers, start_ix=None,
end_ix=None, pmids_per_job=3000, num_tries=2,
force_read=False, force_fulltext=False, project_name=None):
"""Submit an old-style pmid-centered no-database s3 only reading job.
This function is provided for the sake of backward compatibility. It is
preferred that you use the object-oriented PmidSubmitter and the
submit_reading method going forward.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.set_options(force_read, force_fulltext)
sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job,
num_tries)
return sub.job_list | [
"def",
"submit_reading",
"(",
"basename",
",",
"pmid_list_filename",
",",
"readers",
",",
"start_ix",
"=",
"None",
",",
"end_ix",
"=",
"None",
",",
"pmids_per_job",
"=",
"3000",
",",
"num_tries",
"=",
"2",
",",
"force_read",
"=",
"False",
",",
"force_fulltext",
"=",
"False",
",",
"project_name",
"=",
"None",
")",
":",
"sub",
"=",
"PmidSubmitter",
"(",
"basename",
",",
"readers",
",",
"project_name",
")",
"sub",
".",
"set_options",
"(",
"force_read",
",",
"force_fulltext",
")",
"sub",
".",
"submit_reading",
"(",
"pmid_list_filename",
",",
"start_ix",
",",
"end_ix",
",",
"pmids_per_job",
",",
"num_tries",
")",
"return",
"sub",
".",
"job_list"
]
| Submit an old-style pmid-centered no-database s3 only reading job.
This function is provided for the sake of backward compatibility. It is
preferred that you use the object-oriented PmidSubmitter and the
submit_reading method going forward. | [
"Submit",
"an",
"old",
"-",
"style",
"pmid",
"-",
"centered",
"no",
"-",
"database",
"s3",
"only",
"reading",
"job",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L555-L568 | train |
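Illustrative call to this legacy wrapper; run name, file name and reader list are assumptions:

```python
from indra.tools.reading.submit_reading_pipeline import submit_reading

# Returns the submitter's job_list (a list of {'jobId': ...} dicts).
job_ids = submit_reading('my_run', 'pmid_list.txt', ['reach'],
                         pmids_per_job=2000, project_name='cwc')
```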
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | submit_combine | def submit_combine(basename, readers, job_ids=None, project_name=None):
"""Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
PmidSubmitter and submit_combine methods.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub | python | def submit_combine(basename, readers, job_ids=None, project_name=None):
"""Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
PmidSubmitter and submit_combine methods.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub | [
"def",
"submit_combine",
"(",
"basename",
",",
"readers",
",",
"job_ids",
"=",
"None",
",",
"project_name",
"=",
"None",
")",
":",
"sub",
"=",
"PmidSubmitter",
"(",
"basename",
",",
"readers",
",",
"project_name",
")",
"sub",
".",
"job_list",
"=",
"job_ids",
"sub",
".",
"submit_combine",
"(",
")",
"return",
"sub"
]
| Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
PmidSubmitter and submit_combine methods. | [
"Submit",
"a",
"batch",
"job",
"to",
"combine",
"the",
"outputs",
"of",
"a",
"reading",
"job",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L571-L580 | train |
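Continuing the legacy flow from the previous sketch, the returned job ids feed the combine step:

```python
from indra.tools.reading.submit_reading_pipeline import submit_combine

# job_ids as returned by submit_reading in the sketch above.
sub = submit_combine('my_run', ['reach'], job_ids=job_ids,
                     project_name='cwc')
```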
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | Submitter.submit_reading | def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
num_tries=1, stagger=0):
"""Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings.
"""
# stash this for later.
self.ids_per_job = ids_per_job
# Upload the pmid_list to Amazon S3
id_list_key = 'reading_results/%s/%s' % (self.basename,
self._s3_input_name)
s3_client = boto3.client('s3')
s3_client.upload_file(input_fname, bucket_name, id_list_key)
# If no end index is specified, read all the PMIDs
if end_ix is None:
with open(input_fname, 'rt') as f:
lines = f.readlines()
end_ix = len(lines)
if start_ix is None:
start_ix = 0
# Get environment variables
environment_vars = get_environment()
# Iterate over the list of PMIDs and submit the job in chunks
batch_client = boto3.client('batch', region_name='us-east-1')
job_list = []
for job_start_ix in range(start_ix, end_ix, ids_per_job):
sleep(stagger)
job_end_ix = job_start_ix + ids_per_job
if job_end_ix > end_ix:
job_end_ix = end_ix
job_name, cmd = self._make_command(job_start_ix, job_end_ix)
command_list = get_batch_command(cmd, purpose=self._purpose,
project=self.project_name)
logger.info('Command list: %s' % str(command_list))
job_info = batch_client.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=self._job_def,
containerOverrides={
'environment': environment_vars,
'command': command_list},
retryStrategy={'attempts': num_tries}
)
logger.info("submitted...")
job_list.append({'jobId': job_info['jobId']})
self.job_list = job_list
return job_list | python | def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
num_tries=1, stagger=0):
"""Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings.
"""
# stash this for later.
self.ids_per_job = ids_per_job
# Upload the pmid_list to Amazon S3
id_list_key = 'reading_results/%s/%s' % (self.basename,
self._s3_input_name)
s3_client = boto3.client('s3')
s3_client.upload_file(input_fname, bucket_name, id_list_key)
# If no end index is specified, read all the PMIDs
if end_ix is None:
with open(input_fname, 'rt') as f:
lines = f.readlines()
end_ix = len(lines)
if start_ix is None:
start_ix = 0
# Get environment variables
environment_vars = get_environment()
# Iterate over the list of PMIDs and submit the job in chunks
batch_client = boto3.client('batch', region_name='us-east-1')
job_list = []
for job_start_ix in range(start_ix, end_ix, ids_per_job):
sleep(stagger)
job_end_ix = job_start_ix + ids_per_job
if job_end_ix > end_ix:
job_end_ix = end_ix
job_name, cmd = self._make_command(job_start_ix, job_end_ix)
command_list = get_batch_command(cmd, purpose=self._purpose,
project=self.project_name)
logger.info('Command list: %s' % str(command_list))
job_info = batch_client.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=self._job_def,
containerOverrides={
'environment': environment_vars,
'command': command_list},
retryStrategy={'attempts': num_tries}
)
logger.info("submitted...")
job_list.append({'jobId': job_info['jobId']})
self.job_list = job_list
return job_list | [
"def",
"submit_reading",
"(",
"self",
",",
"input_fname",
",",
"start_ix",
",",
"end_ix",
",",
"ids_per_job",
",",
"num_tries",
"=",
"1",
",",
"stagger",
"=",
"0",
")",
":",
"# stash this for later.",
"self",
".",
"ids_per_job",
"=",
"ids_per_job",
"# Upload the pmid_list to Amazon S3",
"id_list_key",
"=",
"'reading_results/%s/%s'",
"%",
"(",
"self",
".",
"basename",
",",
"self",
".",
"_s3_input_name",
")",
"s3_client",
"=",
"boto3",
".",
"client",
"(",
"'s3'",
")",
"s3_client",
".",
"upload_file",
"(",
"input_fname",
",",
"bucket_name",
",",
"id_list_key",
")",
"# If no end index is specified, read all the PMIDs",
"if",
"end_ix",
"is",
"None",
":",
"with",
"open",
"(",
"input_fname",
",",
"'rt'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"end_ix",
"=",
"len",
"(",
"lines",
")",
"if",
"start_ix",
"is",
"None",
":",
"start_ix",
"=",
"0",
"# Get environment variables",
"environment_vars",
"=",
"get_environment",
"(",
")",
"# Iterate over the list of PMIDs and submit the job in chunks",
"batch_client",
"=",
"boto3",
".",
"client",
"(",
"'batch'",
",",
"region_name",
"=",
"'us-east-1'",
")",
"job_list",
"=",
"[",
"]",
"for",
"job_start_ix",
"in",
"range",
"(",
"start_ix",
",",
"end_ix",
",",
"ids_per_job",
")",
":",
"sleep",
"(",
"stagger",
")",
"job_end_ix",
"=",
"job_start_ix",
"+",
"ids_per_job",
"if",
"job_end_ix",
">",
"end_ix",
":",
"job_end_ix",
"=",
"end_ix",
"job_name",
",",
"cmd",
"=",
"self",
".",
"_make_command",
"(",
"job_start_ix",
",",
"job_end_ix",
")",
"command_list",
"=",
"get_batch_command",
"(",
"cmd",
",",
"purpose",
"=",
"self",
".",
"_purpose",
",",
"project",
"=",
"self",
".",
"project_name",
")",
"logger",
".",
"info",
"(",
"'Command list: %s'",
"%",
"str",
"(",
"command_list",
")",
")",
"job_info",
"=",
"batch_client",
".",
"submit_job",
"(",
"jobName",
"=",
"job_name",
",",
"jobQueue",
"=",
"self",
".",
"_job_queue",
",",
"jobDefinition",
"=",
"self",
".",
"_job_def",
",",
"containerOverrides",
"=",
"{",
"'environment'",
":",
"environment_vars",
",",
"'command'",
":",
"command_list",
"}",
",",
"retryStrategy",
"=",
"{",
"'attempts'",
":",
"num_tries",
"}",
")",
"logger",
".",
"info",
"(",
"\"submitted...\"",
")",
"job_list",
".",
"append",
"(",
"{",
"'jobId'",
":",
"job_info",
"[",
"'jobId'",
"]",
"}",
")",
"self",
".",
"job_list",
"=",
"job_list",
"return",
"job_list"
]
| Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings. | [
"Submit",
"a",
"batch",
"of",
"reading",
"jobs"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L397-L466 | train |
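Typical use goes through a concrete submitter such as `PmidSubmitter` (whose `set_options` appears later in this dump); the constructor arguments mirror how the legacy wrapper instantiates it, and all values here are illustrative:

```python
from indra.tools.reading.submit_reading_pipeline import PmidSubmitter

sub = PmidSubmitter('my_run', ['reach'], 'cwc')
# Read the whole id list, 2000 ids per batch job, staggering
# submissions by 5 seconds.
job_list = sub.submit_reading('pmid_list.txt', 0, None, 2000, stagger=5)
```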
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | Submitter.watch_and_wait | def watch_and_wait(self, poll_interval=10, idle_log_timeout=None,
kill_on_timeout=False, stash_log_method=None,
tag_instances=False, **kwargs):
"""This provides shortcut access to the wait_for_complete_function."""
return wait_for_complete(self._job_queue, job_list=self.job_list,
job_name_prefix=self.basename,
poll_interval=poll_interval,
idle_log_timeout=idle_log_timeout,
kill_on_log_timeout=kill_on_timeout,
stash_log_method=stash_log_method,
tag_instances=tag_instances, **kwargs) | python | def watch_and_wait(self, poll_interval=10, idle_log_timeout=None,
kill_on_timeout=False, stash_log_method=None,
tag_instances=False, **kwargs):
"""This provides shortcut access to the wait_for_complete_function."""
return wait_for_complete(self._job_queue, job_list=self.job_list,
job_name_prefix=self.basename,
poll_interval=poll_interval,
idle_log_timeout=idle_log_timeout,
kill_on_log_timeout=kill_on_timeout,
stash_log_method=stash_log_method,
tag_instances=tag_instances, **kwargs) | [
"def",
"watch_and_wait",
"(",
"self",
",",
"poll_interval",
"=",
"10",
",",
"idle_log_timeout",
"=",
"None",
",",
"kill_on_timeout",
"=",
"False",
",",
"stash_log_method",
"=",
"None",
",",
"tag_instances",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"wait_for_complete",
"(",
"self",
".",
"_job_queue",
",",
"job_list",
"=",
"self",
".",
"job_list",
",",
"job_name_prefix",
"=",
"self",
".",
"basename",
",",
"poll_interval",
"=",
"poll_interval",
",",
"idle_log_timeout",
"=",
"idle_log_timeout",
",",
"kill_on_log_timeout",
"=",
"kill_on_timeout",
",",
"stash_log_method",
"=",
"stash_log_method",
",",
"tag_instances",
"=",
"tag_instances",
",",
"*",
"*",
"kwargs",
")"
]
| This provides shortcut access to the wait_for_complete function. | [
"This",
"provides",
"shortcut",
"access",
"to",
"the",
"wait_for_complete_function",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L468-L478 | train |
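A blocking monitor call on a submitter whose `job_list` is already populated (as in the sketch above); the parameter values are illustrative, and any extras are forwarded on to `wait_for_complete`:

```python
# Poll every 30 s; kill jobs whose logs go quiet for 10 minutes.
sub.watch_and_wait(poll_interval=30, idle_log_timeout=600,
                   kill_on_timeout=True)
```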
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | Submitter.run | def run(self, input_fname, ids_per_job, stagger=0, **wait_params):
"""Run this submission all the way.
This method will run both `submit_reading` and `watch_and_wait`,
blocking on the latter.
"""
submit_thread = Thread(target=self.submit_reading,
args=(input_fname, 0, None, ids_per_job),
kwargs={'stagger': stagger},
daemon=True)
submit_thread.start()
self.watch_and_wait(**wait_params)
submit_thread.join(0)
if submit_thread.is_alive():
logger.warning("Submit thread is still running even after job"
"completion.")
return | python | def run(self, input_fname, ids_per_job, stagger=0, **wait_params):
"""Run this submission all the way.
This method will run both `submit_reading` and `watch_and_wait`,
blocking on the latter.
"""
submit_thread = Thread(target=self.submit_reading,
args=(input_fname, 0, None, ids_per_job),
kwargs={'stagger': stagger},
daemon=True)
submit_thread.start()
self.watch_and_wait(**wait_params)
submit_thread.join(0)
if submit_thread.is_alive():
logger.warning("Submit thread is still running even after job"
"completion.")
return | [
"def",
"run",
"(",
"self",
",",
"input_fname",
",",
"ids_per_job",
",",
"stagger",
"=",
"0",
",",
"*",
"*",
"wait_params",
")",
":",
"submit_thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"submit_reading",
",",
"args",
"=",
"(",
"input_fname",
",",
"0",
",",
"None",
",",
"ids_per_job",
")",
",",
"kwargs",
"=",
"{",
"'stagger'",
":",
"stagger",
"}",
",",
"daemon",
"=",
"True",
")",
"submit_thread",
".",
"start",
"(",
")",
"self",
".",
"watch_and_wait",
"(",
"*",
"*",
"wait_params",
")",
"submit_thread",
".",
"join",
"(",
"0",
")",
"if",
"submit_thread",
".",
"is_alive",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"Submit thread is still running even after job\"",
"\"completion.\"",
")",
"return"
]
| Run this submission all the way.
This method will run both `submit_reading` and `watch_and_wait`,
blocking on the latter. | [
"Run",
"this",
"submission",
"all",
"the",
"way",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L480-L496 | train |
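`run` wires the two halves together: it submits in a daemon thread and blocks on `watch_and_wait`. A sketch with illustrative wait parameters (they pass through `**wait_params`):

```python
from indra.tools.reading.submit_reading_pipeline import PmidSubmitter

sub = PmidSubmitter('my_run', ['reach'], 'cwc')
sub.run('pmid_list.txt', ids_per_job=2000, stagger=5,
        idle_log_timeout=600, kill_on_timeout=True)
```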
sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | PmidSubmitter.set_options | def set_options(self, force_read=False, force_fulltext=False):
"""Set the options for this run."""
self.options['force_read'] = force_read
self.options['force_fulltext'] = force_fulltext
return | python | def set_options(self, force_read=False, force_fulltext=False):
"""Set the options for this run."""
self.options['force_read'] = force_read
self.options['force_fulltext'] = force_fulltext
return | [
"def",
"set_options",
"(",
"self",
",",
"force_read",
"=",
"False",
",",
"force_fulltext",
"=",
"False",
")",
":",
"self",
".",
"options",
"[",
"'force_read'",
"]",
"=",
"force_read",
"self",
".",
"options",
"[",
"'force_fulltext'",
"]",
"=",
"force_fulltext",
"return"
]
| Set the options for this run. | [
"Set",
"the",
"options",
"for",
"this",
"run",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L517-L521 | train |
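The two flags map straight onto the submitter's options dict:

```python
from indra.tools.reading.submit_reading_pipeline import PmidSubmitter

sub = PmidSubmitter('my_run', ['reach'], 'cwc')
sub.set_options(force_read=True, force_fulltext=False)
assert sub.options['force_read'] is True
```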
sorgerlab/indra | indra/databases/chebi_client.py | get_chebi_name_from_id | def get_chebi_name_from_id(chebi_id, offline=False):
"""Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
chebi_name = chebi_id_to_name.get(chebi_id)
if chebi_name is None and not offline:
chebi_name = get_chebi_name_from_id_web(chebi_id)
return chebi_name | python | def get_chebi_name_from_id(chebi_id, offline=False):
"""Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
chebi_name = chebi_id_to_name.get(chebi_id)
if chebi_name is None and not offline:
chebi_name = get_chebi_name_from_id_web(chebi_id)
return chebi_name | [
"def",
"get_chebi_name_from_id",
"(",
"chebi_id",
",",
"offline",
"=",
"False",
")",
":",
"chebi_name",
"=",
"chebi_id_to_name",
".",
"get",
"(",
"chebi_id",
")",
"if",
"chebi_name",
"is",
"None",
"and",
"not",
"offline",
":",
"chebi_name",
"=",
"get_chebi_name_from_id_web",
"(",
"chebi_id",
")",
"return",
"chebi_name"
]
| Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned. | [
"Return",
"a",
"ChEBI",
"name",
"corresponding",
"to",
"the",
"given",
"ChEBI",
"ID",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chebi_client.py#L86-L106 | train |
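Illustrative lookup; whether keys in the offline table carry the 'CHEBI:' prefix is an assumption here, so treat the exact ID format as unverified:

```python
from indra.databases import chebi_client

# offline=True skips the web fallback entirely.
name = chebi_client.get_chebi_name_from_id('CHEBI:15377', offline=True)
print(name)  # expected 'water' if the ID resolves in the local table
```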
sorgerlab/indra | indra/databases/chebi_client.py | get_chebi_name_from_id_web | def get_chebi_name_from_id_web(chebi_id):
"""Return a ChEBI mame corresponding to a given ChEBI ID using a REST API.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
url_base = 'http://www.ebi.ac.uk/webservices/chebi/2.0/test/'
url_fmt = url_base + 'getCompleteEntity?chebiId=%s'
resp = requests.get(url_fmt % chebi_id)
if resp.status_code != 200:
logger.warning("Got bad code form CHEBI client: %s" % resp.status_code)
return None
tree = etree.fromstring(resp.content)
# Get rid of the namespaces.
# Credit: https://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml
for elem in tree.getiterator():
if not hasattr(elem.tag, 'find'):
continue # (1)
i = elem.tag.find('}')
if i >= 0:
elem.tag = elem.tag[i+1:]
objectify.deannotate(tree, cleanup_namespaces=True)
elem = tree.find('Body/getCompleteEntityResponse/return/chebiAsciiName')
if elem is not None:
return elem.text
return None | python | def get_chebi_name_from_id_web(chebi_id):
"""Return a ChEBI mame corresponding to a given ChEBI ID using a REST API.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
url_base = 'http://www.ebi.ac.uk/webservices/chebi/2.0/test/'
url_fmt = url_base + 'getCompleteEntity?chebiId=%s'
resp = requests.get(url_fmt % chebi_id)
if resp.status_code != 200:
logger.warning("Got bad code form CHEBI client: %s" % resp.status_code)
return None
tree = etree.fromstring(resp.content)
# Get rid of the namespaces.
# Credit: https://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml
for elem in tree.getiterator():
if not hasattr(elem.tag, 'find'):
continue # (1)
i = elem.tag.find('}')
if i >= 0:
elem.tag = elem.tag[i+1:]
objectify.deannotate(tree, cleanup_namespaces=True)
elem = tree.find('Body/getCompleteEntityResponse/return/chebiAsciiName')
if elem is not None:
return elem.text
return None | [
"def",
"get_chebi_name_from_id_web",
"(",
"chebi_id",
")",
":",
"url_base",
"=",
"'http://www.ebi.ac.uk/webservices/chebi/2.0/test/'",
"url_fmt",
"=",
"url_base",
"+",
"'getCompleteEntity?chebiId=%s'",
"resp",
"=",
"requests",
".",
"get",
"(",
"url_fmt",
"%",
"chebi_id",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"logger",
".",
"warning",
"(",
"\"Got bad code form CHEBI client: %s\"",
"%",
"resp",
".",
"status_code",
")",
"return",
"None",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"resp",
".",
"content",
")",
"# Get rid of the namespaces.",
"# Credit: https://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml",
"for",
"elem",
"in",
"tree",
".",
"getiterator",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"elem",
".",
"tag",
",",
"'find'",
")",
":",
"continue",
"# (1)",
"i",
"=",
"elem",
".",
"tag",
".",
"find",
"(",
"'}'",
")",
"if",
"i",
">=",
"0",
":",
"elem",
".",
"tag",
"=",
"elem",
".",
"tag",
"[",
"i",
"+",
"1",
":",
"]",
"objectify",
".",
"deannotate",
"(",
"tree",
",",
"cleanup_namespaces",
"=",
"True",
")",
"elem",
"=",
"tree",
".",
"find",
"(",
"'Body/getCompleteEntityResponse/return/chebiAsciiName'",
")",
"if",
"elem",
"is",
"not",
"None",
":",
"return",
"elem",
".",
"text",
"return",
"None"
]
| Return a ChEBI name corresponding to a given ChEBI ID using a REST API.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned. | [
"Return",
"a",
"ChEBI",
"mame",
"corresponding",
"to",
"a",
"given",
"ChEBI",
"ID",
"using",
"a",
"REST",
"API",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chebi_client.py#L179-L214 | train |
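The namespace-stripping loop above is a reusable trick for SOAP responses: truncate each tag at '}' so plain path lookups like 'Body/.../chebiAsciiName' work. A standalone sketch with a hypothetical helper name:

```python
from lxml import etree, objectify

def strip_namespaces(tree):
    for elem in tree.iter():
        if not hasattr(elem.tag, 'find'):
            continue  # skip comments / processing instructions
        i = elem.tag.find('}')
        if i >= 0:
            # '{namespace}tag' -> 'tag'
            elem.tag = elem.tag[i + 1:]
    objectify.deannotate(tree, cleanup_namespaces=True)
    return tree

xml = b'<a xmlns="urn:example"><b>text</b></a>'
print(strip_namespaces(etree.fromstring(xml)).find('b').text)  # text
```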
sorgerlab/indra | indra/tools/executable_subnetwork.py | get_subnetwork | def get_subnetwork(statements, nodes, relevance_network=None,
relevance_node_lim=10):
"""Return a PySB model based on a subset of given INDRA Statements.
Statements are first filtered for nodes in the given list and other nodes
are optionally added based on relevance in a given network. The filtered
statements are then assembled into an executable model using INDRA's
PySB Assembler.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to extract a subnetwork from.
nodes : list[str]
The names of the nodes to extract the subnetwork for.
relevance_network : Optional[str]
The UUID of the NDEx network in which nodes relevant to the given
nodes are found.
relevance_node_lim : Optional[int]
The maximal number of additional nodes to add to the subnetwork
based on relevance.
Returns
-------
model : pysb.Model
A PySB model object assembled using INDRA's PySB Assembler from
the INDRA Statements corresponding to the subnetwork.
"""
if relevance_network is not None:
relevant_nodes = _find_relevant_nodes(nodes, relevance_network,
relevance_node_lim)
all_nodes = nodes + relevant_nodes
else:
all_nodes = nodes
filtered_statements = _filter_statements(statements, all_nodes)
pa = PysbAssembler()
pa.add_statements(filtered_statements)
model = pa.make_model()
return model | python | def get_subnetwork(statements, nodes, relevance_network=None,
relevance_node_lim=10):
"""Return a PySB model based on a subset of given INDRA Statements.
Statements are first filtered for nodes in the given list and other nodes
are optionally added based on relevance in a given network. The filtered
statements are then assembled into an executable model using INDRA's
PySB Assembler.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to extract a subnetwork from.
nodes : list[str]
The names of the nodes to extract the subnetwork for.
relevance_network : Optional[str]
The UUID of the NDEx network in which nodes relevant to the given
nodes are found.
relevance_node_lim : Optional[int]
The maximal number of additional nodes to add to the subnetwork
based on relevance.
Returns
-------
model : pysb.Model
A PySB model object assembled using INDRA's PySB Assembler from
the INDRA Statements corresponding to the subnetwork.
"""
if relevance_network is not None:
relevant_nodes = _find_relevant_nodes(nodes, relevance_network,
relevance_node_lim)
all_nodes = nodes + relevant_nodes
else:
all_nodes = nodes
filtered_statements = _filter_statements(statements, all_nodes)
pa = PysbAssembler()
pa.add_statements(filtered_statements)
model = pa.make_model()
return model | [
"def",
"get_subnetwork",
"(",
"statements",
",",
"nodes",
",",
"relevance_network",
"=",
"None",
",",
"relevance_node_lim",
"=",
"10",
")",
":",
"if",
"relevance_network",
"is",
"not",
"None",
":",
"relevant_nodes",
"=",
"_find_relevant_nodes",
"(",
"nodes",
",",
"relevance_network",
",",
"relevance_node_lim",
")",
"all_nodes",
"=",
"nodes",
"+",
"relevant_nodes",
"else",
":",
"all_nodes",
"=",
"nodes",
"filtered_statements",
"=",
"_filter_statements",
"(",
"statements",
",",
"all_nodes",
")",
"pa",
"=",
"PysbAssembler",
"(",
")",
"pa",
".",
"add_statements",
"(",
"filtered_statements",
")",
"model",
"=",
"pa",
".",
"make_model",
"(",
")",
"return",
"model"
]
| Return a PySB model based on a subset of given INDRA Statements.
Statements are first filtered for nodes in the given list and other nodes
are optionally added based on relevance in a given network. The filtered
statements are then assembled into an executable model using INDRA's
PySB Assembler.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to extract a subnetwork from.
nodes : list[str]
The names of the nodes to extract the subnetwork for.
relevance_network : Optional[str]
The UUID of the NDEx network in which nodes relevant to the given
nodes are found.
relevance_node_lim : Optional[int]
The maximal number of additional nodes to add to the subnetwork
based on relevance.
Returns
-------
model : pysb.Model
A PySB model object assembled using INDRA's PySB Assembler from
the INDRA Statements corresponding to the subnetwork. | [
"Return",
"a",
"PySB",
"model",
"based",
"on",
"a",
"subset",
"of",
"given",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/executable_subnetwork.py#L7-L45 | train |
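Illustrative end-to-end call with toy statements (PySB must be installed for the assembly step):

```python
from indra.statements import Agent, Phosphorylation
from indra.tools.executable_subnetwork import get_subnetwork

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
# No relevance network given, so only the named nodes are kept.
model = get_subnetwork(stmts, ['MAP2K1', 'MAPK1'])
```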
sorgerlab/indra | indra/tools/executable_subnetwork.py | _filter_statements | def _filter_statements(statements, agents):
"""Return INDRA Statements which have Agents in the given list.
Only statements are returned in which all appearing Agents are in the
agents list.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to filter.
agents : list[str]
A list of agent names that need to appear in filtered statements.
Returns
-------
filtered_statements : list[indra.statements.Statement]
The list of filtered INDRA Statements.
"""
filtered_statements = []
for s in statements:
if all([a is not None for a in s.agent_list()]) and \
all([a.name in agents for a in s.agent_list()]):
filtered_statements.append(s)
return filtered_statements | python | def _filter_statements(statements, agents):
"""Return INDRA Statements which have Agents in the given list.
    Only statements are returned in which all appearing Agents are in the
agents list.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to filter.
agents : list[str]
A list of agent names that need to appear in filtered statements.
Returns
-------
filtered_statements : list[indra.statements.Statement]
The list of filtered INDRA Statements.
"""
filtered_statements = []
    for s in statements:
if all([a is not None for a in s.agent_list()]) and \
all([a.name in agents for a in s.agent_list()]):
filtered_statements.append(s)
return filtered_statements | [
"def",
"_filter_statements",
"(",
"statements",
",",
"agents",
")",
":",
"filtered_statements",
"=",
"[",
"]",
"for",
"s",
"in",
"stmts",
":",
"if",
"all",
"(",
"[",
"a",
"is",
"not",
"None",
"for",
"a",
"in",
"s",
".",
"agent_list",
"(",
")",
"]",
")",
"and",
"all",
"(",
"[",
"a",
".",
"name",
"in",
"agents",
"for",
"a",
"in",
"s",
".",
"agent_list",
"(",
")",
"]",
")",
":",
"filtered_statements",
".",
"append",
"(",
"s",
")",
"return",
"filtered_statements"
]
| Return INDRA Statements which have Agents in the given list.
Only statements are returned in which all appearing Agents are in the
agents list.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to filter.
agents : list[str]
A list of agent names that need to appear in filtered statements.
Returns
-------
filtered_statements : list[indra.statements.Statement]
The list of filtered INDRA Statements. | [
"Return",
"INDRA",
"Statements",
"which",
"have",
"Agents",
"in",
"the",
"given",
"list",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/executable_subnetwork.py#L47-L70 | train |
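_filter_statements is module-internal, but its filtering rule is easy to exercise directly; a small sketch using two hand-built Statements (the agent names are arbitrary):

from indra.statements import Agent, Phosphorylation
from indra.tools.executable_subnetwork import _filter_statements

stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
# Only the first statement is kept: every agent must be in the allow-list.
kept = _filter_statements(stmts, ['BRAF', 'MAP2K1'])
assert len(kept) == 1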
sorgerlab/indra | indra/tools/executable_subnetwork.py | _find_relevant_nodes | def _find_relevant_nodes(query_nodes, relevance_network, relevance_node_lim):
"""Return a list of nodes that are relevant for the query.
Parameters
----------
query_nodes : list[str]
A list of node names to query for.
relevance_network : str
The UUID of the NDEx network to query relevance in.
relevance_node_lim : int
The number of top relevant nodes to return.
Returns
-------
nodes : list[str]
A list of node names that are relevant for the query.
"""
all_nodes = relevance_client.get_relevant_nodes(relevance_network,
query_nodes)
nodes = [n[0] for n in all_nodes[:relevance_node_lim]]
return nodes | python | def _find_relevant_nodes(query_nodes, relevance_network, relevance_node_lim):
"""Return a list of nodes that are relevant for the query.
Parameters
----------
query_nodes : list[str]
A list of node names to query for.
relevance_network : str
The UUID of the NDEx network to query relevance in.
relevance_node_lim : int
The number of top relevant nodes to return.
Returns
-------
nodes : list[str]
A list of node names that are relevant for the query.
"""
all_nodes = relevance_client.get_relevant_nodes(relevance_network,
query_nodes)
nodes = [n[0] for n in all_nodes[:relevance_node_lim]]
return nodes | [
"def",
"_find_relevant_nodes",
"(",
"query_nodes",
",",
"relevance_network",
",",
"relevance_node_lim",
")",
":",
"all_nodes",
"=",
"relevance_client",
".",
"get_relevant_nodes",
"(",
"relevance_network",
",",
"query_nodes",
")",
"nodes",
"=",
"[",
"n",
"[",
"0",
"]",
"for",
"n",
"in",
"all_nodes",
"[",
":",
"relevance_node_lim",
"]",
"]",
"return",
"nodes"
]
| Return a list of nodes that are relevant for the query.
Parameters
----------
query_nodes : list[str]
A list of node names to query for.
relevance_network : str
The UUID of the NDEx network to query relevance in.
relevance_node_lim : int
The number of top relevant nodes to return.
Returns
-------
nodes : list[str]
A list of node names that are relevant for the query. | [
"Return",
"a",
"list",
"of",
"nodes",
"that",
"are",
"relevant",
"for",
"the",
"query",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/executable_subnetwork.py#L72-L92 | train |
sorgerlab/indra | indra/sources/hume/api.py | process_jsonld_file | def process_jsonld_file(fname):
"""Process a JSON-LD file in the new format to extract Statements.
Parameters
----------
fname : str
The path to the JSON-LD file to be processed.
Returns
-------
indra.sources.hume.HumeProcessor
A HumeProcessor instance, which contains a list of INDRA Statements
as its statements attribute.
"""
with open(fname, 'r') as fh:
json_dict = json.load(fh)
return process_jsonld(json_dict) | python | def process_jsonld_file(fname):
"""Process a JSON-LD file in the new format to extract Statements.
Parameters
----------
fname : str
The path to the JSON-LD file to be processed.
Returns
-------
indra.sources.hume.HumeProcessor
A HumeProcessor instance, which contains a list of INDRA Statements
as its statements attribute.
"""
with open(fname, 'r') as fh:
json_dict = json.load(fh)
return process_jsonld(json_dict) | [
"def",
"process_jsonld_file",
"(",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"fh",
":",
"json_dict",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"return",
"process_jsonld",
"(",
"json_dict",
")"
]
| Process a JSON-LD file in the new format to extract Statements.
Parameters
----------
fname : str
The path to the JSON-LD file to be processed.
Returns
-------
indra.sources.hume.HumeProcessor
A HumeProcessor instance, which contains a list of INDRA Statements
as its statements attribute. | [
"Process",
"a",
"JSON",
"-",
"LD",
"file",
"in",
"the",
"new",
"format",
"to",
"extract",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/api.py#L10-L26 | train |
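A usage sketch, assuming Hume output has been saved to a local JSON-LD file (the filename is a placeholder):

from indra.sources.hume.api import process_jsonld_file

hp = process_jsonld_file('hume_output.jsonld')  # placeholder path
for stmt in hp.statements:
    print(stmt)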
sorgerlab/indra | indra/util/aws.py | tag_instance | def tag_instance(instance_id, **tags):
"""Tag a single ec2 instance."""
logger.debug("Got request to add tags %s to instance %s."
% (str(tags), instance_id))
ec2 = boto3.resource('ec2')
instance = ec2.Instance(instance_id)
# Remove None's from `tags`
filtered_tags = {k: v for k, v in tags.items() if v and k}
# Check for existing tags
if instance.tags is not None:
existing_tags = {tag.get('Key'): tag.get('Value')
for tag in instance.tags}
logger.debug("Ignoring existing tags; %s" % str(existing_tags))
for tag_key in existing_tags.keys():
filtered_tags.pop(tag_key, None)
# If we have new tags to add, add them.
tag_list = [{'Key': k, 'Value': v} for k, v in filtered_tags.items()]
if len(tag_list):
logger.info('Adding project tags "%s" to instance %s'
% (filtered_tags, instance_id))
instance.create_tags(Tags=tag_list)
else:
logger.info('No new tags from: %s' % str(tags))
return | python | def tag_instance(instance_id, **tags):
"""Tag a single ec2 instance."""
logger.debug("Got request to add tags %s to instance %s."
% (str(tags), instance_id))
ec2 = boto3.resource('ec2')
instance = ec2.Instance(instance_id)
# Remove None's from `tags`
filtered_tags = {k: v for k, v in tags.items() if v and k}
# Check for existing tags
if instance.tags is not None:
existing_tags = {tag.get('Key'): tag.get('Value')
for tag in instance.tags}
logger.debug("Ignoring existing tags; %s" % str(existing_tags))
for tag_key in existing_tags.keys():
filtered_tags.pop(tag_key, None)
# If we have new tags to add, add them.
tag_list = [{'Key': k, 'Value': v} for k, v in filtered_tags.items()]
if len(tag_list):
logger.info('Adding project tags "%s" to instance %s'
% (filtered_tags, instance_id))
instance.create_tags(Tags=tag_list)
else:
logger.info('No new tags from: %s' % str(tags))
return | [
"def",
"tag_instance",
"(",
"instance_id",
",",
"*",
"*",
"tags",
")",
":",
"logger",
".",
"debug",
"(",
"\"Got request to add tags %s to instance %s.\"",
"%",
"(",
"str",
"(",
"tags",
")",
",",
"instance_id",
")",
")",
"ec2",
"=",
"boto3",
".",
"resource",
"(",
"'ec2'",
")",
"instance",
"=",
"ec2",
".",
"Instance",
"(",
"instance_id",
")",
"# Remove None's from `tags`",
"filtered_tags",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"tags",
".",
"items",
"(",
")",
"if",
"v",
"and",
"k",
"}",
"# Check for existing tags",
"if",
"instance",
".",
"tags",
"is",
"not",
"None",
":",
"existing_tags",
"=",
"{",
"tag",
".",
"get",
"(",
"'Key'",
")",
":",
"tag",
".",
"get",
"(",
"'Value'",
")",
"for",
"tag",
"in",
"instance",
".",
"tags",
"}",
"logger",
".",
"debug",
"(",
"\"Ignoring existing tags; %s\"",
"%",
"str",
"(",
"existing_tags",
")",
")",
"for",
"tag_key",
"in",
"existing_tags",
".",
"keys",
"(",
")",
":",
"filtered_tags",
".",
"pop",
"(",
"tag_key",
",",
"None",
")",
"# If we have new tags to add, add them.",
"tag_list",
"=",
"[",
"{",
"'Key'",
":",
"k",
",",
"'Value'",
":",
"v",
"}",
"for",
"k",
",",
"v",
"in",
"filtered_tags",
".",
"items",
"(",
")",
"]",
"if",
"len",
"(",
"tag_list",
")",
":",
"logger",
".",
"info",
"(",
"'Adding project tags \"%s\" to instance %s'",
"%",
"(",
"filtered_tags",
",",
"instance_id",
")",
")",
"instance",
".",
"create_tags",
"(",
"Tags",
"=",
"tag_list",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'No new tags from: %s'",
"%",
"str",
"(",
"tags",
")",
")",
"return"
]
| Tag a single ec2 instance. | [
"Tag",
"a",
"single",
"ec2",
"instance",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L36-L62 | train |
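A sketch of calling tag_instance with AWS credentials already configured for boto3; the instance ID and tag values are placeholders. None-valued tags are filtered out, and keys already present on the instance are not overwritten:

from indra.util.aws import tag_instance

tag_instance('i-0123456789abcdef0', project='cwc', purpose='testing',
             owner=None)  # owner is dropped because its value is None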
sorgerlab/indra | indra/util/aws.py | tag_myself | def tag_myself(project='cwc', **other_tags):
"""Function run when indra is used in an EC2 instance to apply tags."""
base_url = "http://169.254.169.254"
try:
resp = requests.get(base_url + "/latest/meta-data/instance-id")
except requests.exceptions.ConnectionError:
logger.warning("Could not connect to service. Note this should only "
"be run from within a batch job.")
return
instance_id = resp.text
tag_instance(instance_id, project=project, **other_tags)
return | python | def tag_myself(project='cwc', **other_tags):
"""Function run when indra is used in an EC2 instance to apply tags."""
base_url = "http://169.254.169.254"
try:
resp = requests.get(base_url + "/latest/meta-data/instance-id")
except requests.exceptions.ConnectionError:
logger.warning("Could not connect to service. Note this should only "
"be run from within a batch job.")
return
instance_id = resp.text
tag_instance(instance_id, project=project, **other_tags)
return | [
"def",
"tag_myself",
"(",
"project",
"=",
"'cwc'",
",",
"*",
"*",
"other_tags",
")",
":",
"base_url",
"=",
"\"http://169.254.169.254\"",
"try",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"base_url",
"+",
"\"/latest/meta-data/instance-id\"",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
":",
"logger",
".",
"warning",
"(",
"\"Could not connect to service. Note this should only \"",
"\"be run from within a batch job.\"",
")",
"return",
"instance_id",
"=",
"resp",
".",
"text",
"tag_instance",
"(",
"instance_id",
",",
"project",
"=",
"project",
",",
"*",
"*",
"other_tags",
")",
"return"
]
| Function run when indra is used in an EC2 instance to apply tags. | [
"Function",
"run",
"when",
"indra",
"is",
"used",
"in",
"an",
"EC2",
"instance",
"to",
"apply",
"tags",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L65-L76 | train |
sorgerlab/indra | indra/util/aws.py | get_batch_command | def get_batch_command(command_list, project=None, purpose=None):
"""Get the command appropriate for running something on batch."""
command_str = ' '.join(command_list)
ret = ['python', '-m', 'indra.util.aws', 'run_in_batch', command_str]
if not project and has_config('DEFAULT_AWS_PROJECT'):
project = get_config('DEFAULT_AWS_PROJECT')
if project:
ret += ['--project', project]
if purpose:
ret += ['--purpose', purpose]
return ret | python | def get_batch_command(command_list, project=None, purpose=None):
"""Get the command appropriate for running something on batch."""
command_str = ' '.join(command_list)
ret = ['python', '-m', 'indra.util.aws', 'run_in_batch', command_str]
if not project and has_config('DEFAULT_AWS_PROJECT'):
project = get_config('DEFAULT_AWS_PROJECT')
if project:
ret += ['--project', project]
if purpose:
ret += ['--purpose', purpose]
return ret | [
"def",
"get_batch_command",
"(",
"command_list",
",",
"project",
"=",
"None",
",",
"purpose",
"=",
"None",
")",
":",
"command_str",
"=",
"' '",
".",
"join",
"(",
"command_list",
")",
"ret",
"=",
"[",
"'python'",
",",
"'-m'",
",",
"'indra.util.aws'",
",",
"'run_in_batch'",
",",
"command_str",
"]",
"if",
"not",
"project",
"and",
"has_config",
"(",
"'DEFAULT_AWS_PROJECT'",
")",
":",
"project",
"=",
"get_config",
"(",
"'DEFAULT_AWS_PROJECT'",
")",
"if",
"project",
":",
"ret",
"+=",
"[",
"'--project'",
",",
"project",
"]",
"if",
"purpose",
":",
"ret",
"+=",
"[",
"'--purpose'",
",",
"purpose",
"]",
"return",
"ret"
]
| Get the command appropriate for running something on batch. | [
"Get",
"the",
"command",
"appropriate",
"for",
"running",
"something",
"on",
"batch",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L79-L89 | train |
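A sketch showing the wrapped command that get_batch_command builds; the script name and purpose are placeholders:

from indra.util.aws import get_batch_command

cmd = get_batch_command(['python', 'my_script.py', '--input', 'data.json'],
                        purpose='nightly-run')
# cmd resembles:
# ['python', '-m', 'indra.util.aws', 'run_in_batch',
#  'python my_script.py --input data.json', '--purpose', 'nightly-run']
# with '--project <name>' appended when DEFAULT_AWS_PROJECT is configured.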
sorgerlab/indra | indra/util/aws.py | get_jobs | def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Returns a list of dicts with jobName and jobId for each job with the
given status."""
batch = boto3.client('batch')
jobs = batch.list_jobs(jobQueue=job_queue, jobStatus=job_status)
return jobs.get('jobSummaryList') | python | def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Returns a list of dicts with jobName and jobId for each job with the
given status."""
batch = boto3.client('batch')
jobs = batch.list_jobs(jobQueue=job_queue, jobStatus=job_status)
return jobs.get('jobSummaryList') | [
"def",
"get_jobs",
"(",
"job_queue",
"=",
"'run_reach_queue'",
",",
"job_status",
"=",
"'RUNNING'",
")",
":",
"batch",
"=",
"boto3",
".",
"client",
"(",
"'batch'",
")",
"jobs",
"=",
"batch",
".",
"list_jobs",
"(",
"jobQueue",
"=",
"job_queue",
",",
"jobStatus",
"=",
"job_status",
")",
"return",
"jobs",
".",
"get",
"(",
"'jobSummaryList'",
")"
]
| Returns a list of dicts with jobName and jobId for each job with the
given status. | [
"Returns",
"a",
"list",
"of",
"dicts",
"with",
"jobName",
"and",
"jobId",
"for",
"each",
"job",
"with",
"the",
"given",
"status",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L101-L106 | train |
sorgerlab/indra | indra/util/aws.py | get_job_log | def get_job_log(job_info, log_group_name='/aws/batch/job',
write_file=True, verbose=False):
"""Gets the Cloudwatch log associated with the given job.
Parameters
----------
job_info : dict
dict containing entries for 'jobName' and 'jobId', e.g., as returned
by get_jobs()
log_group_name : string
Name of the log group; defaults to '/aws/batch/job'
write_file : boolean
If True, writes the downloaded log to a text file with the filename
'%s_%s.log' % (job_name, job_id)
Returns
-------
list of strings
The event messages in the log, with the earliest events listed first.
"""
job_name = job_info['jobName']
job_id = job_info['jobId']
logs = boto3.client('logs')
batch = boto3.client('batch')
resp = batch.describe_jobs(jobs=[job_id])
job_desc = resp['jobs'][0]
job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0]
task_arn_id = job_desc['container']['taskArn'].split('/')[-1]
log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id)
stream_resp = logs.describe_log_streams(
logGroupName=log_group_name,
logStreamNamePrefix=log_stream_name)
streams = stream_resp.get('logStreams')
if not streams:
logger.warning('No streams for job')
return None
elif len(streams) > 1:
logger.warning('More than 1 stream for job, returning first')
log_stream_name = streams[0]['logStreamName']
if verbose:
logger.info("Getting log for %s/%s" % (job_name, job_id))
out_file = ('%s_%s.log' % (job_name, job_id)) if write_file else None
lines = get_log_by_name(log_group_name, log_stream_name, out_file, verbose)
return lines | python | def get_job_log(job_info, log_group_name='/aws/batch/job',
write_file=True, verbose=False):
"""Gets the Cloudwatch log associated with the given job.
Parameters
----------
job_info : dict
dict containing entries for 'jobName' and 'jobId', e.g., as returned
by get_jobs()
log_group_name : string
Name of the log group; defaults to '/aws/batch/job'
write_file : boolean
If True, writes the downloaded log to a text file with the filename
'%s_%s.log' % (job_name, job_id)
Returns
-------
list of strings
The event messages in the log, with the earliest events listed first.
"""
job_name = job_info['jobName']
job_id = job_info['jobId']
logs = boto3.client('logs')
batch = boto3.client('batch')
resp = batch.describe_jobs(jobs=[job_id])
job_desc = resp['jobs'][0]
job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0]
task_arn_id = job_desc['container']['taskArn'].split('/')[-1]
log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id)
stream_resp = logs.describe_log_streams(
logGroupName=log_group_name,
logStreamNamePrefix=log_stream_name)
streams = stream_resp.get('logStreams')
if not streams:
logger.warning('No streams for job')
return None
elif len(streams) > 1:
logger.warning('More than 1 stream for job, returning first')
log_stream_name = streams[0]['logStreamName']
if verbose:
logger.info("Getting log for %s/%s" % (job_name, job_id))
out_file = ('%s_%s.log' % (job_name, job_id)) if write_file else None
lines = get_log_by_name(log_group_name, log_stream_name, out_file, verbose)
return lines | [
"def",
"get_job_log",
"(",
"job_info",
",",
"log_group_name",
"=",
"'/aws/batch/job'",
",",
"write_file",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"job_name",
"=",
"job_info",
"[",
"'jobName'",
"]",
"job_id",
"=",
"job_info",
"[",
"'jobId'",
"]",
"logs",
"=",
"boto3",
".",
"client",
"(",
"'logs'",
")",
"batch",
"=",
"boto3",
".",
"client",
"(",
"'batch'",
")",
"resp",
"=",
"batch",
".",
"describe_jobs",
"(",
"jobs",
"=",
"[",
"job_id",
"]",
")",
"job_desc",
"=",
"resp",
"[",
"'jobs'",
"]",
"[",
"0",
"]",
"job_def_name",
"=",
"job_desc",
"[",
"'jobDefinition'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"task_arn_id",
"=",
"job_desc",
"[",
"'container'",
"]",
"[",
"'taskArn'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"log_stream_name",
"=",
"'%s/default/%s'",
"%",
"(",
"job_def_name",
",",
"task_arn_id",
")",
"stream_resp",
"=",
"logs",
".",
"describe_log_streams",
"(",
"logGroupName",
"=",
"log_group_name",
",",
"logStreamNamePrefix",
"=",
"log_stream_name",
")",
"streams",
"=",
"stream_resp",
".",
"get",
"(",
"'logStreams'",
")",
"if",
"not",
"streams",
":",
"logger",
".",
"warning",
"(",
"'No streams for job'",
")",
"return",
"None",
"elif",
"len",
"(",
"streams",
")",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"'More than 1 stream for job, returning first'",
")",
"log_stream_name",
"=",
"streams",
"[",
"0",
"]",
"[",
"'logStreamName'",
"]",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Getting log for %s/%s\"",
"%",
"(",
"job_name",
",",
"job_id",
")",
")",
"out_file",
"=",
"(",
"'%s_%s.log'",
"%",
"(",
"job_name",
",",
"job_id",
")",
")",
"if",
"write_file",
"else",
"None",
"lines",
"=",
"get_log_by_name",
"(",
"log_group_name",
",",
"log_stream_name",
",",
"out_file",
",",
"verbose",
")",
"return",
"lines"
]
| Gets the Cloudwatch log associated with the given job.
Parameters
----------
job_info : dict
dict containing entries for 'jobName' and 'jobId', e.g., as returned
by get_jobs()
log_group_name : string
Name of the log group; defaults to '/aws/batch/job'
write_file : boolean
If True, writes the downloaded log to a text file with the filename
'%s_%s.log' % (job_name, job_id)
Returns
-------
list of strings
The event messages in the log, with the earliest events listed first. | [
"Gets",
"the",
"Cloudwatch",
"log",
"associated",
"with",
"the",
"given",
"job",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L109-L153 | train |
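A sketch that pairs get_jobs with get_job_log to print the tail of each running job's log; the queue name and status follow the defaults above:

from indra.util.aws import get_jobs, get_job_log

for job in get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
    lines = get_job_log(job, write_file=False)
    if lines:  # get_job_log returns None when no log stream exists
        print(job['jobName'], ''.join(lines[-5:]))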
sorgerlab/indra | indra/util/aws.py | get_log_by_name | def get_log_by_name(log_group_name, log_stream_name, out_file=None,
verbose=True):
"""Download a log given the log's group and stream name.
Parameters
----------
log_group_name : str
The name of the log group, e.g. /aws/batch/job.
log_stream_name : str
The name of the log stream, e.g. run_reach_jobdef/default/<UUID>
Returns
-------
lines : list[str]
The lines of the log as a list.
"""
logs = boto3.client('logs')
kwargs = {'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'startFromHead': True}
lines = []
while True:
response = logs.get_log_events(**kwargs)
# If we've gotten all the events already, the nextForwardToken for
# this call will be the same as the last one
if response.get('nextForwardToken') == kwargs.get('nextToken'):
break
else:
events = response.get('events')
if events:
lines += ['%s: %s\n' % (evt['timestamp'], evt['message'])
for evt in events]
kwargs['nextToken'] = response.get('nextForwardToken')
if verbose:
logger.info('%d %s' % (len(lines), lines[-1]))
if out_file:
with open(out_file, 'wt') as f:
for line in lines:
f.write(line)
return lines | python | def get_log_by_name(log_group_name, log_stream_name, out_file=None,
verbose=True):
"""Download a log given the log's group and stream name.
Parameters
----------
log_group_name : str
The name of the log group, e.g. /aws/batch/job.
log_stream_name : str
The name of the log stream, e.g. run_reach_jobdef/default/<UUID>
Returns
-------
lines : list[str]
The lines of the log as a list.
"""
logs = boto3.client('logs')
kwargs = {'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'startFromHead': True}
lines = []
while True:
response = logs.get_log_events(**kwargs)
# If we've gotten all the events already, the nextForwardToken for
# this call will be the same as the last one
if response.get('nextForwardToken') == kwargs.get('nextToken'):
break
else:
events = response.get('events')
if events:
lines += ['%s: %s\n' % (evt['timestamp'], evt['message'])
for evt in events]
kwargs['nextToken'] = response.get('nextForwardToken')
if verbose:
logger.info('%d %s' % (len(lines), lines[-1]))
if out_file:
with open(out_file, 'wt') as f:
for line in lines:
f.write(line)
return lines | [
"def",
"get_log_by_name",
"(",
"log_group_name",
",",
"log_stream_name",
",",
"out_file",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"logs",
"=",
"boto3",
".",
"client",
"(",
"'logs'",
")",
"kwargs",
"=",
"{",
"'logGroupName'",
":",
"log_group_name",
",",
"'logStreamName'",
":",
"log_stream_name",
",",
"'startFromHead'",
":",
"True",
"}",
"lines",
"=",
"[",
"]",
"while",
"True",
":",
"response",
"=",
"logs",
".",
"get_log_events",
"(",
"*",
"*",
"kwargs",
")",
"# If we've gotten all the events already, the nextForwardToken for",
"# this call will be the same as the last one",
"if",
"response",
".",
"get",
"(",
"'nextForwardToken'",
")",
"==",
"kwargs",
".",
"get",
"(",
"'nextToken'",
")",
":",
"break",
"else",
":",
"events",
"=",
"response",
".",
"get",
"(",
"'events'",
")",
"if",
"events",
":",
"lines",
"+=",
"[",
"'%s: %s\\n'",
"%",
"(",
"evt",
"[",
"'timestamp'",
"]",
",",
"evt",
"[",
"'message'",
"]",
")",
"for",
"evt",
"in",
"events",
"]",
"kwargs",
"[",
"'nextToken'",
"]",
"=",
"response",
".",
"get",
"(",
"'nextForwardToken'",
")",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"'%d %s'",
"%",
"(",
"len",
"(",
"lines",
")",
",",
"lines",
"[",
"-",
"1",
"]",
")",
")",
"if",
"out_file",
":",
"with",
"open",
"(",
"out_file",
",",
"'wt'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"lines",
":",
"f",
".",
"write",
"(",
"line",
")",
"return",
"lines"
]
| Download a log given the log's group and stream name.
Parameters
----------
log_group_name : str
The name of the log group, e.g. /aws/batch/job.
log_stream_name : str
The name of the log stream, e.g. run_reach_jobdef/default/<UUID>
Returns
-------
lines : list[str]
The lines of the log as a list. | [
"Download",
"a",
"log",
"given",
"the",
"log",
"s",
"group",
"and",
"stream",
"name",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L156-L196 | train |
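Direct usage of get_log_by_name, assuming the stream name (of the form <jobdef>/default/<task-id>) is already known; both names below are placeholders:

from indra.util.aws import get_log_by_name

lines = get_log_by_name('/aws/batch/job',
                        'run_reach_jobdef/default/0123abcd',  # placeholder
                        out_file='job.log', verbose=False)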
sorgerlab/indra | indra/util/aws.py | dump_logs | def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Write logs for all jobs with given the status to files."""
jobs = get_jobs(job_queue, job_status)
for job in jobs:
get_job_log(job, write_file=True) | python | def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Write logs for all jobs with given the status to files."""
jobs = get_jobs(job_queue, job_status)
for job in jobs:
get_job_log(job, write_file=True) | [
"def",
"dump_logs",
"(",
"job_queue",
"=",
"'run_reach_queue'",
",",
"job_status",
"=",
"'RUNNING'",
")",
":",
"jobs",
"=",
"get_jobs",
"(",
"job_queue",
",",
"job_status",
")",
"for",
"job",
"in",
"jobs",
":",
"get_job_log",
"(",
"job",
",",
"write_file",
"=",
"True",
")"
]
| Write logs for all jobs with the given status to files. | [
"Write",
"logs",
"for",
"all",
"jobs",
"with",
"given",
"the",
"status",
"to",
"files",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L199-L203 | train |
sorgerlab/indra | indra/util/aws.py | get_s3_file_tree | def get_s3_file_tree(s3, bucket, prefix):
"""Overcome s3 response limit and return NestedDict tree of paths.
The NestedDict object also allows the user to search by the ends of a path.
The tree mimics a file directory structure, with the leave nodes being the
full unbroken key. For example, 'path/to/file.txt' would be retrieved by
ret['path']['to']['file.txt']['key']
The NestedDict object returned also has the capability to get paths that
lead to a certain value. So if you wanted all paths that lead to something
called 'file.txt', you could use
ret.get_paths('file.txt')
For more details, see the NestedDict docs.
"""
def get_some_keys(keys, marker=None):
if marker:
relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix,
Marker=marker)
else:
relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix)
keys.extend([entry['Key'] for entry in relevant_files['Contents']
if entry['Key'] != marker])
return relevant_files['IsTruncated']
file_keys = []
marker = None
while get_some_keys(file_keys, marker):
marker = file_keys[-1]
file_tree = NestedDict()
pref_path = prefix.split('/')[:-1] # avoid the trailing empty str.
for key in file_keys:
full_path = key.split('/')
relevant_path = full_path[len(pref_path):]
curr = file_tree
for step in relevant_path:
curr = curr[step]
curr['key'] = key
return file_tree | python | def get_s3_file_tree(s3, bucket, prefix):
"""Overcome s3 response limit and return NestedDict tree of paths.
The NestedDict object also allows the user to search by the ends of a path.
The tree mimics a file directory structure, with the leave nodes being the
full unbroken key. For example, 'path/to/file.txt' would be retrieved by
ret['path']['to']['file.txt']['key']
The NestedDict object returned also has the capability to get paths that
lead to a certain value. So if you wanted all paths that lead to something
called 'file.txt', you could use
ret.get_paths('file.txt')
For more details, see the NestedDict docs.
"""
def get_some_keys(keys, marker=None):
if marker:
relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix,
Marker=marker)
else:
relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix)
keys.extend([entry['Key'] for entry in relevant_files['Contents']
if entry['Key'] != marker])
return relevant_files['IsTruncated']
file_keys = []
marker = None
while get_some_keys(file_keys, marker):
marker = file_keys[-1]
file_tree = NestedDict()
pref_path = prefix.split('/')[:-1] # avoid the trailing empty str.
for key in file_keys:
full_path = key.split('/')
relevant_path = full_path[len(pref_path):]
curr = file_tree
for step in relevant_path:
curr = curr[step]
curr['key'] = key
return file_tree | [
"def",
"get_s3_file_tree",
"(",
"s3",
",",
"bucket",
",",
"prefix",
")",
":",
"def",
"get_some_keys",
"(",
"keys",
",",
"marker",
"=",
"None",
")",
":",
"if",
"marker",
":",
"relevant_files",
"=",
"s3",
".",
"list_objects",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
",",
"Marker",
"=",
"marker",
")",
"else",
":",
"relevant_files",
"=",
"s3",
".",
"list_objects",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
")",
"keys",
".",
"extend",
"(",
"[",
"entry",
"[",
"'Key'",
"]",
"for",
"entry",
"in",
"relevant_files",
"[",
"'Contents'",
"]",
"if",
"entry",
"[",
"'Key'",
"]",
"!=",
"marker",
"]",
")",
"return",
"relevant_files",
"[",
"'IsTruncated'",
"]",
"file_keys",
"=",
"[",
"]",
"marker",
"=",
"None",
"while",
"get_some_keys",
"(",
"file_keys",
",",
"marker",
")",
":",
"marker",
"=",
"file_keys",
"[",
"-",
"1",
"]",
"file_tree",
"=",
"NestedDict",
"(",
")",
"pref_path",
"=",
"prefix",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
"# avoid the trailing empty str.",
"for",
"key",
"in",
"file_keys",
":",
"full_path",
"=",
"key",
".",
"split",
"(",
"'/'",
")",
"relevant_path",
"=",
"full_path",
"[",
"len",
"(",
"pref_path",
")",
":",
"]",
"curr",
"=",
"file_tree",
"for",
"step",
"in",
"relevant_path",
":",
"curr",
"=",
"curr",
"[",
"step",
"]",
"curr",
"[",
"'key'",
"]",
"=",
"key",
"return",
"file_tree"
]
| Overcome s3 response limit and return NestedDict tree of paths.
The NestedDict object also allows the user to search by the ends of a path.
The tree mimics a file directory structure, with the leave nodes being the
full unbroken key. For example, 'path/to/file.txt' would be retrieved by
ret['path']['to']['file.txt']['key']
The NestedDict object returned also has the capability to get paths that
lead to a certain value. So if you wanted all paths that lead to something
called 'file.txt', you could use
ret.get_paths('file.txt')
For more details, see the NestedDict docs. | [
"Overcome",
"s3",
"response",
"limit",
"and",
"return",
"NestedDict",
"tree",
"of",
"paths",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/aws.py#L206-L248 | train |
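A sketch of walking the returned NestedDict, assuming a boto3 client and a bucket/prefix that exist (both are placeholders here); the lookup patterns mirror the docstring's own examples:

import boto3
from indra.util.aws import get_s3_file_tree

s3 = boto3.client('s3')
tree = get_s3_file_tree(s3, 'my-bucket', 'papers/')   # placeholders
key = tree['path']['to']['file.txt']['key']           # full unbroken S3 key
paths = tree.get_paths('file.txt')                    # all paths ending there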
sorgerlab/indra | indra/assemblers/sif/assembler.py | SifAssembler.print_model | def print_model(self, include_unsigned_edges=False):
"""Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = ''
for edge in self.graph.edges(data=True):
n1 = edge[0]
n2 = edge[1]
data = edge[2]
polarity = data.get('polarity')
if polarity == 'negative':
rel = '-1'
elif polarity == 'positive':
rel = '1'
elif include_unsigned_edges:
rel = '0'
else:
continue
sif_str += '%s %s %s\n' % (n1, rel, n2)
return sif_str | python | def print_model(self, include_unsigned_edges=False):
"""Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = ''
for edge in self.graph.edges(data=True):
n1 = edge[0]
n2 = edge[1]
data = edge[2]
polarity = data.get('polarity')
if polarity == 'negative':
rel = '-1'
elif polarity == 'positive':
rel = '1'
elif include_unsigned_edges:
rel = '0'
else:
continue
sif_str += '%s %s %s\n' % (n1, rel, n2)
return sif_str | [
"def",
"print_model",
"(",
"self",
",",
"include_unsigned_edges",
"=",
"False",
")",
":",
"sif_str",
"=",
"''",
"for",
"edge",
"in",
"self",
".",
"graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"n1",
"=",
"edge",
"[",
"0",
"]",
"n2",
"=",
"edge",
"[",
"1",
"]",
"data",
"=",
"edge",
"[",
"2",
"]",
"polarity",
"=",
"data",
".",
"get",
"(",
"'polarity'",
")",
"if",
"polarity",
"==",
"'negative'",
":",
"rel",
"=",
"'-1'",
"elif",
"polarity",
"==",
"'positive'",
":",
"rel",
"=",
"'1'",
"elif",
"include_unsigned_edges",
":",
"rel",
"=",
"'0'",
"else",
":",
"continue",
"sif_str",
"+=",
"'%s %s %s\\n'",
"%",
"(",
"n1",
",",
"rel",
",",
"n2",
")",
"return",
"sif_str"
]
| Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False. | [
"Return",
"a",
"SIF",
"string",
"of",
"the",
"assembled",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sif/assembler.py#L98-L122 | train |
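A sketch of assembling and printing a SIF model, assuming stmts is a list of INDRA Statements and that the assembler's make_model() is called before printing:

from indra.assemblers.sif.assembler import SifAssembler

sa = SifAssembler(stmts)
sa.make_model()
sif = sa.print_model(include_unsigned_edges=True)
print(sif)  # lines like 'BRAF 1 MAP2K1'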
sorgerlab/indra | indra/assemblers/sif/assembler.py | SifAssembler.save_model | def save_model(self, fname, include_unsigned_edges=False):
"""Save the assembled model's SIF string into a file.
Parameters
----------
fname : str
The name of the file to save the SIF into.
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = self.print_model(include_unsigned_edges)
with open(fname, 'wb') as fh:
fh.write(sif_str.encode('utf-8')) | python | def save_model(self, fname, include_unsigned_edges=False):
"""Save the assembled model's SIF string into a file.
Parameters
----------
fname : str
The name of the file to save the SIF into.
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = self.print_model(include_unsigned_edges)
with open(fname, 'wb') as fh:
fh.write(sif_str.encode('utf-8')) | [
"def",
"save_model",
"(",
"self",
",",
"fname",
",",
"include_unsigned_edges",
"=",
"False",
")",
":",
"sif_str",
"=",
"self",
".",
"print_model",
"(",
"include_unsigned_edges",
")",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"sif_str",
".",
"encode",
"(",
"'utf-8'",
")",
")"
]
| Save the assembled model's SIF string into a file.
Parameters
----------
fname : str
The name of the file to save the SIF into.
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False. | [
"Save",
"the",
"assembled",
"model",
"s",
"SIF",
"string",
"into",
"a",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sif/assembler.py#L124-L137 | train |
sorgerlab/indra | indra/assemblers/sif/assembler.py | SifAssembler.print_boolean_net | def print_boolean_net(self, out_file=None):
"""Return a Boolean network from the assembled graph.
See https://github.com/ialbert/booleannet for details about
the format used to encode the Boolean rules.
Parameters
----------
out_file : Optional[str]
A file name in which the Boolean network is saved.
Returns
-------
full_str : str
The string representing the Boolean network.
"""
init_str = ''
for node_key in self.graph.nodes():
node_name = self.graph.node[node_key]['name']
init_str += '%s = False\n' % node_name
rule_str = ''
for node_key in self.graph.nodes():
node_name = self.graph.node[node_key]['name']
in_edges = self.graph.in_edges(node_key)
if not in_edges:
continue
parents = [e[0] for e in in_edges]
polarities = [self.graph.edge[e[0]][node_key]['polarity']
for e in in_edges]
pos_parents = [par for par, pol in zip(parents, polarities) if
pol == 'positive']
neg_parents = [par for par, pol in zip(parents, polarities) if
pol == 'negative']
rhs_pos_parts = []
for par in pos_parents:
rhs_pos_parts.append(self.graph.node[par]['name'])
rhs_pos_str = ' or '.join(rhs_pos_parts)
rhs_neg_parts = []
for par in neg_parents:
rhs_neg_parts.append(self.graph.node[par]['name'])
rhs_neg_str = ' or '.join(rhs_neg_parts)
if rhs_pos_str:
if rhs_neg_str:
rhs_str = '(' + rhs_pos_str + \
') and not (' + rhs_neg_str + ')'
else:
rhs_str = rhs_pos_str
else:
rhs_str = 'not (' + rhs_neg_str + ')'
node_eq = '%s* = %s\n' % (node_name, rhs_str)
rule_str += node_eq
full_str = init_str + '\n' + rule_str
if out_file is not None:
with open(out_file, 'wt') as fh:
fh.write(full_str)
return full_str | python | def print_boolean_net(self, out_file=None):
"""Return a Boolean network from the assembled graph.
See https://github.com/ialbert/booleannet for details about
the format used to encode the Boolean rules.
Parameters
----------
out_file : Optional[str]
A file name in which the Boolean network is saved.
Returns
-------
full_str : str
The string representing the Boolean network.
"""
init_str = ''
for node_key in self.graph.nodes():
node_name = self.graph.node[node_key]['name']
init_str += '%s = False\n' % node_name
rule_str = ''
for node_key in self.graph.nodes():
node_name = self.graph.node[node_key]['name']
in_edges = self.graph.in_edges(node_key)
if not in_edges:
continue
parents = [e[0] for e in in_edges]
polarities = [self.graph.edge[e[0]][node_key]['polarity']
for e in in_edges]
pos_parents = [par for par, pol in zip(parents, polarities) if
pol == 'positive']
neg_parents = [par for par, pol in zip(parents, polarities) if
pol == 'negative']
rhs_pos_parts = []
for par in pos_parents:
rhs_pos_parts.append(self.graph.node[par]['name'])
rhs_pos_str = ' or '.join(rhs_pos_parts)
rhs_neg_parts = []
for par in neg_parents:
rhs_neg_parts.append(self.graph.node[par]['name'])
rhs_neg_str = ' or '.join(rhs_neg_parts)
if rhs_pos_str:
if rhs_neg_str:
rhs_str = '(' + rhs_pos_str + \
') and not (' + rhs_neg_str + ')'
else:
rhs_str = rhs_pos_str
else:
rhs_str = 'not (' + rhs_neg_str + ')'
node_eq = '%s* = %s\n' % (node_name, rhs_str)
rule_str += node_eq
full_str = init_str + '\n' + rule_str
if out_file is not None:
with open(out_file, 'wt') as fh:
fh.write(full_str)
return full_str | [
"def",
"print_boolean_net",
"(",
"self",
",",
"out_file",
"=",
"None",
")",
":",
"init_str",
"=",
"''",
"for",
"node_key",
"in",
"self",
".",
"graph",
".",
"nodes",
"(",
")",
":",
"node_name",
"=",
"self",
".",
"graph",
".",
"node",
"[",
"node_key",
"]",
"[",
"'name'",
"]",
"init_str",
"+=",
"'%s = False\\n'",
"%",
"node_name",
"rule_str",
"=",
"''",
"for",
"node_key",
"in",
"self",
".",
"graph",
".",
"nodes",
"(",
")",
":",
"node_name",
"=",
"self",
".",
"graph",
".",
"node",
"[",
"node_key",
"]",
"[",
"'name'",
"]",
"in_edges",
"=",
"self",
".",
"graph",
".",
"in_edges",
"(",
"node_key",
")",
"if",
"not",
"in_edges",
":",
"continue",
"parents",
"=",
"[",
"e",
"[",
"0",
"]",
"for",
"e",
"in",
"in_edges",
"]",
"polarities",
"=",
"[",
"self",
".",
"graph",
".",
"edge",
"[",
"e",
"[",
"0",
"]",
"]",
"[",
"node_key",
"]",
"[",
"'polarity'",
"]",
"for",
"e",
"in",
"in_edges",
"]",
"pos_parents",
"=",
"[",
"par",
"for",
"par",
",",
"pol",
"in",
"zip",
"(",
"parents",
",",
"polarities",
")",
"if",
"pol",
"==",
"'positive'",
"]",
"neg_parents",
"=",
"[",
"par",
"for",
"par",
",",
"pol",
"in",
"zip",
"(",
"parents",
",",
"polarities",
")",
"if",
"pol",
"==",
"'negative'",
"]",
"rhs_pos_parts",
"=",
"[",
"]",
"for",
"par",
"in",
"pos_parents",
":",
"rhs_pos_parts",
".",
"append",
"(",
"self",
".",
"graph",
".",
"node",
"[",
"par",
"]",
"[",
"'name'",
"]",
")",
"rhs_pos_str",
"=",
"' or '",
".",
"join",
"(",
"rhs_pos_parts",
")",
"rhs_neg_parts",
"=",
"[",
"]",
"for",
"par",
"in",
"neg_parents",
":",
"rhs_neg_parts",
".",
"append",
"(",
"self",
".",
"graph",
".",
"node",
"[",
"par",
"]",
"[",
"'name'",
"]",
")",
"rhs_neg_str",
"=",
"' or '",
".",
"join",
"(",
"rhs_neg_parts",
")",
"if",
"rhs_pos_str",
":",
"if",
"rhs_neg_str",
":",
"rhs_str",
"=",
"'('",
"+",
"rhs_pos_str",
"+",
"') and not ('",
"+",
"rhs_neg_str",
"+",
"')'",
"else",
":",
"rhs_str",
"=",
"rhs_pos_str",
"else",
":",
"rhs_str",
"=",
"'not ('",
"+",
"rhs_neg_str",
"+",
"')'",
"node_eq",
"=",
"'%s* = %s\\n'",
"%",
"(",
"node_name",
",",
"rhs_str",
")",
"rule_str",
"+=",
"node_eq",
"full_str",
"=",
"init_str",
"+",
"'\\n'",
"+",
"rule_str",
"if",
"out_file",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"out_file",
",",
"'wt'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"full_str",
")",
"return",
"full_str"
]
| Return a Boolean network from the assembled graph.
See https://github.com/ialbert/booleannet for details about
the format used to encode the Boolean rules.
Parameters
----------
out_file : Optional[str]
A file name in which the Boolean network is saved.
Returns
-------
full_str : str
The string representing the Boolean network. | [
"Return",
"a",
"Boolean",
"network",
"from",
"the",
"assembled",
"graph",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sif/assembler.py#L183-L242 | train |
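Continuing the SifAssembler sketch shown after print_model above, print_boolean_net emits BooleanNet-style rules; the output shape below is illustrative:

bn = sa.print_boolean_net('model.bnet')  # also writes the file
# e.g.:
# BRAF = False
# MAP2K1 = False
#
# MAP2K1* = BRAF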
sorgerlab/indra | indra/literature/elsevier_client.py | _ensure_api_keys | def _ensure_api_keys(task_desc, failure_ret=None):
"""Wrap Elsevier methods which directly use the API keys.
Ensure that the keys are retrieved from the environment or config file when
first called, and store global scope. Subsequently use globally stashed
results and check for required ids.
"""
def check_func_wrapper(func):
@wraps(func)
def check_api_keys(*args, **kwargs):
global ELSEVIER_KEYS
if ELSEVIER_KEYS is None:
ELSEVIER_KEYS = {}
# Try to read in Elsevier API keys. For each key, first check
# the environment variables, then check the INDRA config file.
if not has_config(INST_KEY_ENV_NAME):
logger.warning('Institution API key %s not found in config '
'file or environment variable: this will '
'limit access for %s'
% (INST_KEY_ENV_NAME, task_desc))
ELSEVIER_KEYS['X-ELS-Insttoken'] = get_config(INST_KEY_ENV_NAME)
if not has_config(API_KEY_ENV_NAME):
logger.error('API key %s not found in configuration file '
'or environment variable: cannot %s'
% (API_KEY_ENV_NAME, task_desc))
return failure_ret
ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME)
elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys():
logger.error('No Elsevier API key %s found: cannot %s'
% (API_KEY_ENV_NAME, task_desc))
return failure_ret
return func(*args, **kwargs)
return check_api_keys
return check_func_wrapper | python | def _ensure_api_keys(task_desc, failure_ret=None):
"""Wrap Elsevier methods which directly use the API keys.
Ensure that the keys are retrieved from the environment or config file when
    first called, and stored in global scope. Subsequently use globally stashed
results and check for required ids.
"""
def check_func_wrapper(func):
@wraps(func)
def check_api_keys(*args, **kwargs):
global ELSEVIER_KEYS
if ELSEVIER_KEYS is None:
ELSEVIER_KEYS = {}
# Try to read in Elsevier API keys. For each key, first check
# the environment variables, then check the INDRA config file.
if not has_config(INST_KEY_ENV_NAME):
logger.warning('Institution API key %s not found in config '
'file or environment variable: this will '
'limit access for %s'
% (INST_KEY_ENV_NAME, task_desc))
ELSEVIER_KEYS['X-ELS-Insttoken'] = get_config(INST_KEY_ENV_NAME)
if not has_config(API_KEY_ENV_NAME):
logger.error('API key %s not found in configuration file '
'or environment variable: cannot %s'
% (API_KEY_ENV_NAME, task_desc))
return failure_ret
ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME)
elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys():
logger.error('No Elsevier API key %s found: cannot %s'
% (API_KEY_ENV_NAME, task_desc))
return failure_ret
return func(*args, **kwargs)
return check_api_keys
return check_func_wrapper | [
"def",
"_ensure_api_keys",
"(",
"task_desc",
",",
"failure_ret",
"=",
"None",
")",
":",
"def",
"check_func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"check_api_keys",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"ELSEVIER_KEYS",
"if",
"ELSEVIER_KEYS",
"is",
"None",
":",
"ELSEVIER_KEYS",
"=",
"{",
"}",
"# Try to read in Elsevier API keys. For each key, first check",
"# the environment variables, then check the INDRA config file.",
"if",
"not",
"has_config",
"(",
"INST_KEY_ENV_NAME",
")",
":",
"logger",
".",
"warning",
"(",
"'Institution API key %s not found in config '",
"'file or environment variable: this will '",
"'limit access for %s'",
"%",
"(",
"INST_KEY_ENV_NAME",
",",
"task_desc",
")",
")",
"ELSEVIER_KEYS",
"[",
"'X-ELS-Insttoken'",
"]",
"=",
"get_config",
"(",
"INST_KEY_ENV_NAME",
")",
"if",
"not",
"has_config",
"(",
"API_KEY_ENV_NAME",
")",
":",
"logger",
".",
"error",
"(",
"'API key %s not found in configuration file '",
"'or environment variable: cannot %s'",
"%",
"(",
"API_KEY_ENV_NAME",
",",
"task_desc",
")",
")",
"return",
"failure_ret",
"ELSEVIER_KEYS",
"[",
"'X-ELS-APIKey'",
"]",
"=",
"get_config",
"(",
"API_KEY_ENV_NAME",
")",
"elif",
"'X-ELS-APIKey'",
"not",
"in",
"ELSEVIER_KEYS",
".",
"keys",
"(",
")",
":",
"logger",
".",
"error",
"(",
"'No Elsevier API key %s found: cannot %s'",
"%",
"(",
"API_KEY_ENV_NAME",
",",
"task_desc",
")",
")",
"return",
"failure_ret",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"check_api_keys",
"return",
"check_func_wrapper"
]
| Wrap Elsevier methods which directly use the API keys.
Ensure that the keys are retrieved from the environment or config file when
first called, and stored in global scope. Subsequently use globally stashed
results and check for required ids. | [
"Wrap",
"Elsevier",
"methods",
"which",
"directly",
"use",
"the",
"API",
"keys",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L51-L85 | train |
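_ensure_api_keys is a decorator factory used inside this module; a sketch of how a client function would be wrapped (the wrapped function and its body are hypothetical):

@_ensure_api_keys('download article', failure_ret=None)
def download_article(doi):
    # By this point ELSEVIER_KEYS is populated, or the decorator already
    # returned failure_ret without calling the function.
    ...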
sorgerlab/indra | indra/literature/elsevier_client.py | check_entitlement | def check_entitlement(doi):
"""Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
"""
if doi.lower().startswith('doi:'):
doi = doi[4:]
url = '%s/%s' % (elsevier_entitlement_url, doi)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.error('Could not check entitlements for article %s: '
'status code %d' % (doi, res.status_code))
logger.error('Response content: %s' % res.text)
return False
return True | python | def check_entitlement(doi):
"""Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
"""
if doi.lower().startswith('doi:'):
doi = doi[4:]
url = '%s/%s' % (elsevier_entitlement_url, doi)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.error('Could not check entitlements for article %s: '
'status code %d' % (doi, res.status_code))
logger.error('Response content: %s' % res.text)
return False
return True | [
"def",
"check_entitlement",
"(",
"doi",
")",
":",
"if",
"doi",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'doi:'",
")",
":",
"doi",
"=",
"doi",
"[",
"4",
":",
"]",
"url",
"=",
"'%s/%s'",
"%",
"(",
"elsevier_entitlement_url",
",",
"doi",
")",
"params",
"=",
"{",
"'httpAccept'",
":",
"'text/xml'",
"}",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
",",
"headers",
"=",
"ELSEVIER_KEYS",
")",
"if",
"not",
"res",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"error",
"(",
"'Could not check entitlements for article %s: '",
"'status code %d'",
"%",
"(",
"doi",
",",
"res",
".",
"status_code",
")",
")",
"logger",
".",
"error",
"(",
"'Response content: %s'",
"%",
"res",
".",
"text",
")",
"return",
"False",
"return",
"True"
]
| Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys. | [
"Check",
"whether",
"IP",
"and",
"credentials",
"enable",
"access",
"to",
"content",
"for",
"a",
"doi",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L89-L106 | train |