Code | Summary
---|---
Please provide a description of the function:def area_uri(self, area_uuid):
if area_uuid not in self.areas:
raise UploadException("I don't know about area {uuid}".format(uuid=area_uuid))
return UploadAreaURI(self._config.upload.areas[area_uuid]['uri']) | [
"\n Return the URI for an Upload Area\n :param area_uuid: UUID of area for which we want URI\n :return: Upload Area URI object\n :rtype: UploadAreaURI\n :raises UploadException: if area does not exist\n "
]
|
Please provide a description of the function:def add_area(self, uri):
if uri.area_uuid not in self._config.upload.areas:
self._config.upload.areas[uri.area_uuid] = {'uri': uri.uri}
self.save() | [
"\n Record information about a new Upload Area\n\n :param UploadAreaURI uri: An Upload Area URI.\n "
]
|
Please provide a description of the function:def select_area(self, area_uuid):
self._config.upload.current_area = area_uuid
self.save() | [
"\n Update the \"current area\" to be the area with this UUID.\n\n :param str area_uuid: The RFC4122-compliant UUID of the Upload Area.\n "
]
|
Please provide a description of the function:def forget_area(self, area_uuid):
if self._config.upload.current_area == area_uuid:
self._config.upload.current_area = None
if area_uuid in self._config.upload.areas:
del self._config.upload.areas[area_uuid]
self.save() | [
"\n Remove an Upload Area from out cache of known areas.\n :param str area_uuid: The RFC4122-compliant UUID of the Upload Area.\n "
]
|
Please provide a description of the function:def area_uuid_from_partial_uuid(self, partial_uuid):
matching_areas = [uuid for uuid in self.areas if re.match(partial_uuid, uuid)]
if len(matching_areas) == 0:
raise UploadException("Sorry I don't recognize area \"%s\"" % (partial_uuid,))
elif len(matching_areas) == 1:
return matching_areas[0]
else:
raise UploadException(
"\"%s\" matches more than one area, please provide more characters." % (partial_uuid,)) | [
"\n Given a partial UUID (a prefix), see if we have know about an Upload Area matching it.\n :param (str) partial_uuid: UUID prefix\n :return: a matching UUID\n :rtype: str\n :raises UploadException: if no or more than one UUIDs match.\n "
]
|
Please provide a description of the function:def unique_prefix(self, area_uuid):
for prefix_len in range(1, len(area_uuid)):
prefix = area_uuid[0:prefix_len]
matching_areas = [uuid for uuid in self.areas if re.match(prefix, uuid)]
if len(matching_areas) == 1:
return prefix | [
"\n Find the minimum prefix required to address this Upload Area UUID uniquely.\n :param (str) area_uuid: UUID of Upload Area\n :return: a string with the minimum prefix required to be unique\n :rtype: str\n "
]
|
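A minimal, self-contained sketch of the prefix-resolution pattern used by `area_uuid_from_partial_uuid` and `unique_prefix` above. The `areas` list is hypothetical; note that the originals pass the prefix to `re.match`, which treats it as a regular expression rather than a literal prefix, so `str.startswith` is the safer equivalent shown here:

```python
# Hypothetical area list; real UUIDs come from the Upload Service config.
areas = [
    "deadbeef-aaaa-4bbb-8ccc-000000000001",
    "deadbeef-aaaa-4bbb-8ccc-000000000002",
    "12345678-0000-4000-8000-000000000000",
]

def resolve(partial_uuid):
    # Literal prefix match; re.match(partial_uuid, uuid) behaves the same
    # for plain hex-and-dash prefixes but differs for regex metacharacters.
    matches = [uuid for uuid in areas if uuid.startswith(partial_uuid)]
    if not matches:
        raise ValueError("unrecognized area %r" % partial_uuid)
    if len(matches) > 1:
        raise ValueError("%r matches more than one area" % partial_uuid)
    return matches[0]

print(resolve("123"))  # 12345678-0000-4000-8000-000000000000
```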
Please provide a description of the function:def get_days_since_last_modified(filename):
now = datetime.now()
last_modified = datetime.fromtimestamp(os.path.getmtime(filename))
return (now - last_modified).days | [
"\n :param filename: Absolute file path\n :return: Number of days since filename's last modified time\n "
]
|
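A quick usage sketch for the helper above; the temporary file exists solely for illustration:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name

print(get_days_since_last_modified(path))  # 0 for a file just created
os.remove(path)
```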
Please provide a description of the function:def create_area(self, area_uuid):
result = self.api_client.create_area(area_uuid=area_uuid)
area_uri = UploadAreaURI(uri=result['uri'])
return UploadArea(uri=area_uri, upload_service=self) | [
"\n Create an Upload Area\n :param area_uuid: UUID of Upload Area to be created\n :return: an Upload Area object\n :rtype: UploadArea\n "
]
|
Please provide a description of the function:def create_area(self, area_uuid):
response = self._make_request('post',
path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return response.json() | [
"\n Create an Upload Area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :return: a dict of the form { \"uri\": \"s3://<bucket_name>/<upload-area-id>/\" }\n :rtype: dict\n :raises UploadApiException: if the an Upload Area was not created\n "
]
|
Please provide a description of the function:def area_exists(self, area_uuid):
response = requests.head(self._url(path="/area/{id}".format(id=area_uuid)))
return response.ok | [
"\n Check if an Upload Area exists\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :return: True or False\n :rtype: bool\n "
]
|
Please provide a description of the function:def delete_area(self, area_uuid):
self._make_request('delete', path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return True | [
"\n Delete an Upload Area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :return: True\n :rtype: bool\n :raises UploadApiException: if the an Upload Area was not deleted\n "
]
|
Please provide a description of the function:def credentials(self, area_uuid):
response = self._make_request("post", path="/area/{uuid}/credentials".format(uuid=area_uuid))
return response.json() | [
"\n Get AWS credentials required to directly upload files to Upload Area in S3\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :return: a dict containing an AWS AccessKey, SecretKey and SessionToken\n :rtype: dict\n :raises UploadApiException: if credentials could not be obtained\n "
]
|
Please provide a description of the function:def store_file(self, area_uuid, filename, file_content, content_type):
url_safe_filename = urlparse.quote(filename)
path = "/area/{id}/{filename}".format(id=area_uuid, filename=url_safe_filename)
response = self._make_request('put',
path=path,
data=file_content,
headers={
'Api-Key': self.auth_token,
'Content-Type': content_type
})
return response.json() | [
"\n Store a small file in an Upload Area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param str filename: The name the file will have in the Upload Area\n :param str file_content: The contents of the file\n :param str content_type: The MIME-type for the file\n :return: information about the stored file (similar to that returned by files_info)\n :rtype: dict\n :raises UploadApiException: if file could not be stored\n "
]
|
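A hedged usage sketch for the client method above; `client`, the UUID, and the file contents are placeholders:

```python
# `client` is an instance of the API client class shown above.
info = client.store_file(
    area_uuid="deadbeef-dead-4bad-8bad-feedfacebeef",
    filename="sample.json",
    file_content='{"hello": "world"}',
    content_type="application/json",
)
print(info)  # dict describing the stored file, as returned by files_info
```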
Please provide a description of the function:def file_upload_notification(self, area_uuid, filename):
url_safe_filename = urlparse.quote(filename)
path = ("/area/{area_uuid}/{filename}".format(area_uuid=area_uuid, filename=url_safe_filename))
response = self._make_request('post', path=path)
return response.ok | [
"\n Notify Upload Service that a file has been placed in an Upload Area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param str filename: The name the file in the Upload Area\n :return: True\n :rtype: bool\n :raises UploadApiException: if file could not be stored\n "
]
|
Please provide a description of the function:def files_info(self, area_uuid, file_list):
path = "/area/{uuid}/files_info".format(uuid=area_uuid)
file_list = [urlparse.quote(filename) for filename in file_list]
response = self._make_request('put', path=path, json=file_list)
return response.json() | [
"\n Get information about files\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param list file_list: The names the files in the Upload Area about which we want information\n :return: an array of file information dicts\n :rtype: list of dicts\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def checksum_status(self, area_uuid, filename):
url_safe_filename = urlparse.quote(filename)
path = "/area/{uuid}/{filename}/checksum".format(uuid=area_uuid, filename=url_safe_filename)
response = self._make_request('get', path)
return response.json() | [
"\n Retrieve checksum status and values for a file\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param str filename: The name of the file within the Upload Area\n :return: a dict with checksum information\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def validate_files(self, area_uuid, file_list, validator_image, original_validation_id="", environment={}):
path = "/area/{uuid}/validate".format(uuid=area_uuid)
file_list = [urlparse.quote(filename) for filename in file_list]
payload = {
"environment": environment,
"files": file_list,
"original_validation_id": original_validation_id,
"validator_image": validator_image
}
result = self._make_request('put', path=path, json=payload, headers={'Api-Key': self.auth_token})
return result.json() | [
"\n Invoke supplied validator Docker image and give it access to the file/s.\n The validator must be based off the base validator Docker image.\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param list file_list: A list of files within the Upload Area to be validated\n :param str validator_image: the location of a docker image to use for validation\n :param str original_validation_id: [optional]\n :param dict environment: [optional] list of environment variable to set for the validator\n :return: ID of scheduled validation\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def validation_statuses(self, area_uuid):
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | [
"\n Get count of validation statuses for all files in upload_area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :return: a dict with key for each state and value being the count of files in that state\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def language_name(self, text: str) -> str:
values = extract(text)
input_fn = _to_func(([values], []))
pos: int = next(self._classifier.predict_classes(input_fn=input_fn))
LOGGER.debug("Predicted language position %s", pos)
return sorted(self.languages)[pos] | [
"Predict the programming language name of the given source code.\n\n :param text: source code.\n :return: language name\n "
]
|
Please provide a description of the function:def scores(self, text: str) -> Dict[str, float]:
values = extract(text)
input_fn = _to_func(([values], []))
prediction = self._classifier.predict_proba(input_fn=input_fn)
probabilities = next(prediction).tolist()
sorted_languages = sorted(self.languages)
return dict(zip(sorted_languages, probabilities)) | [
"A score for each language corresponding to the probability that\n the text is written in the given language.\n The score is a `float` value between 0.0 and 1.0\n\n :param text: source code.\n :return: language to score dictionary\n "
]
|
Please provide a description of the function:def probable_languages(
self,
text: str,
max_languages: int = 3) -> Tuple[str, ...]:
scores = self.scores(text)
# Sorted from the most probable language to the least probable
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
languages, probabilities = list(zip(*sorted_scores))
# Find the most distant consecutive languages.
# A logarithmic scale is used here because the probabilities
# are most of the time really close to zero
rescaled_probabilities = [log(proba) for proba in probabilities]
distances = [
rescaled_probabilities[pos] - rescaled_probabilities[pos+1]
for pos in range(len(rescaled_probabilities)-1)]
max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]
limit = min(max_distance_pos, max_languages)
return languages[:limit] | [
"List of most probable programming languages,\n the list is ordered from the most probable to the least probable one.\n\n :param text: source code.\n :param max_languages: maximum number of listed languages.\n :return: languages list\n "
]
|
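A worked sketch of the cut-off logic in `probable_languages`, using made-up scores to show how the largest log-probability gap picks the list length:

```python
from math import log
from operator import itemgetter

scores = {"Python": 0.70, "Ruby": 0.25, "Perl": 0.04, "C": 0.01}
ranked = sorted(scores.items(), key=itemgetter(1), reverse=True)
languages, probabilities = zip(*ranked)

# On the log scale the gaps are about 1.03, 1.83 and 1.39, so the
# sharpest confidence drop is between Ruby and Perl, at position 2.
logs = [log(p) for p in probabilities]
gaps = [logs[i] - logs[i + 1] for i in range(len(logs) - 1)]
cut = max(enumerate(gaps, 1), key=itemgetter(1))[0]
print(languages[:min(cut, 3)])  # ('Python', 'Ruby')
```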
Please provide a description of the function:def learn(self, input_dir: str) -> float:
if self.is_default:
LOGGER.error("Cannot learn using default model")
raise GuesslangError('Cannot learn using default "readonly" model')
languages = self.languages
LOGGER.info("Extract training data")
extensions = [ext for exts in languages.values() for ext in exts]
files = search_files(input_dir, extensions)
nb_files = len(files)
chunk_size = min(int(CHUNK_PROPORTION * nb_files), CHUNK_SIZE)
LOGGER.debug("Evaluation files count: %d", chunk_size)
LOGGER.debug("Training files count: %d", nb_files - chunk_size)
batches = _pop_many(files, chunk_size)
LOGGER.debug("Prepare evaluation data")
evaluation_data = extract_from_files(next(batches), languages)
LOGGER.debug("Evaluation data count: %d", len(evaluation_data[0]))
accuracy = 0
total = ceil(nb_files / chunk_size) - 1
LOGGER.info("Start learning")
for pos, training_files in enumerate(batches, 1):
LOGGER.info("Step %.2f%%", 100 * pos / total)
LOGGER.debug("Training data extraction")
training_data = extract_from_files(training_files, languages)
LOGGER.debug("Training data count: %d", len(training_data[0]))
steps = int(FITTING_FACTOR * len(training_data[0]) / 100)
LOGGER.debug("Fitting, steps count: %d", steps)
self._classifier.fit(input_fn=_to_func(training_data), steps=steps)
LOGGER.debug("Evaluation")
accuracy = self._classifier.evaluate(
input_fn=_to_func(evaluation_data), steps=1)['accuracy']
_comment(accuracy)
return accuracy | [
"Learn languages features from source files.\n\n :raise GuesslangError: when the default model is used for learning\n :param input_dir: source code files directory.\n :return: learning accuracy\n "
]
|
Please provide a description of the function:def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'reportfile', type=argparse.FileType('r'),
help="test report file generated by `guesslang --test TESTDIR`")
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
report = json.load(args.reportfile)
graph_data = _build_graph(report)
index_path = _prepare_resources(graph_data)
webbrowser.open(str(index_path)) | [
"Report graph creator command line"
]
|
Please provide a description of the function:def search_files(source: str, extensions: List[str]) -> List[Path]:
files = [
path for path in Path(source).glob('**/*')
if path.is_file() and path.suffix.lstrip('.') in extensions]
nb_files = len(files)
LOGGER.debug("Total files found: %d", nb_files)
if nb_files < NB_FILES_MIN:
LOGGER.error("Too few source files")
raise GuesslangError(
'{} source files found in {}. {} files minimum is required'.format(
nb_files, source, NB_FILES_MIN))
random.shuffle(files)
return files | [
"Retrieve files located the source directory and its subdirectories,\n whose extension match one of the listed extensions.\n\n :raise GuesslangError: when there is not enough files in the directory\n :param source: directory name\n :param extensions: list of file extensions\n :return: filenames\n "
]
|
Please provide a description of the function:def extract_from_files(
files: List[Path],
languages: Dict[str, List[str]]) -> DataSet:
enumerator = enumerate(sorted(languages.items()))
rank_map = {ext: rank for rank, (_, exts) in enumerator for ext in exts}
with multiprocessing.Pool(initializer=_process_init) as pool:
file_iterator = ((path, rank_map) for path in files)
arrays = _to_arrays(pool.starmap(_extract_features, file_iterator))
LOGGER.debug("Extracted arrays count: %d", len(arrays[0]))
return arrays | [
"Extract arrays of features from the given files.\n\n :param files: list of paths\n :param languages: language name =>\n associated file extension list\n :return: features\n "
]
|
Please provide a description of the function:def safe_read_file(file_path: Path) -> str:
for encoding in FILE_ENCODINGS:
try:
return file_path.read_text(encoding=encoding)
except UnicodeError:
pass # Ignore encoding error
raise GuesslangError('Encoding not supported for {!s}'.format(file_path)) | [
"Read a text file. Several text encodings are tried until\n the file content is correctly decoded.\n\n :raise GuesslangError: when the file encoding is not supported\n :param file_path: path to the input file\n :return: text file content\n "
]
|
Please provide a description of the function:def config_logging(debug: bool = False) -> None:
if debug:
level = 'DEBUG'
tf_level = tf.logging.INFO
else:
level = 'INFO'
tf_level = tf.logging.ERROR
logging_config = config_dict('logging.json')
for logger in logging_config['loggers'].values():
logger['level'] = level
logging.config.dictConfig(logging_config)
tf.logging.set_verbosity(tf_level) | [
"Set-up application and `tensorflow` logging.\n\n :param debug: show or hide debug messages\n "
]
|
Please provide a description of the function:def config_dict(name: str) -> Dict[str, Any]:
try:
content = resource_string(PACKAGE, DATADIR.format(name)).decode()
except DistributionNotFound as error:
LOGGER.warning("Cannot load %s from packages: %s", name, error)
content = DATA_FALLBACK.joinpath(name).read_text()
return cast(Dict[str, Any], json.loads(content)) | [
"Load a JSON configuration dict from Guesslang config directory.\n\n :param name: the JSON file name.\n :return: configuration\n "
]
|
Please provide a description of the function:def model_info(model_dir: Optional[str] = None) -> Tuple[str, bool]:
if model_dir is None:
try:
model_dir = resource_filename(PACKAGE, DATADIR.format('model'))
except DistributionNotFound as error:
LOGGER.warning("Cannot load model from packages: %s", error)
model_dir = str(DATA_FALLBACK.joinpath('model').absolute())
is_default_model = True
else:
is_default_model = False
model_path = Path(model_dir)
model_path.mkdir(exist_ok=True)
LOGGER.debug("Using model: %s, default: %s", model_path, is_default_model)
return (model_dir, is_default_model) | [
"Retrieve Guesslang model directory name,\n and tells if it is the default model.\n\n :param model_dir: model location, if `None` default model is selected\n :return: selected model directory with an indication\n that the model is the default or not\n "
]
|
Please provide a description of the function:def format(self, record: logging.LogRecord) -> str:
if platform.system() != 'Linux': # Avoid funny logs on Windows & MacOS
return super().format(record)
record.msg = (
self.STYLE[record.levelname] + record.msg + self.STYLE['END'])
record.levelname = (
self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])
return super().format(record) | [
"Format log records to produce colored messages.\n\n :param record: log record\n :return: log message\n "
]
|
Please provide a description of the function:def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'githubtoken',
help="Github OAuth token, see https://developer.github.com/v3/oauth/")
parser.add_argument('destination', help="location of the downloaded repos")
parser.add_argument(
'-n', '--nbrepo', help="number of repositories per language",
type=int, default=1000)
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
destination = Path(args.destination)
nb_repos = args.nbrepo
token = args.githubtoken
languages = config_dict('languages.json')
destination.mkdir(exist_ok=True)
for pos, language in enumerate(sorted(languages), 1):
LOGGER.info("Step %.2f%%, %s", 100 * pos / len(languages), language)
LOGGER.info("Fetch %d repos infos for language %s", nb_repos, language)
repos = _retrieve_repo_details(language, nb_repos, token)
LOGGER.info("%d repos details kept. Downloading", len(repos))
_download_repos(language, repos, destination)
LOGGER.info("Language %s repos downloaded", language)
LOGGER.debug("Exit OK") | [
"Github repositories downloaded command line"
]
|
Please provide a description of the function:def retry(default=None):
def decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
for pos in range(1, MAX_RETRIES):
try:
return func(*args, **kw)
except (RuntimeError, requests.ConnectionError) as error:
LOGGER.warning("Failed: %s, %s", type(error), error)
# Wait a bit before retrying
for _ in range(pos):
_rest()
LOGGER.warning("Request Aborted")
return default
return _wrapper
return decorator | [
"Retry functions after failures",
"Retry decorator"
]
|
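A hypothetical use of the decorator above; `MAX_RETRIES` and `_rest` are assumed to be module-level helpers in the original, and `fetch_items` is illustrative:

```python
import requests

@retry(default=[])
def fetch_items(url):
    response = requests.get(url, timeout=10)
    if not response.ok:
        # RuntimeError is one of the exception types the decorator retries.
        raise RuntimeError("HTTP {}".format(response.status_code))
    return response.json()

# Returns [] instead of raising once the retry budget is exhausted.
items = fetch_items("https://api.github.com/repositories")
```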
Please provide a description of the function:def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('learn', help="learning source codes directory")
parser.add_argument('keywords', help="output keywords file, JSON")
parser.add_argument(
'-n', '--nbkeywords', type=int, default=10000,
help="the number of keywords to keep")
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
learn_path = Path(args.learn)
keywords_path = Path(args.keywords)
nb_keywords = args.nbkeywords
languages = config_dict('languages.json')
exts = {ext: lang for lang, exts in languages.items() for ext in exts}
term_count = Counter()
document_count = Counter()
pos = 0
LOGGER.info("Reading files form %s", learn_path)
for pos, path in enumerate(Path(learn_path).glob('**/*'), 1):
if pos % STEP == 0:
LOGGER.debug("Processed %d", pos)
gc.collect() # Cleanup dirt
if not path.is_file() or not exts.get(path.suffix.lstrip('.')):
continue
counter = _extract(path)
term_count.update(counter)
document_count.update(counter.keys())
nb_terms = sum(term_count.values())
nb_documents = pos - 1
if not nb_documents:
LOGGER.error("No source files found in %s", learn_path)
raise RuntimeError('No source files in {}'.format(learn_path))
LOGGER.info("%d unique terms found", len(term_count))
terms = _most_frequent(
(term_count, nb_terms), (document_count, nb_documents), nb_keywords)
keywords = {
token: int(hashlib.sha1(token.encode()).hexdigest(), 16)
for token in terms
}
with keywords_path.open('w') as keywords_file:
json.dump(keywords, keywords_file, indent=2, sort_keys=True)
LOGGER.info("%d keywords written into %s", len(keywords), keywords_path)
LOGGER.debug("Exit OK") | [
"Keywords generator command line"
]
|
Please provide a description of the function:def main() -> None:
try:
_real_main()
except GuesslangError as error:
LOGGER.critical("Failed: %s", error)
sys.exit(-1)
except KeyboardInterrupt:
LOGGER.critical("Cancelled!")
sys.exit(-2) | [
"Run command line"
]
|
Please provide a description of the function:def split(text: str) -> List[str]:
return [word for word in SEPARATOR.split(text) if word.strip(' \t')] | [
"Split a text into a list of tokens.\n\n :param text: the text to split\n :return: tokens\n "
]
|
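An illustrative stand-in for the `SEPARATOR` regex the function references (the real guesslang pattern may differ); splitting with a capturing group keeps the punctuation as its own tokens:

```python
import re
from typing import List

SEPARATOR = re.compile(r'(\W)')  # illustrative; keeps delimiters as tokens

def split(text: str) -> List[str]:
    return [word for word in SEPARATOR.split(text) if word.strip(' \t')]

print(split("def add(a, b):"))
# ['def', 'add', '(', 'a', ',', 'b', ')', ':']
```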
Please provide a description of the function:def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('source', help="location of the downloaded repos")
parser.add_argument('destination', help="location of the extracted files")
parser.add_argument(
'-t', '--nb-test-files', help="number of testing files per language",
type=int, default=5000)
parser.add_argument(
'-l', '--nb-learn-files', help="number of learning files per language",
type=int, default=10000)
parser.add_argument(
'-r', '--remove', help="remove repos that cannot be read",
action='store_true', default=False)
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help="show debug messages")
args = parser.parse_args()
config_logging(args.debug)
source = Path(args.source)
destination = Path(args.destination)
nb_test = args.nb_test_files
nb_learn = args.nb_learn_files
remove = args.remove
repos = _find_repos(source)
split_repos = _split_repos(repos, nb_test, nb_learn)
split_files = _find_files(*split_repos, nb_test, nb_learn, remove)
_unzip_all(*split_files, destination)
LOGGER.info("Files saved into %s", destination)
LOGGER.debug("Exit OK") | [
"Files extractor command line"
]
|
Please provide a description of the function:def combine_slices(slice_datasets, rescale=None):
'''
Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array is in column-major order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slices must approximately form a grid. This means there cannot
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if these approximations deviate by
more than 1e-8; however, since we have seen real datasets with deviations
up to 1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
'''
if len(slice_datasets) == 0:
raise DicomImportException("Must provide at least one DICOM dataset")
_validate_slices_form_uniform_grid(slice_datasets)
voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
return voxels, transform | []
|
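A hedged usage sketch for `combine_slices` with pydicom (the directory path is illustrative):

```python
from pathlib import Path

import numpy as np
import pydicom

datasets = [pydicom.dcmread(str(p)) for p in Path("ct_series").glob("*.dcm")]
voxels, ijk_to_xyz = combine_slices(datasets)

# Map a pixel index (i, j, k) into patient-space (x, y, z) coordinates
# using the returned 4x4 affine and a homogeneous coordinate.
ijk = np.array([0, 0, 0, 1])
xyz = (ijk_to_xyz @ ijk)[:3]
```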
Please provide a description of the function:def _validate_slices_form_uniform_grid(slice_datasets):
'''
Perform various data checks to ensure that the list of slices forms an
evenly-spaced grid of data.
Some of these checks are probably not required if the data follows the
DICOM specification, however it seems pertinent to check anyway.
'''
invariant_properties = [
'Modality',
'SOPClassUID',
'SeriesInstanceUID',
'Rows',
'Columns',
'PixelSpacing',
'PixelRepresentation',
'BitsAllocated',
'BitsStored',
'HighBit',
]
for property_name in invariant_properties:
_slice_attribute_equal(slice_datasets, property_name)
_validate_image_orientation(slice_datasets[0].ImageOrientationPatient)
_slice_ndarray_attribute_almost_equal(slice_datasets, 'ImageOrientationPatient', 1e-5)
slice_positions = _slice_positions(slice_datasets)
_check_for_missing_slices(slice_positions) | []
|
Please provide a description of the function:def _validate_image_orientation(image_orientation):
'''
Ensure that the image orientation is supported
- The direction cosines have magnitudes of 1 (just in case)
- The direction cosines are perpendicular
'''
row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation)
if not _almost_zero(np.dot(row_cosine, column_cosine), 1e-4):
raise DicomImportException("Non-orthogonal direction cosines: {}, {}".format(row_cosine, column_cosine))
elif not _almost_zero(np.dot(row_cosine, column_cosine), 1e-8):
logger.warning("Direction cosines aren't quite orthogonal: {}, {}".format(row_cosine, column_cosine))
if not _almost_one(np.linalg.norm(row_cosine), 1e-4):
raise DicomImportException("The row direction cosine's magnitude is not 1: {}".format(row_cosine))
elif not _almost_one(np.linalg.norm(row_cosine), 1e-8):
logger.warning("The row direction cosine's magnitude is not quite 1: {}".format(row_cosine))
if not _almost_one(np.linalg.norm(column_cosine), 1e-4):
raise DicomImportException("The column direction cosine's magnitude is not 1: {}".format(column_cosine))
elif not _almost_one(np.linalg.norm(column_cosine), 1e-8):
logger.warning("The column direction cosine's magnitude is not quite 1: {}".format(column_cosine)) | []
|
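A quick numeric illustration of the two tolerance bands in `_validate_image_orientation`: a dot-product deviation between 1e-8 and 1e-4 passes with a warning, anything larger raises:

```python
import numpy as np

row = np.array([1.0, 0.0, 0.0])
col = np.array([5e-5, 1.0, 0.0])  # slightly non-orthogonal column cosine

deviation = abs(np.dot(row, col))
print(deviation < 1e-4)  # True  -> accepted (no exception)
print(deviation < 1e-8)  # False -> but logged as a warning
```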
Please provide a description of the function:def parse_url(cls, string): # pylint: disable=redefined-outer-name
match = cls.URL_RE.match(string)
if not match:
raise InvalidKeyError(cls, string)
return match.groupdict() | [
"\n If it can be parsed as a version_guid with no preceding org + offering, returns a dict\n with key 'version_guid' and the value,\n\n If it can be parsed as a org + offering, returns a dict\n with key 'id' and optional keys 'branch' and 'version_guid'.\n\n Raises:\n InvalidKeyError: if string cannot be parsed -or- string ends with a newline.\n "
]
|
Please provide a description of the function:def offering(self):
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.course and not self.run:
return None
elif not self.run and self.course:
return self.course
return "/".join([self.course, self.run]) | [
"\n Deprecated. Use course and run independently.\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
parse = cls.parse_url(serialized)
if parse['version_guid']:
parse['version_guid'] = cls.as_object_id(parse['version_guid'])
return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS}) | [
"\n Return a CourseLocator parsing the given serialized string\n :param serialized: matches the string to a CourseLocator\n "
]
|
Please provide a description of the function:def make_usage_key_from_deprecated_string(self, location_url):
warnings.warn(
"make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key",
DeprecationWarning,
stacklevel=2
)
return BlockUsageLocator.from_string(location_url).replace(run=self.run) | [
"\n Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.\n\n NOTE: this prejudicially takes the tag, org, and course from the url not self.\n\n Raises:\n InvalidKeyError: if the url does not parse\n "
]
|
Please provide a description of the function:def _to_deprecated_string(self):
return u'/'.join([self.org, self.course, self.run]) | [
"Returns an 'old-style' course id, represented as 'org/course/run'"
]
|
Please provide a description of the function:def _from_deprecated_string(cls, serialized):
if serialized.count('/') != 2:
raise InvalidKeyError(cls, serialized)
return cls(*serialized.split('/'), deprecated=True) | [
"\n Return an instance of `cls` parsed from its deprecated `serialized` form.\n\n This will be called only if :meth:`OpaqueKey.from_string` is unable to\n parse a key out of `serialized`, and only if `set_deprecated_fallback` has\n been called to register a fallback class.\n\n Args:\n cls: The :class:`OpaqueKey` subclass.\n serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\n Raises:\n InvalidKeyError: Should be raised if `serialized` is not a valid serialized key\n understood by `cls`.\n "
]
|
Please provide a description of the function:def for_branch(self, branch):
if self.org is None and branch is not None:
raise InvalidKeyError(self.__class__, "Branches must have full library ids not just versions")
return self.replace(branch=branch, version_guid=None) | [
"\n Return a new CourseLocator for another branch of the same library (also version agnostic)\n "
]
|
Please provide a description of the function:def _to_string(self):
parts = []
if self.library: # pylint: disable=no-member
parts.extend([self.org, self.library]) # pylint: disable=no-member
if self.branch: # pylint: disable=no-member
parts.append(u"{prefix}@{branch}".format(prefix=self.BRANCH_PREFIX, branch=self.branch)) # pylint: disable=no-member
if self.version_guid: # pylint: disable=no-member
parts.append(u"{prefix}@{guid}".format(prefix=self.VERSION_PREFIX, guid=self.version_guid)) # pylint: disable=no-member
return u"+".join(parts) | [
"\n Return a string representing this location.\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
# Allow access to _from_string protected method
course_key = CourseLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = cls.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
return cls(course_key, parsed_parts.get('block_type'), block_id) | [
"\n Requests CourseLocator to deserialize its part and then adds the local deserialization of block\n "
]
|
Please provide a description of the function:def for_branch(self, branch):
return self.replace(course_key=self.course_key.for_branch(branch)) | [
"\n Return a UsageLocator for the same block in a different branch of the course.\n "
]
|
Please provide a description of the function:def for_version(self, version_guid):
return self.replace(course_key=self.course_key.for_version(version_guid)) | [
"\n Return a UsageLocator for the same block in a different branch of the course.\n "
]
|
Please provide a description of the function:def _parse_block_ref(cls, block_ref, deprecated=False):
if deprecated and block_ref is None:
return None
if isinstance(block_ref, LocalId):
return block_ref
is_valid_deprecated = deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref)
is_valid = cls.ALLOWED_ID_RE.match(block_ref)
if is_valid or is_valid_deprecated:
return block_ref
else:
raise InvalidKeyError(cls, block_ref) | [
"\n Given `block_ref`, tries to parse it into a valid block reference.\n\n Returns `block_ref` if it is valid.\n\n Raises:\n InvalidKeyError: if `block_ref` is invalid.\n "
]
|
Please provide a description of the function:def make_relative(cls, course_locator, block_type, block_id):
if hasattr(course_locator, 'course_key'):
course_locator = course_locator.course_key
return course_locator.make_usage_key(
block_type=block_type,
block_id=block_id
) | [
"\n Return a new instance which has the given block_id in the given course\n :param course_locator: may be a BlockUsageLocator in the same snapshot\n "
]
|
Please provide a description of the function:def _to_string(self):
# Allow access to _to_string protected method
return u"{course_key}+{BLOCK_TYPE_PREFIX}@{block_type}+{BLOCK_PREFIX}@{block_id}".format(
course_key=self.course_key._to_string(), # pylint: disable=protected-access
BLOCK_TYPE_PREFIX=self.BLOCK_TYPE_PREFIX,
block_type=self.block_type,
BLOCK_PREFIX=self.BLOCK_PREFIX,
block_id=self.block_id
) | [
"\n Return a string representing this location.\n "
]
|
Please provide a description of the function:def html_id(self):
if self.deprecated:
id_fields = [self.DEPRECATED_TAG, self.org, self.course, self.block_type, self.block_id, self.version_guid]
id_string = u"-".join([v for v in id_fields if v is not None])
return self.clean_for_html(id_string)
else:
return self.block_id | [
"\n Return an id which can be used on an html page as an id attr of an html element. It is currently also\n persisted by some clients to identify blocks.\n\n To make compatible with old Location object functionality. I don't believe this behavior fits at this\n place, but I have no way to override. We should clearly define the purpose and restrictions of this\n (e.g., I'm assuming periods are fine).\n "
]
|
Please provide a description of the function:def _to_deprecated_string(self):
# pylint: disable=missing-format-attribute
url = u"{0.DEPRECATED_TAG}://{0.course_key.org}/{0.course_key.course}/{0.block_type}/{0.block_id}".format(self)
if self.course_key.branch:
url += u"@{rev}".format(rev=self.course_key.branch)
return url | [
"\n Returns an old-style location, represented as:\n i4x://org/course/category/name[@revision] # Revision is optional\n "
]
|
Please provide a description of the function:def _from_deprecated_string(cls, serialized):
match = cls.DEPRECATED_URL_RE.match(serialized)
if match is None:
raise InvalidKeyError(BlockUsageLocator, serialized)
groups = match.groupdict()
course_key = CourseLocator(
org=groups['org'],
course=groups['course'],
run=None,
branch=groups.get('revision'),
deprecated=True,
)
return cls(course_key, groups['category'], groups['name'], deprecated=True) | [
"\n Return an instance of `cls` parsed from its deprecated `serialized` form.\n\n This will be called only if :meth:`OpaqueKey.from_string` is unable to\n parse a key out of `serialized`, and only if `set_deprecated_fallback` has\n been called to register a fallback class.\n\n Args:\n cls: The :class:`OpaqueKey` subclass.\n serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\n Raises:\n InvalidKeyError: Should be raised if `serialized` is not a valid serialized key\n understood by `cls`.\n "
]
|
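A hedged round-trip example for the deprecated serialization, calling the protected helpers above directly for illustration (the org/course/category/name values are made up; in application code these are normally reached via `UsageKey.from_string` through the deprecated fallback):

```python
# Assuming the standard opaque-keys package layout.
from opaque_keys.edx.locator import BlockUsageLocator

key = BlockUsageLocator._from_deprecated_string(
    "i4x://edX/DemoX/problem/intro@draft")
print(key.block_type, key.block_id)  # problem intro
print(key._to_deprecated_string())   # i4x://edX/DemoX/problem/intro@draft
```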
Please provide a description of the function:def to_deprecated_son(self, prefix='', tag='i4x'):
# This preserves the old SON keys ('tag', 'org', 'course', 'category', 'name', 'revision'),
# because that format was used to store data historically in mongo
# adding tag b/c deprecated form used it
son = SON({prefix + 'tag': tag})
for field_name in ('org', 'course'):
# Temporary filtering of run field because deprecated form left it out
son[prefix + field_name] = getattr(self.course_key, field_name)
for (dep_field_name, field_name) in [('category', 'block_type'), ('name', 'block_id')]:
son[prefix + dep_field_name] = getattr(self, field_name)
son[prefix + 'revision'] = self.course_key.branch
return son | [
"\n Returns a SON object that represents this location\n "
]
|
Please provide a description of the function:def _from_deprecated_son(cls, id_dict, run):
course_key = CourseLocator(
id_dict['org'],
id_dict['course'],
run,
id_dict['revision'],
deprecated=True,
)
return cls(course_key, id_dict['category'], id_dict['name'], deprecated=True) | [
"\n Return the Location decoding this id_dict and run\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
# Allow access to _from_string protected method
library_key = LibraryLocator._from_string(serialized) # pylint: disable=protected-access
parsed_parts = LibraryLocator.parse_url(serialized)
block_id = parsed_parts.get('block_id', None)
if block_id is None:
raise InvalidKeyError(cls, serialized)
block_type = parsed_parts.get('block_type')
if block_type is None:
raise InvalidKeyError(cls, serialized)
return cls(library_key, parsed_parts.get('block_type'), block_id) | [
"\n Requests LibraryLocator to deserialize its part and then adds the local deserialization of block\n "
]
|
Please provide a description of the function:def for_branch(self, branch):
return self.replace(library_key=self.library_key.for_branch(branch)) | [
"\n Return a UsageLocator for the same block in a different branch of the library.\n "
]
|
Please provide a description of the function:def for_version(self, version_guid):
return self.replace(library_key=self.library_key.for_version(version_guid)) | [
"\n Return a UsageLocator for the same block in a different version of the library.\n "
]
|
Please provide a description of the function:def _to_string(self):
return u"{}+{}@{}".format(text_type(self.definition_id), self.BLOCK_TYPE_PREFIX, self.block_type) | [
"\n Return a string representing this location.\n unicode(self) returns something like this: \"519665f6223ebd6980884f2b+type+problem\"\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
parse = cls.URL_RE.match(serialized)
if not parse:
raise InvalidKeyError(cls, serialized)
parse = parse.groupdict()
if parse['definition_id']:
parse['definition_id'] = cls.as_object_id(parse['definition_id'])
return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS}) | [
"\n Return a DefinitionLocator parsing the given serialized string\n :param serialized: matches the string to\n "
]
|
Please provide a description of the function:def _to_deprecated_string(self):
# pylint: disable=missing-format-attribute
url = u"/{0.DEPRECATED_TAG}/{0.course_key.org}/{0.course_key.course}/{0.block_type}/{0.block_id}".format(self)
if self.course_key.branch:
url += u'@{}'.format(self.course_key.branch)
return url | [
"\n Returns an old-style location, represented as:\n\n /c4x/org/course/category/name\n "
]
|
Please provide a description of the function:def to_deprecated_list_repr(self):
return ['c4x', self.org, self.course, self.block_type, self.block_id, None] | [
"\n Thumbnail locations are stored as lists [c4x, org, course, thumbnail, path, None] in contentstore.mongo\n That should be the only use of this method, but the method is general enough to provide the pre-opaque\n Location fields as an array in the old order with the tag.\n "
]
|
Please provide a description of the function:def _strip_object(key):
if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
return key.for_branch(None).version_agnostic()
else:
return key | [
"\n Strips branch and version info if the given key supports those attributes.\n "
]
|
Please provide a description of the function:def _strip_value(value, lookup='exact'):
if lookup == 'in':
stripped_value = [_strip_object(el) for el in value]
else:
stripped_value = _strip_object(value)
return stripped_value | [
"\n Helper function to remove the branch and version information from the given value,\n which could be a single object or a list.\n "
]
|
Please provide a description of the function:def validate(self, value, model_instance):
# raise validation error if the use of this field says it can't be blank but it is
if not self.blank and value is self.Empty:
raise ValidationError(self.error_messages['blank'])
else:
return super(OpaqueKeyField, self).validate(value, model_instance) | [
"Validate Empty values, otherwise defer to the parent"
]
|
Please provide a description of the function:def run_validators(self, value):
if value is self.Empty:
return
return super(OpaqueKeyField, self).run_validators(value) | [
"Validate Empty values, otherwise defer to the parent"
]
|
Please provide a description of the function:def from_string(cls, serialized):
warnings.warn(
"SlashSeparatedCourseKey is deprecated! Please use locator.CourseLocator",
DeprecationWarning,
stacklevel=2
)
return CourseLocator.from_string(serialized) | [
"Deprecated. Use :meth:`locator.CourseLocator.from_string`."
]
|
Please provide a description of the function:def replace(self, **kwargs):
# Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.
return SlashSeparatedCourseKey(
kwargs.pop('org', self.org),
kwargs.pop('course', self.course),
kwargs.pop('run', self.run),
**kwargs
) | [
"\n Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing\n their corresponding values.\n\n Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs.\n Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs.\n "
]
|
Please provide a description of the function:def _deprecation_warning(cls):
if issubclass(cls, Location):
warnings.warn(
"Location is deprecated! Please use locator.BlockUsageLocator",
DeprecationWarning,
stacklevel=3
)
elif issubclass(cls, AssetLocation):
warnings.warn(
"AssetLocation is deprecated! Please use locator.AssetLocator",
DeprecationWarning,
stacklevel=3
)
else:
warnings.warn(
"{} is deprecated!".format(cls),
DeprecationWarning,
stacklevel=3
) | [
"Display a deprecation warning for the given cls"
]
|
Please provide a description of the function:def _check_location_part(cls, val, regexp):
cls._deprecation_warning()
return CourseLocator._check_location_part(val, regexp) | [
"Deprecated. See CourseLocator._check_location_part"
]
|
Please provide a description of the function:def _clean(cls, value, invalid):
cls._deprecation_warning()
return BlockUsageLocator._clean(value, invalid) | [
"Deprecated. See BlockUsageLocator._clean"
]
|
Please provide a description of the function:def _from_deprecated_son(cls, id_dict, run):
cls._deprecation_warning()
return BlockUsageLocator._from_deprecated_son(id_dict, run) | [
"Deprecated. See BlockUsageLocator._from_deprecated_son"
]
|
Please provide a description of the function:def replace(self, **kwargs):
# NOTE: Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.
return Location(
kwargs.pop('org', self.course_key.org),
kwargs.pop('course', self.course_key.course),
kwargs.pop('run', self.course_key.run),
kwargs.pop('category', self.block_type),
kwargs.pop('name', self.block_id),
revision=kwargs.pop('revision', self.branch),
**kwargs
) | [
"\n Return: a new :class:`Location` with specific ``kwargs`` replacing\n their corresponding values.\n\n Using BlockUsageLocator's replace function results in a mismatch of __init__ args and kwargs.\n Replace tries to instantiate a Location object with BlockUsageLocator's args and kwargs.\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
# Allow access to _from_string protected method
parsed_parts = cls.parse_url(serialized)
course_key = CourseLocator(
parsed_parts.get('org'), parsed_parts.get('course'), parsed_parts.get('run'),
# specifically not saying deprecated=True b/c that would lose the run on serialization
)
block_id = parsed_parts.get('block_id')
return cls(course_key, parsed_parts.get('block_type'), block_id) | [
"\n see super\n "
]
|
Please provide a description of the function:def _to_string(self):
parts = [self.org, self.course, self.run, self.block_type, self.block_id]
return u"+".join(parts) | [
"\n Return a string representing this location.\n "
]
|
Please provide a description of the function:def replace(self, **kwargs):
# NOTE: Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.
return AssetLocation(
kwargs.pop('org', self.org),
kwargs.pop('course', self.course),
kwargs.pop('run', self.run),
kwargs.pop('category', self.block_type),
kwargs.pop('name', self.block_id),
revision=kwargs.pop('revision', self.branch),
**kwargs
) | [
"\n Return: a new :class:`AssetLocation` with specific ``kwargs`` replacing\n their corresponding values.\n\n Using AssetLocator's replace function results in a mismatch of __init__ args and kwargs.\n Replace tries to instantiate an AssetLocation object with AssetLocators args and kwargs.\n "
]
|
Please provide a description of the function:def _from_deprecated_son(cls, id_dict, run):
cls._deprecation_warning()
return AssetLocator._from_deprecated_son(id_dict, run) | [
"Deprecated. See BlockUsageLocator._from_deprecated_son"
]
|
Please provide a description of the function:def _from_string(cls, serialized):
if ':' not in serialized:
raise InvalidKeyError(
"BlockTypeKeyV1 keys must contain ':' separating the block family from the block_type.", serialized)
family, __, block_type = serialized.partition(':')
return cls(family, block_type) | [
"\n Return an instance of `cls` parsed from its `serialized` form.\n\n Args:\n cls: The :class:`OpaqueKey` subclass.\n serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\n Raises:\n InvalidKeyError: Should be raised if `serialized` is not a valid serialized key\n understood by `cls`.\n "
]
|
Please provide a description of the function:def _decode_v1(value):
decode_colons = value.replace('$::', '::')
decode_dollars = decode_colons.replace('$$', '$')
reencoded = _encode_v1(decode_dollars)
if reencoded != value:
raise ValueError('Ambiguous encoded value, {!r} could have been encoded as {!r}'.format(value, reencoded))
return decode_dollars | [
"\n Decode '::' and '$' characters encoded by `_encode`.\n "
]
|
Please provide a description of the function:def _join_keys_v1(left, right):
if left.endswith(':') or '::' in left:
raise ValueError("Can't join a left string ending in ':' or containing '::'")
return u"{}::{}".format(_encode_v1(left), _encode_v1(right)) | [
"\n Join two keys into a format separable by using _split_keys_v1.\n "
]
|
Please provide a description of the function:def _split_keys_v1(joined):
left, _, right = joined.partition('::')
return _decode_v1(left), _decode_v1(right) | [
"\n Split two keys out a string created by _join_keys_v1.\n "
]
|
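`_encode_v1` itself is not shown above; a minimal sketch of the escaping it would need for the round trip to hold, assuming '$' doubles to '$$' and '::' becomes '$::':

```python
def _encode_v1(value):
    # Escape '$' first so the later '::' escape stays unambiguous.
    return value.replace('$', '$$').replace('::', '$::')

joined = _join_keys_v1('block-v1:org+course', 'my_aside')
print(joined)                  # block-v1:org+course::my_aside
print(_split_keys_v1(joined))  # ('block-v1:org+course', 'my_aside')
```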
Please provide a description of the function:def _decode_v2(value):
if re.search(r'(?<!\$):', value):
raise ValueError("Unescaped ':' in the encoded string")
decode_colons = value.replace('$:', ':')
if re.search(r'(?<!\$)(\$\$)*\$([^$]|\Z)', decode_colons):
raise ValueError("Unescaped '$' in encoded string")
return decode_colons.replace('$$', '$') | [
"\n Decode ':' and '$' characters encoded by `_encode`.\n "
]
|
Please provide a description of the function:def _split_keys_v2(joined):
left, _, right = joined.rpartition('::')
return _decode_v2(left), _decode_v2(right) | [
"\n Split two keys out a string created by _join_keys_v2.\n "
]
|
Please provide a description of the function:def replace(self, **kwargs):
if 'definition_key' in kwargs:
for attr in self.DEFINITION_KEY_FIELDS:
kwargs.pop(attr, None)
else:
kwargs['definition_key'] = self.definition_key.replace(**{
key: kwargs.pop(key)
for key
in self.DEFINITION_KEY_FIELDS
if key in kwargs
})
return super(AsideDefinitionKeyV2, self).replace(**kwargs) | [
"\n Return: a new :class:`AsideDefinitionKeyV2` with ``KEY_FIELDS`` specified in ``kwargs`` replaced\n with their corresponding values. Deprecation value is also preserved.\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
try:
def_key, aside_type = _split_keys_v2(serialized)
return cls(DefinitionKey.from_string(def_key), aside_type)
except ValueError as exc:
raise InvalidKeyError(cls, exc.args) | [
"\n Return an instance of `cls` parsed from its `serialized` form.\n\n Args:\n cls: The :class:`OpaqueKey` subclass.\n serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\n Raises:\n InvalidKeyError: Should be raised if `serialized` is not a valid serialized key\n understood by `cls`.\n "
]
|
Please provide a description of the function:def map_into_course(self, course_key):
return self.replace(usage_key=self.usage_key.map_into_course(course_key)) | [
"\n Return a new :class:`UsageKey` or :class:`AssetKey` representing this usage inside the\n course identified by the supplied :class:`CourseKey`. It returns the same type as\n `self`\n\n Args:\n course_key (:class:`CourseKey`): The course to map this object into.\n\n Returns:\n A new :class:`CourseObjectMixin` instance.\n "
]
|
Please provide a description of the function:def replace(self, **kwargs):
if 'usage_key' in kwargs:
for attr in self.USAGE_KEY_ATTRS:
kwargs.pop(attr, None)
else:
kwargs['usage_key'] = self.usage_key.replace(**{
key: kwargs.pop(key)
for key
in self.USAGE_KEY_ATTRS
if key in kwargs
})
return super(AsideUsageKeyV2, self).replace(**kwargs) | [
"\n Return: a new :class:`AsideUsageKeyV2` with ``KEY_FIELDS`` specified in ``kwargs`` replaced\n with their corresponding values. Deprecation value is also preserved.\n "
]
|
Please provide a description of the function:def _from_string(cls, serialized):
try:
usage_key, aside_type = _split_keys_v1(serialized)
return cls(UsageKey.from_string(usage_key), aside_type)
except ValueError as exc:
raise InvalidKeyError(cls, exc.args) | [
"\n Return an instance of `cls` parsed from its `serialized` form.\n\n Args:\n cls: The :class:`OpaqueKey` subclass.\n serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\n Raises:\n InvalidKeyError: Should be raised if `serialized` is not a valid serialized key\n understood by `cls`.\n "
]
|
Please provide a description of the function:def refresher(name, refreshers=CompletionRefresher.refreshers):
def wrapper(wrapped):
refreshers[name] = wrapped
return wrapped
return wrapper | [
"Decorator to add the decorated function to the dictionary of\n refreshers. Any function decorated with a @refresher will be executed as\n part of the completion refresh routine."
]
|
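A registration sketch in the style the decorator expects; the callback signature `(completer, executor)` mirrors how the refresh routine would invoke each refresher, and the method names are illustrative:

```python
@refresher('databases')
def refresh_databases(completer, executor):
    # Hypothetical completer/executor methods; real names depend on the
    # SQLCompleter and SQLExecute implementations.
    completer.extend_database_names(executor.databases())
```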
Please provide a description of the function:def refresh(self, executor, callbacks, completer_options=None):
if completer_options is None:
completer_options = {}
if self.is_refreshing():
self._restart_refresh.set()
return [(None, None, None, 'Auto-completion refresh restarted.')]
else:
self._completer_thread = threading.Thread(
target=self._bg_refresh,
args=(executor, callbacks, completer_options),
name='completion_refresh')
self._completer_thread.setDaemon(True)
self._completer_thread.start()
return [(None, None, None,
'Auto-completion refresh started in the background.')] | [
"Creates a SQLCompleter object and populates it with the relevant\n completion suggestions in a background thread.\n\n executor - SQLExecute object, used to extract the credentials to connect\n to the database.\n callbacks - A function or a list of functions to call after the thread\n has completed the refresh. The newly created completion\n object will be passed in as an argument to each callback.\n completer_options - dict of options to pass to SQLCompleter.\n "
]
|
Please provide a description of the function:def handle_cd_command(arg):
CD_CMD = 'cd'
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1] if len(tokens) > 1 else None
if not directory:
return False, "No folder name was provided."
try:
os.chdir(directory)
subprocess.call(['pwd'])
return True, None
except OSError as e:
return False, e.strerror | [
"Handles a `cd` shell command by calling python's os.chdir."
]
|
Please provide a description of the function:def format_uptime(uptime_in_seconds):
m, s = divmod(int(uptime_in_seconds), 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
uptime_values = []
for value, unit in ((d, 'days'), (h, 'hours'), (m, 'min'), (s, 'sec')):
if value == 0 and not uptime_values:
# Don't include a value/unit if the unit isn't applicable to
# the uptime. E.g. don't do 0 days 0 hours 1 min 30 sec.
continue
elif value == 1 and unit.endswith('s'):
# Remove the "s" if the unit is singular.
unit = unit[:-1]
uptime_values.append('{0} {1}'.format(value, unit))
uptime = ' '.join(uptime_values)
return uptime | [
"Format number of seconds into human-readable string.\n :param uptime_in_seconds: The server uptime in seconds.\n :returns: A human-readable string representing the uptime.\n >>> uptime = format_uptime('56892')\n >>> print(uptime)\n 15 hours 48 min 12 sec\n "
]
|
Please provide a description of the function:def get_editor_query(sql):
sql = sql.strip()
# The reason we can't simply do .strip('\e') is that it strips characters,
# not a substring. So it'll strip "e" in the end of the sql also!
# Ex: "select * from style\e" -> "select * from styl".
pattern = re.compile('(^\\\e|\\\e$)')
while pattern.search(sql):
sql = pattern.sub('', sql)
return sql | [
"Get the query part of an editor command."
]
|
Please provide a description of the function:def open_external_editor(filename=None, sql=None):
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or ''
MARKER = '# Type your query above this line.\n'
# Populate the editor buffer with the partial sql (if available) and a
# placeholder comment.
query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
filename=filename, extension='.sql')
if filename:
try:
with open(filename, encoding='utf-8') as f:
query = f.read()
except IOError:
message = 'Error reading file: %s.' % filename
if query is not None:
query = query.split(MARKER, 1)[0].rstrip('\n')
else:
# Don't return None for the caller to deal with.
# Empty string is ok.
query = sql
return (query, message) | [
"Open external editor, wait for the user to type in their query, return\n the query.\n :return: list with one tuple, query as first element.\n "
]
|
Please provide a description of the function:def execute_favorite_query(cur, arg, **_):
if arg == '':
for result in list_favorite_queries():
yield result
return  # don't fall through to favorite-name parsing when just listing
name, _, arg_str = arg.partition(' ')
args = shlex.split(arg_str)
query = favoritequeries.get(name)
if query is None:
message = "No favorite query: %s" % (name)
yield (None, None, None, message)
else:
query, arg_error = subst_favorite_query_args(query, args)
if arg_error:
yield (None, None, None, arg_error)
else:
for sql in sqlparse.split(query):
_logger.debug("query is [%s]", sql)
sql = sql.rstrip(';')
title = '> %s' % (sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
yield (title, cur.fetchall(), headers, None)
else:
yield (title, None, None, None) | [
"Returns (title, rows, headers, status)",
"Parse out favorite name and optional substitution parameters"
]
|
Please provide a description of the function:def list_favorite_queries():
headers = ["Name", "Query"]
rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]
if not rows:
status = '\nNo favorite queries found.' + favoritequeries.usage
else:
status = ''
return [('', rows, headers, status)] | [
"List of all favorite queries.\n Returns (title, rows, headers, status)"
]
|
Please provide a description of the function:def subst_favorite_query_args(query, args):
for idx, val in enumerate(args):
subst_var = '$' + str(idx + 1)
if subst_var not in query:
return [None, 'query does not have substitution parameter ' + subst_var + ':\n ' + query]
query = query.replace(subst_var, val)
match = re.search('\\$\d+', query)
if match:
return[None, 'missing substitution for ' + match.group(0) + ' in query:\n ' + query]
return [query, None] | [
"replace positional parameters ($1...$N) in query."
]
|
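An illustrative round trip of the positional substitution, including the error path for a missing argument:

```python
query, err = subst_favorite_query_args(
    "select * from users where id = $1 and status = '$2'",
    ["42", "active"])
print(query)  # select * from users where id = 42 and status = 'active'
print(err)    # None

_, err = subst_favorite_query_args("select $1, $2", ["only-one"])
print(err)    # missing substitution for $2 in query: ...
```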