| field | type |
|---|---|
| repo | string (7 to 55 chars) |
| path | string (4 to 127 chars) |
| func_name | string (1 to 88 chars) |
| original_string | string (75 to 19.8k chars) |
| language | string (1 class: python) |
| code | string (75 to 19.8k chars) |
| code_tokens | sequence of strings |
| docstring | string (3 to 17.3k chars) |
| docstring_tokens | sequence of strings |
| sha | string (40 chars) |
| url | string (87 to 242 chars) |
| partition | string (1 class: train) |
repo: mila-iqia/fuel | path: fuel/transformers/__init__.py | func_name: ExpectsAxisLabels.verify_axis_labels | language: python

def verify_axis_labels(self, expected, actual, source_name):
    """Verify that axis labels for a given source are as expected.

    Parameters
    ----------
    expected : tuple
        A tuple of strings representing the expected axis labels.
    actual : tuple or None
        A tuple of strings representing the actual axis labels, or
        `None` if they could not be determined.
    source_name : str
        The name of the source being checked. Used for caching the
        results of checks so that the check is only performed once.

    Notes
    -----
    Logs a warning in case of `actual=None`, raises an error on
    other mismatches.
    """
    if not getattr(self, '_checked_axis_labels', False):
        self._checked_axis_labels = defaultdict(bool)
    if not self._checked_axis_labels[source_name]:
        if actual is None:
            log.warning("%s instance could not verify (missing) axis "
                        "expected %s, got None",
                        self.__class__.__name__, expected)
        else:
            if expected != actual:
                raise AxisLabelsMismatchError("{} expected axis labels "
                                              "{}, got {} instead".format(
                                                  self.__class__.__name__,
                                                  expected, actual))
        self._checked_axis_labels[source_name] = True
"""Verify that axis labels for a given source are as expected.
Parameters
----------
expected : tuple
A tuple of strings representing the expected axis labels.
actual : tuple or None
A tuple of strings representing the actual axis labels, or
`None` if they could not be determined.
source_name : str
The name of the source being checked. Used for caching the
results of checks so that the check is only performed once.
Notes
-----
Logs a warning in case of `actual=None`, raises an error on
other mismatches.
"""
if not getattr(self, '_checked_axis_labels', False):
self._checked_axis_labels = defaultdict(bool)
if not self._checked_axis_labels[source_name]:
if actual is None:
log.warning("%s instance could not verify (missing) axis "
"expected %s, got None",
self.__class__.__name__, expected)
else:
if expected != actual:
raise AxisLabelsMismatchError("{} expected axis labels "
"{}, got {} instead".format(
self.__class__.__name__,
expected, actual))
self._checked_axis_labels[source_name] = True | [
"def",
"verify_axis_labels",
"(",
"self",
",",
"expected",
",",
"actual",
",",
"source_name",
")",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"'_checked_axis_labels'",
",",
"False",
")",
":",
"self",
".",
"_checked_axis_labels",
"=",
"defaultdict",
"(",
"bool",
")",
"if",
"not",
"self",
".",
"_checked_axis_labels",
"[",
"source_name",
"]",
":",
"if",
"actual",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"%s instance could not verify (missing) axis \"",
"\"expected %s, got None\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"expected",
")",
"else",
":",
"if",
"expected",
"!=",
"actual",
":",
"raise",
"AxisLabelsMismatchError",
"(",
"\"{} expected axis labels \"",
"\"{}, got {} instead\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"expected",
",",
"actual",
")",
")",
"self",
".",
"_checked_axis_labels",
"[",
"source_name",
"]",
"=",
"True"
] | Verify that axis labels for a given source are as expected.
Parameters
----------
expected : tuple
A tuple of strings representing the expected axis labels.
actual : tuple or None
A tuple of strings representing the actual axis labels, or
`None` if they could not be determined.
source_name : str
The name of the source being checked. Used for caching the
results of checks so that the check is only performed once.
Notes
-----
Logs a warning in case of `actual=None`, raises an error on
other mismatches. | [
"Verify",
"that",
"axis",
"labels",
"for",
"a",
"given",
"source",
"are",
"as",
"expected",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/__init__.py#L34-L67 | partition: train
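A minimal sketch of how this check is typically reached: a transformer that mixes in `ExpectsAxisLabels` calls it on the labels advertised by the wrapped data stream. The transformer class, expected labels, and flipping behaviour below are illustrative assumptions, not part of the record above.

```python
from fuel.transformers import ExpectsAxisLabels, SourcewiseTransformer


class FlipImages(ExpectsAxisLabels, SourcewiseTransformer):
    """Hypothetical transformer: check the labels, then flip the width axis."""

    def transform_source_batch(self, batch, source_name):
        # Warns if the stream advertises no labels for this source and
        # raises AxisLabelsMismatchError on a real mismatch.
        self.verify_axis_labels(('batch', 'channel', 'height', 'width'),
                                self.data_stream.axis_labels.get(source_name),
                                source_name)
        return batch[..., ::-1]
```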
repo: mila-iqia/fuel | path: fuel/transformers/__init__.py | func_name: Batch.get_data | language: python

def get_data(self, request=None):
    """Get data from the dataset."""
    if request is None:
        raise ValueError
    data = [[] for _ in self.sources]
    for i in range(request):
        try:
            for source_data, example in zip(
                    data, next(self.child_epoch_iterator)):
                source_data.append(example)
        except StopIteration:
            # If some data has been extracted and `strict` is not set,
            # we should spit out this data before stopping iteration.
            if not self.strictness and data[0]:
                break
            elif self.strictness > 1 and data[0]:
                raise ValueError
            raise
    return tuple(numpy.asarray(source_data) for source_data in data)
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except StopIteration:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
return tuple(numpy.asarray(source_data) for source_data in data) | [
"def",
"get_data",
"(",
"self",
",",
"request",
"=",
"None",
")",
":",
"if",
"request",
"is",
"None",
":",
"raise",
"ValueError",
"data",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"self",
".",
"sources",
"]",
"for",
"i",
"in",
"range",
"(",
"request",
")",
":",
"try",
":",
"for",
"source_data",
",",
"example",
"in",
"zip",
"(",
"data",
",",
"next",
"(",
"self",
".",
"child_epoch_iterator",
")",
")",
":",
"source_data",
".",
"append",
"(",
"example",
")",
"except",
"StopIteration",
":",
"# If some data has been extracted and `strict` is not set,",
"# we should spit out this data before stopping iteration.",
"if",
"not",
"self",
".",
"strictness",
"and",
"data",
"[",
"0",
"]",
":",
"break",
"elif",
"self",
".",
"strictness",
">",
"1",
"and",
"data",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"raise",
"return",
"tuple",
"(",
"numpy",
".",
"asarray",
"(",
"source_data",
")",
"for",
"source_data",
"in",
"data",
")"
] | Get data from the dataset. | [
"Get",
"data",
"from",
"the",
"dataset",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/__init__.py#L608-L626 | partition: train
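`get_data` is normally driven by the stream's epoch iterator rather than called directly. A sketch of the usual setup, assuming an MNIST file has already been downloaded and converted locally; the dataset choice and batch size are illustrative.

```python
from fuel.datasets import MNIST
from fuel.schemes import ConstantScheme, SequentialExampleScheme
from fuel.streams import DataStream
from fuel.transformers import Batch

dataset = MNIST(('train',))
example_stream = DataStream(
    dataset, iteration_scheme=SequentialExampleScheme(dataset.num_examples))
# Each request drawn from ConstantScheme(128) becomes the `request`
# argument of Batch.get_data, so every batch holds 128 examples.
batch_stream = Batch(example_stream, iteration_scheme=ConstantScheme(128))
features, targets = next(batch_stream.get_epoch_iterator())
```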
repo: mila-iqia/fuel | path: fuel/utils/parallel.py | func_name: _producer_wrapper | language: python

def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
    """A shim that sets up a socket and starts the producer callable.

    Parameters
    ----------
    f : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PUSH socket. Must be picklable.
    port : int
        The port on which the socket should connect.
    addr : str, optional
        Address to which the socket should connect. Defaults
        to localhost ('tcp://127.0.0.1').
    """
    try:
        context = zmq.Context()
        socket = context.socket(zmq.PUSH)
        socket.connect(':'.join([addr, str(port)]))
        f(socket)
    finally:
        # Works around a Python 3.x bug.
        context.destroy()
"""A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
"""
try:
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect(':'.join([addr, str(port)]))
f(socket)
finally:
# Works around a Python 3.x bug.
context.destroy() | [
"def",
"_producer_wrapper",
"(",
"f",
",",
"port",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
")",
":",
"try",
":",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUSH",
")",
"socket",
".",
"connect",
"(",
"':'",
".",
"join",
"(",
"[",
"addr",
",",
"str",
"(",
"port",
")",
"]",
")",
")",
"f",
"(",
"socket",
")",
"finally",
":",
"# Works around a Python 3.x bug.",
"context",
".",
"destroy",
"(",
")"
] | A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1'). | [
"A",
"shim",
"that",
"sets",
"up",
"a",
"socket",
"and",
"starts",
"the",
"producer",
"callable",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L14-L36 | partition: train
repo: mila-iqia/fuel | path: fuel/utils/parallel.py | func_name: _spawn_producer | language: python

def _spawn_producer(f, port, addr='tcp://127.0.0.1'):
    """Start a process that sends results on a PUSH socket.

    Parameters
    ----------
    f : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PUSH socket. Must be picklable.

    Returns
    -------
    process : multiprocessing.Process
        The process handle of the created producer process.
    """
    process = Process(target=_producer_wrapper, args=(f, port, addr))
    process.start()
    return process
"""Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process.
"""
process = Process(target=_producer_wrapper, args=(f, port, addr))
process.start()
return process | [
"def",
"_spawn_producer",
"(",
"f",
",",
"port",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
")",
":",
"process",
"=",
"Process",
"(",
"target",
"=",
"_producer_wrapper",
",",
"args",
"=",
"(",
"f",
",",
"port",
",",
"addr",
")",
")",
"process",
".",
"start",
"(",
")",
"return",
"process"
] | Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process. | [
"Start",
"a",
"process",
"that",
"sends",
"results",
"on",
"a",
"PUSH",
"socket",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L39-L56 | partition: train
repo: mila-iqia/fuel | path: fuel/utils/parallel.py | func_name: producer_consumer | language: python

def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
                      port=None, context=None):
    """A producer-consumer pattern.

    Parameters
    ----------
    producer : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PUSH socket. Must be picklable.
    consumer : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PULL socket.
    addr : str, optional
        Address to which the socket should connect. Defaults
        to localhost ('tcp://127.0.0.1').
    port : int, optional
        The port on which the consumer should listen.
    context : zmq.Context, optional
        The ZeroMQ Context to use. One will be created otherwise.

    Returns
    -------
    result
        Passes along whatever `consumer` returns.

    Notes
    -----
    This sets up a PULL socket in the calling process and forks
    a process that calls `producer` on a PUSH socket. When the
    consumer returns, the producer process is terminated.

    Wrap `consumer` or `producer` in a `functools.partial` object
    in order to send additional arguments; the callables passed in
    should expect only one required, positional argument, the socket
    handle.
    """
    context_created = False
    if context is None:
        context_created = True
        context = zmq.Context()
    try:
        consumer_socket = context.socket(zmq.PULL)
        if port is None:
            port = consumer_socket.bind_to_random_port(addr)
        try:
            process = _spawn_producer(producer, port)
            result = consumer(consumer_socket)
        finally:
            process.terminate()
        return result
    finally:
        # Works around a Python 3.x bug.
        if context_created:
            context.destroy()
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L59-L113 | partition: train
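A small end-to-end sketch of the pattern, exercising the two helper functions above through the public entry point; the payload and count are illustrative.

```python
from fuel.utils.parallel import producer_consumer


def produce(socket):
    # Runs in the spawned producer process; must be picklable (module-level).
    for i in range(10):
        socket.send_pyobj(i)


def consume(socket):
    # Runs in the calling process on the PULL end of the connection.
    return sum(socket.recv_pyobj() for _ in range(10))


if __name__ == '__main__':
    print(producer_consumer(produce, consume))  # prints 45
```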
repo: mila-iqia/fuel | path: fuel/converters/dogs_vs_cats.py | func_name: convert_dogs_vs_cats | language: python

def convert_dogs_vs_cats(directory, output_directory,
                         output_filename='dogs_vs_cats.hdf5'):
    """Converts the Dogs vs. Cats dataset to HDF5.

    Converts the Dogs vs. Cats dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.dogs_vs_cats`. The converted dataset is saved as
    'dogs_vs_cats.hdf5'.

    It assumes the existence of the following files:

    * `dogs_vs_cats.train.zip`
    * `dogs_vs_cats.test1.zip`

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'dogs_vs_cats.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    # Prepare output file
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
    hdf_features = h5file.create_dataset('image_features', (37500,),
                                         dtype=dtype)
    hdf_shapes = h5file.create_dataset('image_features_shapes', (37500, 3),
                                       dtype='int32')
    hdf_labels = h5file.create_dataset('targets', (25000, 1), dtype='uint8')
    # Attach shape annotations and scales
    hdf_features.dims.create_scale(hdf_shapes, 'shapes')
    hdf_features.dims[0].attach_scale(hdf_shapes)
    hdf_shapes_labels = h5file.create_dataset('image_features_shapes_labels',
                                              (3,), dtype='S7')
    hdf_shapes_labels[...] = ['channel'.encode('utf8'),
                              'height'.encode('utf8'),
                              'width'.encode('utf8')]
    hdf_features.dims.create_scale(hdf_shapes_labels, 'shape_labels')
    hdf_features.dims[0].attach_scale(hdf_shapes_labels)
    # Add axis annotations
    hdf_features.dims[0].label = 'batch'
    hdf_labels.dims[0].label = 'batch'
    hdf_labels.dims[1].label = 'index'
    # Convert
    i = 0
    for split, split_size in zip([TRAIN, TEST], [25000, 12500]):
        # Open the ZIP file
        filename = os.path.join(directory, split)
        zip_file = zipfile.ZipFile(filename, 'r')
        image_names = zip_file.namelist()[1:]  # Discard the directory name
        # Shuffle the examples
        if split == TRAIN:
            rng = numpy.random.RandomState(123522)
            rng.shuffle(image_names)
        else:
            image_names.sort(key=lambda fn: int(os.path.splitext(fn[6:])[0]))
        # Convert from JPEG to NumPy arrays
        with progress_bar(filename, split_size) as bar:
            for image_name in image_names:
                # Save image
                image = numpy.array(Image.open(zip_file.open(image_name)))
                image = image.transpose(2, 0, 1)
                hdf_features[i] = image.flatten()
                hdf_shapes[i] = image.shape
                # Cats are 0, Dogs are 1
                if split == TRAIN:
                    hdf_labels[i] = 0 if 'cat' in image_name else 1
                # Update progress
                i += 1
                bar.update(i if split == TRAIN else i - 25000)
    # Add the labels
    split_dict = {}
    sources = ['image_features', 'targets']
    split_dict['train'] = dict(zip(sources, [(0, 25000)] * 2))
    split_dict['test'] = {sources[0]: (25000, 37500)}
    h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
    h5file.flush()
    h5file.close()
    return (output_path,)
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/dogs_vs_cats.py#L16-L113 | partition: train
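A sketch of invoking the converter directly from Python; the directory paths are illustrative, and the two Kaggle zip files named in the docstring must already be present in the input directory.

```python
from fuel.converters.dogs_vs_cats import convert_dogs_vs_cats

# Writes /data/fuel/dogs_vs_cats.hdf5 and returns its path in a 1-tuple.
output_path, = convert_dogs_vs_cats('/data/raw', '/data/fuel')
```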
repo: mila-iqia/fuel | path: fuel/bin/fuel_download.py | func_name: main | language: python

def main(args=None):
    """Entry point for `fuel-download` script.

    This function can also be imported and used from Python.

    Parameters
    ----------
    args : iterable, optional (default: None)
        A list of arguments that will be passed to Fuel's downloading
        utility. If this argument is not specified, `sys.argv[1:]` will
        be used.
    """
    built_in_datasets = dict(downloaders.all_downloaders)
    if fuel.config.extra_downloaders:
        for name in fuel.config.extra_downloaders:
            extra_datasets = dict(
                importlib.import_module(name).all_downloaders)
            if any(key in built_in_datasets for key in extra_datasets.keys()):
                raise ValueError('extra downloaders conflict in name with '
                                 'built-in downloaders')
            built_in_datasets.update(extra_datasets)
    parser = argparse.ArgumentParser(
        description='Download script for built-in datasets.')
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument(
        "-d", "--directory", help="where to save the downloaded files",
        type=str, default=os.getcwd())
    parent_parser.add_argument(
        "--clear", help="clear the downloaded files", action='store_true')
    subparsers = parser.add_subparsers()
    download_functions = {}
    for name, fill_subparser in built_in_datasets.items():
        subparser = subparsers.add_parser(
            name, parents=[parent_parser],
            help='Download the {} dataset'.format(name))
        # Allows the parser to know which subparser was called.
        subparser.set_defaults(which_=name)
        download_functions[name] = fill_subparser(subparser)
    args = parser.parse_args()
    args_dict = vars(args)
    download_function = download_functions[args_dict.pop('which_')]
    try:
        download_function(**args_dict)
    except NeedURLPrefix:
        parser.error(url_prefix_message)
"""Entry point for `fuel-download` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's downloading
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(downloaders.all_downloaders)
if fuel.config.extra_downloaders:
for name in fuel.config.extra_downloaders:
extra_datasets = dict(
importlib.import_module(name).all_downloaders)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra downloaders conflict in name with '
'built-in downloaders')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Download script for built-in datasets.')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="where to save the downloaded files",
type=str, default=os.getcwd())
parent_parser.add_argument(
"--clear", help="clear the downloaded files", action='store_true')
subparsers = parser.add_subparsers()
download_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Download the {} dataset'.format(name))
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
download_functions[name] = fill_subparser(subparser)
args = parser.parse_args()
args_dict = vars(args)
download_function = download_functions[args_dict.pop('which_')]
try:
download_function(**args_dict)
except NeedURLPrefix:
parser.error(url_prefix_message) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"built_in_datasets",
"=",
"dict",
"(",
"downloaders",
".",
"all_downloaders",
")",
"if",
"fuel",
".",
"config",
".",
"extra_downloaders",
":",
"for",
"name",
"in",
"fuel",
".",
"config",
".",
"extra_downloaders",
":",
"extra_datasets",
"=",
"dict",
"(",
"importlib",
".",
"import_module",
"(",
"name",
")",
".",
"all_downloaders",
")",
"if",
"any",
"(",
"key",
"in",
"built_in_datasets",
"for",
"key",
"in",
"extra_datasets",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'extra downloaders conflict in name with '",
"'built-in downloaders'",
")",
"built_in_datasets",
".",
"update",
"(",
"extra_datasets",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Download script for built-in datasets.'",
")",
"parent_parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--directory\"",
",",
"help",
"=",
"\"where to save the downloaded files\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"--clear\"",
",",
"help",
"=",
"\"clear the downloaded files\"",
",",
"action",
"=",
"'store_true'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
")",
"download_functions",
"=",
"{",
"}",
"for",
"name",
",",
"fill_subparser",
"in",
"built_in_datasets",
".",
"items",
"(",
")",
":",
"subparser",
"=",
"subparsers",
".",
"add_parser",
"(",
"name",
",",
"parents",
"=",
"[",
"parent_parser",
"]",
",",
"help",
"=",
"'Download the {} dataset'",
".",
"format",
"(",
"name",
")",
")",
"# Allows the parser to know which subparser was called.",
"subparser",
".",
"set_defaults",
"(",
"which_",
"=",
"name",
")",
"download_functions",
"[",
"name",
"]",
"=",
"fill_subparser",
"(",
"subparser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"args_dict",
"=",
"vars",
"(",
"args",
")",
"download_function",
"=",
"download_functions",
"[",
"args_dict",
".",
"pop",
"(",
"'which_'",
")",
"]",
"try",
":",
"download_function",
"(",
"*",
"*",
"args_dict",
")",
"except",
"NeedURLPrefix",
":",
"parser",
".",
"error",
"(",
"url_prefix_message",
")"
] | Entry point for `fuel-download` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's downloading
utility. If this argument is not specified, `sys.argv[1:]` will
be used. | [
"Entry",
"point",
"for",
"fuel",
"-",
"download",
"script",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_download.py#L19-L64 | partition: train
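The docstring notes that the entry point can be imported from Python. In this snapshot the body calls `parser.parse_args()` without forwarding `args`, so a programmatic call would set `sys.argv` rather than rely on the parameter; a hedged sketch with an illustrative dataset name and directory:

```python
import sys
from fuel.bin.fuel_download import main

# Equivalent to running `fuel-download mnist -d /data/fuel` from a shell.
sys.argv = ['fuel-download', 'mnist', '-d', '/data/fuel']
main()
```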
repo: mila-iqia/fuel | path: fuel/downloaders/mnist.py | func_name: fill_subparser | language: python

def fill_subparser(subparser):
    """Sets up a subparser to download the MNIST dataset files.

    The following MNIST dataset files are downloaded from Yann LeCun's
    website [LECUN]:
    `train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
    `t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `mnist` command.
    """
    filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
                 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
    urls = ['http://yann.lecun.com/exdb/mnist/' + f for f in filenames]
    subparser.set_defaults(urls=urls, filenames=filenames)
    return default_downloader
"""Sets up a subparser to download the MNIST dataset files.
The following MNIST dataset files are downloaded from Yann LeCun's
website [LECUN]:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
urls = ['http://yann.lecun.com/exdb/mnist/' + f for f in filenames]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader | [
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"filenames",
"=",
"[",
"'train-images-idx3-ubyte.gz'",
",",
"'train-labels-idx1-ubyte.gz'",
",",
"'t10k-images-idx3-ubyte.gz'",
",",
"'t10k-labels-idx1-ubyte.gz'",
"]",
"urls",
"=",
"[",
"'http://yann.lecun.com/exdb/mnist/'",
"+",
"f",
"for",
"f",
"in",
"filenames",
"]",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"urls",
",",
"filenames",
"=",
"filenames",
")",
"return",
"default_downloader"
] | Sets up a subparser to download the MNIST dataset files.
The following MNIST dataset files are downloaded from Yann LeCun's
website [LECUN]:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command. | [
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"MNIST",
"dataset",
"files",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/mnist.py#L4-L22 | partition: train
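A sketch of how the download script consumes this hook: it registers an `mnist` subcommand and receives `default_downloader` back to do the actual fetching. The parser wiring below is illustrative.

```python
import argparse
from fuel.downloaders.mnist import fill_subparser

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
downloader = fill_subparser(subparsers.add_parser('mnist'))
args = parser.parse_args(['mnist'])
# vars(args) now carries the four MNIST URLs and filenames that
# `downloader` (fuel's default_downloader) would retrieve.
```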
repo: mila-iqia/fuel | path: fuel/bin/fuel_info.py | func_name: main | language: python

def main(args=None):
    """Entry point for `fuel-info` script.

    This function can also be imported and used from Python.

    Parameters
    ----------
    args : iterable, optional (default: None)
        A list of arguments that will be passed to Fuel's information
        utility. If this argument is not specified, `sys.argv[1:]` will
        be used.
    """
    parser = argparse.ArgumentParser(
        description='Extracts metadata from a Fuel-converted HDF5 file.')
    parser.add_argument("filename", help="HDF5 file to analyze")
    args = parser.parse_args()

    with h5py.File(args.filename, 'r') as h5file:
        interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
        fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
        fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')

    message_prefix = message_prefix_template.format(
        os.path.basename(args.filename))
    message_body = message_body_template.format(
        fuel_convert_command, interface_version, fuel_convert_version)
    message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
                       message_body])
    print(message)
"""Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
parser = argparse.ArgumentParser(
description='Extracts metadata from a Fuel-converted HDF5 file.')
parser.add_argument("filename", help="HDF5 file to analyze")
args = parser.parse_args()
with h5py.File(args.filename, 'r') as h5file:
interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')
message_prefix = message_prefix_template.format(
os.path.basename(args.filename))
message_body = message_body_template.format(
fuel_convert_command, interface_version, fuel_convert_version)
message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
message_body])
print(message) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Extracts metadata from a Fuel-converted HDF5 file.'",
")",
"parser",
".",
"add_argument",
"(",
"\"filename\"",
",",
"help",
"=",
"\"HDF5 file to analyze\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"with",
"h5py",
".",
"File",
"(",
"args",
".",
"filename",
",",
"'r'",
")",
"as",
"h5file",
":",
"interface_version",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'h5py_interface_version'",
",",
"'N/A'",
")",
"fuel_convert_version",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'fuel_convert_version'",
",",
"'N/A'",
")",
"fuel_convert_command",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'fuel_convert_command'",
",",
"'N/A'",
")",
"message_prefix",
"=",
"message_prefix_template",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"filename",
")",
")",
"message_body",
"=",
"message_body_template",
".",
"format",
"(",
"fuel_convert_command",
",",
"interface_version",
",",
"fuel_convert_version",
")",
"message",
"=",
"''",
".",
"join",
"(",
"[",
"'\\n'",
",",
"message_prefix",
",",
"'\\n'",
",",
"'='",
"*",
"len",
"(",
"message_prefix",
")",
",",
"message_body",
"]",
")",
"print",
"(",
"message",
")"
] | Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used. | [
"Entry",
"point",
"for",
"fuel",
"-",
"info",
"script",
"."
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_info.py#L22-L51 | partition: train
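The three attributes the script reports are ordinary HDF5 file attributes, so they can also be read directly with h5py; the filename below is illustrative.

```python
import h5py

with h5py.File('mnist.hdf5', 'r') as f:
    for key in ('h5py_interface_version', 'fuel_convert_version',
                'fuel_convert_command'):
        # Same fallback the script uses when an attribute is absent.
        print(key, f.attrs.get(key, 'N/A'))
```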
repo: mila-iqia/fuel | path: fuel/converters/caltech101_silhouettes.py | func_name: convert_silhouettes | language: python

def convert_silhouettes(size, directory, output_directory,
                        output_filename=None):
    """ Convert the CalTech 101 Silhouettes Datasets.

    Parameters
    ----------
    size : {16, 28}
        Convert either the 16x16 or 28x28 sized version of the dataset.
    directory : str
        Directory in which the required input files reside.
    output_filename : str
        Where to save the converted dataset.
    """
    if size not in (16, 28):
        raise ValueError('size must be 16 or 28')
    if output_filename is None:
        output_filename = 'caltech101_silhouettes{}.hdf5'.format(size)
    output_file = os.path.join(output_directory, output_filename)

    input_file = 'caltech101_silhouettes_{}_split1.mat'.format(size)
    input_file = os.path.join(directory, input_file)
    if not os.path.isfile(input_file):
        raise MissingInputFiles('Required files missing', [input_file])

    with h5py.File(output_file, mode="w") as h5file:
        mat = loadmat(input_file)

        train_features = mat['train_data'].reshape([-1, 1, size, size])
        train_targets = mat['train_labels']
        valid_features = mat['val_data'].reshape([-1, 1, size, size])
        valid_targets = mat['val_labels']
        test_features = mat['test_data'].reshape([-1, 1, size, size])
        test_targets = mat['test_labels']

        data = (
            ('train', 'features', train_features),
            ('train', 'targets', train_targets),
            ('valid', 'features', valid_features),
            ('valid', 'targets', valid_targets),
            ('test', 'features', test_features),
            ('test', 'targets', test_targets),
        )
        fill_hdf5_file(h5file, data)

        for i, label in enumerate(('batch', 'channel', 'height', 'width')):
            h5file['features'].dims[i].label = label
        for i, label in enumerate(('batch', 'index')):
            h5file['targets'].dims[i].label = label

    return (output_file,)
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/caltech101_silhouettes.py#L9-L61 | partition: train
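A sketch of a direct call, assuming `caltech101_silhouettes_28_split1.mat` already sits in the current directory; paths are illustrative.

```python
from fuel.converters.caltech101_silhouettes import convert_silhouettes

# Produces ./caltech101_silhouettes28.hdf5 with train/valid/test splits.
output_file, = convert_silhouettes(28, '.', '.')
```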
repo: mila-iqia/fuel | path: fuel/schemes.py | func_name: cross_validation | language: python

def cross_validation(scheme_class, num_examples, num_folds, strict=True,
                     **kwargs):
    """Return pairs of schemes to be used for cross-validation.

    Parameters
    ----------
    scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
        The type of the returned schemes. The constructor is called with an
        iterator and `**kwargs` as arguments.
    num_examples : int
        The number of examples in the datastream.
    num_folds : int
        The number of folds to return.
    strict : bool, optional
        If `True`, enforce that `num_examples` is divisible by `num_folds`
        and so, that all validation sets have the same size. If `False`,
        the size of the validation set is returned along the iteration
        schemes. Defaults to `True`.

    Yields
    ------
    fold : tuple
        The generator returns `num_folds` tuples. The first two elements of
        the tuple are the training and validation iteration schemes. If
        `strict` is set to `False`, the tuple has a third element
        corresponding to the size of the validation set.
    """
    if strict and num_examples % num_folds != 0:
        raise ValueError(("{} examples are not divisible in {} evenly-sized " +
                          "folds. To allow this, have a look at the " +
                          "`strict` argument.").format(num_examples,
                                                       num_folds))

    for i in xrange(num_folds):
        begin = num_examples * i // num_folds
        end = num_examples * (i+1) // num_folds
        train = scheme_class(list(chain(xrange(0, begin),
                                        xrange(end, num_examples))),
                             **kwargs)
        valid = scheme_class(xrange(begin, end), **kwargs)

        if strict:
            yield (train, valid)
        else:
            yield (train, valid, end - begin)
sha: 1d6292dc25e3a115544237e392e61bff6631d23c | url: https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/schemes.py#L260-L305 | partition: train
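A sketch of building five folds of shuffled batch schemes over 100 examples; the scheme class and sizes are illustrative.

```python
from fuel.schemes import ShuffledScheme, cross_validation

for train_scheme, valid_scheme in cross_validation(ShuffledScheme,
                                                   num_examples=100,
                                                   num_folds=5,
                                                   batch_size=10):
    # Wrap each scheme in a DataStream over the same dataset to train on
    # 80 examples per fold and validate on the remaining 20.
    pass
```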
repo: mila-iqia/fuel | path: fuel/bin/fuel_convert.py | func_name: main | language: python

def main(args=None):
    """Entry point for `fuel-convert` script.

    This function can also be imported and used from Python.

    Parameters
    ----------
    args : iterable, optional (default: None)
        A list of arguments that will be passed to Fuel's conversion
        utility. If this argument is not specified, `sys.argv[1:]` will
        be used.
    """
    built_in_datasets = dict(converters.all_converters)
    if fuel.config.extra_converters:
        for name in fuel.config.extra_converters:
            extra_datasets = dict(
                importlib.import_module(name).all_converters)
            if any(key in built_in_datasets for key in extra_datasets.keys()):
                raise ValueError('extra converters conflict in name with '
                                 'built-in converters')
            built_in_datasets.update(extra_datasets)
    parser = argparse.ArgumentParser(
        description='Conversion script for built-in datasets.')
    subparsers = parser.add_subparsers()
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument(
        "-d", "--directory", help="directory in which input files reside",
        type=str, default=os.getcwd())
    convert_functions = {}
    for name, fill_subparser in built_in_datasets.items():
        subparser = subparsers.add_parser(
            name, parents=[parent_parser],
            help='Convert the {} dataset'.format(name))
        subparser.add_argument(
            "-o", "--output-directory", help="where to save the dataset",
            type=str, default=os.getcwd(), action=CheckDirectoryAction)
        subparser.add_argument(
            "-r", "--output_filename", help="new name of the created dataset",
            type=str, default=None)
        # Allows the parser to know which subparser was called.
        subparser.set_defaults(which_=name)
        convert_functions[name] = fill_subparser(subparser)
    args = parser.parse_args(args)
    args_dict = vars(args)
    if args_dict['output_filename'] is not None and\
            os.path.splitext(args_dict['output_filename'])[1] not in\
            ('.hdf5', '.hdf', '.h5'):
        args_dict['output_filename'] += '.hdf5'
    if args_dict['output_filename'] is None:
        args_dict.pop('output_filename')
    convert_function = convert_functions[args_dict.pop('which_')]
    try:
        output_paths = convert_function(**args_dict)
    except MissingInputFiles as e:
        intro = "The following required files were not found:\n"
        message = "\n".join([intro] + [" * " + f for f in e.filenames])
        message += "\n\nDid you forget to run fuel-download?"
        parser.error(message)
    # Tag the newly-created file(s) with H5PYDataset version and command-line
    # options
    for output_path in output_paths:
        h5file = h5py.File(output_path, 'a')
        interface_version = H5PYDataset.interface_version.encode('utf-8')
        h5file.attrs['h5py_interface_version'] = interface_version
        fuel_convert_version = converters.__version__.encode('utf-8')
        h5file.attrs['fuel_convert_version'] = fuel_convert_version
        command = [os.path.basename(sys.argv[0])] + sys.argv[1:]
        h5file.attrs['fuel_convert_command'] = (
            ' '.join(command).encode('utf-8'))
        h5file.flush()
        h5file.close()
"""Entry point for `fuel-convert` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's conversion
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(converters.all_converters)
if fuel.config.extra_converters:
for name in fuel.config.extra_converters:
extra_datasets = dict(
importlib.import_module(name).all_converters)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra converters conflict in name with '
'built-in converters')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Conversion script for built-in datasets.')
subparsers = parser.add_subparsers()
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="directory in which input files reside",
type=str, default=os.getcwd())
convert_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Convert the {} dataset'.format(name))
subparser.add_argument(
"-o", "--output-directory", help="where to save the dataset",
type=str, default=os.getcwd(), action=CheckDirectoryAction)
subparser.add_argument(
"-r", "--output_filename", help="new name of the created dataset",
type=str, default=None)
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
convert_functions[name] = fill_subparser(subparser)
args = parser.parse_args(args)
args_dict = vars(args)
if args_dict['output_filename'] is not None and\
os.path.splitext(args_dict['output_filename'])[1] not in\
('.hdf5', '.hdf', '.h5'):
args_dict['output_filename'] += '.hdf5'
if args_dict['output_filename'] is None:
args_dict.pop('output_filename')
convert_function = convert_functions[args_dict.pop('which_')]
try:
output_paths = convert_function(**args_dict)
except MissingInputFiles as e:
intro = "The following required files were not found:\n"
message = "\n".join([intro] + [" * " + f for f in e.filenames])
message += "\n\nDid you forget to run fuel-download?"
parser.error(message)
# Tag the newly-created file(s) with H5PYDataset version and command-line
# options
for output_path in output_paths:
h5file = h5py.File(output_path, 'a')
interface_version = H5PYDataset.interface_version.encode('utf-8')
h5file.attrs['h5py_interface_version'] = interface_version
fuel_convert_version = converters.__version__.encode('utf-8')
h5file.attrs['fuel_convert_version'] = fuel_convert_version
command = [os.path.basename(sys.argv[0])] + sys.argv[1:]
h5file.attrs['fuel_convert_command'] = (
' '.join(command).encode('utf-8'))
h5file.flush()
h5file.close() | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"built_in_datasets",
"=",
"dict",
"(",
"converters",
".",
"all_converters",
")",
"if",
"fuel",
".",
"config",
".",
"extra_converters",
":",
"for",
"name",
"in",
"fuel",
".",
"config",
".",
"extra_converters",
":",
"extra_datasets",
"=",
"dict",
"(",
"importlib",
".",
"import_module",
"(",
"name",
")",
".",
"all_converters",
")",
"if",
"any",
"(",
"key",
"in",
"built_in_datasets",
"for",
"key",
"in",
"extra_datasets",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'extra converters conflict in name with '",
"'built-in converters'",
")",
"built_in_datasets",
".",
"update",
"(",
"extra_datasets",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Conversion script for built-in datasets.'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
")",
"parent_parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--directory\"",
",",
"help",
"=",
"\"directory in which input files reside\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
"convert_functions",
"=",
"{",
"}",
"for",
"name",
",",
"fill_subparser",
"in",
"built_in_datasets",
".",
"items",
"(",
")",
":",
"subparser",
"=",
"subparsers",
".",
"add_parser",
"(",
"name",
",",
"parents",
"=",
"[",
"parent_parser",
"]",
",",
"help",
"=",
"'Convert the {} dataset'",
".",
"format",
"(",
"name",
")",
")",
"subparser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output-directory\"",
",",
"help",
"=",
"\"where to save the dataset\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"action",
"=",
"CheckDirectoryAction",
")",
"subparser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--output_filename\"",
",",
"help",
"=",
"\"new name of the created dataset\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
")",
"# Allows the parser to know which subparser was called.",
"subparser",
".",
"set_defaults",
"(",
"which_",
"=",
"name",
")",
"convert_functions",
"[",
"name",
"]",
"=",
"fill_subparser",
"(",
"subparser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"args_dict",
"=",
"vars",
"(",
"args",
")",
"if",
"args_dict",
"[",
"'output_filename'",
"]",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"splitext",
"(",
"args_dict",
"[",
"'output_filename'",
"]",
")",
"[",
"1",
"]",
"not",
"in",
"(",
"'.hdf5'",
",",
"'.hdf'",
",",
"'.h5'",
")",
":",
"args_dict",
"[",
"'output_filename'",
"]",
"+=",
"'.hdf5'",
"if",
"args_dict",
"[",
"'output_filename'",
"]",
"is",
"None",
":",
"args_dict",
".",
"pop",
"(",
"'output_filename'",
")",
"convert_function",
"=",
"convert_functions",
"[",
"args_dict",
".",
"pop",
"(",
"'which_'",
")",
"]",
"try",
":",
"output_paths",
"=",
"convert_function",
"(",
"*",
"*",
"args_dict",
")",
"except",
"MissingInputFiles",
"as",
"e",
":",
"intro",
"=",
"\"The following required files were not found:\\n\"",
"message",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"intro",
"]",
"+",
"[",
"\" * \"",
"+",
"f",
"for",
"f",
"in",
"e",
".",
"filenames",
"]",
")",
"message",
"+=",
"\"\\n\\nDid you forget to run fuel-download?\"",
"parser",
".",
"error",
"(",
"message",
")",
"# Tag the newly-created file(s) with H5PYDataset version and command-line",
"# options",
"for",
"output_path",
"in",
"output_paths",
":",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"'a'",
")",
"interface_version",
"=",
"H5PYDataset",
".",
"interface_version",
".",
"encode",
"(",
"'utf-8'",
")",
"h5file",
".",
"attrs",
"[",
"'h5py_interface_version'",
"]",
"=",
"interface_version",
"fuel_convert_version",
"=",
"converters",
".",
"__version__",
".",
"encode",
"(",
"'utf-8'",
")",
"h5file",
".",
"attrs",
"[",
"'fuel_convert_version'",
"]",
"=",
"fuel_convert_version",
"command",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"]",
"+",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"h5file",
".",
"attrs",
"[",
"'fuel_convert_command'",
"]",
"=",
"(",
"' '",
".",
"join",
"(",
"command",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")"
] | Entry point for `fuel-convert` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's conversion
utility. If this argument is not specified, `sys.argv[1:]` will
be used. | [
"Entry",
"point",
"for",
"fuel",
"-",
"convert",
"script",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_convert.py#L24-L98 | train |
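A hedged usage sketch for the record above: `main` can be driven from Python with the same arguments the `fuel-convert` command would receive. The module path `fuel.bin.fuel_convert`, the `mnist` converter name, and the directories are assumptions for illustration only.

# Minimal sketch: call the fuel-convert entry point programmatically.
from fuel.bin.fuel_convert import main  # assumed import path for fuel/bin/fuel_convert.py

# Equivalent to the shell command:
#   fuel-convert mnist --directory /data/raw --output-directory /data/hdf5
main(['mnist', '--directory', '/data/raw', '--output-directory', '/data/hdf5'])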
mila-iqia/fuel | fuel/utils/lock.py | refresh_lock | def refresh_lock(lock_file):
"""'Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned.
"""
unique_id = '%s_%s_%s' % (
os.getpid(),
''.join([str(random.randint(0, 9)) for i in range(10)]), hostname)
try:
lock_write = open(lock_file, 'w')
lock_write.write(unique_id + '\n')
lock_write.close()
except Exception:
# In some strange cases, this happens. To prevent all tests
# from failing, we release the lock, but as there is a
# problem, we still keep the original exception.
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
raise
return unique_id | python | def refresh_lock(lock_file):
"""'Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned.
"""
unique_id = '%s_%s_%s' % (
os.getpid(),
''.join([str(random.randint(0, 9)) for i in range(10)]), hostname)
try:
lock_write = open(lock_file, 'w')
lock_write.write(unique_id + '\n')
lock_write.close()
except Exception:
# In some strange cases, this happens. To prevent all tests
# from failing, we release the lock, but as there is a
# problem, we still keep the original exception.
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
raise
return unique_id | [
"def",
"refresh_lock",
"(",
"lock_file",
")",
":",
"unique_id",
"=",
"'%s_%s_%s'",
"%",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"9",
")",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
"]",
")",
",",
"hostname",
")",
"try",
":",
"lock_write",
"=",
"open",
"(",
"lock_file",
",",
"'w'",
")",
"lock_write",
".",
"write",
"(",
"unique_id",
"+",
"'\\n'",
")",
"lock_write",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"# In some strange case, this happen. To prevent all tests",
"# from failing, we release the lock, but as there is a",
"# problem, we still keep the original exception.",
"# This way, only 1 test would fail.",
"while",
"get_lock",
".",
"n_lock",
">",
"0",
":",
"release_lock",
"(",
")",
"raise",
"return",
"unique_id"
] | Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned. | [
"Refresh",
"an",
"existing",
"lock",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L95-L118 | train |
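A brief sketch of `refresh_lock` in isolation; the temporary directory stands in for the compilation directory that would normally already hold a lock.

import os
import tempfile
from fuel.utils.lock import refresh_lock

lock_dir = tempfile.mkdtemp()               # placeholder for an existing lock directory
lock_file = os.path.join(lock_dir, 'lock')
new_id = refresh_lock(lock_file)            # rewrites the file with a fresh unique id
print(new_id)                               # '<pid>_<ten random digits>_<hostname>'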
mila-iqia/fuel | fuel/utils/lock.py | get_lock | def get_lock(lock_dir, **kw):
"""Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time.
"""
if not hasattr(get_lock, 'n_lock'):
# Initialization.
get_lock.n_lock = 0
if not hasattr(get_lock, 'lock_is_enabled'):
# Enable lock by default.
get_lock.lock_is_enabled = True
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
else:
if lock_dir != get_lock.lock_dir:
# Compilation directory has changed.
# First ensure all old locks were released.
assert get_lock.n_lock == 0
# Update members for new compilation directory.
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
if get_lock.lock_is_enabled:
# Only really try to acquire the lock if we do not have it already.
if get_lock.n_lock == 0:
lock(get_lock.lock_dir, **kw)
atexit.register(Unlocker.unlock, get_lock.unlocker)
# Store time at which the lock was set.
get_lock.start_time = time.time()
else:
# Check whether we need to 'refresh' the lock. We do this
# every 'config.compile.timeout / 2' seconds to ensure
# no one else tries to override our lock after their
# 'config.compile.timeout' timeout period.
if get_lock.start_time is None:
# This should not happen. So if it does happen, clean up
# the lock state and raise an error.
while get_lock.n_lock > 0:
release_lock()
raise Exception(
"For some unknow reason, the lock was already taken,"
" but no start time was registered.")
now = time.time()
if now - get_lock.start_time > TIMEOUT:
lockpath = os.path.join(get_lock.lock_dir, 'lock')
logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath)
get_lock.start_time = now
get_lock.n_lock += 1 | python | def get_lock(lock_dir, **kw):
"""Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time.
"""
if not hasattr(get_lock, 'n_lock'):
# Initialization.
get_lock.n_lock = 0
if not hasattr(get_lock, 'lock_is_enabled'):
# Enable lock by default.
get_lock.lock_is_enabled = True
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
else:
if lock_dir != get_lock.lock_dir:
# Compilation directory has changed.
# First ensure all old locks were released.
assert get_lock.n_lock == 0
# Update members for new compilation directory.
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
if get_lock.lock_is_enabled:
# Only really try to acquire the lock if we do not have it already.
if get_lock.n_lock == 0:
lock(get_lock.lock_dir, **kw)
atexit.register(Unlocker.unlock, get_lock.unlocker)
# Store time at which the lock was set.
get_lock.start_time = time.time()
else:
# Check whether we need to 'refresh' the lock. We do this
# every 'config.compile.timeout / 2' seconds to ensure
# no one else tries to override our lock after their
# 'config.compile.timeout' timeout period.
if get_lock.start_time is None:
# This should not happen. So if it does happen, clean up
# the lock state and raise an error.
while get_lock.n_lock > 0:
release_lock()
raise Exception(
"For some unknow reason, the lock was already taken,"
" but no start time was registered.")
now = time.time()
if now - get_lock.start_time > TIMEOUT:
lockpath = os.path.join(get_lock.lock_dir, 'lock')
logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath)
get_lock.start_time = now
get_lock.n_lock += 1 | [
"def",
"get_lock",
"(",
"lock_dir",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"hasattr",
"(",
"get_lock",
",",
"'n_lock'",
")",
":",
"# Initialization.",
"get_lock",
".",
"n_lock",
"=",
"0",
"if",
"not",
"hasattr",
"(",
"get_lock",
",",
"'lock_is_enabled'",
")",
":",
"# Enable lock by default.",
"get_lock",
".",
"lock_is_enabled",
"=",
"True",
"get_lock",
".",
"lock_dir",
"=",
"lock_dir",
"get_lock",
".",
"unlocker",
"=",
"Unlocker",
"(",
"get_lock",
".",
"lock_dir",
")",
"else",
":",
"if",
"lock_dir",
"!=",
"get_lock",
".",
"lock_dir",
":",
"# Compilation directory has changed.",
"# First ensure all old locks were released.",
"assert",
"get_lock",
".",
"n_lock",
"==",
"0",
"# Update members for new compilation directory.",
"get_lock",
".",
"lock_dir",
"=",
"lock_dir",
"get_lock",
".",
"unlocker",
"=",
"Unlocker",
"(",
"get_lock",
".",
"lock_dir",
")",
"if",
"get_lock",
".",
"lock_is_enabled",
":",
"# Only really try to acquire the lock if we do not have it already.",
"if",
"get_lock",
".",
"n_lock",
"==",
"0",
":",
"lock",
"(",
"get_lock",
".",
"lock_dir",
",",
"*",
"*",
"kw",
")",
"atexit",
".",
"register",
"(",
"Unlocker",
".",
"unlock",
",",
"get_lock",
".",
"unlocker",
")",
"# Store time at which the lock was set.",
"get_lock",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"else",
":",
"# Check whether we need to 'refresh' the lock. We do this",
"# every 'config.compile.timeout / 2' seconds to ensure",
"# no one else tries to override our lock after their",
"# 'config.compile.timeout' timeout period.",
"if",
"get_lock",
".",
"start_time",
"is",
"None",
":",
"# This should not happen. So if this happen, clean up",
"# the lock state and raise an error.",
"while",
"get_lock",
".",
"n_lock",
">",
"0",
":",
"release_lock",
"(",
")",
"raise",
"Exception",
"(",
"\"For some unknow reason, the lock was already taken,\"",
"\" but no start time was registered.\"",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"now",
"-",
"get_lock",
".",
"start_time",
">",
"TIMEOUT",
":",
"lockpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_lock",
".",
"lock_dir",
",",
"'lock'",
")",
"logger",
".",
"info",
"(",
"'Refreshing lock %s'",
",",
"str",
"(",
"lockpath",
")",
")",
"refresh_lock",
"(",
"lockpath",
")",
"get_lock",
".",
"start_time",
"=",
"now",
"get_lock",
".",
"n_lock",
"+=",
"1"
] | Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time. | [
"Obtain",
"lock",
"on",
"compilation",
"directory",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L297-L356 | train |
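A sketch pairing `get_lock` with `release_lock` (documented in the next record); the lock is re-entrant, so each `get_lock` call should be matched by a `release_lock` call. The directory is a placeholder.

import tempfile
from fuel.utils.lock import get_lock, release_lock

lock_dir = tempfile.mkdtemp()   # placeholder for the directory to protect
get_lock(lock_dir)              # acquires the lock (waits if another process holds it)
get_lock(lock_dir)              # nested call only increments an internal counter
try:
    pass                        # ... work that needs exclusive access ...
finally:
    release_lock()              # one release per get_lock call
    release_lock()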
mila-iqia/fuel | fuel/utils/lock.py | release_lock | def release_lock():
"""Release lock on compilation directory."""
get_lock.n_lock -= 1
assert get_lock.n_lock >= 0
# Only really release lock once all lock requests have ended.
if get_lock.lock_is_enabled and get_lock.n_lock == 0:
get_lock.start_time = None
get_lock.unlocker.unlock() | python | def release_lock():
"""Release lock on compilation directory."""
get_lock.n_lock -= 1
assert get_lock.n_lock >= 0
# Only really release lock once all lock requests have ended.
if get_lock.lock_is_enabled and get_lock.n_lock == 0:
get_lock.start_time = None
get_lock.unlocker.unlock() | [
"def",
"release_lock",
"(",
")",
":",
"get_lock",
".",
"n_lock",
"-=",
"1",
"assert",
"get_lock",
".",
"n_lock",
">=",
"0",
"# Only really release lock once all lock requests have ended.",
"if",
"get_lock",
".",
"lock_is_enabled",
"and",
"get_lock",
".",
"n_lock",
"==",
"0",
":",
"get_lock",
".",
"start_time",
"=",
"None",
"get_lock",
".",
"unlocker",
".",
"unlock",
"(",
")"
] | Release lock on compilation directory. | [
"Release",
"lock",
"on",
"compilation",
"directory",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L359-L366 | train |
mila-iqia/fuel | fuel/utils/lock.py | release_readlock | def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name) | python | def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name) | [
"def",
"release_readlock",
"(",
"lockdir_name",
")",
":",
"# Make sure the lock still exists before deleting it",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"lockdir_name",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"lockdir_name",
")",
":",
"os",
".",
"rmdir",
"(",
"lockdir_name",
")"
] | Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock | [
"Release",
"a",
"previously",
"obtained",
"readlock",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L392-L403 | train |
mila-iqia/fuel | fuel/utils/lock.py | get_readlock | def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name) | python | def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name) | [
"def",
"get_readlock",
"(",
"pid",
",",
"path",
")",
":",
"timestamp",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1e6",
")",
"lockdir_name",
"=",
"\"%s.readlock.%i.%i\"",
"%",
"(",
"path",
",",
"pid",
",",
"timestamp",
")",
"os",
".",
"mkdir",
"(",
"lockdir_name",
")",
"# Register function to release the readlock at the end of the script",
"atexit",
".",
"register",
"(",
"release_readlock",
",",
"lockdir_name",
"=",
"lockdir_name",
")"
] | Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock | [
"Obtain",
"a",
"readlock",
"on",
"a",
"file",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L406-L420 | train |
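A sketch combining `get_readlock` with `release_readlock` from two records above. `get_readlock` already registers the release with `atexit`, so the explicit call is only needed to drop the lock early; the file path is a placeholder.

import glob
import os
import tempfile
from fuel.utils.lock import get_readlock, release_readlock

path = os.path.join(tempfile.mkdtemp(), 'data.hdf5')    # placeholder data file
get_readlock(os.getpid(), path)     # creates '<path>.readlock.<pid>.<timestamp>/'
# ... read from the file ...
release_readlock(glob.glob(path + '.readlock.*')[0])    # or let atexit clean it up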
mila-iqia/fuel | fuel/utils/lock.py | Unlocker.unlock | def unlock(self):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
try:
self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))
except Exception:
pass
try:
self.os.rmdir(self.tmp_dir)
except Exception:
pass | python | def unlock(self):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
try:
self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))
except Exception:
pass
try:
self.os.rmdir(self.tmp_dir)
except Exception:
pass | [
"def",
"unlock",
"(",
"self",
")",
":",
"# If any error occurs, we assume this is because someone else tried to",
"# unlock this directory at the same time.",
"# Note that it is important not to have both remove statements within",
"# the same try/except block. The reason is that while the attempt to",
"# remove the file may fail (e.g. because for some reason this file does",
"# not exist), we still want to try and remove the directory.",
"try",
":",
"self",
".",
"os",
".",
"remove",
"(",
"self",
".",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"tmp_dir",
",",
"'lock'",
")",
")",
"except",
"Exception",
":",
"pass",
"try",
":",
"self",
".",
"os",
".",
"rmdir",
"(",
"self",
".",
"tmp_dir",
")",
"except",
"Exception",
":",
"pass"
] | Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit). | [
"Remove",
"current",
"lock",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L69-L92 | train |
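`Unlocker` objects are normally created for you inside `get_lock`, but a standalone sketch looks like this (assuming, as the method body suggests, that the constructor simply records the lock directory):

import tempfile
from fuel.utils.lock import Unlocker

tmp_dir = tempfile.mkdtemp()   # placeholder lock directory
unlocker = Unlocker(tmp_dir)
unlocker.unlock()              # removes '<tmp_dir>/lock' and the directory; never raises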
mila-iqia/fuel | fuel/downloaders/base.py | filename_from_url | def filename_from_url(url, path=None):
"""Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse.
"""
r = requests.get(url, stream=True)
if 'Content-Disposition' in r.headers:
filename = re.findall(r'filename=([^;]+)',
r.headers['Content-Disposition'])[0].strip('"\"')
else:
filename = os.path.basename(urllib.parse.urlparse(url).path)
return filename | python | def filename_from_url(url, path=None):
"""Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse.
"""
r = requests.get(url, stream=True)
if 'Content-Disposition' in r.headers:
filename = re.findall(r'filename=([^;]+)',
r.headers['Content-Disposition'])[0].strip('"\"')
else:
filename = os.path.basename(urllib.parse.urlparse(url).path)
return filename | [
"def",
"filename_from_url",
"(",
"url",
",",
"path",
"=",
"None",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"'Content-Disposition'",
"in",
"r",
".",
"headers",
":",
"filename",
"=",
"re",
".",
"findall",
"(",
"r'filename=([^;]+)'",
",",
"r",
".",
"headers",
"[",
"'Content-Disposition'",
"]",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"\\\"'",
")",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
"return",
"filename"
] | Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse. | [
"Parses",
"a",
"URL",
"to",
"determine",
"a",
"file",
"name",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L39-L54 | train |
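A quick hedged example; the URL is illustrative only, and the unused `path` parameter can be left at its default.

from fuel.downloaders.base import filename_from_url

# Falls back to the last component of the URL path when the server
# sends no Content-Disposition header.
print(filename_from_url('http://example.com/files/mnist.pkl.gz'))   # 'mnist.pkl.gz'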
mila-iqia/fuel | fuel/downloaders/base.py | download | def download(url, file_handle, chunk_size=1024):
"""Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL.
"""
r = requests.get(url, stream=True)
total_length = r.headers.get('content-length')
if total_length is None:
maxval = UnknownLength
else:
maxval = int(total_length)
name = file_handle.name
with progress_bar(name=name, maxval=maxval) as bar:
for i, chunk in enumerate(r.iter_content(chunk_size)):
if total_length:
bar.update(i * chunk_size)
file_handle.write(chunk) | python | def download(url, file_handle, chunk_size=1024):
"""Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL.
"""
r = requests.get(url, stream=True)
total_length = r.headers.get('content-length')
if total_length is None:
maxval = UnknownLength
else:
maxval = int(total_length)
name = file_handle.name
with progress_bar(name=name, maxval=maxval) as bar:
for i, chunk in enumerate(r.iter_content(chunk_size)):
if total_length:
bar.update(i * chunk_size)
file_handle.write(chunk) | [
"def",
"download",
"(",
"url",
",",
"file_handle",
",",
"chunk_size",
"=",
"1024",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"total_length",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"'content-length'",
")",
"if",
"total_length",
"is",
"None",
":",
"maxval",
"=",
"UnknownLength",
"else",
":",
"maxval",
"=",
"int",
"(",
"total_length",
")",
"name",
"=",
"file_handle",
".",
"name",
"with",
"progress_bar",
"(",
"name",
"=",
"name",
",",
"maxval",
"=",
"maxval",
")",
"as",
"bar",
":",
"for",
"i",
",",
"chunk",
"in",
"enumerate",
"(",
"r",
".",
"iter_content",
"(",
"chunk_size",
")",
")",
":",
"if",
"total_length",
":",
"bar",
".",
"update",
"(",
"i",
"*",
"chunk_size",
")",
"file_handle",
".",
"write",
"(",
"chunk",
")"
] | Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL. | [
"Downloads",
"a",
"given",
"URL",
"to",
"a",
"specific",
"file",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L57-L79 | train |
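A sketch of streaming a URL into a local file with the progress bar; the URL and file name are placeholders.

from fuel.downloaders.base import download

url = 'http://example.com/files/mnist.pkl.gz'   # placeholder URL
with open('mnist.pkl.gz', 'wb') as file_handle:
    download(url, file_handle)                  # streams the response in 1024-byte chunks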
mila-iqia/fuel | fuel/downloaders/base.py | default_downloader | def default_downloader(directory, urls, filenames, url_prefix=None,
clear=False):
"""Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them.
"""
# Parse file names from URL if not provided
for i, url in enumerate(urls):
filename = filenames[i]
if not filename:
filename = filename_from_url(url)
if not filename:
raise ValueError("no filename available for URL '{}'".format(url))
filenames[i] = filename
files = [os.path.join(directory, f) for f in filenames]
if clear:
for f in files:
if os.path.isfile(f):
os.remove(f)
else:
print('Downloading ' + ', '.join(filenames) + '\n')
ensure_directory_exists(directory)
for url, f, n in zip(urls, files, filenames):
if not url:
if url_prefix is None:
raise NeedURLPrefix
url = url_prefix + n
with open(f, 'wb') as file_handle:
download(url, file_handle) | python | def default_downloader(directory, urls, filenames, url_prefix=None,
clear=False):
"""Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them.
"""
# Parse file names from URL if not provided
for i, url in enumerate(urls):
filename = filenames[i]
if not filename:
filename = filename_from_url(url)
if not filename:
raise ValueError("no filename available for URL '{}'".format(url))
filenames[i] = filename
files = [os.path.join(directory, f) for f in filenames]
if clear:
for f in files:
if os.path.isfile(f):
os.remove(f)
else:
print('Downloading ' + ', '.join(filenames) + '\n')
ensure_directory_exists(directory)
for url, f, n in zip(urls, files, filenames):
if not url:
if url_prefix is None:
raise NeedURLPrefix
url = url_prefix + n
with open(f, 'wb') as file_handle:
download(url, file_handle) | [
"def",
"default_downloader",
"(",
"directory",
",",
"urls",
",",
"filenames",
",",
"url_prefix",
"=",
"None",
",",
"clear",
"=",
"False",
")",
":",
"# Parse file names from URL if not provided",
"for",
"i",
",",
"url",
"in",
"enumerate",
"(",
"urls",
")",
":",
"filename",
"=",
"filenames",
"[",
"i",
"]",
"if",
"not",
"filename",
":",
"filename",
"=",
"filename_from_url",
"(",
"url",
")",
"if",
"not",
"filename",
":",
"raise",
"ValueError",
"(",
"\"no filename available for URL '{}'\"",
".",
"format",
"(",
"url",
")",
")",
"filenames",
"[",
"i",
"]",
"=",
"filename",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"if",
"clear",
":",
"for",
"f",
"in",
"files",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"os",
".",
"remove",
"(",
"f",
")",
"else",
":",
"print",
"(",
"'Downloading '",
"+",
"', '",
".",
"join",
"(",
"filenames",
")",
"+",
"'\\n'",
")",
"ensure_directory_exists",
"(",
"directory",
")",
"for",
"url",
",",
"f",
",",
"n",
"in",
"zip",
"(",
"urls",
",",
"files",
",",
"filenames",
")",
":",
"if",
"not",
"url",
":",
"if",
"url_prefix",
"is",
"None",
":",
"raise",
"NeedURLPrefix",
"url",
"=",
"url_prefix",
"+",
"n",
"with",
"open",
"(",
"f",
",",
"'wb'",
")",
"as",
"file_handle",
":",
"download",
"(",
"url",
",",
"file_handle",
")"
] | Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them. | [
"Downloads",
"or",
"clears",
"files",
"from",
"URLs",
"and",
"filenames",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L96-L140 | train |
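`default_downloader` is normally wired to a `fuel-download` subparser, but it can be called directly; directory, URLs and file names below are placeholders.

from fuel.downloaders.base import default_downloader

default_downloader(
    directory='/tmp/fuel_data',                      # where the files are saved
    urls=['http://example.com/train.amat', None],    # None entries use url_prefix + filename
    filenames=['train.amat', 'test.amat'],
    url_prefix='http://example.com/')
# Calling again with clear=True deletes the same files instead of downloading them.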
mila-iqia/fuel | fuel/utils/__init__.py | find_in_data_path | def find_in_data_path(filename):
"""Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path.
"""
for path in config.data_path:
path = os.path.expanduser(os.path.expandvars(path))
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
return file_path
raise IOError("{} not found in Fuel's data path".format(filename)) | python | def find_in_data_path(filename):
"""Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path.
"""
for path in config.data_path:
path = os.path.expanduser(os.path.expandvars(path))
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
return file_path
raise IOError("{} not found in Fuel's data path".format(filename)) | [
"def",
"find_in_data_path",
"(",
"filename",
")",
":",
"for",
"path",
"in",
"config",
".",
"data_path",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"path",
")",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"return",
"file_path",
"raise",
"IOError",
"(",
"\"{} not found in Fuel's data path\"",
".",
"format",
"(",
"filename",
")",
")"
] | Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path. | [
"Searches",
"for",
"a",
"file",
"within",
"Fuel",
"s",
"data",
"path",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L406-L434 | train |
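A sketch, assuming Fuel's data path has been configured (for example through `.fuelrc` or the `FUEL_DATA_PATH` environment variable) and that an `mnist.hdf5` file may or may not be present.

from fuel.utils import find_in_data_path

try:
    path = find_in_data_path('mnist.hdf5')   # first directory containing the file wins
    print('Loading from', path)
except IOError:
    print('mnist.hdf5 is not on the Fuel data path')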
mila-iqia/fuel | fuel/utils/__init__.py | lazy_property_factory | def lazy_property_factory(lazy_property):
"""Create properties that perform lazy loading of attributes."""
def lazy_property_getter(self):
if not hasattr(self, '_' + lazy_property):
self.load()
if not hasattr(self, '_' + lazy_property):
raise ValueError("{} wasn't loaded".format(lazy_property))
return getattr(self, '_' + lazy_property)
def lazy_property_setter(self, value):
setattr(self, '_' + lazy_property, value)
return lazy_property_getter, lazy_property_setter | python | def lazy_property_factory(lazy_property):
"""Create properties that perform lazy loading of attributes."""
def lazy_property_getter(self):
if not hasattr(self, '_' + lazy_property):
self.load()
if not hasattr(self, '_' + lazy_property):
raise ValueError("{} wasn't loaded".format(lazy_property))
return getattr(self, '_' + lazy_property)
def lazy_property_setter(self, value):
setattr(self, '_' + lazy_property, value)
return lazy_property_getter, lazy_property_setter | [
"def",
"lazy_property_factory",
"(",
"lazy_property",
")",
":",
"def",
"lazy_property_getter",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
":",
"self",
".",
"load",
"(",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
":",
"raise",
"ValueError",
"(",
"\"{} wasn't loaded\"",
".",
"format",
"(",
"lazy_property",
")",
")",
"return",
"getattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
"def",
"lazy_property_setter",
"(",
"self",
",",
"value",
")",
":",
"setattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
",",
"value",
")",
"return",
"lazy_property_getter",
",",
"lazy_property_setter"
] | Create properties that perform lazy loading of attributes. | [
"Create",
"properties",
"that",
"perform",
"lazy",
"loading",
"of",
"attributes",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L437-L449 | train |
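This helper returns a getter/setter pair meant to be wrapped in `property`, which is exactly how `do_not_pickle_attributes` (next record) uses it. A tiny standalone sketch:

from fuel.utils import lazy_property_factory

class Lazy(object):
    # 'data' is loaded on first access by calling self.load()
    data = property(*lazy_property_factory('data'))

    def load(self):
        self.data = list(range(5))   # stand-in for an expensive load

obj = Lazy()
print(obj.data)   # triggers load(): [0, 1, 2, 3, 4]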
mila-iqia/fuel | fuel/utils/__init__.py | do_not_pickle_attributes | def do_not_pickle_attributes(*lazy_properties):
r"""Decorator to assign non-pickable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
To make sure that attributes are not serialized with the dataset and are
lazily reloaded after deserialization by :meth:`load` in the wrapped
class, use the decorator with the names of the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1]
"""
def wrap_class(cls):
if not hasattr(cls, 'load'):
raise ValueError("no load method implemented")
# Attach the lazy loading properties to the class
for lazy_property in lazy_properties:
setattr(cls, lazy_property,
property(*lazy_property_factory(lazy_property)))
# Delete the values of lazy properties when serializing
if not hasattr(cls, '__getstate__'):
def __getstate__(self):
serializable_state = self.__dict__.copy()
for lazy_property in lazy_properties:
attr = serializable_state.get('_' + lazy_property)
# Iterators would lose their state
if isinstance(attr, collections.Iterator):
raise ValueError("Iterators can't be lazy loaded")
serializable_state.pop('_' + lazy_property, None)
return serializable_state
setattr(cls, '__getstate__', __getstate__)
return cls
return wrap_class | python | def do_not_pickle_attributes(*lazy_properties):
r"""Decorator to assign non-pickable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
To make sure that attributes are not serialized with the dataset and are
lazily reloaded after deserialization by :meth:`load` in the wrapped
class, use the decorator with the names of the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1]
"""
def wrap_class(cls):
if not hasattr(cls, 'load'):
raise ValueError("no load method implemented")
# Attach the lazy loading properties to the class
for lazy_property in lazy_properties:
setattr(cls, lazy_property,
property(*lazy_property_factory(lazy_property)))
# Delete the values of lazy properties when serializing
if not hasattr(cls, '__getstate__'):
def __getstate__(self):
serializable_state = self.__dict__.copy()
for lazy_property in lazy_properties:
attr = serializable_state.get('_' + lazy_property)
# Iterators would lose their state
if isinstance(attr, collections.Iterator):
raise ValueError("Iterators can't be lazy loaded")
serializable_state.pop('_' + lazy_property, None)
return serializable_state
setattr(cls, '__getstate__', __getstate__)
return cls
return wrap_class | [
"def",
"do_not_pickle_attributes",
"(",
"*",
"lazy_properties",
")",
":",
"def",
"wrap_class",
"(",
"cls",
")",
":",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"'load'",
")",
":",
"raise",
"ValueError",
"(",
"\"no load method implemented\"",
")",
"# Attach the lazy loading properties to the class",
"for",
"lazy_property",
"in",
"lazy_properties",
":",
"setattr",
"(",
"cls",
",",
"lazy_property",
",",
"property",
"(",
"*",
"lazy_property_factory",
"(",
"lazy_property",
")",
")",
")",
"# Delete the values of lazy properties when serializing",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"'__getstate__'",
")",
":",
"def",
"__getstate__",
"(",
"self",
")",
":",
"serializable_state",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"for",
"lazy_property",
"in",
"lazy_properties",
":",
"attr",
"=",
"serializable_state",
".",
"get",
"(",
"'_'",
"+",
"lazy_property",
")",
"# Iterators would lose their state",
"if",
"isinstance",
"(",
"attr",
",",
"collections",
".",
"Iterator",
")",
":",
"raise",
"ValueError",
"(",
"\"Iterators can't be lazy loaded\"",
")",
"serializable_state",
".",
"pop",
"(",
"'_'",
"+",
"lazy_property",
",",
"None",
")",
"return",
"serializable_state",
"setattr",
"(",
"cls",
",",
"'__getstate__'",
",",
"__getstate__",
")",
"return",
"cls",
"return",
"wrap_class"
] | r"""Decorator to assign non-pickable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
To make sure that attributes are not serialized with the dataset and are
lazily reloaded after deserialization by :meth:`load` in the wrapped
class, use the decorator with the names of the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1] | [
"r",
"Decorator",
"to",
"assign",
"non",
"-",
"pickable",
"properties",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L452-L513 | train |
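Building on the docstring's own example, a hedged sketch of the round trip the decorator enables: the lazy attribute is dropped when pickling and rebuilt by `load` after unpickling. A plain class is used instead of a `Dataset` subclass purely to keep the sketch self-contained.

import pickle
from fuel.utils import do_not_pickle_attributes

@do_not_pickle_attributes('data')
class Wrapper(object):
    def load(self):
        self.data = list(range(10))   # rebuilt lazily, never stored in the pickle

restored = pickle.loads(pickle.dumps(Wrapper()))
print(restored.data[:3])   # [0, 1, 2], reloaded on first access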
mila-iqia/fuel | fuel/utils/__init__.py | Subset.sorted_fancy_indexing | def sorted_fancy_indexing(indexable, request):
"""Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
"""
if len(request) > 1:
indices = numpy.argsort(request)
data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
dtype=indexable.dtype)
data[indices] = indexable[numpy.array(request)[indices], ...]
else:
data = indexable[request]
return data | python | def sorted_fancy_indexing(indexable, request):
"""Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
"""
if len(request) > 1:
indices = numpy.argsort(request)
data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
dtype=indexable.dtype)
data[indices] = indexable[numpy.array(request)[indices], ...]
else:
data = indexable[request]
return data | [
"def",
"sorted_fancy_indexing",
"(",
"indexable",
",",
"request",
")",
":",
"if",
"len",
"(",
"request",
")",
">",
"1",
":",
"indices",
"=",
"numpy",
".",
"argsort",
"(",
"request",
")",
"data",
"=",
"numpy",
".",
"empty",
"(",
"shape",
"=",
"(",
"len",
"(",
"request",
")",
",",
")",
"+",
"indexable",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"indexable",
".",
"dtype",
")",
"data",
"[",
"indices",
"]",
"=",
"indexable",
"[",
"numpy",
".",
"array",
"(",
"request",
")",
"[",
"indices",
"]",
",",
"...",
"]",
"else",
":",
"data",
"=",
"indexable",
"[",
"request",
"]",
"return",
"data"
] | Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on. | [
"Safe",
"fancy",
"indexing",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L175-L200 | train |
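A small sketch with an in-memory array standing in for an h5py dataset (which only accepts index lists in sorted order):

import numpy
from fuel.utils import Subset

data = numpy.arange(10) * 10
print(Subset.sorted_fancy_indexing(data, [7, 2, 5]))   # [70 20 50], original request order kept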
mila-iqia/fuel | fuel/utils/__init__.py | Subset.slice_to_numerical_args | def slice_to_numerical_args(slice_, num_examples):
"""Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`.
"""
start = slice_.start if slice_.start is not None else 0
stop = slice_.stop if slice_.stop is not None else num_examples
step = slice_.step if slice_.step is not None else 1
return start, stop, step | python | def slice_to_numerical_args(slice_, num_examples):
"""Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`.
"""
start = slice_.start if slice_.start is not None else 0
stop = slice_.stop if slice_.stop is not None else num_examples
step = slice_.step if slice_.step is not None else 1
return start, stop, step | [
"def",
"slice_to_numerical_args",
"(",
"slice_",
",",
"num_examples",
")",
":",
"start",
"=",
"slice_",
".",
"start",
"if",
"slice_",
".",
"start",
"is",
"not",
"None",
"else",
"0",
"stop",
"=",
"slice_",
".",
"stop",
"if",
"slice_",
".",
"stop",
"is",
"not",
"None",
"else",
"num_examples",
"step",
"=",
"slice_",
".",
"step",
"if",
"slice_",
".",
"step",
"is",
"not",
"None",
"else",
"1",
"return",
"start",
",",
"stop",
",",
"step"
] | Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`. | [
"Translate",
"a",
"slice",
"s",
"attributes",
"into",
"numerical",
"attributes",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L203-L219 | train |
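A two-line sketch showing how open-ended slices get concrete bounds:

from fuel.utils import Subset

print(Subset.slice_to_numerical_args(slice(None, None, 2), 100))   # (0, 100, 2)
print(Subset.slice_to_numerical_args(slice(10, None), 100))        # (10, 100, 1)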
mila-iqia/fuel | fuel/utils/__init__.py | Subset.get_list_representation | def get_list_representation(self):
"""Returns this subset's representation as a list of indices."""
if self.is_list:
return self.list_or_slice
else:
return self[list(range(self.num_examples))] | python | def get_list_representation(self):
"""Returns this subset's representation as a list of indices."""
if self.is_list:
return self.list_or_slice
else:
return self[list(range(self.num_examples))] | [
"def",
"get_list_representation",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_list",
":",
"return",
"self",
".",
"list_or_slice",
"else",
":",
"return",
"self",
"[",
"list",
"(",
"range",
"(",
"self",
".",
"num_examples",
")",
")",
"]"
] | Returns this subset's representation as a list of indices. | [
"Returns",
"this",
"subset",
"s",
"representation",
"as",
"a",
"list",
"of",
"indices",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L221-L226 | train |
mila-iqia/fuel | fuel/utils/__init__.py | Subset.index_within_subset | def index_within_subset(self, indexable, subset_request,
sort_indices=False):
"""Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`.
"""
# Translate the request within the context of this subset to a
# request to the indexable object
if isinstance(subset_request, numbers.Integral):
request, = self[[subset_request]]
else:
request = self[subset_request]
# Integer or slice requests can be processed directly.
if isinstance(request, numbers.Integral) or hasattr(request, 'step'):
return indexable[request]
# If requested, we do fancy indexing in sorted order and reshuffle the
# result back in the original order.
if sort_indices:
return self.sorted_fancy_indexing(indexable, request)
# If the indexable supports fancy indexing (numpy array, HDF5 dataset),
# the request can be processed directly.
if isinstance(indexable, (numpy.ndarray, h5py.Dataset)):
return indexable[request]
# Anything else (e.g. lists) isn't considered to support fancy
# indexing, so Subset does it manually.
return iterable_fancy_indexing(indexable, request) | python | def index_within_subset(self, indexable, subset_request,
sort_indices=False):
"""Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`.
"""
# Translate the request within the context of this subset to a
# request to the indexable object
if isinstance(subset_request, numbers.Integral):
request, = self[[subset_request]]
else:
request = self[subset_request]
# Integer or slice requests can be processed directly.
if isinstance(request, numbers.Integral) or hasattr(request, 'step'):
return indexable[request]
# If requested, we do fancy indexing in sorted order and reshuffle the
# result back in the original order.
if sort_indices:
return self.sorted_fancy_indexing(indexable, request)
# If the indexable supports fancy indexing (numpy array, HDF5 dataset),
# the request can be processed directly.
if isinstance(indexable, (numpy.ndarray, h5py.Dataset)):
return indexable[request]
# Anything else (e.g. lists) isn't considered to support fancy
# indexing, so Subset does it manually.
return iterable_fancy_indexing(indexable, request) | [
"def",
"index_within_subset",
"(",
"self",
",",
"indexable",
",",
"subset_request",
",",
"sort_indices",
"=",
"False",
")",
":",
"# Translate the request within the context of this subset to a",
"# request to the indexable object",
"if",
"isinstance",
"(",
"subset_request",
",",
"numbers",
".",
"Integral",
")",
":",
"request",
",",
"=",
"self",
"[",
"[",
"subset_request",
"]",
"]",
"else",
":",
"request",
"=",
"self",
"[",
"subset_request",
"]",
"# Integer or slice requests can be processed directly.",
"if",
"isinstance",
"(",
"request",
",",
"numbers",
".",
"Integral",
")",
"or",
"hasattr",
"(",
"request",
",",
"'step'",
")",
":",
"return",
"indexable",
"[",
"request",
"]",
"# If requested, we do fancy indexing in sorted order and reshuffle the",
"# result back in the original order.",
"if",
"sort_indices",
":",
"return",
"self",
".",
"sorted_fancy_indexing",
"(",
"indexable",
",",
"request",
")",
"# If the indexable supports fancy indexing (numpy array, HDF5 dataset),",
"# the request can be processed directly.",
"if",
"isinstance",
"(",
"indexable",
",",
"(",
"numpy",
".",
"ndarray",
",",
"h5py",
".",
"Dataset",
")",
")",
":",
"return",
"indexable",
"[",
"request",
"]",
"# Anything else (e.g. lists) isn't considered to support fancy",
"# indexing, so Subset does it manually.",
"return",
"iterable_fancy_indexing",
"(",
"indexable",
",",
"request",
")"
] | Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`. | [
"Index",
"an",
"indexable",
"object",
"within",
"the",
"context",
"of",
"this",
"subset",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L228-L266 | train |
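A hedged sketch, assuming the `Subset` constructor takes the list (or slice) of indices followed by the number of examples in the underlying container, as the attributes used above suggest:

import numpy
from fuel.utils import Subset

data = numpy.arange(100) * 2
subset = Subset([5, 1, 42, 7], 100)               # assumed signature: (list_or_slice, num_examples)
# Positions 0 and 2 *within the subset* map to examples 5 and 42.
print(subset.index_within_subset(data, [0, 2]))   # [10 84]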
mila-iqia/fuel | fuel/utils/__init__.py | Subset.num_examples | def num_examples(self):
"""The number of examples this subset spans."""
if self.is_list:
return len(self.list_or_slice)
else:
start, stop, step = self.slice_to_numerical_args(
self.list_or_slice, self.original_num_examples)
return stop - start | python | def num_examples(self):
"""The number of examples this subset spans."""
if self.is_list:
return len(self.list_or_slice)
else:
start, stop, step = self.slice_to_numerical_args(
self.list_or_slice, self.original_num_examples)
return stop - start | [
"def",
"num_examples",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_list",
":",
"return",
"len",
"(",
"self",
".",
"list_or_slice",
")",
"else",
":",
"start",
",",
"stop",
",",
"step",
"=",
"self",
".",
"slice_to_numerical_args",
"(",
"self",
".",
"list_or_slice",
",",
"self",
".",
"original_num_examples",
")",
"return",
"stop",
"-",
"start"
] | The number of examples this subset spans. | [
"The",
"number",
"of",
"examples",
"this",
"subset",
"spans",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L290-L297 | train |
mila-iqia/fuel | fuel/streams.py | DataStream.get_epoch_iterator | def get_epoch_iterator(self, **kwargs):
"""Get an epoch iterator for the data stream."""
if not self._fresh_state:
self.next_epoch()
else:
self._fresh_state = False
return super(DataStream, self).get_epoch_iterator(**kwargs) | python | def get_epoch_iterator(self, **kwargs):
"""Get an epoch iterator for the data stream."""
if not self._fresh_state:
self.next_epoch()
else:
self._fresh_state = False
return super(DataStream, self).get_epoch_iterator(**kwargs) | [
"def",
"get_epoch_iterator",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_fresh_state",
":",
"self",
".",
"next_epoch",
"(",
")",
"else",
":",
"self",
".",
"_fresh_state",
"=",
"False",
"return",
"super",
"(",
"DataStream",
",",
"self",
")",
".",
"get_epoch_iterator",
"(",
"*",
"*",
"kwargs",
")"
] | Get an epoch iterator for the data stream. | [
"Get",
"an",
"epoch",
"iterator",
"for",
"the",
"data",
"stream",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/streams.py#L172-L178 | train |
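A sketch of the usual iteration loop around `DataStream`; the tiny in-memory dataset is a stand-in for a real one.

from fuel.datasets import IterableDataset
from fuel.streams import DataStream

dataset = IterableDataset({'features': [[1], [2], [3]]})
stream = DataStream(dataset)
for batch in stream.get_epoch_iterator(as_dict=True):
    print(batch)   # {'features': [1]}, then {'features': [2]}, ...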
mila-iqia/fuel | fuel/downloaders/binarized_mnist.py | fill_subparser | def fill_subparser(subparser):
"""Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
"""
sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/' +
'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader | python | def fill_subparser(subparser):
"""Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
"""
sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/' +
'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader | [
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"sets",
"=",
"[",
"'train'",
",",
"'valid'",
",",
"'test'",
"]",
"urls",
"=",
"[",
"'http://www.cs.toronto.edu/~larocheh/public/datasets/'",
"+",
"'binarized_mnist/binarized_mnist_{}.amat'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"sets",
"]",
"filenames",
"=",
"[",
"'binarized_mnist_{}.amat'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"sets",
"]",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"urls",
",",
"filenames",
"=",
"filenames",
")",
"return",
"default_downloader"
] | Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command. | [
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"binarized",
"MNIST",
"dataset",
"files",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/binarized_mnist.py#L4-L25 | train |
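A sketch of how a fill_subparser function like the one above is typically wired up; the parser construction here is illustrative, and the returned default_downloader is not invoked:

import argparse
from fuel.downloaders.binarized_mnist import fill_subparser

parser = argparse.ArgumentParser(description='download a Fuel dataset')
subparsers = parser.add_subparsers(dest='which_set')
download_function = fill_subparser(subparsers.add_parser('binarized_mnist'))
args = parser.parse_args(['binarized_mnist'])
print(args.urls)       # the three .amat URLs set as subparser defaults
print(args.filenames)  # ['binarized_mnist_train.amat', ...]
# download_function (default_downloader) would then be called with these values.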
mila-iqia/fuel | fuel/downloaders/youtube_audio.py | download | def download(directory, youtube_id, clear=False):
"""Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
    `youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`.
"""
filepath = os.path.join(directory, '{}.m4a'.format(youtube_id))
if clear:
os.remove(filepath)
return
if not PAFY_AVAILABLE:
raise ImportError("pafy is required to download YouTube videos")
url = 'https://www.youtube.com/watch?v={}'.format(youtube_id)
video = pafy.new(url)
audio = video.getbestaudio()
audio.download(quiet=False, filepath=filepath) | python | def download(directory, youtube_id, clear=False):
"""Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
    `youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`.
"""
filepath = os.path.join(directory, '{}.m4a'.format(youtube_id))
if clear:
os.remove(filepath)
return
if not PAFY_AVAILABLE:
raise ImportError("pafy is required to download YouTube videos")
url = 'https://www.youtube.com/watch?v={}'.format(youtube_id)
video = pafy.new(url)
audio = video.getbestaudio()
audio.download(quiet=False, filepath=filepath) | [
"def",
"download",
"(",
"directory",
",",
"youtube_id",
",",
"clear",
"=",
"False",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.m4a'",
".",
"format",
"(",
"youtube_id",
")",
")",
"if",
"clear",
":",
"os",
".",
"remove",
"(",
"filepath",
")",
"return",
"if",
"not",
"PAFY_AVAILABLE",
":",
"raise",
"ImportError",
"(",
"\"pafy is required to download YouTube videos\"",
")",
"url",
"=",
"'https://www.youtube.com/watch?v={}'",
".",
"format",
"(",
"youtube_id",
")",
"video",
"=",
"pafy",
".",
"new",
"(",
"url",
")",
"audio",
"=",
"video",
".",
"getbestaudio",
"(",
")",
"audio",
".",
"download",
"(",
"quiet",
"=",
"False",
",",
"filepath",
"=",
"filepath",
")"
] | Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
    `youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`. | [
"Download",
"the",
"audio",
"of",
"a",
"YouTube",
"video",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/youtube_audio.py#L10-L38 | train |
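A small usage sketch for the download function above; the directory and the 11-character ID are placeholders, and the optional pafy dependency must be installed:

from fuel.downloaders.youtube_audio import download

# 'XXXXXXXXXXX' stands in for a real 11-character YouTube ID.
download('/tmp', 'XXXXXXXXXXX')              # saves /tmp/XXXXXXXXXXX.m4a
download('/tmp', 'XXXXXXXXXXX', clear=True)  # removes the file again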
mila-iqia/fuel | fuel/downloaders/youtube_audio.py | fill_subparser | def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download | python | def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download | [
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"'--youtube-id'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"(",
"\"The YouTube ID of the video from which to extract audio, \"",
"\"usually an 11-character string.\"",
")",
")",
"return",
"download"
] | Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command. | [
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"audio",
"of",
"YouTube",
"videos",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/youtube_audio.py#L41-L57 | train |
mila-iqia/fuel | fuel/converters/youtube_audio.py | convert_youtube_audio | def convert_youtube_audio(directory, output_directory, youtube_id, channels,
sample, output_filename=None):
"""Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
"""
input_file = os.path.join(directory, '{}.m4a'.format(youtube_id))
wav_filename = '{}.wav'.format(youtube_id)
wav_file = os.path.join(directory, wav_filename)
ffmpeg_not_available = subprocess.call(['ffmpeg', '-version'])
if ffmpeg_not_available:
raise RuntimeError('conversion requires ffmpeg')
subprocess.check_call(['ffmpeg', '-y', '-i', input_file, '-ac',
str(channels), '-ar', str(sample), wav_file],
stdout=sys.stdout)
# Load WAV into array
_, data = scipy.io.wavfile.read(wav_file)
if data.ndim == 1:
data = data[:, None]
data = data[None, :]
# Store in HDF5
if output_filename is None:
output_filename = '{}.hdf5'.format(youtube_id)
output_file = os.path.join(output_directory, output_filename)
with h5py.File(output_file, 'w') as h5file:
fill_hdf5_file(h5file, (('train', 'features', data),))
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'time'
h5file['features'].dims[2].label = 'feature'
return (output_file,) | python | def convert_youtube_audio(directory, output_directory, youtube_id, channels,
sample, output_filename=None):
"""Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
"""
input_file = os.path.join(directory, '{}.m4a'.format(youtube_id))
wav_filename = '{}.wav'.format(youtube_id)
wav_file = os.path.join(directory, wav_filename)
ffmpeg_not_available = subprocess.call(['ffmpeg', '-version'])
if ffmpeg_not_available:
raise RuntimeError('conversion requires ffmpeg')
subprocess.check_call(['ffmpeg', '-y', '-i', input_file, '-ac',
str(channels), '-ar', str(sample), wav_file],
stdout=sys.stdout)
# Load WAV into array
_, data = scipy.io.wavfile.read(wav_file)
if data.ndim == 1:
data = data[:, None]
data = data[None, :]
# Store in HDF5
if output_filename is None:
output_filename = '{}.hdf5'.format(youtube_id)
output_file = os.path.join(output_directory, output_filename)
with h5py.File(output_file, 'w') as h5file:
fill_hdf5_file(h5file, (('train', 'features', data),))
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'time'
h5file['features'].dims[2].label = 'feature'
return (output_file,) | [
"def",
"convert_youtube_audio",
"(",
"directory",
",",
"output_directory",
",",
"youtube_id",
",",
"channels",
",",
"sample",
",",
"output_filename",
"=",
"None",
")",
":",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.m4a'",
".",
"format",
"(",
"youtube_id",
")",
")",
"wav_filename",
"=",
"'{}.wav'",
".",
"format",
"(",
"youtube_id",
")",
"wav_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"wav_filename",
")",
"ffmpeg_not_available",
"=",
"subprocess",
".",
"call",
"(",
"[",
"'ffmpeg'",
",",
"'-version'",
"]",
")",
"if",
"ffmpeg_not_available",
":",
"raise",
"RuntimeError",
"(",
"'conversion requires ffmpeg'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'ffmpeg'",
",",
"'-y'",
",",
"'-i'",
",",
"input_file",
",",
"'-ac'",
",",
"str",
"(",
"channels",
")",
",",
"'-ar'",
",",
"str",
"(",
"sample",
")",
",",
"wav_file",
"]",
",",
"stdout",
"=",
"sys",
".",
"stdout",
")",
"# Load WAV into array",
"_",
",",
"data",
"=",
"scipy",
".",
"io",
".",
"wavfile",
".",
"read",
"(",
"wav_file",
")",
"if",
"data",
".",
"ndim",
"==",
"1",
":",
"data",
"=",
"data",
"[",
":",
",",
"None",
"]",
"data",
"=",
"data",
"[",
"None",
",",
":",
"]",
"# Store in HDF5",
"if",
"output_filename",
"is",
"None",
":",
"output_filename",
"=",
"'{}.hdf5'",
".",
"format",
"(",
"youtube_id",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"with",
"h5py",
".",
"File",
"(",
"output_file",
",",
"'w'",
")",
"as",
"h5file",
":",
"fill_hdf5_file",
"(",
"h5file",
",",
"(",
"(",
"'train'",
",",
"'features'",
",",
"data",
")",
",",
")",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'time'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'feature'",
"return",
"(",
"output_file",
",",
")"
] | Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used. | [
"Converts",
"downloaded",
"YouTube",
"audio",
"to",
"HDF5",
"format",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/youtube_audio.py#L11-L62 | train |
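A usage sketch for the converter above, assuming ffmpeg is on the PATH and the .m4a file has already been downloaded; the paths and the ID are placeholders:

from fuel.converters.youtube_audio import convert_youtube_audio

output_paths = convert_youtube_audio(directory='/tmp', output_directory='/tmp',
                                     youtube_id='XXXXXXXXXXX', channels=1, sample=16000)
print(output_paths)  # ('/tmp/XXXXXXXXXXX.hdf5',)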
mila-iqia/fuel | fuel/converters/youtube_audio.py | fill_subparser | def fill_subparser(subparser):
"""Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
subparser.add_argument(
'--channels', type=int, default=1,
        help=("The number of audio channels to convert to. The default of 1 "
"means audio is converted to mono.")
)
subparser.add_argument(
'--sample', type=int, default=16000,
help=("The sampling rate in Hz. The default of 16000 is "
"significantly downsampled compared to normal WAVE files; "
"pass 44100 for the usual sampling rate.")
)
return convert_youtube_audio | python | def fill_subparser(subparser):
"""Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
subparser.add_argument(
'--channels', type=int, default=1,
        help=("The number of audio channels to convert to. The default of 1 "
"means audio is converted to mono.")
)
subparser.add_argument(
'--sample', type=int, default=16000,
help=("The sampling rate in Hz. The default of 16000 is "
"significantly downsampled compared to normal WAVE files; "
"pass 44100 for the usual sampling rate.")
)
return convert_youtube_audio | [
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"'--youtube-id'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"(",
"\"The YouTube ID of the video from which to extract audio, \"",
"\"usually an 11-character string.\"",
")",
")",
"subparser",
".",
"add_argument",
"(",
"'--channels'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"(",
"\"The number of audio channels to convert to. The default of 1\"",
"\"means audio is converted to mono.\"",
")",
")",
"subparser",
".",
"add_argument",
"(",
"'--sample'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"(",
"\"The sampling rate in Hz. The default of 16000 is \"",
"\"significantly downsampled compared to normal WAVE files; \"",
"\"pass 44100 for the usual sampling rate.\"",
")",
")",
"return",
"convert_youtube_audio"
] | Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command. | [
"Sets",
"up",
"a",
"subparser",
"to",
"convert",
"YouTube",
"audio",
"files",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/youtube_audio.py#L65-L93 | train |
mila-iqia/fuel | fuel/converters/ilsvrc2012.py | convert_ilsvrc2012 | def convert_ilsvrc2012(directory, output_directory,
output_filename='ilsvrc2012.hdf5',
shuffle_seed=config.default_seed):
"""Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index
"""
devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
train, valid, test = [os.path.join(directory, fn) for fn in IMAGE_TARS]
n_train, valid_groundtruth, n_test, wnid_map = prepare_metadata(
devkit_path)
n_valid = len(valid_groundtruth)
output_path = os.path.join(output_directory, output_filename)
with h5py.File(output_path, 'w') as f, create_temp_tar() as patch:
log.info('Creating HDF5 datasets...')
prepare_hdf5_file(f, n_train, n_valid, n_test)
log.info('Processing training set...')
process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
log.info('Processing validation set...')
process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
log.info('Processing test set...')
process_other_set(f, 'test', test, patch, (None,) * n_test,
n_train + n_valid)
log.info('Done.')
return (output_path,) | python | def convert_ilsvrc2012(directory, output_directory,
output_filename='ilsvrc2012.hdf5',
shuffle_seed=config.default_seed):
"""Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index
"""
devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
train, valid, test = [os.path.join(directory, fn) for fn in IMAGE_TARS]
n_train, valid_groundtruth, n_test, wnid_map = prepare_metadata(
devkit_path)
n_valid = len(valid_groundtruth)
output_path = os.path.join(output_directory, output_filename)
with h5py.File(output_path, 'w') as f, create_temp_tar() as patch:
log.info('Creating HDF5 datasets...')
prepare_hdf5_file(f, n_train, n_valid, n_test)
log.info('Processing training set...')
process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
log.info('Processing validation set...')
process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
log.info('Processing test set...')
process_other_set(f, 'test', test, patch, (None,) * n_test,
n_train + n_valid)
log.info('Done.')
return (output_path,) | [
"def",
"convert_ilsvrc2012",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'ilsvrc2012.hdf5'",
",",
"shuffle_seed",
"=",
"config",
".",
"default_seed",
")",
":",
"devkit_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"DEVKIT_ARCHIVE",
")",
"train",
",",
"valid",
",",
"test",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"fn",
")",
"for",
"fn",
"in",
"IMAGE_TARS",
"]",
"n_train",
",",
"valid_groundtruth",
",",
"n_test",
",",
"wnid_map",
"=",
"prepare_metadata",
"(",
"devkit_path",
")",
"n_valid",
"=",
"len",
"(",
"valid_groundtruth",
")",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"with",
"h5py",
".",
"File",
"(",
"output_path",
",",
"'w'",
")",
"as",
"f",
",",
"create_temp_tar",
"(",
")",
"as",
"patch",
":",
"log",
".",
"info",
"(",
"'Creating HDF5 datasets...'",
")",
"prepare_hdf5_file",
"(",
"f",
",",
"n_train",
",",
"n_valid",
",",
"n_test",
")",
"log",
".",
"info",
"(",
"'Processing training set...'",
")",
"process_train_set",
"(",
"f",
",",
"train",
",",
"patch",
",",
"n_train",
",",
"wnid_map",
",",
"shuffle_seed",
")",
"log",
".",
"info",
"(",
"'Processing validation set...'",
")",
"process_other_set",
"(",
"f",
",",
"'valid'",
",",
"valid",
",",
"patch",
",",
"valid_groundtruth",
",",
"n_train",
")",
"log",
".",
"info",
"(",
"'Processing test set...'",
")",
"process_other_set",
"(",
"f",
",",
"'test'",
",",
"test",
",",
"patch",
",",
"(",
"None",
",",
")",
"*",
"n_test",
",",
"n_train",
"+",
"n_valid",
")",
"log",
".",
"info",
"(",
"'Done.'",
")",
"return",
"(",
"output_path",
",",
")"
] | Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index | [
"Converter",
"for",
"data",
"from",
"the",
"ILSVRC",
"2012",
"competition",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L35-L78 | train |
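A usage sketch for the ILSVRC2012 converter above; the directories are placeholders, and the devkit archive plus the three image tars are assumed to be present in the input directory:

from fuel.converters.ilsvrc2012 import convert_ilsvrc2012

raw_directory = '/data/ilsvrc2012_raw'   # contains the devkit and image tar files
output_directory = '/data/fuel'
(hdf5_path,) = convert_ilsvrc2012(raw_directory, output_directory, shuffle_seed=1234)
print(hdf5_path)  # /data/fuel/ilsvrc2012.hdf5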
mila-iqia/fuel | fuel/converters/ilsvrc2012.py | fill_subparser | def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012 | python | def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012 | [
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"\"--shuffle-seed\"",
",",
"help",
"=",
"\"Seed to use for randomizing order of the \"",
"\"training set on disk.\"",
",",
"default",
"=",
"config",
".",
"default_seed",
",",
"type",
"=",
"int",
",",
"required",
"=",
"False",
")",
"return",
"convert_ilsvrc2012"
] | Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command. | [
"Sets",
"up",
"a",
"subparser",
"to",
"convert",
"the",
"ILSVRC2012",
"dataset",
"files",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L81-L94 | train |
mila-iqia/fuel | fuel/converters/ilsvrc2012.py | read_metadata_mat_file | def read_metadata_mat_file(meta_mat):
"""Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
      of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
"""
mat = loadmat(meta_mat, squeeze_me=True)
synsets = mat['synsets']
new_dtype = numpy.dtype([
('ILSVRC2012_ID', numpy.int16),
('WNID', ('S', max(map(len, synsets['WNID'])))),
('wordnet_height', numpy.int8),
('gloss', ('S', max(map(len, synsets['gloss'])))),
('num_children', numpy.int8),
('words', ('S', max(map(len, synsets['words'])))),
('children', (numpy.int8, max(synsets['num_children']))),
('num_train_images', numpy.uint16)
])
new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
for attr in ['ILSVRC2012_ID', 'WNID', 'wordnet_height', 'gloss',
'num_children', 'words', 'num_train_images']:
new_synsets[attr] = synsets[attr]
children = [numpy.atleast_1d(ch) for ch in synsets['children']]
padded_children = [
numpy.concatenate((c,
-numpy.ones(new_dtype['children'].shape[0] - len(c),
dtype=numpy.int16)))
for c in children
]
new_synsets['children'] = padded_children
return new_synsets | python | def read_metadata_mat_file(meta_mat):
"""Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
      of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
"""
mat = loadmat(meta_mat, squeeze_me=True)
synsets = mat['synsets']
new_dtype = numpy.dtype([
('ILSVRC2012_ID', numpy.int16),
('WNID', ('S', max(map(len, synsets['WNID'])))),
('wordnet_height', numpy.int8),
('gloss', ('S', max(map(len, synsets['gloss'])))),
('num_children', numpy.int8),
('words', ('S', max(map(len, synsets['words'])))),
('children', (numpy.int8, max(synsets['num_children']))),
('num_train_images', numpy.uint16)
])
new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
for attr in ['ILSVRC2012_ID', 'WNID', 'wordnet_height', 'gloss',
'num_children', 'words', 'num_train_images']:
new_synsets[attr] = synsets[attr]
children = [numpy.atleast_1d(ch) for ch in synsets['children']]
padded_children = [
numpy.concatenate((c,
-numpy.ones(new_dtype['children'].shape[0] - len(c),
dtype=numpy.int16)))
for c in children
]
new_synsets['children'] = padded_children
return new_synsets | [
"def",
"read_metadata_mat_file",
"(",
"meta_mat",
")",
":",
"mat",
"=",
"loadmat",
"(",
"meta_mat",
",",
"squeeze_me",
"=",
"True",
")",
"synsets",
"=",
"mat",
"[",
"'synsets'",
"]",
"new_dtype",
"=",
"numpy",
".",
"dtype",
"(",
"[",
"(",
"'ILSVRC2012_ID'",
",",
"numpy",
".",
"int16",
")",
",",
"(",
"'WNID'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'WNID'",
"]",
")",
")",
")",
")",
",",
"(",
"'wordnet_height'",
",",
"numpy",
".",
"int8",
")",
",",
"(",
"'gloss'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'gloss'",
"]",
")",
")",
")",
")",
",",
"(",
"'num_children'",
",",
"numpy",
".",
"int8",
")",
",",
"(",
"'words'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'words'",
"]",
")",
")",
")",
")",
",",
"(",
"'children'",
",",
"(",
"numpy",
".",
"int8",
",",
"max",
"(",
"synsets",
"[",
"'num_children'",
"]",
")",
")",
")",
",",
"(",
"'num_train_images'",
",",
"numpy",
".",
"uint16",
")",
"]",
")",
"new_synsets",
"=",
"numpy",
".",
"empty",
"(",
"synsets",
".",
"shape",
",",
"dtype",
"=",
"new_dtype",
")",
"for",
"attr",
"in",
"[",
"'ILSVRC2012_ID'",
",",
"'WNID'",
",",
"'wordnet_height'",
",",
"'gloss'",
",",
"'num_children'",
",",
"'words'",
",",
"'num_train_images'",
"]",
":",
"new_synsets",
"[",
"attr",
"]",
"=",
"synsets",
"[",
"attr",
"]",
"children",
"=",
"[",
"numpy",
".",
"atleast_1d",
"(",
"ch",
")",
"for",
"ch",
"in",
"synsets",
"[",
"'children'",
"]",
"]",
"padded_children",
"=",
"[",
"numpy",
".",
"concatenate",
"(",
"(",
"c",
",",
"-",
"numpy",
".",
"ones",
"(",
"new_dtype",
"[",
"'children'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"len",
"(",
"c",
")",
",",
"dtype",
"=",
"numpy",
".",
"int16",
")",
")",
")",
"for",
"c",
"in",
"children",
"]",
"new_synsets",
"[",
"'children'",
"]",
"=",
"padded_children",
"return",
"new_synsets"
] | Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
      of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset. | [
"Read",
"ILSVRC2012",
"metadata",
"from",
"the",
"distributed",
"MAT",
"file",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L231-L294 | train |
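A sketch of how the returned structured array can be used, for example to map the WNIDs of the leaf synsets to zero-based class indices; the path to meta.mat is a placeholder:

from fuel.converters.ilsvrc2012 import read_metadata_mat_file

synsets = read_metadata_mat_file('/data/ILSVRC2012_devkit_t12/data/meta.mat')
leaves = synsets[synsets['num_children'] == 0]   # the leaf classes
wnid_to_index = {wnid: index for index, wnid in enumerate(leaves['WNID'])}
print(len(wnid_to_index), leaves['num_train_images'].sum())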
mila-iqia/fuel | fuel/config_parser.py | multiple_paths_parser | def multiple_paths_parser(value):
"""Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
"""
if isinstance(value, six.string_types):
value = value.split(os.path.pathsep)
return value | python | def multiple_paths_parser(value):
"""Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
"""
if isinstance(value, six.string_types):
value = value.split(os.path.pathsep)
return value | [
"def",
"multiple_paths_parser",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"value",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"return",
"value"
] | Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
"Parses",
"data_path",
"argument",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/config_parser.py#L108-L124 | train |
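A tiny sketch of the parser's behaviour; note that the split uses os.path.pathsep, which is ':' on POSIX and ';' on Windows:

from fuel.config_parser import multiple_paths_parser

print(multiple_paths_parser('/data/fuel:/extra/data'))  # ['/data/fuel', '/extra/data'] on POSIX
print(multiple_paths_parser(['/already/a/list']))       # lists are returned unchanged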
mila-iqia/fuel | fuel/config_parser.py | Configuration.add_config | def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default | python | def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default | [
"def",
"add_config",
"(",
"self",
",",
"key",
",",
"type_",
",",
"default",
"=",
"NOT_SET",
",",
"env_var",
"=",
"None",
")",
":",
"self",
".",
"config",
"[",
"key",
"]",
"=",
"{",
"'type'",
":",
"type_",
"}",
"if",
"env_var",
"is",
"not",
"None",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'env_var'",
"]",
"=",
"env_var",
"if",
"default",
"is",
"not",
"NOT_SET",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'default'",
"]",
"=",
"default"
] | Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file. | [
"Add",
"a",
"configuration",
"setting",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/config_parser.py#L168-L196 | train |
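A sketch of registering and reading back a hypothetical setting with add_config; the setting name and environment variable are invented for illustration, and attribute access is assumed to fall back to the environment variable or the default as described above:

import os
from fuel.config_parser import Configuration

config = Configuration()
config.add_config('extra_path', type_=str, default='/tmp', env_var='FUEL_EXTRA_PATH')
print(config.extra_path)               # '/tmp' while the environment variable is unset
os.environ['FUEL_EXTRA_PATH'] = '/data'
print(config.extra_path)               # '/data', parsed through type_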
mila-iqia/fuel | fuel/server.py | send_arrays | def send_arrays(socket, arrays, stop=False):
"""Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol).
"""
if arrays:
# The buffer protocol only works on contiguous arrays
arrays = [numpy.ascontiguousarray(array) for array in arrays]
if stop:
headers = {'stop': True}
socket.send_json(headers)
else:
headers = [header_data_from_array_1_0(array) for array in arrays]
socket.send_json(headers, zmq.SNDMORE)
for array in arrays[:-1]:
socket.send(array, zmq.SNDMORE)
socket.send(arrays[-1]) | python | def send_arrays(socket, arrays, stop=False):
"""Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol).
"""
if arrays:
# The buffer protocol only works on contiguous arrays
arrays = [numpy.ascontiguousarray(array) for array in arrays]
if stop:
headers = {'stop': True}
socket.send_json(headers)
else:
headers = [header_data_from_array_1_0(array) for array in arrays]
socket.send_json(headers, zmq.SNDMORE)
for array in arrays[:-1]:
socket.send(array, zmq.SNDMORE)
socket.send(arrays[-1]) | [
"def",
"send_arrays",
"(",
"socket",
",",
"arrays",
",",
"stop",
"=",
"False",
")",
":",
"if",
"arrays",
":",
"# The buffer protocol only works on contiguous arrays",
"arrays",
"=",
"[",
"numpy",
".",
"ascontiguousarray",
"(",
"array",
")",
"for",
"array",
"in",
"arrays",
"]",
"if",
"stop",
":",
"headers",
"=",
"{",
"'stop'",
":",
"True",
"}",
"socket",
".",
"send_json",
"(",
"headers",
")",
"else",
":",
"headers",
"=",
"[",
"header_data_from_array_1_0",
"(",
"array",
")",
"for",
"array",
"in",
"arrays",
"]",
"socket",
".",
"send_json",
"(",
"headers",
",",
"zmq",
".",
"SNDMORE",
")",
"for",
"array",
"in",
"arrays",
"[",
":",
"-",
"1",
"]",
":",
"socket",
".",
"send",
"(",
"array",
",",
"zmq",
".",
"SNDMORE",
")",
"socket",
".",
"send",
"(",
"arrays",
"[",
"-",
"1",
"]",
")"
] | Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol). | [
"Send",
"NumPy",
"arrays",
"using",
"the",
"buffer",
"interface",
"and",
"some",
"metadata",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L12-L45 | train |
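A sketch of the sending side of the protocol described above; the port and the arrays are illustrative:

import numpy
import zmq
from fuel.server import send_arrays

context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.bind('tcp://*:5557')
send_arrays(socket, [numpy.arange(6).reshape(2, 3), numpy.zeros(4)])
send_arrays(socket, None, stop=True)  # tells the receiver the epoch is over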
mila-iqia/fuel | fuel/server.py | recv_arrays | def recv_arrays(socket):
"""Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch.
"""
headers = socket.recv_json()
if 'stop' in headers:
raise StopIteration
arrays = []
for header in headers:
data = socket.recv(copy=False)
buf = buffer_(data)
array = numpy.frombuffer(buf, dtype=numpy.dtype(header['descr']))
array.shape = header['shape']
if header['fortran_order']:
array.shape = header['shape'][::-1]
array = array.transpose()
arrays.append(array)
return arrays | python | def recv_arrays(socket):
"""Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch.
"""
headers = socket.recv_json()
if 'stop' in headers:
raise StopIteration
arrays = []
for header in headers:
data = socket.recv(copy=False)
buf = buffer_(data)
array = numpy.frombuffer(buf, dtype=numpy.dtype(header['descr']))
array.shape = header['shape']
if header['fortran_order']:
array.shape = header['shape'][::-1]
array = array.transpose()
arrays.append(array)
return arrays | [
"def",
"recv_arrays",
"(",
"socket",
")",
":",
"headers",
"=",
"socket",
".",
"recv_json",
"(",
")",
"if",
"'stop'",
"in",
"headers",
":",
"raise",
"StopIteration",
"arrays",
"=",
"[",
"]",
"for",
"header",
"in",
"headers",
":",
"data",
"=",
"socket",
".",
"recv",
"(",
"copy",
"=",
"False",
")",
"buf",
"=",
"buffer_",
"(",
"data",
")",
"array",
"=",
"numpy",
".",
"frombuffer",
"(",
"buf",
",",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"header",
"[",
"'descr'",
"]",
")",
")",
"array",
".",
"shape",
"=",
"header",
"[",
"'shape'",
"]",
"if",
"header",
"[",
"'fortran_order'",
"]",
":",
"array",
".",
"shape",
"=",
"header",
"[",
"'shape'",
"]",
"[",
":",
":",
"-",
"1",
"]",
"array",
"=",
"array",
".",
"transpose",
"(",
")",
"arrays",
".",
"append",
"(",
"array",
")",
"return",
"arrays"
] | Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch. | [
"Receive",
"a",
"list",
"of",
"NumPy",
"arrays",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L48-L81 | train |
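The matching receiving side, connecting to the sending sketch after the send_arrays record; again the port is illustrative:

import zmq
from fuel.server import recv_arrays

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.connect('tcp://localhost:5557')
try:
    while True:
        arrays = recv_arrays(socket)
        print([array.shape for array in arrays])
except StopIteration:
    print('epoch finished')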
mila-iqia/fuel | fuel/server.py | start_server | def start_server(data_stream, port=5557, hwm=10):
"""Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well.
"""
logging.basicConfig(level='INFO')
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(hwm)
socket.bind('tcp://*:{}'.format(port))
it = data_stream.get_epoch_iterator()
logger.info('server started')
while True:
try:
data = next(it)
stop = False
logger.debug("sending {} arrays".format(len(data)))
except StopIteration:
it = data_stream.get_epoch_iterator()
data = None
stop = True
logger.debug("sending StopIteration")
send_arrays(socket, data, stop=stop) | python | def start_server(data_stream, port=5557, hwm=10):
"""Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well.
"""
logging.basicConfig(level='INFO')
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(hwm)
socket.bind('tcp://*:{}'.format(port))
it = data_stream.get_epoch_iterator()
logger.info('server started')
while True:
try:
data = next(it)
stop = False
logger.debug("sending {} arrays".format(len(data)))
except StopIteration:
it = data_stream.get_epoch_iterator()
data = None
stop = True
logger.debug("sending StopIteration")
send_arrays(socket, data, stop=stop) | [
"def",
"start_server",
"(",
"data_stream",
",",
"port",
"=",
"5557",
",",
"hwm",
"=",
"10",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"'INFO'",
")",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUSH",
")",
"socket",
".",
"set_hwm",
"(",
"hwm",
")",
"socket",
".",
"bind",
"(",
"'tcp://*:{}'",
".",
"format",
"(",
"port",
")",
")",
"it",
"=",
"data_stream",
".",
"get_epoch_iterator",
"(",
")",
"logger",
".",
"info",
"(",
"'server started'",
")",
"while",
"True",
":",
"try",
":",
"data",
"=",
"next",
"(",
"it",
")",
"stop",
"=",
"False",
"logger",
".",
"debug",
"(",
"\"sending {} arrays\"",
".",
"format",
"(",
"len",
"(",
"data",
")",
")",
")",
"except",
"StopIteration",
":",
"it",
"=",
"data_stream",
".",
"get_epoch_iterator",
"(",
")",
"data",
"=",
"None",
"stop",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"sending StopIteration\"",
")",
"send_arrays",
"(",
"socket",
",",
"data",
",",
"stop",
"=",
"stop",
")"
] | Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well. | [
"Start",
"a",
"data",
"processing",
"server",
"."
] | 1d6292dc25e3a115544237e392e61bff6631d23c | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L84-L131 | train |
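A sketch of serving a stream with start_server and consuming it from another process via ServerDataStream (defined in fuel.streams); the dataset, port and source names are illustrative:

import numpy
from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.server import start_server
from fuel.streams import DataStream

dataset = IndexableDataset({'features': numpy.arange(20).reshape(10, 2)})
stream = DataStream(dataset, iteration_scheme=SequentialScheme(10, 5))
start_server(stream, port=5557, hwm=10)  # blocks and serves batches forever

# In the consuming process, something along these lines would be used:
#     from fuel.streams import ServerDataStream
#     stream = ServerDataStream(('features',), produces_examples=False, port=5557)
#     for features, in stream.get_epoch_iterator():
#         ...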
apacha/OMR-Datasets | omrdatasettools/image_generators/HomusImageGenerator.py | HomusImageGenerator.create_images | def create_images(raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int],
canvas_width: int = None,
canvas_height: int = None,
staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
random_position_on_canvas: bool = False) -> dict:
"""
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
        :param raw_data_directory: The directory that contains the text-files with the textual representation
of the music symbols
        :param destination_directory: The directory into which the symbols should be generated. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
        :param canvas_width: The width of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                             provide None here.
        :param canvas_height: The height of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                              provide None here.
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
"""
all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
staff_line_multiplier = 1
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
staff_line_multiplier = len(staff_line_vertical_offsets)
total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
if staff_line_vertical_offsets is not None:
output += " and with staff-lines with {0} different offsets from the top ({1})".format(
staff_line_multiplier, staff_line_vertical_offsets)
if canvas_width is not None and canvas_height is not None:
if random_position_on_canvas is False:
                output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
else:
                output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
bounding_boxes = dict()
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
for symbol_file in all_symbol_files:
with open(symbol_file) as file:
content = file.read()
symbol = HomusSymbol.initialize_from_string(content)
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
if canvas_width is None and canvas_height is None:
symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
else:
symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
canvas_height, staff_line_spacing, staff_line_vertical_offsets,
bounding_boxes, random_position_on_canvas)
progress_bar.update(1 * staff_line_multiplier)
progress_bar.close()
return bounding_boxes | python | def create_images(raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int],
canvas_width: int = None,
canvas_height: int = None,
staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
random_position_on_canvas: bool = False) -> dict:
"""
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
        :param raw_data_directory: The directory that contains the text-files with the textual representation
of the music symbols
        :param destination_directory: The directory into which the symbols should be generated. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
        :param canvas_width: The width of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                             provide None here.
        :param canvas_height: The height of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                              provide None here.
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
"""
all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
staff_line_multiplier = 1
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
staff_line_multiplier = len(staff_line_vertical_offsets)
total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
if staff_line_vertical_offsets is not None:
output += " and with staff-lines with {0} different offsets from the top ({1})".format(
staff_line_multiplier, staff_line_vertical_offsets)
if canvas_width is not None and canvas_height is not None:
if random_position_on_canvas is False:
output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
else:
output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
bounding_boxes = dict()
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
for symbol_file in all_symbol_files:
with open(symbol_file) as file:
content = file.read()
symbol = HomusSymbol.initialize_from_string(content)
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
if canvas_width is None and canvas_height is None:
symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
else:
symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
canvas_height, staff_line_spacing, staff_line_vertical_offsets,
bounding_boxes, random_position_on_canvas)
progress_bar.update(1 * staff_line_multiplier)
progress_bar.close()
return bounding_boxes | [
"def",
"create_images",
"(",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
",",
"canvas_width",
":",
"int",
"=",
"None",
",",
"canvas_height",
":",
"int",
"=",
"None",
",",
"staff_line_spacing",
":",
"int",
"=",
"14",
",",
"staff_line_vertical_offsets",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"random_position_on_canvas",
":",
"bool",
"=",
"False",
")",
"->",
"dict",
":",
"all_symbol_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.txt'",
")",
")",
"]",
"staff_line_multiplier",
"=",
"1",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
"and",
"staff_line_vertical_offsets",
":",
"staff_line_multiplier",
"=",
"len",
"(",
"staff_line_vertical_offsets",
")",
"total_number_of_symbols",
"=",
"len",
"(",
"all_symbol_files",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"*",
"staff_line_multiplier",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"all_symbol_files",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
":",
"output",
"+=",
"\" and with staff-lines with {0} different offsets from the top ({1})\"",
".",
"format",
"(",
"staff_line_multiplier",
",",
"staff_line_vertical_offsets",
")",
"if",
"canvas_width",
"is",
"not",
"None",
"and",
"canvas_height",
"is",
"not",
"None",
":",
"if",
"random_position_on_canvas",
"is",
"False",
":",
"output",
"+=",
"\"\\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"else",
":",
"output",
"+=",
"\"\\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"bounding_boxes",
"=",
"dict",
"(",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
")",
"for",
"symbol_file",
"in",
"all_symbol_files",
":",
"with",
"open",
"(",
"symbol_file",
")",
"as",
"file",
":",
"content",
"=",
"file",
".",
"read",
"(",
")",
"symbol",
"=",
"HomusSymbol",
".",
"initialize_from_string",
"(",
"content",
")",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"symbol_file",
")",
")",
"[",
"0",
"]",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"if",
"canvas_width",
"is",
"None",
"and",
"canvas_height",
"is",
"None",
":",
"symbol",
".",
"draw_into_bitmap",
"(",
"export_path",
",",
"stroke_thickness",
",",
"margin",
"=",
"2",
")",
"else",
":",
"symbol",
".",
"draw_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
",",
"canvas_width",
",",
"canvas_height",
",",
"staff_line_spacing",
",",
"staff_line_vertical_offsets",
",",
"bounding_boxes",
",",
"random_position_on_canvas",
")",
"progress_bar",
".",
"update",
"(",
"1",
"*",
"staff_line_multiplier",
")",
"progress_bar",
".",
"close",
"(",
")",
"return",
"bounding_boxes"
] | Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
:param raw_data_directory: The directory, that contains the text-files that contain the textual representation
of the music symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
:param canvas_width: The width of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here.
:param canvas_height: The height of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol. | [
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Homus",
"Dataset",
"by",
"parsing",
"all",
"text",
"-",
"files",
"and",
"the",
"symbols",
"as",
"specified",
"by",
"the",
"parameters",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusImageGenerator.py#L13-L105 | train |
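A minimal call sketch for the create_images function in this record. The import path mirrors the record's path field, exposing it as a static method of HomusImageGenerator is an assumption, and the directory names are placeholders; the keyword arguments follow the signature shown above.

from omrdatasettools.image_generators.HomusImageGenerator import HomusImageGenerator

bounding_boxes = HomusImageGenerator.create_images(
    raw_data_directory="HOMUS/raw",            # placeholder folder that holds the *.txt symbol files
    destination_directory="HOMUS/images",      # one sub-folder per symbol class is created in here
    stroke_thicknesses=[2, 3],                 # a 2-px and a 3-px variant of every symbol
    canvas_width=96,
    canvas_height=192,                         # fixed canvas; leave both as None to keep original symbol sizes
    staff_line_vertical_offsets=[70, 77])      # two staff-line variants per stroke thickness
print(len(bounding_boxes), "bounding boxes collected")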
apacha/OMR-Datasets | omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py | MuscimaPlusPlusImageGenerator.extract_and_render_all_symbol_masks | def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
"""
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory) | python | def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
"""
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory) | [
"def",
"extract_and_render_all_symbol_masks",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
")",
":",
"print",
"(",
"\"Extracting Symbols from Muscima++ Dataset...\"",
")",
"xml_files",
"=",
"self",
".",
"get_all_xml_file_paths",
"(",
"raw_data_directory",
")",
"crop_objects",
"=",
"self",
".",
"load_crop_objects_from_xml_files",
"(",
"xml_files",
")",
"self",
".",
"render_masks_of_crop_objects_into_image",
"(",
"crop_objects",
",",
"destination_directory",
")"
] | Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically | [
"Extracts",
"all",
"symbols",
"from",
"the",
"raw",
"XML",
"documents",
"and",
"generates",
"individual",
"symbols",
"from",
"the",
"masks"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py#L23-L35 | train |
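A hypothetical call for this record; the import path follows the path field and the no-argument constructor is an assumption.

from omrdatasettools.image_generators.MuscimaPlusPlusImageGenerator import MuscimaPlusPlusImageGenerator

generator = MuscimaPlusPlusImageGenerator()
# the raw directory must contain the MUSCIMA++ xml annotation files and their matching images
generator.extract_and_render_all_symbol_masks("MuscimaPlusPlus/raw", "MuscimaPlusPlus/symbols")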
apacha/OMR-Datasets | omrdatasettools/converters/ImageColorInverter.py | ImageColorInverter.invert_images | def invert_images(self, image_directory: str, image_file_ending: str = "*.bmp"):
"""
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png") | python | def invert_images(self, image_directory: str, image_file_ending: str = "*.bmp"):
"""
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png") | [
"def",
"invert_images",
"(",
"self",
",",
"image_directory",
":",
"str",
",",
"image_file_ending",
":",
"str",
"=",
"\"*.bmp\"",
")",
":",
"image_paths",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"image_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"image_file_ending",
")",
")",
"]",
"for",
"image_path",
"in",
"tqdm",
"(",
"image_paths",
",",
"desc",
"=",
"\"Inverting all images in directory {0}\"",
".",
"format",
"(",
"image_directory",
")",
")",
":",
"white_on_black_image",
"=",
"Image",
".",
"open",
"(",
"image_path",
")",
".",
"convert",
"(",
"\"L\"",
")",
"black_on_white_image",
"=",
"ImageOps",
".",
"invert",
"(",
"white_on_black_image",
")",
"black_on_white_image",
".",
"save",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"image_path",
")",
"[",
"0",
"]",
"+",
"\".png\"",
")"
] | In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory | [
"In",
"-",
"situ",
"converts",
"the",
"white",
"on",
"black",
"images",
"of",
"a",
"directory",
"to",
"black",
"on",
"white",
"images"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/ImageColorInverter.py#L15-L26 | train |
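A short usage sketch; the directory and file pattern are placeholders and the no-argument constructor is assumed.

from omrdatasettools.converters.ImageColorInverter import ImageColorInverter

inverter = ImageColorInverter()
inverter.invert_images("dataset/bitmaps", "*.bmp")   # writes a black-on-white .png next to every matched .bmp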
apacha/OMR-Datasets | omrdatasettools/image_generators/CapitanImageGenerator.py | CapitanImageGenerator.create_capitan_images | def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory) | python | def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory) | [
"def",
"create_capitan_images",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"symbols",
"=",
"self",
".",
"load_capitan_symbols",
"(",
"raw_data_directory",
")",
"self",
".",
"draw_capitan_stroke_images",
"(",
"symbols",
",",
"destination_directory",
",",
"stroke_thicknesses",
")",
"self",
".",
"draw_capitan_score_images",
"(",
"symbols",
",",
"destination_directory",
")"
] | Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16 | [
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Capitan",
"strokes",
"by",
"parsing",
"all",
"text",
"-",
"files",
"and",
"the",
"symbols",
"as",
"specified",
"by",
"the",
"parameters",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L13-L29 | train |
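A hypothetical call with placeholder paths; the import path follows the path field of this record.

from omrdatasettools.image_generators.CapitanImageGenerator import CapitanImageGenerator

generator = CapitanImageGenerator()
generator.create_capitan_images(raw_data_directory="Capitan/raw",
                                destination_directory="Capitan/images",
                                stroke_thicknesses=[2, 3])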
apacha/OMR-Datasets | omrdatasettools/image_generators/CapitanImageGenerator.py | CapitanImageGenerator.draw_capitan_stroke_images | def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close() | python | def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close() | [
"def",
"draw_capitan_stroke_images",
"(",
"self",
",",
"symbols",
":",
"List",
"[",
"CapitanSymbol",
"]",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"total_number_of_symbols",
"=",
"len",
"(",
"symbols",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"symbols",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
",",
"desc",
"=",
"\"Rendering strokes\"",
")",
"capitan_file_name_counter",
"=",
"0",
"for",
"symbol",
"in",
"symbols",
":",
"capitan_file_name_counter",
"+=",
"1",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"\"capitan-{0}-{1}-stroke\"",
".",
"format",
"(",
"symbol",
".",
"symbol_class",
",",
"capitan_file_name_counter",
")",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"symbol",
".",
"draw_capitan_stroke_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
")",
"progress_bar",
".",
"update",
"(",
"1",
")",
"progress_bar",
".",
"close",
"(",
")"
] | Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16 | [
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Capitan",
"strokes",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L44-L82 | train |
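A sketch that reuses load_capitan_symbols, the loader that create_capitan_images in the previous record calls before this method; directories are placeholders.

generator = CapitanImageGenerator()
symbols = generator.load_capitan_symbols("Capitan/raw")
generator.draw_capitan_stroke_images(symbols, "Capitan/strokes", stroke_thicknesses=[3])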
apacha/OMR-Datasets | omrdatasettools/image_generators/Rectangle.py | Rectangle.overlap | def overlap(r1: 'Rectangle', r2: 'Rectangle'):
"""
Overlapping rectangles overlap both horizontally & vertically
"""
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
return h_overlaps and v_overlaps | python | def overlap(r1: 'Rectangle', r2: 'Rectangle'):
"""
Overlapping rectangles overlap both horizontally & vertically
"""
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
return h_overlaps and v_overlaps | [
"def",
"overlap",
"(",
"r1",
":",
"'Rectangle'",
",",
"r2",
":",
"'Rectangle'",
")",
":",
"h_overlaps",
"=",
"(",
"r1",
".",
"left",
"<=",
"r2",
".",
"right",
")",
"and",
"(",
"r1",
".",
"right",
">=",
"r2",
".",
"left",
")",
"v_overlaps",
"=",
"(",
"r1",
".",
"bottom",
">=",
"r2",
".",
"top",
")",
"and",
"(",
"r1",
".",
"top",
"<=",
"r2",
".",
"bottom",
")",
"return",
"h_overlaps",
"and",
"v_overlaps"
] | Overlapping rectangles overlap both horizontally & vertically | [
"Overlapping",
"rectangles",
"overlap",
"both",
"horizontally",
"&",
"vertically"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/Rectangle.py#L18-L24 | train |
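An illustrative check; the Rectangle(Point2D(x, y), width, height) constructor form is inferred from its use in HomusSymbol.initialize_from_string further down in this file, and the left/right/top/bottom properties are assumed to be derived from those values.

a = Rectangle(Point2D(0, 0), 10, 10)
b = Rectangle(Point2D(5, 5), 10, 10)
c = Rectangle(Point2D(20, 20), 4, 4)
print(Rectangle.overlap(a, b))   # True  - the boxes intersect both horizontally and vertically
print(Rectangle.overlap(a, c))   # False - no overlap on either axis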
apacha/OMR-Datasets | omrdatasettools/image_generators/AudiverisOmrImageGenerator.py | AudiverisOmrImageGenerator.extract_symbols | def extract_symbols(self, raw_data_directory: str, destination_directory: str):
"""
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Audiveris OMR Dataset...")
all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
all_image_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.png'))]
data_pairs = []
for i in range(len(all_xml_files)):
data_pairs.append((all_xml_files[i], all_image_files[i]))
for data_pair in data_pairs:
self.__extract_symbols(data_pair[0], data_pair[1], destination_directory) | python | def extract_symbols(self, raw_data_directory: str, destination_directory: str):
"""
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Audiveris OMR Dataset...")
all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
all_image_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.png'))]
data_pairs = []
for i in range(len(all_xml_files)):
data_pairs.append((all_xml_files[i], all_image_files[i]))
for data_pair in data_pairs:
self.__extract_symbols(data_pair[0], data_pair[1], destination_directory) | [
"def",
"extract_symbols",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
")",
":",
"print",
"(",
"\"Extracting Symbols from Audiveris OMR Dataset...\"",
")",
"all_xml_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.xml'",
")",
")",
"]",
"all_image_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.png'",
")",
")",
"]",
"data_pairs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"all_xml_files",
")",
")",
":",
"data_pairs",
".",
"append",
"(",
"(",
"all_xml_files",
"[",
"i",
"]",
",",
"all_image_files",
"[",
"i",
"]",
")",
")",
"for",
"data_pair",
"in",
"data_pairs",
":",
"self",
".",
"__extract_symbols",
"(",
"data_pair",
"[",
"0",
"]",
",",
"data_pair",
"[",
"1",
"]",
",",
"destination_directory",
")"
] | Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically | [
"Extracts",
"the",
"symbols",
"from",
"the",
"raw",
"XML",
"documents",
"and",
"matching",
"images",
"of",
"the",
"Audiveris",
"OMR",
"dataset",
"into",
"individual",
"symbols"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/AudiverisOmrImageGenerator.py#L16-L35 | train |
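A hypothetical call with placeholder paths. Note that the method pairs the i-th xml file with the i-th png file, so the two glob results must line up one-to-one.

from omrdatasettools.image_generators.AudiverisOmrImageGenerator import AudiverisOmrImageGenerator

generator = AudiverisOmrImageGenerator()
generator.extract_symbols("Audiveris/raw", "Audiveris/symbols")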
apacha/OMR-Datasets | omrdatasettools/image_generators/HomusSymbol.py | HomusSymbol.initialize_from_string | def initialize_from_string(content: str) -> 'HomusSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
"""
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions) | python | def initialize_from_string(content: str) -> 'HomusSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
"""
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions) | [
"def",
"initialize_from_string",
"(",
"content",
":",
"str",
")",
"->",
"'HomusSymbol'",
":",
"if",
"content",
"is",
"None",
"or",
"content",
"is",
"\"\"",
":",
"return",
"None",
"lines",
"=",
"content",
".",
"splitlines",
"(",
")",
"min_x",
"=",
"sys",
".",
"maxsize",
"max_x",
"=",
"0",
"min_y",
"=",
"sys",
".",
"maxsize",
"max_y",
"=",
"0",
"symbol_name",
"=",
"lines",
"[",
"0",
"]",
"strokes",
"=",
"[",
"]",
"for",
"stroke_string",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"stroke",
"=",
"[",
"]",
"for",
"point_string",
"in",
"stroke_string",
".",
"split",
"(",
"\";\"",
")",
":",
"if",
"point_string",
"is",
"\"\"",
":",
"continue",
"# Skip the last element, that is due to a trailing ; in each line",
"point_x",
",",
"point_y",
"=",
"point_string",
".",
"split",
"(",
"\",\"",
")",
"x",
"=",
"int",
"(",
"point_x",
")",
"y",
"=",
"int",
"(",
"point_y",
")",
"stroke",
".",
"append",
"(",
"Point2D",
"(",
"x",
",",
"y",
")",
")",
"max_x",
"=",
"max",
"(",
"max_x",
",",
"x",
")",
"min_x",
"=",
"min",
"(",
"min_x",
",",
"x",
")",
"max_y",
"=",
"max",
"(",
"max_y",
",",
"y",
")",
"min_y",
"=",
"min",
"(",
"min_y",
",",
"y",
")",
"strokes",
".",
"append",
"(",
"stroke",
")",
"dimensions",
"=",
"Rectangle",
"(",
"Point2D",
"(",
"min_x",
",",
"min_y",
")",
",",
"max_x",
"-",
"min_x",
"+",
"1",
",",
"max_y",
"-",
"min_y",
"+",
"1",
")",
"return",
"HomusSymbol",
"(",
"content",
",",
"strokes",
",",
"symbol_name",
",",
"dimensions",
")"
] | Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol | [
"Create",
"and",
"initializes",
"a",
"new",
"symbol",
"from",
"a",
"string"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L21-L62 | train |
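A sketch with hand-written content in the format the parser above expects: the first line is the symbol class, every following line is one stroke of "x,y;" points with a trailing semicolon.

content = "Quarter-Note\n10,10;12,40;14,80;\n30,20;32,60;\n"
symbol = HomusSymbol.initialize_from_string(content)
print(symbol.symbol_class)                                  # "Quarter-Note"
print(symbol.dimensions.width, symbol.dimensions.height)    # 23 71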
apacha/OMR-Datasets | omrdatasettools/image_generators/HomusSymbol.py | HomusSymbol.draw_into_bitmap | def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
"""
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
"""
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin) | python | def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
"""
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
"""
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin) | [
"def",
"draw_into_bitmap",
"(",
"self",
",",
"export_path",
":",
"ExportPath",
",",
"stroke_thickness",
":",
"int",
",",
"margin",
":",
"int",
"=",
"0",
")",
"->",
"None",
":",
"self",
".",
"draw_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"margin",
",",
"self",
".",
"dimensions",
".",
"width",
"+",
"2",
"*",
"margin",
",",
"self",
".",
"dimensions",
".",
"height",
"+",
"2",
"*",
"margin",
")"
] | Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol | [
"Draws",
"the",
"symbol",
"in",
"the",
"original",
"size",
"that",
"it",
"has",
"plus",
"an",
"optional",
"margin"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L64-L76 | train |
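A call sketch; the ExportPath argument order is taken from the create_images record earlier in this file, and the class sub-folder is created up front because create_images does the same with os.makedirs before drawing.

import os

os.makedirs(os.path.join("HOMUS/images", symbol.symbol_class), exist_ok=True)
export_path = ExportPath("HOMUS/images", symbol.symbol_class, "1-13", "png", 3)
symbol.draw_into_bitmap(export_path, stroke_thickness=3, margin=2)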
apacha/OMR-Datasets | omrdatasettools/image_generators/HomusSymbol.py | HomusSymbol.draw_onto_canvas | def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close() | python | def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close() | [
"def",
"draw_onto_canvas",
"(",
"self",
",",
"export_path",
":",
"ExportPath",
",",
"stroke_thickness",
":",
"int",
",",
"margin",
":",
"int",
",",
"destination_width",
":",
"int",
",",
"destination_height",
":",
"int",
",",
"staff_line_spacing",
":",
"int",
"=",
"14",
",",
"staff_line_vertical_offsets",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"bounding_boxes",
":",
"dict",
"=",
"None",
",",
"random_position_on_canvas",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"width",
"=",
"self",
".",
"dimensions",
".",
"width",
"+",
"2",
"*",
"margin",
"height",
"=",
"self",
".",
"dimensions",
".",
"height",
"+",
"2",
"*",
"margin",
"if",
"random_position_on_canvas",
":",
"# max is required for elements that are larger than the canvas,",
"# where the possible range for the random value would be negative",
"random_horizontal_offset",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"max",
"(",
"0",
",",
"destination_width",
"-",
"width",
")",
")",
"random_vertical_offset",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"max",
"(",
"0",
",",
"destination_height",
"-",
"height",
")",
")",
"offset",
"=",
"Point2D",
"(",
"self",
".",
"dimensions",
".",
"origin",
".",
"x",
"-",
"margin",
"-",
"random_horizontal_offset",
",",
"self",
".",
"dimensions",
".",
"origin",
".",
"y",
"-",
"margin",
"-",
"random_vertical_offset",
")",
"else",
":",
"width_offset_for_centering",
"=",
"(",
"destination_width",
"-",
"width",
")",
"/",
"2",
"height_offset_for_centering",
"=",
"(",
"destination_height",
"-",
"height",
")",
"/",
"2",
"offset",
"=",
"Point2D",
"(",
"self",
".",
"dimensions",
".",
"origin",
".",
"x",
"-",
"margin",
"-",
"width_offset_for_centering",
",",
"self",
".",
"dimensions",
".",
"origin",
".",
"y",
"-",
"margin",
"-",
"height_offset_for_centering",
")",
"image_without_staff_lines",
"=",
"Image",
".",
"new",
"(",
"'RGB'",
",",
"(",
"destination_width",
",",
"destination_height",
")",
",",
"\"white\"",
")",
"# create a new white image",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image_without_staff_lines",
")",
"black",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
"for",
"stroke",
"in",
"self",
".",
"strokes",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"stroke",
")",
"-",
"1",
")",
":",
"start_point",
"=",
"self",
".",
"__subtract_offset",
"(",
"stroke",
"[",
"i",
"]",
",",
"offset",
")",
"end_point",
"=",
"self",
".",
"__subtract_offset",
"(",
"stroke",
"[",
"i",
"+",
"1",
"]",
",",
"offset",
")",
"draw",
".",
"line",
"(",
"(",
"start_point",
".",
"x",
",",
"start_point",
".",
"y",
",",
"end_point",
".",
"x",
",",
"end_point",
".",
"y",
")",
",",
"black",
",",
"stroke_thickness",
")",
"location",
"=",
"self",
".",
"__subtract_offset",
"(",
"self",
".",
"dimensions",
".",
"origin",
",",
"offset",
")",
"bounding_box_in_image",
"=",
"Rectangle",
"(",
"location",
",",
"self",
".",
"dimensions",
".",
"width",
",",
"self",
".",
"dimensions",
".",
"height",
")",
"# self.draw_bounding_box(draw, location)",
"del",
"draw",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
"and",
"staff_line_vertical_offsets",
":",
"for",
"staff_line_vertical_offset",
"in",
"staff_line_vertical_offsets",
":",
"image_with_staff_lines",
"=",
"image_without_staff_lines",
".",
"copy",
"(",
")",
"self",
".",
"__draw_staff_lines_into_image",
"(",
"image_with_staff_lines",
",",
"stroke_thickness",
",",
"staff_line_spacing",
",",
"staff_line_vertical_offset",
")",
"file_name_with_offset",
"=",
"export_path",
".",
"get_full_path",
"(",
"staff_line_vertical_offset",
")",
"image_with_staff_lines",
".",
"save",
"(",
"file_name_with_offset",
")",
"image_with_staff_lines",
".",
"close",
"(",
")",
"if",
"bounding_boxes",
"is",
"not",
"None",
":",
"# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and",
"# the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary",
"class_and_file_name",
"=",
"export_path",
".",
"get_class_name_and_file_path",
"(",
"staff_line_vertical_offset",
")",
"bounding_boxes",
"[",
"class_and_file_name",
"]",
"=",
"bounding_box_in_image",
"else",
":",
"image_without_staff_lines",
".",
"save",
"(",
"export_path",
".",
"get_full_path",
"(",
")",
")",
"if",
"bounding_boxes",
"is",
"not",
"None",
":",
"# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and",
"# the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary",
"class_and_file_name",
"=",
"export_path",
".",
"get_class_name_and_file_path",
"(",
")",
"bounding_boxes",
"[",
"class_and_file_name",
"]",
"=",
"bounding_box_in_image",
"image_without_staff_lines",
".",
"close",
"(",
")"
] | Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated | [
"Draws",
"the",
"symbol",
"onto",
"a",
"canvas",
"with",
"a",
"fixed",
"size"
] | d0a22a03ae35caeef211729efa340e1ec0e01ea5 | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L78-L148 | train |
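The same call that create_images makes for fixed canvases, written out with keyword arguments; the concrete values are placeholders.

boxes = {}
symbol.draw_onto_canvas(export_path,
                        stroke_thickness=3,
                        margin=0,
                        destination_width=96,
                        destination_height=192,
                        staff_line_spacing=14,
                        staff_line_vertical_offsets=[70, 77],   # one saved image per offset
                        bounding_boxes=boxes)                   # keyed by "class\\file_name", values are Rectangles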
datascopeanalytics/scrubadub | scrubadub/import_magic.py | update_locals | def update_locals(locals_instance, instance_iterator, *args, **kwargs):
"""import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth``
"""
# http://stackoverflow.com/a/4526709/564709
# http://stackoverflow.com/a/511059/564709
for instance in instance_iterator():
locals_instance.update({type(instance).__name__: instance.__class__}) | python | def update_locals(locals_instance, instance_iterator, *args, **kwargs):
"""import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth``
"""
# http://stackoverflow.com/a/4526709/564709
# http://stackoverflow.com/a/511059/564709
for instance in instance_iterator():
locals_instance.update({type(instance).__name__: instance.__class__}) | [
"def",
"update_locals",
"(",
"locals_instance",
",",
"instance_iterator",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# http://stackoverflow.com/a/4526709/564709",
"# http://stackoverflow.com/a/511059/564709",
"for",
"instance",
"in",
"instance_iterator",
"(",
")",
":",
"locals_instance",
".",
"update",
"(",
"{",
"type",
"(",
"instance",
")",
".",
"__name__",
":",
"instance",
".",
"__class__",
"}",
")"
] | import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth`` | [
"import",
"all",
"of",
"the",
"detector",
"classes",
"into",
"the",
"local",
"namespace",
"to",
"make",
"it",
"easy",
"to",
"do",
"things",
"like",
"import",
"scrubadub",
".",
"detectors",
".",
"NameDetector",
"without",
"having",
"to",
"add",
"each",
"new",
"Detector",
"or",
"Filth"
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/import_magic.py#L34-L42 | train |
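A sketch of the intended call site, following the docstring above; iter_filths is the generator defined in the scrubadub.filth records below, and the placement at the bottom of the package __init__ is an assumption.

# e.g. at the bottom of scrubadub/filth/__init__.py (or the detectors equivalent)
update_locals(locals(), iter_filths)
# afterwards every concrete Filth subclass is reachable as an attribute of the package itself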
datascopeanalytics/scrubadub | scrubadub/filth/__init__.py | iter_filth_clss | def iter_filth_clss():
"""Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time.
"""
return iter_subclasses(
os.path.dirname(os.path.abspath(__file__)),
Filth,
_is_abstract_filth,
) | python | def iter_filth_clss():
"""Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time.
"""
return iter_subclasses(
os.path.dirname(os.path.abspath(__file__)),
Filth,
_is_abstract_filth,
) | [
"def",
"iter_filth_clss",
"(",
")",
":",
"return",
"iter_subclasses",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"Filth",
",",
"_is_abstract_filth",
",",
")"
] | Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time. | [
"Iterate",
"over",
"all",
"of",
"the",
"filths",
"that",
"are",
"included",
"in",
"this",
"sub",
"-",
"package",
".",
"This",
"is",
"a",
"convenience",
"method",
"for",
"capturing",
"all",
"new",
"Filth",
"that",
"are",
"added",
"over",
"time",
"."
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/__init__.py#L13-L22 | train |
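A small loop over the iterator:

for filth_cls in iter_filth_clss():
    print(filth_cls.__name__)   # every concrete Filth subclass found in this sub-package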
datascopeanalytics/scrubadub | scrubadub/filth/__init__.py | iter_filths | def iter_filths():
"""Iterate over all instances of filth"""
for filth_cls in iter_filth_clss():
if issubclass(filth_cls, RegexFilth):
m = next(re.finditer(r"\s+", "fake pattern string"))
yield filth_cls(m)
else:
yield filth_cls() | python | def iter_filths():
"""Iterate over all instances of filth"""
for filth_cls in iter_filth_clss():
if issubclass(filth_cls, RegexFilth):
m = next(re.finditer(r"\s+", "fake pattern string"))
yield filth_cls(m)
else:
yield filth_cls() | [
"def",
"iter_filths",
"(",
")",
":",
"for",
"filth_cls",
"in",
"iter_filth_clss",
"(",
")",
":",
"if",
"issubclass",
"(",
"filth_cls",
",",
"RegexFilth",
")",
":",
"m",
"=",
"next",
"(",
"re",
".",
"finditer",
"(",
"r\"\\s+\"",
",",
"\"fake pattern string\"",
")",
")",
"yield",
"filth_cls",
"(",
"m",
")",
"else",
":",
"yield",
"filth_cls",
"(",
")"
] | Iterate over all instances of filth | [
"Iterate",
"over",
"all",
"instances",
"of",
"filth"
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/__init__.py#L25-L32 | train |
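A small loop over the instances; the type attribute is assumed to be set on every concrete Filth subclass, as the other scrubadub records here rely on it.

for filth in iter_filths():
    # RegexFilth subclasses arrive instantiated with a dummy whitespace match, the rest with no arguments
    print(type(filth).__name__, filth.type)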
datascopeanalytics/scrubadub | scrubadub/filth/base.py | MergedFilth._update_content | def _update_content(self, other_filth):
"""this updates the bounds, text and placeholder for the merged
filth
"""
if self.end < other_filth.beg or other_filth.end < self.beg:
raise exceptions.FilthMergeError(
"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % (
self.beg, self.end, other_filth.beg, other_filth.end
))
# get the text over lap correct
if self.beg < other_filth.beg:
first = self
second = other_filth
else:
second = self
first = other_filth
end_offset = second.end - first.end
if end_offset > 0:
self.text = first.text + second.text[-end_offset:]
# update the beg/end strings
self.beg = min(self.beg, other_filth.beg)
self.end = max(self.end, other_filth.end)
if self.end - self.beg != len(self.text):
raise exceptions.FilthMergeError("text length isn't consistent")
# update the placeholder
self.filths.append(other_filth)
self._placeholder = '+'.join([filth.type for filth in self.filths]) | python | def _update_content(self, other_filth):
"""this updates the bounds, text and placeholder for the merged
filth
"""
if self.end < other_filth.beg or other_filth.end < self.beg:
raise exceptions.FilthMergeError(
"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % (
self.beg, self.end, other_filth.beg, other_filth.end
))
# get the text over lap correct
if self.beg < other_filth.beg:
first = self
second = other_filth
else:
second = self
first = other_filth
end_offset = second.end - first.end
if end_offset > 0:
self.text = first.text + second.text[-end_offset:]
# update the beg/end strings
self.beg = min(self.beg, other_filth.beg)
self.end = max(self.end, other_filth.end)
if self.end - self.beg != len(self.text):
raise exceptions.FilthMergeError("text length isn't consistent")
# update the placeholder
self.filths.append(other_filth)
self._placeholder = '+'.join([filth.type for filth in self.filths]) | [
"def",
"_update_content",
"(",
"self",
",",
"other_filth",
")",
":",
"if",
"self",
".",
"end",
"<",
"other_filth",
".",
"beg",
"or",
"other_filth",
".",
"end",
"<",
"self",
".",
"beg",
":",
"raise",
"exceptions",
".",
"FilthMergeError",
"(",
"\"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)\"",
"%",
"(",
"self",
".",
"beg",
",",
"self",
".",
"end",
",",
"other_filth",
".",
"beg",
",",
"other_filth",
".",
"end",
")",
")",
"# get the text over lap correct",
"if",
"self",
".",
"beg",
"<",
"other_filth",
".",
"beg",
":",
"first",
"=",
"self",
"second",
"=",
"other_filth",
"else",
":",
"second",
"=",
"self",
"first",
"=",
"other_filth",
"end_offset",
"=",
"second",
".",
"end",
"-",
"first",
".",
"end",
"if",
"end_offset",
">",
"0",
":",
"self",
".",
"text",
"=",
"first",
".",
"text",
"+",
"second",
".",
"text",
"[",
"-",
"end_offset",
":",
"]",
"# update the beg/end strings",
"self",
".",
"beg",
"=",
"min",
"(",
"self",
".",
"beg",
",",
"other_filth",
".",
"beg",
")",
"self",
".",
"end",
"=",
"max",
"(",
"self",
".",
"end",
",",
"other_filth",
".",
"end",
")",
"if",
"self",
".",
"end",
"-",
"self",
".",
"beg",
"!=",
"len",
"(",
"self",
".",
"text",
")",
":",
"raise",
"exceptions",
".",
"FilthMergeError",
"(",
"\"text length isn't consistent\"",
")",
"# update the placeholder",
"self",
".",
"filths",
".",
"append",
"(",
"other_filth",
")",
"self",
".",
"_placeholder",
"=",
"'+'",
".",
"join",
"(",
"[",
"filth",
".",
"type",
"for",
"filth",
"in",
"self",
".",
"filths",
"]",
")"
] | this updates the bounds, text and placeholder for the merged
filth | [
"this",
"updates",
"the",
"bounds",
"text",
"and",
"placeholder",
"for",
"the",
"merged",
"filth"
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/base.py#L65-L94 | train |
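A small sketch of the merge behaviour implemented by _update_content above: two overlapping filths collapse into one span whose bounds and text are recomputed. It assumes the base Filth constructor accepts beg, end and text keyword arguments and that Filth.merge builds a MergedFilth; the concrete strings are invented.

from scrubadub.filth.base import Filth

# two overlapping spans over the text "john smith janet" (positions 0-16)
a = Filth(beg=0, end=11, text=u'john smith ')
b = Filth(beg=5, end=16, text=u'smith janet')

merged = a.merge(b)  # returns a MergedFilth that runs _update_content
print(merged.beg, merged.end, merged.text)  # 0 16 'john smith janet', per the bounds/text logic above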
datascopeanalytics/scrubadub | scrubadub/scrubbers.py | Scrubber.add_detector | def add_detector(self, detector_cls):
"""Add a ``Detector`` to scrubadub"""
if not issubclass(detector_cls, detectors.base.Detector):
raise TypeError((
'"%(detector_cls)s" is not a subclass of Detector'
) % locals())
# TODO: should add tests to make sure filth_cls is actually a proper
# filth_cls
name = detector_cls.filth_cls.type
if name in self._detectors:
raise KeyError((
'can not add Detector "%(name)s"---it already exists. '
'Try removing it first.'
) % locals())
self._detectors[name] = detector_cls() | python | def add_detector(self, detector_cls):
"""Add a ``Detector`` to scrubadub"""
if not issubclass(detector_cls, detectors.base.Detector):
raise TypeError((
'"%(detector_cls)s" is not a subclass of Detector'
) % locals())
# TODO: should add tests to make sure filth_cls is actually a proper
# filth_cls
name = detector_cls.filth_cls.type
if name in self._detectors:
raise KeyError((
'can not add Detector "%(name)s"---it already exists. '
'Try removing it first.'
) % locals())
self._detectors[name] = detector_cls() | [
"def",
"add_detector",
"(",
"self",
",",
"detector_cls",
")",
":",
"if",
"not",
"issubclass",
"(",
"detector_cls",
",",
"detectors",
".",
"base",
".",
"Detector",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'\"%(detector_cls)s\" is not a subclass of Detector'",
")",
"%",
"locals",
"(",
")",
")",
"# TODO: should add tests to make sure filth_cls is actually a proper",
"# filth_cls",
"name",
"=",
"detector_cls",
".",
"filth_cls",
".",
"type",
"if",
"name",
"in",
"self",
".",
"_detectors",
":",
"raise",
"KeyError",
"(",
"(",
"'can not add Detector \"%(name)s\"---it already exists. '",
"'Try removing it first.'",
")",
"%",
"locals",
"(",
")",
")",
"self",
".",
"_detectors",
"[",
"name",
"]",
"=",
"detector_cls",
"(",
")"
] | Add a ``Detector`` to scrubadub | [
"Add",
"a",
"Detector",
"to",
"scrubadub"
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L24-L38 | train |
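A hedged sketch of registering a custom detector through the add_detector hook above. Only the interface visible in this file is assumed: a Detector subclass exposing a filth_cls whose type names it, plus an iter_filth generator yielding Filth instances. The TicketFilth/TicketDetector names and the regex are made up.

import re

import scrubadub
from scrubadub.detectors.base import Detector
from scrubadub.filth.base import Filth


class TicketFilth(Filth):
    type = 'ticket'


class TicketDetector(Detector):
    filth_cls = TicketFilth

    def iter_filth(self, text):
        # yield one TicketFilth per "TICKET-<digits>" occurrence
        for match in re.finditer(r'TICKET-\d+', text):
            yield TicketFilth(beg=match.start(), end=match.end(), text=match.group())


scrubber = scrubadub.Scrubber()
scrubber.add_detector(TicketDetector)          # registered under the 'ticket' type
print(scrubber.clean(u'escalated in TICKET-1234 yesterday'))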
datascopeanalytics/scrubadub | scrubadub/scrubbers.py | Scrubber.clean | def clean(self, text, **kwargs):
"""This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
"""
if sys.version_info < (3, 0):
# Only in Python 2. In 3 every string is a Python 2 unicode
if not isinstance(text, unicode):
raise exceptions.UnicodeRequired
clean_chunks = []
filth = Filth()
for next_filth in self.iter_filth(text):
clean_chunks.append(text[filth.end:next_filth.beg])
clean_chunks.append(next_filth.replace_with(**kwargs))
filth = next_filth
clean_chunks.append(text[filth.end:])
return u''.join(clean_chunks) | python | def clean(self, text, **kwargs):
"""This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
"""
if sys.version_info < (3, 0):
# Only in Python 2. In 3 every string is a Python 2 unicode
if not isinstance(text, unicode):
raise exceptions.UnicodeRequired
clean_chunks = []
filth = Filth()
for next_filth in self.iter_filth(text):
clean_chunks.append(text[filth.end:next_filth.beg])
clean_chunks.append(next_filth.replace_with(**kwargs))
filth = next_filth
clean_chunks.append(text[filth.end:])
return u''.join(clean_chunks) | [
"def",
"clean",
"(",
"self",
",",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"# Only in Python 2. In 3 every string is a Python 2 unicode",
"if",
"not",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"raise",
"exceptions",
".",
"UnicodeRequired",
"clean_chunks",
"=",
"[",
"]",
"filth",
"=",
"Filth",
"(",
")",
"for",
"next_filth",
"in",
"self",
".",
"iter_filth",
"(",
"text",
")",
":",
"clean_chunks",
".",
"append",
"(",
"text",
"[",
"filth",
".",
"end",
":",
"next_filth",
".",
"beg",
"]",
")",
"clean_chunks",
".",
"append",
"(",
"next_filth",
".",
"replace_with",
"(",
"*",
"*",
"kwargs",
")",
")",
"filth",
"=",
"next_filth",
"clean_chunks",
".",
"append",
"(",
"text",
"[",
"filth",
".",
"end",
":",
"]",
")",
"return",
"u''",
".",
"join",
"(",
"clean_chunks",
")"
] | This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned. | [
"This",
"is",
"the",
"master",
"method",
"that",
"cleans",
"all",
"of",
"the",
"filth",
"out",
"of",
"the",
"dirty",
"dirty",
"text",
".",
"All",
"keyword",
"arguments",
"to",
"this",
"function",
"are",
"passed",
"through",
"to",
"the",
"Filth",
".",
"replace_with",
"method",
"to",
"fine",
"-",
"tune",
"how",
"the",
"Filth",
"is",
"cleaned",
"."
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L44-L62 | train |
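Minimal usage of the clean method above; in Python 2 the input must already be unicode, otherwise UnicodeRequired is raised, as the version check shows. The module-level scrubadub.clean() wrapper is assumed to delegate to a default Scrubber; the example text is invented.

import scrubadub

dirty = u'contact John Doe at john.doe@example.com'
print(scrubadub.Scrubber().clean(dirty))   # explicit scrubber
print(scrubadub.clean(dirty))              # assumed convenience wrapper, same result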
datascopeanalytics/scrubadub | scrubadub/scrubbers.py | Scrubber.iter_filth | def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
if not all_filths:
            return
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth | python | def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
if not all_filths:
            return
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth | [
"def",
"iter_filth",
"(",
"self",
",",
"text",
")",
":",
"# currently doing this by aggregating all_filths and then sorting",
"# inline instead of with a Filth.__cmp__ method, which is apparently",
"# much slower http://stackoverflow.com/a/988728/564709",
"#",
"# NOTE: we could probably do this in a more efficient way by iterating",
"# over all detectors simultaneously. just trying to get something",
"# working right now and we can worry about efficiency later",
"all_filths",
"=",
"[",
"]",
"for",
"detector",
"in",
"self",
".",
"_detectors",
".",
"values",
"(",
")",
":",
"for",
"filth",
"in",
"detector",
".",
"iter_filth",
"(",
"text",
")",
":",
"if",
"not",
"isinstance",
"(",
"filth",
",",
"Filth",
")",
":",
"raise",
"TypeError",
"(",
"'iter_filth must always yield Filth'",
")",
"all_filths",
".",
"append",
"(",
"filth",
")",
"# Sort by start position. If two filths start in the same place then",
"# return the longer one first",
"all_filths",
".",
"sort",
"(",
"key",
"=",
"lambda",
"f",
":",
"(",
"f",
".",
"beg",
",",
"-",
"f",
".",
"end",
")",
")",
"# this is where the Scrubber does its hard work and merges any",
"# overlapping filths.",
"if",
"not",
"all_filths",
":",
"raise",
"StopIteration",
"filth",
"=",
"all_filths",
"[",
"0",
"]",
"for",
"next_filth",
"in",
"all_filths",
"[",
"1",
":",
"]",
":",
"if",
"filth",
".",
"end",
"<",
"next_filth",
".",
"beg",
":",
"yield",
"filth",
"filth",
"=",
"next_filth",
"else",
":",
"filth",
"=",
"filth",
".",
"merge",
"(",
"next_filth",
")",
"yield",
"filth"
] | Iterate over the different types of filth that can exist. | [
"Iterate",
"over",
"the",
"different",
"types",
"of",
"filth",
"that",
"can",
"exist",
"."
] | 914bda49a16130b44af43df6a2f84755477c407c | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L64-L96 | train |
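iter_filth can also be used directly to audit what would be scrubbed without changing the text, since each yielded filth carries type, beg, end and text; a short sketch with an invented input:

import scrubadub

text = u'mail jane@example.com or call her tomorrow'
for filth in scrubadub.Scrubber().iter_filth(text):
    # filths arrive sorted by position, with overlapping ones already merged
    print(filth.type, filth.beg, filth.end, filth.text)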
terrycain/aioboto3 | aioboto3/s3/inject.py | download_file | async def download_file(self, Bucket, Key, Filename, ExtraArgs=None, Callback=None, Config=None):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'wb') as open_file:
await download_fileobj(self, Bucket, Key, open_file, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) | python | async def download_file(self, Bucket, Key, Filename, ExtraArgs=None, Callback=None, Config=None):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'wb') as open_file:
await download_fileobj(self, Bucket, Key, open_file, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) | [
"async",
"def",
"download_file",
"(",
"self",
",",
"Bucket",
",",
"Key",
",",
"Filename",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"with",
"open",
"(",
"Filename",
",",
"'wb'",
")",
"as",
"open_file",
":",
"await",
"download_fileobj",
"(",
"self",
",",
"Bucket",
",",
"Key",
",",
"open_file",
",",
"ExtraArgs",
"=",
"ExtraArgs",
",",
"Callback",
"=",
"Callback",
",",
"Config",
"=",
"Config",
")"
] | Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized. | [
"Download",
"an",
"S3",
"object",
"to",
"a",
"file",
"."
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L17-L30 | train |
terrycain/aioboto3 | aioboto3/s3/inject.py | download_fileobj | async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
try:
resp = await self.get_object(Bucket=Bucket, Key=Key)
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
# Convert to 404 so it looks the same when boto3.download_file fails
raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
raise
body = resp['Body']
while True:
data = await body.read(4096)
if data == b'':
break
if Callback:
try:
Callback(len(data))
except: # noqa: E722
pass
Fileobj.write(data)
await asyncio.sleep(0.0) | python | async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
try:
resp = await self.get_object(Bucket=Bucket, Key=Key)
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
# Convert to 404 so it looks the same when boto3.download_file fails
raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
raise
body = resp['Body']
while True:
data = await body.read(4096)
if data == b'':
break
if Callback:
try:
Callback(len(data))
except: # noqa: E722
pass
Fileobj.write(data)
await asyncio.sleep(0.0) | [
"async",
"def",
"download_fileobj",
"(",
"self",
",",
"Bucket",
",",
"Key",
",",
"Fileobj",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"try",
":",
"resp",
"=",
"await",
"self",
".",
"get_object",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
")",
"except",
"ClientError",
"as",
"err",
":",
"if",
"err",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Code'",
"]",
"==",
"'NoSuchKey'",
":",
"# Convert to 404 so it looks the same when boto3.download_file fails",
"raise",
"ClientError",
"(",
"{",
"'Error'",
":",
"{",
"'Code'",
":",
"'404'",
",",
"'Message'",
":",
"'Not Found'",
"}",
"}",
",",
"'HeadObject'",
")",
"raise",
"body",
"=",
"resp",
"[",
"'Body'",
"]",
"while",
"True",
":",
"data",
"=",
"await",
"body",
".",
"read",
"(",
"4096",
")",
"if",
"data",
"==",
"b''",
":",
"break",
"if",
"Callback",
":",
"try",
":",
"Callback",
"(",
"len",
"(",
"data",
")",
")",
"except",
":",
"# noqa: E722",
"pass",
"Fileobj",
".",
"write",
"(",
"data",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"0.0",
")"
] | Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download. | [
"Download",
"an",
"object",
"from",
"S3",
"to",
"a",
"file",
"-",
"like",
"object",
"."
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L33-L95 | train |
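A hedged usage sketch for the two download helpers above, assuming the aioboto3 client factory of this era can be used as an async context manager; bucket, key and paths are invented. Note that the Callback here receives the size of each downloaded chunk rather than a running total.

import asyncio

import aioboto3


async def main():
    async with aioboto3.client('s3', region_name='eu-west-1') as s3:
        # file-object variant
        with open('/tmp/hello.txt', 'wb') as fileobj:
            await s3.download_fileobj('mybucket', 'hello.txt', fileobj,
                                      Callback=lambda n: print(n, 'bytes in this chunk'))
        # filename variant, which wraps the same code path
        await s3.download_file('mybucket', 'hello.txt', '/tmp/hello_again.txt')


asyncio.get_event_loop().run_until_complete(main())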
terrycain/aioboto3 | aioboto3/s3/inject.py | upload_fileobj | async def upload_fileobj(self, Fileobj: BinaryIO, Bucket: str, Key: str, ExtraArgs: Optional[Dict[str, Any]] = None,
Callback: Optional[Callable[[int], None]] = None,
Config: Optional[S3TransferConfig] = None):
"""Upload a file-like object to S3.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'rb') as data:
s3.upload_fileobj(data, 'mybucket', 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
if not ExtraArgs:
ExtraArgs = {}
# I was debating setting up a queue etc...
    # If it's too slow I'll then be bothered
multipart_chunksize = 8388608 if Config is None else Config.multipart_chunksize
io_chunksize = 262144 if Config is None else Config.io_chunksize
# max_concurrency = 10 if Config is None else Config.max_concurrency
# max_io_queue = 100 if config is None else Config.max_io_queue
# Start multipart upload
resp = await self.create_multipart_upload(Bucket=Bucket, Key=Key, **ExtraArgs)
upload_id = resp['UploadId']
part = 0
parts = []
running = True
sent_bytes = 0
try:
while running:
part += 1
multipart_payload = b''
while len(multipart_payload) < multipart_chunksize:
if asyncio.iscoroutinefunction(Fileobj.read): # handles if we pass in aiofiles obj
data = await Fileobj.read(io_chunksize)
else:
data = Fileobj.read(io_chunksize)
if data == b'': # End of file
running = False
break
multipart_payload += data
# Submit part to S3
resp = await self.upload_part(
Body=multipart_payload,
Bucket=Bucket,
Key=Key,
PartNumber=part,
UploadId=upload_id
)
parts.append({'ETag': resp['ETag'], 'PartNumber': part})
sent_bytes += len(multipart_payload)
try:
Callback(sent_bytes) # Attempt to call the callback, if it fails, ignore, if no callback, ignore
except: # noqa: E722
pass
# By now the uploads must have been done
await self.complete_multipart_upload(
Bucket=Bucket,
Key=Key,
UploadId=upload_id,
MultipartUpload={'Parts': parts}
)
except: # noqa: E722
# Cancel multipart upload
await self.abort_multipart_upload(
Bucket=Bucket,
Key=Key,
UploadId=upload_id
)
raise | python | async def upload_fileobj(self, Fileobj: BinaryIO, Bucket: str, Key: str, ExtraArgs: Optional[Dict[str, Any]] = None,
Callback: Optional[Callable[[int], None]] = None,
Config: Optional[S3TransferConfig] = None):
"""Upload a file-like object to S3.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'rb') as data:
s3.upload_fileobj(data, 'mybucket', 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
if not ExtraArgs:
ExtraArgs = {}
# I was debating setting up a queue etc...
    # If it's too slow I'll then be bothered
multipart_chunksize = 8388608 if Config is None else Config.multipart_chunksize
io_chunksize = 262144 if Config is None else Config.io_chunksize
# max_concurrency = 10 if Config is None else Config.max_concurrency
# max_io_queue = 100 if config is None else Config.max_io_queue
# Start multipart upload
resp = await self.create_multipart_upload(Bucket=Bucket, Key=Key, **ExtraArgs)
upload_id = resp['UploadId']
part = 0
parts = []
running = True
sent_bytes = 0
try:
while running:
part += 1
multipart_payload = b''
while len(multipart_payload) < multipart_chunksize:
if asyncio.iscoroutinefunction(Fileobj.read): # handles if we pass in aiofiles obj
data = await Fileobj.read(io_chunksize)
else:
data = Fileobj.read(io_chunksize)
if data == b'': # End of file
running = False
break
multipart_payload += data
# Submit part to S3
resp = await self.upload_part(
Body=multipart_payload,
Bucket=Bucket,
Key=Key,
PartNumber=part,
UploadId=upload_id
)
parts.append({'ETag': resp['ETag'], 'PartNumber': part})
sent_bytes += len(multipart_payload)
try:
Callback(sent_bytes) # Attempt to call the callback, if it fails, ignore, if no callback, ignore
except: # noqa: E722
pass
# By now the uploads must have been done
await self.complete_multipart_upload(
Bucket=Bucket,
Key=Key,
UploadId=upload_id,
MultipartUpload={'Parts': parts}
)
except: # noqa: E722
# Cancel multipart upload
await self.abort_multipart_upload(
Bucket=Bucket,
Key=Key,
UploadId=upload_id
)
raise | [
"async",
"def",
"upload_fileobj",
"(",
"self",
",",
"Fileobj",
":",
"BinaryIO",
",",
"Bucket",
":",
"str",
",",
"Key",
":",
"str",
",",
"ExtraArgs",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"None",
",",
"Callback",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"int",
"]",
",",
"None",
"]",
"]",
"=",
"None",
",",
"Config",
":",
"Optional",
"[",
"S3TransferConfig",
"]",
"=",
"None",
")",
":",
"if",
"not",
"ExtraArgs",
":",
"ExtraArgs",
"=",
"{",
"}",
"# I was debating setting up a queue etc...",
"# If its too slow I'll then be bothered",
"multipart_chunksize",
"=",
"8388608",
"if",
"Config",
"is",
"None",
"else",
"Config",
".",
"multipart_chunksize",
"io_chunksize",
"=",
"262144",
"if",
"Config",
"is",
"None",
"else",
"Config",
".",
"io_chunksize",
"# max_concurrency = 10 if Config is None else Config.max_concurrency",
"# max_io_queue = 100 if config is None else Config.max_io_queue",
"# Start multipart upload",
"resp",
"=",
"await",
"self",
".",
"create_multipart_upload",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"*",
"*",
"ExtraArgs",
")",
"upload_id",
"=",
"resp",
"[",
"'UploadId'",
"]",
"part",
"=",
"0",
"parts",
"=",
"[",
"]",
"running",
"=",
"True",
"sent_bytes",
"=",
"0",
"try",
":",
"while",
"running",
":",
"part",
"+=",
"1",
"multipart_payload",
"=",
"b''",
"while",
"len",
"(",
"multipart_payload",
")",
"<",
"multipart_chunksize",
":",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"Fileobj",
".",
"read",
")",
":",
"# handles if we pass in aiofiles obj",
"data",
"=",
"await",
"Fileobj",
".",
"read",
"(",
"io_chunksize",
")",
"else",
":",
"data",
"=",
"Fileobj",
".",
"read",
"(",
"io_chunksize",
")",
"if",
"data",
"==",
"b''",
":",
"# End of file",
"running",
"=",
"False",
"break",
"multipart_payload",
"+=",
"data",
"# Submit part to S3",
"resp",
"=",
"await",
"self",
".",
"upload_part",
"(",
"Body",
"=",
"multipart_payload",
",",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"PartNumber",
"=",
"part",
",",
"UploadId",
"=",
"upload_id",
")",
"parts",
".",
"append",
"(",
"{",
"'ETag'",
":",
"resp",
"[",
"'ETag'",
"]",
",",
"'PartNumber'",
":",
"part",
"}",
")",
"sent_bytes",
"+=",
"len",
"(",
"multipart_payload",
")",
"try",
":",
"Callback",
"(",
"sent_bytes",
")",
"# Attempt to call the callback, if it fails, ignore, if no callback, ignore",
"except",
":",
"# noqa: E722",
"pass",
"# By now the uploads must have been done",
"await",
"self",
".",
"complete_multipart_upload",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"UploadId",
"=",
"upload_id",
",",
"MultipartUpload",
"=",
"{",
"'Parts'",
":",
"parts",
"}",
")",
"except",
":",
"# noqa: E722",
"# Cancel multipart upload",
"await",
"self",
".",
"abort_multipart_upload",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"UploadId",
"=",
"upload_id",
")",
"raise"
] | Upload a file-like object to S3.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'rb') as data:
s3.upload_fileobj(data, 'mybucket', 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload. | [
"Upload",
"a",
"file",
"-",
"like",
"object",
"to",
"S3",
"."
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L98-L203 | train |
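The matching upload sketch, equally hedged on the client factory; upload_fileobj accepts either a plain binary file object or an aiofiles handle, since its read coroutine is awaited above, and the Callback receives the cumulative byte count after each uploaded part.

import asyncio

import aioboto3
import aiofiles


async def main():
    async with aioboto3.client('s3', region_name='eu-west-1') as s3:
        async with aiofiles.open('/tmp/big_file.bin', 'rb') as fileobj:
            await s3.upload_fileobj(fileobj, 'mybucket', 'big_file.bin',
                                    Callback=lambda sent: print(sent, 'bytes sent so far'))


asyncio.get_event_loop().run_until_complete(main())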
terrycain/aioboto3 | aioboto3/s3/inject.py | upload_file | async def upload_file(self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'rb') as open_file:
await upload_fileobj(self, open_file, Bucket, Key, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) | python | async def upload_file(self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'rb') as open_file:
await upload_fileobj(self, open_file, Bucket, Key, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) | [
"async",
"def",
"upload_file",
"(",
"self",
",",
"Filename",
",",
"Bucket",
",",
"Key",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"with",
"open",
"(",
"Filename",
",",
"'rb'",
")",
"as",
"open_file",
":",
"await",
"upload_fileobj",
"(",
"self",
",",
"open_file",
",",
"Bucket",
",",
"Key",
",",
"ExtraArgs",
"=",
"ExtraArgs",
",",
"Callback",
"=",
"Callback",
",",
"Config",
"=",
"Config",
")"
] | Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized. | [
"Upload",
"a",
"file",
"to",
"an",
"S3",
"object",
"."
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L206-L219 | train |
terrycain/aioboto3 | aioboto3/resources.py | AIOBoto3ResourceFactory._create_action | def _create_action(factory_self, action_model, resource_name,
service_context, is_load=False):
"""
Creates a new method which makes a request to the underlying
AWS service.
"""
        # Create the action in this closure but before the ``do_action``
# method below is invoked, which allows instances of the resource
# to share the ServiceAction instance.
action = AIOServiceAction(
action_model, factory=factory_self,
service_context=service_context
)
# A resource's ``load`` method is special because it sets
# values on the resource instead of returning the response.
if is_load:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
# response = action(self, *args, **kwargs)
response = await action.async_call(self, *args, **kwargs)
self.meta.data = response
            # Create the docstring for the load/reload methods.
lazy_docstring = docstring.LoadReloadDocstring(
action_name=action_model.name,
resource_name=resource_name,
event_emitter=factory_self._emitter,
load_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
else:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
response = await action.async_call(self, *args, **kwargs)
if hasattr(self, 'load'):
# Clear cached data. It will be reloaded the next
# time that an attribute is accessed.
# TODO: Make this configurable in the future?
self.meta.data = None
return response
lazy_docstring = docstring.ActionDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
action_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
do_action.__name__ = str(action_model.name)
do_action.__doc__ = lazy_docstring
return do_action | python | def _create_action(factory_self, action_model, resource_name,
service_context, is_load=False):
"""
Creates a new method which makes a request to the underlying
AWS service.
"""
        # Create the action in this closure but before the ``do_action``
# method below is invoked, which allows instances of the resource
# to share the ServiceAction instance.
action = AIOServiceAction(
action_model, factory=factory_self,
service_context=service_context
)
# A resource's ``load`` method is special because it sets
# values on the resource instead of returning the response.
if is_load:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
# response = action(self, *args, **kwargs)
response = await action.async_call(self, *args, **kwargs)
self.meta.data = response
            # Create the docstring for the load/reload methods.
lazy_docstring = docstring.LoadReloadDocstring(
action_name=action_model.name,
resource_name=resource_name,
event_emitter=factory_self._emitter,
load_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
else:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
response = await action.async_call(self, *args, **kwargs)
if hasattr(self, 'load'):
# Clear cached data. It will be reloaded the next
# time that an attribute is accessed.
# TODO: Make this configurable in the future?
self.meta.data = None
return response
lazy_docstring = docstring.ActionDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
action_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
do_action.__name__ = str(action_model.name)
do_action.__doc__ = lazy_docstring
return do_action | [
"def",
"_create_action",
"(",
"factory_self",
",",
"action_model",
",",
"resource_name",
",",
"service_context",
",",
"is_load",
"=",
"False",
")",
":",
"# Create the action in in this closure but before the ``do_action``",
"# method below is invoked, which allows instances of the resource",
"# to share the ServiceAction instance.",
"action",
"=",
"AIOServiceAction",
"(",
"action_model",
",",
"factory",
"=",
"factory_self",
",",
"service_context",
"=",
"service_context",
")",
"# A resource's ``load`` method is special because it sets",
"# values on the resource instead of returning the response.",
"if",
"is_load",
":",
"# We need a new method here because we want access to the",
"# instance via ``self``.",
"async",
"def",
"do_action",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# response = action(self, *args, **kwargs)",
"response",
"=",
"await",
"action",
".",
"async_call",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"meta",
".",
"data",
"=",
"response",
"# Create the docstring for the load/reload mehtods.",
"lazy_docstring",
"=",
"docstring",
".",
"LoadReloadDocstring",
"(",
"action_name",
"=",
"action_model",
".",
"name",
",",
"resource_name",
"=",
"resource_name",
",",
"event_emitter",
"=",
"factory_self",
".",
"_emitter",
",",
"load_model",
"=",
"action_model",
",",
"service_model",
"=",
"service_context",
".",
"service_model",
",",
"include_signature",
"=",
"False",
")",
"else",
":",
"# We need a new method here because we want access to the",
"# instance via ``self``.",
"async",
"def",
"do_action",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"await",
"action",
".",
"async_call",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"hasattr",
"(",
"self",
",",
"'load'",
")",
":",
"# Clear cached data. It will be reloaded the next",
"# time that an attribute is accessed.",
"# TODO: Make this configurable in the future?",
"self",
".",
"meta",
".",
"data",
"=",
"None",
"return",
"response",
"lazy_docstring",
"=",
"docstring",
".",
"ActionDocstring",
"(",
"resource_name",
"=",
"resource_name",
",",
"event_emitter",
"=",
"factory_self",
".",
"_emitter",
",",
"action_model",
"=",
"action_model",
",",
"service_model",
"=",
"service_context",
".",
"service_model",
",",
"include_signature",
"=",
"False",
")",
"do_action",
".",
"__name__",
"=",
"str",
"(",
"action_model",
".",
"name",
")",
"do_action",
".",
"__doc__",
"=",
"lazy_docstring",
"return",
"do_action"
] | Creates a new method which makes a request to the underlying
AWS service. | [
"Creates",
"a",
"new",
"method",
"which",
"makes",
"a",
"request",
"to",
"the",
"underlying",
"AWS",
"service",
"."
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/resources.py#L183-L240 | train |
terrycain/aioboto3 | aioboto3/s3/cse.py | AsymmetricCryptoContext.from_der_private_key | def from_der_private_key(data: bytes, password: Optional[str] = None) -> _RSAPrivateKey:
"""
Convert private key in DER encoding to a Private key object
:param data: private key bytes
:param password: password the private key is encrypted with
"""
return serialization.load_der_private_key(data, password, default_backend()) | python | def from_der_private_key(data: bytes, password: Optional[str] = None) -> _RSAPrivateKey:
"""
Convert private key in DER encoding to a Private key object
:param data: private key bytes
:param password: password the private key is encrypted with
"""
return serialization.load_der_private_key(data, password, default_backend()) | [
"def",
"from_der_private_key",
"(",
"data",
":",
"bytes",
",",
"password",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"_RSAPrivateKey",
":",
"return",
"serialization",
".",
"load_der_private_key",
"(",
"data",
",",
"password",
",",
"default_backend",
"(",
")",
")"
] | Convert private key in DER encoding to a Private key object
:param data: private key bytes
:param password: password the private key is encrypted with | [
"Convert",
"private",
"key",
"in",
"DER",
"encoding",
"to",
"a",
"Private",
"key",
"object"
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/cse.py#L149-L156 | train |
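A round-trip sketch for the helper above using only the cryptography package: generate an RSA key, serialize it to DER, then rebuild it with from_der_private_key. Handing the key to an AsymmetricCryptoContext instance on the last line assumes a private_key constructor argument that is not shown here.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

from aioboto3.s3.cse import AsymmetricCryptoContext

key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                               backend=default_backend())
der_bytes = key.private_bytes(encoding=serialization.Encoding.DER,
                              format=serialization.PrivateFormat.PKCS8,
                              encryption_algorithm=serialization.NoEncryption())

loaded = AsymmetricCryptoContext.from_der_private_key(der_bytes)   # password defaults to None
context = AsymmetricCryptoContext(private_key=loaded)              # assumed constructor signature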
terrycain/aioboto3 | aioboto3/s3/cse.py | S3CSE.get_object | async def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
"""
S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object
"""
if self._s3_client is None:
await self.setup()
# Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries
# 9223372036854775806 is 8EiB so I have no issue with hardcoding it.
# We pass the actual start, desired start and desired end to the decrypt function so that it can
# generate the correct IV's for starting decryption at that block and then chop off the start and end of the
# AES block so it matches what the user is expecting.
_range = kwargs.get('Range')
actual_range_start = None
desired_range_start = None
desired_range_end = None
if _range:
range_match = RANGE_REGEX.match(_range)
if not range_match:
raise ValueError('Dont understand this range value {0}'.format(_range))
desired_range_start = int(range_match.group(1))
desired_range_end = range_match.group(2)
if desired_range_end is None:
desired_range_end = 9223372036854775806
else:
desired_range_end = int(desired_range_end)
actual_range_start, actual_range_end = _get_adjusted_crypto_range(desired_range_start, desired_range_end)
# Update range with actual start_end
kwargs['Range'] = 'bytes={0}-{1}'.format(actual_range_start, actual_range_end)
s3_response = await self._s3_client.get_object(Bucket=Bucket, Key=Key, **kwargs)
file_data = await s3_response['Body'].read()
metadata = s3_response['Metadata']
whole_file_length = int(s3_response['ResponseMetadata']['HTTPHeaders']['content-length'])
if 'x-amz-key' not in metadata and 'x-amz-key-v2' not in metadata:
# No crypto
return s3_response
if 'x-amz-key' in metadata:
# Crypto V1
body = await self._decrypt_v1(file_data, metadata, actual_range_start)
else:
# Crypto V2
body = await self._decrypt_v2(file_data, metadata, whole_file_length,
actual_range_start, desired_range_start,
desired_range_end)
s3_response['Body'] = DummyAIOFile(body)
return s3_response | python | async def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
"""
S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object
"""
if self._s3_client is None:
await self.setup()
# Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries
# 9223372036854775806 is 8EiB so I have no issue with hardcoding it.
# We pass the actual start, desired start and desired end to the decrypt function so that it can
# generate the correct IV's for starting decryption at that block and then chop off the start and end of the
# AES block so it matches what the user is expecting.
_range = kwargs.get('Range')
actual_range_start = None
desired_range_start = None
desired_range_end = None
if _range:
range_match = RANGE_REGEX.match(_range)
if not range_match:
raise ValueError('Dont understand this range value {0}'.format(_range))
desired_range_start = int(range_match.group(1))
desired_range_end = range_match.group(2)
if desired_range_end is None:
desired_range_end = 9223372036854775806
else:
desired_range_end = int(desired_range_end)
actual_range_start, actual_range_end = _get_adjusted_crypto_range(desired_range_start, desired_range_end)
# Update range with actual start_end
kwargs['Range'] = 'bytes={0}-{1}'.format(actual_range_start, actual_range_end)
s3_response = await self._s3_client.get_object(Bucket=Bucket, Key=Key, **kwargs)
file_data = await s3_response['Body'].read()
metadata = s3_response['Metadata']
whole_file_length = int(s3_response['ResponseMetadata']['HTTPHeaders']['content-length'])
if 'x-amz-key' not in metadata and 'x-amz-key-v2' not in metadata:
# No crypto
return s3_response
if 'x-amz-key' in metadata:
# Crypto V1
body = await self._decrypt_v1(file_data, metadata, actual_range_start)
else:
# Crypto V2
body = await self._decrypt_v2(file_data, metadata, whole_file_length,
actual_range_start, desired_range_start,
desired_range_end)
s3_response['Body'] = DummyAIOFile(body)
return s3_response | [
"async",
"def",
"get_object",
"(",
"self",
",",
"Bucket",
":",
"str",
",",
"Key",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"if",
"self",
".",
"_s3_client",
"is",
"None",
":",
"await",
"self",
".",
"setup",
"(",
")",
"# Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries",
"# 9223372036854775806 is 8EiB so I have no issue with hardcoding it.",
"# We pass the actual start, desired start and desired end to the decrypt function so that it can",
"# generate the correct IV's for starting decryption at that block and then chop off the start and end of the",
"# AES block so it matches what the user is expecting.",
"_range",
"=",
"kwargs",
".",
"get",
"(",
"'Range'",
")",
"actual_range_start",
"=",
"None",
"desired_range_start",
"=",
"None",
"desired_range_end",
"=",
"None",
"if",
"_range",
":",
"range_match",
"=",
"RANGE_REGEX",
".",
"match",
"(",
"_range",
")",
"if",
"not",
"range_match",
":",
"raise",
"ValueError",
"(",
"'Dont understand this range value {0}'",
".",
"format",
"(",
"_range",
")",
")",
"desired_range_start",
"=",
"int",
"(",
"range_match",
".",
"group",
"(",
"1",
")",
")",
"desired_range_end",
"=",
"range_match",
".",
"group",
"(",
"2",
")",
"if",
"desired_range_end",
"is",
"None",
":",
"desired_range_end",
"=",
"9223372036854775806",
"else",
":",
"desired_range_end",
"=",
"int",
"(",
"desired_range_end",
")",
"actual_range_start",
",",
"actual_range_end",
"=",
"_get_adjusted_crypto_range",
"(",
"desired_range_start",
",",
"desired_range_end",
")",
"# Update range with actual start_end",
"kwargs",
"[",
"'Range'",
"]",
"=",
"'bytes={0}-{1}'",
".",
"format",
"(",
"actual_range_start",
",",
"actual_range_end",
")",
"s3_response",
"=",
"await",
"self",
".",
"_s3_client",
".",
"get_object",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"*",
"*",
"kwargs",
")",
"file_data",
"=",
"await",
"s3_response",
"[",
"'Body'",
"]",
".",
"read",
"(",
")",
"metadata",
"=",
"s3_response",
"[",
"'Metadata'",
"]",
"whole_file_length",
"=",
"int",
"(",
"s3_response",
"[",
"'ResponseMetadata'",
"]",
"[",
"'HTTPHeaders'",
"]",
"[",
"'content-length'",
"]",
")",
"if",
"'x-amz-key'",
"not",
"in",
"metadata",
"and",
"'x-amz-key-v2'",
"not",
"in",
"metadata",
":",
"# No crypto",
"return",
"s3_response",
"if",
"'x-amz-key'",
"in",
"metadata",
":",
"# Crypto V1",
"body",
"=",
"await",
"self",
".",
"_decrypt_v1",
"(",
"file_data",
",",
"metadata",
",",
"actual_range_start",
")",
"else",
":",
"# Crypto V2",
"body",
"=",
"await",
"self",
".",
"_decrypt_v2",
"(",
"file_data",
",",
"metadata",
",",
"whole_file_length",
",",
"actual_range_start",
",",
"desired_range_start",
",",
"desired_range_end",
")",
"s3_response",
"[",
"'Body'",
"]",
"=",
"DummyAIOFile",
"(",
"body",
")",
"return",
"s3_response"
] | S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object | [
"S3",
"GetObject",
".",
"Takes",
"same",
"args",
"as",
"Boto3",
"documentation"
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/cse.py#L330-L389 | train |
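A read-path sketch for get_object above. The KMSCryptoContext and S3CSE constructor arguments, and using S3CSE as an async context manager, are assumptions about this module rather than facts shown here; the decrypted Body is assumed to behave like a normal aioboto3 body with an awaitable read().

import asyncio

from aioboto3.s3.cse import S3CSE, KMSCryptoContext


async def main():
    context = KMSCryptoContext(keyid='alias/my-key')                        # assumed signature
    async with S3CSE(context, s3_client_args={'region_name': 'eu-west-1'}) as s3_cse:
        resp = await s3_cse.get_object(Bucket='mybucket', Key='secret.txt')
        plaintext = await resp['Body'].read()
        print(len(plaintext), 'decrypted bytes')


asyncio.get_event_loop().run_until_complete(main())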
terrycain/aioboto3 | aioboto3/s3/cse.py | S3CSE.put_object | async def put_object(self, Body: Union[bytes, IO], Bucket: str, Key: str, Metadata: Dict = None, **kwargs):
"""
PutObject. Takes same args as Boto3 documentation
Encrypts files
        :param Body: File data
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
"""
if self._s3_client is None:
await self.setup()
if hasattr(Body, 'read'):
if inspect.iscoroutinefunction(Body.read):
Body = await Body.read()
else:
Body = Body.read()
# We do some different V2 stuff if using kms
is_kms = isinstance(self._crypto_context, KMSCryptoContext)
# noinspection PyUnresolvedReferences
authenticated_crypto = is_kms and self._crypto_context.authenticated_encryption
Metadata = Metadata if Metadata is not None else {}
aes_key, matdesc_metadata, key_metadata = await self._crypto_context.get_encryption_aes_key()
if is_kms and authenticated_crypto:
Metadata['x-amz-cek-alg'] = 'AES/GCM/NoPadding'
Metadata['x-amz-tag-len'] = str(AES_BLOCK_SIZE)
iv = os.urandom(12)
# 16byte 128bit authentication tag forced
aesgcm = AESGCM(aes_key)
result = await self._loop.run_in_executor(None, lambda: aesgcm.encrypt(iv, Body, None))
else:
if is_kms: # V1 is always AES/CBC/PKCS5Padding
Metadata['x-amz-cek-alg'] = 'AES/CBC/PKCS5Padding'
iv = os.urandom(16)
padder = PKCS7(AES.block_size).padder()
padded_result = await self._loop.run_in_executor(None, lambda: (padder.update(Body) + padder.finalize()))
aescbc = Cipher(AES(aes_key), CBC(iv), backend=self._backend).encryptor()
result = await self._loop.run_in_executor(None, lambda: (aescbc.update(padded_result) + aescbc.finalize()))
# For all V1 and V2
Metadata['x-amz-unencrypted-content-length'] = str(len(Body))
Metadata['x-amz-iv'] = base64.b64encode(iv).decode()
Metadata['x-amz-matdesc'] = json.dumps(matdesc_metadata)
if is_kms:
Metadata['x-amz-wrap-alg'] = 'kms'
Metadata['x-amz-key-v2'] = key_metadata
else:
Metadata['x-amz-key'] = key_metadata
await self._s3_client.put_object(
Bucket=Bucket,
Key=Key,
Body=result,
Metadata=Metadata,
**kwargs
) | python | async def put_object(self, Body: Union[bytes, IO], Bucket: str, Key: str, Metadata: Dict = None, **kwargs):
"""
PutObject. Takes same args as Boto3 documentation
Encrypts files
        :param Body: File data
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
"""
if self._s3_client is None:
await self.setup()
if hasattr(Body, 'read'):
if inspect.iscoroutinefunction(Body.read):
Body = await Body.read()
else:
Body = Body.read()
# We do some different V2 stuff if using kms
is_kms = isinstance(self._crypto_context, KMSCryptoContext)
# noinspection PyUnresolvedReferences
authenticated_crypto = is_kms and self._crypto_context.authenticated_encryption
Metadata = Metadata if Metadata is not None else {}
aes_key, matdesc_metadata, key_metadata = await self._crypto_context.get_encryption_aes_key()
if is_kms and authenticated_crypto:
Metadata['x-amz-cek-alg'] = 'AES/GCM/NoPadding'
Metadata['x-amz-tag-len'] = str(AES_BLOCK_SIZE)
iv = os.urandom(12)
# 16byte 128bit authentication tag forced
aesgcm = AESGCM(aes_key)
result = await self._loop.run_in_executor(None, lambda: aesgcm.encrypt(iv, Body, None))
else:
if is_kms: # V1 is always AES/CBC/PKCS5Padding
Metadata['x-amz-cek-alg'] = 'AES/CBC/PKCS5Padding'
iv = os.urandom(16)
padder = PKCS7(AES.block_size).padder()
padded_result = await self._loop.run_in_executor(None, lambda: (padder.update(Body) + padder.finalize()))
aescbc = Cipher(AES(aes_key), CBC(iv), backend=self._backend).encryptor()
result = await self._loop.run_in_executor(None, lambda: (aescbc.update(padded_result) + aescbc.finalize()))
# For all V1 and V2
Metadata['x-amz-unencrypted-content-length'] = str(len(Body))
Metadata['x-amz-iv'] = base64.b64encode(iv).decode()
Metadata['x-amz-matdesc'] = json.dumps(matdesc_metadata)
if is_kms:
Metadata['x-amz-wrap-alg'] = 'kms'
Metadata['x-amz-key-v2'] = key_metadata
else:
Metadata['x-amz-key'] = key_metadata
await self._s3_client.put_object(
Bucket=Bucket,
Key=Key,
Body=result,
Metadata=Metadata,
**kwargs
) | [
"async",
"def",
"put_object",
"(",
"self",
",",
"Body",
":",
"Union",
"[",
"bytes",
",",
"IO",
"]",
",",
"Bucket",
":",
"str",
",",
"Key",
":",
"str",
",",
"Metadata",
":",
"Dict",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_s3_client",
"is",
"None",
":",
"await",
"self",
".",
"setup",
"(",
")",
"if",
"hasattr",
"(",
"Body",
",",
"'read'",
")",
":",
"if",
"inspect",
".",
"iscoroutinefunction",
"(",
"Body",
".",
"read",
")",
":",
"Body",
"=",
"await",
"Body",
".",
"read",
"(",
")",
"else",
":",
"Body",
"=",
"Body",
".",
"read",
"(",
")",
"# We do some different V2 stuff if using kms",
"is_kms",
"=",
"isinstance",
"(",
"self",
".",
"_crypto_context",
",",
"KMSCryptoContext",
")",
"# noinspection PyUnresolvedReferences",
"authenticated_crypto",
"=",
"is_kms",
"and",
"self",
".",
"_crypto_context",
".",
"authenticated_encryption",
"Metadata",
"=",
"Metadata",
"if",
"Metadata",
"is",
"not",
"None",
"else",
"{",
"}",
"aes_key",
",",
"matdesc_metadata",
",",
"key_metadata",
"=",
"await",
"self",
".",
"_crypto_context",
".",
"get_encryption_aes_key",
"(",
")",
"if",
"is_kms",
"and",
"authenticated_crypto",
":",
"Metadata",
"[",
"'x-amz-cek-alg'",
"]",
"=",
"'AES/GCM/NoPadding'",
"Metadata",
"[",
"'x-amz-tag-len'",
"]",
"=",
"str",
"(",
"AES_BLOCK_SIZE",
")",
"iv",
"=",
"os",
".",
"urandom",
"(",
"12",
")",
"# 16byte 128bit authentication tag forced",
"aesgcm",
"=",
"AESGCM",
"(",
"aes_key",
")",
"result",
"=",
"await",
"self",
".",
"_loop",
".",
"run_in_executor",
"(",
"None",
",",
"lambda",
":",
"aesgcm",
".",
"encrypt",
"(",
"iv",
",",
"Body",
",",
"None",
")",
")",
"else",
":",
"if",
"is_kms",
":",
"# V1 is always AES/CBC/PKCS5Padding",
"Metadata",
"[",
"'x-amz-cek-alg'",
"]",
"=",
"'AES/CBC/PKCS5Padding'",
"iv",
"=",
"os",
".",
"urandom",
"(",
"16",
")",
"padder",
"=",
"PKCS7",
"(",
"AES",
".",
"block_size",
")",
".",
"padder",
"(",
")",
"padded_result",
"=",
"await",
"self",
".",
"_loop",
".",
"run_in_executor",
"(",
"None",
",",
"lambda",
":",
"(",
"padder",
".",
"update",
"(",
"Body",
")",
"+",
"padder",
".",
"finalize",
"(",
")",
")",
")",
"aescbc",
"=",
"Cipher",
"(",
"AES",
"(",
"aes_key",
")",
",",
"CBC",
"(",
"iv",
")",
",",
"backend",
"=",
"self",
".",
"_backend",
")",
".",
"encryptor",
"(",
")",
"result",
"=",
"await",
"self",
".",
"_loop",
".",
"run_in_executor",
"(",
"None",
",",
"lambda",
":",
"(",
"aescbc",
".",
"update",
"(",
"padded_result",
")",
"+",
"aescbc",
".",
"finalize",
"(",
")",
")",
")",
"# For all V1 and V2",
"Metadata",
"[",
"'x-amz-unencrypted-content-length'",
"]",
"=",
"str",
"(",
"len",
"(",
"Body",
")",
")",
"Metadata",
"[",
"'x-amz-iv'",
"]",
"=",
"base64",
".",
"b64encode",
"(",
"iv",
")",
".",
"decode",
"(",
")",
"Metadata",
"[",
"'x-amz-matdesc'",
"]",
"=",
"json",
".",
"dumps",
"(",
"matdesc_metadata",
")",
"if",
"is_kms",
":",
"Metadata",
"[",
"'x-amz-wrap-alg'",
"]",
"=",
"'kms'",
"Metadata",
"[",
"'x-amz-key-v2'",
"]",
"=",
"key_metadata",
"else",
":",
"Metadata",
"[",
"'x-amz-key'",
"]",
"=",
"key_metadata",
"await",
"self",
".",
"_s3_client",
".",
"put_object",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"Body",
"=",
"result",
",",
"Metadata",
"=",
"Metadata",
",",
"*",
"*",
"kwargs",
")"
] | PutObject. Takes same args as Boto3 documentation
Encrypts files
:param Body: File data
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath) | [
"PutObject",
".",
"Takes",
"same",
"args",
"as",
"Boto3",
"documentation"
] | 0fd192175461f7bb192f3ed9a872591caf8474ac | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/cse.py#L482-L549 | train |
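The matching write-path sketch for put_object, with the same caveats about the constructor arguments; the call itself takes Body, Bucket, Key and optional Metadata exactly as above, and the method adds the x-amz-* envelope metadata before uploading.

import asyncio

from aioboto3.s3.cse import S3CSE, KMSCryptoContext


async def main():
    context = KMSCryptoContext(keyid='alias/my-key',
                               authenticated_encryption=True)               # assumed signature
    async with S3CSE(context, s3_client_args={'region_name': 'eu-west-1'}) as s3_cse:
        await s3_cse.put_object(Body=b'some secret payload',
                                Bucket='mybucket', Key='secret.txt')


asyncio.get_event_loop().run_until_complete(main())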
astrofrog/fast-histogram | fast_histogram/histogram.py | histogram1d | def histogram1d(x, bins, range, weights=None):
"""
Compute a 1D histogram assuming equally spaced bins.
Parameters
----------
x : `~numpy.ndarray`
The position of the points to bin in the 1D histogram
bins : int
The number of bins
range : iterable
The range as a tuple of (xmin, xmax)
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 1D histogram array
"""
nx = bins
if not np.isscalar(bins):
raise TypeError('bins should be an integer')
xmin, xmax = range
if not np.isfinite(xmin):
raise ValueError("xmin should be finite")
if not np.isfinite(xmax):
raise ValueError("xmax should be finite")
if xmax <= xmin:
raise ValueError("xmax should be greater than xmin")
if nx <= 0:
raise ValueError("nx should be strictly positive")
if weights is None:
return _histogram1d(x, nx, xmin, xmax)
else:
return _histogram1d_weighted(x, weights, nx, xmin, xmax) | python | def histogram1d(x, bins, range, weights=None):
"""
Compute a 1D histogram assuming equally spaced bins.
Parameters
----------
x : `~numpy.ndarray`
The position of the points to bin in the 1D histogram
bins : int
The number of bins
range : iterable
The range as a tuple of (xmin, xmax)
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 1D histogram array
"""
nx = bins
if not np.isscalar(bins):
raise TypeError('bins should be an integer')
xmin, xmax = range
if not np.isfinite(xmin):
raise ValueError("xmin should be finite")
if not np.isfinite(xmax):
raise ValueError("xmax should be finite")
if xmax <= xmin:
raise ValueError("xmax should be greater than xmin")
if nx <= 0:
raise ValueError("nx should be strictly positive")
if weights is None:
return _histogram1d(x, nx, xmin, xmax)
else:
return _histogram1d_weighted(x, weights, nx, xmin, xmax) | [
"def",
"histogram1d",
"(",
"x",
",",
"bins",
",",
"range",
",",
"weights",
"=",
"None",
")",
":",
"nx",
"=",
"bins",
"if",
"not",
"np",
".",
"isscalar",
"(",
"bins",
")",
":",
"raise",
"TypeError",
"(",
"'bins should be an integer'",
")",
"xmin",
",",
"xmax",
"=",
"range",
"if",
"not",
"np",
".",
"isfinite",
"(",
"xmin",
")",
":",
"raise",
"ValueError",
"(",
"\"xmin should be finite\"",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"xmax",
")",
":",
"raise",
"ValueError",
"(",
"\"xmax should be finite\"",
")",
"if",
"xmax",
"<=",
"xmin",
":",
"raise",
"ValueError",
"(",
"\"xmax should be greater than xmin\"",
")",
"if",
"nx",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"nx should be strictly positive\"",
")",
"if",
"weights",
"is",
"None",
":",
"return",
"_histogram1d",
"(",
"x",
",",
"nx",
",",
"xmin",
",",
"xmax",
")",
"else",
":",
"return",
"_histogram1d_weighted",
"(",
"x",
",",
"weights",
",",
"nx",
",",
"xmin",
",",
"xmax",
")"
] | Compute a 1D histogram assuming equally spaced bins.
Parameters
----------
x : `~numpy.ndarray`
The position of the points to bin in the 1D histogram
bins : int
The number of bins
range : iterable
The range as a tuple of (xmin, xmax)
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 1D histogram array | [
"Compute",
"a",
"1D",
"histogram",
"assuming",
"equally",
"spaced",
"bins",
"."
] | ace4f2444fba2e21fa3cd9dad966f6b65b60660f | https://github.com/astrofrog/fast-histogram/blob/ace4f2444fba2e21fa3cd9dad966f6b65b60660f/fast_histogram/histogram.py#L15-L58 | train |
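A quick comparison of histogram1d against NumPy for the same fixed bins and range; counts are expected to match except possibly for samples landing exactly on a bin edge, where the two implementations can bin differently.

import numpy as np

from fast_histogram import histogram1d

x = np.random.normal(size=100000)
fast = histogram1d(x, bins=30, range=(-5, 5))
slow, _ = np.histogram(x, bins=30, range=(-5, 5))
print(np.abs(fast - slow).max())   # typically 0.0 for samples strictly inside the range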
astrofrog/fast-histogram | fast_histogram/histogram.py | histogram2d | def histogram2d(x, y, bins, range, weights=None):
"""
Compute a 2D histogram assuming equally spaced bins.
Parameters
----------
x, y : `~numpy.ndarray`
The position of the points to bin in the 2D histogram
bins : int or iterable
The number of bins in each dimension. If given as an integer, the same
number of bins is used for each dimension.
range : iterable
        The range to use in each dimension, as an iterable of value pairs, i.e.
[(xmin, xmax), (ymin, ymax)]
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 2D histogram array
"""
if isinstance(bins, numbers.Integral):
nx = ny = bins
else:
nx, ny = bins
if not np.isscalar(nx) or not np.isscalar(ny):
raise TypeError('bins should be an iterable of two integers')
(xmin, xmax), (ymin, ymax) = range
if not np.isfinite(xmin):
raise ValueError("xmin should be finite")
if not np.isfinite(xmax):
raise ValueError("xmax should be finite")
if not np.isfinite(ymin):
raise ValueError("ymin should be finite")
if not np.isfinite(ymax):
raise ValueError("ymax should be finite")
if xmax <= xmin:
raise ValueError("xmax should be greater than xmin")
if ymax <= ymin:
raise ValueError("xmax should be greater than xmin")
if nx <= 0:
raise ValueError("nx should be strictly positive")
if ny <= 0:
raise ValueError("ny should be strictly positive")
if weights is None:
return _histogram2d(x, y, nx, xmin, xmax, ny, ymin, ymax)
else:
return _histogram2d_weighted(x, y, weights, nx, xmin, xmax, ny, ymin, ymax) | python | def histogram2d(x, y, bins, range, weights=None):
"""
Compute a 2D histogram assuming equally spaced bins.
Parameters
----------
x, y : `~numpy.ndarray`
The position of the points to bin in the 2D histogram
bins : int or iterable
The number of bins in each dimension. If given as an integer, the same
number of bins is used for each dimension.
range : iterable
The range to use in each dimension, as an iterable of value pairs, i.e.
[(xmin, xmax), (ymin, ymax)]
weights : `~numpy.ndarray`
The weights of the points in the 2D histogram
Returns
-------
array : `~numpy.ndarray`
The 2D histogram array
"""
if isinstance(bins, numbers.Integral):
nx = ny = bins
else:
nx, ny = bins
if not np.isscalar(nx) or not np.isscalar(ny):
raise TypeError('bins should be an iterable of two integers')
(xmin, xmax), (ymin, ymax) = range
if not np.isfinite(xmin):
raise ValueError("xmin should be finite")
if not np.isfinite(xmax):
raise ValueError("xmax should be finite")
if not np.isfinite(ymin):
raise ValueError("ymin should be finite")
if not np.isfinite(ymax):
raise ValueError("ymax should be finite")
if xmax <= xmin:
raise ValueError("xmax should be greater than xmin")
if ymax <= ymin:
raise ValueError("xmax should be greater than xmin")
if nx <= 0:
raise ValueError("nx should be strictly positive")
if ny <= 0:
raise ValueError("ny should be strictly positive")
if weights is None:
return _histogram2d(x, y, nx, xmin, xmax, ny, ymin, ymax)
else:
return _histogram2d_weighted(x, y, weights, nx, xmin, xmax, ny, ymin, ymax) | [
"def",
"histogram2d",
"(",
"x",
",",
"y",
",",
"bins",
",",
"range",
",",
"weights",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"bins",
",",
"numbers",
".",
"Integral",
")",
":",
"nx",
"=",
"ny",
"=",
"bins",
"else",
":",
"nx",
",",
"ny",
"=",
"bins",
"if",
"not",
"np",
".",
"isscalar",
"(",
"nx",
")",
"or",
"not",
"np",
".",
"isscalar",
"(",
"ny",
")",
":",
"raise",
"TypeError",
"(",
"'bins should be an iterable of two integers'",
")",
"(",
"xmin",
",",
"xmax",
")",
",",
"(",
"ymin",
",",
"ymax",
")",
"=",
"range",
"if",
"not",
"np",
".",
"isfinite",
"(",
"xmin",
")",
":",
"raise",
"ValueError",
"(",
"\"xmin should be finite\"",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"xmax",
")",
":",
"raise",
"ValueError",
"(",
"\"xmax should be finite\"",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"ymin",
")",
":",
"raise",
"ValueError",
"(",
"\"ymin should be finite\"",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"ymax",
")",
":",
"raise",
"ValueError",
"(",
"\"ymax should be finite\"",
")",
"if",
"xmax",
"<=",
"xmin",
":",
"raise",
"ValueError",
"(",
"\"xmax should be greater than xmin\"",
")",
"if",
"ymax",
"<=",
"ymin",
":",
"raise",
"ValueError",
"(",
"\"xmax should be greater than xmin\"",
")",
"if",
"nx",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"nx should be strictly positive\"",
")",
"if",
"ny",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"ny should be strictly positive\"",
")",
"if",
"weights",
"is",
"None",
":",
"return",
"_histogram2d",
"(",
"x",
",",
"y",
",",
"nx",
",",
"xmin",
",",
"xmax",
",",
"ny",
",",
"ymin",
",",
"ymax",
")",
"else",
":",
"return",
"_histogram2d_weighted",
"(",
"x",
",",
"y",
",",
"weights",
",",
"nx",
",",
"xmin",
",",
"xmax",
",",
"ny",
",",
"ymin",
",",
"ymax",
")"
] | Compute a 2D histogram assuming equally spaced bins.
Parameters
----------
x, y : `~numpy.ndarray`
The position of the points to bin in the 2D histogram
bins : int or iterable
The number of bins in each dimension. If given as an integer, the same
number of bins is used for each dimension.
range : iterable
The range to use in each dimension, as an iterable of value pairs, i.e.
[(xmin, xmax), (ymin, ymax)]
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 2D histogram array | [
"Compute",
"a",
"2D",
"histogram",
"assuming",
"equally",
"spaced",
"bins",
"."
] | ace4f2444fba2e21fa3cd9dad966f6b65b60660f | https://github.com/astrofrog/fast-histogram/blob/ace4f2444fba2e21fa3cd9dad966f6b65b60660f/fast_histogram/histogram.py#L61-L121 | train |
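A matching sketch for histogram2d, under the same assumption about the import path. Per the signature above, bins may be one integer shared by both axes or an (nx, ny) pair, and range is given per axis.

    import numpy as np
    from fast_histogram import histogram2d  # assumed import path

    x = np.random.normal(size=10000)
    y = np.random.normal(size=10000)

    # 40 x-bins over [-5, 5] and 50 y-bins over [-4, 4]
    counts = histogram2d(x, y, bins=(40, 50), range=[(-5, 5), (-4, 4)])
    print(counts.shape)  # (40, 50)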
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.to_networkx | def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json()) | python | def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json()) | [
"def",
"to_networkx",
"(",
"self",
")",
":",
"return",
"nx_util",
".",
"to_networkx",
"(",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"__url",
")",
".",
"json",
"(",
")",
")"
] | Return this network in NetworkX graph object.
:return: Network as NetworkX graph object | [
"Return",
"this",
"network",
"in",
"NetworkX",
"graph",
"object",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L46-L52 | train |
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.to_dataframe | def to_dataframe(self, extra_edges_columns=[]):
"""
Return this network in pandas DataFrame.
:return: Network as DataFrame. This is equivalent to SIF.
"""
return df_util.to_dataframe(
self.session.get(self.__url).json(),
edges_attr_cols=extra_edges_columns
) | python | def to_dataframe(self, extra_edges_columns=[]):
"""
Return this network in pandas DataFrame.
:return: Network as DataFrame. This is equivalent to SIF.
"""
return df_util.to_dataframe(
self.session.get(self.__url).json(),
edges_attr_cols=extra_edges_columns
) | [
"def",
"to_dataframe",
"(",
"self",
",",
"extra_edges_columns",
"=",
"[",
"]",
")",
":",
"return",
"df_util",
".",
"to_dataframe",
"(",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"__url",
")",
".",
"json",
"(",
")",
",",
"edges_attr_cols",
"=",
"extra_edges_columns",
")"
] | Return this network in pandas DataFrame.
:return: Network as DataFrame. This is equivalent to SIF. | [
"Return",
"this",
"network",
"in",
"pandas",
"DataFrame",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L54-L63 | train |
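A hedged sketch of the two CyNetwork export methods above. Obtaining a CyNetwork handle needs a running Cytoscape desktop with cyREST plus py2cytoscape's CyRestClient entry point; that setup is not part of this excerpt, so the import and the create() call below are assumptions.

    from py2cytoscape.data.cyrest_client import CyRestClient  # assumed entry point

    cy = CyRestClient()                   # local cyREST endpoint by default
    net = cy.network.create(name='demo')  # CyNetwork handle

    g = net.to_networkx()     # networkx graph mirroring the Cytoscape network
    df = net.to_dataframe()   # SIF-like table: source, interaction, target
    df_w = net.to_dataframe(extra_edges_columns=['weight'])  # keep an extra edge column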
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.add_node | def add_node(self, node_name, dataframe=False):
""" Add a single node to the network. """
if node_name is None:
return None
return self.add_nodes([node_name], dataframe=dataframe) | python | def add_node(self, node_name, dataframe=False):
""" Add a single node to the network. """
if node_name is None:
return None
return self.add_nodes([node_name], dataframe=dataframe) | [
"def",
"add_node",
"(",
"self",
",",
"node_name",
",",
"dataframe",
"=",
"False",
")",
":",
"if",
"node_name",
"is",
"None",
":",
"return",
"None",
"return",
"self",
".",
"add_nodes",
"(",
"[",
"node_name",
"]",
",",
"dataframe",
"=",
"dataframe",
")"
] | Add a single node to the network. | [
"Add",
"a",
"single",
"node",
"to",
"the",
"network",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L82-L86 | train |
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.add_nodes | def add_nodes(self, node_name_list, dataframe=False):
"""
Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes.
"""
res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS)
check_response(res)
nodes = res.json()
if dataframe:
return pd.DataFrame(nodes).set_index(['SUID'])
else:
return {node['name']: node['SUID'] for node in nodes} | python | def add_nodes(self, node_name_list, dataframe=False):
"""
Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes.
"""
res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS)
check_response(res)
nodes = res.json()
if dataframe:
return pd.DataFrame(nodes).set_index(['SUID'])
else:
return {node['name']: node['SUID'] for node in nodes} | [
"def",
"add_nodes",
"(",
"self",
",",
"node_name_list",
",",
"dataframe",
"=",
"False",
")",
":",
"res",
"=",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"__url",
"+",
"'nodes'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"node_name_list",
")",
",",
"headers",
"=",
"HEADERS",
")",
"check_response",
"(",
"res",
")",
"nodes",
"=",
"res",
".",
"json",
"(",
")",
"if",
"dataframe",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"nodes",
")",
".",
"set_index",
"(",
"[",
"'SUID'",
"]",
")",
"else",
":",
"return",
"{",
"node",
"[",
"'name'",
"]",
":",
"node",
"[",
"'SUID'",
"]",
"for",
"node",
"in",
"nodes",
"}"
] | Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes. | [
"Add",
"new",
"nodes",
"to",
"the",
"network"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L88-L102 | train |
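A short sketch of add_node/add_nodes under the same assumptions (running Cytoscape, CyRestClient entry point not shown in this excerpt):

    from py2cytoscape.data.cyrest_client import CyRestClient  # assumed entry point

    cy = CyRestClient()
    net = cy.network.create(name='demo')

    suids = net.add_nodes(['a', 'b', 'c'])              # dict: name -> SUID
    table = net.add_nodes(['d', 'e'], dataframe=True)   # DataFrame indexed by SUID
    one = net.add_node('f')                             # same, for a single node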
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.add_edge | def add_edge(self, source, target, interaction='-', directed=True, dataframe=True):
""" Add a single edge from source to target. """
new_edge = {
'source': source,
'target': target,
'interaction': interaction,
'directed': directed
}
return self.add_edges([new_edge], dataframe=dataframe) | python | def add_edge(self, source, target, interaction='-', directed=True, dataframe=True):
""" Add a single edge from source to target. """
new_edge = {
'source': source,
'target': target,
'interaction': interaction,
'directed': directed
}
return self.add_edges([new_edge], dataframe=dataframe) | [
"def",
"add_edge",
"(",
"self",
",",
"source",
",",
"target",
",",
"interaction",
"=",
"'-'",
",",
"directed",
"=",
"True",
",",
"dataframe",
"=",
"True",
")",
":",
"new_edge",
"=",
"{",
"'source'",
":",
"source",
",",
"'target'",
":",
"target",
",",
"'interaction'",
":",
"interaction",
",",
"'directed'",
":",
"directed",
"}",
"return",
"self",
".",
"add_edges",
"(",
"[",
"new_edge",
"]",
",",
"dataframe",
"=",
"dataframe",
")"
] | Add a single edge from source to target. | [
"Add",
"a",
"single",
"edge",
"from",
"source",
"to",
"target",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L104-L112 | train |
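add_edge builds a single-edge payload and delegates to add_edges (not shown here), so source and target are expected to be node SUIDs rather than names. A sketch reusing the suids mapping from the previous example:

    # suids was returned by net.add_nodes(['a', 'b', 'c']) above
    edge_table = net.add_edge(suids['a'], suids['b'],
                              interaction='binds', directed=True)
    # returns a table by default, per the dataframe=True default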
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.get_views | def get_views(self):
"""
Get views as a list of SUIDs
:return:
"""
url = self.__url + 'views'
return self.session.get(url).json() | python | def get_views(self):
"""
Get views as a list of SUIDs
:return:
"""
url = self.__url + 'views'
return self.session.get(url).json() | [
"def",
"get_views",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"__url",
"+",
"'views'",
"return",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
".",
"json",
"(",
")"
] | Get views as a list of SUIDs
:return: | [
"Get",
"views",
"as",
"a",
"list",
"of",
"SUIDs"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L304-L311 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/diffusion.py | diffusion.diffuse_advanced | def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False):
"""
Diffusion will send the selected network view and its selected nodes to
a web-based REST service to calculate network propagation. Results are
returned and represented by columns in the node table.
Columns are created for each execution of Diffusion and their names are
returned in the response.
:param heatColumnName (string, optional): A node column name intended
to override the default table column 'diffusion_input'. This represents
the query vector and corresponds to h in the diffusion equation. =
['HEKScore', 'JurkatScore', '(Use selected nodes)']
:param time (string, optional): The extent of spread over the network.
This corresponds to t in the diffusion equation.
:param verbose: print more
"""
PARAMS=set_param(["heatColumnName","time"],[heatColumnName,time])
response=api(url=self.__url+"/diffuse_advanced", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False):
"""
Diffusion will send the selected network view and its selected nodes to
a web-based REST service to calculate network propagation. Results are
returned and represented by columns in the node table.
Columns are created for each execution of Diffusion and their names are
returned in the response.
:param heatColumnName (string, optional): A node column name intended
to override the default table column 'diffusion_input'. This represents
the query vector and corresponds to h in the diffusion equation. =
['HEKScore', 'JurkatScore', '(Use selected nodes)']
:param time (string, optional): The extent of spread over the network.
This corresponds to t in the diffusion equation.
:param verbose: print more
"""
PARAMS=set_param(["heatColumnName","time"],[heatColumnName,time])
response=api(url=self.__url+"/diffuse_advanced", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"diffuse_advanced",
"(",
"self",
",",
"heatColumnName",
"=",
"None",
",",
"time",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"heatColumnName\"",
",",
"\"time\"",
"]",
",",
"[",
"heatColumnName",
",",
"time",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/diffuse_advanced\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Diffusion will send the selected network view and its selected nodes to
a web-based REST service to calculate network propagation. Results are
returned and represented by columns in the node table.
Columns are created for each execution of Diffusion and their names are
returned in the response.
:param heatColumnName (string, optional): A node column name intended
to override the default table column 'diffusion_input'. This represents
the query vector and corresponds to h in the diffusion equation. =
['HEKScore', 'JurkatScore', '(Use selected nodes)']
:param time (string, optional): The extent of spread over the network.
This corresponds to t in the diffusion equation.
:param verbose: print more | [
"Diffusion",
"will",
"send",
"the",
"selected",
"network",
"view",
"and",
"its",
"selected",
"nodes",
"to",
"a",
"web",
"-",
"based",
"REST",
"service",
"to",
"calculate",
"network",
"propagation",
".",
"Results",
"are",
"returned",
"and",
"represented",
"by",
"columns",
"in",
"the",
"node",
"table",
".",
"Columns",
"are",
"created",
"for",
"each",
"execution",
"of",
"Diffusion",
"and",
"their",
"names",
"are",
"returned",
"in",
"the",
"response",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/diffusion.py#L30-L48 | train |
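A hedged sketch of calling diffuse_advanced through py2cytoscape's command-style client. The cyrest.cyclient wrapper and its diffusion attribute follow the package's usual layout but are not shown in this excerpt, so treat them as assumptions; a network with selected nodes must already be loaded in Cytoscape.

    from py2cytoscape import cyrest  # assumed wrapper module

    client = cyrest.cyclient()       # local cyREST endpoint by default

    # spread heat read from the 'HEKScore' column for t = 0.5
    result = client.diffusion.diffuse_advanced(heatColumnName='HEKScore', time='0.5')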
cytoscape/py2cytoscape | py2cytoscape/util/util_networkx.py | to_networkx | def to_networkx(cyjs, directed=True):
"""
Convert Cytoscape.js-style JSON object into NetworkX object.
By default, data will be handled as a directed graph.
"""
if directed:
g = nx.MultiDiGraph()
else:
g = nx.MultiGraph()
network_data = cyjs[DATA]
if network_data is not None:
for key in network_data.keys():
g.graph[key] = network_data[key]
nodes = cyjs[ELEMENTS][NODES]
edges = cyjs[ELEMENTS][EDGES]
for node in nodes:
data = node[DATA]
g.add_node(data[ID], attr_dict=data)
for edge in edges:
data = edge[DATA]
source = data[SOURCE]
target = data[TARGET]
g.add_edge(source, target, attr_dict=data)
return g | python | def to_networkx(cyjs, directed=True):
"""
Convert Cytoscape.js-style JSON object into NetworkX object.
By default, data will be handled as a directed graph.
"""
if directed:
g = nx.MultiDiGraph()
else:
g = nx.MultiGraph()
network_data = cyjs[DATA]
if network_data is not None:
for key in network_data.keys():
g.graph[key] = network_data[key]
nodes = cyjs[ELEMENTS][NODES]
edges = cyjs[ELEMENTS][EDGES]
for node in nodes:
data = node[DATA]
g.add_node(data[ID], attr_dict=data)
for edge in edges:
data = edge[DATA]
source = data[SOURCE]
target = data[TARGET]
g.add_edge(source, target, attr_dict=data)
return g | [
"def",
"to_networkx",
"(",
"cyjs",
",",
"directed",
"=",
"True",
")",
":",
"if",
"directed",
":",
"g",
"=",
"nx",
".",
"MultiDiGraph",
"(",
")",
"else",
":",
"g",
"=",
"nx",
".",
"MultiGraph",
"(",
")",
"network_data",
"=",
"cyjs",
"[",
"DATA",
"]",
"if",
"network_data",
"is",
"not",
"None",
":",
"for",
"key",
"in",
"network_data",
".",
"keys",
"(",
")",
":",
"g",
".",
"graph",
"[",
"key",
"]",
"=",
"network_data",
"[",
"key",
"]",
"nodes",
"=",
"cyjs",
"[",
"ELEMENTS",
"]",
"[",
"NODES",
"]",
"edges",
"=",
"cyjs",
"[",
"ELEMENTS",
"]",
"[",
"EDGES",
"]",
"for",
"node",
"in",
"nodes",
":",
"data",
"=",
"node",
"[",
"DATA",
"]",
"g",
".",
"add_node",
"(",
"data",
"[",
"ID",
"]",
",",
"attr_dict",
"=",
"data",
")",
"for",
"edge",
"in",
"edges",
":",
"data",
"=",
"edge",
"[",
"DATA",
"]",
"source",
"=",
"data",
"[",
"SOURCE",
"]",
"target",
"=",
"data",
"[",
"TARGET",
"]",
"g",
".",
"add_edge",
"(",
"source",
",",
"target",
",",
"attr_dict",
"=",
"data",
")",
"return",
"g"
] | Convert Cytoscape.js-style JSON object into NetworkX object.
By default, data will be handles as a directed graph. | [
"Convert",
"Cytoscape",
".",
"js",
"-",
"style",
"JSON",
"object",
"into",
"NetworkX",
"object",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/util/util_networkx.py#L120-L151 | train |
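A self-contained sketch for the converter above, using a hand-written Cytoscape.js-style dict. It assumes the module's DATA/ELEMENTS/NODES/EDGES/ID/SOURCE/TARGET constants map to the standard lowercase Cytoscape.js keys; note the attr_dict keyword in the function reflects networkx 1.x conventions.

    from py2cytoscape.util import util_networkx  # module shown above

    cyjs = {
        'data': {'name': 'toy'},
        'elements': {
            'nodes': [{'data': {'id': 'n1'}}, {'data': {'id': 'n2'}}],
            'edges': [{'data': {'id': 'e1', 'source': 'n1', 'target': 'n2'}}],
        },
    }

    g = util_networkx.to_networkx(cyjs)                  # MultiDiGraph by default
    h = util_networkx.to_networkx(cyjs, directed=False)  # MultiGraph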
cytoscape/py2cytoscape | py2cytoscape/cyrest/cybrowser.py | cybrowser.dialog | def dialog(self=None, wid=None, text=None, title=None, url=None, debug=False, verbose=False):
"""
Launch an HTML browser in a separate window.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param debug: Show debug tools. boolean
:param verbose: print more
"""
PARAMS=set_param(["id","text","title","url","debug"],[wid,text,title,url,debug])
response=api(url=self.__url+"/dialog?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | python | def dialog(self=None, wid=None, text=None, title=None, url=None, debug=False, verbose=False):
"""
Launch an HTML browser in a separate window.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param debug: Show debug tools. boolean
:param verbose: print more
"""
PARAMS=set_param(["id","text","title","url","debug"],[wid,text,title,url,debug])
response=api(url=self.__url+"/dialog?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | [
"def",
"dialog",
"(",
"self",
"=",
"None",
",",
"wid",
"=",
"None",
",",
"text",
"=",
"None",
",",
"title",
"=",
"None",
",",
"url",
"=",
"None",
",",
"debug",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"id\"",
",",
"\"text\"",
",",
"\"title\"",
",",
"\"url\"",
",",
"\"debug\"",
"]",
",",
"[",
"wid",
",",
"text",
",",
"title",
",",
"url",
",",
"debug",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/dialog?\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"GET\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Launch an HTML browser in a separate window.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param debug: Show debug tools. boolean
:param verbose: print more | [
"Launch",
"and",
"HTML",
"browser",
"in",
"a",
"separate",
"window",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cybrowser.py#L13-L27 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/cybrowser.py | cybrowser.hide | def hide(self, wid, verbose=False):
"""
Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more
"""
PARAMS={"id":wid}
response=api(url=self.__url+"/hide?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | python | def hide(self, wid, verbose=False):
"""
Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more
"""
PARAMS={"id":wid}
response=api(url=self.__url+"/hide?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | [
"def",
"hide",
"(",
"self",
",",
"wid",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"{",
"\"id\"",
":",
"wid",
"}",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/hide?\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"GET\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more | [
"Hide",
"and",
"HTML",
"browser",
"in",
"the",
"Results",
"Panel",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cybrowser.py#L29-L40 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/cybrowser.py | cybrowser.show | def show(self, wid=None, text=None, title=None, url=None, verbose=False):
"""
Launch an HTML browser in the Results Panel.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param verbose: print more
"""
PARAMS={}
for p,v in zip(["id","text","title","url"],[wid,text,title,url]):
if v:
PARAMS[p]=v
response=api(url=self.__url+"/show?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | python | def show(self, wid=None, text=None, title=None, url=None, verbose=False):
"""
Launch an HTML browser in the Results Panel.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param verbose: print more
"""
PARAMS={}
for p,v in zip(["id","text","title","url"],[wid,text,title,url]):
if v:
PARAMS[p]=v
response=api(url=self.__url+"/show?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | [
"def",
"show",
"(",
"self",
",",
"wid",
"=",
"None",
",",
"text",
"=",
"None",
",",
"title",
"=",
"None",
",",
"url",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"{",
"}",
"for",
"p",
",",
"v",
"in",
"zip",
"(",
"[",
"\"id\"",
",",
"\"text\"",
",",
"\"title\"",
",",
"\"url\"",
"]",
",",
"[",
"wid",
",",
"text",
",",
"title",
",",
"url",
"]",
")",
":",
"if",
"v",
":",
"PARAMS",
"[",
"p",
"]",
"=",
"v",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/show?\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"GET\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Launch an HTML browser in the Results Panel.
:param wid: Window ID
:param text: HTML text
:param title: Window Title
:param url: URL
:param verbose: print more | [
"Launch",
"an",
"HTML",
"browser",
"in",
"the",
"Results",
"Panel",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cybrowser.py#L42-L59 | train |
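The three cybrowser commands above can be driven from the same command-style client; wid is just a caller-chosen window identifier. This assumes the cyrest.cyclient wrapper exposes them as client.cybrowser and that Cytoscape is running.

    from py2cytoscape import cyrest  # assumed wrapper module

    client = cyrest.cyclient()
    client.cybrowser.dialog(wid='w1', title='Docs', url='http://cytoscape.org')  # separate window
    client.cybrowser.show(wid='w2', title='Panel', url='http://cytoscape.org')   # Results Panel
    client.cybrowser.hide(wid='w2')                                              # hide it again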
cytoscape/py2cytoscape | py2cytoscape/data/util_http.py | check_response | def check_response(res):
""" Check HTTP response and raise exception if response is not OK. """
try:
res.raise_for_status() # Alternative is res.ok
except Exception as exc:
# Bad response code, e.g. if adding an edge with nodes that don't exist
try:
err_info = res.json()
err_msg = err_info['message'] # or 'localizeMessage'
except ValueError:
err_msg = res.text[:40] # Take the first 40 chars of the response
except KeyError:
err_msg = res.text[:40] + ("(No 'message' in err_info dict: %s"
% list(err_info.keys()))
exc.args += (err_msg,)
raise exc | python | def check_response(res):
""" Check HTTP response and raise exception if response is not OK. """
try:
res.raise_for_status() # Alternative is res.ok
except Exception as exc:
# Bad response code, e.g. if adding an edge with nodes that don't exist
try:
err_info = res.json()
err_msg = err_info['message'] # or 'localizeMessage'
except ValueError:
err_msg = res.text[:40] # Take the first 40 chars of the response
except KeyError:
err_msg = res.text[:40] + ("(No 'message' in err_info dict: %s"
% list(err_info.keys()))
exc.args += (err_msg,)
raise exc | [
"def",
"check_response",
"(",
"res",
")",
":",
"try",
":",
"res",
".",
"raise_for_status",
"(",
")",
"# Alternative is res.ok",
"except",
"Exception",
"as",
"exc",
":",
"# Bad response code, e.g. if adding an edge with nodes that doesn't exist",
"try",
":",
"err_info",
"=",
"res",
".",
"json",
"(",
")",
"err_msg",
"=",
"err_info",
"[",
"'message'",
"]",
"# or 'localizeMessage'",
"except",
"ValueError",
":",
"err_msg",
"=",
"res",
".",
"text",
"[",
":",
"40",
"]",
"# Take the first 40 chars of the response",
"except",
"KeyError",
":",
"err_msg",
"=",
"res",
".",
"text",
"[",
":",
"40",
"]",
"+",
"(",
"\"(No 'message' in err_info dict: %s\"",
"%",
"list",
"(",
"err_info",
".",
"keys",
"(",
")",
")",
")",
"exc",
".",
"args",
"+=",
"(",
"err_msg",
",",
")",
"raise",
"exc"
] | Check HTTP response and raise exception if response is not OK. | [
"Check",
"HTTP",
"response",
"and",
"raise",
"exception",
"if",
"response",
"is",
"not",
"OK",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/util_http.py#L3-L18 | train |
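check_response is a small guard around requests responses: on a non-2xx status it re-raises the HTTPError with the server's 'message' field (or a text snippet) appended. A usage sketch against an arbitrary cyREST call; the URL is illustrative only.

    import requests
    from py2cytoscape.data.util_http import check_response  # module shown above

    res = requests.get('http://localhost:1234/v1/networks')  # any cyREST request
    check_response(res)   # raises, with the server's error message, on failure
    networks = res.json()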
cytoscape/py2cytoscape | py2cytoscape/util/util_dataframe.py | from_dataframe | def from_dataframe(df,
source_col='source',
target_col='target',
interaction_col='interaction',
name='From DataFrame',
edge_attr_cols=[]):
"""
Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df: Dataframe to convert.
:param source_col: Name of source column.
:param target_col: Name of target column.
:param interaction_col: Name of interaction column.
:param name: Name of network.
:param edge_attr_cols: List containing other columns to consider in df as
edges' attributes.
:return: Dictionary version of df.
"""
network = cyjs.get_empty_network(name=name)
nodes = set()
if edge_attr_cols is None:
edge_attr_cols = []
for index, row in df.iterrows():
s = row[source_col]
t = row[target_col]
if s not in nodes:
nodes.add(s)
source = get_node(s)
network['elements']['nodes'].append(source)
if t not in nodes:
nodes.add(t)
target = get_node(t)
network['elements']['nodes'].append(target)
extra_values = {column: row[column]
for column in edge_attr_cols
if column in df.columns}
network['elements']['edges'].append(
get_edge(s, t, interaction=row[interaction_col], **extra_values)
)
return network | python | def from_dataframe(df,
source_col='source',
target_col='target',
interaction_col='interaction',
name='From DataFrame',
edge_attr_cols=[]):
"""
Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df: Dataframe to convert.
:param source_col: Name of source column.
:param target_col: Name of target column.
:param interaction_col: Name of interaction column.
:param name: Name of network.
:param edge_attr_cols: List containing other columns to consider in df as
edges' attributes.
:return: Dictionary version of df.
"""
network = cyjs.get_empty_network(name=name)
nodes = set()
if edge_attr_cols is None:
edge_attr_cols = []
for index, row in df.iterrows():
s = row[source_col]
t = row[target_col]
if s not in nodes:
nodes.add(s)
source = get_node(s)
network['elements']['nodes'].append(source)
if t not in nodes:
nodes.add(t)
target = get_node(t)
network['elements']['nodes'].append(target)
extra_values = {column: row[column]
for column in edge_attr_cols
if column in df.columns}
network['elements']['edges'].append(
get_edge(s, t, interaction=row[interaction_col], **extra_values)
)
return network | [
"def",
"from_dataframe",
"(",
"df",
",",
"source_col",
"=",
"'source'",
",",
"target_col",
"=",
"'target'",
",",
"interaction_col",
"=",
"'interaction'",
",",
"name",
"=",
"'From DataFrame'",
",",
"edge_attr_cols",
"=",
"[",
"]",
")",
":",
"network",
"=",
"cyjs",
".",
"get_empty_network",
"(",
"name",
"=",
"name",
")",
"nodes",
"=",
"set",
"(",
")",
"if",
"edge_attr_cols",
"is",
"None",
":",
"edge_attr_cols",
"=",
"[",
"]",
"for",
"index",
",",
"row",
"in",
"df",
".",
"iterrows",
"(",
")",
":",
"s",
"=",
"row",
"[",
"source_col",
"]",
"t",
"=",
"row",
"[",
"target_col",
"]",
"if",
"s",
"not",
"in",
"nodes",
":",
"nodes",
".",
"add",
"(",
"s",
")",
"source",
"=",
"get_node",
"(",
"s",
")",
"network",
"[",
"'elements'",
"]",
"[",
"'nodes'",
"]",
".",
"append",
"(",
"source",
")",
"if",
"t",
"not",
"in",
"nodes",
":",
"nodes",
".",
"add",
"(",
"t",
")",
"target",
"=",
"get_node",
"(",
"t",
")",
"network",
"[",
"'elements'",
"]",
"[",
"'nodes'",
"]",
".",
"append",
"(",
"target",
")",
"extra_values",
"=",
"{",
"column",
":",
"row",
"[",
"column",
"]",
"for",
"column",
"in",
"edge_attr_cols",
"if",
"column",
"in",
"df",
".",
"columns",
"}",
"network",
"[",
"'elements'",
"]",
"[",
"'edges'",
"]",
".",
"append",
"(",
"get_edge",
"(",
"s",
",",
"t",
",",
"interaction",
"=",
"row",
"[",
"interaction_col",
"]",
",",
"*",
"*",
"extra_values",
")",
")",
"return",
"network"
] | Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df: Dataframe to convert.
:param source_col: Name of source column.
:param target_col: Name of target column.
:param interaction_col: Name of interaction column.
:param name: Name of network.
:param edge_attr_cols: List containing other columns to consider in df as
edges' attributes.
:return: Dictionary version of df. | [
"Utility",
"to",
"convert",
"Pandas",
"DataFrame",
"object",
"into",
"Cytoscape",
".",
"js",
"JSON"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/util/util_dataframe.py#L6-L49 | train |
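A self-contained sketch for from_dataframe. Only the column names passed as parameters matter; 'weight' here is an arbitrary extra edge attribute used for illustration.

    import pandas as pd
    from py2cytoscape.util import util_dataframe  # module shown above

    df = pd.DataFrame({
        'source': ['a', 'a', 'b'],
        'target': ['b', 'c', 'c'],
        'interaction': ['binds', 'binds', 'activates'],
        'weight': [1.0, 0.5, 0.9],
    })

    net_json = util_dataframe.from_dataframe(df, name='toy', edge_attr_cols=['weight'])
    # net_json['elements']['nodes'] / ['edges'] now hold Cytoscape.js-style dicts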
cytoscape/py2cytoscape | py2cytoscape/util/util_dataframe.py | to_dataframe | def to_dataframe(network,
interaction='interaction',
default_interaction='-',
edges_attr_cols=[]):
"""
Utility to convert a Cytoscape dictionary into a Pandas Dataframe.
:param network: Dictionary to convert.
:param interaction: Name of interaction column.
:param default_interaction: Default value for missing interactions.
:param edges_attr_cols: List containing other edges' attributes to include
in the Dataframe, if present in network.
:return: Converted Pandas Dataframe.
"""
edges = network['elements']['edges']
if edges_attr_cols is None:
edges_attr_cols = []
edges_attr_cols = sorted(edges_attr_cols)
network_array = []
# the set avoids duplicates
valid_extra_cols = set()
for edge in edges:
edge_data = edge['data']
source = edge_data['source']
target = edge_data['target']
if interaction in edge_data:
itr = edge_data[interaction]
else:
itr = default_interaction
extra_values = []
for extra_column in edges_attr_cols:
if extra_column in edge_data:
extra_values.append(edge_data[extra_column])
valid_extra_cols.add(extra_column)
row = tuple([source, itr, target] + extra_values)
network_array.append(row)
return pd.DataFrame( network_array, columns=['source', 'interaction', 'target'] + sorted(valid_extra_cols)) | python | def to_dataframe(network,
interaction='interaction',
default_interaction='-',
edges_attr_cols=[]):
"""
Utility to convert a Cytoscape dictionary into a Pandas Dataframe.
:param network: Dictionary to convert.
:param interaction: Name of interaction column.
:param default_interaction: Default value for missing interactions.
:param edges_attr_cols: List containing other edges' attributes to include
in the Dataframe, if present in network.
:return: Converted Pandas Dataframe.
"""
edges = network['elements']['edges']
if edges_attr_cols is None:
edges_attr_cols = []
edges_attr_cols = sorted(edges_attr_cols)
network_array = []
# the set avoids duplicates
valid_extra_cols = set()
for edge in edges:
edge_data = edge['data']
source = edge_data['source']
target = edge_data['target']
if interaction in edge_data:
itr = edge_data[interaction]
else:
itr = default_interaction
extra_values = []
for extra_column in edges_attr_cols:
if extra_column in edge_data:
extra_values.append(edge_data[extra_column])
valid_extra_cols.add(extra_column)
row = tuple([source, itr, target] + extra_values)
network_array.append(row)
return pd.DataFrame( network_array, columns=['source', 'interaction', 'target'] + sorted(valid_extra_cols)) | [
"def",
"to_dataframe",
"(",
"network",
",",
"interaction",
"=",
"'interaction'",
",",
"default_interaction",
"=",
"'-'",
",",
"edges_attr_cols",
"=",
"[",
"]",
")",
":",
"edges",
"=",
"network",
"[",
"'elements'",
"]",
"[",
"'edges'",
"]",
"if",
"edges_attr_cols",
"is",
"None",
":",
"edges_attr_cols",
"=",
"[",
"]",
"edges_attr_cols",
"=",
"sorted",
"(",
"edges_attr_cols",
")",
"network_array",
"=",
"[",
"]",
"# the set avoids duplicates",
"valid_extra_cols",
"=",
"set",
"(",
")",
"for",
"edge",
"in",
"edges",
":",
"edge_data",
"=",
"edge",
"[",
"'data'",
"]",
"source",
"=",
"edge_data",
"[",
"'source'",
"]",
"target",
"=",
"edge_data",
"[",
"'target'",
"]",
"if",
"interaction",
"in",
"edge_data",
":",
"itr",
"=",
"edge_data",
"[",
"interaction",
"]",
"else",
":",
"itr",
"=",
"default_interaction",
"extra_values",
"=",
"[",
"]",
"for",
"extra_column",
"in",
"edges_attr_cols",
":",
"if",
"extra_column",
"in",
"edge_data",
":",
"extra_values",
".",
"append",
"(",
"edge_data",
"[",
"extra_column",
"]",
")",
"valid_extra_cols",
".",
"add",
"(",
"extra_column",
")",
"row",
"=",
"tuple",
"(",
"[",
"source",
",",
"itr",
",",
"target",
"]",
"+",
"extra_values",
")",
"network_array",
".",
"append",
"(",
"row",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"network_array",
",",
"columns",
"=",
"[",
"'source'",
",",
"'interaction'",
",",
"'target'",
"]",
"+",
"sorted",
"(",
"valid_extra_cols",
")",
")"
] | Utility to convert a Cytoscape dictionary into a Pandas Dataframe.
:param network: Dictionary to convert.
:param interaction: Name of interaction column.
:param default_interaction: Default value for missing interactions.
:param edges_attr_cols: List containing other edges' attributes to include
in the Dataframe, if present in network.
:return: Converted Pandas Dataframe. | [
"Utility",
"to",
"convert",
"a",
"Cytoscape",
"dictionary",
"into",
"a",
"Pandas",
"Dataframe",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/util/util_dataframe.py#L52-L91 | train |
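Continuing the previous sketch, the round trip back to a flat edge table keeps the extra column wherever an edge carries it:

    edges = util_dataframe.to_dataframe(net_json, edges_attr_cols=['weight'])
    # columns: source, interaction, target, weight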
cytoscape/py2cytoscape | py2cytoscape/cytoscapejs/viewer.py | render | def render(network,
style=DEF_STYLE,
layout_algorithm=DEF_LAYOUT,
background=DEF_BACKGROUND_COLOR,
height=DEF_HEIGHT,
width=DEF_WIDTH,
style_file=STYLE_FILE,
def_nodes=DEF_NODES,
def_edges=DEF_EDGES):
"""Render network data with embedded Cytoscape.js widget.
:param network: dict (required)
The network data should be in Cytoscape.js JSON format.
:param style: str or dict
If str, pick one of the preset style. [default: 'default']
If dict, it should be Cytoscape.js style CSS object
:param layout_algorithm: str
Name of Cytoscape.js layout algorithm
:param background: str
Background in CSS format
:param height: int
Height of the widget.
:param width: int
Width of the widget.
:param style_file: a styles file. [default: 'default_style.json']
:param def_nodes: default: [
{'data': {'id': 'Network Data'}},
{'data': {'id': 'Empty'}} ]
:param def_edges: default: [ {'data': {'id': 'is', 'source': 'Network Data', 'target': 'Empty'}} ]
"""
from jinja2 import Template
from IPython.core.display import display, HTML
STYLES=set_styles(style_file)
# Load style file if none available
if isinstance(style, str):
# Specified by name
style = STYLES[style]
if network is None:
nodes = def_nodes
edges = def_edges
else:
nodes = network['elements']['nodes']
edges = network['elements']['edges']
path = os.path.abspath(os.path.dirname(__file__)) + '/' + HTML_TEMPLATE_FILE
template = Template(open(path).read())
cyjs_widget = template.render(
nodes=json.dumps(nodes),
edges=json.dumps(edges),
background=background,
uuid="cy" + str(uuid.uuid4()),
widget_width=str(width),
widget_height=str(height),
layout=layout_algorithm,
style_json=json.dumps(style)
)
display(HTML(cyjs_widget)) | python | def render(network,
style=DEF_STYLE,
layout_algorithm=DEF_LAYOUT,
background=DEF_BACKGROUND_COLOR,
height=DEF_HEIGHT,
width=DEF_WIDTH,
style_file=STYLE_FILE,
def_nodes=DEF_NODES,
def_edges=DEF_EDGES):
"""Render network data with embedded Cytoscape.js widget.
:param network: dict (required)
The network data should be in Cytoscape.js JSON format.
:param style: str or dict
If str, pick one of the preset style. [default: 'default']
If dict, it should be Cytoscape.js style CSS object
:param layout_algorithm: str
Name of Cytoscape.js layout algorithm
:param background: str
Background in CSS format
:param height: int
Height of the widget.
:param width: int
Width of the widget.
:param style_file: a styles file. [default: 'default_style.json']
:param def_nodes: default: [
{'data': {'id': 'Network Data'}},
{'data': {'id': 'Empty'}} ]
:param def_edges: default: [ {'data': {'id': 'is', 'source': 'Network Data', 'target': 'Empty'}} ]
"""
from jinja2 import Template
from IPython.core.display import display, HTML
STYLES=set_styles(style_file)
# Load style file if none available
if isinstance(style, str):
# Specified by name
style = STYLES[style]
if network is None:
nodes = def_nodes
edges = def_edges
else:
nodes = network['elements']['nodes']
edges = network['elements']['edges']
path = os.path.abspath(os.path.dirname(__file__)) + '/' + HTML_TEMPLATE_FILE
template = Template(open(path).read())
cyjs_widget = template.render(
nodes=json.dumps(nodes),
edges=json.dumps(edges),
background=background,
uuid="cy" + str(uuid.uuid4()),
widget_width=str(width),
widget_height=str(height),
layout=layout_algorithm,
style_json=json.dumps(style)
)
display(HTML(cyjs_widget)) | [
"def",
"render",
"(",
"network",
",",
"style",
"=",
"DEF_STYLE",
",",
"layout_algorithm",
"=",
"DEF_LAYOUT",
",",
"background",
"=",
"DEF_BACKGROUND_COLOR",
",",
"height",
"=",
"DEF_HEIGHT",
",",
"width",
"=",
"DEF_WIDTH",
",",
"style_file",
"=",
"STYLE_FILE",
",",
"def_nodes",
"=",
"DEF_NODES",
",",
"def_edges",
"=",
"DEF_EDGES",
")",
":",
"from",
"jinja2",
"import",
"Template",
"from",
"IPython",
".",
"core",
".",
"display",
"import",
"display",
",",
"HTML",
"STYLES",
"=",
"set_styles",
"(",
"style_file",
")",
"# Load style file if none available",
"if",
"isinstance",
"(",
"style",
",",
"str",
")",
":",
"# Specified by name",
"style",
"=",
"STYLES",
"[",
"style",
"]",
"if",
"network",
"is",
"None",
":",
"nodes",
"=",
"def_nodes",
"edges",
"=",
"def_edges",
"else",
":",
"nodes",
"=",
"network",
"[",
"'elements'",
"]",
"[",
"'nodes'",
"]",
"edges",
"=",
"network",
"[",
"'elements'",
"]",
"[",
"'edges'",
"]",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"+",
"'/'",
"+",
"HTML_TEMPLATE_FILE",
"template",
"=",
"Template",
"(",
"open",
"(",
"path",
")",
".",
"read",
"(",
")",
")",
"cyjs_widget",
"=",
"template",
".",
"render",
"(",
"nodes",
"=",
"json",
".",
"dumps",
"(",
"nodes",
")",
",",
"edges",
"=",
"json",
".",
"dumps",
"(",
"edges",
")",
",",
"background",
"=",
"background",
",",
"uuid",
"=",
"\"cy\"",
"+",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
",",
"widget_width",
"=",
"str",
"(",
"width",
")",
",",
"widget_height",
"=",
"str",
"(",
"height",
")",
",",
"layout",
"=",
"layout_algorithm",
",",
"style_json",
"=",
"json",
".",
"dumps",
"(",
"style",
")",
")",
"display",
"(",
"HTML",
"(",
"cyjs_widget",
")",
")"
] | Render network data with embedded Cytoscape.js widget.
:param network: dict (required)
The network data should be in Cytoscape.js JSON format.
:param style: str or dict
If str, pick one of the preset style. [default: 'default']
If dict, it should be Cytoscape.js style CSS object
:param layout_algorithm: str
Name of Cytoscape.js layout algorithm
:param background: str
Background in CSS format
:param height: int
Height of the widget.
:param width: int
Width of the widget.
:param style_file: a styles file. [default: 'default_style.json']
:param def_nodes: default: [
{'data': {'id': 'Network Data'}},
{'data': {'id': 'Empty'}} ]
:param def_edges: default: [ {'data': {'id': 'is', 'source': 'Network Data', 'target': 'Empty'}} ] | [
"Render",
"network",
"data",
"with",
"embedded",
"Cytoscape",
".",
"js",
"widget",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cytoscapejs/viewer.py#L57-L118 | train |
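A hedged sketch of the Jupyter widget: render only makes sense inside a notebook cell (it needs jinja2 and IPython display), and the module path below mirrors the file shown above rather than a documented public import, so treat it as an assumption.

    from py2cytoscape.cytoscapejs import viewer  # assumed import path

    # net_json is the Cytoscape.js-style dict built in the from_dataframe sketch
    viewer.render(net_json, style='default', layout_algorithm='circle',
                  background='#FFFFFF', height=350, width=600)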
cytoscape/py2cytoscape | py2cytoscape/cyrest/edge.py | edge.create_attribute | def create_attribute(self,column=None,listType=None,namespace=None, network=None, atype=None, verbose=False):
"""
Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["column","listType","namespace","network","type"],[column,listType,namespace,network,atype])
response=api(url=self.__url+"/create attribute", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def create_attribute(self,column=None,listType=None,namespace=None, network=None, atype=None, verbose=False):
"""
Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["column","listType","namespace","network","type"],[column,listType,namespace,network,atype])
response=api(url=self.__url+"/create attribute", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"create_attribute",
"(",
"self",
",",
"column",
"=",
"None",
",",
"listType",
"=",
"None",
",",
"namespace",
"=",
"None",
",",
"network",
"=",
"None",
",",
"atype",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"network",
"=",
"check_network",
"(",
"self",
",",
"network",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"column\"",
",",
"\"listType\"",
",",
"\"namespace\"",
",",
"\"network\"",
",",
"\"type\"",
"]",
",",
"[",
"column",
",",
"listType",
",",
"namespace",
",",
"network",
",",
"atype",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/create attribute\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more | [
"Creates",
"a",
"new",
"edge",
"column",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/edge.py#L13-L35 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/edge.py | edge.get | def get(self,edge=None,network=None,sourceNode=None, targetNode=None, atype=None, verbose=False):
"""
Returns the SUID of an edge that matches the passed parameters. If
multiple edges are found, only one will be returned, and a warning will
be reported in the Cytoscape Task History dialog.
:param edge (string, optional): Selects an edge by name, or, if the
parameter has the prefix suid:, selects an edge by SUID. If this
parameter is set, all other edge matching parameters are ignored.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param sourceNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its source. This parameter
must be used with the targetNode parameter to produce results.
:param targetNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its target. This parameter
must be used with the sourceNode parameter to produce results.
:param atype (string, optional): Specifies that the edge matched must
be of the specified type. This parameter must be used with the
sourceNode and targetNode parameters to produce results.
:param verbose: print more
:returns: {"columnName": columnName }
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["edge","network","sourceNode","targetNode","type"],[edge,network,sourceNode,targetNode,atype])
response=api(url=self.__url+"/get", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def get(self,edge=None,network=None,sourceNode=None, targetNode=None, atype=None, verbose=False):
"""
Returns the SUID of an edge that matches the passed parameters. If
multiple edges are found, only one will be returned, and a warning will
be reported in the Cytoscape Task History dialog.
:param edge (string, optional): Selects an edge by name, or, if the
parameter has the prefix suid:, selects an edge by SUID. If this
parameter is set, all other edge matching parameters are ignored.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param sourceNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its source. This parameter
must be used with the targetNode parameter to produce results.
:param targetNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its target. This parameter
must be used with the sourceNode parameter to produce results.
:param atype (string, optional): Specifies that the edge matched must
be of the specified type. This parameter must be used with the
sourceNode and targetNode parameters to produce results.
:param verbose: print more
:returns: {"columnName": columnName }
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["edge","network","sourceNode","targetNode","type"],[edge,network,sourceNode,targetNode,atype])
response=api(url=self.__url+"/get", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"get",
"(",
"self",
",",
"edge",
"=",
"None",
",",
"network",
"=",
"None",
",",
"sourceNode",
"=",
"None",
",",
"targetNode",
"=",
"None",
",",
"atype",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"network",
"=",
"check_network",
"(",
"self",
",",
"network",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"edge\"",
",",
"\"network\"",
",",
"\"sourceNode\"",
",",
"\"targetNode\"",
",",
"\"type\"",
"]",
",",
"[",
"edge",
",",
"network",
",",
"sourceNode",
",",
"targetNode",
",",
"atype",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/get\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Returns the SUID of an edge that matches the passed parameters. If
multiple edges are found, only one will be returned, and a warning will
be reported in the Cytoscape Task History dialog.
:param edge (string, optional): Selects an edge by name, or, if the
parameter has the prefix suid:, selects an edge by SUID. If this
parameter is set, all other edge matching parameters are ignored.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param sourceNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its source. This parameter
must be used with the targetNode parameter to produce results.
:param targetNode (string, optional): Selects a node by name, or, if
the parameter has the prefix suid:, selects a node by SUID. Specifies
that the edge matched must have this node as its target. This parameter
must be used with the sourceNode parameter to produce results.
:param atype (string, optional): Specifies that the edge matched must
be of the specified type. This parameter must be used with the
sourceNode and targetNode parameters to produce results.
:param verbose: print more
:returns: {"columnName": columnName } | [
"Returns",
"the",
"SUID",
"of",
"an",
"edge",
"that",
"matches",
"the",
"passed",
"parameters",
".",
"If",
"multiple",
"edges",
"are",
"found",
"only",
"one",
"will",
"be",
"returned",
"and",
"a",
"warning",
"will",
"be",
"reported",
"in",
"the",
"Cytoscape",
"Task",
"History",
"dialog",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/edge.py#L37-L68 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.add_edge | def add_edge(self, isDirected=None,name=None,network=None,sourceName=None,targetName=None, verbose=False):
"""
Add a new edge between two existing nodes in a network. The names of the
nodes must be specified and must match the value in the 'name' column for
each node.
:param isDirected (string, optional): Whether the edge should be directed
or not. Even though all edges in Cytoscape have a source and target, by
default they are treated as undirected. Setting this to 'true' will
flag some algorithms to treat them as directed, although many current
implementations will ignore this flag.
:param name (string, optional): Set the 'name' and 'shared name' columns
for this edge to the provided value. ,
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param sourceName (string): Enter the name of an existing node in the
network to be the source of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param targetName (string): Enter the name of an existing node in the
network to be the target of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["isDirected","name","network","sourceName","targetName"],\
[isDirected,name,network,sourceName,targetName])
response=api(url=self.__url+"/add edge", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def add_edge(self, isDirected=None,name=None,network=None,sourceName=None,targetName=None, verbose=False):
"""
Add a new edge between two existing nodes in a network. The names of the
nodes must be specified and must match the value in the 'name' column for
each node.
:param isDirected (string, optional): Whether the edge should be directed
or not. Even though all edges in Cytoscape have a source and target, by
default they are treated as undirected. Setting this to 'true' will
flag some algorithms to treat them as directed, although many current
implementations will ignore this flag.
:param name (string, optional): Set the 'name' and 'shared name' columns
for this edge to the provided value. ,
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param sourceName (string): Enter the name of an existing node in the
network to be the source of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param targetName (string): Enter the name of an existing node in the
network to be the target of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["isDirected","name","network","sourceName","targetName"],\
[isDirected,name,network,sourceName,targetName])
response=api(url=self.__url+"/add edge", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"add_edge",
"(",
"self",
",",
"isDirected",
"=",
"None",
",",
"name",
"=",
"None",
",",
"network",
"=",
"None",
",",
"sourceName",
"=",
"None",
",",
"targetName",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"network",
"=",
"check_network",
"(",
"self",
",",
"network",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"isDirected\"",
",",
"\"name\"",
",",
"\"network\"",
",",
"\"sourceName\"",
",",
"\"targetName\"",
"]",
",",
"[",
"isDirected",
",",
"name",
",",
"network",
",",
"sourceName",
",",
"targetName",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/add edge\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Add a new edge between two existing nodes in a network. The names of the
nodes must be specified and must match the value in the 'name' column for
each node.
:param isDirected (string, optional): Whether the edge should be directed
or not. Even though all edges in Cytoscape have a source and target, by
default they are treated as undirected. Setting this to 'true' will
flag some algorithms to treat them as directed, although many current
implementations will ignore this flag.
:param name (string, optional): Set the 'name' and 'shared name' columns
for this edge to the provided value. ,
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param sourceName (string): Enter the name of an existing node in the
network to be the source of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param targetName (string): Enter the name of an existing node in the
network to be the target of the edge. Note that this is the name as
defined in the 'name' column of the network.
:param verbose: print more | [
"Add",
"a",
"new",
"edge",
"between",
"two",
"existing",
"nodes",
"in",
"a",
"network",
".",
"The",
"names",
"of",
"the",
"nodes",
"must",
"be",
"specified",
"and",
"much",
"match",
"the",
"value",
"in",
"the",
"name",
"column",
"for",
"each",
"node",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L45-L73 | train |
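A minimal usage sketch for the add_edge command above, assuming a running Cytoscape instance with cyREST on localhost:1234 and py2cytoscape's cyrest.cyclient wrapper, which exposes this class as the network attribute; the node names are placeholders for nodes that already exist in the current network.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()  # assumed default: http://localhost:1234/v1
# Connect two existing nodes; the names must match their 'name' column values.
response = cytoscape.network.add_edge(
    sourceName="node A",   # placeholder: an existing source node
    targetName="node B",   # placeholder: an existing target node
    isDirected="true",     # hint to algorithms that the edge is directed
    network="CURRENT",     # operate on the current network
)
print(response)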
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.create | def create(self, edgeList=None, excludeEdges=None, networkName=None, nodeList=None, source=None, verbose=False):
"""
Create a new network from a list of nodes and edges in an existing source network.
The SUID of the network and view are returned.
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param excludeEdges (string, optional): Unless this is set to true, edges
that connect nodes in the nodeList are implicitly included
:param networkName (string, optional):
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param source (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param verbose: print more
:returns: { network, view }
"""
network=check_network(self,source, verbose=verbose)
PARAMS=set_param(["edgeList","excludeEdges","networkName","nodeList","source"], \
[edgeList,excludeEdges,networkName,nodeList,network])
response=api(url=self.__url+"/create", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def create(self, edgeList=None, excludeEdges=None, networkName=None, nodeList=None, source=None, verbose=False):
"""
Create a new network from a list of nodes and edges in an existing source network.
The SUID of the network and view are returned.
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param excludeEdges (string, optional): Unless this is set to true, edges
that connect nodes in the nodeList are implicitly included
:param networkName (string, optional):
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param source (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param verbose: print more
:returns: { network, view }
"""
network=check_network(self,source, verbose=verbose)
PARAMS=set_param(["edgeList","excludeEdges","networkName","nodeList","source"], \
[edgeList,excludeEdges,networkName,nodeList,network])
response=api(url=self.__url+"/create", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"create",
"(",
"self",
",",
"edgeList",
"=",
"None",
",",
"excludeEdges",
"=",
"None",
",",
"networkName",
"=",
"None",
",",
"nodeList",
"=",
"None",
",",
"source",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"network",
"=",
"check_network",
"(",
"self",
",",
"source",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"edgeList\"",
",",
"\"excludeEdges\"",
",",
"\"networkName\"",
",",
"\"nodeList\"",
",",
"\"source\"",
"]",
",",
"[",
"edgeList",
",",
"excludeEdges",
",",
"networkName",
",",
"nodeList",
",",
"network",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/create\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Create a new network from a list of nodes and edges in an existing source network.
The SUID of the network and view are returned.
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param excludeEdges (string, optional): Unless this is set to true, edges
that connect nodes in the nodeList are implicitly included
:param networkName (string, optional):
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param source (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param verbose: print more
:returns: { network, view } | [
"Create",
"a",
"new",
"network",
"from",
"a",
"list",
"of",
"nodes",
"and",
"edges",
"in",
"an",
"existing",
"source",
"network",
".",
"The",
"SUID",
"of",
"the",
"network",
"and",
"view",
"are",
"returned",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L137-L170 | train |
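A hedged sketch of the create command above, again assuming the cyrest.cyclient wrapper; it builds a subnetwork named 'selected_subnetwork' (a placeholder) from whatever nodes are currently selected, and the response is expected to carry the SUIDs of the new network and view.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
# Subnetwork from the selected nodes of the current network; connecting
# edges are included implicitly unless excludeEdges is set to 'true'.
result = cytoscape.network.create(
    nodeList="selected",
    networkName="selected_subnetwork",
    source="CURRENT",
)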
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.create_empty | def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
"""
Create a new, empty network. The new network may be created as part of
an existing network collection or a new network collection.
:param name (string, optional): Enter the name of the new network.
:param renderers (string, optional): Select the renderer to use for the
new network view. By default, the standard Cytoscape 2D renderer (Ding)
will be used = [''],
:param RootNetworkList (string, optional): Choose the network collection
the new network should be part of. If no network collection is selected,
a new network collection is created. = [' -- Create new network collection --',
'cy:command_documentation_generation']
:param verbose: print more
"""
PARAMS=set_param(["name","renderers","RootNetworkList"],[name,renderers,RootNetworkList])
response=api(url=self.__url+"/create empty", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
"""
Create a new, empty network. The new network may be created as part of
an existing network collection or a new network collection.
:param name (string, optional): Enter the name of the new network.
:param renderers (string, optional): Select the renderer to use for the
new network view. By default, the standard Cytoscape 2D renderer (Ding)
will be used = [''],
:param RootNetworkList (string, optional): Choose the network collection
the new network should be part of. If no network collection is selected,
a new network collection is created. = [' -- Create new network collection --',
'cy:command_documentation_generation']
:param verbose: print more
"""
PARAMS=set_param(["name","renderers","RootNetworkList"],[name,renderers,RootNetworkList])
response=api(url=self.__url+"/create empty", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"create_empty",
"(",
"self",
",",
"name",
"=",
"None",
",",
"renderers",
"=",
"None",
",",
"RootNetworkList",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"name\"",
",",
"\"renderers\"",
",",
"\"RootNetworkList\"",
"]",
",",
"[",
"name",
",",
"renderers",
",",
"RootNetworkList",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/create empty\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Create a new, empty network. The new network may be created as part of
an existing network collection or a new network collection.
:param name (string, optional): Enter the name of the new network.
:param renderers (string, optional): Select the renderer to use for the
new network view. By default, the standard Cytoscape 2D renderer (Ding)
will be used = [''],
:param RootNetworkList (string, optional): Choose the network collection
the new network should be part of. If no network collection is selected,
a new network collection is created. = [' -- Create new network collection --',
'cy:command_documentation_generation']
:param verbose: print more | [
"Create",
"a",
"new",
"empty",
"network",
".",
"The",
"new",
"network",
"may",
"be",
"created",
"as",
"part",
"of",
"an",
"existing",
"network",
"collection",
"or",
"a",
"new",
"network",
"collection",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L201-L218 | train |
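A small sketch for create_empty under the same client assumption; leaving RootNetworkList unset lets Cytoscape put the new network into a fresh collection, and the network name is a placeholder.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
# New, empty network in a new collection, rendered with the default renderer.
cytoscape.network.create_empty(name="scratch_network")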
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.list | def list(self, verbose=False):
"""
List all of the networks in the current session.
:param verbose: print more
:returns: [ list of network suids ]
"""
response=api(url=self.__url+"/list", method="POST", verbose=verbose)
return response | python | def list(self, verbose=False):
"""
List all of the networks in the current session.
:param verbose: print more
:returns: [ list of network suids ]
"""
response=api(url=self.__url+"/list", method="POST", verbose=verbose)
return response | [
"def",
"list",
"(",
"self",
",",
"verbose",
"=",
"False",
")",
":",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/list\"",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | List all of the networks in the current session.
:param verbose: print more
:returns: [ list of network suids ] | [
"List",
"all",
"of",
"the",
"networks",
"in",
"the",
"current",
"session",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L532-L542 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.list_attributes | def list_attributes(self, namespace=None, network=None, verbose=False):
"""
Returns a list of column names associated with a network.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks also
support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param verbose: print more
:returns: [ list of column names associated with a network ]
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["namespace","network"],[namespace,network])
response=api(url=self.__url+"/list attributes", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def list_attributes(self, namespace=None, network=None, verbose=False):
"""
Returns a list of column names associated with a network.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks also
support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param verbose: print more
:returns: [ list of column names associated with a network ]
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["namespace","network"],[namespace,network])
response=api(url=self.__url+"/list attributes", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"list_attributes",
"(",
"self",
",",
"namespace",
"=",
"None",
",",
"network",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"network",
"=",
"check_network",
"(",
"self",
",",
"network",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"namespace\"",
",",
"\"network\"",
"]",
",",
"[",
"namespace",
",",
"network",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/list attributes\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Returns a list of column names associated with a network.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks also
support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param verbose: print more
:returns: [ list of column names associated with a network ] | [
"Returns",
"a",
"list",
"of",
"column",
"names",
"assocated",
"with",
"a",
"network",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L544-L562 | train |
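A short sketch covering the two preceding rows (list and list attributes), assuming the same cyrest.cyclient wrapper; it enumerates the open networks and then asks for the column names of the current one.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
network_suids = cytoscape.network.list()                        # SUIDs of all networks in the session
columns = cytoscape.network.list_attributes(network="CURRENT")  # column names of the current network
print(network_suids, columns)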
cytoscape/py2cytoscape | py2cytoscape/cyrest/network.py | network.rename | def rename(self, name=None, sourceNetwork=None, verbose=False):
"""
Rename an existing network. The SUID of the network is returned
:param name (string): Enter a new title for the network
:param sourceNetwork (string): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param verbose: print more
:returns: SUID of the network is returned
"""
sourceNetwork=check_network(self,sourceNetwork,verbose=verbose)
PARAMS=set_param(["name","sourceNetwork"],[name,sourceNetwork])
response=api(url=self.__url+"/rename", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def rename(self, name=None, sourceNetwork=None, verbose=False):
"""
Rename an existing network. The SUID of the network is returned
:param name (string): Enter a new title for the network
:param sourceNetwork (string): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param verbose: print more
:returns: SUID of the network is returned
"""
sourceNetwork=check_network(self,sourceNetwork,verbose=verbose)
PARAMS=set_param(["name","sourceNetwork"],[name,sourceNetwork])
response=api(url=self.__url+"/rename", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"rename",
"(",
"self",
",",
"name",
"=",
"None",
",",
"sourceNetwork",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"sourceNetwork",
"=",
"check_network",
"(",
"self",
",",
"sourceNetwork",
",",
"verbose",
"=",
"verbose",
")",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"name\"",
",",
"\"sourceNetwork\"",
"]",
",",
"[",
"name",
",",
"sourceNetwork",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/rename\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Rename an existing network. The SUID of the network is returned
:param name (string): Enter a new title for the network
:param sourceNetwork (string): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param verbose: print more
:returns: SUID of the network is returned | [
"Rename",
"an",
"existing",
"network",
".",
"The",
"SUID",
"of",
"the",
"network",
"is",
"returned"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/network.py#L619-L634 | train |
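A one-line sketch for rename under the same client assumption; the new title is a placeholder and the response is expected to contain the SUID of the renamed network.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
suid = cytoscape.network.rename(name="renamed_network", sourceNetwork="CURRENT")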
cytoscape/py2cytoscape | py2cytoscape/cyrest/session.py | session.new | def new(self, verbose=False):
"""
Destroys the current session and creates a new, empty one.
:param wid: Window ID
:param verbose: print more
"""
response=api(url=self.__url+"/new", verbose=verbose)
return response | python | def new(self, verbose=False):
"""
Destroys the current session and creates a new, empty one.
:param wid: Window ID
:param verbose: print more
"""
response=api(url=self.__url+"/new", verbose=verbose)
return response | [
"def",
"new",
"(",
"self",
",",
"verbose",
"=",
"False",
")",
":",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/new\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Destroys the current session and creates a new, empty one.
:param wid: Window ID
:param verbose: print more | [
"Destroys",
"the",
"current",
"session",
"and",
"creates",
"a",
"new",
"empty",
"one",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/session.py#L14-L23 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/session.py | session.open | def open(self, session_file=None,session_url=None, verbose=False):
"""
Opens a session from a local file or URL.
:param session_file: The path to the session file (.cys) to be loaded.
:param session_url: A URL that provides a session file.
:param verbose: print more
"""
PARAMS=set_param(["file", "url"],[session_file, session_url])
response=api(url=self.__url+"/open", PARAMS=PARAMS, verbose=verbose)
return response | python | def open(self, session_file=None,session_url=None, verbose=False):
"""
Opens a session from a local file or URL.
:param session_file: The path to the session file (.cys) to be loaded.
:param session_url: A URL that provides a session file.
:param verbose: print more
"""
PARAMS=set_param(["file", "url"],[session_file, session_url])
response=api(url=self.__url+"/open", PARAMS=PARAMS, verbose=verbose)
return response | [
"def",
"open",
"(",
"self",
",",
"session_file",
"=",
"None",
",",
"session_url",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"file\"",
",",
"\"url\"",
"]",
",",
"[",
"session_file",
",",
"session_url",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/open\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Opens a session from a local file or URL.
:param session_file: The path to the session file (.cys) to be loaded.
:param session_url: A URL that provides a session file.
:param verbose: print more | [
"Opens",
"a",
"session",
"from",
"a",
"local",
"file",
"or",
"URL",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/session.py#L26-L37 | train |
cytoscape/py2cytoscape | py2cytoscape/cyrest/session.py | session.save | def save(self, session_file, verbose=False):
"""
Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
"""
PARAMS={"file":session_file}
response=api(url=self.__url+"/save", PARAMS=PARAMS, verbose=verbose)
return response | python | def save(self, session_file, verbose=False):
"""
Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
"""
PARAMS={"file":session_file}
response=api(url=self.__url+"/save", PARAMS=PARAMS, verbose=verbose)
return response | [
"def",
"save",
"(",
"self",
",",
"session_file",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"{",
"\"file\"",
":",
"session_file",
"}",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/save\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more | [
"Saves",
"the",
"current",
"session",
"to",
"an",
"existing",
"file",
"which",
"will",
"be",
"replaced",
".",
"If",
"this",
"is",
"a",
"new",
"session",
"that",
"has",
"not",
"been",
"saved",
"yet",
"use",
"save",
"as",
"instead",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/session.py#L40-L54 | train |
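A hedged sketch of a round trip through the session commands shown above (new, open, save), assuming the cyrest.cyclient wrapper; the session file path is purely illustrative. Note that save replaces an existing .cys file, so sessions that have never been saved should use 'save as' instead.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
cytoscape.session.new()                                    # start from an empty session
cytoscape.session.open(session_file="/data/example.cys")   # load a saved session
cytoscape.session.save(session_file="/data/example.cys")   # write it back, replacing the file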
cytoscape/py2cytoscape | py2cytoscape/cyrest/vizmap.py | vizmap.apply | def apply(self, styles=None, verbose=False):
"""
Applies the specified style to the selected views and returns the
SUIDs of the affected views.
:param styles (string): Name of Style to be applied to the selected
views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0',
'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal',
'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0',
'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best',
'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0',
'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style',
'Coverage Histogram:best', 'Coverage Histogram:best&unique_0',
'default black', 'No Histogram:best_0', 'No Histogram:unique',
'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique',
'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram',
'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0',
'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique',
'No Histogram', 'default']
:param verbose: print more
:returns: SUIDs of the affected views
"""
PARAMS=set_param(["styles"],[styles])
response=api(url=self.__url+"/apply", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | python | def apply(self, styles=None, verbose=False):
"""
Applies the specified style to the selected views and returns the
SUIDs of the affected views.
:param styles (string): Name of Style to be applied to the selected
views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0',
'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal',
'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0',
'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best',
'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0',
'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style',
'Coverage Histogram:best', 'Coverage Histogram:best&unique_0',
'default black', 'No Histogram:best_0', 'No Histogram:unique',
'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique',
'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram',
'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0',
'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique',
'No Histogram', 'default']
:param verbose: print more
:returns: SUIDs of the affected views
"""
PARAMS=set_param(["styles"],[styles])
response=api(url=self.__url+"/apply", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | [
"def",
"apply",
"(",
"self",
",",
"styles",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"styles\"",
"]",
",",
"[",
"styles",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/apply\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Applies the specified style to the selected views and returns the
SUIDs of the affected views.
:param styles (string): Name of Style to be applied to the selected
views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0',
'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal',
'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0',
'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best',
'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0',
'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style',
'Coverage Histogram:best', 'Coverage Histogram:best&unique_0',
'default black', 'No Histogram:best_0', 'No Histogram:unique',
'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique',
'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram',
'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0',
'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique',
'No Histogram', 'default']
:param verbose: print more
:returns: SUIDs of the affected views | [
"Applies",
"the",
"specified",
"style",
"to",
"the",
"selected",
"views",
"and",
"returns",
"the",
"SUIDs",
"of",
"the",
"affected",
"views",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/vizmap.py#L13-L39 | train |
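A minimal sketch for the apply command above under the same client assumption; 'default black' is one of the style names enumerated in the docstring, and the return value is expected to be the SUIDs of the affected views.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
affected_views = cytoscape.vizmap.apply(styles="default black")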
cytoscape/py2cytoscape | py2cytoscape/cyrest/vizmap.py | vizmap.create_style | def create_style(self,title=None,defaults=None,mappings=None,verbose=VERBOSE):
"""
Creates a new visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:param host: cytoscape host address, default=cytoscape_host
:param port: cytoscape port, default=1234
:returns: nothing
"""
u=self.__url
host=u.split("//")[1].split(":")[0]
port=u.split(":")[2].split("/")[0]
version=u.split(":")[2].split("/")[1]
if defaults:
defaults_=[]
for d in defaults:
if d:
defaults_.append(d)
defaults=defaults_
if mappings:
mappings_=[]
for m in mappings:
if m:
mappings_.append(m)
mappings=mappings_
try:
update_style(title=title,defaults=defaults,mappings=mappings,host=host,port=port)
print("Existing style was updated.")
sys.stdout.flush()
except:
print("Creating new style.")
sys.stdout.flush()
URL="http://"+str(host)+":"+str(port)+"/v1/styles"
PARAMS={"title":title,\
"defaults":defaults,\
"mappings":mappings}
r = requests.post(url = URL, json = PARAMS)
checkresponse(r) | python | def create_style(self,title=None,defaults=None,mappings=None,verbose=VERBOSE):
"""
Creates a new visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:param host: cytoscape host address, default=cytoscape_host
:param port: cytoscape port, default=1234
:returns: nothing
"""
u=self.__url
host=u.split("//")[1].split(":")[0]
port=u.split(":")[2].split("/")[0]
version=u.split(":")[2].split("/")[1]
if defaults:
defaults_=[]
for d in defaults:
if d:
defaults_.append(d)
defaults=defaults_
if mappings:
mappings_=[]
for m in mappings:
if m:
mappings_.append(m)
mappings=mappings_
try:
update_style(title=title,defaults=defaults,mappings=mappings,host=host,port=port)
print("Existing style was updated.")
sys.stdout.flush()
except:
print("Creating new style.")
sys.stdout.flush()
URL="http://"+str(host)+":"+str(port)+"/v1/styles"
PARAMS={"title":title,\
"defaults":defaults,\
"mappings":mappings}
r = requests.post(url = URL, json = PARAMS)
checkresponse(r) | [
"def",
"create_style",
"(",
"self",
",",
"title",
"=",
"None",
",",
"defaults",
"=",
"None",
",",
"mappings",
"=",
"None",
",",
"verbose",
"=",
"VERBOSE",
")",
":",
"u",
"=",
"self",
".",
"__url",
"host",
"=",
"u",
".",
"split",
"(",
"\"//\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"port",
"=",
"u",
".",
"split",
"(",
"\":\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"0",
"]",
"version",
"=",
"u",
".",
"split",
"(",
"\":\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"1",
"]",
"if",
"defaults",
":",
"defaults_",
"=",
"[",
"]",
"for",
"d",
"in",
"defaults",
":",
"if",
"d",
":",
"defaults_",
".",
"append",
"(",
"d",
")",
"defaults",
"=",
"defaults_",
"if",
"mappings",
":",
"mappings_",
"=",
"[",
"]",
"for",
"m",
"in",
"mappings",
":",
"if",
"m",
":",
"mappings_",
".",
"append",
"(",
"m",
")",
"mappings",
"=",
"mappings_",
"try",
":",
"update_style",
"(",
"title",
"=",
"title",
",",
"defaults",
"=",
"defaults",
",",
"mappings",
"=",
"mappings",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
"print",
"(",
"\"Existing style was updated.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"except",
":",
"print",
"(",
"\"Creating new style.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"URL",
"=",
"\"http://\"",
"+",
"str",
"(",
"host",
")",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")",
"+",
"\"/v1/styles\"",
"PARAMS",
"=",
"{",
"\"title\"",
":",
"title",
",",
"\"defaults\"",
":",
"defaults",
",",
"\"mappings\"",
":",
"mappings",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"URL",
",",
"json",
"=",
"PARAMS",
")",
"checkresponse",
"(",
"r",
")"
] | Creates a new visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:param host: cytoscape host address, default=cytoscape_host
:param port: cytoscape port, default=1234
:returns: nothing | [
"Creates",
"a",
"new",
"visual",
"style"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/vizmap.py#L86-L129 | train |
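A hedged sketch that ties create_style to the simple_defaults helper shown further down; the visual property names are standard Cytoscape identifiers, the style title is a placeholder, and the flow again assumes the cyrest.cyclient wrapper.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
# simple_defaults turns a plain dict into the list-of-dicts form create_style expects.
defaults = cytoscape.vizmap.simple_defaults({
    "NODE_FILL_COLOR": "#666666",
    "NODE_SIZE": 35,
    "EDGE_WIDTH": 2,
})
cytoscape.vizmap.create_style(title="my_style", defaults=defaults)
cytoscape.vizmap.apply(styles="my_style")   # apply the new style to the selected views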
cytoscape/py2cytoscape | py2cytoscape/cyrest/vizmap.py | vizmap.update_style | def update_style(self, title=None,defaults=None,mappings=None, verbose=False):
"""
Updates a visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:returns: nothing
"""
u=self.__url
host=u.split("//")[1].split(":")[0]
port=u.split(":")[2].split("/")[0]
version=u.split(":")[2].split("/")[1]
if defaults:
defaults_=[]
for d in defaults:
if d:
defaults_.append(d)
defaults=defaults_
if mappings:
mappings_=[]
for m in mappings:
if m:
mappings_.append(m)
mappings=mappings_
URL="http://"+str(host)+":"+str(port)+"/v1/styles/"+str(title)
if verbose:
print(URL)
sys.stdout.flush()
response = requests.get(URL).json()
olddefaults=response["defaults"]
oldmappings=response["mappings"]
if mappings:
mappings_visual_properties=[ m["visualProperty"] for m in mappings ]
newmappings=[ m for m in oldmappings if m["visualProperty"] not in mappings_visual_properties ]
for m in mappings:
newmappings.append(m)
else:
newmappings=oldmappings
if defaults:
defaults_visual_properties=[ m["visualProperty"] for m in defaults ]
newdefaults=[ m for m in olddefaults if m["visualProperty"] not in defaults_visual_properties ]
for m in defaults:
newdefaults.append(m)
else:
newdefaults=olddefaults
r=requests.delete(URL)
checkresponse(r)
URL="http://"+str(host)+":"+str(port)+"/v1/styles"
PARAMS={"title":title,\
"defaults":newdefaults,\
"mappings":newmappings}
r = requests.post(url = URL, json = PARAMS)
checkresponse(r) | python | def update_style(self, title=None,defaults=None,mappings=None, verbose=False):
"""
Updates a visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:returns: nothing
"""
u=self.__url
host=u.split("//")[1].split(":")[0]
port=u.split(":")[2].split("/")[0]
version=u.split(":")[2].split("/")[1]
if defaults:
defaults_=[]
for d in defaults:
if d:
defaults_.append(d)
defaults=defaults_
if mappings:
mappings_=[]
for m in mappings:
if m:
mappings_.append(m)
mappings=mappings_
URL="http://"+str(host)+":"+str(port)+"/v1/styles/"+str(title)
if verbose:
print(URL)
sys.stdout.flush()
response = requests.get(URL).json()
olddefaults=response["defaults"]
oldmappings=response["mappings"]
if mappings:
mappings_visual_properties=[ m["visualProperty"] for m in mappings ]
newmappings=[ m for m in oldmappings if m["visualProperty"] not in mappings_visual_properties ]
for m in mappings:
newmappings.append(m)
else:
newmappings=oldmappings
if defaults:
defaults_visual_properties=[ m["visualProperty"] for m in defaults ]
newdefaults=[ m for m in olddefaults if m["visualProperty"] not in defaults_visual_properties ]
for m in defaults:
newdefaults.append(m)
else:
newdefaults=olddefaults
r=requests.delete(URL)
checkresponse(r)
URL="http://"+str(host)+":"+str(port)+"/v1/styles"
PARAMS={"title":title,\
"defaults":newdefaults,\
"mappings":newmappings}
r = requests.post(url = URL, json = PARAMS)
checkresponse(r) | [
"def",
"update_style",
"(",
"self",
",",
"title",
"=",
"None",
",",
"defaults",
"=",
"None",
",",
"mappings",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"u",
"=",
"self",
".",
"__url",
"host",
"=",
"u",
".",
"split",
"(",
"\"//\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"port",
"=",
"u",
".",
"split",
"(",
"\":\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"0",
"]",
"version",
"=",
"u",
".",
"split",
"(",
"\":\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"1",
"]",
"if",
"defaults",
":",
"defaults_",
"=",
"[",
"]",
"for",
"d",
"in",
"defaults",
":",
"if",
"d",
":",
"defaults_",
".",
"append",
"(",
"d",
")",
"defaults",
"=",
"defaults_",
"if",
"mappings",
":",
"mappings_",
"=",
"[",
"]",
"for",
"m",
"in",
"mappings",
":",
"if",
"m",
":",
"mappings_",
".",
"append",
"(",
"m",
")",
"mappings",
"=",
"mappings_",
"URL",
"=",
"\"http://\"",
"+",
"str",
"(",
"host",
")",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")",
"+",
"\"/v1/styles/\"",
"+",
"str",
"(",
"title",
")",
"if",
"verbose",
":",
"print",
"(",
"URL",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"URL",
")",
".",
"json",
"(",
")",
"olddefaults",
"=",
"response",
"[",
"\"defaults\"",
"]",
"oldmappings",
"=",
"response",
"[",
"\"mappings\"",
"]",
"if",
"mappings",
":",
"mappings_visual_properties",
"=",
"[",
"m",
"[",
"\"visualProperty\"",
"]",
"for",
"m",
"in",
"mappings",
"]",
"newmappings",
"=",
"[",
"m",
"for",
"m",
"in",
"oldmappings",
"if",
"m",
"[",
"\"visualProperty\"",
"]",
"not",
"in",
"mappings_visual_properties",
"]",
"for",
"m",
"in",
"mappings",
":",
"newmappings",
".",
"append",
"(",
"m",
")",
"else",
":",
"newmappings",
"=",
"oldmappings",
"if",
"defaults",
":",
"defaults_visual_properties",
"=",
"[",
"m",
"[",
"\"visualProperty\"",
"]",
"for",
"m",
"in",
"defaults",
"]",
"newdefaults",
"=",
"[",
"m",
"for",
"m",
"in",
"olddefaults",
"if",
"m",
"[",
"\"visualProperty\"",
"]",
"not",
"in",
"defaults_visual_properties",
"]",
"for",
"m",
"in",
"defaults",
":",
"newdefaults",
".",
"append",
"(",
"m",
")",
"else",
":",
"newdefaults",
"=",
"olddefaults",
"r",
"=",
"requests",
".",
"delete",
"(",
"URL",
")",
"checkresponse",
"(",
"r",
")",
"URL",
"=",
"\"http://\"",
"+",
"str",
"(",
"host",
")",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")",
"+",
"\"/v1/styles\"",
"PARAMS",
"=",
"{",
"\"title\"",
":",
"title",
",",
"\"defaults\"",
":",
"newdefaults",
",",
"\"mappings\"",
":",
"newmappings",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"URL",
",",
"json",
"=",
"PARAMS",
")",
"checkresponse",
"(",
"r",
")"
] | Updates a visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:returns: nothing | [
"Updates",
"a",
"visual",
"style"
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/vizmap.py#L131-L194 | train |
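update_style takes the same title/defaults/mappings arguments as create_style but merges them into the existing style, replacing only the visual properties that are passed in. A minimal sketch under the same assumptions, reusing the placeholder style 'my_style' from the earlier example:

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
# Override one default on an existing style; untouched properties are kept.
cytoscape.vizmap.update_style(
    title="my_style",
    defaults=[{"visualProperty": "NODE_FILL_COLOR", "value": "#1F78B4"}],
)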
cytoscape/py2cytoscape | py2cytoscape/cyrest/vizmap.py | vizmap.simple_defaults | def simple_defaults(self, defaults_dic):
"""
Simplifies defaults.
:param defaults_dic: a dictionary of the form { visualProperty_A:value_A, visualProperty_B:value_B, ..}
:returns: a list of dictionaries with each item corresponding to a given key in defaults_dic
"""
defaults=[]
for d in defaults_dic.keys():
dic={}
dic["visualProperty"]=d
dic["value"]=defaults_dic[d]
defaults.append(dic)
return defaults | python | def simple_defaults(self, defaults_dic):
"""
Simplifies defaults.
:param defaults_dic: a dictionary of the form { visualProperty_A:value_A, visualProperty_B:value_B, ..}
:returns: a list of dictionaries with each item corresponding to a given key in defaults_dic
"""
defaults=[]
for d in defaults_dic.keys():
dic={}
dic["visualProperty"]=d
dic["value"]=defaults_dic[d]
defaults.append(dic)
return defaults | [
"def",
"simple_defaults",
"(",
"self",
",",
"defaults_dic",
")",
":",
"defaults",
"=",
"[",
"]",
"for",
"d",
"in",
"defaults_dic",
".",
"keys",
"(",
")",
":",
"dic",
"=",
"{",
"}",
"dic",
"[",
"\"visualProperty\"",
"]",
"=",
"d",
"dic",
"[",
"\"value\"",
"]",
"=",
"defaults_dic",
"[",
"d",
"]",
"defaults",
".",
"append",
"(",
"dic",
")",
"return",
"defaults"
] | Simplifies defaults.
:param defaults_dic: a dictionary of the form { visualProperty_A:value_A, visualProperty_B:value_B, ..}
:returns: a list of dictionaries with each item corresponding to a given key in defaults_dic | [
"Simplifies",
"defaults",
"."
] | dd34de8d028f512314d0057168df7fef7c5d5195 | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/vizmap.py#L278-L293 | train |
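For completeness, a tiny sketch of what simple_defaults returns, based directly on the loop in the code above; it is a pure reshaping helper and makes no REST call.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()
defaults = cytoscape.vizmap.simple_defaults({"NODE_SHAPE": "ellipse", "NODE_SIZE": 30})
# defaults == [{"visualProperty": "NODE_SHAPE", "value": "ellipse"},
#              {"visualProperty": "NODE_SIZE", "value": 30}]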