| repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses 6 values) | func_code_string (stringlengths 52-3.87M) | func_documentation_string (stringlengths 1-47.2k) | func_code_url (stringlengths 85-339) |
---|---|---|---|---|---|---|---|
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | _GoogleCloudStorageInputReader.next | def next(self):
"""Returns the next input from this input reader, a block of bytes.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
ctx = context.get()
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return handle
except cloudstorage.NotFoundError:
# Fail the job if we're strict on missing input.
if getattr(self, "_fail_on_missing_input", False):
raise errors.FailJobError(
"File missing in GCS, aborting: %s" % filename)
# Move on otherwise.
logging.warning("File %s may have been removed. Skipping file.",
filename) | python | def next(self):
"""Returns the next input from this input reader, a block of bytes.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
ctx = context.get()
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return handle
except cloudstorage.NotFoundError:
# Fail the job if we're strict on missing input.
if getattr(self, "_fail_on_missing_input", False):
raise errors.FailJobError(
"File missing in GCS, aborting: %s" % filename)
# Move on otherwise.
logging.warning("File %s may have been removed. Skipping file.",
filename) | Returns the next input from this input reader, a block of bytes.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2478-L2518 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | _GoogleCloudStorageRecordInputReader.next | def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(_GoogleCloudStorageRecordInputReader,
self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None | python | def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(_GoogleCloudStorageRecordInputReader,
self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None | Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2559-L2590 |
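
The reader above layers a records.RecordsReader over the file handles returned by the parent GCS reader: it reads records until EOFError, then advances to the next file until the parent raises StopIteration. Below is a minimal, self-contained sketch of that two-level iteration pattern; FakeRecordsReader and the lists of strings are illustrative stand-ins, not the mapreduce API.

```python
# Minimal sketch of the "files of records" iteration pattern used above.
# FakeRecordsReader stands in for records.RecordsReader and lists of strings
# stand in for LevelDB-format files; these are illustrative only.
class FakeRecordsReader(object):
    def __init__(self, records):
        self._records = iter(records)

    def read(self):
        try:
            return next(self._records)
        except StopIteration:
            raise EOFError()  # mirrors RecordsReader signalling end-of-file


def iter_records(files):
    """Yield every record from every file, advancing files on EOFError."""
    files = iter(files)
    reader = None
    while True:
        if reader is None:
            try:
                reader = FakeRecordsReader(next(files))
            except StopIteration:
                return  # no more files: iteration is exhausted
        try:
            yield reader.read()
        except EOFError:
            reader = None  # current file exhausted; move to the next one


if __name__ == "__main__":
    shard_files = [["r1", "r2"], [], ["r3"]]
    print(list(iter_records(shard_files)))  # ['r1', 'r2', 'r3']
```
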
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | _ReducerReader.to_json | def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
result = super(_ReducerReader, self).to_json()
result["current_key"] = self.encode_data(self.current_key)
result["current_values"] = self.encode_data(self.current_values)
return result | python | def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
result = super(_ReducerReader, self).to_json()
result["current_key"] = self.encode_data(self.current_key)
result["current_values"] = self.encode_data(self.current_values)
return result | Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2693-L2702 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | _ReducerReader.from_json | def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result | python | def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result | Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2705-L2717 |
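
to_json and from_json checkpoint the reducer reader's position (current_key and current_values) so a shard can resume in a later slice. The excerpt does not show encode_data/decode_data, so the sketch below assumes they are some reversible serialization (pickle plus base64 is a stand-in only) and uses a simplified toy class rather than the real _ReducerReader.

```python
# Sketch of the to_json()/from_json() checkpoint round-trip, assuming
# encode_data/decode_data are a reversible serialization (stand-in below).
import base64
import pickle


class ToyReducerReader(object):
    def __init__(self, current_key=None, current_values=None):
        self.current_key = current_key
        self.current_values = current_values or []

    @staticmethod
    def encode_data(data):
        # Assumption: the real encode_data is some reversible encoding.
        return base64.b64encode(pickle.dumps(data)).decode("ascii")

    @staticmethod
    def decode_data(encoded):
        return pickle.loads(base64.b64decode(encoded))

    def to_json(self):
        return {"current_key": self.encode_data(self.current_key),
                "current_values": self.encode_data(self.current_values)}

    @classmethod
    def from_json(cls, state):
        return cls(cls.decode_data(state["current_key"]),
                   cls.decode_data(state["current_values"]))


if __name__ == "__main__":
    state = ToyReducerReader("user:42", ["a", "b"]).to_json()  # e.g. end of a slice
    resumed = ToyReducerReader.from_json(state)                # start of the next
    print(resumed.current_key)     # user:42
    print(resumed.current_values)  # ['a', 'b']
```
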
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/map_job_context.py | ShardContext.incr | def incr(self, counter_name, delta=1):
"""Changes counter by delta.
Args:
counter_name: the name of the counter to change. str.
delta: int.
"""
self._state.counters_map.increment(counter_name, delta) | python | def incr(self, counter_name, delta=1):
"""Changes counter by delta.
Args:
counter_name: the name of the counter to change. str.
delta: int.
"""
self._state.counters_map.increment(counter_name, delta) | Changes counter by delta.
Args:
counter_name: the name of the counter to change. str.
delta: int. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/map_job_context.py#L63-L70 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/map_job_context.py | ShardContext.counter | def counter(self, counter_name, default=0):
"""Get the current counter value.
Args:
counter_name: name of the counter in string.
default: default value in int if one doesn't exist.
Returns:
Current value of the counter.
"""
return self._state.counters_map.get(counter_name, default) | python | def counter(self, counter_name, default=0):
"""Get the current counter value.
Args:
counter_name: name of the counter in string.
default: default value in int if one doesn't exist.
Returns:
Current value of the counter.
"""
return self._state.counters_map.get(counter_name, default) | Get the current counter value.
Args:
counter_name: name of the counter in string.
default: default value in int if one doesn't exist.
Returns:
Current value of the counter. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/map_job_context.py#L72-L82 |
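
Both incr and counter are thin wrappers around the shard state's counters map. A dictionary-backed stand-in (not the real mapreduce CountersMap) illustrates the semantics: increment by a delta, read with a default.

```python
# Dictionary-backed stand-in for the shard state's counters map, illustrating
# the incr()/counter() semantics above (not the real mapreduce CountersMap).
class ToyCountersMap(object):
    def __init__(self):
        self._counters = {}

    def increment(self, name, delta):
        self._counters[name] = self._counters.get(name, 0) + delta

    def get(self, name, default=0):
        return self._counters.get(name, default)


class ToyShardContext(object):
    def __init__(self):
        self.counters_map = ToyCountersMap()

    def incr(self, counter_name, delta=1):
        self.counters_map.increment(counter_name, delta)

    def counter(self, counter_name, default=0):
        return self.counters_map.get(counter_name, default)


if __name__ == "__main__":
    ctx = ToyShardContext()
    ctx.incr("rows_read")
    ctx.incr("rows_read", 4)
    print(ctx.counter("rows_read"))    # 5
    print(ctx.counter("missing", -1))  # -1 (default when counter is not set)
```
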
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/map_job_context.py | SliceContext.emit | def emit(self, value):
"""Emits a value to output writer.
Args:
value: a value of type expected by the output writer.
"""
if not self._tstate.output_writer:
logging.error("emit is called, but no output writer is set.")
return
self._tstate.output_writer.write(value) | python | def emit(self, value):
"""Emits a value to output writer.
Args:
value: a value of type expected by the output writer.
"""
if not self._tstate.output_writer:
logging.error("emit is called, but no output writer is set.")
return
self._tstate.output_writer.write(value) | Emits a value to output writer.
Args:
value: a value of type expected by the output writer. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/map_job_context.py#L119-L128 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/lib/input_reader/_gcs.py | GCSInputReader.validate | def validate(cls, job_config):
"""Validate mapper specification.
Args:
job_config: map_job.JobConfig.
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
reader_params = job_config.input_reader_params
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_params[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
# Delimiter.
if cls.DELIMITER_PARAM in reader_params:
delimiter = reader_params[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
# Buffer size.
if cls.BUFFER_SIZE_PARAM in reader_params:
buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]
if not isinstance(buffer_size, int):
raise errors.BadReaderParamsError(
"%s is not an int but a %s" %
(cls.BUFFER_SIZE_PARAM, type(buffer_size)))
# Path filter.
if cls.PATH_FILTER_PARAM in reader_params:
path_filter = reader_params[cls.PATH_FILTER_PARAM]
if not isinstance(path_filter, PathFilter):
raise errors.BadReaderParamsError(
"%s is not an instance of PathFilter but %s." %
(cls.PATH_FILTER_PARAM, type(path_filter))) | python | def validate(cls, job_config):
"""Validate mapper specification.
Args:
job_config: map_job.JobConfig.
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
reader_params = job_config.input_reader_params
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_params[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
# Delimiter.
if cls.DELIMITER_PARAM in reader_params:
delimiter = reader_params[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
# Buffer size.
if cls.BUFFER_SIZE_PARAM in reader_params:
buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]
if not isinstance(buffer_size, int):
raise errors.BadReaderParamsError(
"%s is not an int but a %s" %
(cls.BUFFER_SIZE_PARAM, type(buffer_size)))
# Path filter.
if cls.PATH_FILTER_PARAM in reader_params:
path_filter = reader_params[cls.PATH_FILTER_PARAM]
if not isinstance(path_filter, PathFilter):
raise errors.BadReaderParamsError(
"%s is not an instance of PathFilter but %s." %
(cls.PATH_FILTER_PARAM, type(path_filter))) | Validate mapper specification.
Args:
job_config: map_job.JobConfig.
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L164-L225 |
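
The validation above only requires a valid bucket name, a list of object-name strings, and optionally a string delimiter, an int buffer size, and a PathFilter instance. The standalone sketch below shows a parameter dict of that shape and repeats the type checks in simplified form; the literal keys ("bucket_name", "objects", and so on) are assumptions, since the excerpt shows the *_PARAM constants but not their string values, and the real validator also calls cloudstorage.validate_bucket_name.

```python
# Standalone sketch of parameters that would satisfy the validation above.
# The literal keys are assumptions for the *_PARAM constants, and the checks
# are simplified: the real code also validates the bucket name via the
# cloudstorage library.
reader_params = {
    "bucket_name": "my-input-bucket",                     # required
    "objects": ["logs/2015-01-*", "one_off/file.data"],   # required, list of str
    "delimiter": "/",                                     # optional, str
    "buffer_size": 1024 * 1024,                           # optional, int
}

def validate_params(params):
    if "bucket_name" not in params:
        raise ValueError("bucket_name is required")
    filenames = params.get("objects")
    if not isinstance(filenames, list):
        raise ValueError("objects must be a list, got %r" % type(filenames))
    for name in filenames:
        if not isinstance(name, str):
            raise ValueError("object name must be a string, got %r" % type(name))
    if "delimiter" in params and not isinstance(params["delimiter"], str):
        raise ValueError("delimiter must be a string")
    if "buffer_size" in params and not isinstance(params["buffer_size"], int):
        raise ValueError("buffer_size must be an int")

validate_params(reader_params)  # passes silently for the dict above
```
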
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/lib/input_reader/_gcs.py | GCSInputReader.split_input | def split_input(cls, job_config):
"""Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
job_config: map_job.JobConfig
Returns:
A list of InputReaders. None when no input data can be found.
"""
reader_params = job_config.input_reader_params
bucket = reader_params[cls.BUCKET_NAME_PARAM]
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
delimiter = reader_params.get(cls.DELIMITER_PARAM)
account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)
path_filter = reader_params.get(cls.PATH_FILTER_PARAM)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, job_config.shard_count):
shard_filenames = all_filenames[shard::job_config.shard_count]
if shard_filenames:
readers.append(cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter, path_filter=path_filter))
return readers | python | def split_input(cls, job_config):
"""Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
job_config: map_job.JobConfig
Returns:
A list of InputReaders. None when no input data can be found.
"""
reader_params = job_config.input_reader_params
bucket = reader_params[cls.BUCKET_NAME_PARAM]
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
delimiter = reader_params.get(cls.DELIMITER_PARAM)
account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)
path_filter = reader_params.get(cls.PATH_FILTER_PARAM)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, job_config.shard_count):
shard_filenames = all_filenames[shard::job_config.shard_count]
if shard_filenames:
readers.append(cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter, path_filter=path_filter))
return readers | Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
job_config: map_job.JobConfig
Returns:
A list of InputReaders. None when no input data can be found. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L228-L269 |
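
After wildcard expansion, split_input stripes files across shards with the slice all_filenames[shard::shard_count], and shards that receive no files are simply dropped. A quick standalone illustration of that striping:

```python
# Illustration of the filename striping used by split_input() above:
# shard i receives all_filenames[i::shard_count].
all_filenames = ["/bucket/f%02d" % i for i in range(7)]
shard_count = 3

for shard in range(shard_count):
    shard_filenames = all_filenames[shard::shard_count]
    print("shard %d: %s" % (shard, shard_filenames))
# shard 0: ['/bucket/f00', '/bucket/f03', '/bucket/f06']
# shard 1: ['/bucket/f01', '/bucket/f04']
# shard 2: ['/bucket/f02', '/bucket/f05']
```
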
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/lib/input_reader/_gcs.py | GCSInputReader.next | def next(self):
"""Returns a handler to the next file.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
if (self._path_filter and
not self._path_filter.accept(self._slice_ctx, filename)):
continue
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int((time.time() - start_time) * 1000))
self._slice_ctx.incr(self.COUNTER_FILE_READ)
return handle
except cloudstorage.NotFoundError:
logging.warning("File %s may have been removed. Skipping file.",
filename)
self._slice_ctx.incr(self.COUNTER_FILE_MISSING) | python | def next(self):
"""Returns a handler to the next file.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
if (self._path_filter and
not self._path_filter.accept(self._slice_ctx, filename)):
continue
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int((time.time() - start_time) * 1000))
self._slice_ctx.incr(self.COUNTER_FILE_READ)
return handle
except cloudstorage.NotFoundError:
logging.warning("File %s may have been removed. Skipping file.",
filename)
self._slice_ctx.incr(self.COUNTER_FILE_MISSING) | Returns a handler to the next file.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L289-L325 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/lib/input_reader/_gcs.py | GCSInputReader.params_to_json | def params_to_json(cls, params):
"""Inherit docs."""
params_cp = dict(params)
if cls.PATH_FILTER_PARAM in params_cp:
path_filter = params_cp[cls.PATH_FILTER_PARAM]
params_cp[cls.PATH_FILTER_PARAM] = pickle.dumps(path_filter)
return params_cp | python | def params_to_json(cls, params):
"""Inherit docs."""
params_cp = dict(params)
if cls.PATH_FILTER_PARAM in params_cp:
path_filter = params_cp[cls.PATH_FILTER_PARAM]
params_cp[cls.PATH_FILTER_PARAM] = pickle.dumps(path_filter)
return params_cp | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L348-L354 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/lib/input_reader/_gcs.py | GCSRecordInputReader.next | def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int((time.time() - start_time) * 1000))
return content
except EOFError:
self._cur_handle = None
self._record_reader = None | python | def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int((time.time() - start_time) * 1000))
return content
except EOFError:
self._cur_handle = None
self._record_reader = None | Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L379-L405 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py | AbstractDatastoreInputReader._get_query_spec | def _get_query_spec(cls, params):
"""Construct a model.QuerySpec from model.MapperSpec."""
entity_kind = params[cls.ENTITY_KIND_PARAM]
filters = params.get(cls.FILTERS_PARAM)
app = params.get(cls._APP_PARAM)
ns = params.get(cls.NAMESPACE_PARAM)
return model.QuerySpec(
entity_kind=cls._get_raw_entity_kind(entity_kind),
keys_only=bool(params.get(cls.KEYS_ONLY_PARAM, False)),
filters=filters,
batch_size=int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)),
model_class_path=entity_kind,
app=app,
ns=ns) | python | def _get_query_spec(cls, params):
"""Construct a model.QuerySpec from model.MapperSpec."""
entity_kind = params[cls.ENTITY_KIND_PARAM]
filters = params.get(cls.FILTERS_PARAM)
app = params.get(cls._APP_PARAM)
ns = params.get(cls.NAMESPACE_PARAM)
return model.QuerySpec(
entity_kind=cls._get_raw_entity_kind(entity_kind),
keys_only=bool(params.get(cls.KEYS_ONLY_PARAM, False)),
filters=filters,
batch_size=int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)),
model_class_path=entity_kind,
app=app,
ns=ns) | Construct a model.QuerySpec from model.MapperSpec. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py#L86-L100 |
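
The helper maps reader parameters onto a model.QuerySpec. The sketch below uses a namedtuple stand-in with the same field names (taken from the keyword arguments above); the literal parameter keys and the identity treatment of _get_raw_entity_kind are assumptions for illustration only.

```python
# Namedtuple stand-in for model.QuerySpec (field names taken from the call
# above). The literal parameter keys and the identity _get_raw_entity_kind
# are assumptions for illustration.
from collections import namedtuple

QuerySpec = namedtuple(
    "QuerySpec",
    ["entity_kind", "keys_only", "filters", "batch_size",
     "model_class_path", "app", "ns"])

def get_query_spec(params, default_batch_size=50):
    entity_kind = params["entity_kind"]
    return QuerySpec(
        entity_kind=entity_kind,   # real code strips this down to the raw kind
        keys_only=bool(params.get("keys_only", False)),
        filters=params.get("filters"),
        batch_size=int(params.get("batch_size", default_batch_size)),
        model_class_path=entity_kind,
        app=params.get("_app"),
        ns=params.get("namespace"))

print(get_query_spec({"entity_kind": "models.UserLog", "batch_size": "100"}))
```
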
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py | AbstractDatastoreInputReader.split_input | def split_input(cls, job_config):
"""Inherit doc."""
shard_count = job_config.shard_count
params = job_config.input_reader_params
query_spec = cls._get_query_spec(params)
namespaces = None
if query_spec.ns is not None:
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, [query_spec.ns], shard_count, query_spec)
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
# No namespace means the app may have some data but those data are not
# visible yet. Just return.
if not ns_keys:
return
# If the number of ns is small, we shard each ns by key and assign each
# shard a piece of a ns.
elif len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
namespaces = [ns_key.name() or "" for ns_key in ns_keys]
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, namespaces, shard_count, query_spec)
# When number of ns is large, we can only split lexicographically by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
k_ranges = [key_ranges.KeyRangesFactory.create_from_ns_range(ns_range)
for ns_range in ns_ranges]
iters = [db_iters.RangeIteratorFactory.create_key_ranges_iterator(
r, query_spec, cls._KEY_RANGE_ITER_CLS) for r in k_ranges]
return [cls(i) for i in iters] | python | def split_input(cls, job_config):
"""Inherit doc."""
shard_count = job_config.shard_count
params = job_config.input_reader_params
query_spec = cls._get_query_spec(params)
namespaces = None
if query_spec.ns is not None:
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, [query_spec.ns], shard_count, query_spec)
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
# No namespace means the app may have some data but those data are not
# visible yet. Just return.
if not ns_keys:
return
# If the number of ns is small, we shard each ns by key and assign each
# shard a piece of a ns.
elif len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
namespaces = [ns_key.name() or "" for ns_key in ns_keys]
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, namespaces, shard_count, query_spec)
# When number of ns is large, we can only split lexicographically by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
k_ranges = [key_ranges.KeyRangesFactory.create_from_ns_range(ns_range)
for ns_range in ns_ranges]
iters = [db_iters.RangeIteratorFactory.create_key_ranges_iterator(
r, query_spec, cls._KEY_RANGE_ITER_CLS) for r in k_ranges]
return [cls(i) for i in iters] | Inherit doc. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py#L103-L138 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py | AbstractDatastoreInputReader._to_key_ranges_by_shard | def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
"""Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects.
"""
key_ranges_by_ns = []
# Split each ns into n splits. If a ns doesn't have enough scatter to
# split into n, the last few splits are None.
for namespace in namespaces:
ranges = cls._split_ns_by_scatter(
shard_count,
namespace,
query_spec.entity_kind,
app)
# The nth split of each ns will be assigned to the nth shard.
# Shuffle so that the Nones are not all at the end.
random.shuffle(ranges)
key_ranges_by_ns.append(ranges)
# KeyRanges from different namespaces might be very different in size.
# Use round robin to make sure each shard can have at most one split
# or a None from a ns.
ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in key_ranges_by_ns:
for i, k_range in enumerate(ranges):
if k_range:
ranges_by_shard[i].append(k_range)
key_ranges_by_shard = []
for ranges in ranges_by_shard:
if ranges:
key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
ranges))
return key_ranges_by_shard | python | def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
"""Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects.
"""
key_ranges_by_ns = []
# Split each ns into n splits. If a ns doesn't have enough scatter to
# split into n, the last few splits are None.
for namespace in namespaces:
ranges = cls._split_ns_by_scatter(
shard_count,
namespace,
query_spec.entity_kind,
app)
# The nth split of each ns will be assigned to the nth shard.
# Shuffle so that the Nones are not all at the end.
random.shuffle(ranges)
key_ranges_by_ns.append(ranges)
# KeyRanges from different namespaces might be very different in size.
# Use round robin to make sure each shard can have at most one split
# or a None from a ns.
ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in key_ranges_by_ns:
for i, k_range in enumerate(ranges):
if k_range:
ranges_by_shard[i].append(k_range)
key_ranges_by_shard = []
for ranges in ranges_by_shard:
if ranges:
key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
ranges))
return key_ranges_by_shard | Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py#L141-L184 |
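
Once every namespace has been split into shard_count pieces (padded with None when a namespace is too small) and shuffled, the i-th piece of each namespace is assigned to shard i, and shards that end up empty contribute no KeyRanges. A standalone sketch of that assignment (shuffling omitted for a deterministic result), with strings standing in for key_range.KeyRange objects:

```python
# Sketch of the per-shard assignment done above: splits_by_namespace[ns][i]
# goes to shard i; Nones (namespaces too small to split further) are dropped.
# Strings stand in for key_range.KeyRange objects.
shard_count = 3
splits_by_namespace = [
    ["nsA-split0", "nsA-split1", "nsA-split2"],  # a large namespace
    ["nsB-split0", None, None],                  # a small namespace
]

ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in splits_by_namespace:
    for i, k_range in enumerate(ranges):
        if k_range:
            ranges_by_shard[i].append(k_range)

key_ranges_by_shard = [ranges for ranges in ranges_by_shard if ranges]
print(key_ranges_by_shard)
# [['nsA-split0', 'nsB-split0'], ['nsA-split1'], ['nsA-split2']]
```
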
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py | AbstractDatastoreInputReader._split_ns_by_scatter | def _split_ns_by_scatter(cls,
shard_count,
namespace,
raw_entity_kind,
app):
"""Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
split into the requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end.
"""
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
oversampling_factor = 32
random_keys = ds_query.Get(shard_count * oversampling_factor)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
k_ranges = []
k_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
k_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
k_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(k_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
k_ranges += [None] * (shard_count - len(k_ranges))
return k_ranges | python | def _split_ns_by_scatter(cls,
shard_count,
namespace,
raw_entity_kind,
app):
"""Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
split into the requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end.
"""
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
oversampling_factor = 32
random_keys = ds_query.Get(shard_count * oversampling_factor)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
k_ranges = []
k_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
k_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
k_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(k_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
k_ranges += [None] * (shard_count - len(k_ranges))
return k_ranges | Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
split into the requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py#L187-L264 |
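
After the __scatter__ sample is sorted and thinned down to range boundaries (presumably shard_count - 1 of them), the boundaries become half-open key ranges: an open-start range up to the first boundary, [k_i, k_i+1) ranges in the middle, an open-end range after the last boundary, padded with None when there were too few samples. The standalone sketch below mirrors that construction with integers standing in for datastore keys; the even-stride choose_split_points is an assumption, since _choose_split_points itself is not shown in the excerpt.

```python
# Sketch of turning sorted scatter samples into half-open ranges as above.
# Integers stand in for datastore keys; choose_split_points is an assumed
# even-stride downsampling (the real _choose_split_points is not shown).
def choose_split_points(sorted_keys, shard_count):
    # Pick shard_count - 1 roughly evenly spaced keys as range boundaries.
    index_stride = len(sorted_keys) / float(shard_count)
    return [sorted_keys[int(round(index_stride * i))]
            for i in range(1, shard_count)]

def split_by_scatter(sorted_keys, shard_count):
    if not sorted_keys:                       # nothing to split on
        return [("-inf", "+inf")] + [None] * (shard_count - 1)
    if len(sorted_keys) >= shard_count:
        sorted_keys = choose_split_points(sorted_keys, shard_count)
    ranges = [("-inf", sorted_keys[0])]       # open start, exclusive end
    for start, end in zip(sorted_keys, sorted_keys[1:]):
        ranges.append((start, end))           # inclusive start, exclusive end
    ranges.append((sorted_keys[-1], "+inf"))  # inclusive start, open end
    ranges += [None] * (shard_count - len(ranges))  # pad to shard_count
    return ranges

print(split_by_scatter(sorted([14, 3, 57, 31, 72, 45, 88, 20]), 4))
# [('-inf', 20), (20, 45), (45, 72), (72, '+inf')]
```
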
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py | AbstractDatastoreInputReader.validate | def validate(cls, job_config):
"""Inherit docs."""
super(AbstractDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
# Check for the required entity kind parameter.
if cls.ENTITY_KIND_PARAM not in params:
raise errors.BadReaderParamsError("Missing input reader parameter "
"'entity_kind'")
# Validate the batch size parameter.
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise errors.BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise errors.BadReaderParamsError("Bad batch size: %s" % e)
# Validate the keys only parameter.
try:
bool(params.get(cls.KEYS_ONLY_PARAM, False))
except:
raise errors.BadReaderParamsError("keys_only expects a boolean value but "
"got %s",
params[cls.KEYS_ONLY_PARAM])
# Validate the namespace parameter.
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise errors.BadReaderParamsError("Expected a single namespace string")
# Validate the filters parameter.
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise errors.BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise errors.BadReaderParamsError("Filter should be a tuple or list: "
"%s", f)
if len(f) != 3:
raise errors.BadReaderParamsError("Filter should be a 3-tuple: %s", f)
prop, op, _ = f
if not isinstance(prop, basestring):
raise errors.BadReaderParamsError("Property should be string: %s",
prop)
if not isinstance(op, basestring):
raise errors.BadReaderParamsError("Operator should be string: %s", op) | python | def validate(cls, job_config):
"""Inherit docs."""
super(AbstractDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
# Check for the required entity kind parameter.
if cls.ENTITY_KIND_PARAM not in params:
raise errors.BadReaderParamsError("Missing input reader parameter "
"'entity_kind'")
# Validate the batch size parameter.
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise errors.BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise errors.BadReaderParamsError("Bad batch size: %s" % e)
# Validate the keys only parameter.
try:
bool(params.get(cls.KEYS_ONLY_PARAM, False))
except:
raise errors.BadReaderParamsError("keys_only expects a boolean value but "
"got %s",
params[cls.KEYS_ONLY_PARAM])
# Validate the namespace parameter.
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise errors.BadReaderParamsError("Expected a single namespace string")
# Validate the filters parameter.
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise errors.BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise errors.BadReaderParamsError("Filter should be a tuple or list: "
"%s", f)
if len(f) != 3:
raise errors.BadReaderParamsError("Filter should be a 3-tuple: %s", f)
prop, op, _ = f
if not isinstance(prop, basestring):
raise errors.BadReaderParamsError("Property should be string: %s",
prop)
if not isinstance(op, basestring):
raise errors.BadReaderParamsError("Operator should be string: %s", op) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/abstract_datastore_input_reader.py#L275-L321 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | _setup_constants | def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
max_length=MAX_NAMESPACE_LENGTH,
batch_size=NAMESPACE_BATCH_SIZE):
"""Calculate derived constant values. Only useful for testing."""
global NAMESPACE_CHARACTERS
global MAX_NAMESPACE_LENGTH
# pylint: disable=global-variable-undefined
global MAX_NAMESPACE
global _LEX_DISTANCE
global NAMESPACE_BATCH_SIZE
NAMESPACE_CHARACTERS = alphabet
MAX_NAMESPACE_LENGTH = max_length
MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
NAMESPACE_BATCH_SIZE = batch_size
# _LEX_DISTANCE will contain the lexical distance between two adjacent
# characters in NAMESPACE_CHARACTERS at each character index. This is used
# to calculate the ordinal for each string. Example:
# NAMESPACE_CHARACTERS = 'ab'
# MAX_NAMESPACE_LENGTH = 3
# _LEX_DISTANCE = [1, 3, 7]
# '' => 0
# 'a' => 1
# 'aa' => 2
# 'aaa' => 3
# 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
# 'ab' => 5 - Distance between 'aa' and 'ab' is 3.
# 'aba' => 6
# 'abb' => 7
# 'b' => 8 - Distance between 'a' and 'b' is 7.
# 'ba' => 9
# 'baa' => 10
# 'bab' => 11
# ...
# _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
_LEX_DISTANCE = [1]
for i in range(1, MAX_NAMESPACE_LENGTH):
_LEX_DISTANCE.append(
_LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
# pylint: disable=undefined-loop-variable
del i | python | def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
max_length=MAX_NAMESPACE_LENGTH,
batch_size=NAMESPACE_BATCH_SIZE):
"""Calculate derived constant values. Only useful for testing."""
global NAMESPACE_CHARACTERS
global MAX_NAMESPACE_LENGTH
# pylint: disable=global-variable-undefined
global MAX_NAMESPACE
global _LEX_DISTANCE
global NAMESPACE_BATCH_SIZE
NAMESPACE_CHARACTERS = alphabet
MAX_NAMESPACE_LENGTH = max_length
MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
NAMESPACE_BATCH_SIZE = batch_size
# _LEX_DISTANCE will contain the lexical distance between two adjacent
# characters in NAMESPACE_CHARACTERS at each character index. This is used
# to calculate the ordinal for each string. Example:
# NAMESPACE_CHARACTERS = 'ab'
# MAX_NAMESPACE_LENGTH = 3
# _LEX_DISTANCE = [1, 3, 7]
# '' => 0
# 'a' => 1
# 'aa' => 2
# 'aaa' => 3
# 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
# 'ab' => 5 - Distance between 'aa' and 'ab' is 3.
# 'aba' => 6
# 'abb' => 7
# 'b' => 8 - Distance between 'a' and 'b' is 7.
# 'ba' => 9
# 'baa' => 10
# 'bab' => 11
# ...
# _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
_LEX_DISTANCE = [1]
for i in range(1, MAX_NAMESPACE_LENGTH):
_LEX_DISTANCE.append(
_LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
# pylint: disable=undefined-loop-variable
del i | Calculate derived constant values. Only useful for testing. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L48-L90 |
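
The _LEX_DISTANCE recurrence in the comment is easy to verify directly: for alphabet 'ab' and maximum length 3 it yields [1, 3, 7], and summing per-character contributions reproduces the ordinals listed (for example 'bab' -> 11). A standalone check:

```python
# Standalone check of the _LEX_DISTANCE recurrence described in the comment
# above, for alphabet 'ab' and maximum namespace length 3.
alphabet = "ab"
max_length = 3

lex_distance = [1]
for i in range(1, max_length):
    lex_distance.append(lex_distance[i - 1] * len(alphabet) + 1)

print(lex_distance)  # [1, 3, 7]
# With these distances the ordinal of 'bab' is
# (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11, matching the comment.
print((1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1))  # 11
```
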
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | _ord_to_namespace | def _ord_to_namespace(n, _max_length=None):
"""Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
A string representing the nth namespace in lexographical order.
"""
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
return (NAMESPACE_CHARACTERS[n / length] +
_ord_to_namespace(n % length, _max_length - 1)) | python | def _ord_to_namespace(n, _max_length=None):
"""Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
A string representing the nth namespace in lexographical order.
"""
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
return (NAMESPACE_CHARACTERS[n / length] +
_ord_to_namespace(n % length, _max_length - 1)) | Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
A string representing the nth namespace in lexographical order. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L94-L123 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | _namespace_to_ord | def _namespace_to_ord(namespace):
"""Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
0
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
An int representing the lexographical order of the given namespace string.
"""
n = 0
for i, c in enumerate(namespace):
n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
NAMESPACE_CHARACTERS.index(c)
+ 1)
return n | python | def _namespace_to_ord(namespace):
"""Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
0
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
An int representing the lexographical order of the given namespace string.
"""
n = 0
for i, c in enumerate(namespace):
n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
NAMESPACE_CHARACTERS.index(c)
+ 1)
return n | Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
0
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
An int representing the lexographical order of the given namespace string. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L126-L147 |
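
Together, _ord_to_namespace and _namespace_to_ord are inverses over all namespaces up to the maximum length. The standalone round-trip check below mirrors their logic for a two-character alphabet, using // for the integer division that the Python 2 code performs with /:

```python
# Standalone round-trip check of the ordinal <-> namespace conversions above,
# using a two-character alphabet and max length 3 (mirrors the module logic;
# // replaces Python 2's integer division).
alphabet = "ab"
max_length = 3

lex_distance = [1]
for i in range(1, max_length):
    lex_distance.append(lex_distance[i - 1] * len(alphabet) + 1)

def ord_to_namespace(n, remaining=max_length):
    if n == 0:
        return ""
    length = lex_distance[remaining - 1]
    n -= 1
    return alphabet[n // length] + ord_to_namespace(n % length, remaining - 1)

def namespace_to_ord(namespace):
    n = 0
    for i, c in enumerate(namespace):
        n += lex_distance[max_length - i - 1] * alphabet.index(c) + 1
    return n

total = namespace_to_ord(alphabet[-1] * max_length)  # ordinal of 'bbb'
assert all(namespace_to_ord(ord_to_namespace(n)) == n for n in range(total + 1))
print(ord_to_namespace(11))      # bab
print(namespace_to_ord("bab"))   # 11
```
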
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | _key_for_namespace | def _key_for_namespace(namespace, app):
"""Return the __namespace__ key for a namespace.
Args:
namespace: The namespace whose key is requested.
app: The id of the application that the key belongs to.
Returns:
A db.Key representing the namespace.
"""
if namespace:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
namespace,
_app=app)
else:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
metadata.Namespace.EMPTY_NAMESPACE_ID,
_app=app) | python | def _key_for_namespace(namespace, app):
"""Return the __namespace__ key for a namespace.
Args:
namespace: The namespace whose key is requested.
app: The id of the application that the key belongs to.
Returns:
A db.Key representing the namespace.
"""
if namespace:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
namespace,
_app=app)
else:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
metadata.Namespace.EMPTY_NAMESPACE_ID,
_app=app) | Return the __namespace__ key for a namespace.
Args:
namespace: The namespace whose key is requested.
app: The id of the application that the key belongs to.
Returns:
A db.Key representing the namespace. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L150-L167 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | get_namespace_keys | def get_namespace_keys(app, limit):
"""Get namespace keys."""
ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
return list(ns_query.Run(limit=limit, batch_size=limit)) | python | def get_namespace_keys(app, limit):
"""Get namespace keys."""
ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
return list(ns_query.Run(limit=limit, batch_size=limit)) | Get namespace keys. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L457-L460 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.split_range | def split_range(self):
"""Splits the NamespaceRange into two nearly equal-sized ranges.
Returns:
If this NamespaceRange contains a single namespace then a list containing
this NamespaceRange is returned. Otherwise a two-element list containing
two NamespaceRanges whose total range is identical to this
NamespaceRange's is returned.
"""
if self.is_single_namespace:
return [self]
mid_point = (_namespace_to_ord(self.namespace_start) +
_namespace_to_ord(self.namespace_end)) // 2
return [NamespaceRange(self.namespace_start,
_ord_to_namespace(mid_point),
_app=self.app),
NamespaceRange(_ord_to_namespace(mid_point+1),
self.namespace_end,
_app=self.app)] | python | def split_range(self):
"""Splits the NamespaceRange into two nearly equal-sized ranges.
Returns:
If this NamespaceRange contains a single namespace then a list containing
this NamespaceRange is returned. Otherwise a two-element list containing
two NamespaceRanges whose total range is identical to this
NamespaceRange's is returned.
"""
if self.is_single_namespace:
return [self]
mid_point = (_namespace_to_ord(self.namespace_start) +
_namespace_to_ord(self.namespace_end)) // 2
return [NamespaceRange(self.namespace_start,
_ord_to_namespace(mid_point),
_app=self.app),
NamespaceRange(_ord_to_namespace(mid_point+1),
self.namespace_end,
_app=self.app)] | Splits the NamespaceRange into two nearly equal-sized ranges.
Returns:
If this NamespaceRange contains a single namespace then a list containing
this NamespaceRange is returned. Otherwise a two-element list containing
two NamespaceRanges whose total range is identical to this
NamespaceRange's is returned. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L225-L245 |
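
split_range halves the range in ordinal space: it converts both endpoints to ordinals, takes the midpoint, and builds two subranges that meet at the midpoint namespace. With plain integers standing in for namespace ordinals, and a hypothetical driver loop (not the real NamespaceRange.split) repeatedly halving the largest splittable range, the idea looks like this:

```python
# Integer stand-in for the ordinal-midpoint splitting done by split_range():
# a (start, end) range is halved until every range covers a single ordinal.
def split_range(start, end):
    if start == end:                 # single namespace: nothing to split
        return [(start, end)]
    mid = (start + end) // 2
    return [(start, mid), (mid + 1, end)]

def split_n_ways(start, end, n):
    # Hypothetical driver for illustration only; the real NamespaceRange.split
    # also consults the datastore for actually existing namespaces.
    ranges = [(start, end)]
    while len(ranges) < n:
        candidates = [r for r in ranges if r[0] != r[1]]
        if not candidates:
            break                    # every range is already a single namespace
        r = candidates[0]
        ranges.remove(r)
        ranges.extend(split_range(*r))
    return sorted(ranges)

print(split_n_ways(0, 14, 4))  # [(0, 3), (4, 7), (8, 11), (12, 14)]
```
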
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.with_start_after | def with_start_after(self, after_namespace):
"""Returns a copy of this NamespaceName with a new namespace_start.
Args:
after_namespace: A namespace string.
Returns:
A NamespaceRange object whose namespace_start is the lexographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace.
"""
namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
return NamespaceRange(namespace_start, self.namespace_end, _app=self.app) | python | def with_start_after(self, after_namespace):
"""Returns a copy of this NamespaceName with a new namespace_start.
Args:
after_namespace: A namespace string.
Returns:
A NamespaceRange object whose namespace_start is the lexographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace.
"""
namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
return NamespaceRange(namespace_start, self.namespace_end, _app=self.app) | Returns a copy of this NamespaceName with a new namespace_start.
Args:
after_namespace: A namespace string.
Returns:
A NamespaceRange object whose namespace_start is the lexographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L267-L281 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.make_datastore_query | def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app) | python | def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app) | Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L283-L303 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.normalized_start | def normalized_start(self):
"""Returns a NamespaceRange with leading non-existant namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore.
"""
namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
if not namespaces_after_key:
return None
namespace_after_key = namespaces_after_key[0].name() or ''
return NamespaceRange(namespace_after_key,
self.namespace_end,
_app=self.app) | python | def normalized_start(self):
"""Returns a NamespaceRange with leading non-existant namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore.
"""
namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
if not namespaces_after_key:
return None
namespace_after_key = namespaces_after_key[0].name() or ''
return NamespaceRange(namespace_after_key,
self.namespace_end,
_app=self.app) | Returns a NamespaceRange with leading non-existent namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L305-L322 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.to_json_object | def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict | python | def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict | Returns a dict representation that can be serialized to JSON. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L324-L330 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | NamespaceRange.split | def split(cls,
n,
contiguous,
can_query=itertools.chain(itertools.repeat(True, 50),
itertools.repeat(False)).next,
_app=None):
# pylint: disable=g-doc-args
"""Splits the complete NamespaceRange into n equally-sized NamespaceRanges.
Args:
n: The maximum number of NamespaceRanges to return. Fewer than n
namespaces may be returned.
contiguous: If True then the returned NamespaceRanges will cover the
entire space of possible namespaces (i.e. from MIN_NAMESPACE to
MAX_NAMESPACE) without gaps. If False then the returned
NamespaceRanges may exclude namespaces that don't appear in the
datastore.
can_query: A function that returns True if split() can query the datastore
to generate more fair namespace range splits, and False otherwise.
If not set then split() is allowed to make 50 datastore queries.
Returns:
A list of at most n NamespaceRanges representing a near-equal distribution
of actual existent datastore namespaces. The returned list will be sorted
lexicographically.
Raises:
ValueError: if n is < 1.
"""
if n < 1:
raise ValueError('n must be >= 1')
ranges = None
if can_query():
if not contiguous:
ns_keys = get_namespace_keys(_app, n + 1)
if not ns_keys:
return []
else:
if len(ns_keys) <= n:
# If there are fewer actual namespaces than the number of NamespaceRanges
# to return, then just return the list of those namespaces.
ns_range = []
for ns_key in ns_keys:
ns_range.append(NamespaceRange(ns_key.name() or '',
ns_key.name() or '',
_app=_app))
return sorted(ns_range,
key=lambda ns_range: ns_range.namespace_start)
# Use the first key and save the initial normalized_start() call.
ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
else:
ns_range = NamespaceRange(_app=_app).normalized_start()
if ns_range is None:
return [NamespaceRange(_app=_app)]
ranges = [ns_range]
else:
ranges = [NamespaceRange(_app=_app)]
singles = []
while ranges and (len(ranges) + len(singles)) < n:
namespace_range = ranges.pop(0)
if namespace_range.is_single_namespace:
singles.append(namespace_range)
else:
left, right = namespace_range.split_range()
if can_query():
right = right.normalized_start()
if right is not None:
ranges.append(right)
ranges.append(left)
ns_ranges = sorted(singles + ranges,
key=lambda ns_range: ns_range.namespace_start)
if contiguous:
if not ns_ranges:
# This condition is possible if every namespace was deleted after the
# first call to ns_range.normalized_start().
return [NamespaceRange(_app=_app)]
continuous_ns_ranges = []
for i in range(len(ns_ranges)):
if i == 0:
namespace_start = MIN_NAMESPACE
else:
namespace_start = ns_ranges[i].namespace_start
if i == len(ns_ranges) - 1:
namespace_end = MAX_NAMESPACE
else:
namespace_end = _ord_to_namespace(
_namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
continuous_ns_ranges.append(NamespaceRange(namespace_start,
namespace_end,
_app=_app))
return continuous_ns_ranges
else:
return ns_ranges | python | def split(cls,
n,
contiguous,
can_query=itertools.chain(itertools.repeat(True, 50),
itertools.repeat(False)).next,
_app=None):
# pylint: disable=g-doc-args
"""Splits the complete NamespaceRange into n equally-sized NamespaceRanges.
Args:
n: The maximum number of NamespaceRanges to return. Fewer than n
namespaces may be returned.
contiguous: If True then the returned NamespaceRanges will cover the
entire space of possible namespaces (i.e. from MIN_NAMESPACE to
MAX_NAMESPACE) without gaps. If False then the returned
NamespaceRanges may exclude namespaces that don't appear in the
datastore.
can_query: A function that returns True if split() can query the datastore
to generate more fair namespace range splits, and False otherwise.
If not set then split() is allowed to make 50 datastore queries.
Returns:
A list of at most n NamespaceRanges representing a near-equal distribution
of actual existent datastore namespaces. The returned list will be sorted
lexicographically.
Raises:
ValueError: if n is < 1.
"""
if n < 1:
raise ValueError('n must be >= 1')
ranges = None
if can_query():
if not contiguous:
ns_keys = get_namespace_keys(_app, n + 1)
if not ns_keys:
return []
else:
if len(ns_keys) <= n:
# If there are fewer actual namespaces than the number of NamespaceRanges
# to return, then just return the list of those namespaces.
ns_range = []
for ns_key in ns_keys:
ns_range.append(NamespaceRange(ns_key.name() or '',
ns_key.name() or '',
_app=_app))
return sorted(ns_range,
key=lambda ns_range: ns_range.namespace_start)
# Use the first key and save the initial normalized_start() call.
ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
else:
ns_range = NamespaceRange(_app=_app).normalized_start()
if ns_range is None:
return [NamespaceRange(_app=_app)]
ranges = [ns_range]
else:
ranges = [NamespaceRange(_app=_app)]
singles = []
while ranges and (len(ranges) + len(singles)) < n:
namespace_range = ranges.pop(0)
if namespace_range.is_single_namespace:
singles.append(namespace_range)
else:
left, right = namespace_range.split_range()
if can_query():
right = right.normalized_start()
if right is not None:
ranges.append(right)
ranges.append(left)
ns_ranges = sorted(singles + ranges,
key=lambda ns_range: ns_range.namespace_start)
if contiguous:
if not ns_ranges:
# This condition is possible if every namespace was deleted after the
# first call to ns_range.normalized_start().
return [NamespaceRange(_app=_app)]
continuous_ns_ranges = []
for i in range(len(ns_ranges)):
if i == 0:
namespace_start = MIN_NAMESPACE
else:
namespace_start = ns_ranges[i].namespace_start
if i == len(ns_ranges) - 1:
namespace_end = MAX_NAMESPACE
else:
namespace_end = _ord_to_namespace(
_namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
continuous_ns_ranges.append(NamespaceRange(namespace_start,
namespace_end,
_app=_app))
return continuous_ns_ranges
else:
return ns_ranges | Splits the complete NamespaceRange into n equally-sized NamespaceRanges.
Args:
n: The maximum number of NamespaceRanges to return. Fewer than n
namespaces may be returned.
contiguous: If True then the returned NamespaceRanges will cover the
entire space of possible namespaces (i.e. from MIN_NAMESPACE to
MAX_NAMESPACE) without gaps. If False then the returned
NamespaceRanges may exclude namespaces that don't appear in the
datastore.
can_query: A function that returns True if split() can query the datastore
to generate more fair namespace range splits, and False otherwise.
If not set then split() is allowed to make 50 datastore queries.
Returns:
A list of at most n NamespaceRanges representing a near-equal distribution
of actual existent datastore namespaces. The returned list will be sorted
lexicographically.
Raises:
ValueError: if n is < 1. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L343-L441 |
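A rough sketch of how split() might be driven when sharding a job over namespaces; the shard count of 8 is illustrative and the default can_query budget of 50 datastore queries is left in place:
ns_ranges = NamespaceRange.split(8, contiguous=True)
for ns_range in ns_ranges:
    # Each contiguous range can be serialized into per-shard state.
    shard_state = ns_range.to_json_object()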
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _RecordsPoolBase.append | def append(self, data):
"""Append data to a file."""
data_length = len(data)
if self._size + data_length > self._flush_size:
self.flush()
if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE:
raise errors.Error(
"Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE))
else:
self._buffer.append(data)
self._size += data_length
if self._size > self._flush_size:
self.flush() | python | def append(self, data):
"""Append data to a file."""
data_length = len(data)
if self._size + data_length > self._flush_size:
self.flush()
if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE:
raise errors.Error(
"Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE))
else:
self._buffer.append(data)
self._size += data_length
if self._size > self._flush_size:
self.flush() | Append data to a file. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L357-L371 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _RecordsPoolBase.flush | def flush(self):
"""Flush pool contents."""
# Write data to in-memory buffer first.
buf = cStringIO.StringIO()
with records.RecordsWriter(buf) as w:
for record in self._buffer:
w.write(record)
w._pad_block()
str_buf = buf.getvalue()
buf.close()
if not self._exclusive and len(str_buf) > _FILE_POOL_MAX_SIZE:
# Shouldn't really happen because of flush size.
raise errors.Error(
"Buffer too big. Can't write more than %s bytes in one request: "
"risk of writes interleaving. Got: %s" %
(_FILE_POOL_MAX_SIZE, len(str_buf)))
# Write data to file.
start_time = time.time()
self._write(str_buf)
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_BYTES, len(str_buf))(self._ctx)
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC,
int((time.time() - start_time) * 1000))(self._ctx)
# reset buffer
self._buffer = []
self._size = 0
gc.collect() | python | def flush(self):
"""Flush pool contents."""
# Write data to in-memory buffer first.
buf = cStringIO.StringIO()
with records.RecordsWriter(buf) as w:
for record in self._buffer:
w.write(record)
w._pad_block()
str_buf = buf.getvalue()
buf.close()
if not self._exclusive and len(str_buf) > _FILE_POOL_MAX_SIZE:
# Shouldn't really happen because of flush size.
raise errors.Error(
"Buffer too big. Can't write more than %s bytes in one request: "
"risk of writes interleaving. Got: %s" %
(_FILE_POOL_MAX_SIZE, len(str_buf)))
# Write data to file.
start_time = time.time()
self._write(str_buf)
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_BYTES, len(str_buf))(self._ctx)
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC,
int((time.time() - start_time) * 1000))(self._ctx)
# reset buffer
self._buffer = []
self._size = 0
gc.collect() | Flush pool contents. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L373-L404 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GCSRecordsPool._write | def _write(self, str_buf):
"""Uses the filehandle to the file in GCS to write to it."""
self._filehandle.write(str_buf)
self._buf_size += len(str_buf) | python | def _write(self, str_buf):
"""Uses the filehandle to the file in GCS to write to it."""
self._filehandle.write(str_buf)
self._buf_size += len(str_buf) | Uses the filehandle to the file in GCS to write to it. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L432-L435 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GCSRecordsPool.flush | def flush(self, force=False):
"""Flush pool contents.
Args:
force: Inserts additional padding to achieve the minimum block size
required for GCS.
"""
super(GCSRecordsPool, self).flush()
if force:
extra_padding = self._buf_size % self._GCS_BLOCK_SIZE
if extra_padding > 0:
self._write("\x00" * (self._GCS_BLOCK_SIZE - extra_padding))
self._filehandle.flush() | python | def flush(self, force=False):
"""Flush pool contents.
Args:
force: Inserts additional padding to achieve the minimum block size
required for GCS.
"""
super(GCSRecordsPool, self).flush()
if force:
extra_padding = self._buf_size % self._GCS_BLOCK_SIZE
if extra_padding > 0:
self._write("\x00" * (self._GCS_BLOCK_SIZE - extra_padding))
self._filehandle.flush() | Flush pool contents.
Args:
force: Inserts additional padding to achieve the minimum block size
required for GCS. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L437-L449 |
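The forced flush rounds the buffer up to a whole GCS block; a standalone sketch of the same padding arithmetic (the 256 KB block size is an assumption for illustration only; the real constant lives on GCSRecordsPool):
_GCS_BLOCK_SIZE = 256 * 1024  # assumed block size, for illustration only
def pad_to_block(buf_size):
    # Zero bytes needed to round buf_size up to the next block boundary.
    extra = buf_size % _GCS_BLOCK_SIZE
    return 0 if extra == 0 else _GCS_BLOCK_SIZE - extra
assert pad_to_block(0) == 0
assert pad_to_block(1) == _GCS_BLOCK_SIZE - 1
assert pad_to_block(_GCS_BLOCK_SIZE) == 0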
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageBase._get_tmp_gcs_bucket | def _get_tmp_gcs_bucket(cls, writer_spec):
"""Returns bucket used for writing tmp files."""
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
return cls._get_gcs_bucket(writer_spec) | python | def _get_tmp_gcs_bucket(cls, writer_spec):
"""Returns bucket used for writing tmp files."""
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
return cls._get_gcs_bucket(writer_spec) | Returns bucket used for writing tmp files. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L497-L501 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageBase._get_tmp_account_id | def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
return cls._get_account_id(writer_spec) | python | def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
return cls._get_account_id(writer_spec) | Returns the account id to use with tmp bucket. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L504-L509 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriterBase._generate_filename | def _generate_filename(cls, writer_spec, name, job_id, num,
attempt=None, seg_index=None):
"""Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders.
"""
naming_format = cls._TMP_FILE_NAMING_FORMAT
if seg_index is None:
naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
cls._DEFAULT_NAMING_FORMAT)
template = string.Template(naming_format)
try:
# Check that template doesn't use undefined mappings and is formatted well
if seg_index is None:
return template.substitute(name=name, id=job_id, num=num)
else:
return template.substitute(name=name, id=job_id, num=num,
attempt=attempt,
seg=seg_index)
except ValueError, error:
raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
except KeyError, error:
raise errors.BadWriterParamsError("Naming template '%s' has extra "
"mappings, %s" % (naming_format, error)) | python | def _generate_filename(cls, writer_spec, name, job_id, num,
attempt=None, seg_index=None):
"""Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders.
"""
naming_format = cls._TMP_FILE_NAMING_FORMAT
if seg_index is None:
naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
cls._DEFAULT_NAMING_FORMAT)
template = string.Template(naming_format)
try:
# Check that template doesn't use undefined mappings and is formatted well
if seg_index is None:
return template.substitute(name=name, id=job_id, num=num)
else:
return template.substitute(name=name, id=job_id, num=num,
attempt=attempt,
seg=seg_index)
except ValueError, error:
raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
except KeyError, error:
raise errors.BadWriterParamsError("Naming template '%s' has extra "
"mappings, %s" % (naming_format, error)) | Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L536-L573 |
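The naming format is handled with a plain string.Template; a small sketch of the substitution involved (the template literal and values here are illustrative, not necessarily the writer's defaults):
import string
naming_format = "$name/$id/output-$num"  # illustrative template
template = string.Template(naming_format)
filename = template.substitute(name="wordcount", id="job123", num=0)
# filename == "wordcount/job123/output-0"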
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriterBase.validate | def validate(cls, mapper_spec):
"""Validate mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec.
Raises:
BadWriterParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
writer_spec = cls.get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in writer_spec:
raise errors.BadWriterParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
writer_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadWriterParamsError("Bad bucket name, %s" % (error))
# Validate the naming format does not throw any errors using dummy values
cls._generate_filename(writer_spec, "name", "id", 0)
cls._generate_filename(writer_spec, "name", "id", 0, 1, 0) | python | def validate(cls, mapper_spec):
"""Validate mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec.
Raises:
BadWriterParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
writer_spec = cls.get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in writer_spec:
raise errors.BadWriterParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
writer_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadWriterParamsError("Bad bucket name, %s" % (error))
# Validate the naming format does not throw any errors using dummy values
cls._generate_filename(writer_spec, "name", "id", 0)
cls._generate_filename(writer_spec, "name", "id", 0, 1, 0) | Validate mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec.
Raises:
BadWriterParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L586-L611 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriterBase._open_file | def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
"""Opens a new gcs file for writing."""
if use_tmp_bucket:
bucket = cls._get_tmp_gcs_bucket(writer_spec)
account_id = cls._get_tmp_account_id(writer_spec)
else:
bucket = cls._get_gcs_bucket(writer_spec)
account_id = cls._get_account_id(writer_spec)
# GoogleCloudStorage format for filenames, Initial slash is required
filename = "/%s/%s" % (bucket, filename_suffix)
content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)
options = {}
if cls.ACL_PARAM in writer_spec:
options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)
return cloudstorage.open(filename, mode="w", content_type=content_type,
options=options, _account_id=account_id) | python | def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
"""Opens a new gcs file for writing."""
if use_tmp_bucket:
bucket = cls._get_tmp_gcs_bucket(writer_spec)
account_id = cls._get_tmp_account_id(writer_spec)
else:
bucket = cls._get_gcs_bucket(writer_spec)
account_id = cls._get_account_id(writer_spec)
# GoogleCloudStorage format for filenames, Initial slash is required
filename = "/%s/%s" % (bucket, filename_suffix)
content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)
options = {}
if cls.ACL_PARAM in writer_spec:
options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)
return cloudstorage.open(filename, mode="w", content_type=content_type,
options=options, _account_id=account_id) | Opens a new gcs file for writing. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L614-L633 |
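A hedged sketch of the direct cloudstorage call assembled above; the bucket, object name, content type and ACL value are made-up examples:
import cloudstorage
filename = "/my-bucket/wordcount/job123/output-0"  # "/<bucket>/<object>", leading slash required
handle = cloudstorage.open(filename, mode="w", content_type="text/plain",
                           options={"x-goog-acl": "project-private"})
handle.write("hello\n")
handle.close()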
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriterBase.write | def write(self, data):
"""Write data to the GoogleCloudStorage file.
Args:
data: string containing the data to be written.
"""
start_time = time.time()
self._get_write_buffer().write(data)
ctx = context.get()
operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx) | python | def write(self, data):
"""Write data to the GoogleCloudStorage file.
Args:
data: string containing the data to be written.
"""
start_time = time.time()
self._get_write_buffer().write(data)
ctx = context.get()
operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx) | Write data to the GoogleCloudStorage file.
Args:
data: string containing the data to be written. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L651-L662 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriter.validate | def validate(cls, mapper_spec):
"""Inherit docs."""
writer_spec = cls.get_params(mapper_spec, allow_old=False)
if writer_spec.get(cls._NO_DUPLICATE, False) not in (True, False):
raise errors.BadWriterParamsError("No duplicate must a boolean.")
super(_GoogleCloudStorageOutputWriter, cls).validate(mapper_spec) | python | def validate(cls, mapper_spec):
"""Inherit docs."""
writer_spec = cls.get_params(mapper_spec, allow_old=False)
if writer_spec.get(cls._NO_DUPLICATE, False) not in (True, False):
raise errors.BadWriterParamsError("No duplicate must a boolean.")
super(_GoogleCloudStorageOutputWriter, cls).validate(mapper_spec) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L718-L723 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriter.create | def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
"""Inherit docs."""
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)
seg_index = None
if writer_spec.get(cls._NO_DUPLICATE, False):
seg_index = 0
# Determine parameters
key = cls._generate_filename(writer_spec, mr_spec.name,
mr_spec.mapreduce_id,
shard_number, shard_attempt,
seg_index)
return cls._create(writer_spec, key) | python | def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
"""Inherit docs."""
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)
seg_index = None
if writer_spec.get(cls._NO_DUPLICATE, False):
seg_index = 0
# Determine parameters
key = cls._generate_filename(writer_spec, mr_spec.name,
mr_spec.mapreduce_id,
shard_number, shard_attempt,
seg_index)
return cls._create(writer_spec, key) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L729-L741 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | _GoogleCloudStorageOutputWriter._create | def _create(cls, writer_spec, filename_suffix):
"""Helper method that actually creates the file in cloud storage."""
writer = cls._open_file(writer_spec, filename_suffix)
return cls(writer, writer_spec=writer_spec) | python | def _create(cls, writer_spec, filename_suffix):
"""Helper method that actually creates the file in cloud storage."""
writer = cls._open_file(writer_spec, filename_suffix)
return cls(writer, writer_spec=writer_spec) | Helper method that actually creates the file in cloud storage. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L744-L747 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GoogleCloudStorageConsistentOutputWriter.create | def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
"""Inherit docs."""
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)
# Determine parameters
key = cls._generate_filename(writer_spec, mr_spec.name,
mr_spec.mapreduce_id,
shard_number, shard_attempt)
status = _ConsistentStatus()
status.writer_spec = writer_spec
status.mainfile = cls._open_file(writer_spec, key)
status.mapreduce_id = mr_spec.mapreduce_id
status.shard = shard_number
return cls(status) | python | def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
"""Inherit docs."""
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)
# Determine parameters
key = cls._generate_filename(writer_spec, mr_spec.name,
mr_spec.mapreduce_id,
shard_number, shard_attempt)
status = _ConsistentStatus()
status.writer_spec = writer_spec
status.mainfile = cls._open_file(writer_spec, key)
status.mapreduce_id = mr_spec.mapreduce_id
status.shard = shard_number
return cls(status) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L912-L927 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GoogleCloudStorageConsistentOutputWriter._rewrite_tmpfile | def _rewrite_tmpfile(self, mainfile, tmpfile, writer_spec):
"""Copies contents of tmpfile (name) to mainfile (buffer)."""
if mainfile.closed:
# can happen when finalize fails
return
account_id = self._get_tmp_account_id(writer_spec)
f = cloudstorage_api.open(tmpfile, _account_id=account_id)
# both reads and writes are buffered - the number here doesn't matter
data = f.read(self._REWRITE_BLOCK_SIZE)
while data:
mainfile.write(data)
data = f.read(self._REWRITE_BLOCK_SIZE)
f.close()
mainfile.flush() | python | def _rewrite_tmpfile(self, mainfile, tmpfile, writer_spec):
"""Copies contents of tmpfile (name) to mainfile (buffer)."""
if mainfile.closed:
# can happen when finalize fails
return
account_id = self._get_tmp_account_id(writer_spec)
f = cloudstorage_api.open(tmpfile, _account_id=account_id)
# both reads and writes are buffered - the number here doesn't matter
data = f.read(self._REWRITE_BLOCK_SIZE)
while data:
mainfile.write(data)
data = f.read(self._REWRITE_BLOCK_SIZE)
f.close()
mainfile.flush() | Copies contents of tmpfile (name) to mainfile (buffer). | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L938-L952 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GoogleCloudStorageConsistentOutputWriter._create_tmpfile | def _create_tmpfile(cls, status):
"""Creates a new random-named tmpfile."""
# We can't put the tmpfile in the same directory as the output. There are
# rare circumstances when we leave trash behind and we don't want this trash
# to be loaded into bigquery and/or used for restore.
#
# We use the mapreduce id, shard number and attempt, plus 128 random bits, to make
# collisions virtually impossible.
tmpl = string.Template(cls._TMPFILE_PATTERN)
filename = tmpl.substitute(
id=status.mapreduce_id, shard=status.shard,
random=random.getrandbits(cls._RAND_BITS))
return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True) | python | def _create_tmpfile(cls, status):
"""Creates a new random-named tmpfile."""
# We can't put the tmpfile in the same directory as the output. There are
# rare circumstances when we leave trash behind and we don't want this trash
# to be loaded into bigquery and/or used for restore.
#
# We use the mapreduce id, shard number and attempt, plus 128 random bits, to make
# collisions virtually impossible.
tmpl = string.Template(cls._TMPFILE_PATTERN)
filename = tmpl.substitute(
id=status.mapreduce_id, shard=status.shard,
random=random.getrandbits(cls._RAND_BITS))
return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True) | Creates a new random-named tmpfile. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L955-L969 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | GoogleCloudStorageConsistentOutputWriter._try_to_clean_garbage | def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
"""Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
"""
# Try to remove garbage (if any). Note that listbucket is not strongly
# consistent so something might survive.
tmpl = string.Template(self._TMPFILE_PREFIX)
prefix = tmpl.substitute(
id=self.status.mapreduce_id, shard=self.status.shard)
bucket = self._get_tmp_gcs_bucket(writer_spec)
account_id = self._get_tmp_account_id(writer_spec)
for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
_account_id=account_id):
if f.filename not in exclude_list:
self._remove_tmpfile(f.filename, self.status.writer_spec) | python | def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
"""Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed.
"""
# Try to remove garbage (if any). Note that listbucket is not strongly
# consistent so something might survive.
tmpl = string.Template(self._TMPFILE_PREFIX)
prefix = tmpl.substitute(
id=self.status.mapreduce_id, shard=self.status.shard)
bucket = self._get_tmp_gcs_bucket(writer_spec)
account_id = self._get_tmp_account_id(writer_spec)
for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
_account_id=account_id):
if f.filename not in exclude_list:
self._remove_tmpfile(f.filename, self.status.writer_spec) | Tries to remove any files created by this shard that aren't needed.
Args:
writer_spec: writer_spec for the MR.
exclude_list: A list of filenames (strings) that should not be
removed. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L1014-L1032 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/sample_input_reader.py | SampleInputReader.from_json | def from_json(cls, state):
"""Inherit docs."""
return cls(state[cls.COUNT], state[cls.STRING_LENGTH]) | python | def from_json(cls, state):
"""Inherit docs."""
return cls(state[cls.COUNT], state[cls.STRING_LENGTH]) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/sample_input_reader.py#L73-L75 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/sample_input_reader.py | SampleInputReader.split_input | def split_input(cls, job_config):
"""Inherit docs."""
params = job_config.input_reader_params
count = params[cls.COUNT]
string_length = params.get(cls.STRING_LENGTH, cls._DEFAULT_STRING_LENGTH)
shard_count = job_config.shard_count
count_per_shard = count // shard_count
mr_input_readers = [
cls(count_per_shard, string_length) for _ in range(shard_count)]
left = count - count_per_shard*shard_count
if left > 0:
mr_input_readers.append(cls(left, string_length))
return mr_input_readers | python | def split_input(cls, job_config):
"""Inherit docs."""
params = job_config.input_reader_params
count = params[cls.COUNT]
string_length = params.get(cls.STRING_LENGTH, cls._DEFAULT_STRING_LENGTH)
shard_count = job_config.shard_count
count_per_shard = count // shard_count
mr_input_readers = [
cls(count_per_shard, string_length) for _ in range(shard_count)]
left = count - count_per_shard*shard_count
if left > 0:
mr_input_readers.append(cls(left, string_length))
return mr_input_readers | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/sample_input_reader.py#L82-L98 |
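The shard arithmetic above in miniature; the numbers are illustrative:
count, shard_count = 10, 3
count_per_shard = count // shard_count           # 3 entries per shard
left = count - count_per_shard * shard_count     # 1 entry left over
sizes = [count_per_shard] * shard_count + ([left] if left else [])
assert sizes == [3, 3, 3, 1]                     # one extra reader picks up the remainder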
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/sample_input_reader.py | SampleInputReader.validate | def validate(cls, job_config):
"""Inherit docs."""
super(SampleInputReader, cls).validate(job_config)
params = job_config.input_reader_params
# Validate count.
if cls.COUNT not in params:
raise errors.BadReaderParamsError("Must specify %s" % cls.COUNT)
if not isinstance(params[cls.COUNT], int):
raise errors.BadReaderParamsError("%s should be an int but is %s" %
(cls.COUNT, type(params[cls.COUNT])))
if params[cls.COUNT] <= 0:
raise errors.BadReaderParamsError("%s should be a positive int")
# Validate string length.
if cls.STRING_LENGTH in params and not (
isinstance(params[cls.STRING_LENGTH], int) and
params[cls.STRING_LENGTH] > 0):
raise errors.BadReaderParamsError("%s should be a positive int "
"but is %s" %
(cls.STRING_LENGTH,
params[cls.STRING_LENGTH])) | python | def validate(cls, job_config):
"""Inherit docs."""
super(SampleInputReader, cls).validate(job_config)
params = job_config.input_reader_params
# Validate count.
if cls.COUNT not in params:
raise errors.BadReaderParamsError("Must specify %s" % cls.COUNT)
if not isinstance(params[cls.COUNT], int):
raise errors.BadReaderParamsError("%s should be an int but is %s" %
(cls.COUNT, type(params[cls.COUNT])))
if params[cls.COUNT] <= 0:
raise errors.BadReaderParamsError("%s should be a positive int")
# Validate string length.
if cls.STRING_LENGTH in params and not (
isinstance(params[cls.STRING_LENGTH], int) and
params[cls.STRING_LENGTH] > 0):
raise errors.BadReaderParamsError("%s should be a positive int "
"but is %s" %
(cls.STRING_LENGTH,
params[cls.STRING_LENGTH])) | Inherit docs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/sample_input_reader.py#L101-L121 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | _get_weights | def _get_weights(max_length):
"""Get weights for each offset in str of certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3.
"""
weights = [1]
for i in range(1, max_length):
weights.append(weights[i-1] * len(_ALPHABET) + 1)
weights.reverse()
return weights | python | def _get_weights(max_length):
"""Get weights for each offset in str of certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3.
"""
weights = [1]
for i in range(1, max_length):
weights.append(weights[i-1] * len(_ALPHABET) + 1)
weights.reverse()
return weights | Get weights for each offset in strings of a certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L355-L372 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | _str_to_ord | def _str_to_ord(content, weights):
"""Converts a string to its lexicographical order.
Args:
content: the string to convert. Of type str.
weights: weights from _get_weights.
Returns:
an int or long that represents the order of this string. "" has order 0.
"""
ordinal = 0
for i, c in enumerate(content):
ordinal += weights[i] * _ALPHABET.index(c) + 1
return ordinal | python | def _str_to_ord(content, weights):
"""Converts a string to its lexicographical order.
Args:
content: the string to convert. Of type str.
weights: weights from _get_weights.
Returns:
an int or long that represents the order of this string. "" has order 0.
"""
ordinal = 0
for i, c in enumerate(content):
ordinal += weights[i] * _ALPHABET.index(c) + 1
return ordinal | Converts a string to its lexicographical order.
Args:
content: the string to convert. Of type str.
weights: weights from _get_weights.
Returns:
an int or long that represents the order of this string. "" has order 0. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L375-L388 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | _ord_to_str | def _ord_to_str(ordinal, weights):
"""Reverse function of _str_to_ord."""
chars = []
for weight in weights:
if ordinal == 0:
return "".join(chars)
ordinal -= 1
index, ordinal = divmod(ordinal, weight)
chars.append(_ALPHABET[index])
return "".join(chars) | python | def _ord_to_str(ordinal, weights):
"""Reverse function of _str_to_ord."""
chars = []
for weight in weights:
if ordinal == 0:
return "".join(chars)
ordinal -= 1
index, ordinal = divmod(ordinal, weight)
chars.append(_ALPHABET[index])
return "".join(chars) | Reverse function of _str_to_ord. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L391-L400 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | PropertyRange._get_range_from_filters | def _get_range_from_filters(cls, filters, model_class):
"""Get property range from filters user provided.
This method also validates there is one and only one closed range on a
single property.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type should satisfy the property's type.
model_class: the model class for the entity type to apply filters on.
Returns:
a tuple of (property, start_filter, end_filter). property is the model's
field that the range is about. start_filter and end_filter define the
start and the end of the range. (None, None, None) if no range is found.
Raises:
BadReaderParamsError: if any filter is invalid in any way.
"""
if not filters:
return None, None, None
range_property = None
start_val = None
end_val = None
start_filter = None
end_filter = None
for f in filters:
prop, op, val = f
if op in [">", ">=", "<", "<="]:
if range_property and range_property != prop:
raise errors.BadReaderParamsError(
"Range on only one property is supported.")
range_property = prop
if val is None:
raise errors.BadReaderParamsError(
"Range can't be None in filter %s", f)
if op in [">", ">="]:
if start_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
start_val = val
start_filter = f
else:
if end_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
end_val = val
end_filter = f
elif op != "=":
raise errors.BadReaderParamsError(
"Only < <= > >= = are supported as operation. Got %s", op)
if not range_property:
return None, None, None
if start_val is None or end_val is None:
raise errors.BadReaderParamsError(
"Filter should contains a complete range on property %s",
range_property)
if issubclass(model_class, db.Model):
property_obj = model_class.properties()[range_property]
else:
property_obj = (
model_class._properties[ # pylint: disable=protected-access
range_property])
supported_properties = (
_DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
if not isinstance(property_obj, tuple(supported_properties)):
raise errors.BadReaderParamsError(
"Filtered property %s is not supported by sharding.", range_property)
if not start_val < end_val:
raise errors.BadReaderParamsError(
"Start value %s should be smaller than end value %s",
start_val, end_val)
return property_obj, start_filter, end_filter | python | def _get_range_from_filters(cls, filters, model_class):
"""Get property range from filters user provided.
This method also validates there is one and only one closed range on a
single property.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type should satisfy the property's type.
model_class: the model class for the entity type to apply filters on.
Returns:
a tuple of (property, start_filter, end_filter). property is the model's
field that the range is about. start_filter and end_filter define the
start and the end of the range. (None, None, None) if no range is found.
Raises:
BadReaderParamsError: if any filter is invalid in any way.
"""
if not filters:
return None, None, None
range_property = None
start_val = None
end_val = None
start_filter = None
end_filter = None
for f in filters:
prop, op, val = f
if op in [">", ">=", "<", "<="]:
if range_property and range_property != prop:
raise errors.BadReaderParamsError(
"Range on only one property is supported.")
range_property = prop
if val is None:
raise errors.BadReaderParamsError(
"Range can't be None in filter %s", f)
if op in [">", ">="]:
if start_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
start_val = val
start_filter = f
else:
if end_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
end_val = val
end_filter = f
elif op != "=":
raise errors.BadReaderParamsError(
"Only < <= > >= = are supported as operation. Got %s", op)
if not range_property:
return None, None, None
if start_val is None or end_val is None:
raise errors.BadReaderParamsError(
"Filter should contains a complete range on property %s",
range_property)
if issubclass(model_class, db.Model):
property_obj = model_class.properties()[range_property]
else:
property_obj = (
model_class._properties[ # pylint: disable=protected-access
range_property])
supported_properties = (
_DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
if not isinstance(property_obj, tuple(supported_properties)):
raise errors.BadReaderParamsError(
"Filtered property %s is not supported by sharding.", range_property)
if not start_val < end_val:
raise errors.BadReaderParamsError(
"Start value %s should be smaller than end value %s",
start_val, end_val)
return property_obj, start_filter, end_filter | Get property range from filters user provided.
This method also validates there is one and only one closed range on a
single property.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type should satisfy the property's type.
model_class: the model class for the entity type to apply filters on.
Returns:
a tuple of (property, start_filter, end_filter). property is the model's
field that the range is about. start_filter and end_filter define the
start and the end of the range. (None, None, None) if no range is found.
Raises:
BadReaderParamsError: if any filter is invalid in any way. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L81-L162 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | PropertyRange.split | def split(self, n):
"""Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non-overlapping sub PropertyRanges. May be fewer than
n when there are not enough subranges.
"""
new_range_filters = []
name = self.start[0]
prop_cls = self.prop.__class__
if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n,
self.start[1] == ">=", self.end[1] == "<=")
start_filter = (name, ">=", splitpoints[0])
for p in splitpoints[1:]:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
else:
splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n)
start_filter = self.start
for p in splitpoints:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
new_range_filters.append([start_filter, self.end])
for f in new_range_filters:
f.extend(self._equality_filters)
return [self.__class__(f, self.model_class_path) for f in new_range_filters] | python | def split(self, n):
"""Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non-overlapping sub PropertyRanges. May be fewer than
n when there are not enough subranges.
"""
new_range_filters = []
name = self.start[0]
prop_cls = self.prop.__class__
if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n,
self.start[1] == ">=", self.end[1] == "<=")
start_filter = (name, ">=", splitpoints[0])
for p in splitpoints[1:]:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
else:
splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n)
start_filter = self.start
for p in splitpoints:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
new_range_filters.append([start_filter, self.end])
for f in new_range_filters:
f.extend(self._equality_filters)
return [self.__class__(f, self.model_class_path) for f in new_range_filters] | Evenly split this range into contiguous, non-overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non-overlapping sub PropertyRanges. May be fewer than
n when there are not enough subranges. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L164-L199 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/property_range.py | PropertyRange.make_query | def make_query(self, ns):
"""Make a query of entities within this range.
Query options are not supported. They should be specified when the query
is run.
Args:
ns: namespace of this query.
Returns:
a db.Query or ndb.Query, depending on the model class's type.
"""
if issubclass(self.model_class, db.Model):
query = db.Query(self.model_class, namespace=ns)
for f in self.filters:
query.filter("%s %s" % (f[0], f[1]), f[2])
else:
query = self.model_class.query(namespace=ns)
for f in self.filters:
query = query.filter(ndb.FilterNode(*f))
return query | python | def make_query(self, ns):
"""Make a query of entities within this range.
Query options are not supported. They should be specified when the query
is run.
Args:
ns: namespace of this query.
Returns:
a db.Query or ndb.Query, depending on the model class's type.
"""
if issubclass(self.model_class, db.Model):
query = db.Query(self.model_class, namespace=ns)
for f in self.filters:
query.filter("%s %s" % (f[0], f[1]), f[2])
else:
query = self.model_class.query(namespace=ns)
for f in self.filters:
query = query.filter(ndb.FilterNode(*f))
return query | Make a query of entities within this range.
Query options are not supported. They should be specified when the query
is run.
Args:
ns: namespace of this query.
Returns:
a db.Query or ndb.Query, depending on the model class's type. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L201-L221 |
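A hedged sketch of the ndb branch above, with a made-up model and date range standing in for a real filtered property:
import datetime
from google.appengine.ext import ndb
class Article(ndb.Model):                       # made-up model for illustration
    created = ndb.DateTimeProperty()
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime(2015, 2, 1)
query = Article.query(namespace="tenant1")
query = query.filter(ndb.FilterNode("created", ">=", start))
query = query.filter(ndb.FilterNode("created", "<", end))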
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/output_writer.py | OutputWriter.validate | def validate(cls, job_config):
"""Validates relevant parameters.
This method can validate fields which it deems relevant.
Args:
job_config: an instance of map_job.JobConfig.
Raises:
errors.BadWriterParamsError: required parameters are missing or invalid.
"""
if job_config.output_writer_cls != cls:
raise errors.BadWriterParamsError(
"Expect output writer class %r, got %r." %
(cls, job_config.output_writer_cls)) | python | def validate(cls, job_config):
"""Validates relevant parameters.
This method can validate fields which it deems relevant.
Args:
job_config: an instance of map_job.JobConfig.
Raises:
errors.BadWriterParamsError: required parameters are missing or invalid.
"""
if job_config.output_writer_cls != cls:
raise errors.BadWriterParamsError(
"Expect output writer class %r, got %r." %
(cls, job_config.output_writer_cls)) | Validates relevant parameters.
This method can validate fields which it deems relevant.
Args:
job_config: an instance of map_job.JobConfig.
Raises:
errors.BadWriterParamsError: required parameters are missing or invalid. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/output_writer.py#L50-L64 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/output_writer.py | OutputWriter.commit_output | def commit_output(cls, shard_ctx, iterator):
"""Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g. a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs.
"""
# We accept an iterator just in case output references get too big.
outs = tuple(iterator)
shard_ctx._state.writer_state["outs"] = outs | python | def commit_output(cls, shard_ctx, iterator):
"""Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g. a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs.
"""
# We accept an iterator just in case output references get too big.
outs = tuple(iterator)
shard_ctx._state.writer_state["outs"] = outs | Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g. a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/output_writer.py#L111-L127 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/main.py | create_handlers_map | def create_handlers_map():
"""Create new handlers map.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
"""
pipeline_handlers_map = []
if pipeline:
pipeline_handlers_map = pipeline.create_handlers_map(prefix=".*/pipeline")
return pipeline_handlers_map + [
# Task queue handlers.
# Always suffix by mapreduce_id or shard_id for log analysis purposes.
# mapreduce_id or shard_id also presents in headers or payload.
(r".*/worker_callback.*", handlers.MapperWorkerCallbackHandler),
(r".*/controller_callback.*", handlers.ControllerCallbackHandler),
(r".*/kickoffjob_callback.*", handlers.KickOffJobHandler),
(r".*/finalizejob_callback.*", handlers.FinalizeJobHandler),
# RPC requests with JSON responses
# All JSON handlers should have /command/ prefix.
(r".*/command/start_job", handlers.StartJobHandler),
(r".*/command/cleanup_job", handlers.CleanUpJobHandler),
(r".*/command/abort_job", handlers.AbortJobHandler),
(r".*/command/list_configs", status.ListConfigsHandler),
(r".*/command/list_jobs", status.ListJobsHandler),
(r".*/command/get_job_detail", status.GetJobDetailHandler),
# UI static files
(STATIC_RE, status.ResourceHandler),
# Redirect non-file URLs that do not end in status/detail to status page.
(r".*", RedirectHandler),
] | python | def create_handlers_map():
"""Create new handlers map.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
"""
pipeline_handlers_map = []
if pipeline:
pipeline_handlers_map = pipeline.create_handlers_map(prefix=".*/pipeline")
return pipeline_handlers_map + [
# Task queue handlers.
# Always suffix by mapreduce_id or shard_id for log analysis purposes.
# mapreduce_id or shard_id also presents in headers or payload.
(r".*/worker_callback.*", handlers.MapperWorkerCallbackHandler),
(r".*/controller_callback.*", handlers.ControllerCallbackHandler),
(r".*/kickoffjob_callback.*", handlers.KickOffJobHandler),
(r".*/finalizejob_callback.*", handlers.FinalizeJobHandler),
# RPC requests with JSON responses
# All JSON handlers should have /command/ prefix.
(r".*/command/start_job", handlers.StartJobHandler),
(r".*/command/cleanup_job", handlers.CleanUpJobHandler),
(r".*/command/abort_job", handlers.AbortJobHandler),
(r".*/command/list_configs", status.ListConfigsHandler),
(r".*/command/list_jobs", status.ListJobsHandler),
(r".*/command/get_job_detail", status.GetJobDetailHandler),
# UI static files
(STATIC_RE, status.ResourceHandler),
# Redirect non-file URLs that do not end in status/detail to status page.
(r".*", RedirectHandler),
] | Create new handlers map.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/main.py#L58-L92 |
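A sketch of how the handler map is typically mounted into a WSGI app; the webapp2 wiring shown is an assumption, though it matches the pattern the mapreduce library itself uses:
import webapp2
# Serves the task-queue callbacks, the JSON command endpoints and the status UI.
APP = webapp2.WSGIApplication(create_handlers_map(), debug=True)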
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/key_ranges.py | KeyRangesFactory.from_json | def from_json(cls, json):
"""Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid.
"""
if json["name"] in _KEYRANGES_CLASSES:
return _KEYRANGES_CLASSES[json["name"]].from_json(json)
raise ValueError("Invalid json %s", json) | python | def from_json(cls, json):
"""Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid.
"""
if json["name"] in _KEYRANGES_CLASSES:
return _KEYRANGES_CLASSES[json["name"]].from_json(json)
raise ValueError("Invalid json %s", json) | Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/key_ranges.py#L58-L72 |
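The deserialization above is a name-to-class registry dispatch: the serialized dict carries a "name" field that is looked up in _KEYRANGES_CLASSES. A self-contained sketch of the same pattern, with a hypothetical class standing in for the real key-range implementations:
```
class FixedKeyRanges(object):
    @classmethod
    def from_json(cls, json):
        return cls()

# Registry mapping the serialized "name" field back to a class.
_KEYRANGES_CLASSES = {"FixedKeyRanges": FixedKeyRanges}

def from_json(json):
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    raise ValueError("Invalid json %s" % json)

obj = from_json({"name": "FixedKeyRanges"})  # dispatches on "name"
```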
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | split_into_sentences | def split_into_sentences(s):
"""Split text into list of sentences."""
s = re.sub(r"\s+", " ", s)
s = re.sub(r"[\\.\\?\\!]", "\n", s)
return s.split("\n") | python | def split_into_sentences(s):
"""Split text into list of sentences."""
s = re.sub(r"\s+", " ", s)
s = re.sub(r"[\\.\\?\\!]", "\n", s)
return s.split("\n") | Split text into list of sentences. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L181-L185 |
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | split_into_words | def split_into_words(s):
"""Split a sentence into list of words."""
s = re.sub(r"\W+", " ", s)
s = re.sub(r"[_0-9]+", " ", s)
return s.split() | python | def split_into_words(s):
"""Split a sentence into list of words."""
s = re.sub(r"\W+", " ", s)
s = re.sub(r"[_0-9]+", " ", s)
return s.split() | Split a sentence into list of words. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L188-L192 |
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | index_map | def index_map(data):
"""Index demo map function."""
(entry, text_fn) = data
text = text_fn()
logging.debug("Got %s", entry.filename)
for s in split_into_sentences(text):
for w in split_into_words(s.lower()):
yield (w, entry.filename) | python | def index_map(data):
"""Index demo map function."""
(entry, text_fn) = data
text = text_fn()
logging.debug("Got %s", entry.filename)
for s in split_into_sentences(text):
for w in split_into_words(s.lower()):
yield (w, entry.filename) | Index demo map function. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L211-L219 |
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | phrases_map | def phrases_map(data):
"""Phrases demo map function."""
(entry, text_fn) = data
text = text_fn()
filename = entry.filename
logging.debug("Got %s", filename)
for s in split_into_sentences(text):
words = split_into_words(s.lower())
if len(words) < PHRASE_LENGTH:
yield (":".join(words), filename)
continue
for i in range(0, len(words) - PHRASE_LENGTH):
yield (":".join(words[i:i+PHRASE_LENGTH]), filename) | python | def phrases_map(data):
"""Phrases demo map function."""
(entry, text_fn) = data
text = text_fn()
filename = entry.filename
logging.debug("Got %s", filename)
for s in split_into_sentences(text):
words = split_into_words(s.lower())
if len(words) < PHRASE_LENGTH:
yield (":".join(words), filename)
continue
for i in range(0, len(words) - PHRASE_LENGTH):
yield (":".join(words[i:i+PHRASE_LENGTH]), filename) | Phrases demo map function. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L230-L243 |
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | phrases_reduce | def phrases_reduce(key, values):
"""Phrases demo reduce function."""
if len(values) < 10:
return
counts = {}
for filename in values:
counts[filename] = counts.get(filename, 0) + 1
words = re.sub(r":", " ", key)
threshold = len(values) / 2
for filename, count in counts.items():
if count > threshold:
yield "%s:%s\n" % (words, filename) | python | def phrases_reduce(key, values):
"""Phrases demo reduce function."""
if len(values) < 10:
return
counts = {}
for filename in values:
counts[filename] = counts.get(filename, 0) + 1
words = re.sub(r":", " ", key)
threshold = len(values) / 2
for filename, count in counts.items():
if count > threshold:
yield "%s:%s\n" % (words, filename) | Phrases demo reduce function. | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L246-L258 |
GoogleCloudPlatform/appengine-mapreduce | python/demo/main.py | FileMetadata.getKeyName | def getKeyName(username, date, blob_key):
"""Returns the internal key for a particular item in the database.
Our items are stored with keys of the form 'user/date/blob_key' ('/' is
not the real separator, but __SEP is).
Args:
username: The given user's e-mail address.
date: A datetime object representing the date and time that an input
file was uploaded to this app.
blob_key: The blob key corresponding to the location of the input file
in the Blobstore.
Returns:
The internal key for the item specified by (username, date, blob_key).
"""
sep = FileMetadata.__SEP
return str(username + sep + str(date) + sep + blob_key) | python | def getKeyName(username, date, blob_key):
"""Returns the internal key for a particular item in the database.
Our items are stored with keys of the form 'user/date/blob_key' ('/' is
not the real separator, but __SEP is).
Args:
username: The given user's e-mail address.
date: A datetime object representing the date and time that an input
file was uploaded to this app.
blob_key: The blob key corresponding to the location of the input file
in the Blobstore.
Returns:
The internal key for the item specified by (username, date, blob_key).
"""
sep = FileMetadata.__SEP
return str(username + sep + str(date) + sep + blob_key) | Returns the internal key for a particular item in the database.
Our items are stored with keys of the form 'user/date/blob_key' ('/' is
not the real separator, but __SEP is).
Args:
username: The given user's e-mail address.
date: A datetime object representing the date and time that an input
file was uploaded to this app.
blob_key: The blob key corresponding to the location of the input file
in the Blobstore.
Returns:
The internal key for the item specified by (username, date, blob_key). | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L113-L130 |
materialsproject/custodian | custodian/custodian.py | Custodian.from_spec | def from_spec(cls, spec):
"""
Load a Custodian instance where the jobs are specified from a
structure and a spec dict. This allows simple
custom job sequences to be constructed quickly via a YAML file.
Args:
spec (dict): A dict specifying job. A sample of the dict in
YAML format for the usual MP workflow is given as follows
```
jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
jobs_common_params:
vasp_cmd: /opt/vasp
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
scratch_dir: /tmp
```
The `jobs` key is a list of jobs. Each job is
specified via `jb`: <explicit path>, and all parameters are
specified via `params` which is a dict.
`jobs_common_params` specifies a common set of parameters that
are passed to all jobs, e.g., vasp_cmd.
Returns:
Custodian instance.
"""
dec = MontyDecoder()
def load_class(dotpath):
modname, classname = dotpath.rsplit(".", 1)
mod = __import__(modname, globals(), locals(), [classname], 0)
return getattr(mod, classname)
def process_params(d):
decoded = {}
for k, v in d.items():
if k.startswith("$"):
if isinstance(v, list):
v = [os.path.expandvars(i) for i in v]
elif isinstance(v, dict):
v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()}
else:
v = os.path.expandvars(v)
decoded[k.strip("$")] = dec.process_decoded(v)
return decoded
jobs = []
common_params = process_params(spec.get("jobs_common_params", {}))
for d in spec["jobs"]:
cls_ = load_class(d["jb"])
params = process_params(d.get("params", {}))
params.update(common_params)
jobs.append(cls_(**params))
handlers = []
for d in spec.get("handlers", []):
cls_ = load_class(d["hdlr"])
params = process_params(d.get("params", {}))
handlers.append(cls_(**params))
validators = []
for d in spec.get("validators", []):
cls_ = load_class(d["vldr"])
params = process_params(d.get("params", {}))
validators.append(cls_(**params))
custodian_params = process_params(spec.get("custodian_params", {}))
return cls(jobs=jobs, handlers=handlers, validators=validators,
**custodian_params) | python | def from_spec(cls, spec):
"""
Load a Custodian instance where the jobs are specified from a
structure and a spec dict. This allows simple
custom job sequences to be constructed quickly via a YAML file.
Args:
spec (dict): A dict specifying job. A sample of the dict in
YAML format for the usual MP workflow is given as follows
```
jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
jobs_common_params:
vasp_cmd: /opt/vasp
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
scratch_dir: /tmp
```
The `jobs` key is a list of jobs. Each job is
specified via `jb`: <explicit path>, and all parameters are
specified via `params` which is a dict.
`jobs_common_params` specifies a common set of parameters that
are passed to all jobs, e.g., vasp_cmd.
Returns:
Custodian instance.
"""
dec = MontyDecoder()
def load_class(dotpath):
modname, classname = dotpath.rsplit(".", 1)
mod = __import__(modname, globals(), locals(), [classname], 0)
return getattr(mod, classname)
def process_params(d):
decoded = {}
for k, v in d.items():
if k.startswith("$"):
if isinstance(v, list):
v = [os.path.expandvars(i) for i in v]
elif isinstance(v, dict):
v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()}
else:
v = os.path.expandvars(v)
decoded[k.strip("$")] = dec.process_decoded(v)
return decoded
jobs = []
common_params = process_params(spec.get("jobs_common_params", {}))
for d in spec["jobs"]:
cls_ = load_class(d["jb"])
params = process_params(d.get("params", {}))
params.update(common_params)
jobs.append(cls_(**params))
handlers = []
for d in spec.get("handlers", []):
cls_ = load_class(d["hdlr"])
params = process_params(d.get("params", {}))
handlers.append(cls_(**params))
validators = []
for d in spec.get("validators", []):
cls_ = load_class(d["vldr"])
params = process_params(d.get("params", {}))
validators.append(cls_(**params))
custodian_params = process_params(spec.get("custodian_params", {}))
return cls(jobs=jobs, handlers=handlers, validators=validators,
**custodian_params) | Load a Custodian instance where the jobs are specified from a
structure and a spec dict. This allows simple
custom job sequences to be constructed quickly via a YAML file.
Args:
spec (dict): A dict specifying job. A sample of the dict in
YAML format for the usual MP workflow is given as follows
```
jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
jobs_common_params:
vasp_cmd: /opt/vasp
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
scratch_dir: /tmp
```
The `jobs` key is a list of jobs. Each job is
specified via `jb`: <explicit path>, and all parameters are
specified via `params` which is a dict.
`jobs_common_params` specifies a common set of parameters that
are passed to all jobs, e.g., vasp_cmd.
Returns:
Custodian instance. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L204-L292 |
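A hedged usage sketch: build the spec dict by loading YAML shaped like the example in the docstring and hand it to Custodian.from_spec. Paths and parameters below are assumptions, not values from the source:
```
import yaml

spec = yaml.safe_load("""
jobs:
- jb: custodian.vasp.jobs.VaspJob
  params:
    final: True
    suffix: .relax1
jobs_common_params:
  vasp_cmd: /opt/vasp
custodian_params:
  scratch_dir: /tmp
""")

c = Custodian.from_spec(spec)
c.run()
```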
materialsproject/custodian | custodian/custodian.py | Custodian.run | def run(self):
"""
Runs all jobs.
Returns:
All errors encountered as a list of list.
[[error_dicts for job 1], [error_dicts for job 2], ....]
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
cwd = os.getcwd()
with ScratchDir(self.scratch_dir, create_symbolic_link=True,
copy_to_current_on_exit=True,
copy_from_current_on_enter=True) as temp_dir:
self.total_errors = 0
start = datetime.datetime.now()
logger.info("Run started at {} in {}.".format(
start, temp_dir))
v = sys.version.replace("\n", " ")
logger.info("Custodian running on Python version {}".format(v))
logger.info("Hostname: {}, Cluster: {}".format(
*get_execution_host_info()))
try:
# skip jobs until the restart
for job_n, job in islice(enumerate(self.jobs, 1),
self.restart, None):
self._run_job(job_n, job)
# We do a dump of the run log after each job.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
# Checkpoint after each job so that we can recover from last
# point and remove old checkpoints
if self.checkpoint:
self.restart = job_n
Custodian._save_checkpoint(cwd, job_n)
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.gzipped_output:
gzip_dir(".")
# Cleanup checkpoint files (if any) if run is successful.
Custodian._delete_checkpoints(cwd)
return self.run_log | python | def run(self):
"""
Runs all jobs.
Returns:
All errors encountered as a list of list.
[[error_dicts for job 1], [error_dicts for job 2], ....]
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
cwd = os.getcwd()
with ScratchDir(self.scratch_dir, create_symbolic_link=True,
copy_to_current_on_exit=True,
copy_from_current_on_enter=True) as temp_dir:
self.total_errors = 0
start = datetime.datetime.now()
logger.info("Run started at {} in {}.".format(
start, temp_dir))
v = sys.version.replace("\n", " ")
logger.info("Custodian running on Python version {}".format(v))
logger.info("Hostname: {}, Cluster: {}".format(
*get_execution_host_info()))
try:
# skip jobs until the restart
for job_n, job in islice(enumerate(self.jobs, 1),
self.restart, None):
self._run_job(job_n, job)
# We do a dump of the run log after each job.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
# Checkpoint after each job so that we can recover from last
# point and remove old checkpoints
if self.checkpoint:
self.restart = job_n
Custodian._save_checkpoint(cwd, job_n)
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.gzipped_output:
gzip_dir(".")
# Cleanup checkpoint files (if any) if run is successful.
Custodian._delete_checkpoints(cwd)
return self.run_log | Runs all jobs.
Returns:
All errors encountered as a list of list.
[[error_dicts for job 1], [error_dicts for job 2], ....]
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L294-L357 |
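A minimal sketch of the normal entry point, assuming the VASP jobs and handlers shipped with the same package; the command is an assumption:
```
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

jobs = [VaspJob(vasp_cmd=["mpirun", "vasp_std"])]
handlers = [VaspErrorHandler()]

c = Custodian(handlers, jobs, max_errors=5)
run_log = c.run()  # list of per-job dicts, including any corrections
```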
materialsproject/custodian | custodian/custodian.py | Custodian._run_job | def _run_job(self, job_n, job):
"""
Runs a single job.
Args:
job_n: job number (1 index)
job: Custodian job
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
self.run_log.append({"job": job.as_dict(), "corrections": [],
"handler": None, "validator": None,
"max_errors": False, "max_errors_per_job": False,
"max_errors_per_handler": False,
"nonzero_return_code": False})
self.errors_current_job = 0
# reset the counters of the number of times a correction has been
# applied for each handler
for h in self.handlers:
h.n_applied_corrections = 0
job.setup()
attempt = 0
while (self.total_errors < self.max_errors and
self.errors_current_job < self.max_errors_per_job):
attempt += 1
logger.info(
"Starting job no. {} ({}) attempt no. {}. Total errors and "
"errors in job thus far = {}, {}.".format(
job_n, job.name, attempt, self.total_errors,
self.errors_current_job))
p = job.run()
# Check for errors using the error handlers and perform
# corrections.
has_error = False
zero_return_code = True
# While the job is running, we use the handlers that are
# monitors to monitor the job.
if isinstance(p, subprocess.Popen):
if self.monitors:
n = 0
while True:
n += 1
time.sleep(self.polling_time_step)
if p.poll() is not None:
break
terminate = self.terminate_func or p.terminate
if n % self.monitor_freq == 0:
has_error = self._do_check(self.monitors,
terminate)
if terminate is not None and terminate != p.terminate:
time.sleep(self.polling_time_step)
else:
p.wait()
if self.terminate_func is not None and \
self.terminate_func != p.terminate:
self.terminate_func()
time.sleep(self.polling_time_step)
zero_return_code = p.returncode == 0
logger.info("{}.run has completed. "
"Checking remaining handlers".format(job.name))
# Check for errors again, since in some cases non-monitor
# handlers fix the problems detected by monitors
# if an error has been found, not all handlers need to run
if has_error:
self._do_check([h for h in self.handlers
if not h.is_monitor])
else:
has_error = self._do_check(self.handlers)
if has_error:
# This makes sure the job is killed cleanly for certain systems.
job.terminate()
# If there are no errors detected, perform
# postprocessing and exit.
if not has_error:
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
if not zero_return_code:
if self.terminate_on_nonzero_returncode:
self.run_log[-1]["nonzero_return_code"] = True
s = "Job return code is %d. Terminating..." % \
p.returncode
logger.info(s)
raise ReturnCodeError(s, True)
else:
warnings.warn("subprocess returned a non-zero return "
"code. Check outputs carefully...")
job.postprocess()
return
# Check that all errors could be handled
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
for x in self.run_log[-1]["corrections"]:
if not x["actions"]:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: %s" % x["handler"]
raise NonRecoverableError(s, False, x["handler"])
if self.errors_current_job >= self.max_errors_per_job:
self.run_log[-1]["max_errors_per_job"] = True
msg = "Max errors per job reached: {}.".format(self.max_errors_per_job)
logger.info(msg)
raise MaxCorrectionsPerJobError(msg, True, self.max_errors_per_job, job)
else:
self.run_log[-1]["max_errors"] = True
msg = "Max errors reached: {}.".format(self.max_errors)
logger.info(msg)
raise MaxCorrectionsError(msg, True, self.max_errors) | python | def _run_job(self, job_n, job):
"""
Runs a single job.
Args:
job_n: job number (1 index)
job: Custodian job
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
self.run_log.append({"job": job.as_dict(), "corrections": [],
"handler": None, "validator": None,
"max_errors": False, "max_errors_per_job": False,
"max_errors_per_handler": False,
"nonzero_return_code": False})
self.errors_current_job = 0
# reset the counters of the number of times a correction has been
# applied for each handler
for h in self.handlers:
h.n_applied_corrections = 0
job.setup()
attempt = 0
while (self.total_errors < self.max_errors and
self.errors_current_job < self.max_errors_per_job):
attempt += 1
logger.info(
"Starting job no. {} ({}) attempt no. {}. Total errors and "
"errors in job thus far = {}, {}.".format(
job_n, job.name, attempt, self.total_errors,
self.errors_current_job))
p = job.run()
# Check for errors using the error handlers and perform
# corrections.
has_error = False
zero_return_code = True
# While the job is running, we use the handlers that are
# monitors to monitor the job.
if isinstance(p, subprocess.Popen):
if self.monitors:
n = 0
while True:
n += 1
time.sleep(self.polling_time_step)
if p.poll() is not None:
break
terminate = self.terminate_func or p.terminate
if n % self.monitor_freq == 0:
has_error = self._do_check(self.monitors,
terminate)
if terminate is not None and terminate != p.terminate:
time.sleep(self.polling_time_step)
else:
p.wait()
if self.terminate_func is not None and \
self.terminate_func != p.terminate:
self.terminate_func()
time.sleep(self.polling_time_step)
zero_return_code = p.returncode == 0
logger.info("{}.run has completed. "
"Checking remaining handlers".format(job.name))
# Check for errors again, since in some cases non-monitor
# handlers fix the problems detected by monitors
# if an error has been found, not all handlers need to run
if has_error:
self._do_check([h for h in self.handlers
if not h.is_monitor])
else:
has_error = self._do_check(self.handlers)
if has_error:
# This makes sure the job is killed cleanly for certain systems.
job.terminate()
# If there are no errors detected, perform
# postprocessing and exit.
if not has_error:
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
if not zero_return_code:
if self.terminate_on_nonzero_returncode:
self.run_log[-1]["nonzero_return_code"] = True
s = "Job return code is %d. Terminating..." % \
p.returncode
logger.info(s)
raise ReturnCodeError(s, True)
else:
warnings.warn("subprocess returned a non-zero return "
"code. Check outputs carefully...")
job.postprocess()
return
# Check that all errors could be handled
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
for x in self.run_log[-1]["corrections"]:
if not x["actions"]:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: %s" % x["handler"]
raise NonRecoverableError(s, False, x["handler"])
if self.errors_current_job >= self.max_errors_per_job:
self.run_log[-1]["max_errors_per_job"] = True
msg = "Max errors per job reached: {}.".format(self.max_errors_per_job)
logger.info(msg)
raise MaxCorrectionsPerJobError(msg, True, self.max_errors_per_job, job)
else:
self.run_log[-1]["max_errors"] = True
msg = "Max errors reached: {}.".format(self.max_errors)
logger.info(msg)
raise MaxCorrectionsError(msg, True, self.max_errors) | Runs a single job.
Args:
job_n: job number (1 index)
job: Custodian job
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L359-L487 |
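The monitoring half of _run_job reduces to a generic pattern: poll a subprocess on a fixed time step and, on every monitor_freq-th poll, run the monitor checks with a terminate callback. A stripped-down sketch of that pattern (not custodian's public API):
```
import subprocess
import time

def poll_with_monitor(cmd, check, polling_time_step=10, monitor_freq=30):
    """Run cmd; call check(terminate) on every monitor_freq-th poll.

    check should return True if it detected an error and terminated
    the process via the callback it was given.
    """
    p = subprocess.Popen(cmd)
    n = 0
    while True:
        n += 1
        time.sleep(polling_time_step)
        if p.poll() is not None:  # process finished on its own
            break
        if n % monitor_freq == 0 and check(p.terminate):
            p.wait()              # reap the terminated process
            break
    return p.returncode
```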
materialsproject/custodian | custodian/custodian.py | Custodian.run_interrupted | def run_interrupted(self):
"""
Runs custodian in an interrupted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") | python | def run_interrupted(self):
"""
Runs custodian in an interrupted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") | Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L489-L596 |
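In singleshot mode the caller owns the actual execution: each call to run_interrupted() either sets up the next job (returning how many remain) or validates and corrects the previous one. A hedged driver sketch; the shell script name is an assumption:
```
import subprocess

c = Custodian(handlers, jobs, validators)

remaining = c.run_interrupted()  # sets up job 1, writes its inputs
while remaining > 0:
    # The caller runs the executable itself, e.g. from a queue script.
    subprocess.check_call(["bash", "run_vasp.sh"])
    remaining = c.run_interrupted()  # check/correct, set up next job
```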
materialsproject/custodian | custodian/custodian.py | Custodian._do_check | def _do_check(self, handlers, terminate_func=None):
"""
checks the specified handlers. Returns True iff errors caught
"""
corrections = []
for h in handlers:
try:
if h.check():
if h.max_num_corrections is not None \
and h.n_applied_corrections >= h.max_num_corrections:
msg = "Maximum number of corrections {} reached " \
"for handler {}".format(h.max_num_corrections, h)
if h.raise_on_max:
self.run_log[-1]["handler"] = h
self.run_log[-1]["max_errors_per_handler"] = True
raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h)
else:
logger.warning(msg+" Correction not applied.")
continue
if terminate_func is not None and h.is_terminating:
logger.info("Terminating job")
terminate_func()
# make sure we don't terminate twice
terminate_func = None
d = h.correct()
d["handler"] = h
logger.error("\n" + pformat(d, indent=2, width=-1))
corrections.append(d)
h.n_applied_corrections += 1
except Exception:
if not self.skip_over_errors:
raise
else:
import traceback
logger.error("Bad handler %s " % h)
logger.error(traceback.format_exc())
corrections.append(
{"errors": ["Bad handler %s " % h],
"actions": []})
self.total_errors += len(corrections)
self.errors_current_job += len(corrections)
self.run_log[-1]["corrections"].extend(corrections)
# We do a dump of the run log after each check.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
return len(corrections) > 0 | python | def _do_check(self, handlers, terminate_func=None):
"""
checks the specified handlers. Returns True iff errors caught
"""
corrections = []
for h in handlers:
try:
if h.check():
if h.max_num_corrections is not None \
and h.n_applied_corrections >= h.max_num_corrections:
msg = "Maximum number of corrections {} reached " \
"for handler {}".format(h.max_num_corrections, h)
if h.raise_on_max:
self.run_log[-1]["handler"] = h
self.run_log[-1]["max_errors_per_handler"] = True
raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h)
else:
logger.warning(msg+" Correction not applied.")
continue
if terminate_func is not None and h.is_terminating:
logger.info("Terminating job")
terminate_func()
# make sure we don't terminate twice
terminate_func = None
d = h.correct()
d["handler"] = h
logger.error("\n" + pformat(d, indent=2, width=-1))
corrections.append(d)
h.n_applied_corrections += 1
except Exception:
if not self.skip_over_errors:
raise
else:
import traceback
logger.error("Bad handler %s " % h)
logger.error(traceback.format_exc())
corrections.append(
{"errors": ["Bad handler %s " % h],
"actions": []})
self.total_errors += len(corrections)
self.errors_current_job += len(corrections)
self.run_log[-1]["corrections"].extend(corrections)
# We do a dump of the run log after each check.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
return len(corrections) > 0 | checks the specified handlers. Returns True iff errors caught | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L598-L643 |
materialsproject/custodian | custodian/feff/interpreter.py | FeffModder.apply_actions | def apply_actions(self, actions):
"""
Applies a list of actions to the FEFF Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': feffinput_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k])
elif "file" in a:
self.modify(a["action"], a["file"])
else:
raise ValueError("Unrecognized format: {}".format(a))
if modified:
feff = self.feffinp
feff_input = "\n\n".join(str(feff[k]) for k in
["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
if k in feff)
for k, v in six.iteritems(feff):
with open(os.path.join('.', k), "w") as f:
f.write(str(v))
with open(os.path.join('.', "feff.inp"), "w") as f:
f.write(feff_input) | python | def apply_actions(self, actions):
"""
Applies a list of actions to the FEFF Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': feffinput_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k])
elif "file" in a:
self.modify(a["action"], a["file"])
else:
raise ValueError("Unrecognized format: {}".format(a))
if modified:
feff = self.feffinp
feff_input = "\n\n".join(str(feff[k]) for k in
["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
if k in feff)
for k, v in six.iteritems(feff):
with open(os.path.join('.', k), "w") as f:
f.write(str(v))
with open(os.path.join('.', "feff.inp"), "w") as f:
f.write(feff_input) | Applies a list of actions to the FEFF Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': feffinput_key,
'action': moddermodification} | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/feff/interpreter.py#L35-L65 |
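The two action shapes accepted above, shown as a hedged example (keys and values are illustrative, and a FEFF input set is assumed to exist in the working directory):
```
modder = FeffModder()
modder.apply_actions([
    # Modify a FEFF input object in memory; it is rewritten afterwards.
    {"dict": "PARAMETERS", "action": {"_set": {"SCF": "7 0 30 0.2 3"}}},
    # Act directly on a file on disk.
    {"file": "feff.inp", "action": {"_file_copy": {"dest": "feff.inp.orig"}}},
])
```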
materialsproject/custodian | custodian/ansible/actions.py | FileActions.file_create | def file_create(filename, settings):
"""
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'content'.")
for k, v in settings.items():
if k == "content":
with open(filename, 'w') as f:
f.write(v) | python | def file_create(filename, settings):
"""
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'content'.")
for k, v in settings.items():
if k == "content":
with open(filename, 'w') as f:
f.write(v) | Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content} | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L154-L168 |
materialsproject/custodian | custodian/ansible/actions.py | FileActions.file_move | def file_move(filename, settings):
"""
Moves a file. {'_file_move': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'dest'.")
for k, v in settings.items():
if k == "dest":
shutil.move(filename, v) | python | def file_move(filename, settings):
"""
Moves a file. {'_file_move': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'dest'.")
for k, v in settings.items():
if k == "dest":
shutil.move(filename, v) | Moves a file. {'_file_move': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file} | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L171-L184 |
materialsproject/custodian | custodian/ansible/actions.py | FileActions.file_delete | def file_delete(filename, settings):
"""
Deletes a file. {'_file_delete': {'mode': "actual"}}
Args:
filename (str): Filename.
settings (dict): Must be {"mode": actual/simulated}. Simulated
mode only prints the action without performing it.
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'mode'.")
for k, v in settings.items():
if k == "mode" and v == "actual":
try:
os.remove(filename)
except OSError:
#Skip file not found error.
pass
elif k == "mode" and v == "simulated":
print("Simulated removal of {}".format(filename)) | python | def file_delete(filename, settings):
"""
Deletes a file. {'_file_delete': {'mode': "actual"}}
Args:
filename (str): Filename.
settings (dict): Must be {"mode": actual/simulated}. Simulated
mode only prints the action without performing it.
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'mode'.")
for k, v in settings.items():
if k == "mode" and v == "actual":
try:
os.remove(filename)
except OSError:
#Skip file not found error.
pass
elif k == "mode" and v == "simulated":
print("Simulated removal of {}".format(filename)) | Deletes a file. {'_file_delete': {'mode': "actual"}}
Args:
filename (str): Filename.
settings (dict): Must be {"mode": actual/simulated}. Simulated
mode only prints the action without performing it. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L187-L207 |
materialsproject/custodian | custodian/ansible/actions.py | FileActions.file_copy | def file_copy(filename, settings):
"""
Copies a file. {'_file_copy': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
for k, v in settings.items():
if k.startswith("dest"):
shutil.copyfile(filename, v) | python | def file_copy(filename, settings):
"""
Copies a file. {'_file_copy': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file}
"""
for k, v in settings.items():
if k.startswith("dest"):
shutil.copyfile(filename, v) | Copies a file. {'_file_copy': {'dest': 'new_file_name'}}
Args:
filename (str): Filename.
settings (dict): Must be {"dest": path of new file} | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L210-L220 |
materialsproject/custodian | custodian/ansible/actions.py | FileActions.file_modify | def file_modify(filename, settings):
"""
Modifies file access
Args:
filename (str): Filename.
settings (dict): Can be "mode" or "owners"
"""
for k, v in settings.items():
if k == "mode":
os.chmod(filename, v)
if k == "owners":
os.chown(filename, v)
"""
Modifies file access
Args:
filename (str): Filename.
settings (dict): Can be "mode" or "owners"
"""
for k, v in settings.items():
if k == "mode":
os.chmod(filename, v)
if k == "owners":
os.chown(filename, v)
Args:
filename (str): Filename.
settings (dict): Can be "mode" or "owners" | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L223-L235 |
materialsproject/custodian | custodian/feff/jobs.py | FeffJob.setup | def setup(self):
"""
Performs initial setup for FeffJob, do backing up.
Returns:
"""
decompress_dir('.')
if self.backup:
for f in FEFF_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
for f in FEFF_BACKUP_FILES:
if os.path.isfile(f):
shutil.copy(f, "{}.orig".format(f)) | python | def setup(self):
"""
Performs initial setup for FeffJob, do backing up.
Returns:
"""
decompress_dir('.')
if self.backup:
for f in FEFF_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
for f in FEFF_BACKUP_FILES:
if os.path.isfile(f):
shutil.copy(f, "{}.orig".format(f)) | Performs initial setup for FeffJob, do backing up.
Returns: | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/feff/jobs.py#L59-L73 |
materialsproject/custodian | custodian/feff/jobs.py | FeffJob.run | def run(self):
"""
Performs the actual FEFF run
Returns:
(subprocess.Popen) Used for monitoring.
"""
with open(self.output_file, "w") as f_std, \
open(self.stderr_file, "w", buffering=1) as f_err:
# Use line buffering for stderr
# On TSCC, need to run shell command
p = subprocess.Popen(self.feff_cmd, stdout=f_std, stderr=f_err, shell=True)
return p | python | def run(self):
"""
Performs the actual FEFF run
Returns:
(subprocess.Popen) Used for monitoring.
"""
with open(self.output_file, "w") as f_std, \
open(self.stderr_file, "w", buffering=1) as f_err:
# Use line buffering for stderr
# On TSCC, need to run shell command
p = subprocess.Popen(self.feff_cmd, stdout=f_std, stderr=f_err, shell=True)
return p | Performs the actual FEFF run
Returns:
(subprocess.Popen) Used for monitoring. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/feff/jobs.py#L75-L88 |
materialsproject/custodian | custodian/ansible/interpreter.py | Modder.modify | def modify(self, modification, obj):
"""
Note that modify makes actual in-place modifications. It does not
return a copy.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (dict/str/object): Object to modify depending on actions. For
example, for DictActions, obj will be a dict to be modified.
For FileActions, obj will be a string with a full pathname to a
file.
"""
for action, settings in modification.items():
if action in self.supported_actions:
self.supported_actions[action].__call__(obj, settings)
elif self.strict:
raise ValueError("{} is not a supported action!"
.format(action)) | python | def modify(self, modification, obj):
"""
Note that modify makes actual in-place modifications. It does not
return a copy.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (dict/str/object): Object to modify depending on actions. For
example, for DictActions, obj will be a dict to be modified.
For FileActions, obj will be a string with a full pathname to a
file.
"""
for action, settings in modification.items():
if action in self.supported_actions:
self.supported_actions[action].__call__(obj, settings)
elif self.strict:
raise ValueError("{} is not a supported action!"
.format(action)) | Note that modify makes actual in-place modifications. It does not
return a copy.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (dict/str/object): Object to modify depending on actions. For
example, for DictActions, obj will be a dict to be modified.
For FileActions, obj will be a string with a full pathname to a
file. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/interpreter.py#L67-L85 |
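A runnable example of the in-place modification described above, using the default DictActions:
```
from custodian.ansible.interpreter import Modder

d = {"Hello": "World"}
modder = Modder()  # DictActions are supported by default
modder.modify({"_set": {"Hello": "Universe", "Bye": "World"}}, d)
print(d)  # {'Hello': 'Universe', 'Bye': 'World'}
```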
materialsproject/custodian | custodian/ansible/interpreter.py | Modder.modify_object | def modify_object(self, modification, obj):
"""
Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify
"""
d = obj.as_dict()
self.modify(modification, d)
return obj.from_dict(d) | python | def modify_object(self, modification, obj):
"""
Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify
"""
d = obj.as_dict()
self.modify(modification, d)
return obj.from_dict(d) | Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/interpreter.py#L87-L98 |
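modify_object works on anything exposing the as_dict()/from_dict() pair and returns a rebuilt object rather than mutating the original. A tiny illustrative class makes the round-trip explicit:
```
class Point(object):
    def __init__(self, x):
        self.x = x

    def as_dict(self):
        return {"x": self.x}

    @classmethod
    def from_dict(cls, d):
        return cls(d["x"])

p2 = Modder().modify_object({"_set": {"x": 5}}, Point(1))
print(p2.x)  # 5
```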
materialsproject/custodian | custodian/vasp/interpreter.py | VaspModder.apply_actions | def apply_actions(self, actions):
"""
Applies a list of actions to the Vasp Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': vaspinput_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
self.vi[k] = self.modify_object(a["action"], self.vi[k])
elif "file" in a:
self.modify(a["action"], a["file"])
else:
raise ValueError("Unrecognized format: {}".format(a))
for f in modified:
self.vi[f].write_file(f) | python | def apply_actions(self, actions):
"""
Applies a list of actions to the Vasp Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': vaspinput_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
self.vi[k] = self.modify_object(a["action"], self.vi[k])
elif "file" in a:
self.modify(a["action"], a["file"])
else:
raise ValueError("Unrecognized format: {}".format(a))
for f in modified:
self.vi[f].write_file(f) | Applies a list of actions to the Vasp Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': vaspinput_key,
'action': moddermodification} | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/interpreter.py#L31-L51 |
materialsproject/custodian | custodian/utils.py | backup | def backup(filenames, prefix="error"):
"""
Backup files to a tar.gz file. Used, for example, in backing up the
files of an errored run before performing corrections.
Args:
filenames ([str]): List of files to backup. Supports wildcards, e.g.,
*.*.
prefix (str): prefix to the files. Defaults to error, which means a
series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
"""
num = max([0] + [int(f.split(".")[1])
for f in glob("{}.*.tar.gz".format(prefix))])
filename = "{}.{}.tar.gz".format(prefix, num + 1)
logging.info("Backing up run to {}.".format(filename))
with tarfile.open(filename, "w:gz") as tar:
for fname in filenames:
for f in glob(fname):
tar.add(f) | python | def backup(filenames, prefix="error"):
"""
Backup files to a tar.gz file. Used, for example, in backing up the
files of an errored run before performing corrections.
Args:
filenames ([str]): List of files to backup. Supports wildcards, e.g.,
*.*.
prefix (str): prefix to the files. Defaults to error, which means a
series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
"""
num = max([0] + [int(f.split(".")[1])
for f in glob("{}.*.tar.gz".format(prefix))])
filename = "{}.{}.tar.gz".format(prefix, num + 1)
logging.info("Backing up run to {}.".format(filename))
with tarfile.open(filename, "w:gz") as tar:
for fname in filenames:
for f in glob(fname):
tar.add(f) | Backup files to a tar.gz file. Used, for example, in backing up the
files of an errored run before performing corrections.
Args:
filenames ([str]): List of files to backup. Supports wildcards, e.g.,
*.*.
prefix (str): prefix to the files. Defaults to error, which means a
series of error.1.tar.gz, error.2.tar.gz, ... will be generated. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/utils.py#L23-L41 |
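Usage sketch: each call scans for existing archives and picks the next free index, so successive corrections accumulate error.1.tar.gz, error.2.tar.gz, ... (filenames here are illustrative):
```
backup(["INCAR", "OUTCAR", "*.log"])         # -> error.1.tar.gz
backup(["INCAR", "OUTCAR"], prefix="error")  # -> error.2.tar.gz
```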
materialsproject/custodian | custodian/utils.py | get_execution_host_info | def get_execution_host_info():
"""
Tries to return a tuple describing the execution host.
Doesn't work for all queueing systems
Returns:
(HOSTNAME, CLUSTER_NAME)
"""
host = os.environ.get('HOSTNAME', None)
cluster = os.environ.get('SGE_O_HOST', None)
if host is None:
try:
import socket
host = host or socket.gethostname()
except:
pass
return host or 'unknown', cluster or 'unknown' | python | def get_execution_host_info():
"""
Tries to return a tuple describing the execution host.
Doesn't work for all queueing systems
Returns:
(HOSTNAME, CLUSTER_NAME)
"""
host = os.environ.get('HOSTNAME', None)
cluster = os.environ.get('SGE_O_HOST', None)
if host is None:
try:
import socket
host = host or socket.gethostname()
except:
pass
return host or 'unknown', cluster or 'unknown' | Tries to return a tuple describing the execution host.
Doesn't work for all queueing systems
Returns:
(HOSTNAME, CLUSTER_NAME) | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/utils.py#L44-L60 |
materialsproject/custodian | custodian/qchem/jobs.py | QCJob.run | def run(self):
"""
Perform the actual QChem run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
qclog = open(self.qclog_file, 'w')
p = subprocess.Popen(self.current_command, stdout=qclog)
return p | python | def run(self):
"""
Perform the actual QChem run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
qclog = open(self.qclog_file, 'w')
p = subprocess.Popen(self.current_command, stdout=qclog)
return p | Perform the actual QChem run.
Returns:
(subprocess.Popen) Used for monitoring. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/qchem/jobs.py#L120-L129 |
materialsproject/custodian | custodian/qchem/jobs.py | QCJob.opt_with_frequency_flattener | def opt_with_frequency_flattener(cls,
qchem_command,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
qclog_file="mol.qclog",
max_iterations=10,
max_molecule_perturb_scale=0.3,
check_connectivity=True,
**QCJob_kwargs):
"""
Optimize a structure and calculate vibrational frequencies to check if the
structure is in a true minimum. If a frequency is negative, iteratively
perturb the geometry, optimize, and recalculate frequencies until all are
positive, i.e., a true minimum has been found.
Args:
qchem_command (str): Command to run QChem.
multimode (str): Parallelization scheme, either openmp or mpi.
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that
can be applied to the molecule. Defaults to 0.3.
check_connectivity (bool): Whether to check differences in connectivity
introduced by structural perturbation. Defaults to True.
**QCJob_kwargs: Passthrough kwargs to QCJob. See
:class:`custodian.qchem.jobs.QCJob`.
"""
min_molecule_perturb_scale = 0.1
scale_grid = 10
perturb_scale_grid = (
max_molecule_perturb_scale - min_molecule_perturb_scale
) / scale_grid
if not os.path.exists(input_file):
raise AssertionError('Input file must be present!')
orig_opt_input = QCInput.from_file(input_file)
orig_opt_rem = copy.deepcopy(orig_opt_input.rem)
orig_freq_rem = copy.deepcopy(orig_opt_input.rem)
orig_freq_rem["job_type"] = "freq"
first = True
reversed_direction = False
num_neg_freqs = []
for ii in range(max_iterations):
yield (QCJob(
qchem_command=qchem_command,
multimode=multimode,
input_file=input_file,
output_file=output_file,
qclog_file=qclog_file,
suffix=".opt_" + str(ii),
backup=first,
**QCJob_kwargs))
first = False
opt_outdata = QCOutput(output_file + ".opt_" + str(ii)).data
if opt_outdata["structure_change"] == "unconnected_fragments" and not opt_outdata["completion"]:
print("Unstable molecule broke into unconnected fragments which failed to optimize! Exiting...")
break
else:
freq_QCInput = QCInput(
molecule=opt_outdata.get("molecule_from_optimized_geometry"),
rem=orig_freq_rem,
opt=orig_opt_input.opt,
pcm=orig_opt_input.pcm,
solvent=orig_opt_input.solvent)
freq_QCInput.write_file(input_file)
yield (QCJob(
qchem_command=qchem_command,
multimode=multimode,
input_file=input_file,
output_file=output_file,
qclog_file=qclog_file,
suffix=".freq_" + str(ii),
backup=first,
**QCJob_kwargs))
outdata = QCOutput(output_file + ".freq_" + str(ii)).data
errors = outdata.get("errors")
if len(errors) != 0:
raise AssertionError('No errors should be encountered while flattening frequencies!')
if outdata.get('frequencies')[0] > 0.0:
print("All frequencies positive!")
break
else:
num_neg_freqs += [sum(1 for freq in outdata.get('frequencies') if freq < 0)]
if len(num_neg_freqs) > 1:
if num_neg_freqs[-1] == num_neg_freqs[-2] and not reversed_direction:
reversed_direction = True
elif num_neg_freqs[-1] == num_neg_freqs[-2] and reversed_direction:
if len(num_neg_freqs) < 3:
raise AssertionError("ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...")
else:
raise Exception("ERROR: Reversing the perturbation direction still could not flatten any frequencies. Exiting...")
elif num_neg_freqs[-1] != num_neg_freqs[-2] and reversed_direction:
reversed_direction = False
negative_freq_vecs = outdata.get("frequency_mode_vectors")[0]
structure_successfully_perturbed = False
for molecule_perturb_scale in np.arange(
max_molecule_perturb_scale, min_molecule_perturb_scale,
-perturb_scale_grid):
new_coords = perturb_coordinates(
old_coords=outdata.get("initial_geometry"),
negative_freq_vecs=negative_freq_vecs,
molecule_perturb_scale=molecule_perturb_scale,
reversed_direction=reversed_direction)
new_molecule = Molecule(
species=outdata.get('species'),
coords=new_coords,
charge=outdata.get('charge'),
spin_multiplicity=outdata.get('multiplicity'))
if check_connectivity:
old_molgraph = MoleculeGraph.with_local_env_strategy(outdata.get("initial_molecule"),
OpenBabelNN(),
reorder=False,
extend_structure=False)
new_molgraph = MoleculeGraph.with_local_env_strategy(new_molecule,
OpenBabelNN(),
reorder=False,
extend_structure=False)
if old_molgraph.isomorphic_to(new_molgraph):
structure_successfully_perturbed = True
break
if not structure_successfully_perturbed:
raise Exception(
"ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting..."
)
new_opt_QCInput = QCInput(
molecule=new_molecule,
rem=orig_opt_rem,
opt=orig_opt_input.opt,
pcm=orig_opt_input.pcm,
solvent=orig_opt_input.solvent)
new_opt_QCInput.write_file(input_file) | python | def opt_with_frequency_flattener(cls,
qchem_command,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
qclog_file="mol.qclog",
max_iterations=10,
max_molecule_perturb_scale=0.3,
check_connectivity=True,
**QCJob_kwargs):
"""
Optimize a structure and calculate vibrational frequencies to check if the
        structure is at a true minimum. If a frequency is negative, iteratively
        perturb the geometry, optimize, and recalculate frequencies until all are
        positive, i.e., a true minimum has been found.
Args:
qchem_command (str): Command to run QChem.
multimode (str): Parallelization scheme, either openmp or mpi.
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that
can be applied to the molecule. Defaults to 0.3.
check_connectivity (bool): Whether to check differences in connectivity
introduced by structural perturbation. Defaults to True.
**QCJob_kwargs: Passthrough kwargs to QCJob. See
:class:`custodian.qchem.jobs.QCJob`.
"""
min_molecule_perturb_scale = 0.1
scale_grid = 10
perturb_scale_grid = (
max_molecule_perturb_scale - min_molecule_perturb_scale
) / scale_grid
if not os.path.exists(input_file):
raise AssertionError('Input file must be present!')
orig_opt_input = QCInput.from_file(input_file)
orig_opt_rem = copy.deepcopy(orig_opt_input.rem)
orig_freq_rem = copy.deepcopy(orig_opt_input.rem)
orig_freq_rem["job_type"] = "freq"
first = True
reversed_direction = False
num_neg_freqs = []
for ii in range(max_iterations):
yield (QCJob(
qchem_command=qchem_command,
multimode=multimode,
input_file=input_file,
output_file=output_file,
qclog_file=qclog_file,
suffix=".opt_" + str(ii),
backup=first,
**QCJob_kwargs))
first = False
opt_outdata = QCOutput(output_file + ".opt_" + str(ii)).data
if opt_outdata["structure_change"] == "unconnected_fragments" and not opt_outdata["completion"]:
print("Unstable molecule broke into unconnected fragments which failed to optimize! Exiting...")
break
else:
freq_QCInput = QCInput(
molecule=opt_outdata.get("molecule_from_optimized_geometry"),
rem=orig_freq_rem,
opt=orig_opt_input.opt,
pcm=orig_opt_input.pcm,
solvent=orig_opt_input.solvent)
freq_QCInput.write_file(input_file)
yield (QCJob(
qchem_command=qchem_command,
multimode=multimode,
input_file=input_file,
output_file=output_file,
qclog_file=qclog_file,
suffix=".freq_" + str(ii),
backup=first,
**QCJob_kwargs))
outdata = QCOutput(output_file + ".freq_" + str(ii)).data
errors = outdata.get("errors")
if len(errors) != 0:
raise AssertionError('No errors should be encountered while flattening frequencies!')
if outdata.get('frequencies')[0] > 0.0:
print("All frequencies positive!")
break
else:
num_neg_freqs += [sum(1 for freq in outdata.get('frequencies') if freq < 0)]
if len(num_neg_freqs) > 1:
if num_neg_freqs[-1] == num_neg_freqs[-2] and not reversed_direction:
reversed_direction = True
elif num_neg_freqs[-1] == num_neg_freqs[-2] and reversed_direction:
if len(num_neg_freqs) < 3:
raise AssertionError("ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...")
else:
raise Exception("ERROR: Reversing the perturbation direction still could not flatten any frequencies. Exiting...")
elif num_neg_freqs[-1] != num_neg_freqs[-2] and reversed_direction:
reversed_direction = False
negative_freq_vecs = outdata.get("frequency_mode_vectors")[0]
structure_successfully_perturbed = False
for molecule_perturb_scale in np.arange(
max_molecule_perturb_scale, min_molecule_perturb_scale,
-perturb_scale_grid):
new_coords = perturb_coordinates(
old_coords=outdata.get("initial_geometry"),
negative_freq_vecs=negative_freq_vecs,
molecule_perturb_scale=molecule_perturb_scale,
reversed_direction=reversed_direction)
new_molecule = Molecule(
species=outdata.get('species'),
coords=new_coords,
charge=outdata.get('charge'),
spin_multiplicity=outdata.get('multiplicity'))
if check_connectivity:
old_molgraph = MoleculeGraph.with_local_env_strategy(outdata.get("initial_molecule"),
OpenBabelNN(),
reorder=False,
extend_structure=False)
new_molgraph = MoleculeGraph.with_local_env_strategy(new_molecule,
OpenBabelNN(),
reorder=False,
extend_structure=False)
if old_molgraph.isomorphic_to(new_molgraph):
structure_successfully_perturbed = True
break
if not structure_successfully_perturbed:
raise Exception(
"ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting..."
)
new_opt_QCInput = QCInput(
molecule=new_molecule,
rem=orig_opt_rem,
opt=orig_opt_input.opt,
pcm=orig_opt_input.pcm,
solvent=orig_opt_input.solvent)
new_opt_QCInput.write_file(input_file) | Optimize a structure and calculate vibrational frequencies to check if the
structure is at a true minimum. If a frequency is negative, iteratively
perturb the geometry, optimize, and recalculate frequencies until all are
positive, i.e., a true minimum has been found.
Args:
qchem_command (str): Command to run QChem.
multimode (str): Parallelization scheme, either openmp or mpi.
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that
can be applied to the molecule. Defaults to 0.3.
check_connectivity (bool): Whether to check differences in connectivity
introduced by structural perturbation. Defaults to True.
**QCJob_kwargs: Passthrough kwargs to QCJob. See
:class:`custodian.qchem.jobs.QCJob`. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/qchem/jobs.py#L132-L270 |
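
A minimal driver sketch for the generator above, assuming the Custodian runner from the same package, a plain "qchem" executable, and an empty handler list; none of these come from the record itself:

# Sketch only: the import of Custodian, the executable name, and the empty
# handler list are assumptions. opt_with_frequency_flattener yields QCJob
# instances lazily, one per opt/freq step.
from custodian.custodian import Custodian
from custodian.qchem.jobs import QCJob

jobs = QCJob.opt_with_frequency_flattener(
    qchem_command="qchem",        # assumed executable name
    multimode="openmp",
    input_file="mol.qin",
    output_file="mol.qout",
    max_iterations=10,
)
# Custodian accepts a generator of jobs, so the perturb -> optimize -> freq
# loop above only advances once each yielded job has finished.
Custodian([], jobs, max_errors=10).run()
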
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.setup | def setup(self):
"""
Performs initial setup for VaspJob, including overriding any settings
and backing up.
"""
decompress_dir('.')
if self.backup:
for f in VASP_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
# Only optimized NPAR for non-HF and non-RPA calculations.
if not (incar.get("LHFCALC") or incar.get("LRPA") or
incar.get("LEPSILON")):
if incar.get("IBRION") in [5, 6, 7, 8]:
# NPAR should not be set for Hessian matrix
# calculations, whether in DFPT or otherwise.
del incar["NPAR"]
else:
import multiprocessing
# try sge environment variable first
# (since multiprocessing counts cores on the current
# machine only)
ncores = os.environ.get('NSLOTS') or \
multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.auto_continue:
if os.path.exists("continue.json"):
actions = loadfn("continue.json").get("actions")
logger.info("Continuing previous VaspJob. Actions: {}".format(actions))
backup(VASP_BACKUP_FILES, prefix="prev_run")
VaspModder().apply_actions(actions)
else:
# Default functionality is to copy CONTCAR to POSCAR and set
# ISTART to 1 in the INCAR, but other actions can be specified
if self.auto_continue is True:
actions = [{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}},
{"dict": "INCAR",
"action": {"_set": {"ISTART": 1}}}]
else:
actions = self.auto_continue
dumpfn({"actions": actions}, "continue.json")
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override) | python | def setup(self):
"""
Performs initial setup for VaspJob, including overriding any settings
and backing up.
"""
decompress_dir('.')
if self.backup:
for f in VASP_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
# Only optimized NPAR for non-HF and non-RPA calculations.
if not (incar.get("LHFCALC") or incar.get("LRPA") or
incar.get("LEPSILON")):
if incar.get("IBRION") in [5, 6, 7, 8]:
# NPAR should not be set for Hessian matrix
# calculations, whether in DFPT or otherwise.
del incar["NPAR"]
else:
import multiprocessing
# try sge environment variable first
# (since multiprocessing counts cores on the current
# machine only)
ncores = os.environ.get('NSLOTS') or \
multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.auto_continue:
if os.path.exists("continue.json"):
actions = loadfn("continue.json").get("actions")
logger.info("Continuing previous VaspJob. Actions: {}".format(actions))
backup(VASP_BACKUP_FILES, prefix="prev_run")
VaspModder().apply_actions(actions)
else:
# Default functionality is to copy CONTCAR to POSCAR and set
# ISTART to 1 in the INCAR, but other actions can be specified
if self.auto_continue is True:
actions = [{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}},
{"dict": "INCAR",
"action": {"_set": {"ISTART": 1}}}]
else:
actions = self.auto_continue
dumpfn({"actions": actions}, "continue.json")
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override) | Performs initial setup for VaspJob, including overriding any settings
and backing up. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L131-L189 |
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.run | def run(self):
"""
Perform the actual VASP run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
cmd = list(self.vasp_cmd)
if self.auto_gamma:
vi = VaspInput.from_directory(".")
kpts = vi["KPOINTS"]
if kpts.style == Kpoints.supported_modes.Gamma \
and tuple(kpts.kpts[0]) == (1, 1, 1):
if self.gamma_vasp_cmd is not None and which(
self.gamma_vasp_cmd[-1]):
cmd = self.gamma_vasp_cmd
elif which(cmd[-1] + ".gamma"):
cmd[-1] += ".gamma"
logger.info("Running {}".format(" ".join(cmd)))
with open(self.output_file, 'w') as f_std, \
open(self.stderr_file, "w", buffering=1) as f_err:
# use line buffering for stderr
p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
return p | python | def run(self):
"""
Perform the actual VASP run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
cmd = list(self.vasp_cmd)
if self.auto_gamma:
vi = VaspInput.from_directory(".")
kpts = vi["KPOINTS"]
if kpts.style == Kpoints.supported_modes.Gamma \
and tuple(kpts.kpts[0]) == (1, 1, 1):
if self.gamma_vasp_cmd is not None and which(
self.gamma_vasp_cmd[-1]):
cmd = self.gamma_vasp_cmd
elif which(cmd[-1] + ".gamma"):
cmd[-1] += ".gamma"
logger.info("Running {}".format(" ".join(cmd)))
with open(self.output_file, 'w') as f_std, \
open(self.stderr_file, "w", buffering=1) as f_err:
# use line buffering for stderr
p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
return p | Perform the actual VASP run.
Returns:
(subprocess.Popen) Used for monitoring. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L191-L214 |
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.postprocess | def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
Also copies the magmom to the incar if necessary
"""
for f in VASP_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
if self.copy_magmom and not self.final:
try:
outcar = Outcar("OUTCAR")
magmom = [m['tot'] for m in outcar.magnetization]
incar = Incar.from_file("INCAR")
incar['MAGMOM'] = magmom
incar.write_file("INCAR")
except:
logger.error('MAGMOM copy from OUTCAR to INCAR failed')
# Remove continuation so if a subsequent job is run in
# the same directory, will not restart this job.
if os.path.exists("continue.json"):
os.remove("continue.json") | python | def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
Also copies the magmom to the incar if necessary
"""
for f in VASP_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
if self.copy_magmom and not self.final:
try:
outcar = Outcar("OUTCAR")
magmom = [m['tot'] for m in outcar.magnetization]
incar = Incar.from_file("INCAR")
incar['MAGMOM'] = magmom
incar.write_file("INCAR")
except:
logger.error('MAGMOM copy from OUTCAR to INCAR failed')
# Remove continuation so if a subsequent job is run in
# the same directory, will not restart this job.
if os.path.exists("continue.json"):
os.remove("continue.json") | Postprocessing includes renaming and gzipping where necessary.
Also copies the magmom to the incar if necessary | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L216-L241 |
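
The three VaspJob methods above form the job life cycle that the Custodian runner drives (setup, run, postprocess). A sketch calling them directly, purely for illustration; the launch command is an assumption and the constructor arguments simply mirror the attributes used above:

# Illustrative only; in practice Custodian calls these methods itself.
from custodian.vasp.jobs import VaspJob

job = VaspJob(["mpirun", "vasp_std"],        # assumed launch command
              auto_npar=True, final=False, suffix=".relax1")
job.setup()         # backs up inputs, tunes NPAR, applies any overrides
p = job.run()       # returns the subprocess.Popen used for monitoring
p.wait()
job.postprocess()   # renames OUTCAR, vasprun.xml, etc. with ".relax1"
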
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.double_relaxation_run | def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
half_kpts_first_relax=False, auto_continue=False):
"""
Returns a list of two jobs corresponding to an AFLOW style double
relaxation run.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
number of cores) as recommended by VASP for DFT calculations.
Generally, this results in significant speedups. Defaults to
True. Set to False for HF, GW and RPA calculations.
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
Returns:
List of two jobs corresponding to an AFLOW style run.
"""
incar_update = {"ISTART": 1}
if ediffg:
incar_update["EDIFFG"] = ediffg
settings_overide_1 = None
settings_overide_2 = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if half_kpts_first_relax and os.path.exists("KPOINTS") and \
os.path.exists("POSCAR"):
kpts = Kpoints.from_file("KPOINTS")
orig_kpts_dict = kpts.as_dict()
# lattice vectors with length < 8 will get >1 KPOINT
kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2,
1)).astype(int).tolist()
low_kpts_dict = kpts.as_dict()
settings_overide_1 = [
{"dict": "KPOINTS",
"action": {"_set": low_kpts_dict}}
]
settings_overide_2.append(
{"dict": "KPOINTS",
"action": {"_set": orig_kpts_dict}}
)
return [VaspJob(vasp_cmd, final=False, suffix=".relax1",
auto_npar=auto_npar, auto_continue=auto_continue,
settings_override=settings_overide_1),
VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2",
auto_npar=auto_npar, auto_continue=auto_continue,
settings_override=settings_overide_2)] | python | def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
half_kpts_first_relax=False, auto_continue=False):
"""
Returns a list of two jobs corresponding to an AFLOW style double
relaxation run.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
number of cores) as recommended by VASP for DFT calculations.
Generally, this results in significant speedups. Defaults to
True. Set to False for HF, GW and RPA calculations.
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
Returns:
List of two jobs corresponding to an AFLOW style run.
"""
incar_update = {"ISTART": 1}
if ediffg:
incar_update["EDIFFG"] = ediffg
settings_overide_1 = None
settings_overide_2 = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if half_kpts_first_relax and os.path.exists("KPOINTS") and \
os.path.exists("POSCAR"):
kpts = Kpoints.from_file("KPOINTS")
orig_kpts_dict = kpts.as_dict()
# lattice vectors with length < 8 will get >1 KPOINT
kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2,
1)).astype(int).tolist()
low_kpts_dict = kpts.as_dict()
settings_overide_1 = [
{"dict": "KPOINTS",
"action": {"_set": low_kpts_dict}}
]
settings_overide_2.append(
{"dict": "KPOINTS",
"action": {"_set": orig_kpts_dict}}
)
return [VaspJob(vasp_cmd, final=False, suffix=".relax1",
auto_npar=auto_npar, auto_continue=auto_continue,
settings_override=settings_overide_1),
VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2",
auto_npar=auto_npar, auto_continue=auto_continue,
settings_override=settings_overide_2)] | Returns a list of two jobs corresponding to an AFLOW style double
relaxation run.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
number of cores) as recommended by VASP for DFT calculations.
Generally, this results in significant speedups. Defaults to
True. Set to False for HF, GW and RPA calculations.
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
Returns:
List of two jobs corresponding to an AFLOW style run. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L244-L298 |
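
A usage sketch for the AFLOW-style double relaxation, again assuming the Custodian runner and an mpirun command; the two returned jobs carry the ".relax1"/".relax2" suffixes set up above:

# Sketch only: command and empty handler list are assumptions.
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.double_relaxation_run(
    ["mpirun", "-np", "16", "vasp_std"],
    ediffg=-0.05,
    half_kpts_first_relax=True,
)
# jobs[0] relaxes with a halved k-mesh; jobs[1] restores KPOINTS, copies
# CONTCAR to POSCAR, and writes the final outputs.
Custodian([], jobs, max_errors=5).run()
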
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.metagga_opt_run | def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
half_kpts_first_relax=False, auto_continue=False):
"""
        Returns a list of three jobs to perform an optimization for any
metaGGA functional. There is an initial calculation of the
GGA wavefunction which is fed into the initial metaGGA optimization
to precondition the electronic structure optimizer. The metaGGA
optimization is performed using the double relaxation scheme
"""
incar = Incar.from_file("INCAR")
# Defaults to using the SCAN metaGGA
metaGGA = incar.get("METAGGA", "SCAN")
        # Pre-optimize WAVECAR and structure using regular GGA
pre_opt_setings = [{"dict": "INCAR",
"action": {"_set": {"METAGGA": None,
"LWAVE": True,
"NSW": 0}}}]
jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
final=False, suffix=".precondition",
settings_override=pre_opt_setings)]
# Finish with regular double relaxation style run using SCAN
jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,
ediffg=ediffg,
half_kpts_first_relax=half_kpts_first_relax))
# Ensure the first relaxation doesn't overwrite the original inputs
jobs[1].backup = False
# Update double_relaxation job to start from pre-optimized run
post_opt_settings = [{"dict": "INCAR",
"action": {"_set": {"METAGGA": metaGGA, "ISTART": 1,
"NSW": incar.get("NSW", 99),
"LWAVE": incar.get("LWAVE", False)}}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if jobs[1].settings_override:
post_opt_settings = jobs[1].settings_override + post_opt_settings
jobs[1].settings_override = post_opt_settings
return jobs | python | def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
half_kpts_first_relax=False, auto_continue=False):
"""
        Returns a list of three jobs to perform an optimization for any
metaGGA functional. There is an initial calculation of the
GGA wavefunction which is fed into the initial metaGGA optimization
to precondition the electronic structure optimizer. The metaGGA
optimization is performed using the double relaxation scheme
"""
incar = Incar.from_file("INCAR")
# Defaults to using the SCAN metaGGA
metaGGA = incar.get("METAGGA", "SCAN")
        # Pre-optimize WAVECAR and structure using regular GGA
pre_opt_setings = [{"dict": "INCAR",
"action": {"_set": {"METAGGA": None,
"LWAVE": True,
"NSW": 0}}}]
jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
final=False, suffix=".precondition",
settings_override=pre_opt_setings)]
# Finish with regular double relaxation style run using SCAN
jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,
ediffg=ediffg,
half_kpts_first_relax=half_kpts_first_relax))
# Ensure the first relaxation doesn't overwrite the original inputs
jobs[1].backup = False
# Update double_relaxation job to start from pre-optimized run
post_opt_settings = [{"dict": "INCAR",
"action": {"_set": {"METAGGA": metaGGA, "ISTART": 1,
"NSW": incar.get("NSW", 99),
"LWAVE": incar.get("LWAVE", False)}}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if jobs[1].settings_override:
post_opt_settings = jobs[1].settings_override + post_opt_settings
jobs[1].settings_override = post_opt_settings
        return jobs | Returns a list of three jobs to perform an optimization for any
metaGGA functional. There is an initial calculation of the
GGA wavefunction which is fed into the initial metaGGA optimization
to precondition the electronic structure optimizer. The metaGGA
optimization is performed using the double relaxation scheme | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L301-L343 |
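
The metaGGA variant is wired the same way; the sketch below assumes an INCAR with a METAGGA tag already present in the run directory, since the method reads it to restore the tag after the GGA preconditioning step:

# Sketch only: command and empty handler list are assumptions.
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.metagga_opt_run(["mpirun", "vasp_std"], ediffg=-0.05)
# Three jobs: ".precondition" (GGA, NSW=0, LWAVE=True) followed by the
# usual ".relax1"/".relax2" pair with METAGGA restored.
Custodian([], jobs, max_errors=5).run()
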
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.full_opt_run | def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02,
max_steps=10, ediffg=-0.05, half_kpts_first_relax=False,
**vasp_job_kwargs):
"""
Returns a generator of jobs for a full optimization run. Basically,
this runs an infinite series of geometry optimization jobs until the
% vol change in a particular optimization is less than vol_change_tol.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
vol_change_tol (float): The tolerance at which to stop a run.
            Defaults to 0.02, i.e., 2%.
max_steps (int): The maximum number of runs. Defaults to 10 (
highly unlikely that this limit is ever reached).
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs.
"""
for i in range(max_steps):
if i == 0:
settings = None
backup = True
if half_kpts_first_relax and os.path.exists("KPOINTS") and \
os.path.exists("POSCAR"):
kpts = Kpoints.from_file("KPOINTS")
orig_kpts_dict = kpts.as_dict()
kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist()
low_kpts_dict = kpts.as_dict()
settings = [
{"dict": "KPOINTS",
"action": {"_set": low_kpts_dict}}
]
else:
backup = False
initial = Poscar.from_file("POSCAR").structure
final = Poscar.from_file("CONTCAR").structure
vol_change = (final.volume - initial.volume) / initial.volume
logger.info("Vol change = %.1f %%!" % (vol_change * 100))
if abs(vol_change) < vol_change_tol:
logger.info("Stopping optimization!")
break
else:
incar_update = {"ISTART": 1}
if ediffg:
incar_update["EDIFFG"] = ediffg
settings = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if i == 1 and half_kpts_first_relax:
settings.append({"dict": "KPOINTS",
"action": {"_set": orig_kpts_dict}})
logger.info("Generating job = %d!" % (i+1))
yield VaspJob(vasp_cmd, final=False, backup=backup,
suffix=".relax%d" % (i+1), settings_override=settings,
**vasp_job_kwargs) | python | def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02,
max_steps=10, ediffg=-0.05, half_kpts_first_relax=False,
**vasp_job_kwargs):
"""
Returns a generator of jobs for a full optimization run. Basically,
this runs an infinite series of geometry optimization jobs until the
% vol change in a particular optimization is less than vol_change_tol.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
vol_change_tol (float): The tolerance at which to stop a run.
            Defaults to 0.02, i.e., 2%.
max_steps (int): The maximum number of runs. Defaults to 10 (
highly unlikely that this limit is ever reached).
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs.
"""
for i in range(max_steps):
if i == 0:
settings = None
backup = True
if half_kpts_first_relax and os.path.exists("KPOINTS") and \
os.path.exists("POSCAR"):
kpts = Kpoints.from_file("KPOINTS")
orig_kpts_dict = kpts.as_dict()
kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist()
low_kpts_dict = kpts.as_dict()
settings = [
{"dict": "KPOINTS",
"action": {"_set": low_kpts_dict}}
]
else:
backup = False
initial = Poscar.from_file("POSCAR").structure
final = Poscar.from_file("CONTCAR").structure
vol_change = (final.volume - initial.volume) / initial.volume
logger.info("Vol change = %.1f %%!" % (vol_change * 100))
if abs(vol_change) < vol_change_tol:
logger.info("Stopping optimization!")
break
else:
incar_update = {"ISTART": 1}
if ediffg:
incar_update["EDIFFG"] = ediffg
settings = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
if i == 1 and half_kpts_first_relax:
settings.append({"dict": "KPOINTS",
"action": {"_set": orig_kpts_dict}})
logger.info("Generating job = %d!" % (i+1))
yield VaspJob(vasp_cmd, final=False, backup=backup,
suffix=".relax%d" % (i+1), settings_override=settings,
**vasp_job_kwargs) | Returns a generator of jobs for a full optimization run. Basically,
this runs an infinite series of geometry optimization jobs until the
% vol change in a particular optimization is less than vol_change_tol.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
vol_change_tol (float): The tolerance at which to stop a run.
            Defaults to 0.02, i.e., 2%.
max_steps (int): The maximum number of runs. Defaults to 10 (
highly unlikely that this limit is ever reached).
ediffg (float): Force convergence criteria for subsequent runs (
ignored for the initial run.)
half_kpts_first_relax (bool): Whether to halve the kpoint grid
for the first relaxation. Speeds up difficult convergence
considerably. Defaults to False.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L346-L412 |
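
Because full_opt_run is a generator, each job is only created after the previous one finishes, which is what lets the volume-change check decide when to stop. A minimal sketch, with the command and handler list assumed:

from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.full_opt_run(
    ["mpirun", "vasp_std"],
    vol_change_tol=0.02,   # stop once |dV/V| between POSCAR and CONTCAR < 2%
    max_steps=10,
    ediffg=-0.05,
)
Custodian([], jobs, max_errors=10).run()
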
materialsproject/custodian | custodian/vasp/jobs.py | VaspJob.constrained_opt_run | def constrained_opt_run(cls, vasp_cmd, lattice_direction, initial_strain,
atom_relax=True, max_steps=20, algo="bfgs",
**vasp_job_kwargs):
"""
Returns a generator of jobs for a constrained optimization run. Typical
use case is when you want to approximate a biaxial strain situation,
e.g., you apply a defined strain to a and b directions of the lattice,
        but allow the c-direction to relax.
Some guidelines on the use of this method:
i. It is recommended you do not use the Auto kpoint generation. The
grid generated via Auto may fluctuate with changes in lattice
param, resulting in numerical noise.
ii. Make sure your EDIFF/EDIFFG is properly set in your INCAR. The
optimization relies on these values to determine convergence.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
lattice_direction (str): Which direction to relax. Valid values are
"a", "b" or "c".
initial_strain (float): An initial strain to be applied to the
lattice_direction. This can usually be estimated as the
negative of the strain applied in the other two directions.
E.g., if you apply a tensile strain of 0.05 to the a and b
directions, you can use -0.05 as a reasonable first guess for
initial strain.
atom_relax (bool): Whether to relax atomic positions.
max_steps (int): The maximum number of runs. Defaults to 20 (
highly unlikely that this limit is ever reached).
algo (str): Algorithm to use to find minimum. Default is "bfgs",
which is fast, but can be sensitive to numerical noise
in energy calculations. The alternative is "bisection",
which is more robust but can be a bit slow. The code does fall
back on the bisection when bfgs gives a non-sensical result,
e.g., negative lattice params.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs. At the end of the run, an "EOS.txt" is written
which provides a quick look at the E vs lattice parameter.
"""
nsw = 99 if atom_relax else 0
incar = Incar.from_file("INCAR")
# Set the energy convergence criteria as the EDIFFG (if present) or
# 10 x EDIFF (which itself defaults to 1e-4 if not present).
if incar.get("EDIFFG") and incar.get("EDIFFG") > 0:
etol = incar["EDIFFG"]
else:
etol = incar.get("EDIFF", 1e-4) * 10
if lattice_direction == "a":
lattice_index = 0
elif lattice_direction == "b":
lattice_index = 1
else:
lattice_index = 2
energies = {}
for i in range(max_steps):
if i == 0:
settings = [
{"dict": "INCAR",
"action": {"_set": {"ISIF": 2, "NSW": nsw}}}]
structure = Poscar.from_file("POSCAR").structure
x = structure.lattice.abc[lattice_index]
backup = True
else:
backup = False
v = Vasprun("vasprun.xml")
structure = v.final_structure
energy = v.final_energy
lattice = structure.lattice
x = lattice.abc[lattice_index]
energies[x] = energy
if i == 1:
x *= (1 + initial_strain)
else:
# Sort the lattice parameter by energies.
min_x = min(energies.keys(), key=lambda e: energies[e])
sorted_x = sorted(energies.keys())
ind = sorted_x.index(min_x)
if ind == 0:
other = ind + 1
elif ind == len(sorted_x) - 1:
other = ind - 1
else:
other = ind + 1 \
if energies[sorted_x[ind + 1]] \
< energies[sorted_x[ind - 1]] \
else ind - 1
if abs(energies[min_x]
- energies[sorted_x[other]]) < etol:
logger.info("Stopping optimization! Final %s = %f"
% (lattice_direction, min_x))
break
if ind == 0 and len(sorted_x) > 2:
# Lowest energy lies outside of range of lowest value.
# we decrease the lattice parameter in the next
# iteration to find a minimum. This applies only when
# there are at least 3 values.
x = sorted_x[0] - abs(sorted_x[1] - sorted_x[0])
logger.info("Lowest energy lies below bounds. "
"Setting %s = %f." % (lattice_direction, x))
elif ind == len(sorted_x) - 1 and len(sorted_x) > 2:
# Lowest energy lies outside of range of highest value.
# we increase the lattice parameter in the next
# iteration to find a minimum. This applies only when
# there are at least 3 values.
x = sorted_x[-1] + abs(sorted_x[-1] - sorted_x[-2])
logger.info("Lowest energy lies above bounds. "
"Setting %s = %f." % (lattice_direction, x))
else:
if algo.lower() == "bfgs" and len(sorted_x) >= 4:
try:
                            # If there are at least 4 data points, we will
# do a quadratic fit to accelerate convergence.
x1 = list(energies.keys())
y1 = [energies[j] for j in x1]
z1 = np.polyfit(x1, y1, 2)
pp = np.poly1d(z1)
from scipy.optimize import minimize
result = minimize(
pp, min_x,
bounds=[(sorted_x[0], sorted_x[-1])])
if (not result.success) or result.x[0] < 0:
raise ValueError(
"Negative lattice constant!")
x = result.x[0]
logger.info("BFGS minimized %s = %f."
% (lattice_direction, x))
except ValueError as ex:
# Fall back on bisection algo if the bfgs fails.
logger.info(str(ex))
x = (min_x + sorted_x[other]) / 2
logger.info("Falling back on bisection %s = %f."
% (lattice_direction, x))
else:
x = (min_x + sorted_x[other]) / 2
logger.info("Bisection %s = %f."
% (lattice_direction, x))
lattice = lattice.matrix
lattice[lattice_index] = lattice[lattice_index] / \
np.linalg.norm(lattice[lattice_index]) * x
s = Structure(lattice, structure.species, structure.frac_coords)
fname = "POSCAR.%f" % x
s.to(filename=fname)
incar_update = {"ISTART": 1, "NSW": nsw, "ISIF": 2}
settings = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": fname,
"action": {"_file_copy": {"dest": "POSCAR"}}}]
logger.info("Generating job = %d with parameter %f!" % (i + 1, x))
yield VaspJob(vasp_cmd, final=False, backup=backup,
suffix=".static.%f" % x,
settings_override=settings, **vasp_job_kwargs)
with open("EOS.txt", "wt") as f:
f.write("# %s energy\n" % lattice_direction)
for k in sorted(energies.keys()):
f.write("%f %f\n" % (k, energies[k])) | python | def constrained_opt_run(cls, vasp_cmd, lattice_direction, initial_strain,
atom_relax=True, max_steps=20, algo="bfgs",
**vasp_job_kwargs):
"""
Returns a generator of jobs for a constrained optimization run. Typical
use case is when you want to approximate a biaxial strain situation,
e.g., you apply a defined strain to a and b directions of the lattice,
        but allow the c-direction to relax.
Some guidelines on the use of this method:
i. It is recommended you do not use the Auto kpoint generation. The
grid generated via Auto may fluctuate with changes in lattice
param, resulting in numerical noise.
ii. Make sure your EDIFF/EDIFFG is properly set in your INCAR. The
optimization relies on these values to determine convergence.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
lattice_direction (str): Which direction to relax. Valid values are
"a", "b" or "c".
initial_strain (float): An initial strain to be applied to the
lattice_direction. This can usually be estimated as the
negative of the strain applied in the other two directions.
E.g., if you apply a tensile strain of 0.05 to the a and b
directions, you can use -0.05 as a reasonable first guess for
initial strain.
atom_relax (bool): Whether to relax atomic positions.
max_steps (int): The maximum number of runs. Defaults to 20 (
highly unlikely that this limit is ever reached).
algo (str): Algorithm to use to find minimum. Default is "bfgs",
which is fast, but can be sensitive to numerical noise
in energy calculations. The alternative is "bisection",
which is more robust but can be a bit slow. The code does fall
back on the bisection when bfgs gives a non-sensical result,
e.g., negative lattice params.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs. At the end of the run, an "EOS.txt" is written
which provides a quick look at the E vs lattice parameter.
"""
nsw = 99 if atom_relax else 0
incar = Incar.from_file("INCAR")
# Set the energy convergence criteria as the EDIFFG (if present) or
# 10 x EDIFF (which itself defaults to 1e-4 if not present).
if incar.get("EDIFFG") and incar.get("EDIFFG") > 0:
etol = incar["EDIFFG"]
else:
etol = incar.get("EDIFF", 1e-4) * 10
if lattice_direction == "a":
lattice_index = 0
elif lattice_direction == "b":
lattice_index = 1
else:
lattice_index = 2
energies = {}
for i in range(max_steps):
if i == 0:
settings = [
{"dict": "INCAR",
"action": {"_set": {"ISIF": 2, "NSW": nsw}}}]
structure = Poscar.from_file("POSCAR").structure
x = structure.lattice.abc[lattice_index]
backup = True
else:
backup = False
v = Vasprun("vasprun.xml")
structure = v.final_structure
energy = v.final_energy
lattice = structure.lattice
x = lattice.abc[lattice_index]
energies[x] = energy
if i == 1:
x *= (1 + initial_strain)
else:
# Sort the lattice parameter by energies.
min_x = min(energies.keys(), key=lambda e: energies[e])
sorted_x = sorted(energies.keys())
ind = sorted_x.index(min_x)
if ind == 0:
other = ind + 1
elif ind == len(sorted_x) - 1:
other = ind - 1
else:
other = ind + 1 \
if energies[sorted_x[ind + 1]] \
< energies[sorted_x[ind - 1]] \
else ind - 1
if abs(energies[min_x]
- energies[sorted_x[other]]) < etol:
logger.info("Stopping optimization! Final %s = %f"
% (lattice_direction, min_x))
break
if ind == 0 and len(sorted_x) > 2:
# Lowest energy lies outside of range of lowest value.
# we decrease the lattice parameter in the next
# iteration to find a minimum. This applies only when
# there are at least 3 values.
x = sorted_x[0] - abs(sorted_x[1] - sorted_x[0])
logger.info("Lowest energy lies below bounds. "
"Setting %s = %f." % (lattice_direction, x))
elif ind == len(sorted_x) - 1 and len(sorted_x) > 2:
# Lowest energy lies outside of range of highest value.
# we increase the lattice parameter in the next
# iteration to find a minimum. This applies only when
# there are at least 3 values.
x = sorted_x[-1] + abs(sorted_x[-1] - sorted_x[-2])
logger.info("Lowest energy lies above bounds. "
"Setting %s = %f." % (lattice_direction, x))
else:
if algo.lower() == "bfgs" and len(sorted_x) >= 4:
try:
                            # If there are at least 4 data points, we will
# do a quadratic fit to accelerate convergence.
x1 = list(energies.keys())
y1 = [energies[j] for j in x1]
z1 = np.polyfit(x1, y1, 2)
pp = np.poly1d(z1)
from scipy.optimize import minimize
result = minimize(
pp, min_x,
bounds=[(sorted_x[0], sorted_x[-1])])
if (not result.success) or result.x[0] < 0:
raise ValueError(
"Negative lattice constant!")
x = result.x[0]
logger.info("BFGS minimized %s = %f."
% (lattice_direction, x))
except ValueError as ex:
# Fall back on bisection algo if the bfgs fails.
logger.info(str(ex))
x = (min_x + sorted_x[other]) / 2
logger.info("Falling back on bisection %s = %f."
% (lattice_direction, x))
else:
x = (min_x + sorted_x[other]) / 2
logger.info("Bisection %s = %f."
% (lattice_direction, x))
lattice = lattice.matrix
lattice[lattice_index] = lattice[lattice_index] / \
np.linalg.norm(lattice[lattice_index]) * x
s = Structure(lattice, structure.species, structure.frac_coords)
fname = "POSCAR.%f" % x
s.to(filename=fname)
incar_update = {"ISTART": 1, "NSW": nsw, "ISIF": 2}
settings = [
{"dict": "INCAR",
"action": {"_set": incar_update}},
{"file": fname,
"action": {"_file_copy": {"dest": "POSCAR"}}}]
logger.info("Generating job = %d with parameter %f!" % (i + 1, x))
yield VaspJob(vasp_cmd, final=False, backup=backup,
suffix=".static.%f" % x,
settings_override=settings, **vasp_job_kwargs)
with open("EOS.txt", "wt") as f:
f.write("# %s energy\n" % lattice_direction)
for k in sorted(energies.keys()):
f.write("%f %f\n" % (k, energies[k])) | Returns a generator of jobs for a constrained optimization run. Typical
use case is when you want to approximate a biaxial strain situation,
e.g., you apply a defined strain to a and b directions of the lattice,
but allow the c-direction to relax.
Some guidelines on the use of this method:
i. It is recommended you do not use the Auto kpoint generation. The
grid generated via Auto may fluctuate with changes in lattice
param, resulting in numerical noise.
ii. Make sure your EDIFF/EDIFFG is properly set in your INCAR. The
optimization relies on these values to determine convergence.
Args:
vasp_cmd (str): Command to run vasp as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "pvasp.5.2.11"]
lattice_direction (str): Which direction to relax. Valid values are
"a", "b" or "c".
initial_strain (float): An initial strain to be applied to the
lattice_direction. This can usually be estimated as the
negative of the strain applied in the other two directions.
E.g., if you apply a tensile strain of 0.05 to the a and b
directions, you can use -0.05 as a reasonable first guess for
initial strain.
atom_relax (bool): Whether to relax atomic positions.
max_steps (int): The maximum number of runs. Defaults to 20 (
highly unlikely that this limit is ever reached).
algo (str): Algorithm to use to find minimum. Default is "bfgs",
which is fast, but can be sensitive to numerical noise
in energy calculations. The alternative is "bisection",
which is more robust but can be a bit slow. The code does fall
back on the bisection when bfgs gives a non-sensical result,
e.g., negative lattice params.
\*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
:class:`custodian.vasp.jobs.VaspJob`.
Returns:
Generator of jobs. At the end of the run, an "EOS.txt" is written
which provides a quick look at the E vs lattice parameter. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L415-L590 |
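
A sketch of the biaxial-strain use case the docstring describes: a and b are pre-strained in the input POSCAR, c starts at the opposite strain and is relaxed; the launch command is an assumption:

from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.constrained_opt_run(
    ["mpirun", "vasp_std"],
    lattice_direction="c",
    initial_strain=-0.05,   # roughly the negative of the +5% applied to a, b
    atom_relax=True,
    algo="bfgs",            # falls back to bisection on nonsensical fits
)
Custodian([], jobs, max_errors=10).run()
# EOS.txt afterwards lists each sampled c value and its energy.
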
materialsproject/custodian | custodian/vasp/jobs.py | VaspNEBJob.setup | def setup(self):
"""
Performs initial setup for VaspNEBJob, including overriding any settings
and backing up.
"""
neb_dirs = self.neb_dirs
if self.backup:
# Back up KPOINTS, INCAR, POTCAR
for f in VASP_NEB_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
# Back up POSCARs
for path in neb_dirs:
poscar = os.path.join(path, "POSCAR")
shutil.copy(poscar, "{}.orig".format(poscar))
if self.half_kpts and os.path.exists("KPOINTS"):
kpts = Kpoints.from_file("KPOINTS")
kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1)
kpts.kpts = kpts.kpts.astype(int).tolist()
if tuple(kpts.kpts[0]) == (1, 1, 1):
kpt_dic = kpts.as_dict()
kpt_dic["generation_style"] = 'Gamma'
kpts = Kpoints.from_dict(kpt_dic)
kpts.write_file("KPOINTS")
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
import multiprocessing
# Try sge environment variable first
# (since multiprocessing counts cores on the current
# machine only)
ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.auto_continue and \
os.path.exists("STOPCAR") and \
not os.access("STOPCAR", os.W_OK):
# Remove STOPCAR
os.chmod("STOPCAR", 0o644)
os.remove("STOPCAR")
# Copy CONTCAR to POSCAR
for path in self.neb_sub:
contcar = os.path.join(path, "CONTCAR")
poscar = os.path.join(path, "POSCAR")
shutil.copy(contcar, poscar)
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override) | python | def setup(self):
"""
Performs initial setup for VaspNEBJob, including overriding any settings
and backing up.
"""
neb_dirs = self.neb_dirs
if self.backup:
# Back up KPOINTS, INCAR, POTCAR
for f in VASP_NEB_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
# Back up POSCARs
for path in neb_dirs:
poscar = os.path.join(path, "POSCAR")
shutil.copy(poscar, "{}.orig".format(poscar))
if self.half_kpts and os.path.exists("KPOINTS"):
kpts = Kpoints.from_file("KPOINTS")
kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1)
kpts.kpts = kpts.kpts.astype(int).tolist()
if tuple(kpts.kpts[0]) == (1, 1, 1):
kpt_dic = kpts.as_dict()
kpt_dic["generation_style"] = 'Gamma'
kpts = Kpoints.from_dict(kpt_dic)
kpts.write_file("KPOINTS")
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
import multiprocessing
# Try sge environment variable first
# (since multiprocessing counts cores on the current
# machine only)
ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.auto_continue and \
os.path.exists("STOPCAR") and \
not os.access("STOPCAR", os.W_OK):
# Remove STOPCAR
os.chmod("STOPCAR", 0o644)
os.remove("STOPCAR")
# Copy CONTCAR to POSCAR
for path in self.neb_sub:
contcar = os.path.join(path, "CONTCAR")
poscar = os.path.join(path, "POSCAR")
shutil.copy(contcar, poscar)
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override) | Performs initial setup for VaspNEBJob, including overriding any settings
and backing up. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L686-L744 |
materialsproject/custodian | custodian/vasp/jobs.py | VaspNEBJob.postprocess | def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
# Add suffix to all sub_dir/{items}
for path in self.neb_dirs:
for f in VASP_NEB_OUTPUT_SUB_FILES:
f = os.path.join(path, f)
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
# Add suffix to all output files
for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix)) | python | def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
# Add suffix to all sub_dir/{items}
for path in self.neb_dirs:
for f in VASP_NEB_OUTPUT_SUB_FILES:
f = os.path.join(path, f)
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
# Add suffix to all output files
for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix)) | Postprocessing includes renaming and gzipping where necessary. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L771-L791 |
materialsproject/custodian | custodian/nwchem/jobs.py | NwchemJob.setup | def setup(self):
"""
Performs backup if necessary.
"""
if self.backup:
shutil.copy(self.input_file, "{}.orig".format(self.input_file)) | python | def setup(self):
"""
Performs backup if necessary.
"""
if self.backup:
shutil.copy(self.input_file, "{}.orig".format(self.input_file)) | Performs backup if necessary. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/nwchem/jobs.py#L58-L63 |
materialsproject/custodian | custodian/nwchem/jobs.py | NwchemJob.run | def run(self):
"""
Performs actual nwchem run.
"""
with zopen(self.output_file, 'w') as fout:
return subprocess.Popen(self.nwchem_cmd + [self.input_file],
stdout=fout) | python | def run(self):
"""
Performs actual nwchem run.
"""
with zopen(self.output_file, 'w') as fout:
return subprocess.Popen(self.nwchem_cmd + [self.input_file],
stdout=fout) | Performs actual nwchem run. | https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/nwchem/jobs.py#L65-L71 |
wdecoster/nanofilt | nanofilt/NanoFilt.py | valid_GC | def valid_GC(x):
"""type function for argparse to check GC values.
Check if the supplied value for minGC and maxGC is a valid input, being between 0 and 1
"""
x = float(x)
if x < 0.0 or x > 1.0:
raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x))
return x | python | def valid_GC(x):
"""type function for argparse to check GC values.
Check if the supplied value for minGC and maxGC is a valid input, being between 0 and 1
"""
x = float(x)
if x < 0.0 or x > 1.0:
raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x))
return x | type function for argparse to check GC values.
Check if the supplied value for minGC and maxGC is a valid input, being between 0 and 1 | https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L157-L165 |
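
valid_GC is meant as an argparse type function; a small self-contained sketch (the --minGC/--maxGC flag spellings are assumptions, though the attribute names match the filtering code below):

from argparse import ArgumentParser
from nanofilt.NanoFilt import valid_GC   # import path taken from the record

parser = ArgumentParser()
parser.add_argument("--minGC", type=valid_GC, default=0.0)
parser.add_argument("--maxGC", type=valid_GC, default=1.0)

args = parser.parse_args(["--minGC", "0.4", "--maxGC", "0.6"])
print(args.minGC, args.maxGC)            # 0.4 0.6
# "--minGC 1.5" would make argparse exit with
# "1.5 not in range [0.0, 1.0]".
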
wdecoster/nanofilt | nanofilt/NanoFilt.py | filter_stream | def filter_stream(fq, args):
"""Filter a fastq file on stdin.
Print fastq record to stdout if it passes
- quality filter (optional)
- length filter (optional)
- min/maxGC filter (optional)
Optionally trim a number of nucleotides from beginning and end.
Record has to be longer than args.length (default 1) after trimming
Use a faster silent quality_check if no filtering on quality is required
"""
if args.quality:
quality_check = ave_qual
else:
quality_check = silent_quality_check
minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0))
for rec in SeqIO.parse(fq, "fastq"):
if args.GC_filter:
gc = (rec.seq.upper().count("C") + rec.seq.upper().count("G")) / len(rec)
else:
gc = 0.50 # dummy variable
if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \
and minlen <= len(rec) <= args.maxlength \
and args.minGC <= gc <= args.maxGC:
print(rec[args.headcrop:args.tailcrop].format("fastq"), end="") | python | def filter_stream(fq, args):
"""Filter a fastq file on stdin.
Print fastq record to stdout if it passes
- quality filter (optional)
- length filter (optional)
- min/maxGC filter (optional)
Optionally trim a number of nucleotides from beginning and end.
Record has to be longer than args.length (default 1) after trimming
Use a faster silent quality_check if no filtering on quality is required
"""
if args.quality:
quality_check = ave_qual
else:
quality_check = silent_quality_check
minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0))
for rec in SeqIO.parse(fq, "fastq"):
if args.GC_filter:
gc = (rec.seq.upper().count("C") + rec.seq.upper().count("G")) / len(rec)
else:
gc = 0.50 # dummy variable
if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \
and minlen <= len(rec) <= args.maxlength \
and args.minGC <= gc <= args.maxGC:
print(rec[args.headcrop:args.tailcrop].format("fastq"), end="") | Filter a fastq file on stdin.
Print fastq record to stdout if it passes
- quality filter (optional)
- length filter (optional)
- min/maxGC filter (optional)
Optionally trim a number of nucleotides from beginning and end.
Record has to be longer than args.length (default 1) after trimming
Use a faster silent quality_check if no filtering on quality is required | https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L173-L197 |
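
A sketch of driving filter_stream from stdin with a hand-built namespace standing in for the parsed CLI options; the attribute names are exactly the ones the function reads, the values are arbitrary:

import sys
from types import SimpleNamespace
from nanofilt.NanoFilt import filter_stream   # import path from the record

args = SimpleNamespace(
    quality=7, length=500, maxlength=1000000,
    headcrop=None, tailcrop=None,
    GC_filter=False, minGC=0.0, maxGC=1.0,
)
# e.g.  gunzip -c reads.fastq.gz | python filter.py > filtered.fastq
filter_stream(sys.stdin, args)
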
wdecoster/nanofilt | nanofilt/NanoFilt.py | filter_using_summary | def filter_using_summary(fq, args):
"""Use quality scores from albacore summary file for filtering
Use the summary file from albacore for more accurate quality estimate
Get the dataframe from nanoget, convert to dictionary
"""
data = {entry[0]: entry[1] for entry in process_summary(
summaryfile=args.summary,
threads="NA",
readtype=args.readtype,
barcoded=False)[
["readIDs", "quals"]].itertuples(index=False)}
try:
for record in SeqIO.parse(fq, "fastq"):
if data[record.id] > args.quality \
and args.length <= len(record) <= args.maxlength:
print(record[args.headcrop:args.tailcrop].format("fastq"), end="")
except KeyError:
logging.error("mismatch between summary and fastq: \
{} was not found in the summary file.".format(record.id))
sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \
{} was not found in the summary file.\nQuitting.'.format(record.id)) | python | def filter_using_summary(fq, args):
"""Use quality scores from albacore summary file for filtering
Use the summary file from albacore for more accurate quality estimate
Get the dataframe from nanoget, convert to dictionary
"""
data = {entry[0]: entry[1] for entry in process_summary(
summaryfile=args.summary,
threads="NA",
readtype=args.readtype,
barcoded=False)[
["readIDs", "quals"]].itertuples(index=False)}
try:
for record in SeqIO.parse(fq, "fastq"):
if data[record.id] > args.quality \
and args.length <= len(record) <= args.maxlength:
print(record[args.headcrop:args.tailcrop].format("fastq"), end="")
except KeyError:
logging.error("mismatch between summary and fastq: \
{} was not found in the summary file.".format(record.id))
sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \
{} was not found in the summary file.\nQuitting.'.format(record.id)) | Use quality scores from albacore summary file for filtering
Use the summary file from albacore for more accurate quality estimate
Get the dataframe from nanoget, convert to dictionary | https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L200-L221 |
milesrichardson/ParsePy | parse_rest/connection.py | master_key_required | def master_key_required(func):
'''decorator describing methods that require the master key'''
def ret(obj, *args, **kw):
conn = ACCESS_KEYS
if not (conn and conn.get('master_key')):
message = '%s requires the master key' % func.__name__
raise core.ParseError(message)
func(obj, *args, **kw)
return ret | python | def master_key_required(func):
'''decorator describing methods that require the master key'''
def ret(obj, *args, **kw):
conn = ACCESS_KEYS
if not (conn and conn.get('master_key')):
message = '%s requires the master key' % func.__name__
raise core.ParseError(message)
func(obj, *args, **kw)
return ret | decorator describing methods that require the master key | https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/connection.py#L66-L74 |
milesrichardson/ParsePy | parse_rest/connection.py | ParseBase.execute | def execute(cls, uri, http_verb, extra_headers=None, batch=False, _body=None, **kw):
"""
if batch == False, execute a command with the given parameters and
return the response JSON.
If batch == True, return the dictionary that would be used in a batch
command.
"""
if batch:
urlsplitter = urlparse(API_ROOT).netloc
ret = {"method": http_verb, "path": uri.split(urlsplitter, 1)[1]}
if kw:
ret["body"] = kw
return ret
if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS):
raise core.ParseError('Missing connection credentials')
app_id = ACCESS_KEYS.get('app_id')
rest_key = ACCESS_KEYS.get('rest_key')
master_key = ACCESS_KEYS.get('master_key')
url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri
if _body is None:
data = kw and json.dumps(kw, default=date_handler) or "{}"
else:
data = _body
if http_verb == 'GET' and data:
url += '?%s' % urlencode(kw)
data = None
else:
if cls.__name__ == 'File':
data = data
else:
data = data.encode('utf-8')
headers = {
'Content-type': 'application/json',
'X-Parse-Application-Id': app_id,
'X-Parse-REST-API-Key': rest_key
}
headers.update(extra_headers or {})
if cls.__name__ == 'File':
request = Request(url.encode('utf-8'), data, headers)
else:
request = Request(url, data, headers)
if ACCESS_KEYS.get('session_token'):
request.add_header('X-Parse-Session-Token', ACCESS_KEYS.get('session_token'))
elif master_key:
request.add_header('X-Parse-Master-Key', master_key)
request.get_method = lambda: http_verb
try:
response = urlopen(request, timeout=CONNECTION_TIMEOUT)
except HTTPError as e:
exc = {
400: core.ResourceRequestBadRequest,
401: core.ResourceRequestLoginRequired,
403: core.ResourceRequestForbidden,
404: core.ResourceRequestNotFound
}.get(e.code, core.ParseError)
raise exc(e.read())
return json.loads(response.read().decode('utf-8')) | python | def execute(cls, uri, http_verb, extra_headers=None, batch=False, _body=None, **kw):
"""
if batch == False, execute a command with the given parameters and
return the response JSON.
If batch == True, return the dictionary that would be used in a batch
command.
"""
if batch:
urlsplitter = urlparse(API_ROOT).netloc
ret = {"method": http_verb, "path": uri.split(urlsplitter, 1)[1]}
if kw:
ret["body"] = kw
return ret
if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS):
raise core.ParseError('Missing connection credentials')
app_id = ACCESS_KEYS.get('app_id')
rest_key = ACCESS_KEYS.get('rest_key')
master_key = ACCESS_KEYS.get('master_key')
url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri
if _body is None:
data = kw and json.dumps(kw, default=date_handler) or "{}"
else:
data = _body
if http_verb == 'GET' and data:
url += '?%s' % urlencode(kw)
data = None
else:
if cls.__name__ == 'File':
data = data
else:
data = data.encode('utf-8')
headers = {
'Content-type': 'application/json',
'X-Parse-Application-Id': app_id,
'X-Parse-REST-API-Key': rest_key
}
headers.update(extra_headers or {})
if cls.__name__ == 'File':
request = Request(url.encode('utf-8'), data, headers)
else:
request = Request(url, data, headers)
if ACCESS_KEYS.get('session_token'):
request.add_header('X-Parse-Session-Token', ACCESS_KEYS.get('session_token'))
elif master_key:
request.add_header('X-Parse-Master-Key', master_key)
request.get_method = lambda: http_verb
try:
response = urlopen(request, timeout=CONNECTION_TIMEOUT)
except HTTPError as e:
exc = {
400: core.ResourceRequestBadRequest,
401: core.ResourceRequestLoginRequired,
403: core.ResourceRequestForbidden,
404: core.ResourceRequestNotFound
}.get(e.code, core.ParseError)
raise exc(e.read())
return json.loads(response.read().decode('utf-8')) | if batch == False, execute a command with the given parameters and
return the response JSON.
If batch == True, return the dictionary that would be used in a batch
command. | https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/connection.py#L85-L150 |
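
The batch branch never touches the network, so it can be exercised directly; the example below assumes the default Parse API_ROOT and that execute is exposed as a classmethod, as the cls signature suggests. The class name and field values are placeholder example data:

from parse_rest.connection import API_ROOT, ParseBase

op = ParseBase.execute(
    API_ROOT + "/classes/GameScore", "POST",
    batch=True, score=1337, playerName="Sean Plott",
)
print(op)
# With the default API_ROOT this is roughly:
# {'method': 'POST', 'path': '/1/classes/GameScore',
#  'body': {'score': 1337, 'playerName': 'Sean Plott'}}
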