repo | path | func_name | original_string | language | docstring | sha | url | partition |
---|---|---|---|---|---|---|---|---|
CI-WATER/gsshapy | gsshapy/lib/wms_dataset_chunk.py | datasetHeaderChunk | def datasetHeaderChunk(key, lines):
    """
    Process the dataset header
    """
    KEYWORDS = ('DATASET',
                'OBJTYPE',
                'VECTYPE',
                'BEGSCL',
                'BEGVEC',
                'OBJID',
                'ND',
                'NC',
                'NAME')
    TYPE_KEYS = ('BEGSCL', 'BEGVEC')
    result = {'type': None,
              'numberData': None,
              'numberCells': None,
              'name': None,
              'objectID': None,
              'objectType': None,
              'vectorType': None}
    chunks = pt.chunk(KEYWORDS, lines)
    for key, chunkList in iteritems(chunks):
        for chunk in chunkList:
            schunk = pt.splitLine(chunk[0])
            if key == 'ND':
                result['numberData'] = int(schunk[1])
            elif key == 'NC':
                result['numberCells'] = int(schunk[1])
            elif key == 'NAME':
                result['name'] = schunk[1]
            elif key == 'OBJID':
                result['objectID'] = int(schunk[1])
            elif key == 'OBJTYPE':
                result['objectType'] = schunk[1]
            elif key == 'VECTYPE':
                result['vectorType'] = schunk[1]
            elif key in TYPE_KEYS:
                result['type'] = schunk[0]
    return result | python | Process the dataset header | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/wms_dataset_chunk.py#L14-L66 | train |
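A hedged usage sketch for datasetHeaderChunk: the header lines below are hypothetical (loosely WMS-style, not taken from a real file), and it assumes gsshapy is importable and that pt.splitLine splits a line on whitespace.

# Illustrative only -- hypothetical header lines, not a real WMS dataset file.
from gsshapy.lib.wms_dataset_chunk import datasetHeaderChunk

headerLines = ['DATASET\n',
               'OBJTYPE mesh2d\n',
               'BEGSCL\n',
               'ND 10\n',
               'NC 4\n',
               'NAME depth\n']

result = datasetHeaderChunk('DATASET', headerLines)
# Expected shape (exact strings depend on pt.splitLine):
# result['type'] == 'BEGSCL', result['numberData'] == 10,
# result['numberCells'] == 4, result['name'] == 'depth', result['objectType'] == 'mesh2d'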
CI-WATER/gsshapy | gsshapy/lib/wms_dataset_chunk.py | datasetScalarTimeStepChunk | def datasetScalarTimeStepChunk(lines, numberColumns, numberCells):
    """
    Process the time step chunks for scalar datasets
    """
    END_DATASET_TAG = 'ENDDS'
    # Define the result object
    result = {'iStatus': None,
              'timestamp': None,
              'cellArray': None,
              'rasterText': None}
    # Split the chunks
    timeStep = pt.splitLine(lines.pop(0))
    # Extract cells, ignoring the status indicators
    startCellsIndex = numberCells
    # Handle case when status cells are not included (istat = 0)
    iStatus = int(timeStep[1])
    if iStatus == 0:
        startCellsIndex = 0
    # Strip off ending dataset tag
    if END_DATASET_TAG in lines[-1]:
        lines.pop(-1)
    # Assemble the array string
    arrayString = '[['
    columnCounter = 1
    lenLines = len(lines) - 1
    # Also assemble raster text field to preserve for spatial datasets
    rasterText = ''
    for index in range(startCellsIndex, len(lines)):
        # Check columns condition
        if columnCounter % numberColumns != 0 and index != lenLines:
            arrayString += lines[index].strip() + ', '
        elif columnCounter % numberColumns == 0 and index != lenLines:
            arrayString += lines[index].strip() + '], ['
        elif index == lenLines:
            arrayString += lines[index].strip() + ']]'
        # Advance counter
        columnCounter += 1
        rasterText += lines[index]
    # Get Value Array
    result['cellArray'] = arrayString
    result['rasterText'] = rasterText
    # Assign Result
    result['iStatus'] = iStatus
    result['timestamp'] = float(timeStep[2])
    return result | python | Process the time step chunks for scalar datasets | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/wms_dataset_chunk.py#L69-L127 | train |
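A hedged sketch of the scalar time-step parser on a tiny hypothetical 2x2 raster with no status cells (assumes gsshapy is importable and that pt.splitLine splits on whitespace):

# Illustrative only -- a fake two-column, four-cell time step.
from gsshapy.lib.wms_dataset_chunk import datasetScalarTimeStepChunk

lines = ['TS 0 120.0\n',          # istat = 0, timestamp = 120.0
         '1.1\n', '1.2\n',
         '2.1\n', '2.2\n']

result = datasetScalarTimeStepChunk(lines, numberColumns=2, numberCells=4)
# result['iStatus'] == 0 and result['timestamp'] == 120.0
# result['cellArray'] == '[[1.1, 1.2], [2.1, 2.2]]'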
vinci1it2000/schedula | schedula/utils/io.py | save_dispatcher | def save_dispatcher(dsp, path):
    """
    Write Dispatcher object in Python pickle format.
    Pickles are a serialized byte stream of a Python object.
    This format will preserve Python objects used as nodes or edges.
    :param dsp:
        A dispatcher that identifies the model adopted.
    :type dsp: schedula.Dispatcher
    :param path:
        File or filename to write.
        File names ending in .gz or .bz2 will be compressed.
    :type path: str, file
    .. testsetup::
        >>> from tempfile import mkstemp
        >>> file_name = mkstemp()[1]
    Example::
        >>> from schedula import Dispatcher
        >>> dsp = Dispatcher()
        >>> dsp.add_data('a', default_value=1)
        'a'
        >>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
        'max'
        >>> save_dispatcher(dsp, file_name)
    """
    import dill
    with open(path, 'wb') as f:
        dill.dump(dsp, f) | python | Write Dispatcher object in Python pickle format. | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L16-L48 | train |
vinci1it2000/schedula | schedula/utils/io.py | save_default_values | def save_default_values(dsp, path):
    """
    Write Dispatcher default values in Python pickle format.
    Pickles are a serialized byte stream of a Python object.
    This format will preserve Python objects used as nodes or edges.
    :param dsp:
        A dispatcher that identifies the model adopted.
    :type dsp: schedula.Dispatcher
    :param path:
        File or filename to write.
        File names ending in .gz or .bz2 will be compressed.
    :type path: str, file
    .. testsetup::
        >>> from tempfile import mkstemp
        >>> file_name = mkstemp()[1]
    Example::
        >>> from schedula import Dispatcher
        >>> dsp = Dispatcher()
        >>> dsp.add_data('a', default_value=1)
        'a'
        >>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
        'max'
        >>> save_default_values(dsp, file_name)
    """
    import dill
    with open(path, 'wb') as f:
        dill.dump(dsp.default_values, f) | python | Write Dispatcher default values in Python pickle format. | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L91-L123 | train |
vinci1it2000/schedula | schedula/utils/io.py | load_default_values | def load_default_values(dsp, path):
    """
    Load Dispatcher default values in Python pickle format.
    Pickles are a serialized byte stream of a Python object.
    This format will preserve Python objects used as nodes or edges.
    :param dsp:
        A dispatcher that identifies the model adopted.
    :type dsp: schedula.Dispatcher
    :param path:
        File or filename to write.
        File names ending in .gz or .bz2 will be uncompressed.
    :type path: str, file
    .. testsetup::
        >>> from tempfile import mkstemp
        >>> file_name = mkstemp()[1]
    Example::
        >>> from schedula import Dispatcher
        >>> dsp = Dispatcher()
        >>> dsp.add_data('a', default_value=1)
        'a'
        >>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
        'max'
        >>> save_default_values(dsp, file_name)
        >>> dsp = Dispatcher(dmap=dsp.dmap)
        >>> load_default_values(dsp, file_name)
        >>> dsp.dispatch(inputs={'b': 3})['c']
        3
    """
    import dill
    # noinspection PyArgumentList
    with open(path, 'rb') as f:
        dsp.__init__(dmap=dsp.dmap, default_values=dill.load(f)) | python | Load Dispatcher default values in Python pickle format. | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L126-L164 | train |
vinci1it2000/schedula | schedula/utils/io.py | save_map | def save_map(dsp, path):
    """
    Write Dispatcher graph object in Python pickle format.
    Pickles are a serialized byte stream of a Python object.
    This format will preserve Python objects used as nodes or edges.
    :param dsp:
        A dispatcher that identifies the model adopted.
    :type dsp: schedula.Dispatcher
    :param path:
        File or filename to write.
        File names ending in .gz or .bz2 will be compressed.
    :type path: str, file
    .. testsetup::
        >>> from tempfile import mkstemp
        >>> file_name = mkstemp()[1]
    Example::
        >>> from schedula import Dispatcher
        >>> dsp = Dispatcher()
        >>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
        'max'
        >>> save_map(dsp, file_name)
    """
    import dill
    with open(path, 'wb') as f:
        dill.dump(dsp.dmap, f) | python | Write Dispatcher graph object in Python pickle format. | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L167-L197 | train |
CI-WATER/gsshapy | gsshapy/lib/parsetools.py | chunk | def chunk(keywords, lines):
    """
    Divide a file into chunks between
    key words in the list
    """
    chunks = dict()
    chunk = []
    # Create an empty dictionary using all the keywords
    for keyword in keywords:
        chunks[keyword] = []
    # Populate dictionary with lists of chunks associated
    # with the keywords in the list
    for line in lines:
        if line.strip():
            token = line.split()[0]
            if token in keywords:
                chunk = [line]
                chunks[token].append(chunk)
            else:
                chunk.append(line)
    return chunks | python | Divide a file into chunks between key words in the list | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L46-L69 | train |
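A short usage sketch for chunk with hypothetical card-style lines (assumes gsshapy is importable); lines that begin with a keyword open a new chunk and the lines that follow are appended to it:

# Illustrative only -- fake card lines, not a real GSSHA input file.
from gsshapy.lib.parsetools import chunk

lines = ['LINK 1\n',
         'DX 10.0\n',
         'STRUCTURE\n',
         'NUMSTRUCTS 2\n']

result = chunk(('LINK', 'STRUCTURE'), lines)
# result['LINK']      -> [['LINK 1\n', 'DX 10.0\n']]
# result['STRUCTURE'] -> [['STRUCTURE\n', 'NUMSTRUCTS 2\n']]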
CI-WATER/gsshapy | gsshapy/lib/parsetools.py | valueReadPreprocessor | def valueReadPreprocessor(valueString, replaceParamsFile=None):
    """
    Apply global pre-processing to values during reading throughout the project.
    Args:
        valueString (str): String representing the value to be preprocessed.
        replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
            replacement variables are included in the project.
    Returns:
        str: Processed value as a string
    """
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
        return valueString
    # Default
    processedValue = valueString
    # Check for replacement variables
    if replaceParamsFile is not None and valueString is not None:
        if '[' in valueString or ']' in valueString:
            # Set default value
            processedValue = '{0}'.format(REPLACE_NO_VALUE)
            # Find the matching parameter and return the negative of the id
            for targetParam in replaceParamsFile.targetParameters:
                if targetParam.targetVariable == valueString:
                    processedValue = '{0}'.format(-1 * targetParam.id)
                    break
    return processedValue | python | Apply global pre-processing to values during reading throughout the project. | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L72-L103 | train |
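A hedged sketch of the read-side replacement lookup; the fake ReplaceParamFile below is a hypothetical stand-in exposing only the attributes the function touches (targetParameters, targetVariable, id), and it assumes gsshapy is importable:

# Illustrative only -- SimpleNamespace stands in for a gsshapy.orm.ReplaceParamFile.
from types import SimpleNamespace
from gsshapy.lib.parsetools import valueReadPreprocessor

fakeReplaceParams = SimpleNamespace(
    targetParameters=[SimpleNamespace(targetVariable='[ROUGHNESS]', id=3)])

valueReadPreprocessor('0.05', fakeReplaceParams)         # plain value -> '0.05' (unchanged)
valueReadPreprocessor('[ROUGHNESS]', fakeReplaceParams)  # replacement variable -> '-3' (negative id)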
CI-WATER/gsshapy | gsshapy/lib/parsetools.py | valueWritePreprocessor | def valueWritePreprocessor(valueString, replaceParamsFile=None):
    """
    Look up variable name in replace param file for the negative id given and return it.
    Args:
        valueString (str): String representing the value to be preprocessed.
        replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
            replacement variables are included in the project.
    Returns:
        str: Processed value as a string
    """
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
        return valueString
    # Default
    variableString = valueString
    # Check for replacement variables
    if replaceParamsFile is not None:
        # Set Default
        if variableString == REPLACE_NO_VALUE:
            variableString = '[NO_VARIABLE]'
        else:
            try:
                number = int(valueString)
                if number < 0:
                    parameterID = number * -1
                    # Find the matching parameter
                    for targetParam in replaceParamsFile.targetParameters:
                        if targetParam.id == parameterID:
                            variableString = targetParam.targetVariable
                            break
            except:
                pass
    return variableString | python | Look up variable name in replace param file for the negative id given and return it. | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L106-L144 | train |
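And the write-side counterpart with the same hypothetical stand-in: negative ids written back out are mapped to the replacement variable names.

# Illustrative only -- mirrors the read-side sketch above.
from types import SimpleNamespace
from gsshapy.lib.parsetools import valueWritePreprocessor

fakeReplaceParams = SimpleNamespace(
    targetParameters=[SimpleNamespace(targetVariable='[ROUGHNESS]', id=3)])

valueWritePreprocessor('0.05', fakeReplaceParams)  # non-negative value -> '0.05' (unchanged)
valueWritePreprocessor('-3', fakeReplaceParams)    # negative id -> '[ROUGHNESS]'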
bayesimpact/fex | fex/collection.py | Collection.run | def run(self, dataset_path):
    """Run all FeatureExtractors and output results to CSV."""
    features = self._generate_features(self._feature_extractors)
    features.to_csv(dataset_path) | python | Run all FeatureExtractors and output results to CSV. | 2d9b4e9be2bf98847a36055b907411fd5557eb77 | https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/collection.py#L28-L31 | train |
bayesimpact/fex | fex/collection.py | Collection._generate_features | def _generate_features(self, feature_extractors):
    """Run all FeatureExtractors and record results in a key-value format.
    :param feature_extractors: iterable of `FeatureExtractor` objects.
    """
    results = [pd.DataFrame()]
    n_ext = len(feature_extractors)
    for i, extractor in enumerate(feature_extractors):
        log.info("generating: '%s' (%d/%d)", extractor.name, i + 1, n_ext)
        cached_extractor = self._cache[extractor.name]
        if extractor.same(cached_extractor):
            log.info('pulling from cache')
            extractor = cached_extractor
        else:
            log.info('running...')
            extractor.extract()
        results.append(extractor.result)
        if self.cache_path:
            self._cache[extractor.name] = extractor
    if self.cache_path:
        with open(self.cache_path, 'wb') as f:
            pickle.dump(self._cache, f)
    return pd.concat(results, axis=1) | python | Run all FeatureExtractors and record results in a key-value format. | 2d9b4e9be2bf98847a36055b907411fd5557eb77 | https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/collection.py#L33-L58 | train |
CI-WATER/gsshapy | gsshapy/base/file_base.py | GsshaPyFileObjectBase.read | def read(self, directory, filename, session, spatial=False,
         spatialReferenceID=4236, replaceParamFile=None, **kwargs):
    """
    Generic read file into database method.
    Args:
        directory (str): Directory containing the file to be read.
        filename (str): Name of the file which will be read (e.g.: 'example.prj').
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
        spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
            Defaults to False.
        spatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if
            spatial is True. Defaults to srid 4236.
        replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if
            the file you are reading contains replacement parameters.
    """
    # Read parameter derivatives
    path = os.path.join(directory, filename)
    filename_split = filename.split('.')
    name = filename_split[0]
    # Default file extension
    extension = ''
    if len(filename_split) >= 2:
        extension = filename_split[-1]
    if os.path.isfile(path):
        # Add self to session
        session.add(self)
        # Read
        self._read(directory, filename, session, path, name, extension,
                   spatial, spatialReferenceID, replaceParamFile, **kwargs)
        # Commit to database
        self._commit(session, self.COMMIT_ERROR_MESSAGE)
    else:
        # Rollback the session if the file doesn't exist
        session.rollback()
        # Print warning
        log.warning('Could not find file named {0}. File not read.'.format(filename)) | python | Generic read file into database method. | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L37-L80 | train |
CI-WATER/gsshapy | gsshapy/base/file_base.py | GsshaPyFileObjectBase.write | def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """
    Write from database back to file.
    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
        directory (str): Directory where the file will be written.
        name (str): The name of the file that will be created (including the file extension is optional).
        replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if
            the file you are writing contains replacement parameters.
    """
    # Assemble Path to file
    name_split = name.split('.')
    name = name_split[0]
    # Default extension
    extension = ''
    if len(name_split) >= 2:
        extension = name_split[-1]
    # Run name preprocessor method if present
    try:
        name = self._namePreprocessor(name)
    except:
        'DO NOTHING'
    if extension == '':
        filename = '{0}.{1}'.format(name, self.fileExtension)
    else:
        filename = '{0}.{1}'.format(name, extension)
    filePath = os.path.join(directory, filename)
    with io_open(filePath, 'w') as openFile:
        # Write Lines
        self._write(session=session,
                    openFile=openFile,
                    replaceParamFile=replaceParamFile,
                    **kwargs) | python | Write from database back to file. | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L82-L122 | train |
CI-WATER/gsshapy | gsshapy/base/file_base.py | GsshaPyFileObjectBase._commit | def _commit(self, session, errorMessage):
    """
    Custom commit function for file objects
    """
    try:
        session.commit()
    except IntegrityError:
        # Raise special error if the commit fails due to empty files
        log.error('Commit to database failed. %s' % errorMessage)
    except:
        # Raise other errors as normal
        raise | python | Custom commit function for file objects | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L124-L135 | train |
dsoprea/PySecure | versioneer.py | cmd_versioneer.run | def run(self):
    """Create the versioneer.py file."""
    print(" creating %s" % versionfile_source)
    with open(versionfile_source, "w") as f:
        f.write(get_vcs_code())
    ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
    try:
        with open(ipy, "r") as f:
            old = f.read()
    except EnvironmentError:
        old = ""
    if INIT_PY_SNIPPET not in old:
        print(" appending to %s" % ipy)
        with open(ipy, "a") as f:
            f.write(INIT_PY_SNIPPET)
    else:
        print(" %s unmodified" % ipy)
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(get_root(), "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
    do_vcs_install_f(manifest_in, versionfile_source, ipy) | python | Create the versioneer.py file. | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/versioneer.py#L1378-L1435 | train |
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | linkChunk | def linkChunk(key, chunk):
    """
    Parse LINK Chunk Method
    """
    # Extract link type card
    linkType = chunk[1].strip().split()[0]
    # Cases
    if linkType == 'DX':
        # Cross section link type handler
        result = xSectionLink(chunk)
    elif linkType == 'STRUCTURE':
        # Structure link type handler
        result = structureLink(chunk)
    elif linkType in ('RESERVOIR', 'LAKE'):
        # Reservoir link type handler
        result = reservoirLink(chunk)
    return result | python | Parse LINK Chunk Method | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L45-L64 | train |
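A hedged dispatch sketch for linkChunk: the chunk lines below are illustrative rather than a real CIF excerpt, and the first token of the second line is what selects the handler, so this chunk would be routed to structureLink (assumes gsshapy is importable):

# Illustrative only -- fake channel-input chunk lines.
from gsshapy.lib.cif_chunk import linkChunk

chunkLines = ['LINK        2\n',
              'STRUCTURE\n',
              'NUMSTRUCTS  1\n']

result = linkChunk('LINK', chunkLines)  # chunkLines[1] starts with 'STRUCTURE' -> structureLink()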
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | structureLink | def structureLink(lines):
    """
    Parse STRUCTURE LINK Method
    """
    # Constants
    KEYWORDS = ('LINK',
                'STRUCTURE',
                'NUMSTRUCTS',
                'STRUCTTYPE')
    WEIR_KEYWORDS = ('STRUCTTYPE',
                     'CREST_LENGTH',
                     'CREST_LOW_ELEV',
                     'DISCHARGE_COEFF_FORWARD',
                     'DISCHARGE_COEFF_REVERSE',
                     'CREST_LOW_LOC',
                     'STEEP_SLOPE',
                     'SHALLOW_SLOPE')
    CULVERT_KEYWORDS = ('STRUCTTYPE',
                        'UPINVERT',
                        'DOWNINVERT',
                        'INLET_DISCH_COEFF',
                        'REV_FLOW_DISCH_COEFF',
                        'SLOPE',
                        'LENGTH',
                        'ROUGH_COEFF',
                        'DIAMETER',
                        'WIDTH',
                        'HEIGHT')
    WEIRS = ('WEIR', 'SAG_WEIR')
    CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
    CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
    result = {'type': 'STRUCTURE',
              'header': {'link': None,
                         'numstructs': None},
              'structures':[]}
    chunks = pt.chunk(KEYWORDS, lines)
    # Parse chunks associated with each key
    for key, chunkList in iteritems(chunks):
        # Parse each chunk in the chunk list
        for chunk in chunkList:
            # Cases
            if key == 'STRUCTTYPE':
                # Structure handler
                structType = chunk[0].strip().split()[1]
                # Cases
                if structType in WEIRS:
                    weirResult = {'structtype': None,
                                  'crest_length': None,
                                  'crest_low_elev': None,
                                  'discharge_coeff_forward': None,
                                  'discharge_coeff_reverse': None,
                                  'crest_low_loc': None,
                                  'steep_slope': None,
                                  'shallow_slope': None}
                    # Weir type structures handler
                    result['structures'].append(structureChunk(WEIR_KEYWORDS, weirResult, chunk))
                elif structType in CULVERTS:
                    culvertResult = {'structtype': None,
                                     'upinvert': None,
                                     'downinvert': None,
                                     'inlet_disch_coeff': None,
                                     'rev_flow_disch_coeff': None,
                                     'slope': None,
                                     'length': None,
                                     'rough_coeff': None,
                                     'diameter': None,
                                     'width': None,
                                     'height': None}
                    # Culvert type structures handler
                    result['structures'].append(structureChunk(CULVERT_KEYWORDS, culvertResult, chunk))
                elif structType in CURVES:
                    # Curve type handler
                    pass
            elif key != 'STRUCTURE':
                # All other variables header
                result['header'][key.lower()] = chunk[0].strip().split()[1]
    return result | python | Parse STRUCTURE LINK Method |
"None",
",",
"'shallow_slope'",
":",
"None",
"}",
"# Weir type structures handler",
"result",
"[",
"'structures'",
"]",
".",
"append",
"(",
"structureChunk",
"(",
"WEIR_KEYWORDS",
",",
"weirResult",
",",
"chunk",
")",
")",
"elif",
"structType",
"in",
"CULVERTS",
":",
"culvertResult",
"=",
"{",
"'structtype'",
":",
"None",
",",
"'upinvert'",
":",
"None",
",",
"'downinvert'",
":",
"None",
",",
"'inlet_disch_coeff'",
":",
"None",
",",
"'rev_flow_disch_coeff'",
":",
"None",
",",
"'slope'",
":",
"None",
",",
"'length'",
":",
"None",
",",
"'rough_coeff'",
":",
"None",
",",
"'diameter'",
":",
"None",
",",
"'width'",
":",
"None",
",",
"'height'",
":",
"None",
"}",
"# Culvert type structures handler",
"result",
"[",
"'structures'",
"]",
".",
"append",
"(",
"structureChunk",
"(",
"CULVERT_KEYWORDS",
",",
"culvertResult",
",",
"chunk",
")",
")",
"elif",
"structType",
"in",
"CURVES",
":",
"# Curve type handler",
"pass",
"elif",
"key",
"!=",
"'STRUCTURE'",
":",
"# All other variables header",
"result",
"[",
"'header'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"1",
"]",
"return",
"result"
] | Parse STRUCTURE LINK Method | [
"Parse",
"STRUCTURE",
"LINK",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L66-L158 | train |
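For orientation, the snippet below spells out the shape of the dict that structureLink builds for a link holding a single weir; every value is hypothetical. Curve-type structures (RATING_CURVE, SCHEDULED_RELEASE, RULE_CURVE) are recognized but passed over, so they never reach the structures list.

```python
# Illustrative result shape only; field values are made up.
example_result = {
    'type': 'STRUCTURE',
    'header': {'link': '6', 'numstructs': '1'},
    'structures': [{
        'structtype': 'WEIR',
        'crest_length': '10.0',
        'crest_low_elev': '4.5',
        'discharge_coeff_forward': '3.0',
        'discharge_coeff_reverse': '2.8',
        'crest_low_loc': '5.0',
        'steep_slope': None,       # cards absent from the file stay None
        'shallow_slope': None,
    }],
}
print(example_result['structures'][0]['structtype'])  # WEIR
```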
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | xSectionLink | def xSectionLink(lines):
"""
Parse Cross Section Links Method
"""
# Constants
KEYWORDS = ('LINK',
'DX',
'TRAPEZOID',
'TRAPEZOID_ERODE',
'TRAPEZOID_SUBSURFACE',
'ERODE_TRAPEZOID',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAPEZOID',
'SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'ERODE_TRAPEZOID_SUBSURFACE',
'ERODE_SUBSURFACE_TRAPEZOID',
'SUBSURFACE_TRAPEZOID_ERODE',
'SUBSURFACE_ERODE_TRAPEZOID',
'BREAKPOINT',
'BREAKPOINT_ERODE',
'BREAKPOINT_SUBSURFACE',
'ERODE_BREAKPOINT',
'ERODE_SUBSURFACE',
'SUBSURFACE_BREAKPOINT',
'SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'ERODE_BREAKPOINT_SUBSURFACE',
'ERODE_SUBSURFACE_BREAKPOINT',
'SUBSURFACE_BREAKPOINT_ERODE',
'SUBSURFACE_ERODE_BREAKPOINT',
'TRAP',
'TRAP_ERODE',
'TRAP_SUBSURFACE',
'ERODE_TRAP',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAP',
'SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'ERODE_TRAP_SUBSURFACE',
'ERODE_SUBSURFACE_TRAP',
'SUBSURFACE_TRAP_ERODE',
'SUBSURFACE_ERODE_TRAP',
'NODES',
'NODE',
'XSEC')
ERODE = ('TRAPEZOID_ERODE',
'TRAP_ERODE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_ERODE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
SUBSURFACE = ('TRAPEZOID_SUBSURFACE',
'TRAP_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
result = {'type': 'XSEC',
'header': {'link': None,
'dx': None,
'xSecType': None,
'nodes': None,
'erode': False,
'subsurface': False},
'xSection': None,
'nodes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'NODE':
# Extract node x and y
result['nodes'].append(nodeChunk(chunk))
elif key == 'XSEC':
# Extract cross section information
result['xSection'] = xSectionChunk(chunk)
elif ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):
# Cross section type handler
result['header']['xSecType'] = key
elif key in ERODE:
# Erode handler
result['header']['erode'] = True
elif key in SUBSURFACE:
# Subsurface handler
result['header']['subsurface'] = True
else:
# Extract all other variables into header
result['header'][key.lower()] = chunk[0].strip().split()[1]
return result | python | def xSectionLink(lines):
"""
Parse Cross Section Links Method
"""
# Constants
KEYWORDS = ('LINK',
'DX',
'TRAPEZOID',
'TRAPEZOID_ERODE',
'TRAPEZOID_SUBSURFACE',
'ERODE_TRAPEZOID',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAPEZOID',
'SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'ERODE_TRAPEZOID_SUBSURFACE',
'ERODE_SUBSURFACE_TRAPEZOID',
'SUBSURFACE_TRAPEZOID_ERODE',
'SUBSURFACE_ERODE_TRAPEZOID',
'BREAKPOINT',
'BREAKPOINT_ERODE',
'BREAKPOINT_SUBSURFACE',
'ERODE_BREAKPOINT',
'ERODE_SUBSURFACE',
'SUBSURFACE_BREAKPOINT',
'SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'ERODE_BREAKPOINT_SUBSURFACE',
'ERODE_SUBSURFACE_BREAKPOINT',
'SUBSURFACE_BREAKPOINT_ERODE',
'SUBSURFACE_ERODE_BREAKPOINT',
'TRAP',
'TRAP_ERODE',
'TRAP_SUBSURFACE',
'ERODE_TRAP',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAP',
'SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'ERODE_TRAP_SUBSURFACE',
'ERODE_SUBSURFACE_TRAP',
'SUBSURFACE_TRAP_ERODE',
'SUBSURFACE_ERODE_TRAP',
'NODES',
'NODE',
'XSEC')
ERODE = ('TRAPEZOID_ERODE',
'TRAP_ERODE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_ERODE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
SUBSURFACE = ('TRAPEZOID_SUBSURFACE',
'TRAP_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
result = {'type': 'XSEC',
'header': {'link': None,
'dx': None,
'xSecType': None,
'nodes': None,
'erode': False,
'subsurface': False},
'xSection': None,
'nodes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'NODE':
# Extract node x and y
result['nodes'].append(nodeChunk(chunk))
elif key == 'XSEC':
# Extract cross section information
result['xSection'] = xSectionChunk(chunk)
elif ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):
# Cross section type handler
result['header']['xSecType'] = key
elif key in ERODE:
# Erode handler
result['header']['erode'] = True
elif key in SUBSURFACE:
# Subsurface handler
result['header']['subsurface'] = True
else:
# Extract all other variables into header
result['header'][key.lower()] = chunk[0].strip().split()[1]
return result | [
"def",
"xSectionLink",
"(",
"lines",
")",
":",
"# Constants",
"KEYWORDS",
"=",
"(",
"'LINK'",
",",
"'DX'",
",",
"'TRAPEZOID'",
",",
"'TRAPEZOID_ERODE'",
",",
"'TRAPEZOID_SUBSURFACE'",
",",
"'ERODE_TRAPEZOID'",
",",
"'ERODE_SUBSURFACE'",
",",
"'SUBSURFACE_TRAPEZOID'",
",",
"'SUBSURFACE_ERODE'",
",",
"'TRAPEZOID_ERODE_SUBSURFACE'",
",",
"'TRAPEZOID_SUBSURFACE_ERODE'",
",",
"'ERODE_TRAPEZOID_SUBSURFACE'",
",",
"'ERODE_SUBSURFACE_TRAPEZOID'",
",",
"'SUBSURFACE_TRAPEZOID_ERODE'",
",",
"'SUBSURFACE_ERODE_TRAPEZOID'",
",",
"'BREAKPOINT'",
",",
"'BREAKPOINT_ERODE'",
",",
"'BREAKPOINT_SUBSURFACE'",
",",
"'ERODE_BREAKPOINT'",
",",
"'ERODE_SUBSURFACE'",
",",
"'SUBSURFACE_BREAKPOINT'",
",",
"'SUBSURFACE_ERODE'",
",",
"'BREAKPOINT_ERODE_SUBSURFACE'",
",",
"'BREAKPOINT_SUBSURFACE_ERODE'",
",",
"'ERODE_BREAKPOINT_SUBSURFACE'",
",",
"'ERODE_SUBSURFACE_BREAKPOINT'",
",",
"'SUBSURFACE_BREAKPOINT_ERODE'",
",",
"'SUBSURFACE_ERODE_BREAKPOINT'",
",",
"'TRAP'",
",",
"'TRAP_ERODE'",
",",
"'TRAP_SUBSURFACE'",
",",
"'ERODE_TRAP'",
",",
"'ERODE_SUBSURFACE'",
",",
"'SUBSURFACE_TRAP'",
",",
"'SUBSURFACE_ERODE'",
",",
"'TRAP_ERODE_SUBSURFACE'",
",",
"'TRAP_SUBSURFACE_ERODE'",
",",
"'ERODE_TRAP_SUBSURFACE'",
",",
"'ERODE_SUBSURFACE_TRAP'",
",",
"'SUBSURFACE_TRAP_ERODE'",
",",
"'SUBSURFACE_ERODE_TRAP'",
",",
"'NODES'",
",",
"'NODE'",
",",
"'XSEC'",
")",
"ERODE",
"=",
"(",
"'TRAPEZOID_ERODE'",
",",
"'TRAP_ERODE'",
",",
"'TRAP_SUBSURFACE_ERODE'",
",",
"'TRAP_ERODE_SUBSURFACE'",
",",
"'BREAKPOINT_ERODE'",
",",
"'TRAPEZOID_SUBSURFACE_ERODE'",
",",
"'TRAPEZOID_ERODE_SUBSURFACE'",
",",
"'BREAKPOINT_SUBSURFACE_ERODE'",
",",
"'BREAKPOINT_ERODE_SUBSURFACE'",
")",
"SUBSURFACE",
"=",
"(",
"'TRAPEZOID_SUBSURFACE'",
",",
"'TRAP_SUBSURFACE'",
",",
"'TRAP_SUBSURFACE_ERODE'",
",",
"'TRAP_ERODE_SUBSURFACE'",
",",
"'BREAKPOINT_SUBSURFACE'",
",",
"'TRAPEZOID_SUBSURFACE_ERODE'",
",",
"'TRAPEZOID_ERODE_SUBSURFACE'",
",",
"'BREAKPOINT_SUBSURFACE_ERODE'",
",",
"'BREAKPOINT_ERODE_SUBSURFACE'",
")",
"result",
"=",
"{",
"'type'",
":",
"'XSEC'",
",",
"'header'",
":",
"{",
"'link'",
":",
"None",
",",
"'dx'",
":",
"None",
",",
"'xSecType'",
":",
"None",
",",
"'nodes'",
":",
"None",
",",
"'erode'",
":",
"False",
",",
"'subsurface'",
":",
"False",
"}",
",",
"'xSection'",
":",
"None",
",",
"'nodes'",
":",
"[",
"]",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Cases",
"if",
"key",
"==",
"'NODE'",
":",
"# Extract node x and y",
"result",
"[",
"'nodes'",
"]",
".",
"append",
"(",
"nodeChunk",
"(",
"chunk",
")",
")",
"elif",
"key",
"==",
"'XSEC'",
":",
"# Extract cross section information",
"result",
"[",
"'xSection'",
"]",
"=",
"xSectionChunk",
"(",
"chunk",
")",
"elif",
"(",
"'TRAPEZOID'",
"in",
"key",
")",
"or",
"(",
"'BREAKPOINT'",
"in",
"key",
")",
"or",
"(",
"'TRAP'",
"in",
"key",
")",
":",
"# Cross section type handler",
"result",
"[",
"'header'",
"]",
"[",
"'xSecType'",
"]",
"=",
"key",
"elif",
"key",
"in",
"ERODE",
":",
"# Erode handler",
"result",
"[",
"'header'",
"]",
"[",
"'erode'",
"]",
"=",
"True",
"elif",
"key",
"in",
"SUBSURFACE",
":",
"# Subsurface handler",
"result",
"[",
"'header'",
"]",
"[",
"'subsurface'",
"]",
"=",
"True",
"else",
":",
"# Extract all other variables into header",
"result",
"[",
"'header'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"1",
"]",
"return",
"result"
] | Parse Cross Section Links Method | [
"Parse",
"Cross",
"Section",
"Links",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L160-L273 | train |
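One detail worth spelling out for xSectionLink: because the substring test for TRAPEZOID/BREAKPOINT/TRAP comes first in the elif chain, a compound card such as TRAPEZOID_ERODE records the card name as xSecType, and the later ERODE/SUBSURFACE membership branches are not reached for it. The sketch below reproduces that chain with abbreviated tuples; the card value is hypothetical.

```python
# Faithful mini-rendering of the elif chain in xSectionLink (tuples abbreviated).
ERODE = ('TRAPEZOID_ERODE',)
SUBSURFACE = ('TRAPEZOID_SUBSURFACE',)
header = {'xSecType': None, 'erode': False, 'subsurface': False}
key = 'TRAPEZOID_ERODE'
if ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):
    header['xSecType'] = key
elif key in ERODE:
    header['erode'] = True
elif key in SUBSURFACE:
    header['subsurface'] = True
print(header)  # {'xSecType': 'TRAPEZOID_ERODE', 'erode': False, 'subsurface': False}
```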
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | reservoirLink | def reservoirLink(lines):
"""
Parse RESERVOIR Link Method
"""
# Constants
KEYWORDS = ('LINK',
'RESERVOIR',
'RES_MINWSE',
'RES_INITWSE',
'RES_MAXWSE',
'RES_NUMPTS',
'LAKE',
'MINWSE',
'INITWSE',
'MAXWSE',
'NUMPTS')
result = {'header': {'link': None,
'res_minwse': None,
'res_initwse': None,
'res_maxwse': None,
'res_numpts': None,
'minwse': None,
'initwse': None,
'maxwse': None,
'numpts': None},
'type': None,
'points': []}
pair = {'i': None,
'j': None}
# Rechunk the chunk
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if key in ('NUMPTS', 'RES_NUMPTS'):
# Points handler
result['header'][key.lower()] = schunk[1]
# Parse points
for idx in range(1, len(chunk)):
schunk = chunk[idx].strip().split()
for count, ordinate in enumerate(schunk):
# Divide ordinates into ij pairs
if (count % 2) == 0:
pair['i'] = ordinate
else:
pair['j'] = ordinate
result['points'].append(pair)
pair = {'i': None,
'j': None}
elif key in ('LAKE', 'RESERVOIR'):
# Type handler
result['type'] = schunk[0]
else:
# Header variables handler
result['header'][key.lower()] = schunk[1]
return result | python | def reservoirLink(lines):
"""
Parse RESERVOIR Link Method
"""
# Constants
KEYWORDS = ('LINK',
'RESERVOIR',
'RES_MINWSE',
'RES_INITWSE',
'RES_MAXWSE',
'RES_NUMPTS',
'LAKE',
'MINWSE',
'INITWSE',
'MAXWSE',
'NUMPTS')
result = {'header': {'link': None,
'res_minwse': None,
'res_initwse': None,
'res_maxwse': None,
'res_numpts': None,
'minwse': None,
'initwse': None,
'maxwse': None,
'numpts': None},
'type': None,
'points': []}
pair = {'i': None,
'j': None}
# Rechunk the chunk
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if key in ('NUMPTS', 'RES_NUMPTS'):
# Points handler
result['header'][key.lower()] = schunk[1]
# Parse points
for idx in range(1, len(chunk)):
schunk = chunk[idx].strip().split()
for count, ordinate in enumerate(schunk):
# Divide ordinates into ij pairs
if (count % 2) == 0:
pair['i'] = ordinate
else:
pair['j'] = ordinate
result['points'].append(pair)
pair = {'i': None,
'j': None}
elif key in ('LAKE', 'RESERVOIR'):
# Type handler
result['type'] = schunk[0]
else:
# Header variables handler
result['header'][key.lower()] = schunk[1]
return result | [
"def",
"reservoirLink",
"(",
"lines",
")",
":",
"# Constants",
"KEYWORDS",
"=",
"(",
"'LINK'",
",",
"'RESERVOIR'",
",",
"'RES_MINWSE'",
",",
"'RES_INITWSE'",
",",
"'RES_MAXWSE'",
",",
"'RES_NUMPTS'",
",",
"'LAKE'",
",",
"'MINWSE'",
",",
"'INITWSE'",
",",
"'MAXWSE'",
",",
"'NUMPTS'",
")",
"result",
"=",
"{",
"'header'",
":",
"{",
"'link'",
":",
"None",
",",
"'res_minwse'",
":",
"None",
",",
"'res_initwse'",
":",
"None",
",",
"'res_maxwse'",
":",
"None",
",",
"'res_numpts'",
":",
"None",
",",
"'minwse'",
":",
"None",
",",
"'initwse'",
":",
"None",
",",
"'maxwse'",
":",
"None",
",",
"'numpts'",
":",
"None",
"}",
",",
"'type'",
":",
"None",
",",
"'points'",
":",
"[",
"]",
"}",
"pair",
"=",
"{",
"'i'",
":",
"None",
",",
"'j'",
":",
"None",
"}",
"# Rechunk the chunk",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"key",
"in",
"(",
"'NUMPTS'",
",",
"'RES_NUMPTS'",
")",
":",
"# Points handler",
"result",
"[",
"'header'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"# Parse points",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"chunk",
")",
")",
":",
"schunk",
"=",
"chunk",
"[",
"idx",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"for",
"count",
",",
"ordinate",
"in",
"enumerate",
"(",
"schunk",
")",
":",
"# Divide ordinates into ij pairs",
"if",
"(",
"count",
"%",
"2",
")",
"==",
"0",
":",
"pair",
"[",
"'i'",
"]",
"=",
"ordinate",
"else",
":",
"pair",
"[",
"'j'",
"]",
"=",
"ordinate",
"result",
"[",
"'points'",
"]",
".",
"append",
"(",
"pair",
")",
"pair",
"=",
"{",
"'i'",
":",
"None",
",",
"'j'",
":",
"None",
"}",
"elif",
"key",
"in",
"(",
"'LAKE'",
",",
"'RESERVOIR'",
")",
":",
"# Type handler",
"result",
"[",
"'type'",
"]",
"=",
"schunk",
"[",
"0",
"]",
"else",
":",
"# Header variables handler",
"result",
"[",
"'header'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"return",
"result"
] | Parse RESERVOIR Link Method | [
"Parse",
"RESERVOIR",
"Link",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L275-L342 | train |
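The embankment-point handling in reservoirLink pairs ordinates off two at a time into (i, j) grid coordinates. A standalone sketch of that pairing, using made-up cell indices:

```python
# Pairs ordinates into i/j cell coordinates, as the NUMPTS/RES_NUMPTS branch does.
line = '  23 41  24 41  25 42'
ordinates = line.strip().split()
points = [{'i': i, 'j': j} for i, j in zip(ordinates[0::2], ordinates[1::2])]
print(points)  # [{'i': '23', 'j': '41'}, {'i': '24', 'j': '41'}, {'i': '25', 'j': '42'}]
```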
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | nodeChunk | def nodeChunk(lines):
"""
Parse NODE Method
"""
# Constants
KEYWORDS = ('NODE',
'X_Y',
'ELEV')
result = {'node': None,
'x': None,
'y': None,
'elev': None}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
if key == 'X_Y':
result['x'] = schunk[1]
result['y'] = schunk[2]
else:
result[key.lower()] = schunk[1]
return result | python | def nodeChunk(lines):
"""
Parse NODE Method
"""
# Constants
KEYWORDS = ('NODE',
'X_Y',
'ELEV')
result = {'node': None,
'x': None,
'y': None,
'elev': None}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
if key == 'X_Y':
result['x'] = schunk[1]
result['y'] = schunk[2]
else:
result[key.lower()] = schunk[1]
return result | [
"def",
"nodeChunk",
"(",
"lines",
")",
":",
"# Constants",
"KEYWORDS",
"=",
"(",
"'NODE'",
",",
"'X_Y'",
",",
"'ELEV'",
")",
"result",
"=",
"{",
"'node'",
":",
"None",
",",
"'x'",
":",
"None",
",",
"'y'",
":",
"None",
",",
"'elev'",
":",
"None",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"key",
"==",
"'X_Y'",
":",
"result",
"[",
"'x'",
"]",
"=",
"schunk",
"[",
"1",
"]",
"result",
"[",
"'y'",
"]",
"=",
"schunk",
"[",
"2",
"]",
"else",
":",
"result",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"return",
"result"
] | Parse NODE Method | [
"Parse",
"NODE",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L344-L371 | train |
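nodeChunk's only special case is the X_Y card, which carries two values on one line. A tiny sketch with hypothetical coordinates:

```python
# X_Y card parsing as done in nodeChunk; coordinates are made up.
line = 'X_Y  458726.3  4512034.8'
schunk = line.strip().split()
node = {'node': None, 'x': schunk[1], 'y': schunk[2], 'elev': None}
print(node)  # {'node': None, 'x': '458726.3', 'y': '4512034.8', 'elev': None}
```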
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | xSectionChunk | def xSectionChunk(lines):
"""
Parse XSEC Method
"""
# Constants
KEYWORDS = ('MANNINGS_N',
'BOTTOM_WIDTH',
'BANKFULL_DEPTH',
'SIDE_SLOPE',
'NPAIRS',
'NUM_INTERP',
'X1',
'ERODE',
'MAX_EROSION',
'SUBSURFACE',
'M_RIVER',
'K_RIVER')
result = {'mannings_n': None,
'bottom_width': None,
'bankfull_depth': None,
'side_slope': None,
'npairs': None,
'num_interp': None,
'erode': False,
'subsurface': False,
'max_erosion': None,
'm_river': None,
'k_river': None,
'breakpoints': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Cases
if key == 'X1':
# Extract breakpoint XY pairs
x = schunk[1]
y = schunk[2]
result['breakpoints'].append({'x': x, 'y': y})
if key in ('SUBSURFACE', 'ERODE'):
# Set booleans
result[key.lower()] = True
else:
# Extract value
result[key.lower()] = schunk[1]
return result | python | def xSectionChunk(lines):
"""
Parse XSEC Method
"""
# Constants
KEYWORDS = ('MANNINGS_N',
'BOTTOM_WIDTH',
'BANKFULL_DEPTH',
'SIDE_SLOPE',
'NPAIRS',
'NUM_INTERP',
'X1',
'ERODE',
'MAX_EROSION',
'SUBSURFACE',
'M_RIVER',
'K_RIVER')
result = {'mannings_n': None,
'bottom_width': None,
'bankfull_depth': None,
'side_slope': None,
'npairs': None,
'num_interp': None,
'erode': False,
'subsurface': False,
'max_erosion': None,
'm_river': None,
'k_river': None,
'breakpoints': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Cases
if key == 'X1':
# Extract breakpoint XY pairs
x = schunk[1]
y = schunk[2]
result['breakpoints'].append({'x': x, 'y': y})
if key in ('SUBSURFACE', 'ERODE'):
# Set booleans
result[key.lower()] = True
else:
# Extract value
result[key.lower()] = schunk[1]
return result | [
"def",
"xSectionChunk",
"(",
"lines",
")",
":",
"# Constants",
"KEYWORDS",
"=",
"(",
"'MANNINGS_N'",
",",
"'BOTTOM_WIDTH'",
",",
"'BANKFULL_DEPTH'",
",",
"'SIDE_SLOPE'",
",",
"'NPAIRS'",
",",
"'NUM_INTERP'",
",",
"'X1'",
",",
"'ERODE'",
",",
"'MAX_EROSION'",
",",
"'SUBSURFACE'",
",",
"'M_RIVER'",
",",
"'K_RIVER'",
")",
"result",
"=",
"{",
"'mannings_n'",
":",
"None",
",",
"'bottom_width'",
":",
"None",
",",
"'bankfull_depth'",
":",
"None",
",",
"'side_slope'",
":",
"None",
",",
"'npairs'",
":",
"None",
",",
"'num_interp'",
":",
"None",
",",
"'erode'",
":",
"False",
",",
"'subsurface'",
":",
"False",
",",
"'max_erosion'",
":",
"None",
",",
"'m_river'",
":",
"None",
",",
"'k_river'",
":",
"None",
",",
"'breakpoints'",
":",
"[",
"]",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Strip and split the line (only one item in each list)",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"key",
"==",
"'X1'",
":",
"# Extract breakpoint XY pairs",
"x",
"=",
"schunk",
"[",
"1",
"]",
"y",
"=",
"schunk",
"[",
"2",
"]",
"result",
"[",
"'breakpoints'",
"]",
".",
"append",
"(",
"{",
"'x'",
":",
"x",
",",
"'y'",
":",
"y",
"}",
")",
"if",
"key",
"in",
"(",
"'SUBSURFACE'",
",",
"'ERODE'",
")",
":",
"# Set booleans",
"result",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"True",
"else",
":",
"# Extract value",
"result",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"return",
"result"
] | Parse XSEC Method | [
"Parse",
"XSEC",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L373-L427 | train |
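In xSectionChunk, each X1 card contributes one breakpoint to the list while the scalar cards fill single keys. A standalone sketch of the breakpoint accumulation with made-up station/elevation pairs:

```python
# Accumulating X1 breakpoints the way xSectionChunk does; values are hypothetical.
breakpoints = []
for line in ['X1   0.0  10.0\n', 'X1   5.0   7.5\n', 'X1  10.0  10.0\n']:
    schunk = line.strip().split()
    breakpoints.append({'x': schunk[1], 'y': schunk[2]})
print(breakpoints)
```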
CI-WATER/gsshapy | gsshapy/lib/cif_chunk.py | structureChunk | def structureChunk(keywords, resultDict, lines):
"""
Parse Weir and Culvert Structures Method
"""
chunks = pt.chunk(keywords, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Extract values and assign to appropriate key in resultDict
resultDict[key.lower()] = schunk[1]
return resultDict | python | def structureChunk(keywords, resultDict, lines):
"""
Parse Weir and Culvert Structures Method
"""
chunks = pt.chunk(keywords, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Extract values and assign to appropriate key in resultDict
resultDict[key.lower()] = schunk[1]
return resultDict | [
"def",
"structureChunk",
"(",
"keywords",
",",
"resultDict",
",",
"lines",
")",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"keywords",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Strip and split the line (only one item in each list)",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Extract values and assign to appropriate key in resultDict",
"resultDict",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"return",
"resultDict"
] | Parse Weir and Culvert Structures Method | [
"Parse",
"Weir",
"and",
"Culvert",
"Structures",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L429-L445 | train |
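structureChunk is the generic card reader shared by the weir and culvert handlers: every card becomes a lower-cased key holding the card's second token. A self-contained sketch with hypothetical weir cards:

```python
# Generic card -> dict extraction, as structureChunk does; card values are made up.
lines = ['CREST_LENGTH   10.0\n', 'CREST_LOW_ELEV 4.5\n']
resultDict = {'crest_length': None, 'crest_low_elev': None}
for line in lines:
    schunk = line.strip().split()
    resultDict[schunk[0].lower()] = schunk[1]
print(resultDict)  # {'crest_length': '10.0', 'crest_low_elev': '4.5'}
```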
Robpol86/etaprogress | etaprogress/components/bars.py | BarUndefinedAnimated.bar | def bar(self, width, **_):
"""Returns the completed progress bar. Every time this is called the animation moves.
Positional arguments:
width -- the width of the entire bar (including borders).
"""
width -= self._width_offset
self._position += self._direction
# Change direction.
if self._position <= 0 and self._direction < 0:
self._position = 0
self._direction = 1
elif self._position > width:
self._position = width - 1
self._direction = -1
final_bar = (
self.CHAR_LEFT_BORDER +
self.CHAR_EMPTY * self._position +
self.CHAR_ANIMATED +
self.CHAR_EMPTY * (width - self._position) +
self.CHAR_RIGHT_BORDER
)
return final_bar | python | def bar(self, width, **_):
"""Returns the completed progress bar. Every time this is called the animation moves.
Positional arguments:
width -- the width of the entire bar (including borders).
"""
width -= self._width_offset
self._position += self._direction
# Change direction.
if self._position <= 0 and self._direction < 0:
self._position = 0
self._direction = 1
elif self._position > width:
self._position = width - 1
self._direction = -1
final_bar = (
self.CHAR_LEFT_BORDER +
self.CHAR_EMPTY * self._position +
self.CHAR_ANIMATED +
self.CHAR_EMPTY * (width - self._position) +
self.CHAR_RIGHT_BORDER
)
return final_bar | [
"def",
"bar",
"(",
"self",
",",
"width",
",",
"*",
"*",
"_",
")",
":",
"width",
"-=",
"self",
".",
"_width_offset",
"self",
".",
"_position",
"+=",
"self",
".",
"_direction",
"# Change direction.",
"if",
"self",
".",
"_position",
"<=",
"0",
"and",
"self",
".",
"_direction",
"<",
"0",
":",
"self",
".",
"_position",
"=",
"0",
"self",
".",
"_direction",
"=",
"1",
"elif",
"self",
".",
"_position",
">",
"width",
":",
"self",
".",
"_position",
"=",
"width",
"-",
"1",
"self",
".",
"_direction",
"=",
"-",
"1",
"final_bar",
"=",
"(",
"self",
".",
"CHAR_LEFT_BORDER",
"+",
"self",
".",
"CHAR_EMPTY",
"*",
"self",
".",
"_position",
"+",
"self",
".",
"CHAR_ANIMATED",
"+",
"self",
".",
"CHAR_EMPTY",
"*",
"(",
"width",
"-",
"self",
".",
"_position",
")",
"+",
"self",
".",
"CHAR_RIGHT_BORDER",
")",
"return",
"final_bar"
] | Returns the completed progress bar. Every time this is called the animation moves.
Positional arguments:
width -- the width of the entire bar (including borders). | [
"Returns",
"the",
"completed",
"progress",
"bar",
".",
"Every",
"time",
"this",
"is",
"called",
"the",
"animation",
"moves",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/bars.py#L34-L58 | train |
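To see the bouncing animation without pulling in the rest of etaprogress, the sketch below re-implements the same position/direction bookkeeping in a throwaway class; the border and marker characters and the width offset are assumptions, not the library's actual class attributes.

```python
# Standalone re-implementation of the bounce logic above (characters are assumed).
class MiniAnimatedBar(object):
    CHAR_LEFT_BORDER = '['
    CHAR_RIGHT_BORDER = ']'
    CHAR_EMPTY = ' '
    CHAR_ANIMATED = '?'

    def __init__(self):
        self._width_offset = len(self.CHAR_LEFT_BORDER) + len(self.CHAR_RIGHT_BORDER)
        self._position = 0
        self._direction = 1

    def bar(self, width):
        width -= self._width_offset
        self._position += self._direction
        if self._position <= 0 and self._direction < 0:
            self._position = 0
            self._direction = 1
        elif self._position > width:
            self._position = width - 1
            self._direction = -1
        return (self.CHAR_LEFT_BORDER + self.CHAR_EMPTY * self._position +
                self.CHAR_ANIMATED + self.CHAR_EMPTY * (width - self._position) +
                self.CHAR_RIGHT_BORDER)

mini = MiniAnimatedBar()
for _ in range(4):
    print(mini.bar(12))  # the '?' marker walks right, then bounces back at the edge
```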
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._read | def _read(self, directory, filename, session, path, name, extension,
spatial=False, spatialReferenceID=4236, replaceParamFile=None,
readIndexMaps=True):
"""
Mapping Table Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'INDEX_MAP': mtc.indexMapChunk,
'ROUGHNESS': mtc.mapTableChunk,
'INTERCEPTION': mtc.mapTableChunk,
'RETENTION': mtc.mapTableChunk,
'GREEN_AMPT_INFILTRATION': mtc.mapTableChunk,
'GREEN_AMPT_INITIAL_SOIL_MOISTURE': mtc.mapTableChunk,
'RICHARDS_EQN_INFILTRATION_BROOKS': mtc.mapTableChunk,
'RICHARDS_EQN_INFILTRATION_HAVERCAMP': mtc.mapTableChunk,
'EVAPOTRANSPIRATION': mtc.mapTableChunk,
'WELL_TABLE': mtc.mapTableChunk,
'OVERLAND_BOUNDARY': mtc.mapTableChunk,
'TIME_SERIES_INDEX': mtc.mapTableChunk,
'GROUNDWATER': mtc.mapTableChunk,
'GROUNDWATER_BOUNDARY': mtc.mapTableChunk,
'AREA_REDUCTION': mtc.mapTableChunk,
'WETLAND_PROPERTIES': mtc.mapTableChunk,
'MULTI_LAYER_SOIL': mtc.mapTableChunk,
'SOIL_EROSION_PROPS': mtc.mapTableChunk,
'CONTAMINANT_TRANSPORT': mtc.contamChunk,
'SEDIMENTS': mtc.sedimentChunk}
indexMaps = dict()
mapTables = []
# Parse file into chunks associated with keywords/cards
with io_open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Call chunk specific parsers for each chunk
result = KEYWORDS[key](key, chunk)
# Index Map handler
if key == 'INDEX_MAP':
# Create GSSHAPY IndexMap object from result object
indexMap = IndexMap(name=result['idxName'])
# Dictionary used to map index maps to mapping tables
indexMaps[result['idxName']] = indexMap
# Associate IndexMap with MapTableFile
indexMap.mapTableFile = self
if readIndexMaps:
# Invoke IndexMap read method
indexMap.read(directory=directory, filename=result['filename'], session=session,
spatial=spatial, spatialReferenceID=spatialReferenceID)
else:
# add path to file
indexMap.filename = result['filename']
# Map Table handler
else:
# Create a list of all the map tables in the file
if result:
mapTables.append(result)
# Create GSSHAPY ORM objects with the resulting objects that are
# returned from the parser functions
self._createGsshaPyObjects(mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID) | python | def _read(self, directory, filename, session, path, name, extension,
spatial=False, spatialReferenceID=4236, replaceParamFile=None,
readIndexMaps=True):
"""
Mapping Table Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'INDEX_MAP': mtc.indexMapChunk,
'ROUGHNESS': mtc.mapTableChunk,
'INTERCEPTION': mtc.mapTableChunk,
'RETENTION': mtc.mapTableChunk,
'GREEN_AMPT_INFILTRATION': mtc.mapTableChunk,
'GREEN_AMPT_INITIAL_SOIL_MOISTURE': mtc.mapTableChunk,
'RICHARDS_EQN_INFILTRATION_BROOKS': mtc.mapTableChunk,
'RICHARDS_EQN_INFILTRATION_HAVERCAMP': mtc.mapTableChunk,
'EVAPOTRANSPIRATION': mtc.mapTableChunk,
'WELL_TABLE': mtc.mapTableChunk,
'OVERLAND_BOUNDARY': mtc.mapTableChunk,
'TIME_SERIES_INDEX': mtc.mapTableChunk,
'GROUNDWATER': mtc.mapTableChunk,
'GROUNDWATER_BOUNDARY': mtc.mapTableChunk,
'AREA_REDUCTION': mtc.mapTableChunk,
'WETLAND_PROPERTIES': mtc.mapTableChunk,
'MULTI_LAYER_SOIL': mtc.mapTableChunk,
'SOIL_EROSION_PROPS': mtc.mapTableChunk,
'CONTAMINANT_TRANSPORT': mtc.contamChunk,
'SEDIMENTS': mtc.sedimentChunk}
indexMaps = dict()
mapTables = []
# Parse file into chunks associated with keywords/cards
with io_open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Call chunk specific parsers for each chunk
result = KEYWORDS[key](key, chunk)
# Index Map handler
if key == 'INDEX_MAP':
# Create GSSHAPY IndexMap object from result object
indexMap = IndexMap(name=result['idxName'])
# Dictionary used to map index maps to mapping tables
indexMaps[result['idxName']] = indexMap
# Associate IndexMap with MapTableFile
indexMap.mapTableFile = self
if readIndexMaps:
# Invoke IndexMap read method
indexMap.read(directory=directory, filename=result['filename'], session=session,
spatial=spatial, spatialReferenceID=spatialReferenceID)
else:
# add path to file
indexMap.filename = result['filename']
# Map Table handler
else:
# Create a list of all the map tables in the file
if result:
mapTables.append(result)
# Create GSSHAPY ORM objects with the resulting objects that are
# returned from the parser functions
self._createGsshaPyObjects(mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"4236",
",",
"replaceParamFile",
"=",
"None",
",",
"readIndexMaps",
"=",
"True",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Dictionary of keywords/cards and parse function names",
"KEYWORDS",
"=",
"{",
"'INDEX_MAP'",
":",
"mtc",
".",
"indexMapChunk",
",",
"'ROUGHNESS'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'INTERCEPTION'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'RETENTION'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'GREEN_AMPT_INFILTRATION'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'GREEN_AMPT_INITIAL_SOIL_MOISTURE'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'RICHARDS_EQN_INFILTRATION_BROOKS'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'RICHARDS_EQN_INFILTRATION_HAVERCAMP'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'EVAPOTRANSPIRATION'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'WELL_TABLE'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'OVERLAND_BOUNDARY'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'TIME_SERIES_INDEX'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'GROUNDWATER'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'GROUNDWATER_BOUNDARY'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'AREA_REDUCTION'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'WETLAND_PROPERTIES'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'MULTI_LAYER_SOIL'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'SOIL_EROSION_PROPS'",
":",
"mtc",
".",
"mapTableChunk",
",",
"'CONTAMINANT_TRANSPORT'",
":",
"mtc",
".",
"contamChunk",
",",
"'SEDIMENTS'",
":",
"mtc",
".",
"sedimentChunk",
"}",
"indexMaps",
"=",
"dict",
"(",
")",
"mapTables",
"=",
"[",
"]",
"# Parse file into chunks associated with keywords/cards",
"with",
"io_open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Call chunk specific parsers for each chunk",
"result",
"=",
"KEYWORDS",
"[",
"key",
"]",
"(",
"key",
",",
"chunk",
")",
"# Index Map handler",
"if",
"key",
"==",
"'INDEX_MAP'",
":",
"# Create GSSHAPY IndexMap object from result object",
"indexMap",
"=",
"IndexMap",
"(",
"name",
"=",
"result",
"[",
"'idxName'",
"]",
")",
"# Dictionary used to map index maps to mapping tables",
"indexMaps",
"[",
"result",
"[",
"'idxName'",
"]",
"]",
"=",
"indexMap",
"# Associate IndexMap with MapTableFile",
"indexMap",
".",
"mapTableFile",
"=",
"self",
"if",
"readIndexMaps",
":",
"# Invoke IndexMap read method",
"indexMap",
".",
"read",
"(",
"directory",
"=",
"directory",
",",
"filename",
"=",
"result",
"[",
"'filename'",
"]",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"else",
":",
"# add path to file",
"indexMap",
".",
"filename",
"=",
"result",
"[",
"'filename'",
"]",
"# Map Table handler",
"else",
":",
"# Create a list of all the map tables in the file",
"if",
"result",
":",
"mapTables",
".",
"append",
"(",
"result",
")",
"# Create GSSHAPY ORM objects with the resulting objects that are",
"# returned from the parser functions",
"self",
".",
"_createGsshaPyObjects",
"(",
"mapTables",
",",
"indexMaps",
",",
"replaceParamFile",
",",
"directory",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")"
] | Mapping Table Read from File Method | [
"Mapping",
"Table",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L86-L159 | train |
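The core pattern in _read is a table of card names mapped to parser callables, applied to whatever chunks the pre-parser produced. A minimal stand-in with a toy parser and a hypothetical INDEX_MAP line (gsshapy's real parsers and chunking live in mtc and pt):

```python
# Toy keyword-to-parser dispatch; the parser and the card line are stand-ins.
def parse_index_map(key, chunk):
    parts = chunk[0].split('"')
    return {'filename': parts[1], 'idxName': parts[3]}

KEYWORDS = {'INDEX_MAP': parse_index_map}
chunks = {'INDEX_MAP': [['INDEX_MAP                "soils.idx" "Soil"\n']]}
for key, chunk_list in chunks.items():
    for chunk in chunk_list:
        print(KEYWORDS[key](key, chunk))  # {'filename': 'soils.idx', 'idxName': 'Soil'}
```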
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._write | def _write(self, session, openFile, replaceParamFile=None, writeIndexMaps=True):
"""
Map Table Write to File Method
"""
# Extract directory
directory = os.path.split(openFile.name)[0]
# Derive a Unique Set of Contaminants
for mapTable in self.getOrderedMapTables(session):
if mapTable.name == 'CONTAMINANT_TRANSPORT':
contaminantList = []
for mtValue in mapTable.values:
if mtValue.contaminant not in contaminantList:
contaminantList.append(mtValue.contaminant)
contaminants = sorted(contaminantList, key=lambda x: (x.indexMap.name, x.name))
# Write first line to file
openFile.write('GSSHA_INDEX_MAP_TABLES\n')
# Write list of index maps
for indexMap in self.indexMaps:
# Write to map table file
openFile.write('INDEX_MAP%s"%s" "%s"\n' % (' ' * 16, indexMap.filename, indexMap.name))
if writeIndexMaps:
# Initiate index map write
indexMap.write(directory, session=session)
for mapTable in self.getOrderedMapTables(session):
if mapTable.name == 'SEDIMENTS':
self._writeSedimentTable(session=session,
fileObject=openFile,
mapTable=mapTable,
replaceParamFile=replaceParamFile)
elif mapTable.name == 'CONTAMINANT_TRANSPORT':
self._writeContaminantTable(session=session,
fileObject=openFile,
mapTable=mapTable,
contaminants=contaminants,
replaceParamFile=replaceParamFile)
else:
self._writeMapTable(session=session,
fileObject=openFile,
mapTable=mapTable,
replaceParamFile=replaceParamFile) | python | def _write(self, session, openFile, replaceParamFile=None, writeIndexMaps=True):
"""
Map Table Write to File Method
"""
# Extract directory
directory = os.path.split(openFile.name)[0]
# Derive a Unique Set of Contaminants
for mapTable in self.getOrderedMapTables(session):
if mapTable.name == 'CONTAMINANT_TRANSPORT':
contaminantList = []
for mtValue in mapTable.values:
if mtValue.contaminant not in contaminantList:
contaminantList.append(mtValue.contaminant)
contaminants = sorted(contaminantList, key=lambda x: (x.indexMap.name, x.name))
# Write first line to file
openFile.write('GSSHA_INDEX_MAP_TABLES\n')
# Write list of index maps
for indexMap in self.indexMaps:
# Write to map table file
openFile.write('INDEX_MAP%s"%s" "%s"\n' % (' ' * 16, indexMap.filename, indexMap.name))
if writeIndexMaps:
# Initiate index map write
indexMap.write(directory, session=session)
for mapTable in self.getOrderedMapTables(session):
if mapTable.name == 'SEDIMENTS':
self._writeSedimentTable(session=session,
fileObject=openFile,
mapTable=mapTable,
replaceParamFile=replaceParamFile)
elif mapTable.name == 'CONTAMINANT_TRANSPORT':
self._writeContaminantTable(session=session,
fileObject=openFile,
mapTable=mapTable,
contaminants=contaminants,
replaceParamFile=replaceParamFile)
else:
self._writeMapTable(session=session,
fileObject=openFile,
mapTable=mapTable,
replaceParamFile=replaceParamFile) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
"=",
"None",
",",
"writeIndexMaps",
"=",
"True",
")",
":",
"# Extract directory",
"directory",
"=",
"os",
".",
"path",
".",
"split",
"(",
"openFile",
".",
"name",
")",
"[",
"0",
"]",
"# Derive a Unique Set of Contaminants",
"for",
"mapTable",
"in",
"self",
".",
"getOrderedMapTables",
"(",
"session",
")",
":",
"if",
"mapTable",
".",
"name",
"==",
"'CONTAMINANT_TRANSPORT'",
":",
"contaminantList",
"=",
"[",
"]",
"for",
"mtValue",
"in",
"mapTable",
".",
"values",
":",
"if",
"mtValue",
".",
"contaminant",
"not",
"in",
"contaminantList",
":",
"contaminantList",
".",
"append",
"(",
"mtValue",
".",
"contaminant",
")",
"contaminants",
"=",
"sorted",
"(",
"contaminantList",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"indexMap",
".",
"name",
",",
"x",
".",
"name",
")",
")",
"# Write first line to file",
"openFile",
".",
"write",
"(",
"'GSSHA_INDEX_MAP_TABLES\\n'",
")",
"# Write list of index maps",
"for",
"indexMap",
"in",
"self",
".",
"indexMaps",
":",
"# Write to map table file",
"openFile",
".",
"write",
"(",
"'INDEX_MAP%s\"%s\" \"%s\"\\n'",
"%",
"(",
"' '",
"*",
"16",
",",
"indexMap",
".",
"filename",
",",
"indexMap",
".",
"name",
")",
")",
"if",
"writeIndexMaps",
":",
"# Initiate index map write",
"indexMap",
".",
"write",
"(",
"directory",
",",
"session",
"=",
"session",
")",
"for",
"mapTable",
"in",
"self",
".",
"getOrderedMapTables",
"(",
"session",
")",
":",
"if",
"mapTable",
".",
"name",
"==",
"'SEDIMENTS'",
":",
"self",
".",
"_writeSedimentTable",
"(",
"session",
"=",
"session",
",",
"fileObject",
"=",
"openFile",
",",
"mapTable",
"=",
"mapTable",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"elif",
"mapTable",
".",
"name",
"==",
"'CONTAMINANT_TRANSPORT'",
":",
"self",
".",
"_writeContaminantTable",
"(",
"session",
"=",
"session",
",",
"fileObject",
"=",
"openFile",
",",
"mapTable",
"=",
"mapTable",
",",
"contaminants",
"=",
"contaminants",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"else",
":",
"self",
".",
"_writeMapTable",
"(",
"session",
"=",
"session",
",",
"fileObject",
"=",
"openFile",
",",
"mapTable",
"=",
"mapTable",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")"
] | Map Table Write to File Method | [
"Map",
"Table",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L161-L206 | train |
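The INDEX_MAP lines written by _write follow a fixed template: the keyword, sixteen spaces, then the quoted filename and map name. A quick check of that formatting with hypothetical names:

```python
# Formatting used for INDEX_MAP lines in _write; filename and name are made up.
line = 'INDEX_MAP%s"%s" "%s"\n' % (' ' * 16, 'soils.idx', 'Soil')
print(repr(line))  # 'INDEX_MAP                "soils.idx" "Soil"\n'
```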
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile.getOrderedMapTables | def getOrderedMapTables(self, session):
"""
Retrieve the map tables ordered by name
"""
return session.query(MapTable).filter(MapTable.mapTableFile == self).order_by(MapTable.name).all() | python | def getOrderedMapTables(self, session):
"""
Retrieve the map tables ordered by name
"""
return session.query(MapTable).filter(MapTable.mapTableFile == self).order_by(MapTable.name).all() | [
"def",
"getOrderedMapTables",
"(",
"self",
",",
"session",
")",
":",
"return",
"session",
".",
"query",
"(",
"MapTable",
")",
".",
"filter",
"(",
"MapTable",
".",
"mapTableFile",
"==",
"self",
")",
".",
"order_by",
"(",
"MapTable",
".",
"name",
")",
".",
"all",
"(",
")"
] | Retrieve the map tables ordered by name | [
"Retrieve",
"the",
"map",
"tables",
"ordered",
"by",
"name"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L208-L212 | train |
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile.deleteMapTable | def deleteMapTable(self, name, session):
"""
Remove duplicate map table if it exists
"""
duplicate_map_tables = session.query(MapTable).filter(MapTable.mapTableFile == self).filter(MapTable.name == name).all()
for duplicate_map_table in duplicate_map_tables:
if duplicate_map_table.indexMap:
session.delete(duplicate_map_table.indexMap)
session.delete(duplicate_map_table)
session.commit() | python | def deleteMapTable(self, name, session):
"""
Remove duplicate map table if it exists
"""
duplicate_map_tables = session.query(MapTable).filter(MapTable.mapTableFile == self).filter(MapTable.name == name).all()
for duplicate_map_table in duplicate_map_tables:
if duplicate_map_table.indexMap:
session.delete(duplicate_map_table.indexMap)
session.delete(duplicate_map_table)
session.commit() | [
"def",
"deleteMapTable",
"(",
"self",
",",
"name",
",",
"session",
")",
":",
"duplicate_map_tables",
"=",
"session",
".",
"query",
"(",
"MapTable",
")",
".",
"filter",
"(",
"MapTable",
".",
"mapTableFile",
"==",
"self",
")",
".",
"filter",
"(",
"MapTable",
".",
"name",
"==",
"name",
")",
".",
"all",
"(",
")",
"for",
"duplicate_map_table",
"in",
"duplicate_map_tables",
":",
"if",
"duplicate_map_table",
".",
"indexMap",
":",
"session",
".",
"delete",
"(",
"duplicate_map_table",
".",
"indexMap",
")",
"session",
".",
"delete",
"(",
"duplicate_map_table",
")",
"session",
".",
"commit",
"(",
")"
] | Remove duplicate map table if it exists | [
"Remove",
"duplicate",
"map",
"table",
"if",
"it",
"exists"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L214-L224 | train |
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._createGsshaPyObjects | def _createGsshaPyObjects(self, mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID):
"""
Create GSSHAPY Mapping Table ORM Objects Method
"""
for mt in mapTables:
# Create GSSHAPY MapTable object
try:
# Make sure the index map name listed with the map table is in the list of
# index maps read from the top of the mapping table file (Note that the index maps for the sediment
                # and contaminant tables will have names of None, so we skip these cases.)
if mt['indexMapName'] is not None:
indexMaps[mt['indexMapName']]
mapTable = MapTable(name=mt['name'],
numIDs=mt['numVars']['NUM_IDS'],
maxNumCells=mt['numVars']['MAX_NUMBER_CELLS'],
numSed=mt['numVars'].get('NUM_SED'),
numContam=mt['numVars'].get('NUM_CONTAM'),
maxSoilID=mt['numVars'].get('MAX_SOIL_ID'))
# Associate MapTable with this MapTableFile and IndexMaps
mapTable.mapTableFile = self
                ## NOTE: Index maps are associated with contaminants for CONTAMINANT_TRANSPORT map
                ## tables. The SEDIMENTS map table is associated with index maps via the
## SOIL_EROSION_PROPS map table.
if mt['indexMapName']:
mapTable.indexMap = indexMaps[mt['indexMapName']]
# CONTAMINANT_TRANSPORT map table handler
if mt['name'] == 'CONTAMINANT_TRANSPORT':
for contam in mt['contaminants']:
# Preprocess the contaminant output paths to be relative
outputBaseFilename = self._preprocessContaminantOutFilePath(contam['outPath'])
# Initialize GSSHAPY MTContaminant object
contaminant = MTContaminant(name=contam['name'],
outputFilename=outputBaseFilename,
precipConc=vrp(contam['contamVars']['PRECIP_CONC'], replaceParamFile),
partition=vrp(contam['contamVars']['PARTITION'], replaceParamFile),
numIDs=contam['contamVars']['NUM_IDS'])
# Associate MTContaminant with appropriate IndexMap
indexMap = indexMaps[contam['indexMapName']]
contaminant.indexMap = indexMap
self._createValueObjects(contam['valueList'], contam['varList'], mapTable, indexMap,
contaminant, replaceParamFile)
# Read any output files if they are present
self._readContaminantOutputFiles(directory, outputBaseFilename, session, spatial, spatialReferenceID)
# SEDIMENTS map table handler
elif mt['name'] == 'SEDIMENTS':
for line in mt['valueList']:
# Create GSSHAPY MTSediment object
sediment = MTSediment(description=line[0],
specificGravity=vrp(line[1], replaceParamFile),
particleDiameter=vrp(line[2], replaceParamFile),
outputFilename=line[3])
# Associate the MTSediment with the MapTable
sediment.mapTable = mapTable
# All other map table handler
else:
indexMap = indexMaps[mt['indexMapName']]
# Create MTValue and MTIndex objects
self._createValueObjects(mt['valueList'], mt['varList'], mapTable, indexMap, None, replaceParamFile)
except KeyError:
log.info(('Index Map "%s" for Mapping Table "%s" not found in list of index maps in the mapping '
'table file. The Mapping Table was not read into the database.') % (
mt['indexMapName'], mt['name'])) | python | def _createGsshaPyObjects(self, mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID):
"""
Create GSSHAPY Mapping Table ORM Objects Method
"""
for mt in mapTables:
# Create GSSHAPY MapTable object
try:
# Make sure the index map name listed with the map table is in the list of
# index maps read from the top of the mapping table file (Note that the index maps for the sediment
# and contaminant tables will have names of None, so we skip these cases.
if mt['indexMapName'] is not None:
indexMaps[mt['indexMapName']]
mapTable = MapTable(name=mt['name'],
numIDs=mt['numVars']['NUM_IDS'],
maxNumCells=mt['numVars']['MAX_NUMBER_CELLS'],
numSed=mt['numVars'].get('NUM_SED'),
numContam=mt['numVars'].get('NUM_CONTAM'),
maxSoilID=mt['numVars'].get('MAX_SOIL_ID'))
# Associate MapTable with this MapTableFile and IndexMaps
mapTable.mapTableFile = self
                ## NOTE: Index maps are associated with contaminants for CONTAMINANT_TRANSPORT map
                ## tables. The SEDIMENTS map table is associated with index maps via the
## SOIL_EROSION_PROPS map table.
if mt['indexMapName']:
mapTable.indexMap = indexMaps[mt['indexMapName']]
# CONTAMINANT_TRANSPORT map table handler
if mt['name'] == 'CONTAMINANT_TRANSPORT':
for contam in mt['contaminants']:
# Preprocess the contaminant output paths to be relative
outputBaseFilename = self._preprocessContaminantOutFilePath(contam['outPath'])
# Initialize GSSHAPY MTContaminant object
contaminant = MTContaminant(name=contam['name'],
outputFilename=outputBaseFilename,
precipConc=vrp(contam['contamVars']['PRECIP_CONC'], replaceParamFile),
partition=vrp(contam['contamVars']['PARTITION'], replaceParamFile),
numIDs=contam['contamVars']['NUM_IDS'])
# Associate MTContaminant with appropriate IndexMap
indexMap = indexMaps[contam['indexMapName']]
contaminant.indexMap = indexMap
self._createValueObjects(contam['valueList'], contam['varList'], mapTable, indexMap,
contaminant, replaceParamFile)
# Read any output files if they are present
self._readContaminantOutputFiles(directory, outputBaseFilename, session, spatial, spatialReferenceID)
# SEDIMENTS map table handler
elif mt['name'] == 'SEDIMENTS':
for line in mt['valueList']:
# Create GSSHAPY MTSediment object
sediment = MTSediment(description=line[0],
specificGravity=vrp(line[1], replaceParamFile),
particleDiameter=vrp(line[2], replaceParamFile),
outputFilename=line[3])
# Associate the MTSediment with the MapTable
sediment.mapTable = mapTable
# All other map table handler
else:
indexMap = indexMaps[mt['indexMapName']]
# Create MTValue and MTIndex objects
self._createValueObjects(mt['valueList'], mt['varList'], mapTable, indexMap, None, replaceParamFile)
except KeyError:
log.info(('Index Map "%s" for Mapping Table "%s" not found in list of index maps in the mapping '
'table file. The Mapping Table was not read into the database.') % (
mt['indexMapName'], mt['name'])) | [
"def",
"_createGsshaPyObjects",
"(",
"self",
",",
"mapTables",
",",
"indexMaps",
",",
"replaceParamFile",
",",
"directory",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
":",
"for",
"mt",
"in",
"mapTables",
":",
"# Create GSSHAPY MapTable object",
"try",
":",
"# Make sure the index map name listed with the map table is in the list of",
"# index maps read from the top of the mapping table file (Note that the index maps for the sediment",
"# and contaminant tables will have names of None, so we skip these cases.",
"if",
"mt",
"[",
"'indexMapName'",
"]",
"is",
"not",
"None",
":",
"indexMaps",
"[",
"mt",
"[",
"'indexMapName'",
"]",
"]",
"mapTable",
"=",
"MapTable",
"(",
"name",
"=",
"mt",
"[",
"'name'",
"]",
",",
"numIDs",
"=",
"mt",
"[",
"'numVars'",
"]",
"[",
"'NUM_IDS'",
"]",
",",
"maxNumCells",
"=",
"mt",
"[",
"'numVars'",
"]",
"[",
"'MAX_NUMBER_CELLS'",
"]",
",",
"numSed",
"=",
"mt",
"[",
"'numVars'",
"]",
".",
"get",
"(",
"'NUM_SED'",
")",
",",
"numContam",
"=",
"mt",
"[",
"'numVars'",
"]",
".",
"get",
"(",
"'NUM_CONTAM'",
")",
",",
"maxSoilID",
"=",
"mt",
"[",
"'numVars'",
"]",
".",
"get",
"(",
"'MAX_SOIL_ID'",
")",
")",
"# Associate MapTable with this MapTableFile and IndexMaps",
"mapTable",
".",
"mapTableFile",
"=",
"self",
"## NOTE: Index maps are associated wth contaminants for CONTAMINANT_TRANSPORT map",
"## tables. The SEDIMENTS map table are associated with index maps via the",
"## SOIL_EROSION_PROPS map table.",
"if",
"mt",
"[",
"'indexMapName'",
"]",
":",
"mapTable",
".",
"indexMap",
"=",
"indexMaps",
"[",
"mt",
"[",
"'indexMapName'",
"]",
"]",
"# CONTAMINANT_TRANSPORT map table handler",
"if",
"mt",
"[",
"'name'",
"]",
"==",
"'CONTAMINANT_TRANSPORT'",
":",
"for",
"contam",
"in",
"mt",
"[",
"'contaminants'",
"]",
":",
"# Preprocess the contaminant output paths to be relative",
"outputBaseFilename",
"=",
"self",
".",
"_preprocessContaminantOutFilePath",
"(",
"contam",
"[",
"'outPath'",
"]",
")",
"# Initialize GSSHAPY MTContaminant object",
"contaminant",
"=",
"MTContaminant",
"(",
"name",
"=",
"contam",
"[",
"'name'",
"]",
",",
"outputFilename",
"=",
"outputBaseFilename",
",",
"precipConc",
"=",
"vrp",
"(",
"contam",
"[",
"'contamVars'",
"]",
"[",
"'PRECIP_CONC'",
"]",
",",
"replaceParamFile",
")",
",",
"partition",
"=",
"vrp",
"(",
"contam",
"[",
"'contamVars'",
"]",
"[",
"'PARTITION'",
"]",
",",
"replaceParamFile",
")",
",",
"numIDs",
"=",
"contam",
"[",
"'contamVars'",
"]",
"[",
"'NUM_IDS'",
"]",
")",
"# Associate MTContaminant with appropriate IndexMap",
"indexMap",
"=",
"indexMaps",
"[",
"contam",
"[",
"'indexMapName'",
"]",
"]",
"contaminant",
".",
"indexMap",
"=",
"indexMap",
"self",
".",
"_createValueObjects",
"(",
"contam",
"[",
"'valueList'",
"]",
",",
"contam",
"[",
"'varList'",
"]",
",",
"mapTable",
",",
"indexMap",
",",
"contaminant",
",",
"replaceParamFile",
")",
"# Read any output files if they are present",
"self",
".",
"_readContaminantOutputFiles",
"(",
"directory",
",",
"outputBaseFilename",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
"# SEDIMENTS map table handler",
"elif",
"mt",
"[",
"'name'",
"]",
"==",
"'SEDIMENTS'",
":",
"for",
"line",
"in",
"mt",
"[",
"'valueList'",
"]",
":",
"# Create GSSHAPY MTSediment object",
"sediment",
"=",
"MTSediment",
"(",
"description",
"=",
"line",
"[",
"0",
"]",
",",
"specificGravity",
"=",
"vrp",
"(",
"line",
"[",
"1",
"]",
",",
"replaceParamFile",
")",
",",
"particleDiameter",
"=",
"vrp",
"(",
"line",
"[",
"2",
"]",
",",
"replaceParamFile",
")",
",",
"outputFilename",
"=",
"line",
"[",
"3",
"]",
")",
"# Associate the MTSediment with the MapTable",
"sediment",
".",
"mapTable",
"=",
"mapTable",
"# All other map table handler",
"else",
":",
"indexMap",
"=",
"indexMaps",
"[",
"mt",
"[",
"'indexMapName'",
"]",
"]",
"# Create MTValue and MTIndex objects",
"self",
".",
"_createValueObjects",
"(",
"mt",
"[",
"'valueList'",
"]",
",",
"mt",
"[",
"'varList'",
"]",
",",
"mapTable",
",",
"indexMap",
",",
"None",
",",
"replaceParamFile",
")",
"except",
"KeyError",
":",
"log",
".",
"info",
"(",
"(",
"'Index Map \"%s\" for Mapping Table \"%s\" not found in list of index maps in the mapping '",
"'table file. The Mapping Table was not read into the database.'",
")",
"%",
"(",
"mt",
"[",
"'indexMapName'",
"]",
",",
"mt",
"[",
"'name'",
"]",
")",
")"
] | Create GSSHAPY Mapping Table ORM Objects Method | [
"Create",
"GSSHAPY",
"Mapping",
"Table",
"ORM",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L226-L301 | train |
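_createGsshaPyObjects guards each mapping table with a bare lookup of its index map name; a KeyError means the name never appeared at the top of the file and the table is skipped with a log message. A stripped-down sketch of that guard with hypothetical names:

```python
# Sketch of the KeyError guard used above; names and objects are placeholders.
indexMaps = {'Soil': object()}
mt = {'indexMapName': 'Landuse', 'name': 'ROUGHNESS'}
try:
    indexMaps[mt['indexMapName']]          # raises KeyError if the map was never declared
    print('would build MapTable %s' % mt['name'])
except KeyError:
    print('Index Map "%s" for Mapping Table "%s" not found; table skipped'
          % (mt['indexMapName'], mt['name']))
```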
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._createValueObjects | def _createValueObjects(self, valueList, varList, mapTable, indexMap, contaminant, replaceParamFile):
"""
Populate GSSHAPY MTValue and MTIndex Objects Method
"""
def assign_values_to_table(value_list, layer_id):
for i, value in enumerate(value_list):
value = vrp(value, replaceParamFile)
# Create MTValue object and associate with MTIndex and MapTable
mtValue = MTValue(variable=varList[i], value=float(value))
mtValue.index = mtIndex
mtValue.mapTable = mapTable
mtValue.layer_id = layer_id
# MTContaminant handler (associate MTValue with MTContaminant)
if contaminant:
mtValue.contaminant = contaminant
for row in valueList:
# Create GSSHAPY MTIndex object and associate with IndexMap
mtIndex = MTIndex(index=row['index'], description1=row['description1'], description2=row['description2'])
mtIndex.indexMap = indexMap
if len(np.shape(row['values'])) == 2:
# this is for ids with multiple layers
for layer_id, values in enumerate(row['values']):
assign_values_to_table(values, layer_id)
else:
assign_values_to_table(row['values'], 0) | python | def _createValueObjects(self, valueList, varList, mapTable, indexMap, contaminant, replaceParamFile):
"""
Populate GSSHAPY MTValue and MTIndex Objects Method
"""
def assign_values_to_table(value_list, layer_id):
for i, value in enumerate(value_list):
value = vrp(value, replaceParamFile)
# Create MTValue object and associate with MTIndex and MapTable
mtValue = MTValue(variable=varList[i], value=float(value))
mtValue.index = mtIndex
mtValue.mapTable = mapTable
mtValue.layer_id = layer_id
# MTContaminant handler (associate MTValue with MTContaminant)
if contaminant:
mtValue.contaminant = contaminant
for row in valueList:
# Create GSSHAPY MTIndex object and associate with IndexMap
mtIndex = MTIndex(index=row['index'], description1=row['description1'], description2=row['description2'])
mtIndex.indexMap = indexMap
if len(np.shape(row['values'])) == 2:
# this is for ids with multiple layers
for layer_id, values in enumerate(row['values']):
assign_values_to_table(values, layer_id)
else:
assign_values_to_table(row['values'], 0) | [
"def",
"_createValueObjects",
"(",
"self",
",",
"valueList",
",",
"varList",
",",
"mapTable",
",",
"indexMap",
",",
"contaminant",
",",
"replaceParamFile",
")",
":",
"def",
"assign_values_to_table",
"(",
"value_list",
",",
"layer_id",
")",
":",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"value_list",
")",
":",
"value",
"=",
"vrp",
"(",
"value",
",",
"replaceParamFile",
")",
"# Create MTValue object and associate with MTIndex and MapTable",
"mtValue",
"=",
"MTValue",
"(",
"variable",
"=",
"varList",
"[",
"i",
"]",
",",
"value",
"=",
"float",
"(",
"value",
")",
")",
"mtValue",
".",
"index",
"=",
"mtIndex",
"mtValue",
".",
"mapTable",
"=",
"mapTable",
"mtValue",
".",
"layer_id",
"=",
"layer_id",
"# MTContaminant handler (associate MTValue with MTContaminant)",
"if",
"contaminant",
":",
"mtValue",
".",
"contaminant",
"=",
"contaminant",
"for",
"row",
"in",
"valueList",
":",
"# Create GSSHAPY MTIndex object and associate with IndexMap",
"mtIndex",
"=",
"MTIndex",
"(",
"index",
"=",
"row",
"[",
"'index'",
"]",
",",
"description1",
"=",
"row",
"[",
"'description1'",
"]",
",",
"description2",
"=",
"row",
"[",
"'description2'",
"]",
")",
"mtIndex",
".",
"indexMap",
"=",
"indexMap",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"row",
"[",
"'values'",
"]",
")",
")",
"==",
"2",
":",
"# this is for ids with multiple layers",
"for",
"layer_id",
",",
"values",
"in",
"enumerate",
"(",
"row",
"[",
"'values'",
"]",
")",
":",
"assign_values_to_table",
"(",
"values",
",",
"layer_id",
")",
"else",
":",
"assign_values_to_table",
"(",
"row",
"[",
"'values'",
"]",
",",
"0",
")"
] | Populate GSSHAPY MTValue and MTIndex Objects Method | [
"Populate",
"GSSHAPY",
"MTValue",
"and",
"MTIndex",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L303-L329 | train |
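The _createValueObjects record above branches on len(np.shape(row['values'])) == 2 to tell single-layer rows from multi-layer ones. A small self-contained sketch of that dispatch with invented row data (the values and the assign_values stand-in are illustrative only):

import numpy as np

def assign_values(values, layer_id):
    # Stand-in for the per-layer MTValue creation; here it only prints.
    print(layer_id, values)

rows = [
    {'index': 1, 'values': ['0.010', '0.300']},                                             # single layer
    {'index': 2, 'values': [['0.010', '0.300'], ['0.020', '0.250'], ['0.030', '0.200']]},   # three layers
]

for row in rows:
    if len(np.shape(row['values'])) == 2:
        # Multi-layer id: one assignment per layer, keyed by layer index.
        for layer_id, values in enumerate(row['values']):
            assign_values(values, layer_id)
    else:
        # Single-layer id: everything goes to layer 0.
        assign_values(row['values'], 0)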
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._readContaminantOutputFiles | def _readContaminantOutputFiles(self, directory, baseFileName, session, spatial, spatialReferenceID):
"""
Read any contaminant output files if available
"""
if not os.path.isdir(directory):
return
if baseFileName == '':
return
# Look for channel output files denoted by the ".chan" after the base filename
chanBaseFileName = '.'.join([baseFileName, 'chan'])
# Get contents of directory
directoryList = os.listdir(directory)
# Compile a list of files with "basename.chan" in them
chanFiles = []
for thing in directoryList:
if chanBaseFileName in thing:
chanFiles.append(thing)
# Assume all "chan" files are link node dataset files and try to read them
for chanFile in chanFiles:
linkNodeDatasetFile = LinkNodeDatasetFile()
linkNodeDatasetFile.projectFile = self.projectFile
try:
linkNodeDatasetFile.read(directory=directory,
filename=chanFile,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID)
except:
log.warning('Attempted to read Contaminant Transport Output file {0}, but failed.'.format(chanFile)) | python | def _readContaminantOutputFiles(self, directory, baseFileName, session, spatial, spatialReferenceID):
"""
Read any contaminant output files if available
"""
if not os.path.isdir(directory):
return
if baseFileName == '':
return
# Look for channel output files denoted by the ".chan" after the base filename
chanBaseFileName = '.'.join([baseFileName, 'chan'])
# Get contents of directory
directoryList = os.listdir(directory)
# Compile a list of files with "basename.chan" in them
chanFiles = []
for thing in directoryList:
if chanBaseFileName in thing:
chanFiles.append(thing)
# Assume all "chan" files are link node dataset files and try to read them
for chanFile in chanFiles:
linkNodeDatasetFile = LinkNodeDatasetFile()
linkNodeDatasetFile.projectFile = self.projectFile
try:
linkNodeDatasetFile.read(directory=directory,
filename=chanFile,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID)
except:
log.warning('Attempted to read Contaminant Transport Output file {0}, but failed.'.format(chanFile)) | [
"def",
"_readContaminantOutputFiles",
"(",
"self",
",",
"directory",
",",
"baseFileName",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"return",
"if",
"baseFileName",
"==",
"''",
":",
"return",
"# Look for channel output files denoted by the \".chan\" after the base filename",
"chanBaseFileName",
"=",
"'.'",
".",
"join",
"(",
"[",
"baseFileName",
",",
"'chan'",
"]",
")",
"# Get contents of directory",
"directoryList",
"=",
"os",
".",
"listdir",
"(",
"directory",
")",
"# Compile a list of files with \"basename.chan\" in them",
"chanFiles",
"=",
"[",
"]",
"for",
"thing",
"in",
"directoryList",
":",
"if",
"chanBaseFileName",
"in",
"thing",
":",
"chanFiles",
".",
"append",
"(",
"thing",
")",
"# Assume all \"chan\" files are link node dataset files and try to read them",
"for",
"chanFile",
"in",
"chanFiles",
":",
"linkNodeDatasetFile",
"=",
"LinkNodeDatasetFile",
"(",
")",
"linkNodeDatasetFile",
".",
"projectFile",
"=",
"self",
".",
"projectFile",
"try",
":",
"linkNodeDatasetFile",
".",
"read",
"(",
"directory",
"=",
"directory",
",",
"filename",
"=",
"chanFile",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"except",
":",
"log",
".",
"warning",
"(",
"'Attempted to read Contaminant Transport Output file {0}, but failed.'",
".",
"format",
"(",
"chanFile",
")",
")"
] | Read any contaminant output files if available | [
"Read",
"any",
"contaminant",
"output",
"files",
"if",
"available"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L331-L364 | train |
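_readContaminantOutputFiles above selects channel output files by looking for "<base>.chan" in each directory entry before attempting to read them as link node datasets. The same filter in isolation (directory and base name are placeholders):

import os

def find_chan_files(directory, base_filename):
    # Return directory entries whose name contains "<base_filename>.chan".
    if not os.path.isdir(directory) or base_filename == '':
        return []
    chan_base = '.'.join([base_filename, 'chan'])
    return [entry for entry in os.listdir(directory) if chan_base in entry]

# e.g. find_chan_files('gssha_project', 'copper') might return ['copper.chan', 'copper.chan.dep']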
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._writeMapTable | def _writeMapTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write
"""
# Write mapping name
fileObject.write('%s "%s"\n' % (mapTable.name, mapTable.indexMap.name))
# Write mapping table global variables
if mapTable.numIDs:
fileObject.write('NUM_IDS %s\n' % (mapTable.numIDs))
if mapTable.maxNumCells:
fileObject.write('MAX_NUMBER_CELLS %s\n' % (mapTable.maxNumCells))
if mapTable.numSed:
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
if mapTable.maxSoilID:
fileObject.write('MAX_SOIL_ID %s\n' % (mapTable.maxSoilID))
# Write value lines from the database
self._writeValues(session, fileObject, mapTable, None, replaceParamFile) | python | def _writeMapTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write
"""
# Write mapping name
fileObject.write('%s "%s"\n' % (mapTable.name, mapTable.indexMap.name))
# Write mapping table global variables
if mapTable.numIDs:
fileObject.write('NUM_IDS %s\n' % (mapTable.numIDs))
if mapTable.maxNumCells:
fileObject.write('MAX_NUMBER_CELLS %s\n' % (mapTable.maxNumCells))
if mapTable.numSed:
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
if mapTable.maxSoilID:
fileObject.write('MAX_SOIL_ID %s\n' % (mapTable.maxSoilID))
# Write value lines from the database
self._writeValues(session, fileObject, mapTable, None, replaceParamFile) | [
"def",
"_writeMapTable",
"(",
"self",
",",
"session",
",",
"fileObject",
",",
"mapTable",
",",
"replaceParamFile",
")",
":",
"# Write mapping name",
"fileObject",
".",
"write",
"(",
"'%s \"%s\"\\n'",
"%",
"(",
"mapTable",
".",
"name",
",",
"mapTable",
".",
"indexMap",
".",
"name",
")",
")",
"# Write mapping table global variables",
"if",
"mapTable",
".",
"numIDs",
":",
"fileObject",
".",
"write",
"(",
"'NUM_IDS %s\\n'",
"%",
"(",
"mapTable",
".",
"numIDs",
")",
")",
"if",
"mapTable",
".",
"maxNumCells",
":",
"fileObject",
".",
"write",
"(",
"'MAX_NUMBER_CELLS %s\\n'",
"%",
"(",
"mapTable",
".",
"maxNumCells",
")",
")",
"if",
"mapTable",
".",
"numSed",
":",
"fileObject",
".",
"write",
"(",
"'NUM_SED %s\\n'",
"%",
"(",
"mapTable",
".",
"numSed",
")",
")",
"if",
"mapTable",
".",
"maxSoilID",
":",
"fileObject",
".",
"write",
"(",
"'MAX_SOIL_ID %s\\n'",
"%",
"(",
"mapTable",
".",
"maxSoilID",
")",
")",
"# Write value lines from the database",
"self",
".",
"_writeValues",
"(",
"session",
",",
"fileObject",
",",
"mapTable",
",",
"None",
",",
"replaceParamFile",
")"
] | Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write | [
"Write",
"Generic",
"Map",
"Table",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L366-L395 | train |
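_writeMapTable above emits the table name, the quoted index-map name, and then only the global cards whose values are set (a value of 0 or None skips the card). A sketch of the resulting text using an in-memory buffer and made-up values:

import io

buf = io.StringIO()
name, index_map_name = 'ROUGHNESS', 'luse'   # hypothetical table and index map names
numIDs, maxNumCells = 3, 0

buf.write('%s "%s"\n' % (name, index_map_name))
if numIDs:
    buf.write('NUM_IDS %s\n' % numIDs)
if maxNumCells:                              # falsy, so this card is skipped
    buf.write('MAX_NUMBER_CELLS %s\n' % maxNumCells)

print(buf.getvalue())
# ROUGHNESS "luse"
# NUM_IDS 3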
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._writeContaminantTable | def _writeContaminantTable(self, session, fileObject, mapTable, contaminants, replaceParamFile):
"""
This method writes the contaminant transport mapping table case.
"""
# Write the contaminant mapping table header
fileObject.write('%s\n' % (mapTable.name))
fileObject.write('NUM_CONTAM %s\n' % (mapTable.numContam))
# Write out each contaminant and its values
for contaminant in contaminants:
fileObject.write(
'"%s" "%s" %s\n' % (contaminant.name, contaminant.indexMap.name, contaminant.outputFilename))
# Add trailing zeros to values / replacement parameter
precipConcString = vwp(contaminant.precipConc, replaceParamFile)
partitionString = vwp(contaminant.partition, replaceParamFile)
try:
precipConc = '%.2f' % precipConcString
except:
precipConc = '%s' % precipConcString
try:
partition = '%.2f' % partitionString
except:
partition = '%s' % partitionString
# Write global variables for the contaminant
fileObject.write('PRECIP_CONC%s%s\n' % (' ' * 10, precipConc))
fileObject.write('PARTITION%s%s\n' % (' ' * 12, partition))
fileObject.write('NUM_IDS %s\n' % contaminant.numIDs)
# Write value lines
self._writeValues(session, fileObject, mapTable, contaminant, replaceParamFile) | python | def _writeContaminantTable(self, session, fileObject, mapTable, contaminants, replaceParamFile):
"""
This method writes the contaminant transport mapping table case.
"""
# Write the contaminant mapping table header
fileObject.write('%s\n' % (mapTable.name))
fileObject.write('NUM_CONTAM %s\n' % (mapTable.numContam))
# Write out each contaminant and its values
for contaminant in contaminants:
fileObject.write(
'"%s" "%s" %s\n' % (contaminant.name, contaminant.indexMap.name, contaminant.outputFilename))
# Add trailing zeros to values / replacement parameter
precipConcString = vwp(contaminant.precipConc, replaceParamFile)
partitionString = vwp(contaminant.partition, replaceParamFile)
try:
precipConc = '%.2f' % precipConcString
except:
precipConc = '%s' % precipConcString
try:
partition = '%.2f' % partitionString
except:
partition = '%s' % partitionString
# Write global variables for the contaminant
fileObject.write('PRECIP_CONC%s%s\n' % (' ' * 10, precipConc))
fileObject.write('PARTITION%s%s\n' % (' ' * 12, partition))
fileObject.write('NUM_IDS %s\n' % contaminant.numIDs)
# Write value lines
self._writeValues(session, fileObject, mapTable, contaminant, replaceParamFile) | [
"def",
"_writeContaminantTable",
"(",
"self",
",",
"session",
",",
"fileObject",
",",
"mapTable",
",",
"contaminants",
",",
"replaceParamFile",
")",
":",
"# Write the contaminant mapping table header",
"fileObject",
".",
"write",
"(",
"'%s\\n'",
"%",
"(",
"mapTable",
".",
"name",
")",
")",
"fileObject",
".",
"write",
"(",
"'NUM_CONTAM %s\\n'",
"%",
"(",
"mapTable",
".",
"numContam",
")",
")",
"# Write out each contaminant and it's values",
"for",
"contaminant",
"in",
"contaminants",
":",
"fileObject",
".",
"write",
"(",
"'\"%s\" \"%s\" %s\\n'",
"%",
"(",
"contaminant",
".",
"name",
",",
"contaminant",
".",
"indexMap",
".",
"name",
",",
"contaminant",
".",
"outputFilename",
")",
")",
"# Add trailing zeros to values / replacement parameter",
"precipConcString",
"=",
"vwp",
"(",
"contaminant",
".",
"precipConc",
",",
"replaceParamFile",
")",
"partitionString",
"=",
"vwp",
"(",
"contaminant",
".",
"partition",
",",
"replaceParamFile",
")",
"try",
":",
"precipConc",
"=",
"'%.2f'",
"%",
"precipConcString",
"except",
":",
"precipConc",
"=",
"'%s'",
"%",
"precipConcString",
"try",
":",
"partition",
"=",
"'%.2f'",
"%",
"partitionString",
"except",
":",
"partition",
"=",
"'%s'",
"%",
"partitionString",
"# Write global variables for the contaminant",
"fileObject",
".",
"write",
"(",
"'PRECIP_CONC%s%s\\n'",
"%",
"(",
"' '",
"*",
"10",
",",
"precipConc",
")",
")",
"fileObject",
".",
"write",
"(",
"'PARTITION%s%s\\n'",
"%",
"(",
"' '",
"*",
"12",
",",
"partition",
")",
")",
"fileObject",
".",
"write",
"(",
"'NUM_IDS %s\\n'",
"%",
"contaminant",
".",
"numIDs",
")",
"# Write value lines",
"self",
".",
"_writeValues",
"(",
"session",
",",
"fileObject",
",",
"mapTable",
",",
"contaminant",
",",
"replaceParamFile",
")"
] | This method writes the contaminant transport mapping table case. | [
"This",
"method",
"writes",
"the",
"contaminant",
"transport",
"mapping",
"table",
"case",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L398-L430 | train |
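The contaminant writer above formats PRECIP_CONC and PARTITION with two decimals when vwp returns a number, and falls back to plain string interpolation when it returns a replacement-parameter token instead. The same fallback pattern on its own (the token text is invented):

def format_value(value):
    # Numbers get fixed precision; replacement tokens pass through unchanged.
    try:
        return '%.2f' % value
    except TypeError:
        return '%s' % value

print(format_value(0.5))          # 0.50
print(format_value('[PREC_CU]'))  # [PREC_CU]  (hypothetical replacement parameter)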
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._writeSedimentTable | def _writeSedimentTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Sediment Mapping Table Method
This method writes the sediments special mapping table case.
"""
# Write the sediment mapping table header
fileObject.write('%s\n' % (mapTable.name))
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
# Write the value header line
fileObject.write(
'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\n' % (' ' * 22, ' ' * 3, ' ' * 5))
# Retrieve the sediment mapping table values
sediments = session.query(MTSediment). \
filter(MTSediment.mapTable == mapTable). \
order_by(MTSediment.id). \
all()
# Write sediments out to file
for sediment in sediments:
# Determine spacing for aesthetics
space1 = 42 - len(sediment.description)
# Pad values with zeros / Get replacement variable
specGravString = vwp(sediment.specificGravity, replaceParamFile)
partDiamString = vwp(sediment.particleDiameter, replaceParamFile)
try:
specGrav = '%.6f' % specGravString
except:
specGrav = '%s' % specGravString
try:
partDiam = '%.6f' % partDiamString
except:
partDiam = '%s' % partDiamString
fileObject.write('%s%s%s%s%s%s%s\n' % (
sediment.description, ' ' * space1, specGrav, ' ' * 5, partDiam, ' ' * 6, sediment.outputFilename)) | python | def _writeSedimentTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Sediment Mapping Table Method
This method writes the sediments special mapping table case.
"""
# Write the sediment mapping table header
fileObject.write('%s\n' % (mapTable.name))
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
# Write the value header line
fileObject.write(
'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\n' % (' ' * 22, ' ' * 3, ' ' * 5))
# Retrieve the sediment mapping table values
sediments = session.query(MTSediment). \
filter(MTSediment.mapTable == mapTable). \
order_by(MTSediment.id). \
all()
# Write sediments out to file
for sediment in sediments:
# Determine spacing for aesthetics
space1 = 42 - len(sediment.description)
# Pad values with zeros / Get replacement variable
specGravString = vwp(sediment.specificGravity, replaceParamFile)
partDiamString = vwp(sediment.particleDiameter, replaceParamFile)
try:
specGrav = '%.6f' % specGravString
except:
specGrav = '%s' % specGravString
try:
partDiam = '%.6f' % partDiamString
except:
partDiam = '%s' % partDiamString
fileObject.write('%s%s%s%s%s%s%s\n' % (
sediment.description, ' ' * space1, specGrav, ' ' * 5, partDiam, ' ' * 6, sediment.outputFilename)) | [
"def",
"_writeSedimentTable",
"(",
"self",
",",
"session",
",",
"fileObject",
",",
"mapTable",
",",
"replaceParamFile",
")",
":",
"# Write the sediment mapping table header",
"fileObject",
".",
"write",
"(",
"'%s\\n'",
"%",
"(",
"mapTable",
".",
"name",
")",
")",
"fileObject",
".",
"write",
"(",
"'NUM_SED %s\\n'",
"%",
"(",
"mapTable",
".",
"numSed",
")",
")",
"# Write the value header line",
"fileObject",
".",
"write",
"(",
"'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\\n'",
"%",
"(",
"' '",
"*",
"22",
",",
"' '",
"*",
"3",
",",
"' '",
"*",
"5",
")",
")",
"# Retrive the sediment mapping table values",
"sediments",
"=",
"session",
".",
"query",
"(",
"MTSediment",
")",
".",
"filter",
"(",
"MTSediment",
".",
"mapTable",
"==",
"mapTable",
")",
".",
"order_by",
"(",
"MTSediment",
".",
"id",
")",
".",
"all",
"(",
")",
"# Write sediments out to file",
"for",
"sediment",
"in",
"sediments",
":",
"# Determine spacing for aesthetics",
"space1",
"=",
"42",
"-",
"len",
"(",
"sediment",
".",
"description",
")",
"# Pad values with zeros / Get replacement variable",
"specGravString",
"=",
"vwp",
"(",
"sediment",
".",
"specificGravity",
",",
"replaceParamFile",
")",
"partDiamString",
"=",
"vwp",
"(",
"sediment",
".",
"particleDiameter",
",",
"replaceParamFile",
")",
"try",
":",
"specGrav",
"=",
"'%.6f'",
"%",
"specGravString",
"except",
":",
"specGrav",
"=",
"'%s'",
"%",
"specGravString",
"try",
":",
"partDiam",
"=",
"'%.6f'",
"%",
"partDiamString",
"except",
":",
"partDiam",
"=",
"'%s'",
"%",
"partDiamString",
"fileObject",
".",
"write",
"(",
"'%s%s%s%s%s%s%s\\n'",
"%",
"(",
"sediment",
".",
"description",
",",
"' '",
"*",
"space1",
",",
"specGrav",
",",
"' '",
"*",
"5",
",",
"partDiam",
",",
"' '",
"*",
"6",
",",
"sediment",
".",
"outputFilename",
")",
")"
] | Write Sediment Mapping Table Method
This method writes the sediments special mapping table case. | [
"Write",
"Sediment",
"Mapping",
"Table",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L433-L475 | train |
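_writeSedimentTable above pads the description column with 42 - len(description) spaces so the numeric columns line up. The same padding in isolation, with an invented sediment row:

description, spec_grav, part_diam, out_file = 'Sand', '2.650000', '0.000250', 'sand.out'
space1 = 42 - len(description)
line = '%s%s%s%s%s%s%s' % (description, ' ' * space1, spec_grav, ' ' * 5, part_diam, ' ' * 6, out_file)
print(line)   # 'Sand' padded out to a 42-character column, followed by the numeric columns and filename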
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._valuePivot | def _valuePivot(self, session, mapTable, contaminant, replaceParaFile):
"""
This function retrieves the values of a mapping table from the database and pivots them into the format that is
required by the mapping table file. This function returns a list of strings that can be printed to the file
directly.
"""
# Retrieve the indices for the current mapping table and mapping table file
indexes = session.query(MTIndex). \
join(MTValue.index). \
filter(MTValue.mapTable == mapTable). \
filter(MTValue.contaminant == contaminant). \
order_by(MTIndex.index). \
all()
# determine number of layers
layer_indices = [0]
if mapTable.name in ('MULTI_LAYER_SOIL', 'RICHARDS_EQN_INFILTRATION_BROOKS'):
layer_indices = range(3)
# ----------------------------------------
# Construct each line in the mapping table
#-----------------------------------------
# All lines will be compiled into this list
lines = []
values = {}
for idx in indexes:
for layer_index in layer_indices:
# Retrieve values for the current index
values = session.query(MTValue). \
filter(MTValue.mapTable == mapTable). \
filter(MTValue.contaminant == contaminant). \
filter(MTValue.index == idx). \
filter(MTValue.layer_id == layer_index). \
order_by(MTValue.id). \
all()
# NOTE: The second order_by modifier in the query above handles the special ordering of XSEDIMENT columns
# in soil erosion properties table (i.e. these columns must be in the same order as the sediments in the
# sediments table. Accomplished by using the sedimentID field). Similarly, the contaminant filter is only
# used in the case of the contaminant transport table. Values that don't belong to a contaminant will have
# a contaminant attribute equal to None. Compare usage of this function by _writeMapTable and
# _writeContaminant.
#Value string
valString = ''
# Define valString
for val in values:
if val.value <= -9999:
continue
# Format value with trailing zeros up to 6 digits
processedValue = vwp(val.value, replaceParaFile)
try:
numString = '%.6f' % processedValue
except:
numString = '%s' % processedValue
valString = '%s%s%s' % (valString, numString, ' ' * 3)
# Determine spacing for aesthetics (so each column lines up)
spacing1 = max(1, 6 - len(str(idx.index)))
spacing2 = max(1, 40 - len(idx.description1))
spacing3 = max(1, 40 - len(idx.description2))
# Compile each mapping table line
if layer_index == 0:
line = '%s%s%s%s%s%s%s\n' % (
idx.index, ' ' * spacing1, idx.description1, ' ' * spacing2, idx.description2, ' ' * spacing3, valString)
else:
num_prepend_spaces = len(str(idx.index)) + spacing1 + len(idx.description1) \
+ spacing2 + len(idx.description2) + spacing3
line = '{0}{1}\n'.format(' ' * num_prepend_spaces, valString)
# Compile each line into a list
lines.append(line)
#-----------------------------
# Define the value header line
#-----------------------------
# Define varString for the header line
varString = ''
# Compile list of variables (from MTValue object list) into a single string of variables
for idx, val in enumerate(values):
if val.variable == 'XSEDIMENT': # Special case for XSEDIMENT variable
if idx >= len(values) - 1:
varString = '%s%s%s%s' % (varString, mapTable.numSed, ' SEDIMENTS....', ' ' * 2)
else:
varString = '%s%s%s' % (varString, val.variable, ' ' * 2)
# Compile the mapping table header
header = 'ID%sDESCRIPTION1%sDESCRIPTION2%s%s\n' % (' ' * 4, ' ' * 28, ' ' * 28, varString)
# Prepend the header line to the list of lines
lines.insert(0, header)
# Return the list of lines
return lines | python | def _valuePivot(self, session, mapTable, contaminant, replaceParaFile):
"""
This function retrieves the values of a mapping table from the database and pivots them into the format that is
required by the mapping table file. This function returns a list of strings that can be printed to the file
directly.
"""
# Retrieve the indices for the current mapping table and mapping table file
indexes = session.query(MTIndex). \
join(MTValue.index). \
filter(MTValue.mapTable == mapTable). \
filter(MTValue.contaminant == contaminant). \
order_by(MTIndex.index). \
all()
# determine number of layers
layer_indices = [0]
if mapTable.name in ('MULTI_LAYER_SOIL', 'RICHARDS_EQN_INFILTRATION_BROOKS'):
layer_indices = range(3)
# ----------------------------------------
# Construct each line in the mapping table
#-----------------------------------------
# All lines will be compiled into this list
lines = []
values = {}
for idx in indexes:
for layer_index in layer_indices:
# Retrieve values for the current index
values = session.query(MTValue). \
filter(MTValue.mapTable == mapTable). \
filter(MTValue.contaminant == contaminant). \
filter(MTValue.index == idx). \
filter(MTValue.layer_id == layer_index). \
order_by(MTValue.id). \
all()
# NOTE: The second order_by modifier in the query above handles the special ordering of XSEDIMENT columns
# in soil erosion properties table (i.e. these columns must be in the same order as the sediments in the
# sediments table. Accomplished by using the sedimentID field). Similarly, the contaminant filter is only
# used in the case of the contaminant transport table. Values that don't belong to a contaminant will have
# a contaminant attribute equal to None. Compare usage of this function by _writeMapTable and
# _writeContaminant.
#Value string
valString = ''
# Define valString
for val in values:
if val.value <= -9999:
continue
# Format value with trailing zeros up to 6 digits
processedValue = vwp(val.value, replaceParaFile)
try:
numString = '%.6f' % processedValue
except:
numString = '%s' % processedValue
valString = '%s%s%s' % (valString, numString, ' ' * 3)
# Determine spacing for aesthetics (so each column lines up)
spacing1 = max(1, 6 - len(str(idx.index)))
spacing2 = max(1, 40 - len(idx.description1))
spacing3 = max(1, 40 - len(idx.description2))
# Compile each mapping table line
if layer_index == 0:
line = '%s%s%s%s%s%s%s\n' % (
idx.index, ' ' * spacing1, idx.description1, ' ' * spacing2, idx.description2, ' ' * spacing3, valString)
else:
num_prepend_spaces = len(str(idx.index)) + spacing1 + len(idx.description1) \
+ spacing2 + len(idx.description2) + spacing3
line = '{0}{1}\n'.format(' ' * num_prepend_spaces, valString)
# Compile each line into a list
lines.append(line)
#-----------------------------
# Define the value header line
#-----------------------------
# Define varString for the header line
varString = ''
# Compile list of variables (from MTValue object list) into a single string of variables
for idx, val in enumerate(values):
if val.variable == 'XSEDIMENT': # Special case for XSEDIMENT variable
if idx >= len(values) - 1:
varString = '%s%s%s%s' % (varString, mapTable.numSed, ' SEDIMENTS....', ' ' * 2)
else:
varString = '%s%s%s' % (varString, val.variable, ' ' * 2)
# Compile the mapping table header
header = 'ID%sDESCRIPTION1%sDESCRIPTION2%s%s\n' % (' ' * 4, ' ' * 28, ' ' * 28, varString)
# Prepend the header line to the list of lines
lines.insert(0, header)
# Return the list of lines
return lines | [
"def",
"_valuePivot",
"(",
"self",
",",
"session",
",",
"mapTable",
",",
"contaminant",
",",
"replaceParaFile",
")",
":",
"# Retrieve the indices for the current mapping table and mapping table file",
"indexes",
"=",
"session",
".",
"query",
"(",
"MTIndex",
")",
".",
"join",
"(",
"MTValue",
".",
"index",
")",
".",
"filter",
"(",
"MTValue",
".",
"mapTable",
"==",
"mapTable",
")",
".",
"filter",
"(",
"MTValue",
".",
"contaminant",
"==",
"contaminant",
")",
".",
"order_by",
"(",
"MTIndex",
".",
"index",
")",
".",
"all",
"(",
")",
"# determine number of layers",
"layer_indices",
"=",
"[",
"0",
"]",
"if",
"mapTable",
".",
"name",
"in",
"(",
"'MULTI_LAYER_SOIL'",
",",
"'RICHARDS_EQN_INFILTRATION_BROOKS'",
")",
":",
"layer_indices",
"=",
"range",
"(",
"3",
")",
"# ----------------------------------------",
"# Construct each line in the mapping table",
"#-----------------------------------------",
"# All lines will be compiled into this list",
"lines",
"=",
"[",
"]",
"values",
"=",
"{",
"}",
"for",
"idx",
"in",
"indexes",
":",
"for",
"layer_index",
"in",
"layer_indices",
":",
"# Retrieve values for the current index",
"values",
"=",
"session",
".",
"query",
"(",
"MTValue",
")",
".",
"filter",
"(",
"MTValue",
".",
"mapTable",
"==",
"mapTable",
")",
".",
"filter",
"(",
"MTValue",
".",
"contaminant",
"==",
"contaminant",
")",
".",
"filter",
"(",
"MTValue",
".",
"index",
"==",
"idx",
")",
".",
"filter",
"(",
"MTValue",
".",
"layer_id",
"==",
"layer_index",
")",
".",
"order_by",
"(",
"MTValue",
".",
"id",
")",
".",
"all",
"(",
")",
"# NOTE: The second order_by modifier in the query above handles the special ordering of XSEDIMENT columns",
"# in soil erosion properties table (i.e. these columns must be in the same order as the sediments in the",
"# sediments table. Accomplished by using the sedimentID field). Similarly, the contaminant filter is only",
"# used in the case of the contaminant transport table. Values that don't belong to a contaminant will have",
"# a contaminant attribute equal to None. Compare usage of this function by _writeMapTable and",
"# _writeContaminant.",
"#Value string",
"valString",
"=",
"''",
"# Define valString",
"for",
"val",
"in",
"values",
":",
"if",
"val",
".",
"value",
"<=",
"-",
"9999",
":",
"continue",
"# Format value with trailing zeros up to 6 digits",
"processedValue",
"=",
"vwp",
"(",
"val",
".",
"value",
",",
"replaceParaFile",
")",
"try",
":",
"numString",
"=",
"'%.6f'",
"%",
"processedValue",
"except",
":",
"numString",
"=",
"'%s'",
"%",
"processedValue",
"valString",
"=",
"'%s%s%s'",
"%",
"(",
"valString",
",",
"numString",
",",
"' '",
"*",
"3",
")",
"# Determine spacing for aesthetics (so each column lines up)",
"spacing1",
"=",
"max",
"(",
"1",
",",
"6",
"-",
"len",
"(",
"str",
"(",
"idx",
".",
"index",
")",
")",
")",
"spacing2",
"=",
"max",
"(",
"1",
",",
"40",
"-",
"len",
"(",
"idx",
".",
"description1",
")",
")",
"spacing3",
"=",
"max",
"(",
"1",
",",
"40",
"-",
"len",
"(",
"idx",
".",
"description2",
")",
")",
"# Compile each mapping table line",
"if",
"layer_index",
"==",
"0",
":",
"line",
"=",
"'%s%s%s%s%s%s%s\\n'",
"%",
"(",
"idx",
".",
"index",
",",
"' '",
"*",
"spacing1",
",",
"idx",
".",
"description1",
",",
"' '",
"*",
"spacing2",
",",
"idx",
".",
"description2",
",",
"' '",
"*",
"spacing3",
",",
"valString",
")",
"else",
":",
"num_prepend_spaces",
"=",
"len",
"(",
"str",
"(",
"idx",
".",
"index",
")",
")",
"+",
"spacing1",
"+",
"len",
"(",
"idx",
".",
"description1",
")",
"+",
"spacing2",
"+",
"len",
"(",
"idx",
".",
"description2",
")",
"+",
"spacing3",
"line",
"=",
"'{0}{1}\\n'",
".",
"format",
"(",
"' '",
"*",
"num_prepend_spaces",
",",
"valString",
")",
"# Compile each lines into a list",
"lines",
".",
"append",
"(",
"line",
")",
"#-----------------------------",
"# Define the value header line",
"#-----------------------------",
"# Define varString for the header line",
"varString",
"=",
"''",
"# Compile list of variables (from MTValue object list) into a single string of variables",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"values",
")",
":",
"if",
"val",
".",
"variable",
"==",
"'XSEDIMENT'",
":",
"# Special case for XSEDIMENT variable",
"if",
"idx",
">=",
"len",
"(",
"values",
")",
"-",
"1",
":",
"varString",
"=",
"'%s%s%s%s'",
"%",
"(",
"varString",
",",
"mapTable",
".",
"numSed",
",",
"' SEDIMENTS....'",
",",
"' '",
"*",
"2",
")",
"else",
":",
"varString",
"=",
"'%s%s%s'",
"%",
"(",
"varString",
",",
"val",
".",
"variable",
",",
"' '",
"*",
"2",
")",
"# Compile the mapping table header",
"header",
"=",
"'ID%sDESCRIPTION1%sDESCRIPTION2%s%s\\n'",
"%",
"(",
"' '",
"*",
"4",
",",
"' '",
"*",
"28",
",",
"' '",
"*",
"28",
",",
"varString",
")",
"# Prepend the header line to the list of lines",
"lines",
".",
"insert",
"(",
"0",
",",
"header",
")",
"# Return the list of lines",
"return",
"lines"
] | This function retrieves the values of a mapping table from the database and pivots them into the format that is
required by the mapping table file. This function returns a list of strings that can be printed to the file
directly. | [
"This",
"function",
"retrieves",
"the",
"values",
"of",
"a",
"mapping",
"table",
"from",
"the",
"database",
"and",
"pivots",
"them",
"into",
"the",
"format",
"that",
"is",
"required",
"by",
"the",
"mapping",
"table",
"file",
".",
"This",
"function",
"returns",
"a",
"list",
"of",
"strings",
"that",
"can",
"be",
"printed",
"to",
"the",
"file",
"directly",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L477-L576 | train |
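_valuePivot above builds the header's variable string and collapses the trailing run of XSEDIMENT columns into a single "N SEDIMENTS...." token. The same loop over an invented variable list (column names and the sediment count are illustrative):

variables = ['SPLASH_COEF', 'DETACH_COEF', 'XSEDIMENT', 'XSEDIMENT']  # hypothetical columns
num_sed = 2

var_string = ''
for i, name in enumerate(variables):
    if name == 'XSEDIMENT':
        # Only the last XSEDIMENT column contributes the summary token.
        if i >= len(variables) - 1:
            var_string = '%s%s%s%s' % (var_string, num_sed, ' SEDIMENTS....', ' ' * 2)
    else:
        var_string = '%s%s%s' % (var_string, name, ' ' * 2)

print(var_string)   # SPLASH_COEF  DETACH_COEF  2 SEDIMENTS....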
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile._preprocessContaminantOutFilePath | def _preprocessContaminantOutFilePath(outPath):
"""
Preprocess the contaminant output file path to a relative path.
"""
if '/' in outPath:
splitPath = outPath.split('/')
elif '\\' in outPath:
splitPath = outPath.split('\\')
else:
splitPath = [outPath, ]
if splitPath[-1] == '':
outputFilename = splitPath[-2]
else:
outputFilename = splitPath[-1]
if '.' in outputFilename:
outputFilename = outputFilename.split('.')[0]
return outputFilename | python | def _preprocessContaminantOutFilePath(outPath):
"""
Preprocess the contaminant output file path to a relative path.
"""
if '/' in outPath:
splitPath = outPath.split('/')
elif '\\' in outPath:
splitPath = outPath.split('\\')
else:
splitPath = [outPath, ]
if splitPath[-1] == '':
outputFilename = splitPath[-2]
else:
outputFilename = splitPath[-1]
if '.' in outputFilename:
outputFilename = outputFilename.split('.')[0]
return outputFilename | [
"def",
"_preprocessContaminantOutFilePath",
"(",
"outPath",
")",
":",
"if",
"'/'",
"in",
"outPath",
":",
"splitPath",
"=",
"outPath",
".",
"split",
"(",
"'/'",
")",
"elif",
"'\\\\'",
"in",
"outPath",
":",
"splitPath",
"=",
"outPath",
".",
"split",
"(",
"'\\\\'",
")",
"else",
":",
"splitPath",
"=",
"[",
"outPath",
",",
"]",
"if",
"splitPath",
"[",
"-",
"1",
"]",
"==",
"''",
":",
"outputFilename",
"=",
"splitPath",
"[",
"-",
"2",
"]",
"else",
":",
"outputFilename",
"=",
"splitPath",
"[",
"-",
"1",
"]",
"if",
"'.'",
"in",
"outputFilename",
":",
"outputFilename",
"=",
"outputFilename",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"outputFilename"
] | Preprocess the contaminant output file path to a relative path. | [
"Preprocess",
"the",
"contaminant",
"output",
"file",
"path",
"to",
"a",
"relative",
"path",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L587-L609 | train |
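_preprocessContaminantOutFilePath above reduces a forward- or back-slash path to a bare base name with no extension. A standalone re-implementation with sample paths (the paths themselves are invented):

def to_base_name(out_path):
    # Illustrative re-implementation: keep only the final path component, drop any extension.
    if '/' in out_path:
        parts = out_path.split('/')
    elif '\\' in out_path:
        parts = out_path.split('\\')
    else:
        parts = [out_path]
    name = parts[-2] if parts[-1] == '' else parts[-1]
    return name.split('.')[0] if '.' in name else name

print(to_base_name('output/contam/copper.out'))   # copper
print(to_base_name('C:\\gssha\\copper_out\\'))     # copper_out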
CI-WATER/gsshapy | gsshapy/orm/cmt.py | MapTableFile.addRoughnessMapFromLandUse | def addRoughnessMapFromLandUse(self, name,
session,
land_use_grid,
land_use_to_roughness_table=None,
land_use_grid_id=None,
):
"""
Adds a roughness map from land use file
Example::
from gsshapy.orm import ProjectFile
from gsshapy.lib import db_tools as dbt
from os import path, chdir
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
land_use_grid = 'LC_5min_global_2012.tif'
land_use_to_roughness_table = '/gsshapy/gridtogssha/land_cover/land_cover_glcf_modis.txt'
# Create Test DB
sqlalchemy_url, sql_engine = dbt.init_sqlite_memory()
# Create DB Sessions
db_session = dbt.create_session(sqlalchemy_url, sql_engine)
# Instantiate GSSHAPY object for reading to database
project_manager = ProjectFile()
# Call read method
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
project_manager.mapTableFile.addRoughnessMapFromLandUse("roughness",
db_session,
land_use_grid,
land_use_to_roughness_table,
)
# WRITE OUT UPDATED GSSHA PROJECT FILE
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard')
"""
LAND_USE_GRID_TABLES = {
'nga' : 'land_cover_nga.txt',
'glcf' : 'land_cover_glcf_modis.txt',
'nlcd' : 'land_cover_nlcd.txt',
}
# read in table
if isinstance(land_use_to_roughness_table, pd.DataFrame):
df = land_use_to_roughness_table
else:
if land_use_to_roughness_table is None:
if land_use_grid_id is None:
raise ValueError("Must have land_use_to_roughness_table or land_use_grid_id set ...")
land_use_to_roughness_table = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'grid',
'land_cover',
LAND_USE_GRID_TABLES[land_use_grid_id])
# make sure paths are absolute as the working directory changes
land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)
df = pd.read_table(land_use_to_roughness_table, delim_whitespace=True,
header=None, skiprows=1,
names=('id', 'description', 'roughness'),
dtype={'id':'int', 'description':'str', 'roughness':'float'},
)
# make sure paths are absolute as the working directory changes
land_use_grid = os.path.abspath(land_use_grid)
# resample land use grid to gssha grid
land_use_resampled = resample_grid(land_use_grid,
self.projectFile.getGrid(),
resample_method=gdalconst.GRA_NearestNeighbour,
as_gdal_grid=True)
unique_land_use_ids = np.unique(land_use_resampled.np_array())
# only add ids in index map subset
df = df[df.id.isin(unique_land_use_ids)]
# make sure all needed land use IDs exist
for land_use_id in unique_land_use_ids:
if land_use_id not in df.id.values:
raise IndexError("Land use ID {0} not found in table.".format(land_use_id))
# delete duplicate/old tables with same name if they exist
self.deleteMapTable("ROUGHNESS", session)
# get num ids
mapTable = MapTable(name="ROUGHNESS",
numIDs=len(df.index),
maxNumCells=0,
numSed=0,
numContam=0)
# Create GSSHAPY IndexMap object from result object
indexMap = IndexMap(name=name)
indexMap.mapTableFile = self
mapTable.indexMap = indexMap
# Associate MapTable with this MapTableFile and IndexMaps
mapTable.mapTableFile = self
# add values to table
for row in df.itertuples():
idx = MTIndex(str(row.id), row.description, '')
idx.indexMap = indexMap
val = MTValue('ROUGH', row.roughness)
val.index = idx
val.mapTable = mapTable
# remove MANNING_N card because it is mutually exclusive
manningn_card = self.projectFile.getCard('MANNING_N')
if manningn_card:
session.delete(manningn_card)
session.commit()
mapTable.indexMap.filename = '{0}.idx'.format(name)
# write file
with tmp_chdir(self.projectFile.project_directory):
land_use_resampled.to_grass_ascii(mapTable.indexMap.filename,
print_nodata=False)
# update project card
if not self.projectFile.getCard('MAPPING_TABLE'):
self.projectFile.setCard('MAPPING_TABLE',
'{0}.cmt'.format(self.projectFile.name),
add_quotes=True) | python | def addRoughnessMapFromLandUse(self, name,
session,
land_use_grid,
land_use_to_roughness_table=None,
land_use_grid_id=None,
):
"""
Adds a roughness map from land use file
Example::
from gsshapy.orm import ProjectFile
from gsshapy.lib import db_tools as dbt
from os import path, chdir
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
land_use_grid = 'LC_5min_global_2012.tif'
land_use_to_roughness_table = '/gsshapy/gridtogssha/land_cover/land_cover_glcf_modis.txt'
# Create Test DB
sqlalchemy_url, sql_engine = dbt.init_sqlite_memory()
# Create DB Sessions
db_session = dbt.create_session(sqlalchemy_url, sql_engine)
# Instantiate GSSHAPY object for reading to database
project_manager = ProjectFile()
# Call read method
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
project_manager.mapTableFile.addRoughnessMapFromLandUse("roughness",
db_session,
land_use_grid,
land_use_to_roughness_table,
)
# WRITE OUT UPDATED GSSHA PROJECT FILE
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard')
"""
LAND_USE_GRID_TABLES = {
'nga' : 'land_cover_nga.txt',
'glcf' : 'land_cover_glcf_modis.txt',
'nlcd' : 'land_cover_nlcd.txt',
}
# read in table
if isinstance(land_use_to_roughness_table, pd.DataFrame):
df = land_use_to_roughness_table
else:
if land_use_to_roughness_table is None:
if land_use_grid_id is None:
raise ValueError("Must have land_use_to_roughness_table or land_use_grid_id set ...")
land_use_to_roughness_table = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'grid',
'land_cover',
LAND_USE_GRID_TABLES[land_use_grid_id])
# make sure paths are absolute as the working directory changes
land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)
df = pd.read_table(land_use_to_roughness_table, delim_whitespace=True,
header=None, skiprows=1,
names=('id', 'description', 'roughness'),
dtype={'id':'int', 'description':'str', 'roughness':'float'},
)
# make sure paths are absolute as the working directory changes
land_use_grid = os.path.abspath(land_use_grid)
# resample land use grid to gssha grid
land_use_resampled = resample_grid(land_use_grid,
self.projectFile.getGrid(),
resample_method=gdalconst.GRA_NearestNeighbour,
as_gdal_grid=True)
unique_land_use_ids = np.unique(land_use_resampled.np_array())
# only add ids in index map subset
df = df[df.id.isin(unique_land_use_ids)]
# make sure all needed land use IDs exist
for land_use_id in unique_land_use_ids:
if land_use_id not in df.id.values:
raise IndexError("Land use ID {0} not found in table.".format(land_use_id))
# delete duplicate/old tables with same name if they exist
self.deleteMapTable("ROUGHNESS", session)
# get num ids
mapTable = MapTable(name="ROUGHNESS",
numIDs=len(df.index),
maxNumCells=0,
numSed=0,
numContam=0)
# Create GSSHAPY IndexMap object from result object
indexMap = IndexMap(name=name)
indexMap.mapTableFile = self
mapTable.indexMap = indexMap
# Associate MapTable with this MapTableFile and IndexMaps
mapTable.mapTableFile = self
# add values to table
for row in df.itertuples():
idx = MTIndex(str(row.id), row.description, '')
idx.indexMap = indexMap
val = MTValue('ROUGH', row.roughness)
val.index = idx
val.mapTable = mapTable
# remove MANNING_N card because it is mutually exclusive
manningn_card = self.projectFile.getCard('MANNING_N')
if manningn_card:
session.delete(manningn_card)
session.commit()
mapTable.indexMap.filename = '{0}.idx'.format(name)
# write file
with tmp_chdir(self.projectFile.project_directory):
land_use_resampled.to_grass_ascii(mapTable.indexMap.filename,
print_nodata=False)
# update project card
if not self.projectFile.getCard('MAPPING_TABLE'):
self.projectFile.setCard('MAPPING_TABLE',
'{0}.cmt'.format(self.projectFile.name),
add_quotes=True) | [
"def",
"addRoughnessMapFromLandUse",
"(",
"self",
",",
"name",
",",
"session",
",",
"land_use_grid",
",",
"land_use_to_roughness_table",
"=",
"None",
",",
"land_use_grid_id",
"=",
"None",
",",
")",
":",
"LAND_USE_GRID_TABLES",
"=",
"{",
"'nga'",
":",
"'land_cover_nga.txt'",
",",
"'glcf'",
":",
"'land_cover_glcf_modis.txt'",
",",
"'nlcd'",
":",
"'land_cover_nlcd.txt'",
",",
"}",
"# read in table",
"if",
"isinstance",
"(",
"land_use_to_roughness_table",
",",
"pd",
".",
"DataFrame",
")",
":",
"df",
"=",
"land_use_to_roughness_table",
"else",
":",
"if",
"land_use_to_roughness_table",
"is",
"None",
":",
"if",
"land_use_grid_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must have land_use_to_roughness_table or land_use_grid_id set ...\"",
")",
"land_use_to_roughness_table",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"'..'",
",",
"'grid'",
",",
"'land_cover'",
",",
"LAND_USE_GRID_TABLES",
"[",
"land_use_grid_id",
"]",
")",
"# make sure paths are absolute as the working directory changes",
"land_use_to_roughness_table",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"land_use_to_roughness_table",
")",
"df",
"=",
"pd",
".",
"read_table",
"(",
"land_use_to_roughness_table",
",",
"delim_whitespace",
"=",
"True",
",",
"header",
"=",
"None",
",",
"skiprows",
"=",
"1",
",",
"names",
"=",
"(",
"'id'",
",",
"'description'",
",",
"'roughness'",
")",
",",
"dtype",
"=",
"{",
"'id'",
":",
"'int'",
",",
"'description'",
":",
"'str'",
",",
"'roughness'",
":",
"'float'",
"}",
",",
")",
"# make sure paths are absolute as the working directory changes",
"land_use_grid",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"land_use_grid",
")",
"# resample land use grid to gssha grid",
"land_use_resampled",
"=",
"resample_grid",
"(",
"land_use_grid",
",",
"self",
".",
"projectFile",
".",
"getGrid",
"(",
")",
",",
"resample_method",
"=",
"gdalconst",
".",
"GRA_NearestNeighbour",
",",
"as_gdal_grid",
"=",
"True",
")",
"unique_land_use_ids",
"=",
"np",
".",
"unique",
"(",
"land_use_resampled",
".",
"np_array",
"(",
")",
")",
"# only add ids in index map subset",
"df",
"=",
"df",
"[",
"df",
".",
"id",
".",
"isin",
"(",
"unique_land_use_ids",
")",
"]",
"# make sure all needed land use IDs exist",
"for",
"land_use_id",
"in",
"unique_land_use_ids",
":",
"if",
"land_use_id",
"not",
"in",
"df",
".",
"id",
".",
"values",
":",
"raise",
"IndexError",
"(",
"\"Land use ID {0} not found in table.\"",
".",
"format",
"(",
"land_use_id",
")",
")",
"# delete duplicate/old tables with same name if they exist",
"self",
".",
"deleteMapTable",
"(",
"\"ROUGHNESS\"",
",",
"session",
")",
"# get num ids",
"mapTable",
"=",
"MapTable",
"(",
"name",
"=",
"\"ROUGHNESS\"",
",",
"numIDs",
"=",
"len",
"(",
"df",
".",
"index",
")",
",",
"maxNumCells",
"=",
"0",
",",
"numSed",
"=",
"0",
",",
"numContam",
"=",
"0",
")",
"# Create GSSHAPY IndexMap object from result object",
"indexMap",
"=",
"IndexMap",
"(",
"name",
"=",
"name",
")",
"indexMap",
".",
"mapTableFile",
"=",
"self",
"mapTable",
".",
"indexMap",
"=",
"indexMap",
"# Associate MapTable with this MapTableFile and IndexMaps",
"mapTable",
".",
"mapTableFile",
"=",
"self",
"# add values to table",
"for",
"row",
"in",
"df",
".",
"itertuples",
"(",
")",
":",
"idx",
"=",
"MTIndex",
"(",
"str",
"(",
"row",
".",
"id",
")",
",",
"row",
".",
"description",
",",
"''",
")",
"idx",
".",
"indexMap",
"=",
"indexMap",
"val",
"=",
"MTValue",
"(",
"'ROUGH'",
",",
"row",
".",
"roughness",
")",
"val",
".",
"index",
"=",
"idx",
"val",
".",
"mapTable",
"=",
"mapTable",
"# remove MANNING_N card becasue it is mutually exclusive",
"manningn_card",
"=",
"self",
".",
"projectFile",
".",
"getCard",
"(",
"'MANNING_N'",
")",
"if",
"manningn_card",
":",
"session",
".",
"delete",
"(",
"manningn_card",
")",
"session",
".",
"commit",
"(",
")",
"mapTable",
".",
"indexMap",
".",
"filename",
"=",
"'{0}.idx'",
".",
"format",
"(",
"name",
")",
"# write file",
"with",
"tmp_chdir",
"(",
"self",
".",
"projectFile",
".",
"project_directory",
")",
":",
"land_use_resampled",
".",
"to_grass_ascii",
"(",
"mapTable",
".",
"indexMap",
".",
"filename",
",",
"print_nodata",
"=",
"False",
")",
"# update project card",
"if",
"not",
"self",
".",
"projectFile",
".",
"getCard",
"(",
"'MAPPING_TABLE'",
")",
":",
"self",
".",
"projectFile",
".",
"setCard",
"(",
"'MAPPING_TABLE'",
",",
"'{0}.cmt'",
".",
"format",
"(",
"self",
".",
"projectFile",
".",
"name",
")",
",",
"add_quotes",
"=",
"True",
")"
] | Adds a roughness map from land use file
Example::
from gsshapy.orm import ProjectFile
from gsshapy.lib import db_tools as dbt
from os import path, chdir
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
land_use_grid = 'LC_5min_global_2012.tif'
land_use_to_roughness_table = '/gsshapy/gridtogssha/land_cover/land_cover_glcf_modis.txt'
# Create Test DB
sqlalchemy_url, sql_engine = dbt.init_sqlite_memory()
# Create DB Sessions
db_session = dbt.create_session(sqlalchemy_url, sql_engine)
# Instantiate GSSHAPY object for reading to database
project_manager = ProjectFile()
# Call read method
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
project_manager.mapTableFile.addRoughnessMapFromLandUse("roughness",
db_session,
land_use_grid,
land_use_to_roughness_table,
)
# WRITE OUT UPDATED GSSHA PROJECT FILE
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard') | [
"Adds",
"a",
"roughness",
"map",
"from",
"land",
"use",
"file"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L611-L744 | train |
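Besides the text file shown in the record's example, addRoughnessMapFromLandUse also accepts the lookup table as a pandas DataFrame (the method checks isinstance(land_use_to_roughness_table, pd.DataFrame)). A hypothetical in-memory table with the three expected columns; the land-use ids and roughness values are illustrative only:

import pandas as pd

roughness_df = pd.DataFrame({
    'id': [11, 21, 41],
    'description': ['Open Water', 'Developed', 'Deciduous Forest'],
    'roughness': [0.030, 0.015, 0.120],
})

# project_manager.mapTableFile.addRoughnessMapFromLandUse(
#     'roughness', db_session, land_use_grid, land_use_to_roughness_table=roughness_df)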
vinci1it2000/schedula | schedula/utils/sol.py | Solution._set_wildcards | def _set_wildcards(self, inputs=None, outputs=None):
"""
Update wildcards set with the input data nodes that are also outputs.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
"""
w = self._wildcards = set() # Clean wildcards.
if outputs and inputs:
node, wi = self.nodes, self._wait_in.get # Namespace shortcut.
# Input data nodes that are in output_targets.
w_crd = {u: node[u] for u in inputs if u in outputs or wi(u, False)}
# Data nodes without the wildcard.
w.update([k for k, v in w_crd.items() if v.get('wildcard', True)]) | python | def _set_wildcards(self, inputs=None, outputs=None):
"""
Update wildcards set with the input data nodes that are also outputs.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
"""
w = self._wildcards = set() # Clean wildcards.
if outputs and inputs:
node, wi = self.nodes, self._wait_in.get # Namespace shortcut.
# Input data nodes that are in output_targets.
w_crd = {u: node[u] for u in inputs if u in outputs or wi(u, False)}
# Data nodes without the wildcard.
w.update([k for k, v in w_crd.items() if v.get('wildcard', True)]) | [
"def",
"_set_wildcards",
"(",
"self",
",",
"inputs",
"=",
"None",
",",
"outputs",
"=",
"None",
")",
":",
"w",
"=",
"self",
".",
"_wildcards",
"=",
"set",
"(",
")",
"# Clean wildcards.",
"if",
"outputs",
"and",
"inputs",
":",
"node",
",",
"wi",
"=",
"self",
".",
"nodes",
",",
"self",
".",
"_wait_in",
".",
"get",
"# Namespace shortcut.",
"# Input data nodes that are in output_targets.",
"w_crd",
"=",
"{",
"u",
":",
"node",
"[",
"u",
"]",
"for",
"u",
"in",
"inputs",
"if",
"u",
"in",
"outputs",
"or",
"wi",
"(",
"u",
",",
"False",
")",
"}",
"# Data nodes without the wildcard.",
"w",
".",
"update",
"(",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"w_crd",
".",
"items",
"(",
")",
"if",
"v",
".",
"get",
"(",
"'wildcard'",
",",
"True",
")",
"]",
")"
] | Update wildcards set with the input data nodes that are also outputs.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional | [
"Update",
"wildcards",
"set",
"with",
"the",
"input",
"data",
"nodes",
"that",
"are",
"also",
"outputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L108-L130 | train |
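_set_wildcards above keeps an input node as a wildcard when it is also requested as an output (or flagged in _wait_in) and its node attributes do not set wildcard=False. The same selection with plain dictionaries (node names and attributes are invented):

nodes = {'a': {}, 'b': {}, 'c': {'wildcard': False}}   # hypothetical node attributes
wait_in = {}
inputs, outputs = ['a', 'b', 'c'], ['b', 'c', 'd']

# Input nodes that are also outputs (or flagged as waiting inputs).
candidates = {u: nodes[u] for u in inputs if u in outputs or wait_in.get(u, False)}
# Drop nodes that explicitly disable the wildcard behaviour.
wildcards = {k for k, v in candidates.items() if v.get('wildcard', True)}
print(wildcards)   # {'b'}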
vinci1it2000/schedula | schedula/utils/sol.py | Solution.result | def result(self, timeout=None):
"""
Set all asynchronous results.
:param timeout:
The number of seconds to wait for the result if the futures aren't
done. If None, then there is no limit on the wait time.
:type timeout: float
:return:
Update Solution.
:rtype: Solution
"""
it, exceptions, future_lists = [], [], []
from concurrent.futures import Future, wait as wait_fut
def update(fut, data, key):
if isinstance(fut, Future):
it.append((fut, data, key))
elif isinstance(fut, AsyncList) and fut not in future_lists:
future_lists.append(fut)
it.extend([(j, fut, i)
for i, j in enumerate(fut)
if isinstance(j, Future)][::-1])
for s in self.sub_sol.values():
for k, v in list(s.items()):
update(v, s, k)
for d in s.workflow.nodes.values():
if 'results' in d:
update(d['results'], d, 'results')
for d in s.workflow.edges.values():
if 'value' in d:
update(d['value'], d, 'value')
wait_fut({v[0] for v in it}, timeout)
for f, d, k in it:
try:
d[k] = await_result(f, 0)
except SkipNode as e:
exceptions.append((f, d, k, e.ex))
del d[k]
except (Exception, ExecutorShutdown, DispatcherAbort) as ex:
exceptions.append((f, d, k, ex))
del d[k]
if exceptions:
raise exceptions[0][-1]
return self | python | def result(self, timeout=None):
"""
Set all asynchronous results.
:param timeout:
The number of seconds to wait for the result if the futures aren't
done. If None, then there is no limit on the wait time.
:type timeout: float
:return:
Update Solution.
:rtype: Solution
"""
it, exceptions, future_lists = [], [], []
from concurrent.futures import Future, wait as wait_fut
def update(fut, data, key):
if isinstance(fut, Future):
it.append((fut, data, key))
elif isinstance(fut, AsyncList) and fut not in future_lists:
future_lists.append(fut)
it.extend([(j, fut, i)
for i, j in enumerate(fut)
if isinstance(j, Future)][::-1])
for s in self.sub_sol.values():
for k, v in list(s.items()):
update(v, s, k)
for d in s.workflow.nodes.values():
if 'results' in d:
update(d['results'], d, 'results')
for d in s.workflow.edges.values():
if 'value' in d:
update(d['value'], d, 'value')
wait_fut({v[0] for v in it}, timeout)
for f, d, k in it:
try:
d[k] = await_result(f, 0)
except SkipNode as e:
exceptions.append((f, d, k, e.ex))
del d[k]
except (Exception, ExecutorShutdown, DispatcherAbort) as ex:
exceptions.append((f, d, k, ex))
del d[k]
if exceptions:
raise exceptions[0][-1]
return self | [
"def",
"result",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"it",
",",
"exceptions",
",",
"future_lists",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"from",
"concurrent",
".",
"futures",
"import",
"Future",
",",
"wait",
"as",
"wait_fut",
"def",
"update",
"(",
"fut",
",",
"data",
",",
"key",
")",
":",
"if",
"isinstance",
"(",
"fut",
",",
"Future",
")",
":",
"it",
".",
"append",
"(",
"(",
"fut",
",",
"data",
",",
"key",
")",
")",
"elif",
"isinstance",
"(",
"fut",
",",
"AsyncList",
")",
"and",
"fut",
"not",
"in",
"future_lists",
":",
"future_lists",
".",
"append",
"(",
"fut",
")",
"it",
".",
"extend",
"(",
"[",
"(",
"j",
",",
"fut",
",",
"i",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"fut",
")",
"if",
"isinstance",
"(",
"j",
",",
"Future",
")",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"for",
"s",
"in",
"self",
".",
"sub_sol",
".",
"values",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"s",
".",
"items",
"(",
")",
")",
":",
"update",
"(",
"v",
",",
"s",
",",
"k",
")",
"for",
"d",
"in",
"s",
".",
"workflow",
".",
"nodes",
".",
"values",
"(",
")",
":",
"if",
"'results'",
"in",
"d",
":",
"update",
"(",
"d",
"[",
"'results'",
"]",
",",
"d",
",",
"'results'",
")",
"for",
"d",
"in",
"s",
".",
"workflow",
".",
"edges",
".",
"values",
"(",
")",
":",
"if",
"'value'",
"in",
"d",
":",
"update",
"(",
"d",
"[",
"'value'",
"]",
",",
"d",
",",
"'value'",
")",
"wait_fut",
"(",
"{",
"v",
"[",
"0",
"]",
"for",
"v",
"in",
"it",
"}",
",",
"timeout",
")",
"for",
"f",
",",
"d",
",",
"k",
"in",
"it",
":",
"try",
":",
"d",
"[",
"k",
"]",
"=",
"await_result",
"(",
"f",
",",
"0",
")",
"except",
"SkipNode",
"as",
"e",
":",
"exceptions",
".",
"append",
"(",
"(",
"f",
",",
"d",
",",
"k",
",",
"e",
".",
"ex",
")",
")",
"del",
"d",
"[",
"k",
"]",
"except",
"(",
"Exception",
",",
"ExecutorShutdown",
",",
"DispatcherAbort",
")",
"as",
"ex",
":",
"exceptions",
".",
"append",
"(",
"(",
"f",
",",
"d",
",",
"k",
",",
"ex",
")",
")",
"del",
"d",
"[",
"k",
"]",
"if",
"exceptions",
":",
"raise",
"exceptions",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"return",
"self"
] | Set all asynchronous results.
:param timeout:
The number of seconds to wait for the result if the futures aren't
done. If None, then there is no limit on the wait time.
:type timeout: float
:return:
Update Solution.
:rtype: Solution | [
"Set",
"all",
"asynchronous",
"results",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L194-L245 | train |
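Solution.result above walks its containers for Future objects, waits on them, and writes the resolved results back in place. A minimal sketch of that collect, wait, and replace pattern with an invented container:

from concurrent.futures import ThreadPoolExecutor, Future, wait

with ThreadPoolExecutor() as pool:
    data = {'x': 1, 'y': pool.submit(lambda: 2)}   # mix of plain values and futures
    # Collect (future, container, key) triples so results can be written back in place.
    pending = [(v, data, k) for k, v in data.items() if isinstance(v, Future)]
    wait({fut for fut, _, _ in pending}, timeout=None)
    for fut, container, key in pending:
        container[key] = fut.result(0)

print(data)   # {'x': 1, 'y': 2}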
vinci1it2000/schedula | schedula/utils/sol.py | Solution._check_targets | def _check_targets(self):
"""
Returns a function to terminate the ArciDispatch algorithm when all
targets have been visited.
:return:
A function to terminate the ArciDispatch algorithm.
:rtype: (str) -> bool
"""
if self.outputs:
targets = self.outputs.copy() # Namespace shortcut for speed.
def check_targets(node_id):
"""
Terminates ArciDispatch algorithm when all targets have been
visited.
:param node_id:
Data or function node id.
:type node_id: str
:return:
True if all targets have been visited, otherwise False.
:rtype: bool
"""
try:
targets.remove(node_id) # Remove visited node.
return not targets # If no targets terminate the algorithm.
except KeyError: # The node is not in the targets set.
return False
else:
# noinspection PyUnusedLocal
def check_targets(node_id):
return False
return check_targets | python | def _check_targets(self):
"""
Returns a function to terminate the ArciDispatch algorithm when all
targets have been visited.
:return:
A function to terminate the ArciDispatch algorithm.
:rtype: (str) -> bool
"""
if self.outputs:
targets = self.outputs.copy() # Namespace shortcut for speed.
def check_targets(node_id):
"""
Terminates ArciDispatch algorithm when all targets have been
visited.
:param node_id:
Data or function node id.
:type node_id: str
:return:
True if all targets have been visited, otherwise False.
:rtype: bool
"""
try:
targets.remove(node_id) # Remove visited node.
return not targets # If no targets terminate the algorithm.
except KeyError: # The node is not in the targets set.
return False
else:
# noinspection PyUnusedLocal
def check_targets(node_id):
return False
return check_targets | [
"def",
"_check_targets",
"(",
"self",
")",
":",
"if",
"self",
".",
"outputs",
":",
"targets",
"=",
"self",
".",
"outputs",
".",
"copy",
"(",
")",
"# Namespace shortcut for speed.",
"def",
"check_targets",
"(",
"node_id",
")",
":",
"\"\"\"\n Terminates ArciDispatch algorithm when all targets have been\n visited.\n\n :param node_id:\n Data or function node id.\n :type node_id: str\n\n :return:\n True if all targets have been visited, otherwise False.\n :rtype: bool\n \"\"\"",
"try",
":",
"targets",
".",
"remove",
"(",
"node_id",
")",
"# Remove visited node.",
"return",
"not",
"targets",
"# If no targets terminate the algorithm.",
"except",
"KeyError",
":",
"# The node is not in the targets set.",
"return",
"False",
"else",
":",
"# noinspection PyUnusedLocal",
"def",
"check_targets",
"(",
"node_id",
")",
":",
"return",
"False",
"return",
"check_targets"
] | Returns a function to terminate the ArciDispatch algorithm when all
targets have been visited.
:return:
A function to terminate the ArciDispatch algorithm.
:rtype: (str) -> bool | [
"Returns",
"a",
"function",
"to",
"terminate",
"the",
"ArciDispatch",
"algorithm",
"when",
"all",
"targets",
"have",
"been",
"visited",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L375-L413 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._get_node_estimations | def _get_node_estimations(self, node_attr, node_id):
"""
Returns the data nodes estimations and `wait_inputs` flag.
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data node's id.
:type node_id: str
:returns:
- node estimations with minimum distance from the starting node, and
- `wait_inputs` flag
:rtype: (dict[str, T], bool)
"""
# Get data node estimations.
estimations = self._wf_pred[node_id]
wait_in = node_attr['wait_inputs'] # Namespace shortcut.
# Check if node has multiple estimations and it is not waiting inputs.
if len(estimations) > 1 and not self._wait_in.get(node_id, wait_in):
# Namespace shortcuts.
dist, edg_length, adj = self.dist, self._edge_length, self.dmap.adj
est = [] # Estimations' heap.
for k, v in estimations.items(): # Calculate length.
if k is not START:
d = dist[k] + edg_length(adj[k][node_id], node_attr)
heapq.heappush(est, (d, k, v))
# The estimation with minimum distance from the starting node.
estimations = {est[0][1]: est[0][2]}
# Remove unused workflow edges.
self.workflow.remove_edges_from([(v[1], node_id) for v in est[1:]])
return estimations, wait_in | python | def _get_node_estimations(self, node_attr, node_id):
"""
Returns the data nodes estimations and `wait_inputs` flag.
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data node's id.
:type node_id: str
:returns:
- node estimations with minimum distance from the starting node, and
- `wait_inputs` flag
:rtype: (dict[str, T], bool)
"""
# Get data node estimations.
estimations = self._wf_pred[node_id]
wait_in = node_attr['wait_inputs'] # Namespace shortcut.
# Check if node has multiple estimations and it is not waiting inputs.
if len(estimations) > 1 and not self._wait_in.get(node_id, wait_in):
# Namespace shortcuts.
dist, edg_length, adj = self.dist, self._edge_length, self.dmap.adj
est = [] # Estimations' heap.
for k, v in estimations.items(): # Calculate length.
if k is not START:
d = dist[k] + edg_length(adj[k][node_id], node_attr)
heapq.heappush(est, (d, k, v))
# The estimation with minimum distance from the starting node.
estimations = {est[0][1]: est[0][2]}
# Remove unused workflow edges.
self.workflow.remove_edges_from([(v[1], node_id) for v in est[1:]])
return estimations, wait_in | [
"def",
"_get_node_estimations",
"(",
"self",
",",
"node_attr",
",",
"node_id",
")",
":",
"# Get data node estimations.",
"estimations",
"=",
"self",
".",
"_wf_pred",
"[",
"node_id",
"]",
"wait_in",
"=",
"node_attr",
"[",
"'wait_inputs'",
"]",
"# Namespace shortcut.",
"# Check if node has multiple estimations and it is not waiting inputs.",
"if",
"len",
"(",
"estimations",
")",
">",
"1",
"and",
"not",
"self",
".",
"_wait_in",
".",
"get",
"(",
"node_id",
",",
"wait_in",
")",
":",
"# Namespace shortcuts.",
"dist",
",",
"edg_length",
",",
"adj",
"=",
"self",
".",
"dist",
",",
"self",
".",
"_edge_length",
",",
"self",
".",
"dmap",
".",
"adj",
"est",
"=",
"[",
"]",
"# Estimations' heap.",
"for",
"k",
",",
"v",
"in",
"estimations",
".",
"items",
"(",
")",
":",
"# Calculate length.",
"if",
"k",
"is",
"not",
"START",
":",
"d",
"=",
"dist",
"[",
"k",
"]",
"+",
"edg_length",
"(",
"adj",
"[",
"k",
"]",
"[",
"node_id",
"]",
",",
"node_attr",
")",
"heapq",
".",
"heappush",
"(",
"est",
",",
"(",
"d",
",",
"k",
",",
"v",
")",
")",
"# The estimation with minimum distance from the starting node.",
"estimations",
"=",
"{",
"est",
"[",
"0",
"]",
"[",
"1",
"]",
":",
"est",
"[",
"0",
"]",
"[",
"2",
"]",
"}",
"# Remove unused workflow edges.",
"self",
".",
"workflow",
".",
"remove_edges_from",
"(",
"[",
"(",
"v",
"[",
"1",
"]",
",",
"node_id",
")",
"for",
"v",
"in",
"est",
"[",
"1",
":",
"]",
"]",
")",
"return",
"estimations",
",",
"wait_in"
] | Returns the data nodes estimations and `wait_inputs` flag.
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data node's id.
:type node_id: str
:returns:
- node estimations with minimum distance from the starting node, and
- `wait_inputs` flag
:rtype: (dict[str, T], bool) | [
"Returns",
"the",
"data",
"nodes",
"estimations",
"and",
"wait_inputs",
"flag",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L498-L540 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._set_node_output | def _set_node_output(self, node_id, no_call, next_nds=None, **kw):
"""
Set the node outputs from node inputs.
:param node_id:
Data or function node id.
:type node_id: str
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts.
node_attr = self.nodes[node_id]
node_type = node_attr['type']
if node_type == 'data': # Set data node.
return self._set_data_node_output(node_id, node_attr, no_call,
next_nds, **kw)
elif node_type == 'function': # Set function node.
return self._set_function_node_output(node_id, node_attr, no_call,
next_nds, **kw) | python | def _set_node_output(self, node_id, no_call, next_nds=None, **kw):
"""
Set the node outputs from node inputs.
:param node_id:
Data or function node id.
:type node_id: str
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts.
node_attr = self.nodes[node_id]
node_type = node_attr['type']
if node_type == 'data': # Set data node.
return self._set_data_node_output(node_id, node_attr, no_call,
next_nds, **kw)
elif node_type == 'function': # Set function node.
return self._set_function_node_output(node_id, node_attr, no_call,
next_nds, **kw) | [
"def",
"_set_node_output",
"(",
"self",
",",
"node_id",
",",
"no_call",
",",
"next_nds",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"# Namespace shortcuts.",
"node_attr",
"=",
"self",
".",
"nodes",
"[",
"node_id",
"]",
"node_type",
"=",
"node_attr",
"[",
"'type'",
"]",
"if",
"node_type",
"==",
"'data'",
":",
"# Set data node.",
"return",
"self",
".",
"_set_data_node_output",
"(",
"node_id",
",",
"node_attr",
",",
"no_call",
",",
"next_nds",
",",
"*",
"*",
"kw",
")",
"elif",
"node_type",
"==",
"'function'",
":",
"# Set function node.",
"return",
"self",
".",
"_set_function_node_output",
"(",
"node_id",
",",
"node_attr",
",",
"no_call",
",",
"next_nds",
",",
"*",
"*",
"kw",
")"
] | Set the node outputs from node inputs.
:param node_id:
Data or function node id.
:type node_id: str
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool | [
"Set",
"the",
"node",
"outputs",
"from",
"node",
"inputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L554-L581 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._set_data_node_output | def _set_data_node_output(self, node_id, node_attr, no_call, next_nds=None,
**kw):
"""
Set the data node output from node estimations.
:param node_id:
Data node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimations are not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Get data node estimations.
est, wait_in = self._get_node_estimations(node_attr, node_id)
if not no_call:
if node_id is PLOT:
est = est.copy()
est[PLOT] = {'value': {'obj': self}}
sf, args = False, ({k: v['value'] for k, v in est.items()},)
if not (wait_in or 'function' in node_attr):
# Data node that has just one estimation value.
sf, args = True, tuple(args[0].values())
try:
# Final estimation of the node and node status.
value = async_thread(self, args, node_attr, node_id, sf, **kw)
except SkipNode:
return False
if value is not NONE: # Set data output.
self[node_id] = value
value = {'value': value} # Output value.
else:
self[node_id] = NONE # Set data output.
value = {} # Output value.
if next_nds:
# namespace shortcuts for speed.
wf_add_edge = self._wf_add_edge
for u in next_nds: # Set workflow.
wf_add_edge(node_id, u, **value)
else:
# namespace shortcuts for speed.
n, has, sub_sol = self.nodes, self.workflow.has_edge, self.sub_sol
def no_visited_in_sub_dsp(i):
node = n[i]
if node['type'] == 'dispatcher' and has(i, node_id):
visited = sub_sol[self.index + node['index']]._visited
return node['inputs'][node_id] not in visited
return True
# List of functions.
succ_fun = [u for u in self._succ[node_id]
if no_visited_in_sub_dsp(u)]
# Check if it has functions as outputs and wildcard condition.
if succ_fun and succ_fun[0] not in self._visited:
# namespace shortcuts for speed.
wf_add_edge = self._wf_add_edge
for u in succ_fun: # Set workflow.
wf_add_edge(node_id, u, **value)
return True | python | def _set_data_node_output(self, node_id, node_attr, no_call, next_nds=None,
**kw):
"""
Set the data node output from node estimations.
:param node_id:
Data node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimations are not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Get data node estimations.
est, wait_in = self._get_node_estimations(node_attr, node_id)
if not no_call:
if node_id is PLOT:
est = est.copy()
est[PLOT] = {'value': {'obj': self}}
sf, args = False, ({k: v['value'] for k, v in est.items()},)
if not (wait_in or 'function' in node_attr):
# Data node that has just one estimation value.
sf, args = True, tuple(args[0].values())
try:
# Final estimation of the node and node status.
value = async_thread(self, args, node_attr, node_id, sf, **kw)
except SkipNode:
return False
if value is not NONE: # Set data output.
self[node_id] = value
value = {'value': value} # Output value.
else:
self[node_id] = NONE # Set data output.
value = {} # Output value.
if next_nds:
# namespace shortcuts for speed.
wf_add_edge = self._wf_add_edge
for u in next_nds: # Set workflow.
wf_add_edge(node_id, u, **value)
else:
# namespace shortcuts for speed.
n, has, sub_sol = self.nodes, self.workflow.has_edge, self.sub_sol
def no_visited_in_sub_dsp(i):
node = n[i]
if node['type'] == 'dispatcher' and has(i, node_id):
visited = sub_sol[self.index + node['index']]._visited
return node['inputs'][node_id] not in visited
return True
# List of functions.
succ_fun = [u for u in self._succ[node_id]
if no_visited_in_sub_dsp(u)]
# Check if it has functions as outputs and wildcard condition.
if succ_fun and succ_fun[0] not in self._visited:
# namespace shortcuts for speed.
wf_add_edge = self._wf_add_edge
for u in succ_fun: # Set workflow.
wf_add_edge(node_id, u, **value)
return True | [
"def",
"_set_data_node_output",
"(",
"self",
",",
"node_id",
",",
"node_attr",
",",
"no_call",
",",
"next_nds",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"# Get data node estimations.",
"est",
",",
"wait_in",
"=",
"self",
".",
"_get_node_estimations",
"(",
"node_attr",
",",
"node_id",
")",
"if",
"not",
"no_call",
":",
"if",
"node_id",
"is",
"PLOT",
":",
"est",
"=",
"est",
".",
"copy",
"(",
")",
"est",
"[",
"PLOT",
"]",
"=",
"{",
"'value'",
":",
"{",
"'obj'",
":",
"self",
"}",
"}",
"sf",
",",
"args",
"=",
"False",
",",
"(",
"{",
"k",
":",
"v",
"[",
"'value'",
"]",
"for",
"k",
",",
"v",
"in",
"est",
".",
"items",
"(",
")",
"}",
",",
")",
"if",
"not",
"(",
"wait_in",
"or",
"'function'",
"in",
"node_attr",
")",
":",
"# Data node that has just one estimation value.",
"sf",
",",
"args",
"=",
"True",
",",
"tuple",
"(",
"args",
"[",
"0",
"]",
".",
"values",
"(",
")",
")",
"try",
":",
"# Final estimation of the node and node status.",
"value",
"=",
"async_thread",
"(",
"self",
",",
"args",
",",
"node_attr",
",",
"node_id",
",",
"sf",
",",
"*",
"*",
"kw",
")",
"except",
"SkipNode",
":",
"return",
"False",
"if",
"value",
"is",
"not",
"NONE",
":",
"# Set data output.",
"self",
"[",
"node_id",
"]",
"=",
"value",
"value",
"=",
"{",
"'value'",
":",
"value",
"}",
"# Output value.",
"else",
":",
"self",
"[",
"node_id",
"]",
"=",
"NONE",
"# Set data output.",
"value",
"=",
"{",
"}",
"# Output value.",
"if",
"next_nds",
":",
"# namespace shortcuts for speed.",
"wf_add_edge",
"=",
"self",
".",
"_wf_add_edge",
"for",
"u",
"in",
"next_nds",
":",
"# Set workflow.",
"wf_add_edge",
"(",
"node_id",
",",
"u",
",",
"*",
"*",
"value",
")",
"else",
":",
"# namespace shortcuts for speed.",
"n",
",",
"has",
",",
"sub_sol",
"=",
"self",
".",
"nodes",
",",
"self",
".",
"workflow",
".",
"has_edge",
",",
"self",
".",
"sub_sol",
"def",
"no_visited_in_sub_dsp",
"(",
"i",
")",
":",
"node",
"=",
"n",
"[",
"i",
"]",
"if",
"node",
"[",
"'type'",
"]",
"==",
"'dispatcher'",
"and",
"has",
"(",
"i",
",",
"node_id",
")",
":",
"visited",
"=",
"sub_sol",
"[",
"self",
".",
"index",
"+",
"node",
"[",
"'index'",
"]",
"]",
".",
"_visited",
"return",
"node",
"[",
"'inputs'",
"]",
"[",
"node_id",
"]",
"not",
"in",
"visited",
"return",
"True",
"# List of functions.",
"succ_fun",
"=",
"[",
"u",
"for",
"u",
"in",
"self",
".",
"_succ",
"[",
"node_id",
"]",
"if",
"no_visited_in_sub_dsp",
"(",
"u",
")",
"]",
"# Check if it has functions as outputs and wildcard condition.",
"if",
"succ_fun",
"and",
"succ_fun",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"_visited",
":",
"# namespace shortcuts for speed.",
"wf_add_edge",
"=",
"self",
".",
"_wf_add_edge",
"for",
"u",
"in",
"succ_fun",
":",
"# Set workflow.",
"wf_add_edge",
"(",
"node_id",
",",
"u",
",",
"*",
"*",
"value",
")",
"return",
"True"
] | Set the data node output from node estimations.
:param node_id:
Data node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimations are not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool | [
"Set",
"the",
"data",
"node",
"output",
"from",
"node",
"estimations",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L642-L721 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._set_function_node_output | def _set_function_node_output(self, node_id, node_attr, no_call,
next_nds=None, **kw):
"""
Set the function node output from node inputs.
:param node_id:
Function node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts for speed.
o_nds, dist = node_attr['outputs'], self.dist
# List of nodes that can still be estimated by the function node.
output_nodes = next_nds or set(self._succ[node_id]).difference(dist)
if not output_nodes: # This function is not needed.
self.workflow.remove_node(node_id) # Remove function node.
return False
wf_add_edge = self._wf_add_edge # Namespace shortcuts for speed.
if no_call:
for u in output_nodes: # Set workflow out.
wf_add_edge(node_id, u)
return True
args = self._wf_pred[node_id] # List of the function's arguments.
args = [args[k]['value'] for k in node_attr['inputs']]
try:
self._check_function_domain(args, node_attr, node_id)
res = async_thread(self, args, node_attr, node_id, **kw)
# noinspection PyUnresolvedReferences
self.workflow.node[node_id]['results'] = res
except SkipNode:
return False
# Set workflow.
for k, v in zip(o_nds, res if len(o_nds) > 1 else [res]):
if k in output_nodes and v is not NONE:
wf_add_edge(node_id, k, value=v)
return True | python | def _set_function_node_output(self, node_id, node_attr, no_call,
next_nds=None, **kw):
"""
Set the function node output from node inputs.
:param node_id:
Function node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts for speed.
o_nds, dist = node_attr['outputs'], self.dist
# List of nodes that can still be estimated by the function node.
output_nodes = next_nds or set(self._succ[node_id]).difference(dist)
if not output_nodes: # This function is not needed.
self.workflow.remove_node(node_id) # Remove function node.
return False
wf_add_edge = self._wf_add_edge # Namespace shortcuts for speed.
if no_call:
for u in output_nodes: # Set workflow out.
wf_add_edge(node_id, u)
return True
args = self._wf_pred[node_id] # List of the function's arguments.
args = [args[k]['value'] for k in node_attr['inputs']]
try:
self._check_function_domain(args, node_attr, node_id)
res = async_thread(self, args, node_attr, node_id, **kw)
# noinspection PyUnresolvedReferences
self.workflow.node[node_id]['results'] = res
except SkipNode:
return False
# Set workflow.
for k, v in zip(o_nds, res if len(o_nds) > 1 else [res]):
if k in output_nodes and v is not NONE:
wf_add_edge(node_id, k, value=v)
return True | [
"def",
"_set_function_node_output",
"(",
"self",
",",
"node_id",
",",
"node_attr",
",",
"no_call",
",",
"next_nds",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"# Namespace shortcuts for speed.",
"o_nds",
",",
"dist",
"=",
"node_attr",
"[",
"'outputs'",
"]",
",",
"self",
".",
"dist",
"# List of nodes that can still be estimated by the function node.",
"output_nodes",
"=",
"next_nds",
"or",
"set",
"(",
"self",
".",
"_succ",
"[",
"node_id",
"]",
")",
".",
"difference",
"(",
"dist",
")",
"if",
"not",
"output_nodes",
":",
"# This function is not needed.",
"self",
".",
"workflow",
".",
"remove_node",
"(",
"node_id",
")",
"# Remove function node.",
"return",
"False",
"wf_add_edge",
"=",
"self",
".",
"_wf_add_edge",
"# Namespace shortcuts for speed.",
"if",
"no_call",
":",
"for",
"u",
"in",
"output_nodes",
":",
"# Set workflow out.",
"wf_add_edge",
"(",
"node_id",
",",
"u",
")",
"return",
"True",
"args",
"=",
"self",
".",
"_wf_pred",
"[",
"node_id",
"]",
"# List of the function's arguments.",
"args",
"=",
"[",
"args",
"[",
"k",
"]",
"[",
"'value'",
"]",
"for",
"k",
"in",
"node_attr",
"[",
"'inputs'",
"]",
"]",
"try",
":",
"self",
".",
"_check_function_domain",
"(",
"args",
",",
"node_attr",
",",
"node_id",
")",
"res",
"=",
"async_thread",
"(",
"self",
",",
"args",
",",
"node_attr",
",",
"node_id",
",",
"*",
"*",
"kw",
")",
"# noinspection PyUnresolvedReferences",
"self",
".",
"workflow",
".",
"node",
"[",
"node_id",
"]",
"[",
"'results'",
"]",
"=",
"res",
"except",
"SkipNode",
":",
"return",
"False",
"# Set workflow.",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"o_nds",
",",
"res",
"if",
"len",
"(",
"o_nds",
")",
">",
"1",
"else",
"[",
"res",
"]",
")",
":",
"if",
"k",
"in",
"output_nodes",
"and",
"v",
"is",
"not",
"NONE",
":",
"wf_add_edge",
"(",
"node_id",
",",
"k",
",",
"value",
"=",
"v",
")",
"return",
"True"
] | Set the function node output from node inputs.
:param node_id:
Function node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output has been evaluated correctly.
:rtype: bool | [
"Set",
"the",
"function",
"node",
"output",
"from",
"node",
"inputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L743-L798 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._add_initial_value | def _add_initial_value(self, data_id, value, initial_dist=0.0,
fringe=None, check_cutoff=None, no_call=None):
"""
Add initial values updating workflow, seen, and fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node value e.g., {'value': val}.
:type value: dict[str, T]
:param initial_dist:
Data node initial distance in the ArciDispatch algorithm.
:type initial_dist: float, int, optional
:return:
True if the data has been visited, otherwise false.
:rtype: bool
"""
# Namespace shortcuts for speed.
nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length
wf_remove_edge, check_wait_in = self._wf_remove_edge, self.check_wait_in
wf_add_edge, dsp_in = self._wf_add_edge, self._set_sub_dsp_node_input
update_view = self._update_meeting
if fringe is None:
fringe = self.fringe
if no_call is None:
no_call = self.no_call
check_cutoff = check_cutoff or self.check_cutoff
if data_id not in nodes: # Data node is not in the dmap.
return False
wait_in = nodes[data_id]['wait_inputs'] # Store wait inputs flag.
index = nodes[data_id]['index'] # Store node index.
wf_add_edge(START, data_id, **value) # Add edge.
if data_id in self._wildcards: # Check if the data node has wildcard.
self._visited.add(data_id) # Update visited nodes.
self.workflow.add_node(data_id) # Add node to workflow.
for w, edge_data in self.dmap[data_id].items(): # See func node.
wf_add_edge(data_id, w, **value) # Set workflow.
node = nodes[w] # Node attributes.
# Evaluate distance.
vw_dist = initial_dist + edge_weight(edge_data, node)
update_view(w, vw_dist) # Update view distance.
# Check the cutoff limit and if all inputs are satisfied.
if check_cutoff(vw_dist):
wf_remove_edge(data_id, w) # Remove workflow edge.
continue # Pass the node.
elif node['type'] == 'dispatcher':
dsp_in(data_id, w, fringe, check_cutoff, no_call, vw_dist)
elif check_wait_in(True, w):
continue # Pass the node.
seen[w] = vw_dist # Update distance.
vd = (True, w, self.index + node['index']) # Virtual distance.
heapq.heappush(fringe, (vw_dist, vd, (w, self))) # Add to heapq.
return True
update_view(data_id, initial_dist) # Update view distance.
if check_cutoff(initial_dist): # Check the cutoff limit.
wf_remove_edge(START, data_id) # Remove workflow edge.
elif not check_wait_in(wait_in, data_id): # Check inputs.
seen[data_id] = initial_dist # Update distance.
vd = (wait_in, data_id, self.index + index) # Virtual distance.
# Add node to heapq.
heapq.heappush(fringe, (initial_dist, vd, (data_id, self)))
return True
return False | python | def _add_initial_value(self, data_id, value, initial_dist=0.0,
fringe=None, check_cutoff=None, no_call=None):
"""
Add initial values updating workflow, seen, and fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node value e.g., {'value': val}.
:type value: dict[str, T]
:param initial_dist:
Data node initial distance in the ArciDispatch algorithm.
:type initial_dist: float, int, optional
:return:
True if the data has been visited, otherwise false.
:rtype: bool
"""
# Namespace shortcuts for speed.
nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length
wf_remove_edge, check_wait_in = self._wf_remove_edge, self.check_wait_in
wf_add_edge, dsp_in = self._wf_add_edge, self._set_sub_dsp_node_input
update_view = self._update_meeting
if fringe is None:
fringe = self.fringe
if no_call is None:
no_call = self.no_call
check_cutoff = check_cutoff or self.check_cutoff
if data_id not in nodes: # Data node is not in the dmap.
return False
wait_in = nodes[data_id]['wait_inputs'] # Store wait inputs flag.
index = nodes[data_id]['index'] # Store node index.
wf_add_edge(START, data_id, **value) # Add edge.
if data_id in self._wildcards: # Check if the data node has wildcard.
self._visited.add(data_id) # Update visited nodes.
self.workflow.add_node(data_id) # Add node to workflow.
for w, edge_data in self.dmap[data_id].items(): # See func node.
wf_add_edge(data_id, w, **value) # Set workflow.
node = nodes[w] # Node attributes.
# Evaluate distance.
vw_dist = initial_dist + edge_weight(edge_data, node)
update_view(w, vw_dist) # Update view distance.
# Check the cutoff limit and if all inputs are satisfied.
if check_cutoff(vw_dist):
wf_remove_edge(data_id, w) # Remove workflow edge.
continue # Pass the node.
elif node['type'] == 'dispatcher':
dsp_in(data_id, w, fringe, check_cutoff, no_call, vw_dist)
elif check_wait_in(True, w):
continue # Pass the node.
seen[w] = vw_dist # Update distance.
vd = (True, w, self.index + node['index']) # Virtual distance.
heapq.heappush(fringe, (vw_dist, vd, (w, self))) # Add to heapq.
return True
update_view(data_id, initial_dist) # Update view distance.
if check_cutoff(initial_dist): # Check the cutoff limit.
wf_remove_edge(START, data_id) # Remove workflow edge.
elif not check_wait_in(wait_in, data_id): # Check inputs.
seen[data_id] = initial_dist # Update distance.
vd = (wait_in, data_id, self.index + index) # Virtual distance.
# Add node to heapq.
heapq.heappush(fringe, (initial_dist, vd, (data_id, self)))
return True
return False | [
"def",
"_add_initial_value",
"(",
"self",
",",
"data_id",
",",
"value",
",",
"initial_dist",
"=",
"0.0",
",",
"fringe",
"=",
"None",
",",
"check_cutoff",
"=",
"None",
",",
"no_call",
"=",
"None",
")",
":",
"# Namespace shortcuts for speed.",
"nodes",
",",
"seen",
",",
"edge_weight",
"=",
"self",
".",
"nodes",
",",
"self",
".",
"seen",
",",
"self",
".",
"_edge_length",
"wf_remove_edge",
",",
"check_wait_in",
"=",
"self",
".",
"_wf_remove_edge",
",",
"self",
".",
"check_wait_in",
"wf_add_edge",
",",
"dsp_in",
"=",
"self",
".",
"_wf_add_edge",
",",
"self",
".",
"_set_sub_dsp_node_input",
"update_view",
"=",
"self",
".",
"_update_meeting",
"if",
"fringe",
"is",
"None",
":",
"fringe",
"=",
"self",
".",
"fringe",
"if",
"no_call",
"is",
"None",
":",
"no_call",
"=",
"self",
".",
"no_call",
"check_cutoff",
"=",
"check_cutoff",
"or",
"self",
".",
"check_cutoff",
"if",
"data_id",
"not",
"in",
"nodes",
":",
"# Data node is not in the dmap.",
"return",
"False",
"wait_in",
"=",
"nodes",
"[",
"data_id",
"]",
"[",
"'wait_inputs'",
"]",
"# Store wait inputs flag.",
"index",
"=",
"nodes",
"[",
"data_id",
"]",
"[",
"'index'",
"]",
"# Store node index.",
"wf_add_edge",
"(",
"START",
",",
"data_id",
",",
"*",
"*",
"value",
")",
"# Add edge.",
"if",
"data_id",
"in",
"self",
".",
"_wildcards",
":",
"# Check if the data node has wildcard.",
"self",
".",
"_visited",
".",
"add",
"(",
"data_id",
")",
"# Update visited nodes.",
"self",
".",
"workflow",
".",
"add_node",
"(",
"data_id",
")",
"# Add node to workflow.",
"for",
"w",
",",
"edge_data",
"in",
"self",
".",
"dmap",
"[",
"data_id",
"]",
".",
"items",
"(",
")",
":",
"# See func node.",
"wf_add_edge",
"(",
"data_id",
",",
"w",
",",
"*",
"*",
"value",
")",
"# Set workflow.",
"node",
"=",
"nodes",
"[",
"w",
"]",
"# Node attributes.",
"# Evaluate distance.",
"vw_dist",
"=",
"initial_dist",
"+",
"edge_weight",
"(",
"edge_data",
",",
"node",
")",
"update_view",
"(",
"w",
",",
"vw_dist",
")",
"# Update view distance.",
"# Check the cutoff limit and if all inputs are satisfied.",
"if",
"check_cutoff",
"(",
"vw_dist",
")",
":",
"wf_remove_edge",
"(",
"data_id",
",",
"w",
")",
"# Remove workflow edge.",
"continue",
"# Pass the node.",
"elif",
"node",
"[",
"'type'",
"]",
"==",
"'dispatcher'",
":",
"dsp_in",
"(",
"data_id",
",",
"w",
",",
"fringe",
",",
"check_cutoff",
",",
"no_call",
",",
"vw_dist",
")",
"elif",
"check_wait_in",
"(",
"True",
",",
"w",
")",
":",
"continue",
"# Pass the node.",
"seen",
"[",
"w",
"]",
"=",
"vw_dist",
"# Update distance.",
"vd",
"=",
"(",
"True",
",",
"w",
",",
"self",
".",
"index",
"+",
"node",
"[",
"'index'",
"]",
")",
"# Virtual distance.",
"heapq",
".",
"heappush",
"(",
"fringe",
",",
"(",
"vw_dist",
",",
"vd",
",",
"(",
"w",
",",
"self",
")",
")",
")",
"# Add 2 heapq.",
"return",
"True",
"update_view",
"(",
"data_id",
",",
"initial_dist",
")",
"# Update view distance.",
"if",
"check_cutoff",
"(",
"initial_dist",
")",
":",
"# Check the cutoff limit.",
"wf_remove_edge",
"(",
"START",
",",
"data_id",
")",
"# Remove workflow edge.",
"elif",
"not",
"check_wait_in",
"(",
"wait_in",
",",
"data_id",
")",
":",
"# Check inputs.",
"seen",
"[",
"data_id",
"]",
"=",
"initial_dist",
"# Update distance.",
"vd",
"=",
"(",
"wait_in",
",",
"data_id",
",",
"self",
".",
"index",
"+",
"index",
")",
"# Virtual distance.",
"# Add node to heapq.",
"heapq",
".",
"heappush",
"(",
"fringe",
",",
"(",
"initial_dist",
",",
"vd",
",",
"(",
"data_id",
",",
"self",
")",
")",
")",
"return",
"True",
"return",
"False"
] | Add initial values updating workflow, seen, and fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node value e.g., {'value': val}.
:type value: dict[str, T]
:param initial_dist:
Data node initial distance in the ArciDispatch algorithm.
:type initial_dist: float, int, optional
:return:
True if the data has been visited, otherwise false.
:rtype: bool | [
"Add",
"initial",
"values",
"updating",
"workflow",
"seen",
"and",
"fringe",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L800-L903 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._visit_nodes | def _visit_nodes(self, node_id, dist, fringe, check_cutoff, no_call=False,
**kw):
"""
Visits a node, updating workflow, seen, and fringe.
:param node_id:
Node id to visit.
:type node_id: str
:param dist:
Distance from the starting node.
:type dist: float, int
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool, optional
:return:
False if all dispatcher targets have been reached, otherwise True.
:rtype: bool
"""
# Namespace shortcuts.
wf_rm_edge, wf_has_edge = self._wf_remove_edge, self.workflow.has_edge
edge_weight, nodes = self._edge_length, self.nodes
self.dist[node_id] = dist # Set minimum dist.
self._visited.add(node_id) # Update visited nodes.
if not self._set_node_output(node_id, no_call, **kw): # Set output.
# Some error occurs or inputs are not in the function domain.
return True
if self.check_targets(node_id): # Check if the targets are satisfied.
return False # Stop loop.
for w, e_data in self.dmap[node_id].items():
if not wf_has_edge(node_id, w): # Check wildcard option.
continue
node = nodes[w] # Get node attributes.
vw_d = dist + edge_weight(e_data, node) # Evaluate dist.
if check_cutoff(vw_d): # Check the cutoff limit.
wf_rm_edge(node_id, w) # Remove edge that cannot be seen.
continue
if node['type'] == 'dispatcher':
self._set_sub_dsp_node_input(
node_id, w, fringe, check_cutoff, no_call, vw_d)
else: # See the node.
self._see_node(w, fringe, vw_d)
return True | python | def _visit_nodes(self, node_id, dist, fringe, check_cutoff, no_call=False,
**kw):
"""
Visits a node, updating workflow, seen, and fringe.
:param node_id:
Node id to visit.
:type node_id: str
:param dist:
Distance from the starting node.
:type dist: float, int
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool, optional
:return:
False if all dispatcher targets have been reached, otherwise True.
:rtype: bool
"""
# Namespace shortcuts.
wf_rm_edge, wf_has_edge = self._wf_remove_edge, self.workflow.has_edge
edge_weight, nodes = self._edge_length, self.nodes
self.dist[node_id] = dist # Set minimum dist.
self._visited.add(node_id) # Update visited nodes.
if not self._set_node_output(node_id, no_call, **kw): # Set output.
# Some error occurs or inputs are not in the function domain.
return True
if self.check_targets(node_id): # Check if the targets are satisfied.
return False # Stop loop.
for w, e_data in self.dmap[node_id].items():
if not wf_has_edge(node_id, w): # Check wildcard option.
continue
node = nodes[w] # Get node attributes.
vw_d = dist + edge_weight(e_data, node) # Evaluate dist.
if check_cutoff(vw_d): # Check the cutoff limit.
wf_rm_edge(node_id, w) # Remove edge that cannot be seen.
continue
if node['type'] == 'dispatcher':
self._set_sub_dsp_node_input(
node_id, w, fringe, check_cutoff, no_call, vw_d)
else: # See the node.
self._see_node(w, fringe, vw_d)
return True | [
"def",
"_visit_nodes",
"(",
"self",
",",
"node_id",
",",
"dist",
",",
"fringe",
",",
"check_cutoff",
",",
"no_call",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"# Namespace shortcuts.",
"wf_rm_edge",
",",
"wf_has_edge",
"=",
"self",
".",
"_wf_remove_edge",
",",
"self",
".",
"workflow",
".",
"has_edge",
"edge_weight",
",",
"nodes",
"=",
"self",
".",
"_edge_length",
",",
"self",
".",
"nodes",
"self",
".",
"dist",
"[",
"node_id",
"]",
"=",
"dist",
"# Set minimum dist.",
"self",
".",
"_visited",
".",
"add",
"(",
"node_id",
")",
"# Update visited nodes.",
"if",
"not",
"self",
".",
"_set_node_output",
"(",
"node_id",
",",
"no_call",
",",
"*",
"*",
"kw",
")",
":",
"# Set output.",
"# Some error occurs or inputs are not in the function domain.",
"return",
"True",
"if",
"self",
".",
"check_targets",
"(",
"node_id",
")",
":",
"# Check if the targets are satisfied.",
"return",
"False",
"# Stop loop.",
"for",
"w",
",",
"e_data",
"in",
"self",
".",
"dmap",
"[",
"node_id",
"]",
".",
"items",
"(",
")",
":",
"if",
"not",
"wf_has_edge",
"(",
"node_id",
",",
"w",
")",
":",
"# Check wildcard option.",
"continue",
"node",
"=",
"nodes",
"[",
"w",
"]",
"# Get node attributes.",
"vw_d",
"=",
"dist",
"+",
"edge_weight",
"(",
"e_data",
",",
"node",
")",
"# Evaluate dist.",
"if",
"check_cutoff",
"(",
"vw_d",
")",
":",
"# Check the cutoff limit.",
"wf_rm_edge",
"(",
"node_id",
",",
"w",
")",
"# Remove edge that cannot be see.",
"continue",
"if",
"node",
"[",
"'type'",
"]",
"==",
"'dispatcher'",
":",
"self",
".",
"_set_sub_dsp_node_input",
"(",
"node_id",
",",
"w",
",",
"fringe",
",",
"check_cutoff",
",",
"no_call",
",",
"vw_d",
")",
"else",
":",
"# See the node.",
"self",
".",
"_see_node",
"(",
"w",
",",
"fringe",
",",
"vw_d",
")",
"return",
"True"
] | Visits a node, updating workflow, seen, and fringe.
:param node_id:
Node id to visit.
:type node_id: str
:param dist:
Distance from the starting node.
:type dist: float, int
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool, optional
:return:
False if all dispatcher targets have been reached, otherwise True.
:rtype: bool | [
"Visits",
"a",
"node",
"updating",
"workflow",
"seen",
"and",
"fringe",
".."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L912-L976 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._see_node | def _see_node(self, node_id, fringe, dist, w_wait_in=0):
"""
See a node, updating seen and fringe.
:param node_id:
Node id to see.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param w_wait_in:
Additional weight for sorting correctly the nodes in the fringe.
:type w_wait_in: int, float
:return:
True if the node is visible, otherwise False.
:rtype: bool
"""
# Namespace shortcuts.
seen, dists = self.seen, self.dist
wait_in = self.nodes[node_id]['wait_inputs'] # Wait inputs flag.
self._update_meeting(node_id, dist) # Update view distance.
# Check if inputs are satisfied.
if self.check_wait_in(wait_in, node_id):
pass # Pass the node
elif node_id in dists: # The node was already estimated.
if dist < dists[node_id]: # Error for negative paths.
raise DispatcherError('Contradictory paths found: '
'negative weights?', sol=self)
elif node_id not in seen or dist < seen[node_id]: # Check min dist.
seen[node_id] = dist # Update dist.
index = self.nodes[node_id]['index'] # Node index.
# Virtual distance.
vd = (w_wait_in + int(wait_in), node_id, self.index + index)
# Add to heapq.
heapq.heappush(fringe, (dist, vd, (node_id, self)))
return True # The node is visible.
return False | python | def _see_node(self, node_id, fringe, dist, w_wait_in=0):
"""
See a node, updating seen and fringe.
:param node_id:
Node id to see.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param w_wait_in:
Additional weight for sorting correctly the nodes in the fringe.
:type w_wait_in: int, float
:return:
True if the node is visible, otherwise False.
:rtype: bool
"""
# Namespace shortcuts.
seen, dists = self.seen, self.dist
wait_in = self.nodes[node_id]['wait_inputs'] # Wait inputs flag.
self._update_meeting(node_id, dist) # Update view distance.
# Check if inputs are satisfied.
if self.check_wait_in(wait_in, node_id):
pass # Pass the node
elif node_id in dists: # The node was already estimated.
if dist < dists[node_id]: # Error for negative paths.
raise DispatcherError('Contradictory paths found: '
'negative weights?', sol=self)
elif node_id not in seen or dist < seen[node_id]: # Check min dist.
seen[node_id] = dist # Update dist.
index = self.nodes[node_id]['index'] # Node index.
# Virtual distance.
vd = (w_wait_in + int(wait_in), node_id, self.index + index)
# Add to heapq.
heapq.heappush(fringe, (dist, vd, (node_id, self)))
return True # The node is visible.
return False | [
"def",
"_see_node",
"(",
"self",
",",
"node_id",
",",
"fringe",
",",
"dist",
",",
"w_wait_in",
"=",
"0",
")",
":",
"# Namespace shortcuts.",
"seen",
",",
"dists",
"=",
"self",
".",
"seen",
",",
"self",
".",
"dist",
"wait_in",
"=",
"self",
".",
"nodes",
"[",
"node_id",
"]",
"[",
"'wait_inputs'",
"]",
"# Wait inputs flag.",
"self",
".",
"_update_meeting",
"(",
"node_id",
",",
"dist",
")",
"# Update view distance.",
"# Check if inputs are satisfied.",
"if",
"self",
".",
"check_wait_in",
"(",
"wait_in",
",",
"node_id",
")",
":",
"pass",
"# Pass the node",
"elif",
"node_id",
"in",
"dists",
":",
"# The node w already estimated.",
"if",
"dist",
"<",
"dists",
"[",
"node_id",
"]",
":",
"# Error for negative paths.",
"raise",
"DispatcherError",
"(",
"'Contradictory paths found: '",
"'negative weights?'",
",",
"sol",
"=",
"self",
")",
"elif",
"node_id",
"not",
"in",
"seen",
"or",
"dist",
"<",
"seen",
"[",
"node_id",
"]",
":",
"# Check min dist.",
"seen",
"[",
"node_id",
"]",
"=",
"dist",
"# Update dist.",
"index",
"=",
"self",
".",
"nodes",
"[",
"node_id",
"]",
"[",
"'index'",
"]",
"# Node index.",
"# Virtual distance.",
"vd",
"=",
"(",
"w_wait_in",
"+",
"int",
"(",
"wait_in",
")",
",",
"node_id",
",",
"self",
".",
"index",
"+",
"index",
")",
"# Add to heapq.",
"heapq",
".",
"heappush",
"(",
"fringe",
",",
"(",
"dist",
",",
"vd",
",",
"(",
"node_id",
",",
"self",
")",
")",
")",
"return",
"True",
"# The node is visible.",
"return",
"False"
] | See a node, updating seen and fringe.
:param node_id:
Node id to see.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param w_wait_in:
Additional weight for sorting correctly the nodes in the fringe.
:type w_wait_in: int, float
:return:
True if the node is visible, otherwise False.
:rtype: bool | [
"See",
"a",
"node",
"updating",
"seen",
"and",
"fringe",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L978-L1030 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._remove_unused_nodes | def _remove_unused_nodes(self):
"""
Removes unused function and sub-dispatcher nodes.
"""
# Namespace shortcuts.
nodes, wf_remove_node = self.nodes, self.workflow.remove_node
add_visited, succ = self._visited.add, self.workflow.succ
# Remove unused function and sub-dispatcher nodes.
for n in (set(self._wf_pred) - set(self._visited)):
node_type = nodes[n]['type'] # Node type.
if node_type == 'data':
continue # Skip data node.
if node_type == 'dispatcher' and succ[n]:
add_visited(n) # Add to visited nodes.
i = self.index + nodes[n]['index']
self.sub_sol[i]._remove_unused_nodes()
continue # Skip sub-dispatcher node with outputs.
wf_remove_node(n) | python | def _remove_unused_nodes(self):
"""
Removes unused function and sub-dispatcher nodes.
"""
# Namespace shortcuts.
nodes, wf_remove_node = self.nodes, self.workflow.remove_node
add_visited, succ = self._visited.add, self.workflow.succ
# Remove unused function and sub-dispatcher nodes.
for n in (set(self._wf_pred) - set(self._visited)):
node_type = nodes[n]['type'] # Node type.
if node_type == 'data':
continue # Skip data node.
if node_type == 'dispatcher' and succ[n]:
add_visited(n) # Add to visited nodes.
i = self.index + nodes[n]['index']
self.sub_sol[i]._remove_unused_nodes()
continue # Skip sub-dispatcher node with outputs.
wf_remove_node(n) | [
"def",
"_remove_unused_nodes",
"(",
"self",
")",
":",
"# Namespace shortcuts.",
"nodes",
",",
"wf_remove_node",
"=",
"self",
".",
"nodes",
",",
"self",
".",
"workflow",
".",
"remove_node",
"add_visited",
",",
"succ",
"=",
"self",
".",
"_visited",
".",
"add",
",",
"self",
".",
"workflow",
".",
"succ",
"# Remove unused function and sub-dispatcher nodes.",
"for",
"n",
"in",
"(",
"set",
"(",
"self",
".",
"_wf_pred",
")",
"-",
"set",
"(",
"self",
".",
"_visited",
")",
")",
":",
"node_type",
"=",
"nodes",
"[",
"n",
"]",
"[",
"'type'",
"]",
"# Node type.",
"if",
"node_type",
"==",
"'data'",
":",
"continue",
"# Skip data node.",
"if",
"node_type",
"==",
"'dispatcher'",
"and",
"succ",
"[",
"n",
"]",
":",
"add_visited",
"(",
"n",
")",
"# Add to visited nodes.",
"i",
"=",
"self",
".",
"index",
"+",
"nodes",
"[",
"n",
"]",
"[",
"'index'",
"]",
"self",
".",
"sub_sol",
"[",
"i",
"]",
".",
"_remove_unused_nodes",
"(",
")",
"continue",
"# Skip sub-dispatcher node with outputs.",
"wf_remove_node",
"(",
"n",
")"
] | Removes unused function and sub-dispatcher nodes. | [
"Removes",
"unused",
"function",
"and",
"sub",
"-",
"dispatcher",
"nodes",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1032-L1054 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._init_sub_dsp | def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist, index,
full_name):
"""
Initialize the dispatcher as sub-dispatcher and update the fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
"""
# Initialize as sub-dispatcher.
sol = self.__class__(
dsp, {}, outputs, False, None, None, no_call, False,
wait_in=self._wait_in.get(dsp, None), index=self.index + index,
full_name=full_name
)
sol.sub_sol = self.sub_sol
for f in sol.fringe: # Update the fringe.
item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
heapq.heappush(fringe, item)
return sol | python | def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist, index,
full_name):
"""
Initialize the dispatcher as sub-dispatcher and update the fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
"""
# Initialize as sub-dispatcher.
sol = self.__class__(
dsp, {}, outputs, False, None, None, no_call, False,
wait_in=self._wait_in.get(dsp, None), index=self.index + index,
full_name=full_name
)
sol.sub_sol = self.sub_sol
for f in sol.fringe: # Update the fringe.
item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
heapq.heappush(fringe, item)
return sol | [
"def",
"_init_sub_dsp",
"(",
"self",
",",
"dsp",
",",
"fringe",
",",
"outputs",
",",
"no_call",
",",
"initial_dist",
",",
"index",
",",
"full_name",
")",
":",
"# Initialize as sub-dispatcher.",
"sol",
"=",
"self",
".",
"__class__",
"(",
"dsp",
",",
"{",
"}",
",",
"outputs",
",",
"False",
",",
"None",
",",
"None",
",",
"no_call",
",",
"False",
",",
"wait_in",
"=",
"self",
".",
"_wait_in",
".",
"get",
"(",
"dsp",
",",
"None",
")",
",",
"index",
"=",
"self",
".",
"index",
"+",
"index",
",",
"full_name",
"=",
"full_name",
")",
"sol",
".",
"sub_sol",
"=",
"self",
".",
"sub_sol",
"for",
"f",
"in",
"sol",
".",
"fringe",
":",
"# Update the fringe.",
"item",
"=",
"(",
"initial_dist",
"+",
"f",
"[",
"0",
"]",
",",
"(",
"2",
",",
")",
"+",
"f",
"[",
"1",
"]",
"[",
"1",
":",
"]",
",",
"f",
"[",
"-",
"1",
"]",
")",
"heapq",
".",
"heappush",
"(",
"fringe",
",",
"item",
")",
"return",
"sol"
] | Initialize the dispatcher as sub-dispatcher and update the fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable
:param no_call:
If True data node estimation function is not used.
:type no_call: bool | [
"Initialize",
"the",
"dispatcher",
"as",
"sub",
"-",
"dispatcher",
"and",
"update",
"the",
"fringe",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1056-L1087 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._set_sub_dsp_node_input | def _set_sub_dsp_node_input(self, node_id, dsp_id, fringe, check_cutoff,
no_call, initial_dist):
"""
Initializes the sub-dispatcher and sets its inputs.
:param node_id:
Input node to set.
:type node_id: str
:param dsp_id:
Sub-dispatcher node id.
:type dsp_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param initial_dist:
Distance to reach the sub-dispatcher node.
:type initial_dist: int, float
:return:
If the input has been set.
:rtype: bool
"""
# Namespace shortcuts.
node = self.nodes[dsp_id]
dsp, pred = node['function'], self._wf_pred[dsp_id]
distances, sub_sol = self.dist, self.sub_sol
iv_nodes = [node_id] # Nodes to be added as initial values.
self._meet[dsp_id] = initial_dist # Set view distance.
# Check if inputs are satisfied.
if self.check_wait_in(node['wait_inputs'], dsp_id):
return False # Pass the node
if dsp_id not in distances:
kw = {}
dom = self._check_sub_dsp_domain(dsp_id, node, pred, kw)
if dom is True:
iv_nodes = pred # Args respect the domain.
elif dom is False:
return False
# Initialize the sub-dispatcher.
sub_sol[self.index + node['index']] = sol = self._init_sub_dsp(
dsp, fringe, node['outputs'], no_call, initial_dist,
node['index'], self.full_name + (dsp_id,)
)
self.workflow.add_node(dsp_id, solution=sol, **kw)
distances[dsp_id] = initial_dist # Update min distance.
else:
sol = sub_sol[self.index + node['index']]
for n_id in iv_nodes:
# Namespace shortcuts.
val = pred[n_id]
for n in stlp(node['inputs'][n_id]):
# Add initial value to the sub-dispatcher.
sol._add_initial_value(
n, val, initial_dist, fringe, check_cutoff, no_call
)
return True | python | def _set_sub_dsp_node_input(self, node_id, dsp_id, fringe, check_cutoff,
no_call, initial_dist):
"""
Initializes the sub-dispatcher and sets its inputs.
:param node_id:
Input node to set.
:type node_id: str
:param dsp_id:
Sub-dispatcher node id.
:type dsp_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param initial_dist:
Distance to reach the sub-dispatcher node.
:type initial_dist: int, float
:return:
If the input has been set.
:rtype: bool
"""
# Namespace shortcuts.
node = self.nodes[dsp_id]
dsp, pred = node['function'], self._wf_pred[dsp_id]
distances, sub_sol = self.dist, self.sub_sol
iv_nodes = [node_id] # Nodes to be added as initial values.
self._meet[dsp_id] = initial_dist # Set view distance.
# Check if inputs are satisfied.
if self.check_wait_in(node['wait_inputs'], dsp_id):
return False # Pass the node
if dsp_id not in distances:
kw = {}
dom = self._check_sub_dsp_domain(dsp_id, node, pred, kw)
if dom is True:
iv_nodes = pred # Args respect the domain.
elif dom is False:
return False
# Initialize the sub-dispatcher.
sub_sol[self.index + node['index']] = sol = self._init_sub_dsp(
dsp, fringe, node['outputs'], no_call, initial_dist,
node['index'], self.full_name + (dsp_id,)
)
self.workflow.add_node(dsp_id, solution=sol, **kw)
distances[dsp_id] = initial_dist # Update min distance.
else:
sol = sub_sol[self.index + node['index']]
for n_id in iv_nodes:
# Namespace shortcuts.
val = pred[n_id]
for n in stlp(node['inputs'][n_id]):
# Add initial value to the sub-dispatcher.
sol._add_initial_value(
n, val, initial_dist, fringe, check_cutoff, no_call
)
return True | [
"def",
"_set_sub_dsp_node_input",
"(",
"self",
",",
"node_id",
",",
"dsp_id",
",",
"fringe",
",",
"check_cutoff",
",",
"no_call",
",",
"initial_dist",
")",
":",
"# Namespace shortcuts.",
"node",
"=",
"self",
".",
"nodes",
"[",
"dsp_id",
"]",
"dsp",
",",
"pred",
"=",
"node",
"[",
"'function'",
"]",
",",
"self",
".",
"_wf_pred",
"[",
"dsp_id",
"]",
"distances",
",",
"sub_sol",
"=",
"self",
".",
"dist",
",",
"self",
".",
"sub_sol",
"iv_nodes",
"=",
"[",
"node_id",
"]",
"# Nodes do be added as initial values.",
"self",
".",
"_meet",
"[",
"dsp_id",
"]",
"=",
"initial_dist",
"# Set view distance.",
"# Check if inputs are satisfied.",
"if",
"self",
".",
"check_wait_in",
"(",
"node",
"[",
"'wait_inputs'",
"]",
",",
"dsp_id",
")",
":",
"return",
"False",
"# Pass the node",
"if",
"dsp_id",
"not",
"in",
"distances",
":",
"kw",
"=",
"{",
"}",
"dom",
"=",
"self",
".",
"_check_sub_dsp_domain",
"(",
"dsp_id",
",",
"node",
",",
"pred",
",",
"kw",
")",
"if",
"dom",
"is",
"True",
":",
"iv_nodes",
"=",
"pred",
"# Args respect the domain.",
"elif",
"dom",
"is",
"False",
":",
"return",
"False",
"# Initialize the sub-dispatcher.",
"sub_sol",
"[",
"self",
".",
"index",
"+",
"node",
"[",
"'index'",
"]",
"]",
"=",
"sol",
"=",
"self",
".",
"_init_sub_dsp",
"(",
"dsp",
",",
"fringe",
",",
"node",
"[",
"'outputs'",
"]",
",",
"no_call",
",",
"initial_dist",
",",
"node",
"[",
"'index'",
"]",
",",
"self",
".",
"full_name",
"+",
"(",
"dsp_id",
",",
")",
")",
"self",
".",
"workflow",
".",
"add_node",
"(",
"dsp_id",
",",
"solution",
"=",
"sol",
",",
"*",
"*",
"kw",
")",
"distances",
"[",
"dsp_id",
"]",
"=",
"initial_dist",
"# Update min distance.",
"else",
":",
"sol",
"=",
"sub_sol",
"[",
"self",
".",
"index",
"+",
"node",
"[",
"'index'",
"]",
"]",
"for",
"n_id",
"in",
"iv_nodes",
":",
"# Namespace shortcuts.",
"val",
"=",
"pred",
"[",
"n_id",
"]",
"for",
"n",
"in",
"stlp",
"(",
"node",
"[",
"'inputs'",
"]",
"[",
"n_id",
"]",
")",
":",
"# Add initial value to the sub-dispatcher.",
"sol",
".",
"_add_initial_value",
"(",
"n",
",",
"val",
",",
"initial_dist",
",",
"fringe",
",",
"check_cutoff",
",",
"no_call",
")",
"return",
"True"
] | Initializes the sub-dispatcher and sets its inputs.
:param node_id:
Input node to set.
:type node_id: str
:param dsp_id:
Sub-dispatcher node id.
:type dsp_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param check_cutoff:
Check the cutoff limit.
:type check_cutoff: (int | float) -> bool
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param initial_dist:
Distance to reach the sub-dispatcher node.
:type initial_dist: int, float
:return:
If the input has been set.
:rtype: bool | [
"Initializes",
"the",
"sub",
"-",
"dispatcher",
"and",
"set",
"its",
"inputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1142-L1218 | train |
vinci1it2000/schedula | schedula/utils/sol.py | Solution._warning | def _warning(self, msg, node_id, ex, *args, **kwargs):
"""
Handles the error messages.
.. note:: If `self.raises` is True the dispatcher interrupts the dispatch
when an error occurs, otherwise it logs a warning.
"""
raises = self.raises(ex) if callable(self.raises) else self.raises
if raises and isinstance(ex, DispatcherError):
ex.update(self)
raise ex
self._errors[node_id] = msg % ((node_id, ex) + args)
node_id = '/'.join(self.full_name + (node_id,))
if raises:
raise DispatcherError(msg, node_id, ex, *args, sol=self, **kwargs)
else:
kwargs['exc_info'] = kwargs.get('exc_info', 1)
log.error(msg, node_id, ex, *args, **kwargs) | python | def _warning(self, msg, node_id, ex, *args, **kwargs):
"""
Handles the error messages.
.. note:: If `self.raises` is True the dispatcher interrupts the dispatch
when an error occurs, otherwise it logs a warning.
"""
raises = self.raises(ex) if callable(self.raises) else self.raises
if raises and isinstance(ex, DispatcherError):
ex.update(self)
raise ex
self._errors[node_id] = msg % ((node_id, ex) + args)
node_id = '/'.join(self.full_name + (node_id,))
if raises:
raise DispatcherError(msg, node_id, ex, *args, sol=self, **kwargs)
else:
kwargs['exc_info'] = kwargs.get('exc_info', 1)
log.error(msg, node_id, ex, *args, **kwargs) | [
"def",
"_warning",
"(",
"self",
",",
"msg",
",",
"node_id",
",",
"ex",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"raises",
"=",
"self",
".",
"raises",
"(",
"ex",
")",
"if",
"callable",
"(",
"self",
".",
"raises",
")",
"else",
"self",
".",
"raises",
"if",
"raises",
"and",
"isinstance",
"(",
"ex",
",",
"DispatcherError",
")",
":",
"ex",
".",
"update",
"(",
"self",
")",
"raise",
"ex",
"self",
".",
"_errors",
"[",
"node_id",
"]",
"=",
"msg",
"%",
"(",
"(",
"node_id",
",",
"ex",
")",
"+",
"args",
")",
"node_id",
"=",
"'/'",
".",
"join",
"(",
"self",
".",
"full_name",
"+",
"(",
"node_id",
",",
")",
")",
"if",
"raises",
":",
"raise",
"DispatcherError",
"(",
"msg",
",",
"node_id",
",",
"ex",
",",
"*",
"args",
",",
"sol",
"=",
"self",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"kwargs",
"[",
"'exc_info'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'exc_info'",
",",
"1",
")",
"log",
".",
"error",
"(",
"msg",
",",
"node_id",
",",
"ex",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Handles the error messages.
.. note:: If `self.raises` is True, the dispatcher interrupts the dispatch
when an error occurs; otherwise it logs a warning.
"Handles",
"the",
"error",
"messages",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1220-L1241 | train |
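The `self.raises` attribute used above may be either a plain boolean or a predicate called with the exception. A standalone sketch of that bool-or-callable pattern, with invented names (not schedula code):

def should_raise(raises, ex):
    # `raises` is either a bool or a callable taking the exception.
    return raises(ex) if callable(raises) else raises

assert should_raise(True, ValueError('boom')) is True
assert should_raise(lambda ex: isinstance(ex, KeyError), ValueError('boom')) is False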
CI-WATER/gsshapy | gsshapy/orm/gst.py | GridStreamFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Grid Stream File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Keywords
KEYWORDS = ('STREAMCELLS',
'CELLIJ')
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'STREAMCELLS':
# STREAMCELLS Handler
schunk = chunk[0].strip().split()
self.streamCells = schunk[1]
elif key == 'CELLIJ':
# CELLIJ Handler
# Parse CELLIJ Chunk
result = self._cellChunk(chunk)
# Create GSSHAPY object
self._createGsshaPyObjects(result) | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Grid Stream File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Keywords
KEYWORDS = ('STREAMCELLS',
'CELLIJ')
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'STREAMCELLS':
# STREAMCELLS Handler
schunk = chunk[0].strip().split()
self.streamCells = schunk[1]
elif key == 'CELLIJ':
# CELLIJ Handler
# Parse CELLIJ Chunk
result = self._cellChunk(chunk)
# Create GSSHAPY object
self._createGsshaPyObjects(result) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Keywords",
"KEYWORDS",
"=",
"(",
"'STREAMCELLS'",
",",
"'CELLIJ'",
")",
"# Parse file into chunks associated with keywords/cards",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Cases",
"if",
"key",
"==",
"'STREAMCELLS'",
":",
"# PIPECELLS Handler",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"self",
".",
"streamCells",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"key",
"==",
"'CELLIJ'",
":",
"# CELLIJ Handler",
"# Parse CELLIJ Chunk",
"result",
"=",
"self",
".",
"_cellChunk",
"(",
"chunk",
")",
"# Create GSSHAPY object",
"self",
".",
"_createGsshaPyObjects",
"(",
"result",
")"
] | Grid Stream File Read from File Method | [
"Grid",
"Stream",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L60-L92 | train |
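The reader above delegates to pt.chunk to group lines under keyword cards. A simplified, hypothetical re-implementation of that grouping (not the actual gsshapy parsetools code) shows the idea:

def chunk_by_keyword(keywords, lines):
    # Group lines into chunks, starting a new chunk at each keyword card.
    chunks = {}
    current = None
    for line in lines:
        first = line.strip().split()[0] if line.strip() else ''
        if first in keywords:
            current = [line]
            chunks.setdefault(first, []).append(current)
        elif current is not None:
            current.append(line)
    return chunks

sample = ['STREAMCELLS 2', 'CELLIJ 10 12', 'NUMNODES 1', 'LINKNODE 1 3 0.5']
print(chunk_by_keyword(('STREAMCELLS', 'CELLIJ'), sample))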
CI-WATER/gsshapy | gsshapy/orm/gst.py | GridStreamFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Grid Stream File Write to File Method
"""
# Write lines
openFile.write('GRIDSTREAMFILE\n')
openFile.write('STREAMCELLS %s\n' % self.streamCells)
for cell in self.gridStreamCells:
openFile.write('CELLIJ %s %s\n' % (cell.cellI, cell.cellJ))
openFile.write('NUMNODES %s\n' % cell.numNodes)
for node in cell.gridStreamNodes:
openFile.write('LINKNODE %s %s %.6f\n' % (
node.linkNumber,
node.nodeNumber,
node.nodePercentGrid)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Grid Stream File Write to File Method
"""
# Write lines
openFile.write('GRIDSTREAMFILE\n')
openFile.write('STREAMCELLS %s\n' % self.streamCells)
for cell in self.gridStreamCells:
openFile.write('CELLIJ %s %s\n' % (cell.cellI, cell.cellJ))
openFile.write('NUMNODES %s\n' % cell.numNodes)
for node in cell.gridStreamNodes:
openFile.write('LINKNODE %s %s %.6f\n' % (
node.linkNumber,
node.nodeNumber,
node.nodePercentGrid)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Write lines",
"openFile",
".",
"write",
"(",
"'GRIDSTREAMFILE\\n'",
")",
"openFile",
".",
"write",
"(",
"'STREAMCELLS %s\\n'",
"%",
"self",
".",
"streamCells",
")",
"for",
"cell",
"in",
"self",
".",
"gridStreamCells",
":",
"openFile",
".",
"write",
"(",
"'CELLIJ %s %s\\n'",
"%",
"(",
"cell",
".",
"cellI",
",",
"cell",
".",
"cellJ",
")",
")",
"openFile",
".",
"write",
"(",
"'NUMNODES %s\\n'",
"%",
"cell",
".",
"numNodes",
")",
"for",
"node",
"in",
"cell",
".",
"gridStreamNodes",
":",
"openFile",
".",
"write",
"(",
"'LINKNODE %s %s %.6f\\n'",
"%",
"(",
"node",
".",
"linkNumber",
",",
"node",
".",
"nodeNumber",
",",
"node",
".",
"nodePercentGrid",
")",
")"
] | Grid Stream File Write to File Method | [
"Grid",
"Stream",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L95-L111 | train |
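For reference, the writer above produces a card-based text file; with made-up values for a single cell and node, the output looks like this:

# Illustrative output of the grid stream writer; the numbers are made up.
example_gst = (
    'GRIDSTREAMFILE\n'
    'STREAMCELLS 1\n'
    'CELLIJ 10 12\n'
    'NUMNODES 1\n'
    'LINKNODE 2 4 0.750000\n'
)
print(example_gst)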
CI-WATER/gsshapy | gsshapy/orm/gst.py | GridStreamFile._createGsshaPyObjects | def _createGsshaPyObjects(self, cell):
"""
Create GSSHAPY PipeGridCell and PipeGridNode Objects Method
"""
# Initialize GSSHAPY PipeGridCell object
gridCell = GridStreamCell(cellI=cell['i'],
cellJ=cell['j'],
numNodes=cell['numNodes'])
# Associate GridStreamCell with GridStreamFile
gridCell.gridStreamFile = self
for linkNode in cell['linkNodes']:
# Create GSSHAPY GridStreamNode object
gridNode = GridStreamNode(linkNumber=linkNode['linkNumber'],
nodeNumber=linkNode['nodeNumber'],
nodePercentGrid=linkNode['percent'])
# Associate GridStreamNode with GridStreamCell
gridNode.gridStreamCell = gridCell | python | def _createGsshaPyObjects(self, cell):
"""
Create GSSHAPY PipeGridCell and PipeGridNode Objects Method
"""
# Initialize GSSHAPY PipeGridCell object
gridCell = GridStreamCell(cellI=cell['i'],
cellJ=cell['j'],
numNodes=cell['numNodes'])
# Associate GridStreamCell with GridStreamFile
gridCell.gridStreamFile = self
for linkNode in cell['linkNodes']:
# Create GSSHAPY GridStreamNode object
gridNode = GridStreamNode(linkNumber=linkNode['linkNumber'],
nodeNumber=linkNode['nodeNumber'],
nodePercentGrid=linkNode['percent'])
# Associate GridStreamNode with GridStreamCell
gridNode.gridStreamCell = gridCell | [
"def",
"_createGsshaPyObjects",
"(",
"self",
",",
"cell",
")",
":",
"# Initialize GSSHAPY PipeGridCell object",
"gridCell",
"=",
"GridStreamCell",
"(",
"cellI",
"=",
"cell",
"[",
"'i'",
"]",
",",
"cellJ",
"=",
"cell",
"[",
"'j'",
"]",
",",
"numNodes",
"=",
"cell",
"[",
"'numNodes'",
"]",
")",
"# Associate GridStreamCell with GridStreamFile",
"gridCell",
".",
"gridStreamFile",
"=",
"self",
"for",
"linkNode",
"in",
"cell",
"[",
"'linkNodes'",
"]",
":",
"# Create GSSHAPY GridStreamNode object",
"gridNode",
"=",
"GridStreamNode",
"(",
"linkNumber",
"=",
"linkNode",
"[",
"'linkNumber'",
"]",
",",
"nodeNumber",
"=",
"linkNode",
"[",
"'nodeNumber'",
"]",
",",
"nodePercentGrid",
"=",
"linkNode",
"[",
"'percent'",
"]",
")",
"# Associate GridStreamNode with GridStreamCell",
"gridNode",
".",
"gridStreamCell",
"=",
"gridCell"
] | Create GSSHAPY PipeGridCell and PipeGridNode Objects Method | [
"Create",
"GSSHAPY",
"PipeGridCell",
"and",
"PipeGridNode",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L113-L132 | train |
CI-WATER/gsshapy | gsshapy/orm/idx.py | IndexMap._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Index Map Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and read plain text into text field
with open(path, 'r') as f:
self.rasterText = f.read()
# Retrieve metadata from header
lines = self.rasterText.split('\n')
for line in lines[0:6]:
spline = line.split()
if 'north' in spline[0].lower():
self.north = float(spline[1])
elif 'south' in spline[0].lower():
self.south = float(spline[1])
elif 'east' in spline[0].lower():
self.east = float(spline[1])
elif 'west' in spline[0].lower():
self.west = float(spline[1])
elif 'rows' in spline[0].lower():
self.rows = int(spline[1])
elif 'cols' in spline[0].lower():
self.columns = int(spline[1])
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
grassRasterPath=path,
srid=str(spatialReferenceID),
noData='-1')
self.raster = wkbRaster
self.srid = spatialReferenceID
# Assign other properties
self.filename = filename | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Index Map Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and read plain text into text field
with open(path, 'r') as f:
self.rasterText = f.read()
# Retrieve metadata from header
lines = self.rasterText.split('\n')
for line in lines[0:6]:
spline = line.split()
if 'north' in spline[0].lower():
self.north = float(spline[1])
elif 'south' in spline[0].lower():
self.south = float(spline[1])
elif 'east' in spline[0].lower():
self.east = float(spline[1])
elif 'west' in spline[0].lower():
self.west = float(spline[1])
elif 'rows' in spline[0].lower():
self.rows = int(spline[1])
elif 'cols' in spline[0].lower():
self.columns = int(spline[1])
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
grassRasterPath=path,
srid=str(spatialReferenceID),
noData='-1')
self.raster = wkbRaster
self.srid = spatialReferenceID
# Assign other properties
self.filename = filename | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and read plain text into text field",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"rasterText",
"=",
"f",
".",
"read",
"(",
")",
"# Retrieve metadata from header",
"lines",
"=",
"self",
".",
"rasterText",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
"[",
"0",
":",
"6",
"]",
":",
"spline",
"=",
"line",
".",
"split",
"(",
")",
"if",
"'north'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"north",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'south'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"south",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'east'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"east",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'west'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"west",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'rows'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"rows",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'cols'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"self",
".",
"columns",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"if",
"spatial",
":",
"# Get well known binary from the raster file using the MapKit RasterLoader",
"wkbRaster",
"=",
"RasterLoader",
".",
"grassAsciiRasterToWKB",
"(",
"session",
"=",
"session",
",",
"grassRasterPath",
"=",
"path",
",",
"srid",
"=",
"str",
"(",
"spatialReferenceID",
")",
",",
"noData",
"=",
"'-1'",
")",
"self",
".",
"raster",
"=",
"wkbRaster",
"self",
".",
"srid",
"=",
"spatialReferenceID",
"# Assign other properties",
"self",
".",
"filename",
"=",
"filename"
] | Index Map Read from File Method | [
"Index",
"Map",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/idx.py#L96-L135 | train |
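The header parsing above only inspects the first six lines of the GRASS ASCII raster for north/south/east/west/rows/cols cards. A standalone sketch with a made-up header (not gsshapy code):

header = """north: 4000.0
south: 3000.0
east: 2000.0
west: 1000.0
rows: 10
cols: 20"""

meta = {}
for line in header.splitlines():
    key, value = line.split()
    key = key.rstrip(':').lower()
    meta[key] = int(value) if key in ('rows', 'cols') else float(value)

print(meta)  # {'north': 4000.0, 'south': 3000.0, ..., 'rows': 10, 'cols': 20}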
CI-WATER/gsshapy | gsshapy/orm/idx.py | IndexMap.write | def write(self, directory, name=None, session=None, replaceParamFile=None):
"""
Index Map Write to File Method
"""
# Initiate file
if name != None:
filename = '%s.%s' % (name, self.fileExtension)
filePath = os.path.join(directory, filename)
else:
filePath = os.path.join(directory, self.filename)
# If the raster field is not empty, write from this field
if type(self.raster) != type(None):
# Configure RasterConverter
converter = RasterConverter(session)
# Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid
grassAsciiGrid = converter.getAsGrassAsciiRaster(rasterFieldName='raster',
tableName=self.__tablename__,
rasterIdFieldName='id',
rasterId=self.id)
# Write to file
with open(filePath, 'w') as mapFile:
mapFile.write(grassAsciiGrid)
else:
if self.rasterText is not None:
# Open file and write, raster_text only
with open(filePath, 'w') as mapFile:
mapFile.write(self.rasterText) | python | def write(self, directory, name=None, session=None, replaceParamFile=None):
"""
Index Map Write to File Method
"""
# Initiate file
if name != None:
filename = '%s.%s' % (name, self.fileExtension)
filePath = os.path.join(directory, filename)
else:
filePath = os.path.join(directory, self.filename)
# If the raster field is not empty, write from this field
if type(self.raster) != type(None):
# Configure RasterConverter
converter = RasterConverter(session)
# Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid
grassAsciiGrid = converter.getAsGrassAsciiRaster(rasterFieldName='raster',
tableName=self.__tablename__,
rasterIdFieldName='id',
rasterId=self.id)
# Write to file
with open(filePath, 'w') as mapFile:
mapFile.write(grassAsciiGrid)
else:
if self.rasterText is not None:
# Open file and write, raster_text only
with open(filePath, 'w') as mapFile:
mapFile.write(self.rasterText) | [
"def",
"write",
"(",
"self",
",",
"directory",
",",
"name",
"=",
"None",
",",
"session",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"# Initiate file",
"if",
"name",
"!=",
"None",
":",
"filename",
"=",
"'%s.%s'",
"%",
"(",
"name",
",",
"self",
".",
"fileExtension",
")",
"filePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"else",
":",
"filePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"self",
".",
"filename",
")",
"# If the raster field is not empty, write from this field",
"if",
"type",
"(",
"self",
".",
"raster",
")",
"!=",
"type",
"(",
"None",
")",
":",
"# Configure RasterConverter",
"converter",
"=",
"RasterConverter",
"(",
"session",
")",
"# Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid",
"grassAsciiGrid",
"=",
"converter",
".",
"getAsGrassAsciiRaster",
"(",
"rasterFieldName",
"=",
"'raster'",
",",
"tableName",
"=",
"self",
".",
"__tablename__",
",",
"rasterIdFieldName",
"=",
"'id'",
",",
"rasterId",
"=",
"self",
".",
"id",
")",
"# Write to file",
"with",
"open",
"(",
"filePath",
",",
"'w'",
")",
"as",
"mapFile",
":",
"mapFile",
".",
"write",
"(",
"grassAsciiGrid",
")",
"else",
":",
"if",
"self",
".",
"rasterText",
"is",
"not",
"None",
":",
"# Open file and write, raster_text only",
"with",
"open",
"(",
"filePath",
",",
"'w'",
")",
"as",
"mapFile",
":",
"mapFile",
".",
"write",
"(",
"self",
".",
"rasterText",
")"
] | Index Map Write to File Method | [
"Index",
"Map",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/idx.py#L137-L168 | train |
CI-WATER/gsshapy | gsshapy/orm/gpi.py | GridPipeFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Grid Pipe File Write to File Method
"""
# Write Lines
openFile.write('GRIDPIPEFILE\n')
openFile.write('PIPECELLS %s\n' % self.pipeCells)
for cell in self.gridPipeCells:
openFile.write('CELLIJ %s %s\n' % (cell.cellI, cell.cellJ))
openFile.write('NUMPIPES %s\n' % cell.numPipes)
for node in cell.gridPipeNodes:
openFile.write('SPIPE %s %s %.6f\n' % (
node.linkNumber,
node.nodeNumber,
node.fractPipeLength)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Grid Pipe File Write to File Method
"""
# Write Lines
openFile.write('GRIDPIPEFILE\n')
openFile.write('PIPECELLS %s\n' % self.pipeCells)
for cell in self.gridPipeCells:
openFile.write('CELLIJ %s %s\n' % (cell.cellI, cell.cellJ))
openFile.write('NUMPIPES %s\n' % cell.numPipes)
for node in cell.gridPipeNodes:
openFile.write('SPIPE %s %s %.6f\n' % (
node.linkNumber,
node.nodeNumber,
node.fractPipeLength)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Write Lines",
"openFile",
".",
"write",
"(",
"'GRIDPIPEFILE\\n'",
")",
"openFile",
".",
"write",
"(",
"'PIPECELLS %s\\n'",
"%",
"self",
".",
"pipeCells",
")",
"for",
"cell",
"in",
"self",
".",
"gridPipeCells",
":",
"openFile",
".",
"write",
"(",
"'CELLIJ %s %s\\n'",
"%",
"(",
"cell",
".",
"cellI",
",",
"cell",
".",
"cellJ",
")",
")",
"openFile",
".",
"write",
"(",
"'NUMPIPES %s\\n'",
"%",
"cell",
".",
"numPipes",
")",
"for",
"node",
"in",
"cell",
".",
"gridPipeNodes",
":",
"openFile",
".",
"write",
"(",
"'SPIPE %s %s %.6f\\n'",
"%",
"(",
"node",
".",
"linkNumber",
",",
"node",
".",
"nodeNumber",
",",
"node",
".",
"fractPipeLength",
")",
")"
] | Grid Pipe File Write to File Method | [
"Grid",
"Pipe",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L94-L110 | train |
CI-WATER/gsshapy | gsshapy/orm/gpi.py | GridPipeFile._createGsshaPyObjects | def _createGsshaPyObjects(self, cell):
"""
Create GSSHAPY GridPipeCell and GridPipeNode Objects Method
"""
# Initialize GSSHAPY GridPipeCell object
gridCell = GridPipeCell(cellI=cell['i'],
cellJ=cell['j'],
numPipes=cell['numPipes'])
# Associate GridPipeCell with GridPipeFile
gridCell.gridPipeFile = self
for spipe in cell['spipes']:
# Create GSSHAPY GridPipeNode object
gridNode = GridPipeNode(linkNumber=spipe['linkNumber'],
nodeNumber=spipe['nodeNumber'],
fractPipeLength=spipe['fraction'])
# Associate GridPipeNode with GridPipeCell
gridNode.gridPipeCell = gridCell | python | def _createGsshaPyObjects(self, cell):
"""
Create GSSHAPY GridPipeCell and GridPipeNode Objects Method
"""
# Initialize GSSHAPY GridPipeCell object
gridCell = GridPipeCell(cellI=cell['i'],
cellJ=cell['j'],
numPipes=cell['numPipes'])
# Associate GridPipeCell with GridPipeFile
gridCell.gridPipeFile = self
for spipe in cell['spipes']:
# Create GSSHAPY GridPipeNode object
gridNode = GridPipeNode(linkNumber=spipe['linkNumber'],
nodeNumber=spipe['nodeNumber'],
fractPipeLength=spipe['fraction'])
# Associate GridPipeNode with GridPipeCell
gridNode.gridPipeCell = gridCell | [
"def",
"_createGsshaPyObjects",
"(",
"self",
",",
"cell",
")",
":",
"# Initialize GSSHAPY GridPipeCell object",
"gridCell",
"=",
"GridPipeCell",
"(",
"cellI",
"=",
"cell",
"[",
"'i'",
"]",
",",
"cellJ",
"=",
"cell",
"[",
"'j'",
"]",
",",
"numPipes",
"=",
"cell",
"[",
"'numPipes'",
"]",
")",
"# Associate GridPipeCell with GridPipeFile",
"gridCell",
".",
"gridPipeFile",
"=",
"self",
"for",
"spipe",
"in",
"cell",
"[",
"'spipes'",
"]",
":",
"# Create GSSHAPY GridPipeNode object",
"gridNode",
"=",
"GridPipeNode",
"(",
"linkNumber",
"=",
"spipe",
"[",
"'linkNumber'",
"]",
",",
"nodeNumber",
"=",
"spipe",
"[",
"'nodeNumber'",
"]",
",",
"fractPipeLength",
"=",
"spipe",
"[",
"'fraction'",
"]",
")",
"# Associate GridPipeNode with GridPipeCell",
"gridNode",
".",
"gridPipeCell",
"=",
"gridCell"
] | Create GSSHAPY GridPipeCell and GridPipeNode Objects Method | [
"Create",
"GSSHAPY",
"GridPipeCell",
"and",
"GridPipeNode",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L112-L131 | train |
CI-WATER/gsshapy | gsshapy/orm/gpi.py | GridPipeFile._cellChunk | def _cellChunk(self, lines):
"""
Parse CELLIJ Chunk Method
"""
KEYWORDS = ('CELLIJ',
'NUMPIPES',
'SPIPE')
result = {'i': None,
'j': None,
'numPipes': None,
'spipes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'CELLIJ':
# CELLIJ handler
result['i'] = schunk[1]
result['j'] = schunk[2]
elif card == 'NUMPIPES':
# NUMPIPES handler
result['numPipes'] = schunk[1]
elif card == 'SPIPE':
# SPIPE handler
pipe = {'linkNumber': schunk[1],
'nodeNumber': schunk[2],
'fraction': schunk[3]}
result['spipes'].append(pipe)
return result | python | def _cellChunk(self, lines):
"""
Parse CELLIJ Chunk Method
"""
KEYWORDS = ('CELLIJ',
'NUMPIPES',
'SPIPE')
result = {'i': None,
'j': None,
'numPipes': None,
'spipes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'CELLIJ':
# CELLIJ handler
result['i'] = schunk[1]
result['j'] = schunk[2]
elif card == 'NUMPIPES':
# NUMPIPES handler
result['numPipes'] = schunk[1]
elif card == 'SPIPE':
# SPIPE handler
pipe = {'linkNumber': schunk[1],
'nodeNumber': schunk[2],
'fraction': schunk[3]}
result['spipes'].append(pipe)
return result | [
"def",
"_cellChunk",
"(",
"self",
",",
"lines",
")",
":",
"KEYWORDS",
"=",
"(",
"'CELLIJ'",
",",
"'NUMPIPES'",
",",
"'SPIPE'",
")",
"result",
"=",
"{",
"'i'",
":",
"None",
",",
"'j'",
":",
"None",
",",
"'numPipes'",
":",
"None",
",",
"'spipes'",
":",
"[",
"]",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"card",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"card",
"==",
"'CELLIJ'",
":",
"# CELLIJ handler",
"result",
"[",
"'i'",
"]",
"=",
"schunk",
"[",
"1",
"]",
"result",
"[",
"'j'",
"]",
"=",
"schunk",
"[",
"2",
"]",
"elif",
"card",
"==",
"'NUMPIPES'",
":",
"# NUMPIPES handler",
"result",
"[",
"'numPipes'",
"]",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"==",
"'SPIPE'",
":",
"# SPIPE handler",
"pipe",
"=",
"{",
"'linkNumber'",
":",
"schunk",
"[",
"1",
"]",
",",
"'nodeNumber'",
":",
"schunk",
"[",
"2",
"]",
",",
"'fraction'",
":",
"schunk",
"[",
"3",
"]",
"}",
"result",
"[",
"'spipes'",
"]",
".",
"append",
"(",
"pipe",
")",
"return",
"result"
] | Parse CELLIJ Chunk Method | [
"Parse",
"CELLIJ",
"Chunk",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L133-L172 | train |
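With an illustrative CELLIJ chunk (values invented), the parser above returns a dictionary shaped roughly like this; note that the parsed values stay as strings:

lines = ['CELLIJ 3 7\n', 'NUMPIPES 1\n', 'SPIPE 2 5 0.25\n']
# _cellChunk(lines) would yield something of this shape:
expected = {
    'i': '3',
    'j': '7',
    'numPipes': '1',
    'spipes': [{'linkNumber': '2', 'nodeNumber': '5', 'fraction': '0.25'}],
}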
CI-WATER/gsshapy | gsshapy/orm/rep.py | ReplaceParamFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Replace Param File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
self.numParameters = sline[0]
else:
# Create GSSHAPY TargetParameter object
target = TargetParameter(targetVariable=sline[0],
varFormat=sline[1])
# Associate TargetParameter with ReplaceParamFile
target.replaceParamFile = self | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Replace Param File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
self.numParameters = sline[0]
else:
# Create GSSHAPY TargetParameter object
target = TargetParameter(targetVariable=sline[0],
varFormat=sline[1])
# Associate TargetParameter with ReplaceParamFile
target.replaceParamFile = self | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"sline",
")",
"==",
"1",
":",
"self",
".",
"numParameters",
"=",
"sline",
"[",
"0",
"]",
"else",
":",
"# Create GSSHAPY TargetParameter object",
"target",
"=",
"TargetParameter",
"(",
"targetVariable",
"=",
"sline",
"[",
"0",
"]",
",",
"varFormat",
"=",
"sline",
"[",
"1",
"]",
")",
"# Associate TargetParameter with ReplaceParamFile",
"target",
".",
"replaceParamFile",
"=",
"self"
] | Replace Param File Read from File Method | [
"Replace",
"Param",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L55-L74 | train |
CI-WATER/gsshapy | gsshapy/orm/rep.py | ReplaceParamFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Replace Param File Write to File Method
"""
# Retrieve TargetParameter objects
targets = self.targetParameters
# Write lines
openFile.write('%s\n' % self.numParameters)
for target in targets:
openFile.write('%s %s\n' % (target.targetVariable, target.varFormat)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Replace Param File Write to File Method
"""
# Retrieve TargetParameter objects
targets = self.targetParameters
# Write lines
openFile.write('%s\n' % self.numParameters)
for target in targets:
openFile.write('%s %s\n' % (target.targetVariable, target.varFormat)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve TargetParameter objects",
"targets",
"=",
"self",
".",
"targetParameters",
"# Write lines",
"openFile",
".",
"write",
"(",
"'%s\\n'",
"%",
"self",
".",
"numParameters",
")",
"for",
"target",
"in",
"targets",
":",
"openFile",
".",
"write",
"(",
"'%s %s\\n'",
"%",
"(",
"target",
".",
"targetVariable",
",",
"target",
".",
"varFormat",
")",
")"
] | Replace Param File Write to File Method | [
"Replace",
"Param",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L76-L87 | train |
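Taken together, the reader and writer above imply a simple two-column file: a count line followed by one "targetVariable varFormat" pair per line. An illustrative body with placeholder names (not taken from a real GSSHA project):

# Placeholder replacement-parameter file body; names and formats are invented.
example_rep = (
    '2\n'
    'target_variable_1 format_1\n'
    'target_variable_2 format_2\n'
)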
CI-WATER/gsshapy | gsshapy/orm/rep.py | ReplaceValFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Replace Val File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
valLine = ReplaceValLine()
valLine.contents = line
valLine.replaceValFile = self | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Replace Val File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
valLine = ReplaceValLine()
valLine.contents = line
valLine.replaceValFile = self | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"valLine",
"=",
"ReplaceValLine",
"(",
")",
"valLine",
".",
"contents",
"=",
"line",
"valLine",
".",
"replaceValFile",
"=",
"self"
] | Replace Val File Read from File Method | [
"Replace",
"Val",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L147-L159 | train |
CI-WATER/gsshapy | gsshapy/orm/rep.py | ReplaceValFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Replace Val File Write to File Method
"""
# Write lines
for line in self.lines:
openFile.write(line.contents) | python | def _write(self, session, openFile, replaceParamFile):
"""
Replace Val File Write to File Method
"""
# Write lines
for line in self.lines:
openFile.write(line.contents) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Write lines",
"for",
"line",
"in",
"self",
".",
"lines",
":",
"openFile",
".",
"write",
"(",
"line",
".",
"contents",
")"
] | Replace Val File Write to File Method | [
"Replace",
"Val",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L161-L167 | train |
bayesimpact/fex | fex/feature_extractor.py | FeatureExtractor.emit | def emit(self, data_frame):
"""Use this function in emit data into the store.
:param data_frame: DataFrame to be recorded.
"""
if self.result is not None:
raise MultipleEmitsError()
data_frame.columns = [self.prefix + '__' + c
for c in data_frame.columns]
self.result = data_frame | python | def emit(self, data_frame):
"""Use this function in emit data into the store.
:param data_frame: DataFrame to be recorded.
"""
if self.result is not None:
raise MultipleEmitsError()
data_frame.columns = [self.prefix + '__' + c
for c in data_frame.columns]
self.result = data_frame | [
"def",
"emit",
"(",
"self",
",",
"data_frame",
")",
":",
"if",
"self",
".",
"result",
"is",
"not",
"None",
":",
"raise",
"MultipleEmitsError",
"(",
")",
"data_frame",
".",
"columns",
"=",
"[",
"self",
".",
"prefix",
"+",
"'__'",
"+",
"c",
"for",
"c",
"in",
"data_frame",
".",
"columns",
"]",
"self",
".",
"result",
"=",
"data_frame"
] | Use this function to emit data into the store.
:param data_frame: DataFrame to be recorded. | [
"Use",
"this",
"function",
"in",
"emit",
"data",
"into",
"the",
"store",
"."
] | 2d9b4e9be2bf98847a36055b907411fd5557eb77 | https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/feature_extractor.py#L41-L50 | train |
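emit() records a single DataFrame after prefixing every column with the extractor's prefix. A standalone pandas sketch of that renaming (this assumes pandas is installed and is not the fex API itself):

import pandas as pd

prefix = 'my_extractor'   # stands in for self.prefix
df = pd.DataFrame({'age': [1, 2], 'score': [0.5, 0.9]})
df.columns = [prefix + '__' + c for c in df.columns]
print(list(df.columns))   # ['my_extractor__age', 'my_extractor__score']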
tjvr/skip | skip/__init__.py | Interpreter.trigger_hats | def trigger_hats(self, command, arg=None, callback=None):
"""Returns a list with each script that is triggered."""
threads = []
for scriptable in [self.project.stage] + self.project.sprites:
threads += self.trigger_scriptable_hats(scriptable, command, arg,
callback)
return threads | python | def trigger_hats(self, command, arg=None, callback=None):
"""Returns a list with each script that is triggered."""
threads = []
for scriptable in [self.project.stage] + self.project.sprites:
threads += self.trigger_scriptable_hats(scriptable, command, arg,
callback)
return threads | [
"def",
"trigger_hats",
"(",
"self",
",",
"command",
",",
"arg",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"threads",
"=",
"[",
"]",
"for",
"scriptable",
"in",
"[",
"self",
".",
"project",
".",
"stage",
"]",
"+",
"self",
".",
"project",
".",
"sprites",
":",
"threads",
"+=",
"self",
".",
"trigger_scriptable_hats",
"(",
"scriptable",
",",
"command",
",",
"arg",
",",
"callback",
")",
"return",
"threads"
] | Returns a list with each script that is triggered. | [
"Returns",
"a",
"list",
"with",
"each",
"script",
"that",
"is",
"triggered",
"."
] | ac84f7198079732bf22c3b8cbc0dc1a073b1d539 | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L115-L121 | train |
tjvr/skip | skip/__init__.py | Interpreter.push_script | def push_script(self, scriptable, script, callback=None):
"""Run the script and add it to the list of threads."""
if script in self.threads:
self.threads[script].finish()
thread = Thread(self.run_script(scriptable, script),
scriptable, callback)
self.new_threads[script] = thread
return thread | python | def push_script(self, scriptable, script, callback=None):
"""Run the script and add it to the list of threads."""
if script in self.threads:
self.threads[script].finish()
thread = Thread(self.run_script(scriptable, script),
scriptable, callback)
self.new_threads[script] = thread
return thread | [
"def",
"push_script",
"(",
"self",
",",
"scriptable",
",",
"script",
",",
"callback",
"=",
"None",
")",
":",
"if",
"script",
"in",
"self",
".",
"threads",
":",
"self",
".",
"threads",
"[",
"script",
"]",
".",
"finish",
"(",
")",
"thread",
"=",
"Thread",
"(",
"self",
".",
"run_script",
"(",
"scriptable",
",",
"script",
")",
",",
"scriptable",
",",
"callback",
")",
"self",
".",
"new_threads",
"[",
"script",
"]",
"=",
"thread",
"return",
"thread"
] | Run the script and add it to the list of threads. | [
"Run",
"the",
"script",
"and",
"add",
"it",
"to",
"the",
"list",
"of",
"threads",
"."
] | ac84f7198079732bf22c3b8cbc0dc1a073b1d539 | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L134-L141 | train |
tjvr/skip | skip/__init__.py | Interpreter.tick | def tick(self, events):
"""Execute one frame of the interpreter.
Don't call more than 40 times per second.
"""
self.add_new_threads()
if self.drag_sprite:
(mx, my) = self.screen.get_mouse_pos()
(ox, oy) = self.drag_offset
new_position = (mx + ox, my + oy)
if self.drag_sprite.position != new_position:
self.has_dragged = True
self.drag_sprite.position = new_position
for event in events:
if event.kind == "key_pressed":
assert event.value in kurt.Insert(None, "key").options()
self.trigger_hats("whenKeyPressed", event.value)
elif event.kind == "mouse_down":
mouse_pos = self.screen.get_mouse_pos()
for sprite in reversed(self.project.sprites):
rect = bounds(sprite)
if rect.collide_point(mouse_pos):
if self.screen.touching_mouse(sprite):
scriptable = sprite
break
else:
scriptable = self.project.stage
if scriptable.is_draggable:
(mx, my) = self.screen.get_mouse_pos()
(x, y) = scriptable.position
self.drag_offset = (x - mx, y - my)
self.drag_sprite = scriptable
self.has_dragged = False
go_to_front(scriptable)
else:
self.trigger_scriptable_hats(scriptable, "whenClicked")
elif event.kind == "mouse_up":
if self.drag_sprite:
if not self.has_dragged:
self.trigger_scriptable_hats(self.drag_sprite,
"whenClicked")
self.drag_sprite = None
remove_threads = []
while 1:
for (script, thread) in self.threads.items():
modified = False
for event in thread.tick():
if event.kind == "stop":
if event.value == "all":
self.stop()
return
elif event.value == "other scripts in sprite":
for (script, other) in self.threads.items():
if other.scriptable == thread.scriptable:
other.finish()
del self.threads[script]
modified = True
break
else:
thread.finish()
del self.threads[script]
modified = True
break
else: # Pass to Screen
yield event
if modified:
break
else:
break
self.add_new_threads() | python | def tick(self, events):
"""Execute one frame of the interpreter.
Don't call more than 40 times per second.
"""
self.add_new_threads()
if self.drag_sprite:
(mx, my) = self.screen.get_mouse_pos()
(ox, oy) = self.drag_offset
new_position = (mx + ox, my + oy)
if self.drag_sprite.position != new_position:
self.has_dragged = True
self.drag_sprite.position = new_position
for event in events:
if event.kind == "key_pressed":
assert event.value in kurt.Insert(None, "key").options()
self.trigger_hats("whenKeyPressed", event.value)
elif event.kind == "mouse_down":
mouse_pos = self.screen.get_mouse_pos()
for sprite in reversed(self.project.sprites):
rect = bounds(sprite)
if rect.collide_point(mouse_pos):
if self.screen.touching_mouse(sprite):
scriptable = sprite
break
else:
scriptable = self.project.stage
if scriptable.is_draggable:
(mx, my) = self.screen.get_mouse_pos()
(x, y) = scriptable.position
self.drag_offset = (x - mx, y - my)
self.drag_sprite = scriptable
self.has_dragged = False
go_to_front(scriptable)
else:
self.trigger_scriptable_hats(scriptable, "whenClicked")
elif event.kind == "mouse_up":
if self.drag_sprite:
if not self.has_dragged:
self.trigger_scriptable_hats(self.drag_sprite,
"whenClicked")
self.drag_sprite = None
remove_threads = []
while 1:
for (script, thread) in self.threads.items():
modified = False
for event in thread.tick():
if event.kind == "stop":
if event.value == "all":
self.stop()
return
elif event.value == "other scripts in sprite":
for (script, other) in self.threads.items():
if other.scriptable == thread.scriptable:
other.finish()
del self.threads[script]
modified = True
break
else:
thread.finish()
del self.threads[script]
modified = True
break
else: # Pass to Screen
yield event
if modified:
break
else:
break
self.add_new_threads() | [
"def",
"tick",
"(",
"self",
",",
"events",
")",
":",
"self",
".",
"add_new_threads",
"(",
")",
"if",
"self",
".",
"drag_sprite",
":",
"(",
"mx",
",",
"my",
")",
"=",
"self",
".",
"screen",
".",
"get_mouse_pos",
"(",
")",
"(",
"ox",
",",
"oy",
")",
"=",
"self",
".",
"drag_offset",
"new_position",
"=",
"(",
"mx",
"+",
"ox",
",",
"my",
"+",
"oy",
")",
"if",
"self",
".",
"drag_sprite",
".",
"position",
"!=",
"new_position",
":",
"self",
".",
"has_dragged",
"=",
"True",
"self",
".",
"drag_sprite",
".",
"position",
"=",
"new_position",
"for",
"event",
"in",
"events",
":",
"if",
"event",
".",
"kind",
"==",
"\"key_pressed\"",
":",
"assert",
"event",
".",
"value",
"in",
"kurt",
".",
"Insert",
"(",
"None",
",",
"\"key\"",
")",
".",
"options",
"(",
")",
"self",
".",
"trigger_hats",
"(",
"\"whenKeyPressed\"",
",",
"event",
".",
"value",
")",
"elif",
"event",
".",
"kind",
"==",
"\"mouse_down\"",
":",
"mouse_pos",
"=",
"self",
".",
"screen",
".",
"get_mouse_pos",
"(",
")",
"for",
"sprite",
"in",
"reversed",
"(",
"self",
".",
"project",
".",
"sprites",
")",
":",
"rect",
"=",
"bounds",
"(",
"sprite",
")",
"if",
"rect",
".",
"collide_point",
"(",
"mouse_pos",
")",
":",
"if",
"self",
".",
"screen",
".",
"touching_mouse",
"(",
"sprite",
")",
":",
"scriptable",
"=",
"sprite",
"break",
"else",
":",
"scriptable",
"=",
"self",
".",
"project",
".",
"stage",
"if",
"scriptable",
".",
"is_draggable",
":",
"(",
"mx",
",",
"my",
")",
"=",
"self",
".",
"screen",
".",
"get_mouse_pos",
"(",
")",
"(",
"x",
",",
"y",
")",
"=",
"scriptable",
".",
"position",
"self",
".",
"drag_offset",
"=",
"(",
"x",
"-",
"mx",
",",
"y",
"-",
"my",
")",
"self",
".",
"drag_sprite",
"=",
"scriptable",
"self",
".",
"has_dragged",
"=",
"False",
"go_to_front",
"(",
"scriptable",
")",
"else",
":",
"self",
".",
"trigger_scriptable_hats",
"(",
"scriptable",
",",
"\"whenClicked\"",
")",
"elif",
"event",
".",
"kind",
"==",
"\"mouse_up\"",
":",
"if",
"self",
".",
"drag_sprite",
":",
"if",
"not",
"self",
".",
"has_dragged",
":",
"self",
".",
"trigger_scriptable_hats",
"(",
"self",
".",
"drag_sprite",
",",
"\"whenClicked\"",
")",
"self",
".",
"drag_sprite",
"=",
"None",
"remove_threads",
"=",
"[",
"]",
"while",
"1",
":",
"for",
"(",
"script",
",",
"thread",
")",
"in",
"self",
".",
"threads",
".",
"items",
"(",
")",
":",
"modified",
"=",
"False",
"for",
"event",
"in",
"thread",
".",
"tick",
"(",
")",
":",
"if",
"event",
".",
"kind",
"==",
"\"stop\"",
":",
"if",
"event",
".",
"value",
"==",
"\"all\"",
":",
"self",
".",
"stop",
"(",
")",
"return",
"elif",
"event",
".",
"value",
"==",
"\"other scripts in sprite\"",
":",
"for",
"(",
"script",
",",
"other",
")",
"in",
"self",
".",
"threads",
".",
"items",
"(",
")",
":",
"if",
"other",
".",
"scriptable",
"==",
"thread",
".",
"scriptable",
":",
"other",
".",
"finish",
"(",
")",
"del",
"self",
".",
"threads",
"[",
"script",
"]",
"modified",
"=",
"True",
"break",
"else",
":",
"thread",
".",
"finish",
"(",
")",
"del",
"self",
".",
"threads",
"[",
"script",
"]",
"modified",
"=",
"True",
"break",
"else",
":",
"# Pass to Screen",
"yield",
"event",
"if",
"modified",
":",
"break",
"else",
":",
"break",
"self",
".",
"add_new_threads",
"(",
")"
] | Execute one frame of the interpreter.
Don't call more than 40 times per second. | [
"Execute",
"one",
"frame",
"of",
"the",
"interpreter",
"."
] | ac84f7198079732bf22c3b8cbc0dc1a073b1d539 | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L147-L224 | train |
tjvr/skip | skip/__init__.py | Interpreter.stop | def stop(self):
"""Stop running threads."""
self.threads = {}
self.new_threads = {}
self.answer = ""
self.ask_lock = False | python | def stop(self):
"""Stop running threads."""
self.threads = {}
self.new_threads = {}
self.answer = ""
self.ask_lock = False | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"threads",
"=",
"{",
"}",
"self",
".",
"new_threads",
"=",
"{",
"}",
"self",
".",
"answer",
"=",
"\"\"",
"self",
".",
"ask_lock",
"=",
"False"
] | Stop running threads. | [
"Stop",
"running",
"threads",
"."
] | ac84f7198079732bf22c3b8cbc0dc1a073b1d539 | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L226-L231 | train |
tjvr/skip | skip/__init__.py | Interpreter.evaluate | def evaluate(self, s, value, insert=None):
"""Expression evaluator.
* For expressions, returns the value of the expression.
* For Blocks, returns a generator (or the empty list []).
"""
assert not isinstance(value, kurt.Script)
if insert and insert.unevaluated:
return value
if isinstance(value, kurt.Block):
if value.type.shape == "hat":
return []
if value.type not in self.COMMANDS:
if getattr(value.type, '_workaround', None):
value = value.type._workaround(value)
if not value:
raise kurt.BlockNotSupported(value.type)
else:
raise kurt.BlockNotSupported(value.type)
f = self.COMMANDS[value.type]
args = [self.evaluate(s, arg, arg_insert)
for (arg, arg_insert)
in zip(list(value.args), value.type.inserts)]
value = f(s, *args)
def flatten_generators(gen):
for item in gen:
if inspect.isgenerator(item):
for x in flatten_generators(item):
yield x
else:
yield item
if inspect.isgenerator(value):
value = flatten_generators(value)
if value is None:
value = []
if insert:
if isinstance(value, basestring):
value = unicode(value)
if insert.shape in ("number", "number-menu", "string"):
try:
value = float(value)
except (TypeError, ValueError):
if insert.shape == "number":
value = 0
if isinstance(value, float) and value == int(value):
value = int(value)
if insert.kind in ("spriteOrStage", "spriteOrMouse", "stageOrThis",
"spriteOnly", "touching"):
if value not in ("mouse-pointer", "edge"):
value = (self.project.stage if value == "Stage"
else self.project.get_sprite(value))
elif insert.kind == "var":
if value in s.variables:
value = s.variables[value]
else:
value = s.project.variables[value]
elif insert.kind == "list":
if value in s.lists:
value = s.lists[value]
else:
value = s.project.lists[value]
elif insert.kind == "sound":
for sound in s.sounds:
if sound.name == value:
value = sound
break
return value | python | def evaluate(self, s, value, insert=None):
"""Expression evaluator.
* For expressions, returns the value of the expression.
* For Blocks, returns a generator (or the empty list []).
"""
assert not isinstance(value, kurt.Script)
if insert and insert.unevaluated:
return value
if isinstance(value, kurt.Block):
if value.type.shape == "hat":
return []
if value.type not in self.COMMANDS:
if getattr(value.type, '_workaround', None):
value = value.type._workaround(value)
if not value:
raise kurt.BlockNotSupported(value.type)
else:
raise kurt.BlockNotSupported(value.type)
f = self.COMMANDS[value.type]
args = [self.evaluate(s, arg, arg_insert)
for (arg, arg_insert)
in zip(list(value.args), value.type.inserts)]
value = f(s, *args)
def flatten_generators(gen):
for item in gen:
if inspect.isgenerator(item):
for x in flatten_generators(item):
yield x
else:
yield item
if inspect.isgenerator(value):
value = flatten_generators(value)
if value is None:
value = []
if insert:
if isinstance(value, basestring):
value = unicode(value)
if insert.shape in ("number", "number-menu", "string"):
try:
value = float(value)
except (TypeError, ValueError):
if insert.shape == "number":
value = 0
if isinstance(value, float) and value == int(value):
value = int(value)
if insert.kind in ("spriteOrStage", "spriteOrMouse", "stageOrThis",
"spriteOnly", "touching"):
if value not in ("mouse-pointer", "edge"):
value = (self.project.stage if value == "Stage"
else self.project.get_sprite(value))
elif insert.kind == "var":
if value in s.variables:
value = s.variables[value]
else:
value = s.project.variables[value]
elif insert.kind == "list":
if value in s.lists:
value = s.lists[value]
else:
value = s.project.lists[value]
elif insert.kind == "sound":
for sound in s.sounds:
if sound.name == value:
value = sound
break
return value | [
"def",
"evaluate",
"(",
"self",
",",
"s",
",",
"value",
",",
"insert",
"=",
"None",
")",
":",
"assert",
"not",
"isinstance",
"(",
"value",
",",
"kurt",
".",
"Script",
")",
"if",
"insert",
"and",
"insert",
".",
"unevaluated",
":",
"return",
"value",
"if",
"isinstance",
"(",
"value",
",",
"kurt",
".",
"Block",
")",
":",
"if",
"value",
".",
"type",
".",
"shape",
"==",
"\"hat\"",
":",
"return",
"[",
"]",
"if",
"value",
".",
"type",
"not",
"in",
"self",
".",
"COMMANDS",
":",
"if",
"getattr",
"(",
"value",
".",
"type",
",",
"'_workaround'",
",",
"None",
")",
":",
"value",
"=",
"value",
".",
"type",
".",
"_workaround",
"(",
"value",
")",
"if",
"not",
"value",
":",
"raise",
"kurt",
".",
"BlockNotSupported",
"(",
"value",
".",
"type",
")",
"else",
":",
"raise",
"kurt",
".",
"BlockNotSupported",
"(",
"value",
".",
"type",
")",
"f",
"=",
"self",
".",
"COMMANDS",
"[",
"value",
".",
"type",
"]",
"args",
"=",
"[",
"self",
".",
"evaluate",
"(",
"s",
",",
"arg",
",",
"arg_insert",
")",
"for",
"(",
"arg",
",",
"arg_insert",
")",
"in",
"zip",
"(",
"list",
"(",
"value",
".",
"args",
")",
",",
"value",
".",
"type",
".",
"inserts",
")",
"]",
"value",
"=",
"f",
"(",
"s",
",",
"*",
"args",
")",
"def",
"flatten_generators",
"(",
"gen",
")",
":",
"for",
"item",
"in",
"gen",
":",
"if",
"inspect",
".",
"isgenerator",
"(",
"item",
")",
":",
"for",
"x",
"in",
"flatten_generators",
"(",
"item",
")",
":",
"yield",
"x",
"else",
":",
"yield",
"item",
"if",
"inspect",
".",
"isgenerator",
"(",
"value",
")",
":",
"value",
"=",
"flatten_generators",
"(",
"value",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"[",
"]",
"if",
"insert",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"unicode",
"(",
"value",
")",
"if",
"insert",
".",
"shape",
"in",
"(",
"\"number\"",
",",
"\"number-menu\"",
",",
"\"string\"",
")",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"if",
"insert",
".",
"shape",
"==",
"\"number\"",
":",
"value",
"=",
"0",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
"and",
"value",
"==",
"int",
"(",
"value",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"insert",
".",
"kind",
"in",
"(",
"\"spriteOrStage\"",
",",
"\"spriteOrMouse\"",
",",
"\"stageOrThis\"",
",",
"\"spriteOnly\"",
",",
"\"touching\"",
")",
":",
"if",
"value",
"not",
"in",
"(",
"\"mouse-pointer\"",
",",
"\"edge\"",
")",
":",
"value",
"=",
"(",
"self",
".",
"project",
".",
"stage",
"if",
"value",
"==",
"\"Stage\"",
"else",
"self",
".",
"project",
".",
"get_sprite",
"(",
"value",
")",
")",
"elif",
"insert",
".",
"kind",
"==",
"\"var\"",
":",
"if",
"value",
"in",
"s",
".",
"variables",
":",
"value",
"=",
"s",
".",
"variables",
"[",
"value",
"]",
"else",
":",
"value",
"=",
"s",
".",
"project",
".",
"variables",
"[",
"value",
"]",
"elif",
"insert",
".",
"kind",
"==",
"\"list\"",
":",
"if",
"value",
"in",
"s",
".",
"lists",
":",
"value",
"=",
"s",
".",
"lists",
"[",
"value",
"]",
"else",
":",
"value",
"=",
"s",
".",
"project",
".",
"lists",
"[",
"value",
"]",
"elif",
"insert",
".",
"kind",
"==",
"\"sound\"",
":",
"for",
"sound",
"in",
"s",
".",
"sounds",
":",
"if",
"sound",
".",
"name",
"==",
"value",
":",
"value",
"=",
"sound",
"break",
"return",
"value"
] | Expression evaluator.
* For expressions, returns the value of the expression.
* For Blocks, returns a generator (or the empty list []). | [
"Expression",
"evaluator",
"."
] | ac84f7198079732bf22c3b8cbc0dc1a073b1d539 | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L240-L320 | train |
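The nested flatten_generators helper above lets events yielded by nested blocks bubble up one at a time. A standalone sketch of the same flattening:

import inspect

def flatten_generators(gen):
    for item in gen:
        if inspect.isgenerator(item):
            for x in flatten_generators(item):
                yield x
        else:
            yield item

def inner():
    yield 2
    yield 3

def outer():
    yield 1
    yield inner()   # nested generator is flattened in place
    yield 4

print(list(flatten_generators(outer())))   # [1, 2, 3, 4]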
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_cluster_name | def get_cluster_name(self):
"""
Name identifying this RabbitMQ cluster.
"""
return self._get(
url=self.url + '/api/cluster-name',
headers=self.headers,
auth=self.auth
) | python | def get_cluster_name(self):
"""
Name identifying this RabbitMQ cluster.
"""
return self._get(
url=self.url + '/api/cluster-name',
headers=self.headers,
auth=self.auth
) | [
"def",
"get_cluster_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"self",
".",
"url",
"+",
"'/api/cluster-name'",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"auth",
"=",
"self",
".",
"auth",
")"
] | Name identifying this RabbitMQ cluster. | [
"Name",
"identifying",
"this",
"RabbitMQ",
"cluster",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L16-L24 | train |
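A hedged usage sketch: the constructor arguments below (url, auth) are an assumption inferred from the self.url/self.auth attributes used above, not a documented signature, and the broker address is made up.

from rabbitmq_admin.api import AdminAPI

api = AdminAPI(url='http://localhost:15672', auth=('guest', 'guest'))  # assumed signature
print(api.get_cluster_name())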
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_connection | def get_connection(self, name):
"""
An individual connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
)) | python | def get_connection(self, name):
"""
An individual connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"get_connection",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/connections/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | An individual connection.
:param name: The connection name
:type name: str | [
"An",
"individual",
"connection",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L93-L102 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.delete_connection | def delete_connection(self, name, reason=None):
"""
Closes an individual connection. Give an optional reason
:param name: The connection name
:type name: str
:param reason: An optional reason why the connection was deleted
:type reason: str
"""
headers = {'X-Reason': reason} if reason else {}
self._api_delete(
'/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
),
headers=headers,
) | python | def delete_connection(self, name, reason=None):
"""
Closes an individual connection. Give an optional reason
:param name: The connection name
:type name: str
:param reason: An optional reason why the connection was deleted
:type reason: str
"""
headers = {'X-Reason': reason} if reason else {}
self._api_delete(
'/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
),
headers=headers,
) | [
"def",
"delete_connection",
"(",
"self",
",",
"name",
",",
"reason",
"=",
"None",
")",
":",
"headers",
"=",
"{",
"'X-Reason'",
":",
"reason",
"}",
"if",
"reason",
"else",
"{",
"}",
"self",
".",
"_api_delete",
"(",
"'/api/connections/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
",",
"headers",
"=",
"headers",
",",
")"
] | Closes an individual connection. Give an optional reason
:param name: The connection name
:type name: str
:param reason: An optional reason why the connection was deleted
:type reason: str | [
"Closes",
"an",
"individual",
"connection",
".",
"Give",
"an",
"optional",
"reason"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L104-L121 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_connection_channels | def list_connection_channels(self, name):
"""
List of all channels for a given connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}/channels'.format(
urllib.parse.quote_plus(name)
)) | python | def list_connection_channels(self, name):
"""
List of all channels for a given connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}/channels'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"list_connection_channels",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/connections/{0}/channels'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | List of all channels for a given connection.
:param name: The connection name
:type name: str | [
"List",
"of",
"all",
"channels",
"for",
"a",
"given",
"connection",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L123-L132 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_channel | def get_channel(self, name):
"""
Details about an individual channel.
:param name: The channel name
:type name: str
"""
return self._api_get('/api/channels/{0}'.format(
urllib.parse.quote_plus(name)
)) | python | def get_channel(self, name):
"""
Details about an individual channel.
:param name: The channel name
:type name: str
"""
return self._api_get('/api/channels/{0}'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"get_channel",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/channels/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | Details about an individual channel.
:param name: The channel name
:type name: str | [
"Details",
"about",
"an",
"individual",
"channel",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L140-L149 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_consumers_for_vhost | def list_consumers_for_vhost(self, vhost):
"""
A list of all consumers in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/consumers/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | python | def list_consumers_for_vhost(self, vhost):
"""
A list of all consumers in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/consumers/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"list_consumers_for_vhost",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/consumers/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
] | A list of all consumers in a given virtual host.
:param vhost: The vhost name
:type vhost: str | [
"A",
"list",
"of",
"all",
"consumers",
"in",
"a",
"given",
"virtual",
"host",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L157-L166 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_exchanges_for_vhost | def list_exchanges_for_vhost(self, vhost):
"""
A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | python | def list_exchanges_for_vhost(self, vhost):
"""
A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"list_exchanges_for_vhost",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/exchanges/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
] | A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str | [
"A",
"list",
"of",
"all",
"exchanges",
"in",
"a",
"given",
"virtual",
"host",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L174-L183 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_exchange_for_vhost | def get_exchange_for_vhost(self, exchange, vhost):
"""
An individual exchange
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(exchange)
)) | python | def get_exchange_for_vhost(self, exchange, vhost):
"""
An individual exchange
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(exchange)
)) | [
"def",
"get_exchange_for_vhost",
"(",
"self",
",",
"exchange",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/exchanges/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"exchange",
")",
")",
")"
] | An individual exchange
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str | [
"An",
"individual",
"exchange"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L185-L198 | train |
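A sketch composing the exchange listing and lookup methods, again reusing the assumed api client (vhost and exchange names are placeholders; both path segments are URL-quoted by the client as shown above):
>>> exchanges = api.list_exchanges_for_vhost('/')
>>> amq_topic = api.get_exchange_for_vhost('amq.topic', '/')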
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.delete_exchange_for_vhost | def delete_exchange_for_vhost(self, exchange, vhost, if_unused=False):
"""
Delete an individual exchange. You can add the parameter
``if_unused=True``. This prevents the delete from succeeding if the
exchange is bound to a queue or as a source to another exchange.
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
:param if_unused: Set to ``True`` to only delete if it is unused
:type if_unused: bool
"""
self._api_delete(
'/api/exchanges/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(exchange)),
params={
'if-unused': if_unused
},
) | python | def delete_exchange_for_vhost(self, exchange, vhost, if_unused=False):
"""
Delete an individual exchange. You can add the parameter
``if_unused=True``. This prevents the delete from succeeding if the
exchange is bound to a queue or as a source to another exchange.
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
:param if_unused: Set to ``True`` to only delete if it is unused
:type if_unused: bool
"""
self._api_delete(
'/api/exchanges/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(exchange)),
params={
'if-unused': if_unused
},
) | [
"def",
"delete_exchange_for_vhost",
"(",
"self",
",",
"exchange",
",",
"vhost",
",",
"if_unused",
"=",
"False",
")",
":",
"self",
".",
"_api_delete",
"(",
"'/api/exchanges/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"exchange",
")",
")",
",",
"params",
"=",
"{",
"'if-unused'",
":",
"if_unused",
"}",
",",
")"
] | Delete an individual exchange. You can add the parameter
``if_unused=True``. This prevents the delete from succeeding if the
exchange is bound to a queue or as a source to another exchange.
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
:param if_unused: Set to ``True`` to only delete if it is unused
:type if_unused: bool | [
"Delete",
"an",
"individual",
"exchange",
".",
"You",
"can",
"add",
"the",
"parameter",
"if_unused",
"=",
"True",
".",
"This",
"prevents",
"the",
"delete",
"from",
"succeeding",
"if",
"the",
"exchange",
"is",
"bound",
"to",
"a",
"queue",
"or",
"as",
"a",
"source",
"to",
"another",
"exchange",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L232-L254 | train |
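A sketch of a guarded delete with the assumed api client: if_unused=True asks the broker to refuse the delete while the exchange is still bound to a queue or used as a source by another exchange (the exchange name is a placeholder):
>>> api.delete_exchange_for_vhost('my-exchange', '/', if_unused=True)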
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_bindings_for_vhost | def list_bindings_for_vhost(self, vhost):
"""
A list of all bindings in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/bindings/{}'.format(
urllib.parse.quote_plus(vhost)
)) | python | def list_bindings_for_vhost(self, vhost):
"""
A list of all bindings in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/bindings/{}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"list_bindings_for_vhost",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/bindings/{}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
] | A list of all bindings in a given virtual host.
:param vhost: The vhost name
:type vhost: str | [
"A",
"list",
"of",
"all",
"bindings",
"in",
"a",
"given",
"virtual",
"host",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L262-L271 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_vhost | def get_vhost(self, name):
"""
Details about an individual vhost.
:param name: The vhost name
:type name: str
"""
return self._api_get('/api/vhosts/{0}'.format(
urllib.parse.quote_plus(name)
)) | python | def get_vhost(self, name):
"""
Details about an individual vhost.
:param name: The vhost name
:type name: str
"""
return self._api_get('/api/vhosts/{0}'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"get_vhost",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/vhosts/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | Details about an individual vhost.
:param name: The vhost name
:type name: str | [
"Details",
"about",
"an",
"individual",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L279-L288 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.create_vhost | def create_vhost(self, name, tracing=False):
"""
Create an individual vhost.
:param name: The vhost name
:type name: str
:param tracing: Set to ``True`` to enable tracing
:type tracing: bool
"""
data = {'tracing': True} if tracing else {}
self._api_put(
'/api/vhosts/{0}'.format(urllib.parse.quote_plus(name)),
data=data,
) | python | def create_vhost(self, name, tracing=False):
"""
Create an individual vhost.
:param name: The vhost name
:type name: str
:param tracing: Set to ``True`` to enable tracing
:type tracing: bool
"""
data = {'tracing': True} if tracing else {}
self._api_put(
'/api/vhosts/{0}'.format(urllib.parse.quote_plus(name)),
data=data,
) | [
"def",
"create_vhost",
"(",
"self",
",",
"name",
",",
"tracing",
"=",
"False",
")",
":",
"data",
"=",
"{",
"'tracing'",
":",
"True",
"}",
"if",
"tracing",
"else",
"{",
"}",
"self",
".",
"_api_put",
"(",
"'/api/vhosts/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
",",
"data",
"=",
"data",
",",
")"
] | Create an individual vhost.
:param name: The vhost name
:type name: str
:param tracing: Set to ``True`` to enable tracing
:type tracing: bool | [
"Create",
"an",
"individual",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L301-L315 | train |
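A sketch pairing create_vhost with get_vhost using the assumed api client; tracing=True puts {'tracing': True} in the request body as shown above (the vhost name is a placeholder):
>>> api.create_vhost('staging', tracing=True)
>>> info = api.get_vhost('staging')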
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_user | def get_user(self, name):
"""
Details about an individual user.
:param name: The user's name
:type name: str
"""
return self._api_get('/api/users/{0}'.format(
urllib.parse.quote_plus(name)
)) | python | def get_user(self, name):
"""
Details about an individual user.
:param name: The user's name
:type name: str
"""
return self._api_get('/api/users/{0}'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"get_user",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/users/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | Details about an individual user.
:param name: The user's name
:type name: str | [
"Details",
"about",
"an",
"individual",
"user",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L323-L332 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_user_permissions | def list_user_permissions(self, name):
"""
A list of all permissions for a given user.
:param name: The user's name
:type name: str
"""
return self._api_get('/api/users/{0}/permissions'.format(
urllib.parse.quote_plus(name)
)) | python | def list_user_permissions(self, name):
"""
A list of all permissions for a given user.
:param name: The user's name
:type name: str
"""
return self._api_get('/api/users/{0}/permissions'.format(
urllib.parse.quote_plus(name)
)) | [
"def",
"list_user_permissions",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/users/{0}/permissions'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | A list of all permissions for a given user.
:param name: The user's name
:type name: str | [
"A",
"list",
"of",
"all",
"permissions",
"for",
"a",
"given",
"user",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L376-L385 | train |
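A sketch using get_user and list_user_permissions together with the assumed api client (the user name is a placeholder; the shape of the returned data is whatever the management API provides):
>>> user = api.get_user('guest')
>>> perms = api.list_user_permissions('guest')  # typically one permission entry per vhost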
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.list_policies_for_vhost | def list_policies_for_vhost(self, vhost):
"""
A list of all policies for a vhost.
"""
return self._api_get('/api/policies/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | python | def list_policies_for_vhost(self, vhost):
"""
A list of all policies for a vhost.
"""
return self._api_get('/api/policies/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"list_policies_for_vhost",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/policies/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
] | A list of all policies for a vhost. | [
"A",
"list",
"of",
"all",
"policies",
"for",
"a",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L468-L474 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.get_policy_for_vhost | def get_policy_for_vhost(self, vhost, name):
"""
Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
"""
return self._api_get('/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
)) | python | def get_policy_for_vhost(self, vhost, name):
"""
Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
"""
return self._api_get('/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
)) | [
"def",
"get_policy_for_vhost",
"(",
"self",
",",
"vhost",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/policies/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
",",
")",
")"
] | Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str | [
"Get",
"a",
"specific",
"policy",
"for",
"a",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L476-L488 | train |
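A sketch reading policies back out of a vhost with the assumed api client (vhost and policy names are placeholders):
>>> all_policies = api.list_policies_for_vhost('/')
>>> ha_policy = api.get_policy_for_vhost('/', 'ha-all')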
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.create_policy_for_vhost | def create_policy_for_vhost(
self, vhost, name,
definition,
pattern=None,
priority=0,
apply_to='all'):
"""
Create a policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
:param definition: The definition of the policy. Required
:type definition: dict
:param priority: The priority of the policy. Defaults to 0
:param pattern: The pattern of resource names to apply the policy to
:type pattern: str
:type priority: int
:param apply_to: What resource type to apply the policy to.
Usually "exchanges", "queues", or "all". Defaults to "all"
:type apply_to: str
Example ::
# Makes all queues and exchanges on vhost "/" highly available
>>> api.create_policy_for_vhost(
... vhost="/",
... name="ha-all",
... definition={"ha-mode": "all"},
... pattern="",
... apply_to="all")
"""
data = {
"pattern": pattern,
"definition": definition,
"priority": priority,
"apply-to": apply_to
}
self._api_put(
'/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
),
data=data,
) | python | def create_policy_for_vhost(
self, vhost, name,
definition,
pattern=None,
priority=0,
apply_to='all'):
"""
Create a policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
:param definition: The definition of the policy. Required
:type definition: dict
:param priority: The priority of the policy. Defaults to 0
:param pattern: The pattern of resource names to apply the policy to
:type pattern: str
:type priority: int
:param apply_to: What resource type to apply the policy to.
Usually "exchanges", "queues", or "all". Defaults to "all"
:type apply_to: str
Example ::
# Makes all queues and exchanges on vhost "/" highly available
>>> api.create_policy_for_vhost(
... vhost="/",
... name="ha-all",
... definition={"ha-mode": "all"},
... pattern="",
... apply_to="all")
"""
data = {
"pattern": pattern,
"definition": definition,
"priority": priority,
"apply-to": apply_to
}
self._api_put(
'/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
),
data=data,
) | [
"def",
"create_policy_for_vhost",
"(",
"self",
",",
"vhost",
",",
"name",
",",
"definition",
",",
"pattern",
"=",
"None",
",",
"priority",
"=",
"0",
",",
"apply_to",
"=",
"'all'",
")",
":",
"data",
"=",
"{",
"\"pattern\"",
":",
"pattern",
",",
"\"definition\"",
":",
"definition",
",",
"\"priority\"",
":",
"priority",
",",
"\"apply-to\"",
":",
"apply_to",
"}",
"self",
".",
"_api_put",
"(",
"'/api/policies/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
",",
")",
",",
"data",
"=",
"data",
",",
")"
] | Create a policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
:param definition: The definition of the policy. Required
:type definition: dict
:param priority: The priority of the policy. Defaults to 0
:param pattern: The pattern of resource names to apply the policy to
:type pattern: str
:type priority: int
:param apply_to: What resource type to apply the policy to.
Usually "exchanges", "queues", or "all". Defaults to "all"
:type apply_to: str
Example ::
# Makes all queues and exchanges on vhost "/" highly available
>>> api.create_policy_for_vhost(
... vhost="/",
... name="ha-all",
... definition={"ha-mode": "all"},
... pattern="",
... apply_to="all") | [
"Create",
"a",
"policy",
"for",
"a",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L490-L537 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.delete_policy_for_vhost | def delete_policy_for_vhost(self, vhost, name):
"""
Delete a specific policy for a vhost.
:param vhost: The virtual host of the policy
:type vhost: str
:param name: The name of the policy
:type name: str
"""
self._api_delete('/api/policies/{0}/{1}/'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
)) | python | def delete_policy_for_vhost(self, vhost, name):
"""
Delete a specific policy for a vhost.
:param vhost: The virtual host of the policy
:type vhost: str
:param name: The name of the policy
:type name: str
"""
self._api_delete('/api/policies/{0}/{1}/'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
)) | [
"def",
"delete_policy_for_vhost",
"(",
"self",
",",
"vhost",
",",
"name",
")",
":",
"self",
".",
"_api_delete",
"(",
"'/api/policies/{0}/{1}/'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
",",
")",
")"
] | Delete a specific policy for a vhost.
:param vhost: The virtual host of the policy
:type vhost: str
:param name: The name of the policy
:type name: str | [
"Delete",
"a",
"specific",
"policy",
"for",
"a",
"vhost",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L539-L551 | train |
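A sketch removing the 'ha-all' policy from the create_policy_for_vhost example above, assuming it was created on vhost '/' and reusing the assumed api client:
>>> api.delete_policy_for_vhost('/', 'ha-all')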
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | AdminAPI.is_vhost_alive | def is_vhost_alive(self, vhost):
"""
Declares a test queue, then publishes and consumes a message.
Intended for use by monitoring tools.
:param vhost: The vhost name to check
:type vhost: str
"""
return self._api_get('/api/aliveness-test/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | python | def is_vhost_alive(self, vhost):
"""
Declares a test queue, then publishes and consumes a message.
Intended for use by monitoring tools.
:param vhost: The vhost name to check
:type vhost: str
"""
return self._api_get('/api/aliveness-test/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"is_vhost_alive",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/aliveness-test/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
] | Declares a test queue, then publishes and consumes a message.
Intended for use by monitoring tools.
:param vhost: The vhost name to check
:type vhost: str | [
"Declares",
"a",
"test",
"queue",
"then",
"publishes",
"and",
"consumes",
"a",
"message",
".",
"Intended",
"for",
"use",
"by",
"monitoring",
"tools",
"."
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L553-L563 | train |
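A sketch of a simple health probe built on is_vhost_alive with the assumed api client; the {'status': 'ok'} response shape is an assumption based on the management API's aliveness-test endpoint:
>>> result = api.is_vhost_alive('/')
>>> healthy = result.get('status') == 'ok'  # assumed response format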
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetFile.write | def write(self, session, directory, name, maskMap):
"""
Write from database to file.
*session* = SQLAlchemy session object\n
*directory* = to which directory will the files be written (e.g.: '/example/path')\n
*name* = name of file that will be written (e.g.: 'my_project.ext')\n
"""
# Assemble Path to file
name_split = name.split('.')
name = name_split[0]
# Default extension
extension = ''
if len(name_split) >= 2:
extension = name_split[-1]
# Run name preprocessor method if present
try:
name = self._namePreprocessor(name)
except:
'DO NOTHING'
if extension == '':
filename = '{0}.{1}'.format(name, self.fileExtension)
else:
filename = '{0}.{1}'.format(name, extension)
filePath = os.path.join(directory, filename)
with open(filePath, 'w') as openFile:
# Write Lines
self._write(session=session,
openFile=openFile,
maskMap=maskMap) | python | def write(self, session, directory, name, maskMap):
"""
Write from database to file.
*session* = SQLAlchemy session object\n
*directory* = to which directory will the files be written (e.g.: '/example/path')\n
*name* = name of file that will be written (e.g.: 'my_project.ext')\n
"""
# Assemble Path to file
name_split = name.split('.')
name = name_split[0]
# Default extension
extension = ''
if len(name_split) >= 2:
extension = name_split[-1]
# Run name preprocessor method if present
try:
name = self._namePreprocessor(name)
except:
'DO NOTHING'
if extension == '':
filename = '{0}.{1}'.format(name, self.fileExtension)
else:
filename = '{0}.{1}'.format(name, extension)
filePath = os.path.join(directory, filename)
with open(filePath, 'w') as openFile:
# Write Lines
self._write(session=session,
openFile=openFile,
maskMap=maskMap) | [
"def",
"write",
"(",
"self",
",",
"session",
",",
"directory",
",",
"name",
",",
"maskMap",
")",
":",
"# Assemble Path to file",
"name_split",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"name",
"=",
"name_split",
"[",
"0",
"]",
"# Default extension",
"extension",
"=",
"''",
"if",
"len",
"(",
"name_split",
")",
">=",
"2",
":",
"extension",
"=",
"name_split",
"[",
"-",
"1",
"]",
"# Run name preprocessor method if present",
"try",
":",
"name",
"=",
"self",
".",
"_namePreprocessor",
"(",
"name",
")",
"except",
":",
"'DO NOTHING'",
"if",
"extension",
"==",
"''",
":",
"filename",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"fileExtension",
")",
"else",
":",
"filename",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"extension",
")",
"filePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"with",
"open",
"(",
"filePath",
",",
"'w'",
")",
"as",
"openFile",
":",
"# Write Lines",
"self",
".",
"_write",
"(",
"session",
"=",
"session",
",",
"openFile",
"=",
"openFile",
",",
"maskMap",
"=",
"maskMap",
")"
] | Write from database to file.
*session* = SQLAlchemy session object\n
*directory* = to which directory will the files be written (e.g.: '/example/path')\n
*name* = name of file that will be written (e.g.: 'my_project.ext')\n | [
"Write",
"from",
"database",
"to",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L144-L180 | train |
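A minimal sketch of calling WMSDatasetFile.write on objects pulled from a GSSHA database; the session setup, queries, and the '.dep' output name below are assumptions for illustration, not code from this record:
>>> # session: SQLAlchemy session bound to the GSSHA database (creation omitted)
>>> wms_dataset = session.query(WMSDatasetFile).first()
>>> mask_map = session.query(RasterMapFile).filter(RasterMapFile.fileExtension == 'msk').first()
>>> wms_dataset.write(session, directory='/tmp/gssha_out', name='my_project.dep', maskMap=mask_map)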
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetFile.getAsKmlGridAnimation | def getAsKmlGridAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0.0):
"""
Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the WMS dataset file extension.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Create a raster converter
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString = converter.getAsKmlGridAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString | python | def getAsKmlGridAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0.0):
"""
Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the WMS dataset file extension.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Create a raster converter
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString = converter.getAsKmlGridAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString | [
"def",
"getAsKmlGridAnimation",
"(",
"self",
",",
"session",
",",
"projectFile",
"=",
"None",
",",
"path",
"=",
"None",
",",
"documentName",
"=",
"None",
",",
"colorRamp",
"=",
"None",
",",
"alpha",
"=",
"1.0",
",",
"noDataValue",
"=",
"0.0",
")",
":",
"# Prepare rasters",
"timeStampedRasters",
"=",
"self",
".",
"_assembleRasterParams",
"(",
"projectFile",
",",
"self",
".",
"rasters",
")",
"# Create a raster converter",
"converter",
"=",
"RasterConverter",
"(",
"sqlAlchemyEngineOrSession",
"=",
"session",
")",
"# Configure color ramp",
"if",
"isinstance",
"(",
"colorRamp",
",",
"dict",
")",
":",
"converter",
".",
"setCustomColorRamp",
"(",
"colorRamp",
"[",
"'colors'",
"]",
",",
"colorRamp",
"[",
"'interpolatedPoints'",
"]",
")",
"else",
":",
"converter",
".",
"setDefaultColorRamp",
"(",
"colorRamp",
")",
"if",
"documentName",
"is",
"None",
":",
"documentName",
"=",
"self",
".",
"fileExtension",
"kmlString",
"=",
"converter",
".",
"getAsKmlGridAnimation",
"(",
"tableName",
"=",
"WMSDatasetRaster",
".",
"tableName",
",",
"timeStampedRasters",
"=",
"timeStampedRasters",
",",
"rasterIdFieldName",
"=",
"'id'",
",",
"rasterFieldName",
"=",
"'raster'",
",",
"documentName",
"=",
"documentName",
",",
"alpha",
"=",
"alpha",
",",
"noDataValue",
"=",
"noDataValue",
")",
"if",
"path",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"kmlString",
")",
"return",
"kmlString"
] | Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the WMS dataset file extension.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string | [
"Retrieve",
"the",
"WMS",
"dataset",
"as",
"a",
"gridded",
"time",
"stamped",
"KML",
"string",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L184-L234 | train |
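A sketch of generating the animated grid KML, reusing wms_dataset and session from the sketch above and assuming project_file is a loaded gsshapy ProjectFile; the custom color ramp follows the {'colors', 'interpolatedPoints'} format described in the docstring:
>>> kml = wms_dataset.getAsKmlGridAnimation(
...     session, projectFile=project_file, path='/tmp/depth.kml',
...     colorRamp={'colors': [(0, 0, 255), (255, 0, 0)], 'interpolatedPoints': 10},
...     alpha=0.7, noDataValue=0.0)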
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetFile.getAsKmlPngAnimation | def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0,
noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'):
"""
Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the WMS dataset file extension.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images.
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
drawOrder=drawOrder,
cellSize=cellSize,
noDataValue=noDataValue,
resampleMethod=resampleMethod)
if path:
directory = os.path.dirname(path)
archiveName = (os.path.split(path)[1]).split('.')[0]
kmzPath = os.path.join(directory, (archiveName + '.kmz'))
with ZipFile(kmzPath, 'w') as kmz:
kmz.writestr(archiveName + '.kml', kmlString)
for index, binaryPngString in enumerate(binaryPngStrings):
kmz.writestr('raster{0}.png'.format(index), binaryPngString)
return kmlString, binaryPngStrings | python | def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0,
noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'):
"""
Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the WMS dataset file extension.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images.
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
drawOrder=drawOrder,
cellSize=cellSize,
noDataValue=noDataValue,
resampleMethod=resampleMethod)
if path:
directory = os.path.dirname(path)
archiveName = (os.path.split(path)[1]).split('.')[0]
kmzPath = os.path.join(directory, (archiveName + '.kmz'))
with ZipFile(kmzPath, 'w') as kmz:
kmz.writestr(archiveName + '.kml', kmlString)
for index, binaryPngString in enumerate(binaryPngStrings):
kmz.writestr('raster{0}.png'.format(index), binaryPngString)
return kmlString, binaryPngStrings | [
"def",
"getAsKmlPngAnimation",
"(",
"self",
",",
"session",
",",
"projectFile",
"=",
"None",
",",
"path",
"=",
"None",
",",
"documentName",
"=",
"None",
",",
"colorRamp",
"=",
"None",
",",
"alpha",
"=",
"1.0",
",",
"noDataValue",
"=",
"0",
",",
"drawOrder",
"=",
"0",
",",
"cellSize",
"=",
"None",
",",
"resampleMethod",
"=",
"'NearestNeighbour'",
")",
":",
"# Prepare rasters",
"timeStampedRasters",
"=",
"self",
".",
"_assembleRasterParams",
"(",
"projectFile",
",",
"self",
".",
"rasters",
")",
"# Make sure the raster field is valid",
"converter",
"=",
"RasterConverter",
"(",
"sqlAlchemyEngineOrSession",
"=",
"session",
")",
"# Configure color ramp",
"if",
"isinstance",
"(",
"colorRamp",
",",
"dict",
")",
":",
"converter",
".",
"setCustomColorRamp",
"(",
"colorRamp",
"[",
"'colors'",
"]",
",",
"colorRamp",
"[",
"'interpolatedPoints'",
"]",
")",
"else",
":",
"converter",
".",
"setDefaultColorRamp",
"(",
"colorRamp",
")",
"if",
"documentName",
"is",
"None",
":",
"documentName",
"=",
"self",
".",
"fileExtension",
"kmlString",
",",
"binaryPngStrings",
"=",
"converter",
".",
"getAsKmlPngAnimation",
"(",
"tableName",
"=",
"WMSDatasetRaster",
".",
"tableName",
",",
"timeStampedRasters",
"=",
"timeStampedRasters",
",",
"rasterIdFieldName",
"=",
"'id'",
",",
"rasterFieldName",
"=",
"'raster'",
",",
"documentName",
"=",
"documentName",
",",
"alpha",
"=",
"alpha",
",",
"drawOrder",
"=",
"drawOrder",
",",
"cellSize",
"=",
"cellSize",
",",
"noDataValue",
"=",
"noDataValue",
",",
"resampleMethod",
"=",
"resampleMethod",
")",
"if",
"path",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"archiveName",
"=",
"(",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"1",
"]",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"kmzPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"(",
"archiveName",
"+",
"'.kmz'",
")",
")",
"with",
"ZipFile",
"(",
"kmzPath",
",",
"'w'",
")",
"as",
"kmz",
":",
"kmz",
".",
"writestr",
"(",
"archiveName",
"+",
"'.kml'",
",",
"kmlString",
")",
"for",
"index",
",",
"binaryPngString",
"in",
"enumerate",
"(",
"binaryPngStrings",
")",
":",
"kmz",
".",
"writestr",
"(",
"'raster{0}.png'",
".",
"format",
"(",
"index",
")",
",",
"binaryPngString",
")",
"return",
"kmlString",
",",
"binaryPngStrings"
] | Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images. | [
"Retrieve",
"the",
"WMS",
"dataset",
"as",
"a",
"PNG",
"time",
"stamped",
"KMZ"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L236-L306 | train |
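A sketch of the PNG/KMZ variant under the same assumptions as the previous sketch; because path is given, a '/tmp/depth.kmz' archive is also written, and cellSize/resampleMethod control the resampling described above:
>>> kml, pngs = wms_dataset.getAsKmlPngAnimation(
...     session, projectFile=project_file, path='/tmp/depth.kml',
...     alpha=0.8, cellSize=10, resampleMethod='Bilinear')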
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, maskMap):
"""
WMS Dataset File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
if isinstance(maskMap, RasterMapFile) and maskMap.fileExtension == 'msk':
# Vars from mask map
columns = maskMap.columns
rows = maskMap.rows
upperLeftX = maskMap.west
upperLeftY = maskMap.north
# Derive the cell size (GSSHA cells are square, so it is the same in both directions)
cellSizeX = int(abs(maskMap.west - maskMap.east) / columns)
cellSizeY = -1 * cellSizeX
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'DATASET': wdc.datasetHeaderChunk,
'TS': wdc.datasetScalarTimeStepChunk}
# Open file and read plain text into text field
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse header chunk first
header = wdc.datasetHeaderChunk('DATASET', chunks['DATASET'][0])
# Parse each time step chunk and aggregate
timeStepRasters = []
for chunk in chunks['TS']:
timeStepRasters.append(wdc.datasetScalarTimeStepChunk(chunk, columns, header['numberCells']))
# Set WMS dataset file properties
self.name = header['name']
self.numberCells = header['numberCells']
self.numberData = header['numberData']
self.objectID = header['objectID']
if header['type'] == 'BEGSCL':
self.objectType = header['objectType']
self.type = self.SCALAR_TYPE
elif header['type'] == 'BEGVEC':
self.vectorType = header['objectType']
self.type = self.VECTOR_TYPE
# Create WMS raster dataset files for each raster
for timeStep, timeStepRaster in enumerate(timeStepRasters):
# Create new WMS raster dataset file object
wmsRasterDatasetFile = WMSDatasetRaster()
# Set the wms dataset for this WMS raster dataset file
wmsRasterDatasetFile.wmsDataset = self
# Set the time step and timestamp and other properties
wmsRasterDatasetFile.iStatus = timeStepRaster['iStatus']
wmsRasterDatasetFile.timestamp = timeStepRaster['timestamp']
wmsRasterDatasetFile.timeStep = timeStep + 1
# If spatial is enabled create PostGIS rasters
if spatial:
# Process the values/cell array
wmsRasterDatasetFile.raster = RasterLoader.makeSingleBandWKBRaster(session,
columns, rows,
upperLeftX, upperLeftY,
cellSizeX, cellSizeY,
0, 0,
spatialReferenceID,
timeStepRaster['cellArray'])
# Otherwise, set the raster text properties
else:
wmsRasterDatasetFile.rasterText = timeStepRaster['rasterText']
# Add current file object to the session
session.add(self)
else:
log.warning("Could not read {0}. Mask Map must be supplied "
"to read WMS Datasets.".format(filename)) | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, maskMap):
"""
WMS Dataset File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
if isinstance(maskMap, RasterMapFile) and maskMap.fileExtension == 'msk':
# Vars from mask map
columns = maskMap.columns
rows = maskMap.rows
upperLeftX = maskMap.west
upperLeftY = maskMap.north
# Derive the cell size (GSSHA cells are square, so it is the same in both directions)
cellSizeX = int(abs(maskMap.west - maskMap.east) / columns)
cellSizeY = -1 * cellSizeX
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'DATASET': wdc.datasetHeaderChunk,
'TS': wdc.datasetScalarTimeStepChunk}
# Open file and read plain text into text field
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse header chunk first
header = wdc.datasetHeaderChunk('DATASET', chunks['DATASET'][0])
# Parse each time step chunk and aggregate
timeStepRasters = []
for chunk in chunks['TS']:
timeStepRasters.append(wdc.datasetScalarTimeStepChunk(chunk, columns, header['numberCells']))
# Set WMS dataset file properties
self.name = header['name']
self.numberCells = header['numberCells']
self.numberData = header['numberData']
self.objectID = header['objectID']
if header['type'] == 'BEGSCL':
self.objectType = header['objectType']
self.type = self.SCALAR_TYPE
elif header['type'] == 'BEGVEC':
self.vectorType = header['objectType']
self.type = self.VECTOR_TYPE
# Create WMS raster dataset files for each raster
for timeStep, timeStepRaster in enumerate(timeStepRasters):
# Create new WMS raster dataset file object
wmsRasterDatasetFile = WMSDatasetRaster()
# Set the wms dataset for this WMS raster dataset file
wmsRasterDatasetFile.wmsDataset = self
# Set the time step and timestamp and other properties
wmsRasterDatasetFile.iStatus = timeStepRaster['iStatus']
wmsRasterDatasetFile.timestamp = timeStepRaster['timestamp']
wmsRasterDatasetFile.timeStep = timeStep + 1
# If spatial is enabled create PostGIS rasters
if spatial:
# Process the values/cell array
wmsRasterDatasetFile.raster = RasterLoader.makeSingleBandWKBRaster(session,
columns, rows,
upperLeftX, upperLeftY,
cellSizeX, cellSizeY,
0, 0,
spatialReferenceID,
timeStepRaster['cellArray'])
# Otherwise, set the raster text properties
else:
wmsRasterDatasetFile.rasterText = timeStepRaster['rasterText']
# Add current file object to the session
session.add(self)
else:
log.warning("Could not read {0}. Mask Map must be supplied "
"to read WMS Datasets.".format(filename)) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"maskMap",
")",
":",
"# Assign file extension attribute to file object",
"self",
".",
"fileExtension",
"=",
"extension",
"if",
"isinstance",
"(",
"maskMap",
",",
"RasterMapFile",
")",
"and",
"maskMap",
".",
"fileExtension",
"==",
"'msk'",
":",
"# Vars from mask map",
"columns",
"=",
"maskMap",
".",
"columns",
"rows",
"=",
"maskMap",
".",
"rows",
"upperLeftX",
"=",
"maskMap",
".",
"west",
"upperLeftY",
"=",
"maskMap",
".",
"north",
"# Derive the cell size (GSSHA cells are square, so it is the same in both directions)",
"cellSizeX",
"=",
"int",
"(",
"abs",
"(",
"maskMap",
".",
"west",
"-",
"maskMap",
".",
"east",
")",
"/",
"columns",
")",
"cellSizeY",
"=",
"-",
"1",
"*",
"cellSizeX",
"# Dictionary of keywords/cards and parse function names",
"KEYWORDS",
"=",
"{",
"'DATASET'",
":",
"wdc",
".",
"datasetHeaderChunk",
",",
"'TS'",
":",
"wdc",
".",
"datasetScalarTimeStepChunk",
"}",
"# Open file and read plain text into text field",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse header chunk first",
"header",
"=",
"wdc",
".",
"datasetHeaderChunk",
"(",
"'DATASET'",
",",
"chunks",
"[",
"'DATASET'",
"]",
"[",
"0",
"]",
")",
"# Parse each time step chunk and aggregate",
"timeStepRasters",
"=",
"[",
"]",
"for",
"chunk",
"in",
"chunks",
"[",
"'TS'",
"]",
":",
"timeStepRasters",
".",
"append",
"(",
"wdc",
".",
"datasetScalarTimeStepChunk",
"(",
"chunk",
",",
"columns",
",",
"header",
"[",
"'numberCells'",
"]",
")",
")",
"# Set WMS dataset file properties",
"self",
".",
"name",
"=",
"header",
"[",
"'name'",
"]",
"self",
".",
"numberCells",
"=",
"header",
"[",
"'numberCells'",
"]",
"self",
".",
"numberData",
"=",
"header",
"[",
"'numberData'",
"]",
"self",
".",
"objectID",
"=",
"header",
"[",
"'objectID'",
"]",
"if",
"header",
"[",
"'type'",
"]",
"==",
"'BEGSCL'",
":",
"self",
".",
"objectType",
"=",
"header",
"[",
"'objectType'",
"]",
"self",
".",
"type",
"=",
"self",
".",
"SCALAR_TYPE",
"elif",
"header",
"[",
"'type'",
"]",
"==",
"'BEGVEC'",
":",
"self",
".",
"vectorType",
"=",
"header",
"[",
"'objectType'",
"]",
"self",
".",
"type",
"=",
"self",
".",
"VECTOR_TYPE",
"# Create WMS raster dataset files for each raster",
"for",
"timeStep",
",",
"timeStepRaster",
"in",
"enumerate",
"(",
"timeStepRasters",
")",
":",
"# Create new WMS raster dataset file object",
"wmsRasterDatasetFile",
"=",
"WMSDatasetRaster",
"(",
")",
"# Set the wms dataset for this WMS raster dataset file",
"wmsRasterDatasetFile",
".",
"wmsDataset",
"=",
"self",
"# Set the time step and timestamp and other properties",
"wmsRasterDatasetFile",
".",
"iStatus",
"=",
"timeStepRaster",
"[",
"'iStatus'",
"]",
"wmsRasterDatasetFile",
".",
"timestamp",
"=",
"timeStepRaster",
"[",
"'timestamp'",
"]",
"wmsRasterDatasetFile",
".",
"timeStep",
"=",
"timeStep",
"+",
"1",
"# If spatial is enabled create PostGIS rasters",
"if",
"spatial",
":",
"# Process the values/cell array",
"wmsRasterDatasetFile",
".",
"raster",
"=",
"RasterLoader",
".",
"makeSingleBandWKBRaster",
"(",
"session",
",",
"columns",
",",
"rows",
",",
"upperLeftX",
",",
"upperLeftY",
",",
"cellSizeX",
",",
"cellSizeY",
",",
"0",
",",
"0",
",",
"spatialReferenceID",
",",
"timeStepRaster",
"[",
"'cellArray'",
"]",
")",
"# Otherwise, set the raster text properties",
"else",
":",
"wmsRasterDatasetFile",
".",
"rasterText",
"=",
"timeStepRaster",
"[",
"'rasterText'",
"]",
"# Add current file object to the session",
"session",
".",
"add",
"(",
"self",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"Could not read {0}. Mask Map must be supplied \"",
"\"to read WMS Datasets.\"",
".",
"format",
"(",
"filename",
")",
")"
] | WMS Dataset File Read from File Method | [
"WMS",
"Dataset",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L308-L390 | train |
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetFile._write | def _write(self, session, openFile, maskMap):
"""
WMS Dataset File Write to File Method
"""
# Magic numbers
FIRST_VALUE_INDEX = 12
# Write the header
openFile.write('DATASET\r\n')
if self.type == self.SCALAR_TYPE:
openFile.write('OBJTYPE {0}\r\n'.format(self.objectType))
openFile.write('BEGSCL\r\n')
elif self.type == self.VECTOR_TYPE:
openFile.write('VECTYPE {0}\r\n'.format(self.vectorType))
openFile.write('BEGVEC\r\n')
openFile.write('OBJID {0}\r\n'.format(self.objectID))
openFile.write('ND {0}\r\n'.format(self.numberData))
openFile.write('NC {0}\r\n'.format(self.numberCells))
openFile.write('NAME {0}\r\n'.format(self.name))
# Retrieve the mask map to use as the status rasters
statusString = ''
if isinstance(maskMap, RasterMapFile):
# Convert Mask Map to GRASS ASCII Raster
statusGrassRasterString = maskMap.getAsGrassAsciiGrid(session)
if statusGrassRasterString is not None:
# Split by lines
statusValues = statusGrassRasterString.split()
else:
statusValues = maskMap.rasterText.split()
# Assemble into a string in the WMS Dataset format
for i in range(FIRST_VALUE_INDEX, len(statusValues)):
statusString += statusValues[i] + '\r\n'
# Write time steps
for timeStepRaster in self.rasters:
# Write time step header
openFile.write('TS {0} {1}\r\n'.format(timeStepRaster.iStatus, timeStepRaster.timestamp))
# Write status raster (mask map) if applicable
if timeStepRaster.iStatus == 1:
openFile.write(statusString)
# Write value raster
valueString = timeStepRaster.getAsWmsDatasetString(session)
if valueString is not None:
openFile.write(valueString)
else:
openFile.write(timeStepRaster.rasterText)
# Write ending tag for the dataset
openFile.write('ENDDS\r\n') | python | def _write(self, session, openFile, maskMap):
"""
WMS Dataset File Write to File Method
"""
# Magic numbers
FIRST_VALUE_INDEX = 12
# Write the header
openFile.write('DATASET\r\n')
if self.type == self.SCALAR_TYPE:
openFile.write('OBJTYPE {0}\r\n'.format(self.objectType))
openFile.write('BEGSCL\r\n')
elif self.type == self.VECTOR_TYPE:
openFile.write('VECTYPE {0}\r\n'.format(self.vectorType))
openFile.write('BEGVEC\r\n')
openFile.write('OBJID {0}\r\n'.format(self.objectID))
openFile.write('ND {0}\r\n'.format(self.numberData))
openFile.write('NC {0}\r\n'.format(self.numberCells))
openFile.write('NAME {0}\r\n'.format(self.name))
# Retrieve the mask map to use as the status rasters
statusString = ''
if isinstance(maskMap, RasterMapFile):
# Convert Mask Map to GRASS ASCII Raster
statusGrassRasterString = maskMap.getAsGrassAsciiGrid(session)
if statusGrassRasterString is not None:
# Split by lines
statusValues = statusGrassRasterString.split()
else:
statusValues = maskMap.rasterText.split()
# Assemble into a string in the WMS Dataset format
for i in range(FIRST_VALUE_INDEX, len(statusValues)):
statusString += statusValues[i] + '\r\n'
# Write time steps
for timeStepRaster in self.rasters:
# Write time step header
openFile.write('TS {0} {1}\r\n'.format(timeStepRaster.iStatus, timeStepRaster.timestamp))
# Write status raster (mask map) if applicable
if timeStepRaster.iStatus == 1:
openFile.write(statusString)
# Write value raster
valueString = timeStepRaster.getAsWmsDatasetString(session)
if valueString is not None:
openFile.write(valueString)
else:
openFile.write(timeStepRaster.rasterText)
# Write ending tag for the dataset
openFile.write('ENDDS\r\n') | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"maskMap",
")",
":",
"# Magic numbers",
"FIRST_VALUE_INDEX",
"=",
"12",
"# Write the header",
"openFile",
".",
"write",
"(",
"'DATASET\\r\\n'",
")",
"if",
"self",
".",
"type",
"==",
"self",
".",
"SCALAR_TYPE",
":",
"openFile",
".",
"write",
"(",
"'OBJTYPE {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"objectType",
")",
")",
"openFile",
".",
"write",
"(",
"'BEGSCL\\r\\n'",
")",
"elif",
"self",
".",
"type",
"==",
"self",
".",
"VECTOR_TYPE",
":",
"openFile",
".",
"write",
"(",
"'VECTYPE {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"vectorType",
")",
")",
"openFile",
".",
"write",
"(",
"'BEGVEC\\r\\n'",
")",
"openFile",
".",
"write",
"(",
"'OBJID {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"objectID",
")",
")",
"openFile",
".",
"write",
"(",
"'ND {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"numberData",
")",
")",
"openFile",
".",
"write",
"(",
"'NC {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"numberCells",
")",
")",
"openFile",
".",
"write",
"(",
"'NAME {0}\\r\\n'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"# Retrieve the mask map to use as the status rasters",
"statusString",
"=",
"''",
"if",
"isinstance",
"(",
"maskMap",
",",
"RasterMapFile",
")",
":",
"# Convert Mask Map to GRASS ASCII Raster",
"statusGrassRasterString",
"=",
"maskMap",
".",
"getAsGrassAsciiGrid",
"(",
"session",
")",
"if",
"statusGrassRasterString",
"is",
"not",
"None",
":",
"# Split by lines",
"statusValues",
"=",
"statusGrassRasterString",
".",
"split",
"(",
")",
"else",
":",
"statusValues",
"=",
"maskMap",
".",
"rasterText",
".",
"split",
"(",
")",
"# Assemble into a string in the WMS Dataset format",
"for",
"i",
"in",
"range",
"(",
"FIRST_VALUE_INDEX",
",",
"len",
"(",
"statusValues",
")",
")",
":",
"statusString",
"+=",
"statusValues",
"[",
"i",
"]",
"+",
"'\\r\\n'",
"# Write time steps",
"for",
"timeStepRaster",
"in",
"self",
".",
"rasters",
":",
"# Write time step header",
"openFile",
".",
"write",
"(",
"'TS {0} {1}\\r\\n'",
".",
"format",
"(",
"timeStepRaster",
".",
"iStatus",
",",
"timeStepRaster",
".",
"timestamp",
")",
")",
"# Write status raster (mask map) if applicable",
"if",
"timeStepRaster",
".",
"iStatus",
"==",
"1",
":",
"openFile",
".",
"write",
"(",
"statusString",
")",
"# Write value raster",
"valueString",
"=",
"timeStepRaster",
".",
"getAsWmsDatasetString",
"(",
"session",
")",
"if",
"valueString",
"is",
"not",
"None",
":",
"openFile",
".",
"write",
"(",
"valueString",
")",
"else",
":",
"openFile",
".",
"write",
"(",
"timeStepRaster",
".",
"rasterText",
")",
"# Write ending tag for the dataset",
"openFile",
".",
"write",
"(",
"'ENDDS\\r\\n'",
")"
] | WMS Dataset File Write to File Method | [
"WMS",
"Dataset",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L392-L450 | train |
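The `_write` entry above emits the WMS dataset card layout: header keywords, one `TS` line per time step, then `ENDDS`. Below is a minimal standalone sketch of that layout for a scalar dataset; the header values and the in-memory `StringIO` target are illustrative assumptions, not taken from the record.

    from io import StringIO

    # Illustrative header values; a real WMSDatasetFile pulls these from its database record.
    object_type, object_id, num_data, num_cells, name = 'mesh2d', 21, 4, 4, 'depth'

    out = StringIO()
    out.write('DATASET\r\n')
    out.write('OBJTYPE {0}\r\n'.format(object_type))  # scalar datasets write OBJTYPE then BEGSCL
    out.write('BEGSCL\r\n')
    out.write('OBJID {0}\r\n'.format(object_id))
    out.write('ND {0}\r\n'.format(num_data))
    out.write('NC {0}\r\n'.format(num_cells))
    out.write('NAME {0}\r\n'.format(name))
    out.write('TS 1 0.0\r\n')   # iStatus == 1 means the status (mask) raster follows this line
    out.write('ENDDS\r\n')
    print(out.getvalue())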
CI-WATER/gsshapy | gsshapy/orm/wms_dataset.py | WMSDatasetRaster.getAsWmsDatasetString | def getAsWmsDatasetString(self, session):
"""
Retrieve the WMS Raster as a string in the WMS Dataset format
"""
# Magic numbers
FIRST_VALUE_INDEX = 12
# Write value raster
if type(self.raster) != type(None):
# Convert to GRASS ASCII Raster
valueGrassRasterString = self.getAsGrassAsciiGrid(session)
# Split by lines
values = valueGrassRasterString.split()
# Assemble into string
wmsDatasetString = ''
for i in range(FIRST_VALUE_INDEX, len(values)):
wmsDatasetString += '{0:.6f}\r\n'.format(float(values[i]))
return wmsDatasetString
else:
wmsDatasetString = self.rasterText | python | def getAsWmsDatasetString(self, session):
"""
Retrieve the WMS Raster as a string in the WMS Dataset format
"""
# Magic numbers
FIRST_VALUE_INDEX = 12
# Write value raster
if type(self.raster) != type(None):
# Convert to GRASS ASCII Raster
valueGrassRasterString = self.getAsGrassAsciiGrid(session)
# Split by lines
values = valueGrassRasterString.split()
# Assemble into string
wmsDatasetString = ''
for i in range(FIRST_VALUE_INDEX, len(values)):
wmsDatasetString += '{0:.6f}\r\n'.format(float(values[i]))
return wmsDatasetString
else:
wmsDatasetString = self.rasterText | [
"def",
"getAsWmsDatasetString",
"(",
"self",
",",
"session",
")",
":",
"# Magic numbers",
"FIRST_VALUE_INDEX",
"=",
"12",
"# Write value raster",
"if",
"type",
"(",
"self",
".",
"raster",
")",
"!=",
"type",
"(",
"None",
")",
":",
"# Convert to GRASS ASCII Raster",
"valueGrassRasterString",
"=",
"self",
".",
"getAsGrassAsciiGrid",
"(",
"session",
")",
"# Split by lines",
"values",
"=",
"valueGrassRasterString",
".",
"split",
"(",
")",
"# Assemble into string",
"wmsDatasetString",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"FIRST_VALUE_INDEX",
",",
"len",
"(",
"values",
")",
")",
":",
"wmsDatasetString",
"+=",
"'{0:.6f}\\r\\n'",
".",
"format",
"(",
"float",
"(",
"values",
"[",
"i",
"]",
")",
")",
"return",
"wmsDatasetString",
"else",
":",
"wmsDatasetString",
"=",
"self",
".",
"rasterText"
] | Retrieve the WMS Raster as a string in the WMS Dataset format | [
"Retrieve",
"the",
"WMS",
"Raster",
"as",
"a",
"string",
"in",
"the",
"WMS",
"Dataset",
"format"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L530-L553 | train |
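The conversion in `getAsWmsDatasetString` amounts to splitting a GRASS ASCII grid into tokens, skipping the twelve header tokens (six `key value` pairs), and reformatting every remaining value with six decimals. A self-contained sketch over a made-up grid:

    # Illustrative 2x2 GRASS ASCII grid; the first 12 tokens are the 6 header key/value pairs
    # that FIRST_VALUE_INDEX = 12 skips.
    grass_ascii = ('north: 100.0\nsouth: 0.0\neast: 100.0\nwest: 0.0\nrows: 2\ncols: 2\n'
                   '1.5 2.0\n0.0 3.25\n')

    FIRST_VALUE_INDEX = 12
    values = grass_ascii.split()
    wms_string = ''
    for i in range(FIRST_VALUE_INDEX, len(values)):
        wms_string += '{0:.6f}\r\n'.format(float(values[i]))
    print(wms_string)  # 1.500000 / 2.000000 / 0.000000 / 3.250000, one value per line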
CI-WATER/gsshapy | gsshapy/lib/check_geometry.py | check_watershed_boundary_geometry | def check_watershed_boundary_geometry(shapefile_path):
"""Make sure that there are no random artifacts in the file."""
wfg = gpd.read_file(shapefile_path)
first_shape = wfg.iloc[0].geometry
if hasattr(first_shape, 'geoms'):
raise ValueError(
"Invalid watershed boundary geometry. "
"To fix this, remove disconnected shapes or run "
"gsshapy.modeling.GSSHAModel.clean_boundary_shapefile") | python | def check_watershed_boundary_geometry(shapefile_path):
"""Make sure that there are no random artifacts in the file."""
wfg = gpd.read_file(shapefile_path)
first_shape = wfg.iloc[0].geometry
if hasattr(first_shape, 'geoms'):
raise ValueError(
"Invalid watershed boundary geometry. "
"To fix this, remove disconnected shapes or run "
"gsshapy.modeling.GSSHAModel.clean_boundary_shapefile") | [
"def",
"check_watershed_boundary_geometry",
"(",
"shapefile_path",
")",
":",
"wfg",
"=",
"gpd",
".",
"read_file",
"(",
"shapefile_path",
")",
"first_shape",
"=",
"wfg",
".",
"iloc",
"[",
"0",
"]",
".",
"geometry",
"if",
"hasattr",
"(",
"first_shape",
",",
"'geoms'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid watershed boundary geometry. \"",
"\"To fix this, remove disconnected shapes or run \"",
"\"gsshapy.modeling.GSSHAModel.clean_boundary_shapefile\"",
")"
] | Make sure that there are no random artifacts in the file. | [
"Make",
"sure",
"that",
"there",
"are",
"no",
"random",
"artifacts",
"in",
"the",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/check_geometry.py#L4-L12 | train |
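A hedged usage sketch for the boundary check above; the shapefile path is hypothetical and the call assumes gsshapy and geopandas are installed:

    from gsshapy.lib.check_geometry import check_watershed_boundary_geometry

    try:
        check_watershed_boundary_geometry('watershed_boundary.shp')  # hypothetical path
    except ValueError as error:
        # Raised when the first feature is a multi-part geometry (it exposes a .geoms attribute),
        # i.e. the boundary contains disconnected shapes.
        print(error)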
CyberZHG/keras-bi-lm | keras_bi_lm/model.py | BiLM.get_batch | def get_batch(sentences,
token_dict,
ignore_case=False,
unk_index=1,
eos_index=2):
"""Get a batch of inputs and outputs from given sentences.
:param sentences: A list of list of tokens.
:param token_dict: The dict that maps a token to an integer. `<UNK>` and `<EOS>` should be preserved.
:param ignore_case: Whether ignoring the case of the token.
:param unk_index: The index for unknown token.
:param eos_index: The index for ending of sentence.
:return inputs, outputs: The inputs and outputs of the batch.
"""
batch_size = len(sentences)
max_sentence_len = max(map(len, sentences))
inputs = [[0] * max_sentence_len for _ in range(batch_size)]
outputs_forward = [[0] * max_sentence_len for _ in range(batch_size)]
outputs_backward = [[0] * max_sentence_len for _ in range(batch_size)]
for i, sentence in enumerate(sentences):
outputs_forward[i][len(sentence) - 1] = eos_index
outputs_backward[i][0] = eos_index
for j, token in enumerate(sentence):
if ignore_case:
index = token_dict.get(token.lower(), unk_index)
else:
index = token_dict.get(token, unk_index)
inputs[i][j] = index
if j - 1 >= 0:
outputs_forward[i][j - 1] = index
if j + 1 < len(sentence):
outputs_backward[i][j + 1] = index
outputs_forward = np.expand_dims(np.asarray(outputs_forward), axis=-1)
outputs_backward = np.expand_dims(np.asarray(outputs_backward), axis=-1)
return np.asarray(inputs), [outputs_forward, outputs_backward] | python | def get_batch(sentences,
token_dict,
ignore_case=False,
unk_index=1,
eos_index=2):
"""Get a batch of inputs and outputs from given sentences.
:param sentences: A list of list of tokens.
:param token_dict: The dict that maps a token to an integer. `<UNK>` and `<EOS>` should be preserved.
:param ignore_case: Whether ignoring the case of the token.
:param unk_index: The index for unknown token.
:param eos_index: The index for ending of sentence.
:return inputs, outputs: The inputs and outputs of the batch.
"""
batch_size = len(sentences)
max_sentence_len = max(map(len, sentences))
inputs = [[0] * max_sentence_len for _ in range(batch_size)]
outputs_forward = [[0] * max_sentence_len for _ in range(batch_size)]
outputs_backward = [[0] * max_sentence_len for _ in range(batch_size)]
for i, sentence in enumerate(sentences):
outputs_forward[i][len(sentence) - 1] = eos_index
outputs_backward[i][0] = eos_index
for j, token in enumerate(sentence):
if ignore_case:
index = token_dict.get(token.lower(), unk_index)
else:
index = token_dict.get(token, unk_index)
inputs[i][j] = index
if j - 1 >= 0:
outputs_forward[i][j - 1] = index
if j + 1 < len(sentence):
outputs_backward[i][j + 1] = index
outputs_forward = np.expand_dims(np.asarray(outputs_forward), axis=-1)
outputs_backward = np.expand_dims(np.asarray(outputs_backward), axis=-1)
return np.asarray(inputs), [outputs_forward, outputs_backward] | [
"def",
"get_batch",
"(",
"sentences",
",",
"token_dict",
",",
"ignore_case",
"=",
"False",
",",
"unk_index",
"=",
"1",
",",
"eos_index",
"=",
"2",
")",
":",
"batch_size",
"=",
"len",
"(",
"sentences",
")",
"max_sentence_len",
"=",
"max",
"(",
"map",
"(",
"len",
",",
"sentences",
")",
")",
"inputs",
"=",
"[",
"[",
"0",
"]",
"*",
"max_sentence_len",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
"outputs_forward",
"=",
"[",
"[",
"0",
"]",
"*",
"max_sentence_len",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
"outputs_backward",
"=",
"[",
"[",
"0",
"]",
"*",
"max_sentence_len",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"sentences",
")",
":",
"outputs_forward",
"[",
"i",
"]",
"[",
"len",
"(",
"sentence",
")",
"-",
"1",
"]",
"=",
"eos_index",
"outputs_backward",
"[",
"i",
"]",
"[",
"0",
"]",
"=",
"eos_index",
"for",
"j",
",",
"token",
"in",
"enumerate",
"(",
"sentence",
")",
":",
"if",
"ignore_case",
":",
"index",
"=",
"token_dict",
".",
"get",
"(",
"token",
".",
"lower",
"(",
")",
",",
"unk_index",
")",
"else",
":",
"index",
"=",
"token_dict",
".",
"get",
"(",
"token",
",",
"unk_index",
")",
"inputs",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"index",
"if",
"j",
"-",
"1",
">=",
"0",
":",
"outputs_forward",
"[",
"i",
"]",
"[",
"j",
"-",
"1",
"]",
"=",
"index",
"if",
"j",
"+",
"1",
"<",
"len",
"(",
"sentence",
")",
":",
"outputs_backward",
"[",
"i",
"]",
"[",
"j",
"+",
"1",
"]",
"=",
"index",
"outputs_forward",
"=",
"np",
".",
"expand_dims",
"(",
"np",
".",
"asarray",
"(",
"outputs_forward",
")",
",",
"axis",
"=",
"-",
"1",
")",
"outputs_backward",
"=",
"np",
".",
"expand_dims",
"(",
"np",
".",
"asarray",
"(",
"outputs_backward",
")",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"np",
".",
"asarray",
"(",
"inputs",
")",
",",
"[",
"outputs_forward",
",",
"outputs_backward",
"]"
] | Get a batch of inputs and outputs from given sentences.
:param sentences: A list of list of tokens.
:param token_dict: The dict that maps a token to an integer. `<UNK>` and `<EOS>` should be preserved.
:param ignore_case: Whether ignoring the case of the token.
:param unk_index: The index for unknown token.
:param eos_index: The index for ending of sentence.
:return inputs, outputs: The inputs and outputs of the batch. | [
"Get",
"a",
"batch",
"of",
"inputs",
"and",
"outputs",
"from",
"given",
"sentences",
"."
] | 615e1131052d488420d759bab2370d504c9fc074 | https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L168-L203 | train |
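A small usage sketch for `get_batch`; the vocabulary, sentences, and the top-level `from keras_bi_lm import BiLM` import are illustrative assumptions rather than part of the record:

    from keras_bi_lm import BiLM  # assumes the package exposes BiLM at the top level

    # Index 0 is padding, 1 is <UNK> and 2 is <EOS>, matching the defaults above.
    token_dict = {'<PAD>': 0, '<UNK>': 1, '<EOS>': 2, 'all': 3, 'work': 4, 'and': 5, 'no': 6, 'play': 7}
    sentences = [['All', 'work', 'and', 'no', 'play'], ['Work']]

    inputs, outputs = BiLM.get_batch(sentences, token_dict, ignore_case=True)
    print(inputs.shape)       # (2, 5): batch padded to the longest sentence
    print(outputs[0].shape)   # (2, 5, 1): forward targets (next token, <EOS> in the last position)
    print(outputs[1].shape)   # (2, 5, 1): backward targets (previous token, <EOS> in the first position)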
CyberZHG/keras-bi-lm | keras_bi_lm/model.py | BiLM.fit | def fit(self, inputs, outputs, epochs=1):
"""Simple wrapper of model.fit.
:param inputs: Inputs.
:param outputs: List of forward and backward outputs.
:param epochs: Number of epoch.
:return: None
"""
self.model.fit(inputs, outputs, epochs=epochs) | python | def fit(self, inputs, outputs, epochs=1):
"""Simple wrapper of model.fit.
:param inputs: Inputs.
:param outputs: List of forward and backward outputs.
:param epochs: Number of epoch.
:return: None
"""
self.model.fit(inputs, outputs, epochs=epochs) | [
"def",
"fit",
"(",
"self",
",",
"inputs",
",",
"outputs",
",",
"epochs",
"=",
"1",
")",
":",
"self",
".",
"model",
".",
"fit",
"(",
"inputs",
",",
"outputs",
",",
"epochs",
"=",
"epochs",
")"
] | Simple wrapper of model.fit.
:param inputs: Inputs.
:param outputs: List of forward and backward outputs.
:param epochs: Number of epoch.
:return: None | [
"Simple",
"wrapper",
"of",
"model",
".",
"fit",
"."
] | 615e1131052d488420d759bab2370d504c9fc074 | https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L205-L214 | train |
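The `fit` entry is only a thin wrapper around the underlying keras model's `fit`, so training reduces to feeding it the arrays from `get_batch`. A sketch, with the `BiLM(token_num=...)` constructor argument assumed from the project README rather than from this record:

    from keras_bi_lm import BiLM

    bi_lm = BiLM(token_num=len(token_dict))   # token_num assumed; check the BiLM constructor
    bi_lm.fit(inputs, outputs, epochs=10)     # inputs/outputs come from get_batch as sketched above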
CyberZHG/keras-bi-lm | keras_bi_lm/model.py | BiLM.get_feature_layers | def get_feature_layers(self, input_layer=None, trainable=False, use_weighted_sum=False):
"""Get layers that output the Bi-LM feature.
:param input_layer: Use existing input layer.
:param trainable: Whether the layers are still trainable.
:param use_weighted_sum: Whether to use weighted sum of RNN layers.
:return [input_layer,] output_layer: Input and output layer.
"""
model = keras.models.clone_model(self.model, input_layer)
if not trainable:
for layer in model.layers:
layer.trainable = False
if use_weighted_sum:
rnn_layers_forward = list(map(
lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
self.rnn_layers_forward,
))
rnn_layers_backward = list(map(
lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
self.rnn_layers_backward,
))
forward_layer = WeightedSum(name='Bi-LM-Forward-Sum')(rnn_layers_forward)
backward_layer_rev = WeightedSum(name='Bi-LM-Backward-Sum-Rev')(rnn_layers_backward)
backward_layer = keras.layers.Lambda(
function=self._reverse_x,
mask=lambda _, mask: self._reverse_x(mask),
name='Bi-LM-Backward-Sum'
)(backward_layer_rev)
else:
forward_layer = model.get_layer(name='Bi-LM-Forward').output
backward_layer = model.get_layer(name='Bi-LM-Backward').output
output_layer = keras.layers.Concatenate(name='Bi-LM-Feature')([forward_layer, backward_layer])
if input_layer is None:
input_layer = model.layers[0].input
return input_layer, output_layer
return output_layer | python | def get_feature_layers(self, input_layer=None, trainable=False, use_weighted_sum=False):
"""Get layers that output the Bi-LM feature.
:param input_layer: Use existing input layer.
:param trainable: Whether the layers are still trainable.
:param use_weighted_sum: Whether to use weighted sum of RNN layers.
:return [input_layer,] output_layer: Input and output layer.
"""
model = keras.models.clone_model(self.model, input_layer)
if not trainable:
for layer in model.layers:
layer.trainable = False
if use_weighted_sum:
rnn_layers_forward = list(map(
lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
self.rnn_layers_forward,
))
rnn_layers_backward = list(map(
lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
self.rnn_layers_backward,
))
forward_layer = WeightedSum(name='Bi-LM-Forward-Sum')(rnn_layers_forward)
backward_layer_rev = WeightedSum(name='Bi-LM-Backward-Sum-Rev')(rnn_layers_backward)
backward_layer = keras.layers.Lambda(
function=self._reverse_x,
mask=lambda _, mask: self._reverse_x(mask),
name='Bi-LM-Backward-Sum'
)(backward_layer_rev)
else:
forward_layer = model.get_layer(name='Bi-LM-Forward').output
backward_layer = model.get_layer(name='Bi-LM-Backward').output
output_layer = keras.layers.Concatenate(name='Bi-LM-Feature')([forward_layer, backward_layer])
if input_layer is None:
input_layer = model.layers[0].input
return input_layer, output_layer
return output_layer | [
"def",
"get_feature_layers",
"(",
"self",
",",
"input_layer",
"=",
"None",
",",
"trainable",
"=",
"False",
",",
"use_weighted_sum",
"=",
"False",
")",
":",
"model",
"=",
"keras",
".",
"models",
".",
"clone_model",
"(",
"self",
".",
"model",
",",
"input_layer",
")",
"if",
"not",
"trainable",
":",
"for",
"layer",
"in",
"model",
".",
"layers",
":",
"layer",
".",
"trainable",
"=",
"False",
"if",
"use_weighted_sum",
":",
"rnn_layers_forward",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"model",
".",
"get_layer",
"(",
"x",
".",
"name",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
".",
"output",
",",
"self",
".",
"rnn_layers_forward",
",",
")",
")",
"rnn_layers_backward",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"model",
".",
"get_layer",
"(",
"x",
".",
"name",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
".",
"output",
",",
"self",
".",
"rnn_layers_backward",
",",
")",
")",
"forward_layer",
"=",
"WeightedSum",
"(",
"name",
"=",
"'Bi-LM-Forward-Sum'",
")",
"(",
"rnn_layers_forward",
")",
"backward_layer_rev",
"=",
"WeightedSum",
"(",
"name",
"=",
"'Bi-LM-Backward-Sum-Rev'",
")",
"(",
"rnn_layers_backward",
")",
"backward_layer",
"=",
"keras",
".",
"layers",
".",
"Lambda",
"(",
"function",
"=",
"self",
".",
"_reverse_x",
",",
"mask",
"=",
"lambda",
"_",
",",
"mask",
":",
"self",
".",
"_reverse_x",
"(",
"mask",
")",
",",
"name",
"=",
"'Bi-LM-Backward-Sum'",
")",
"(",
"backward_layer_rev",
")",
"else",
":",
"forward_layer",
"=",
"model",
".",
"get_layer",
"(",
"name",
"=",
"'Bi-LM-Forward'",
")",
".",
"output",
"backward_layer",
"=",
"model",
".",
"get_layer",
"(",
"name",
"=",
"'Bi-LM-Backward'",
")",
".",
"output",
"output_layer",
"=",
"keras",
".",
"layers",
".",
"Concatenate",
"(",
"name",
"=",
"'Bi-LM-Feature'",
")",
"(",
"[",
"forward_layer",
",",
"backward_layer",
"]",
")",
"if",
"input_layer",
"is",
"None",
":",
"input_layer",
"=",
"model",
".",
"layers",
"[",
"0",
"]",
".",
"input",
"return",
"input_layer",
",",
"output_layer",
"return",
"output_layer"
] | Get layers that output the Bi-LM feature.
:param input_layer: Use existing input layer.
:param trainable: Whether the layers are still trainable.
:param use_weighted_sum: Whether to use weighted sum of RNN layers.
:return [input_layer,] output_layer: Input and output layer. | [
"Get",
"layers",
"that",
"output",
"the",
"Bi",
"-",
"LM",
"feature",
"."
] | 615e1131052d488420d759bab2370d504c9fc074 | https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L225-L261 | train |
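A sketch of reusing a trained Bi-LM as a frozen feature extractor for a downstream tagger; the dense head, its output size, and the `bi_lm` instance (trained as in the previous sketch) are illustrative assumptions:

    import keras

    input_layer, feature_layer = bi_lm.get_feature_layers()   # frozen layers (trainable=False)
    tag_layer = keras.layers.Dense(units=5, activation='softmax')(feature_layer)
    tagger = keras.models.Model(inputs=input_layer, outputs=tag_layer)
    tagger.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    tagger.summary()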
pedrotgn/pyactor | examples/chord/chord.py | Node.join | def join(self, n1):
"""if join returns false, the node did not entry the ring. Retry it"""
if self.id == n1.get_id():
for i in range(k):
self.finger[i] = self.proxy
self.predecessor = self.proxy
self.run = True
return True
else:
try:
self.init_finger_table(n1)
except Exception:
print 'Join failed'
# raise Exception('Join failed')
return False
else:
self.run = True
return True | python | def join(self, n1):
"""if join returns false, the node did not entry the ring. Retry it"""
if self.id == n1.get_id():
for i in range(k):
self.finger[i] = self.proxy
self.predecessor = self.proxy
self.run = True
return True
else:
try:
self.init_finger_table(n1)
except Exception:
print 'Join failed'
# raise Exception('Join failed')
return False
else:
self.run = True
return True | [
"def",
"join",
"(",
"self",
",",
"n1",
")",
":",
"if",
"self",
".",
"id",
"==",
"n1",
".",
"get_id",
"(",
")",
":",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"self",
".",
"finger",
"[",
"i",
"]",
"=",
"self",
".",
"proxy",
"self",
".",
"predecessor",
"=",
"self",
".",
"proxy",
"self",
".",
"run",
"=",
"True",
"return",
"True",
"else",
":",
"try",
":",
"self",
".",
"init_finger_table",
"(",
"n1",
")",
"except",
"Exception",
":",
"print",
"'Join failed'",
"# raise Exception('Join failed')",
"return",
"False",
"else",
":",
"self",
".",
"run",
"=",
"True",
"return",
"True"
] | if join returns false, the node did not enter the ring. Retry it | [
"if",
"join",
"returns",
"false",
"the",
"node",
"did",
"not",
"entry",
"the",
"ring",
".",
"Retry",
"it"
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/examples/chord/chord.py#L123-L140 | train |
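The docstring asks callers to retry when `join` returns false. A hedged retry sketch; `node` and `bootstrap` are assumed to be pyactor proxies to Node actors created elsewhere in the chord example:

    from time import sleep

    joined = node.join(bootstrap)
    while not joined:
        sleep(1)                        # back off briefly before retrying the join
        joined = node.join(bootstrap)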
CI-WATER/gsshapy | gsshapy/orm/snw.py | NwsrfsFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
NWSRFS Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse
with open(path, 'r') as nwsrfsFile:
for line in nwsrfsFile:
sline = line.strip().split()
# Cases
if sline[0].lower() == 'number_bands:':
self.numBands = sline[1]
elif sline[0].lower() == 'lower_elevation':
"""DO NOTHING"""
else:
# Create GSSHAPY NwsrfsRecord object
record = NwsrfsRecord(lowerElev=sline[0],
upperElev=sline[1],
mfMin=sline[2],
mfMax=sline[3],
scf=sline[4],
frUse=sline[5],
tipm=sline[6],
nmf=sline[7],
fua=sline[8],
plwhc=sline[9])
# Associate NwsrfsRecord with NwsrfsFile
record.nwsrfsFile = self | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
NWSRFS Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse
with open(path, 'r') as nwsrfsFile:
for line in nwsrfsFile:
sline = line.strip().split()
# Cases
if sline[0].lower() == 'number_bands:':
self.numBands = sline[1]
elif sline[0].lower() == 'lower_elevation':
"""DO NOTHING"""
else:
# Create GSSHAPY NwsrfsRecord object
record = NwsrfsRecord(lowerElev=sline[0],
upperElev=sline[1],
mfMin=sline[2],
mfMax=sline[3],
scf=sline[4],
frUse=sline[5],
tipm=sline[6],
nmf=sline[7],
fua=sline[8],
plwhc=sline[9])
# Associate NwsrfsRecord with NwsrfsFile
record.nwsrfsFile = self | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"nwsrfsFile",
":",
"for",
"line",
"in",
"nwsrfsFile",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"sline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"'number_bands:'",
":",
"self",
".",
"numBands",
"=",
"sline",
"[",
"1",
"]",
"elif",
"sline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"'lower_elevation'",
":",
"\"\"\"DO NOTHING\"\"\"",
"else",
":",
"# Create GSSHAPY NwsrfsRecord object",
"record",
"=",
"NwsrfsRecord",
"(",
"lowerElev",
"=",
"sline",
"[",
"0",
"]",
",",
"upperElev",
"=",
"sline",
"[",
"1",
"]",
",",
"mfMin",
"=",
"sline",
"[",
"2",
"]",
",",
"mfMax",
"=",
"sline",
"[",
"3",
"]",
",",
"scf",
"=",
"sline",
"[",
"4",
"]",
",",
"frUse",
"=",
"sline",
"[",
"5",
"]",
",",
"tipm",
"=",
"sline",
"[",
"6",
"]",
",",
"nmf",
"=",
"sline",
"[",
"7",
"]",
",",
"fua",
"=",
"sline",
"[",
"8",
"]",
",",
"plwhc",
"=",
"sline",
"[",
"9",
"]",
")",
"# Associate NwsrfsRecord with NwsrfsFile",
"record",
".",
"nwsrfsFile",
"=",
"self"
] | NWSRFS Read from File Method | [
"NWSRFS",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L56-L87 | train |
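The `_read` entry expects a `Number_Bands:` line, a column-header line starting with `Lower_Elevation`, and then one ten-column record per elevation band. A standalone sketch of that parse over made-up text:

    nwsrfs_text = """Number_Bands: 2
    Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC
    1000 1500 0.3 0.9 1.1 0.2 0.2 0.2 0.1 0.4
    1500 2000 0.4 1.0 1.1 0.2 0.2 0.2 0.1 0.4
    """

    for line in nwsrfs_text.splitlines():
        sline = line.strip().split()
        if not sline:
            continue                                   # skip blank lines (the real _read has none)
        if sline[0].lower() == 'number_bands:':
            print('bands:', sline[1])
        elif sline[0].lower() == 'lower_elevation':
            pass                                       # header row, skipped just like in _read
        else:
            print('band record:', sline)               # the ten values that feed a NwsrfsRecord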
CI-WATER/gsshapy | gsshapy/orm/snw.py | NwsrfsFile._write | def _write(self, session, openFile, replaceParamFile):
"""
NWSRFS Write to File Method
"""
# Write lines
openFile.write('Number_Bands: %s\n' % self.numBands)
openFile.write('Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC\n')
# Retrieve NwsrfsRecords
records = self.nwsrfsRecords
for record in records:
openFile.write('%s%s%s%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f\n' % (
record.lowerElev,
' ' * (17 - len(str(record.lowerElev))), # Num Spaces
record.upperElev,
' ' * (17 - len(str(record.upperElev))), # Num Spaces
record.mfMin,
' ' * (8 - len(str(record.mfMin))), # Num Spaces
record.mfMax,
' ' * (8 - len(str(record.mfMax))), # Num Spaces
record.scf,
' ' * (5 - len(str(record.scf))), # Num Spaces
record.frUse,
' ' * (8 - len(str(record.frUse))), # Num Spaces
record.tipm,
' ' * (6 - len(str(record.tipm))), # Num Spaces
record.nmf,
' ' * (5 - len(str(record.nmf))), # Num Spaces
record.fua,
' ' * (5 - len(str(record.fua))), # Num Spaces
record.plwhc)) | python | def _write(self, session, openFile, replaceParamFile):
"""
NWSRFS Write to File Method
"""
# Write lines
openFile.write('Number_Bands: %s\n' % self.numBands)
openFile.write('Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC\n')
# Retrieve NwsrfsRecords
records = self.nwsrfsRecords
for record in records:
openFile.write('%s%s%s%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f\n' % (
record.lowerElev,
' ' * (17 - len(str(record.lowerElev))), # Num Spaces
record.upperElev,
' ' * (17 - len(str(record.upperElev))), # Num Spaces
record.mfMin,
' ' * (8 - len(str(record.mfMin))), # Num Spaces
record.mfMax,
' ' * (8 - len(str(record.mfMax))), # Num Spaces
record.scf,
' ' * (5 - len(str(record.scf))), # Num Spaces
record.frUse,
' ' * (8 - len(str(record.frUse))), # Num Spaces
record.tipm,
' ' * (6 - len(str(record.tipm))), # Num Spaces
record.nmf,
' ' * (5 - len(str(record.nmf))), # Num Spaces
record.fua,
' ' * (5 - len(str(record.fua))), # Num Spaces
record.plwhc)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Write lines",
"openFile",
".",
"write",
"(",
"'Number_Bands: %s\\n'",
"%",
"self",
".",
"numBands",
")",
"openFile",
".",
"write",
"(",
"'Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC\\n'",
")",
"# Retrieve NwsrfsRecords",
"records",
"=",
"self",
".",
"nwsrfsRecords",
"for",
"record",
"in",
"records",
":",
"openFile",
".",
"write",
"(",
"'%s%s%s%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f\\n'",
"%",
"(",
"record",
".",
"lowerElev",
",",
"' '",
"*",
"(",
"17",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"lowerElev",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"upperElev",
",",
"' '",
"*",
"(",
"17",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"upperElev",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"mfMin",
",",
"' '",
"*",
"(",
"8",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"mfMin",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"mfMax",
",",
"' '",
"*",
"(",
"8",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"mfMax",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"scf",
",",
"' '",
"*",
"(",
"5",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"scf",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"frUse",
",",
"' '",
"*",
"(",
"8",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"frUse",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"tipm",
",",
"' '",
"*",
"(",
"6",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"tipm",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"nmf",
",",
"' '",
"*",
"(",
"5",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"nmf",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"fua",
",",
"' '",
"*",
"(",
"5",
"-",
"len",
"(",
"str",
"(",
"record",
".",
"fua",
")",
")",
")",
",",
"# Num Spaces",
"record",
".",
"plwhc",
")",
")"
] | NWSRFS Write to File Method | [
"NWSRFS",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L89-L120 | train |
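The `_write` entry above pads each column with a count of spaces derived from a fixed field width instead of using format-width specifiers. A tiny standalone sketch of that trick for the first column (the width 17 comes from the code; the values are made up):

    lower_elev, mf_min = 1000, 0.3
    line = '%s%s%.1f\n' % (lower_elev, ' ' * (17 - len(str(lower_elev))), mf_min)
    print(repr(line))   # '1000' left-aligned in a 17-character column, then the melt factor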