Dataset schema (one record per function; string lengths and distinct-value counts as reported):

repo              string, lengths 7-55
path              string, lengths 4-127
func_name         string, lengths 1-88
original_string   string, lengths 75-19.8k
language          string, 1 distinct value
code              string, lengths 75-19.8k
code_tokens       sequence
docstring         string, lengths 3-17.3k
docstring_tokens  sequence
sha               string, lengths 40-40
url               string, lengths 87-242
partition         string, 1 distinct value
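A minimal sketch of iterating records with this schema, assuming they have already been loaded as a list of Python dicts (the `records` variable and the loading step are hypothetical, not part of the dataset):

# Hypothetical: `records` is a list of dicts keyed by the schema fields above.
for record in records:
    assert record['language'] == 'python'   # single-valued column
    assert len(record['sha']) == 40         # full git commit hash
    print(record['func_name'], record['url'])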
repo: CI-WATER/gsshapy
path: gsshapy/orm/snw.py
func_name: OrographicGageFile._read
language: python

def _read(self, directory, filename, session, path, name, extension, spatial,
          spatialReferenceID, replaceParamFile):
    """
    Orographic Gage File Read from File Method
    """
    # Set file extension property
    self.fileExtension = extension

    # Open file and parse into OrographicMeasurements
    with open(path, 'r') as orthoFile:
        for line in orthoFile:
            sline = line.strip().split()

            # Cases
            if sline[0].lower() == 'num_sites:':
                self.numSites = sline[1]
            elif sline[0].lower() == 'elev_base':
                self.elevBase = sline[1]
            elif sline[0].lower() == 'elev_2':
                self.elev2 = sline[1]
            elif sline[0].lower() == 'year':
                """DO NOTHING"""
            else:
                # Create datetime object
                dateTime = datetime(year=int(sline[0]),
                                    month=int(sline[1]),
                                    day=int(sline[2]),
                                    hour=int(sline[3]))

                # Create GSSHAPY OrthoMeasurement object
                measurement = OrographicMeasurement(dateTime=dateTime,
                                                    temp2=sline[4])

                # Associate OrthoMeasurement with OrographicGageFile
                self.orographicMeasurements.append(measurement)

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L211-L244
partition: train
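For reference, a hypothetical input fragment consistent with the cases this parser handles (keywords come from the code; all values are illustrative). The header line beginning with 'Year' is skipped, and every following line is parsed as a measurement:

Num_Sites: 2
Elev_Base 1500.0
Elev_2 2500.0
Year Month Day Hour Temp_2
2010 6 1 0 15.300
2010 6 1 1 14.750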
repo: CI-WATER/gsshapy
path: gsshapy/orm/snw.py
func_name: OrographicGageFile._write
language: python

def _write(self, session, openFile, replaceParamFile):
    """
    Orographic Gage File Write to File Method
    """
    # Write lines
    openFile.write('Num_Sites: %s\n' % self.numSites)
    openFile.write('Elev_Base %s\n' % self.elevBase)
    openFile.write('Elev_2 %s\n' % self.elev2)
    openFile.write('Year Month Day Hour Temp_2\n')

    # Retrieve OrographicMeasurements
    measurements = self.orographicMeasurements

    for measurement in measurements:
        dateTime = measurement.dateTime
        openFile.write('%s%s%s%s%s%s%s%s%.3f\n' % (
            dateTime.year,
            ' ',
            dateTime.month,
            ' ' * (8 - len(str(dateTime.month))),
            dateTime.day,
            ' ' * (8 - len(str(dateTime.day))),
            dateTime.hour,
            ' ' * (8 - len(str(dateTime.hour))),
            measurement.temp2))

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L246-L270
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile.getFluvialLinks
language: python

def getFluvialLinks(self):
    """
    Retrieve only the links that represent fluvial portions of the stream.
    Returns a list of StreamLink instances.

    Returns:
        list: A list of fluvial :class:`.StreamLink` objects.
    """
    # Define fluvial types
    fluvialTypeKeywords = ('TRAPEZOID', 'TRAP', 'BREAKPOINT', 'ERODE', 'SUBSURFACE')

    fluvialLinks = []

    for link in self.streamLinks:
        for fluvialTypeKeyword in fluvialTypeKeywords:
            if fluvialTypeKeyword in link.type:
                fluvialLinks.append(link)
                break

    return fluvialLinks

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L93-L111
partition: train
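A hedged usage sketch, assuming `channelInputFile` is an already-populated ChannelInputFile instance (the variable name is illustrative):

fluvialLinks = channelInputFile.getFluvialLinks()
for link in fluvialLinks:
    # Every returned StreamLink has a type containing one of the fluvial keywords
    print(link.linkNumber, link.type)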
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile.getOrderedLinks
language: python

def getOrderedLinks(self, session):
    """
    Retrieve the links in the order of the link number.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.

    Returns:
        list: A list of :class:`.StreamLink` objects.
    """
    streamLinks = session.query(StreamLink).\
                          filter(StreamLink.channelInputFile == self).\
                          order_by(StreamLink.linkNumber).\
                          all()
    return streamLinks

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L113-L128
partition: train
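A hedged usage sketch, assuming `session` is a SQLAlchemy session bound to the project database and `channelInputFile` is a persisted ChannelInputFile (names illustrative):

orderedLinks = channelInputFile.getOrderedLinks(session)
linkNumbers = [link.linkNumber for link in orderedLinks]
assert linkNumbers == sorted(linkNumbers)  # the query orders by linkNumber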
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile.getStreamNetworkAsWkt
language: python

def getStreamNetworkAsWkt(self, session, withNodes=True):
    """
    Retrieve the stream network geometry in Well Known Text format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
        withNodes (bool, optional): Include nodes. Defaults to True.

    Returns:
        str: Well Known Text string.
    """
    wkt_list = []

    for link in self.streamLinks:
        wkt_link = link.getAsWkt(session)

        if wkt_link:
            wkt_list.append(wkt_link)

        if withNodes:
            for node in link.nodes:
                wkt_node = node.getAsWkt(session)

                if wkt_node:
                    wkt_list.append(wkt_node)

    return 'GEOMCOLLECTION ({0})'.format(', '.join(wkt_list))

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L303-L329
partition: train
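A hedged usage sketch (names illustrative; requires a PostGIS-enabled session). Note that the method emits 'GEOMCOLLECTION (...)' as written above, while standard WKT spells this GEOMETRYCOLLECTION, so strict WKT consumers may need to account for that:

wkt = channelInputFile.getStreamNetworkAsWkt(session, withNodes=False)
with open('stream_network.wkt', 'w') as f:
    f.write(wkt)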
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile.getStreamNetworkAsGeoJson
language: python

def getStreamNetworkAsGeoJson(self, session, withNodes=True):
    """
    Retrieve the stream network geometry in GeoJSON format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
        withNodes (bool, optional): Include nodes. Defaults to True.

    Returns:
        str: GeoJSON string.
    """
    features_list = []

    # Assemble link features
    for link in self.streamLinks:
        link_geoJson = link.getAsGeoJson(session)

        if link_geoJson:
            link_geometry = json.loads(link_geoJson)

            link_properties = {"link_number": link.linkNumber,
                               "type": link.type,
                               "num_elements": link.numElements,
                               "dx": link.dx,
                               "erode": link.erode,
                               "subsurface": link.subsurface}

            link_feature = {"type": "Feature",
                            "geometry": link_geometry,
                            "properties": link_properties,
                            "id": link.id}

            features_list.append(link_feature)

        # Assemble node features
        if withNodes:
            for node in link.nodes:
                node_geoJson = node.getAsGeoJson(session)

                if node_geoJson:
                    node_geometry = json.loads(node_geoJson)

                    node_properties = {"link_number": link.linkNumber,
                                       "node_number": node.nodeNumber,
                                       "elevation": node.elevation}

                    node_feature = {"type": "Feature",
                                    "geometry": node_geometry,
                                    "properties": node_properties,
                                    "id": node.id}

                    features_list.append(node_feature)

    feature_collection = {"type": "FeatureCollection",
                          "features": features_list}

    return json.dumps(feature_collection)

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L331-L387
partition: train
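A hedged usage sketch (names illustrative; requires a PostGIS-enabled session). The returned string parses as a standard GeoJSON FeatureCollection:

import json

geojsonString = channelInputFile.getStreamNetworkAsGeoJson(session, withNodes=True)
featureCollection = json.loads(geojsonString)
print(featureCollection['type'])            # 'FeatureCollection'
print(len(featureCollection['features']))   # link features, plus node features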
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._read
language: python

def _read(self, directory, filename, session, path, name, extension, spatial,
          spatialReferenceID, replaceParamFile):
    """
    Channel Input File Read from File Method
    """
    # Set file extension property
    self.fileExtension = extension

    # Dictionary of keywords/cards and parse function names
    KEYWORDS = {'ALPHA': cic.cardChunk,
                'BETA': cic.cardChunk,
                'THETA': cic.cardChunk,
                'LINKS': cic.cardChunk,
                'MAXNODES': cic.cardChunk,
                'CONNECT': cic.connectChunk,
                'LINK': cic.linkChunk}

    links = []
    connectivity = []

    # Parse file into chunks associated with keywords/cards
    with open(path, 'r') as f:
        chunks = pt.chunk(KEYWORDS, f)

    # Parse chunks associated with each key
    for key, chunkList in iteritems(chunks):
        # Parse each chunk in the chunk list
        for chunk in chunkList:
            # Call chunk specific parsers for each chunk
            result = KEYWORDS[key](key, chunk)

            # Cases
            if key == 'LINK':
                # Link handler
                links.append(self._createLink(result, replaceParamFile))
            elif key == 'CONNECT':
                # Connectivity handler
                connectivity.append(result)
            else:
                # Global variable handler
                card = result['card']
                value = result['values'][0]

                # Cases
                if card == 'LINKS':
                    self.links = int(value)
                elif card == 'MAXNODES':
                    self.maxNodes = int(value)
                elif card == 'ALPHA':
                    self.alpha = float(vrp(value, replaceParamFile))
                elif card == 'BETA':
                    self.beta = float(vrp(value, replaceParamFile))
                elif card == 'THETA':
                    self.theta = float(vrp(value, replaceParamFile))

    self._createConnectivity(linkList=links, connectList=connectivity)

    if spatial:
        self._createGeometry(session, spatialReferenceID)

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L389-L447
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._write
language: python

def _write(self, session, openFile, replaceParamFile):
    """
    Channel Input File Write to File Method
    """
    # Write lines
    openFile.write('GSSHA_CHAN\n')

    alpha = vwp(self.alpha, replaceParamFile)
    try:
        openFile.write('ALPHA%s%.6f\n' % (' ' * 7, alpha))
    except:
        openFile.write('ALPHA%s%s\n' % (' ' * 7, alpha))

    beta = vwp(self.beta, replaceParamFile)
    try:
        openFile.write('BETA%s%.6f\n' % (' ' * 8, beta))
    except:
        openFile.write('BETA%s%s\n' % (' ' * 8, beta))

    theta = vwp(self.theta, replaceParamFile)
    try:
        openFile.write('THETA%s%.6f\n' % (' ' * 7, theta))
    except:
        openFile.write('THETA%s%s\n' % (' ' * 7, theta))

    openFile.write('LINKS%s%s\n' % (' ' * 7, self.links))
    openFile.write('MAXNODES%s%s\n' % (' ' * 4, self.maxNodes))

    # Retrieve StreamLinks
    links = self.getOrderedLinks(session)

    self._writeConnectivity(links=links, fileObject=openFile)
    self._writeLinks(links=links, fileObject=openFile, replaceParamFile=replaceParamFile)

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L449-L484
partition: train
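Given the format strings above, the head of a written channel input file looks like this (values illustrative):

GSSHA_CHAN
ALPHA       0.500000
BETA        0.600000
THETA       0.700000
LINKS       25
MAXNODES    50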
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createLink
language: python

def _createLink(self, linkResult, replaceParamFile):
    """
    Create GSSHAPY Link Object Method
    """
    link = None

    # Cases
    if linkResult['type'] == 'XSEC':
        # Cross section link handler
        link = self._createCrossSection(linkResult, replaceParamFile)

    elif linkResult['type'] == 'STRUCTURE':
        # Structure link handler
        link = self._createStructure(linkResult, replaceParamFile)

    elif linkResult['type'] in ('RESERVOIR', 'LAKE'):
        # Reservoir/lake handler
        link = self._createReservoir(linkResult, replaceParamFile)

    return link

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L486-L505
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createConnectivity
language: python

def _createConnectivity(self, linkList, connectList):
    """
    Create GSSHAPY Connect Object Method
    """
    # Create StreamLink-Connectivity Pairs
    for idx, link in enumerate(linkList):
        connectivity = connectList[idx]

        # Initialize GSSHAPY UpstreamLink objects
        for upLink in connectivity['upLinks']:
            upstreamLink = UpstreamLink(upstreamLinkID=int(upLink))
            upstreamLink.streamLink = link

        link.downstreamLinkID = int(connectivity['downLink'])
        link.numUpstreamLinks = int(connectivity['numUpLinks'])

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L508-L524
partition: train
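The method assumes each entry of connectList carries the keys used above; a hypothetical entry (values illustrative):

connectivity = {'upLinks': ['2', '3'],  # IDs of upstream links
                'downLink': '5',        # ID of the downstream link
                'numUpLinks': '2'}      # number of upstream links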
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createCrossSection
language: python

def _createCrossSection(self, linkResult, replaceParamFile):
    """
    Create GSSHAPY Cross Section Objects Method
    """
    # Extract header variables from link result object
    header = linkResult['header']

    # Initialize GSSHAPY StreamLink object
    link = StreamLink(linkNumber=int(header['link']),
                      type=header['xSecType'],
                      numElements=header['nodes'],
                      dx=vrp(header['dx'], replaceParamFile),
                      erode=header['erode'],
                      subsurface=header['subsurface'])

    # Associate StreamLink with ChannelInputFile
    link.channelInputFile = self

    # Initialize GSSHAPY TrapezoidalCS or BreakpointCS objects
    xSection = linkResult['xSection']

    # Cases
    if 'TRAPEZOID' in link.type or 'TRAP' in link.type:
        # Trapezoid cross section handler
        # Initialize GSSHAPY TrapezoidalCS object
        trapezoidCS = TrapezoidalCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile),
                                    bottomWidth=vrp(xSection['bottom_width'], replaceParamFile),
                                    bankfullDepth=vrp(xSection['bankfull_depth'], replaceParamFile),
                                    sideSlope=vrp(xSection['side_slope'], replaceParamFile),
                                    mRiver=vrp(xSection['m_river'], replaceParamFile),
                                    kRiver=vrp(xSection['k_river'], replaceParamFile),
                                    erode=xSection['erode'],
                                    subsurface=xSection['subsurface'],
                                    maxErosion=vrp(xSection['max_erosion'], replaceParamFile))

        # Associate TrapezoidalCS with StreamLink
        trapezoidCS.streamLink = link

    elif 'BREAKPOINT' in link.type:
        # Breakpoint cross section handler
        # Initialize GSSHAPY BreakpointCS object
        breakpointCS = BreakpointCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile),
                                    numPairs=xSection['npairs'],
                                    numInterp=vrp(xSection['num_interp'], replaceParamFile),
                                    mRiver=vrp(xSection['m_river'], replaceParamFile),
                                    kRiver=vrp(xSection['k_river'], replaceParamFile),
                                    erode=xSection['erode'],
                                    subsurface=xSection['subsurface'],
                                    maxErosion=vrp(xSection['max_erosion'], replaceParamFile))

        # Associate BreakpointCS with StreamLink
        breakpointCS.streamLink = link

        # Create GSSHAPY Breakpoint objects
        for b in xSection['breakpoints']:
            breakpoint = Breakpoint(x=b['x'],
                                    y=b['y'])

            # Associate Breakpoint with BreakpointCS
            breakpoint.crossSection = breakpointCS

    # Initialize GSSHAPY StreamNode objects
    for n in linkResult['nodes']:
        # Initialize GSSHAPY StreamNode object
        node = StreamNode(nodeNumber=int(n['node']),
                          x=n['x'],
                          y=n['y'],
                          elevation=n['elev'])

        # Associate StreamNode with StreamLink
        node.streamLink = link

    return link

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L527-L599
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createStructure
language: python

def _createStructure(self, linkResult, replaceParamFile):
    """
    Create GSSHAPY Structure Objects Method
    """
    # Constants
    WEIRS = ('WEIR', 'SAG_WEIR')
    CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
    CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')

    header = linkResult['header']

    # Initialize GSSHAPY StreamLink object
    link = StreamLink(linkNumber=header['link'],
                      type=linkResult['type'],
                      numElements=header['numstructs'])

    # Associate StreamLink with ChannelInputFile
    link.channelInputFile = self

    # Create Structure objects
    for s in linkResult['structures']:
        structType = s['structtype']

        # Cases
        if structType in WEIRS:
            # Weir type handler
            # Initialize GSSHAPY Weir object
            weir = Weir(type=structType,
                        crestLength=vrp(s['crest_length'], replaceParamFile),
                        crestLowElevation=vrp(s['crest_low_elev'], replaceParamFile),
                        dischargeCoeffForward=vrp(s['discharge_coeff_forward'], replaceParamFile),
                        dischargeCoeffReverse=vrp(s['discharge_coeff_reverse'], replaceParamFile),
                        crestLowLocation=vrp(s['crest_low_loc'], replaceParamFile),
                        steepSlope=vrp(s['steep_slope'], replaceParamFile),
                        shallowSlope=vrp(s['shallow_slope'], replaceParamFile))

            # Associate Weir with StreamLink
            weir.streamLink = link

        elif structType in CULVERTS:
            # Culvert type handler
            # Initialize GSSHAPY Culvert object
            culvert = Culvert(type=structType,
                              upstreamInvert=vrp(s['upinvert'], replaceParamFile),
                              downstreamInvert=vrp(s['downinvert'], replaceParamFile),
                              inletDischargeCoeff=vrp(s['inlet_disch_coeff'], replaceParamFile),
                              reverseFlowDischargeCoeff=vrp(s['rev_flow_disch_coeff'], replaceParamFile),
                              slope=vrp(s['slope'], replaceParamFile),
                              length=vrp(s['length'], replaceParamFile),
                              roughness=vrp(s['rough_coeff'], replaceParamFile),
                              diameter=vrp(s['diameter'], replaceParamFile),
                              width=vrp(s['width'], replaceParamFile),
                              height=vrp(s['height'], replaceParamFile))

            # Associate Culvert with StreamLink
            culvert.streamLink = link

        elif structType in CURVES:
            # Curve type handler
            pass

    return link

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L601-L664
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createReservoir
language: python

def _createReservoir(self, linkResult, replaceParamFile):
    """
    Create GSSHAPY Reservoir Objects Method
    """
    # Extract header variables from link result object
    header = linkResult['header']

    # Cases
    if linkResult['type'] == 'LAKE':
        # Lake handler
        initWSE = vrp(header['initwse'], replaceParamFile)
        minWSE = vrp(header['minwse'], replaceParamFile)
        maxWSE = vrp(header['maxwse'], replaceParamFile)
        numPts = header['numpts']

    elif linkResult['type'] == 'RESERVOIR':
        # Reservoir handler
        initWSE = vrp(header['res_initwse'], replaceParamFile)
        minWSE = vrp(header['res_minwse'], replaceParamFile)
        maxWSE = vrp(header['res_maxwse'], replaceParamFile)
        numPts = header['res_numpts']

    # Initialize GSSHAPY Reservoir object
    reservoir = Reservoir(initWSE=initWSE,
                          minWSE=minWSE,
                          maxWSE=maxWSE)

    # Initialize GSSHAPY StreamLink object
    link = StreamLink(linkNumber=int(header['link']),
                      type=linkResult['type'],
                      numElements=numPts)

    # Associate StreamLink with ChannelInputFile
    link.channelInputFile = self

    # Associate Reservoir with StreamLink
    reservoir.streamLink = link

    # Create ReservoirPoint objects
    for p in linkResult['points']:
        # Initialize GSSHAPY ReservoirPoint object
        resPoint = ReservoirPoint(i=p['i'],
                                  j=p['j'])

        # Associate ReservoirPoint with Reservoir
        resPoint.reservoir = reservoir

    return link

sha: 00fd4af0fd65f1614d75a52fe950a04fb0867f4c
url: https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L666-L713
partition: train
repo: CI-WATER/gsshapy
path: gsshapy/orm/cif.py
func_name: ChannelInputFile._createGeometry
language: python

def _createGeometry(self, session, spatialReferenceID):
    """
    Create PostGIS geometric objects
    """
    # Flush the current session
    session.flush()

    # Create geometry for each fluvial link
    for link in self.getFluvialLinks():
        # Retrieve the nodes for each link
        nodes = link.nodes
        nodeCoordinates = []

        # Create geometry for each node
        for node in nodes:
            # Assemble coordinates in well known text format
            coordinates = '{0} {1} {2}'.format(node.x, node.y, node.elevation)
            nodeCoordinates.append(coordinates)

            # Create well known text string for point with z coordinate
            wktPoint = 'POINT Z ({0})'.format(coordinates)

            # Write SQL statement
            statement = self._getUpdateGeometrySqlString(geometryID=node.id,
                                                         tableName=node.tableName,
                                                         spatialReferenceID=spatialReferenceID,
                                                         wktString=wktPoint)
            session.execute(statement)

        # Assemble line string in well known text format
        wktLineString = 'LINESTRING Z ({0})'.format(', '.join(nodeCoordinates))

        # Write SQL statement
        statement = self._getUpdateGeometrySqlString(geometryID=link.id,
                                                     tableName=link.tableName,
                                                     spatialReferenceID=spatialReferenceID,
                                                     wktString=wktLineString)
        session.execute(statement)
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L715-L755
train
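Note: a standalone sketch of the WKT assembly performed above; the coordinate values are illustrative, not from the source::

    nodes = [(419967.0, 4614825.0, 1423.5), (420012.0, 4614800.0, 1420.1)]
    nodeCoordinates = ['{0} {1} {2}'.format(x, y, z) for x, y, z in nodes]
    print('POINT Z ({0})'.format(nodeCoordinates[0]))
    # POINT Z (419967.0 4614825.0 1423.5)
    print('LINESTRING Z ({0})'.format(', '.join(nodeCoordinates)))
    # LINESTRING Z (419967.0 4614825.0 1423.5, 420012.0 4614800.0 1420.1)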
CI-WATER/gsshapy
gsshapy/orm/cif.py
ChannelInputFile._writeConnectivity
def _writeConnectivity(self, links, fileObject):
    """
    Write Connectivity Lines to File Method
    """
    for link in links:
        linkNum = link.linkNumber
        downLink = link.downstreamLinkID
        numUpLinks = link.numUpstreamLinks
        upLinks = ''
        for upLink in link.upstreamLinks:
            upLinks = '{}{:>5}'.format(upLinks, str(upLink.upstreamLinkID))

        line = 'CONNECT{:>5}{:>5}{:>5}{}\n'.format(linkNum, downLink, numUpLinks, upLinks)
        fileObject.write(line)
    fileObject.write('\n')
python
def _writeConnectivity(self, links, fileObject):
    """
    Write Connectivity Lines to File Method
    """
    for link in links:
        linkNum = link.linkNumber
        downLink = link.downstreamLinkID
        numUpLinks = link.numUpstreamLinks
        upLinks = ''
        for upLink in link.upstreamLinks:
            upLinks = '{}{:>5}'.format(upLinks, str(upLink.upstreamLinkID))

        line = 'CONNECT{:>5}{:>5}{:>5}{}\n'.format(linkNum, downLink, numUpLinks, upLinks)
        fileObject.write(line)
    fileObject.write('\n')
[ "def", "_writeConnectivity", "(", "self", ",", "links", ",", "fileObject", ")", ":", "for", "link", "in", "links", ":", "linkNum", "=", "link", ".", "linkNumber", "downLink", "=", "link", ".", "downstreamLinkID", "numUpLinks", "=", "link", ".", "numUpstreamLinks", "upLinks", "=", "''", "for", "upLink", "in", "link", ".", "upstreamLinks", ":", "upLinks", "=", "'{}{:>5}'", ".", "format", "(", "upLinks", ",", "str", "(", "upLink", ".", "upstreamLinkID", ")", ")", "line", "=", "'CONNECT{:>5}{:>5}{:>5}{}\\n'", ".", "format", "(", "linkNum", ",", "downLink", ",", "numUpLinks", ",", "upLinks", ")", "fileObject", ".", "write", "(", "line", ")", "fileObject", ".", "write", "(", "'\\n'", ")" ]
Write Connectivity Lines to File Method
[ "Write", "Connectivity", "Lines", "to", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L757-L771
train
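Note: the fixed-width CONNECT record produced above, shown for a hypothetical link 3 that flows to link 4 and receives links 1 and 2::

    upLinks = ''.join('{:>5}'.format(i) for i in (1, 2))
    print('CONNECT{:>5}{:>5}{:>5}{}'.format(3, 4, 2, upLinks))
    # CONNECT    3    4    2    1    2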
CI-WATER/gsshapy
gsshapy/orm/cif.py
ChannelInputFile._writeLinks
def _writeLinks(self, links, fileObject, replaceParamFile):
    """
    Write Link Lines to File Method
    """
    for link in links:
        linkType = link.type
        fileObject.write('LINK %s\n' % link.linkNumber)

        # Cases
        if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
            self._writeCrossSectionLink(link, fileObject, replaceParamFile)

        elif linkType == 'STRUCTURE':
            self._writeStructureLink(link, fileObject, replaceParamFile)

        elif linkType in ('RESERVOIR', 'LAKE'):
            self._writeReservoirLink(link, fileObject, replaceParamFile)

        else:
            log.error('OOPS: CIF LINE 417')  # THIS SHOULDN'T HAPPEN

        fileObject.write('\n')
python
def _writeLinks(self, links, fileObject, replaceParamFile):
    """
    Write Link Lines to File Method
    """
    for link in links:
        linkType = link.type
        fileObject.write('LINK %s\n' % link.linkNumber)

        # Cases
        if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
            self._writeCrossSectionLink(link, fileObject, replaceParamFile)

        elif linkType == 'STRUCTURE':
            self._writeStructureLink(link, fileObject, replaceParamFile)

        elif linkType in ('RESERVOIR', 'LAKE'):
            self._writeReservoirLink(link, fileObject, replaceParamFile)

        else:
            log.error('OOPS: CIF LINE 417')  # THIS SHOULDN'T HAPPEN

        fileObject.write('\n')
[ "def", "_writeLinks", "(", "self", ",", "links", ",", "fileObject", ",", "replaceParamFile", ")", ":", "for", "link", "in", "links", ":", "linkType", "=", "link", ".", "type", "fileObject", ".", "write", "(", "'LINK %s\\n'", "%", "link", ".", "linkNumber", ")", "# Cases", "if", "'TRAP'", "in", "linkType", "or", "'TRAPEZOID'", "in", "linkType", "or", "'BREAKPOINT'", "in", "linkType", ":", "self", ".", "_writeCrossSectionLink", "(", "link", ",", "fileObject", ",", "replaceParamFile", ")", "elif", "linkType", "==", "'STRUCTURE'", ":", "self", ".", "_writeStructureLink", "(", "link", ",", "fileObject", ",", "replaceParamFile", ")", "elif", "linkType", "in", "(", "'RESERVOIR'", ",", "'LAKE'", ")", ":", "self", ".", "_writeReservoirLink", "(", "link", ",", "fileObject", ",", "replaceParamFile", ")", "else", ":", "log", ".", "error", "(", "'OOPS: CIF LINE 417'", ")", "# THIS SHOULDN'T HAPPEN", "fileObject", ".", "write", "(", "'\\n'", ")" ]
Write Link Lines to File Method
[ "Write", "Link", "Lines", "to", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L773-L794
train
CI-WATER/gsshapy
gsshapy/orm/cif.py
ChannelInputFile._writeCrossSectionLink
def _writeCrossSectionLink(self, link, fileObject, replaceParamFile):
    """
    Write Cross Section Link to File Method
    """
    linkType = link.type

    # Write cross section link header
    dx = vwp(link.dx, replaceParamFile)
    try:
        fileObject.write('DX %.6f\n' % dx)
    except:
        fileObject.write('DX %s\n' % dx)

    fileObject.write('%s\n' % linkType)
    fileObject.write('NODES %s\n' % link.numElements)

    for node in link.nodes:
        # Write node information
        fileObject.write('NODE %s\n' % node.nodeNumber)
        fileObject.write('X_Y %.6f %.6f\n' % (node.x, node.y))
        fileObject.write('ELEV %.6f\n' % node.elevation)

        if node.nodeNumber == 1:
            # Write cross section information after first node
            fileObject.write('XSEC\n')

            # Cases
            if 'TRAPEZOID' in linkType or 'TRAP' in linkType:
                # Retrieve cross section
                xSec = link.trapezoidalCS

                # Write cross section properties
                mannings_n = vwp(xSec.mannings_n, replaceParamFile)
                bottomWidth = vwp(xSec.bottomWidth, replaceParamFile)
                bankfullDepth = vwp(xSec.bankfullDepth, replaceParamFile)
                sideSlope = vwp(xSec.sideSlope, replaceParamFile)

                try:
                    fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
                except:
                    fileObject.write('MANNINGS_N %s\n' % mannings_n)
                try:
                    fileObject.write('BOTTOM_WIDTH %.6f\n' % bottomWidth)
                except:
                    fileObject.write('BOTTOM_WIDTH %s\n' % bottomWidth)
                try:
                    fileObject.write('BANKFULL_DEPTH %.6f\n' % bankfullDepth)
                except:
                    fileObject.write('BANKFULL_DEPTH %s\n' % bankfullDepth)
                try:
                    fileObject.write('SIDE_SLOPE %.6f\n' % sideSlope)
                except:
                    fileObject.write('SIDE_SLOPE %s\n' % sideSlope)

                # Write optional cross section properties
                self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)

            elif 'BREAKPOINT' in linkType:
                # Retrieve cross section
                xSec = link.breakpointCS

                # Write cross section properties
                mannings_n = vwp(xSec.mannings_n, replaceParamFile)
                try:
                    fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
                except:
                    fileObject.write('MANNINGS_N %s\n' % mannings_n)

                fileObject.write('NPAIRS %s\n' % xSec.numPairs)
                fileObject.write('NUM_INTERP %s\n' % vwp(xSec.numInterp, replaceParamFile))

                # Write optional cross section properties
                self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)

                # Write breakpoint lines
                for bp in xSec.breakpoints:
                    fileObject.write('X1 %.6f %.6f\n' % (bp.x, bp.y))
            else:
                log.error('OOPS: MISSED A CROSS SECTION TYPE. CIF LINE 580. {0}'.format(linkType))
python
def _writeCrossSectionLink(self, link, fileObject, replaceParamFile):
    """
    Write Cross Section Link to File Method
    """
    linkType = link.type

    # Write cross section link header
    dx = vwp(link.dx, replaceParamFile)
    try:
        fileObject.write('DX %.6f\n' % dx)
    except:
        fileObject.write('DX %s\n' % dx)

    fileObject.write('%s\n' % linkType)
    fileObject.write('NODES %s\n' % link.numElements)

    for node in link.nodes:
        # Write node information
        fileObject.write('NODE %s\n' % node.nodeNumber)
        fileObject.write('X_Y %.6f %.6f\n' % (node.x, node.y))
        fileObject.write('ELEV %.6f\n' % node.elevation)

        if node.nodeNumber == 1:
            # Write cross section information after first node
            fileObject.write('XSEC\n')

            # Cases
            if 'TRAPEZOID' in linkType or 'TRAP' in linkType:
                # Retrieve cross section
                xSec = link.trapezoidalCS

                # Write cross section properties
                mannings_n = vwp(xSec.mannings_n, replaceParamFile)
                bottomWidth = vwp(xSec.bottomWidth, replaceParamFile)
                bankfullDepth = vwp(xSec.bankfullDepth, replaceParamFile)
                sideSlope = vwp(xSec.sideSlope, replaceParamFile)

                try:
                    fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
                except:
                    fileObject.write('MANNINGS_N %s\n' % mannings_n)
                try:
                    fileObject.write('BOTTOM_WIDTH %.6f\n' % bottomWidth)
                except:
                    fileObject.write('BOTTOM_WIDTH %s\n' % bottomWidth)
                try:
                    fileObject.write('BANKFULL_DEPTH %.6f\n' % bankfullDepth)
                except:
                    fileObject.write('BANKFULL_DEPTH %s\n' % bankfullDepth)
                try:
                    fileObject.write('SIDE_SLOPE %.6f\n' % sideSlope)
                except:
                    fileObject.write('SIDE_SLOPE %s\n' % sideSlope)

                # Write optional cross section properties
                self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)

            elif 'BREAKPOINT' in linkType:
                # Retrieve cross section
                xSec = link.breakpointCS

                # Write cross section properties
                mannings_n = vwp(xSec.mannings_n, replaceParamFile)
                try:
                    fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
                except:
                    fileObject.write('MANNINGS_N %s\n' % mannings_n)

                fileObject.write('NPAIRS %s\n' % xSec.numPairs)
                fileObject.write('NUM_INTERP %s\n' % vwp(xSec.numInterp, replaceParamFile))

                # Write optional cross section properties
                self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)

                # Write breakpoint lines
                for bp in xSec.breakpoints:
                    fileObject.write('X1 %.6f %.6f\n' % (bp.x, bp.y))
            else:
                log.error('OOPS: MISSED A CROSS SECTION TYPE. CIF LINE 580. {0}'.format(linkType))
[ "def", "_writeCrossSectionLink", "(", "self", ",", "link", ",", "fileObject", ",", "replaceParamFile", ")", ":", "linkType", "=", "link", ".", "type", "# Write cross section link header", "dx", "=", "vwp", "(", "link", ".", "dx", ",", "replaceParamFile", ")", "try", ":", "fileObject", ".", "write", "(", "'DX %.6f\\n'", "%", "dx", ")", "except", ":", "fileObject", ".", "write", "(", "'DX %s\\n'", "%", "dx", ")", "fileObject", ".", "write", "(", "'%s\\n'", "%", "linkType", ")", "fileObject", ".", "write", "(", "'NODES %s\\n'", "%", "link", ".", "numElements", ")", "for", "node", "in", "link", ".", "nodes", ":", "# Write node information", "fileObject", ".", "write", "(", "'NODE %s\\n'", "%", "node", ".", "nodeNumber", ")", "fileObject", ".", "write", "(", "'X_Y %.6f %.6f\\n'", "%", "(", "node", ".", "x", ",", "node", ".", "y", ")", ")", "fileObject", ".", "write", "(", "'ELEV %.6f\\n'", "%", "node", ".", "elevation", ")", "if", "node", ".", "nodeNumber", "==", "1", ":", "# Write cross section information after first node", "fileObject", ".", "write", "(", "'XSEC\\n'", ")", "# Cases", "if", "'TRAPEZOID'", "in", "linkType", "or", "'TRAP'", "in", "linkType", ":", "# Retrieve cross section", "xSec", "=", "link", ".", "trapezoidalCS", "# Write cross section properties", "mannings_n", "=", "vwp", "(", "xSec", ".", "mannings_n", ",", "replaceParamFile", ")", "bottomWidth", "=", "vwp", "(", "xSec", ".", "bottomWidth", ",", "replaceParamFile", ")", "bankfullDepth", "=", "vwp", "(", "xSec", ".", "bankfullDepth", ",", "replaceParamFile", ")", "sideSlope", "=", "vwp", "(", "xSec", ".", "sideSlope", ",", "replaceParamFile", ")", "try", ":", "fileObject", ".", "write", "(", "'MANNINGS_N %.6f\\n'", "%", "mannings_n", ")", "except", ":", "fileObject", ".", "write", "(", "'MANNINGS_N %s\\n'", "%", "mannings_n", ")", "try", ":", "fileObject", ".", "write", "(", "'BOTTOM_WIDTH %.6f\\n'", "%", "bottomWidth", ")", "except", ":", "fileObject", ".", "write", "(", "'BOTTOM_WIDTH %s\\n'", "%", "bottomWidth", ")", "try", ":", "fileObject", ".", "write", "(", "'BANKFULL_DEPTH %.6f\\n'", "%", "bankfullDepth", ")", "except", ":", "fileObject", ".", "write", "(", "'BANKFULL_DEPTH %s\\n'", "%", "bankfullDepth", ")", "try", ":", "fileObject", ".", "write", "(", "'SIDE_SLOPE %.6f\\n'", "%", "sideSlope", ")", "except", ":", "fileObject", ".", "write", "(", "'SIDE_SLOPE %s\\n'", "%", "sideSlope", ")", "# Write optional cross section properties", "self", ".", "_writeOptionalXsecCards", "(", "fileObject", "=", "fileObject", ",", "xSec", "=", "xSec", ",", "replaceParamFile", "=", "replaceParamFile", ")", "elif", "'BREAKPOINT'", "in", "linkType", ":", "# Retrieve cross section", "xSec", "=", "link", ".", "breakpointCS", "# Write cross section properties", "mannings_n", "=", "vwp", "(", "xSec", ".", "mannings_n", ",", "replaceParamFile", ")", "try", ":", "fileObject", ".", "write", "(", "'MANNINGS_N %.6f\\n'", "%", "mannings_n", ")", "except", ":", "fileObject", ".", "write", "(", "'MANNINGS_N %s\\n'", "%", "mannings_n", ")", "fileObject", ".", "write", "(", "'NPAIRS %s\\n'", "%", "xSec", ".", "numPairs", ")", "fileObject", ".", "write", "(", "'NUM_INTERP %s\\n'", "%", "vwp", "(", "xSec", ".", "numInterp", ",", "replaceParamFile", ")", ")", "# Write optional cross section properties", "self", ".", "_writeOptionalXsecCards", "(", "fileObject", "=", "fileObject", ",", "xSec", "=", "xSec", ",", "replaceParamFile", "=", "replaceParamFile", ")", "# Write breakpoint lines", "for", "bp", "in", "xSec", ".", "breakpoints", ":", 
"fileObject", ".", "write", "(", "'X1 %.6f %.6f\\n'", "%", "(", "bp", ".", "x", ",", "bp", ".", "y", ")", ")", "else", ":", "log", ".", "error", "(", "'OOPS: MISSED A CROSS SECTION TYPE. CIF LINE 580. {0}'", ".", "format", "(", "linkType", ")", ")" ]
Write Cross Section Link to File Method
[ "Write", "Cross", "Section", "Link", "to", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L1003-L1085
train
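Note: the repeated try/except pattern above guards against vwp() returning a replacement-parameter token instead of a float; a minimal sketch (the placeholder string is made up)::

    for dx in (10.0, '[DX_PARAM]'):
        try:
            card = 'DX %.6f' % dx       # works for numeric values
        except TypeError:               # '%.6f' % str raises TypeError
            card = 'DX %s' % dx
        print(card)
    # DX 10.000000
    # DX [DX_PARAM]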
CI-WATER/gsshapy
gsshapy/orm/cif.py
ChannelInputFile._writeOptionalXsecCards
def _writeOptionalXsecCards(self, fileObject, xSec, replaceParamFile):
    """
    Write Optional Cross Section Cards to File Method
    """
    if xSec.erode:
        fileObject.write('ERODE\n')

    if xSec.maxErosion != None:
        fileObject.write('MAX_EROSION %.6f\n' % xSec.maxErosion)

    if xSec.subsurface:
        fileObject.write('SUBSURFACE\n')

    if xSec.mRiver != None:
        mRiver = vwp(xSec.mRiver, replaceParamFile)
        try:
            fileObject.write('M_RIVER %.6f\n' % mRiver)
        except:
            fileObject.write('M_RIVER %s\n' % mRiver)

    if xSec.kRiver != None:
        kRiver = vwp(xSec.kRiver, replaceParamFile)
        try:
            fileObject.write('K_RIVER %.6f\n' % kRiver)
        except:
            fileObject.write('K_RIVER %s\n' % kRiver)
python
def _writeOptionalXsecCards(self, fileObject, xSec, replaceParamFile):
    """
    Write Optional Cross Section Cards to File Method
    """
    if xSec.erode:
        fileObject.write('ERODE\n')

    if xSec.maxErosion != None:
        fileObject.write('MAX_EROSION %.6f\n' % xSec.maxErosion)

    if xSec.subsurface:
        fileObject.write('SUBSURFACE\n')

    if xSec.mRiver != None:
        mRiver = vwp(xSec.mRiver, replaceParamFile)
        try:
            fileObject.write('M_RIVER %.6f\n' % mRiver)
        except:
            fileObject.write('M_RIVER %s\n' % mRiver)

    if xSec.kRiver != None:
        kRiver = vwp(xSec.kRiver, replaceParamFile)
        try:
            fileObject.write('K_RIVER %.6f\n' % kRiver)
        except:
            fileObject.write('K_RIVER %s\n' % kRiver)
[ "def", "_writeOptionalXsecCards", "(", "self", ",", "fileObject", ",", "xSec", ",", "replaceParamFile", ")", ":", "if", "xSec", ".", "erode", ":", "fileObject", ".", "write", "(", "'ERODE\\n'", ")", "if", "xSec", ".", "maxErosion", "!=", "None", ":", "fileObject", ".", "write", "(", "'MAX_EROSION %.6f\\n'", "%", "xSec", ".", "maxErosion", ")", "if", "xSec", ".", "subsurface", ":", "fileObject", ".", "write", "(", "'SUBSURFACE\\n'", ")", "if", "xSec", ".", "mRiver", "!=", "None", ":", "mRiver", "=", "vwp", "(", "xSec", ".", "mRiver", ",", "replaceParamFile", ")", "try", ":", "fileObject", ".", "write", "(", "'M_RIVER %.6f\\n'", "%", "mRiver", ")", "except", ":", "fileObject", ".", "write", "(", "'M_RIVER %s\\n'", "%", "mRiver", ")", "if", "xSec", ".", "kRiver", "!=", "None", ":", "kRiver", "=", "vwp", "(", "xSec", ".", "kRiver", ",", "replaceParamFile", ")", "try", ":", "fileObject", ".", "write", "(", "'K_RIVER %.6f\\n'", "%", "kRiver", ")", "except", ":", "fileObject", ".", "write", "(", "'K_RIVER %s\\n'", "%", "kRiver", ")" ]
Write Optional Cross Section Cards to File Method
[ "Write", "Optional", "Cross", "Section", "Cards", "to", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L1087-L1112
train
CI-WATER/gsshapy
gsshapy/modeling/framework.py
replace_file
def replace_file(from_file, to_file):
    """
    Replaces to_file with from_file
    """
    try:
        os.remove(to_file)
    except OSError:
        pass
    copy(from_file, to_file)
python
def replace_file(from_file, to_file):
    """
    Replaces to_file with from_file
    """
    try:
        os.remove(to_file)
    except OSError:
        pass
    copy(from_file, to_file)
[ "def", "replace_file", "(", "from_file", ",", "to_file", ")", ":", "try", ":", "os", ".", "remove", "(", "to_file", ")", "except", "OSError", ":", "pass", "copy", "(", "from_file", ",", "to_file", ")" ]
Replaces to_file with from_file
[ "Replaces", "to_file", "with", "from_file" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L29-L37
train
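Note: a runnable usage sketch of replace_file with the module-level imports it depends on; the paths in the commented call are hypothetical::

    import os
    from shutil import copy

    def replace_file(from_file, to_file):
        try:
            os.remove(to_file)   # tolerate a missing destination
        except OSError:
            pass
        copy(from_file, to_file)

    # replace_file('scratch/new_model.prj', 'gssha_project/model.prj')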
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework._prepare_lsm_gag
def _prepare_lsm_gag(self):
    """
    Determines whether to prepare gage data from LSM
    """
    lsm_required_vars = (self.lsm_precip_data_var,
                         self.lsm_precip_type)

    return self.lsm_input_valid and (None not in lsm_required_vars)
python
def _prepare_lsm_gag(self):
    """
    Determines whether to prepare gage data from LSM
    """
    lsm_required_vars = (self.lsm_precip_data_var,
                         self.lsm_precip_type)

    return self.lsm_input_valid and (None not in lsm_required_vars)
[ "def", "_prepare_lsm_gag", "(", "self", ")", ":", "lsm_required_vars", "=", "(", "self", ".", "lsm_precip_data_var", ",", "self", ".", "lsm_precip_type", ")", "return", "self", ".", "lsm_input_valid", "and", "(", "None", "not", "in", "lsm_required_vars", ")" ]
Determines whether to prepare gage data from LSM
[ "Determines", "whether", "to", "prepare", "gage", "data", "from", "LSM" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L338-L345
train
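Note: the `None not in tuple` completeness check used above, sketched with hypothetical values::

    lsm_required_vars = ('RAINC', 'GAGES')   # hypothetical settings
    print(None not in lsm_required_vars)     # True  -> prepare gage data
    print(None not in ('RAINC', None))       # False -> skip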
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework._update_card_file_location
def _update_card_file_location(self, card_name, new_directory):
    """
    Moves card to new gssha working directory
    """
    with tmp_chdir(self.gssha_directory):
        file_card = self.project_manager.getCard(card_name)
        if file_card:
            if file_card.value:
                original_location = file_card.value.strip("'").strip('"')
                new_location = os.path.join(new_directory,
                                            os.path.basename(original_location))
                file_card.value = '"{0}"'.format(os.path.basename(original_location))
                try:
                    move(original_location, new_location)
                except OSError as ex:
                    log.warning(ex)
                    pass
python
def _update_card_file_location(self, card_name, new_directory):
    """
    Moves card to new gssha working directory
    """
    with tmp_chdir(self.gssha_directory):
        file_card = self.project_manager.getCard(card_name)
        if file_card:
            if file_card.value:
                original_location = file_card.value.strip("'").strip('"')
                new_location = os.path.join(new_directory,
                                            os.path.basename(original_location))
                file_card.value = '"{0}"'.format(os.path.basename(original_location))
                try:
                    move(original_location, new_location)
                except OSError as ex:
                    log.warning(ex)
                    pass
[ "def", "_update_card_file_location", "(", "self", ",", "card_name", ",", "new_directory", ")", ":", "with", "tmp_chdir", "(", "self", ".", "gssha_directory", ")", ":", "file_card", "=", "self", ".", "project_manager", ".", "getCard", "(", "card_name", ")", "if", "file_card", ":", "if", "file_card", ".", "value", ":", "original_location", "=", "file_card", ".", "value", ".", "strip", "(", "\"'\"", ")", ".", "strip", "(", "'\"'", ")", "new_location", "=", "os", ".", "path", ".", "join", "(", "new_directory", ",", "os", ".", "path", ".", "basename", "(", "original_location", ")", ")", "file_card", ".", "value", "=", "'\"{0}\"'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "original_location", ")", ")", "try", ":", "move", "(", "original_location", ",", "new_location", ")", "except", "OSError", "as", "ex", ":", "log", ".", "warning", "(", "ex", ")", "pass" ]
Moves card to new gssha working directory
[ "Moves", "card", "to", "new", "gssha", "working", "directory" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L366-L382
train
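Note: how a quoted card value is normalized before the file is moved; the file names are illustrative::

    card_value = '"hmet_data/hmet.nc"'
    original_location = card_value.strip("'").strip('"')
    print(original_location)               # hmet_data/hmet.nc
    print('"{0}"'.format('hmet.nc'))       # "hmet.nc" -- rewritten card value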
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.download_spt_forecast
def download_spt_forecast(self, extract_directory):
    """
    Downloads Streamflow Prediction Tool forecast data
    """
    needed_vars = (self.spt_watershed_name,
                   self.spt_subbasin_name,
                   self.spt_forecast_date_string,
                   self.ckan_engine_url,
                   self.ckan_api_key,
                   self.ckan_owner_organization)

    if None not in needed_vars:
        er_manager = ECMWFRAPIDDatasetManager(self.ckan_engine_url,
                                              self.ckan_api_key,
                                              self.ckan_owner_organization)
        # TODO: Modify to only download one of the forecasts in the ensemble
        er_manager.download_prediction_dataset(watershed=self.spt_watershed_name,
                                               subbasin=self.spt_subbasin_name,
                                               date_string=self.spt_forecast_date_string,  # '20160711.1200'
                                               extract_directory=extract_directory)

        return glob(os.path.join(extract_directory,
                                 self.spt_forecast_date_string,
                                 "Qout*52.nc"))[0]

    elif needed_vars.count(None) == len(needed_vars):
        log.info("Skipping streamflow forecast download ...")
        return None

    else:
        raise ValueError("To download the forecasts, you need to set: \n"
                         "spt_watershed_name, spt_subbasin_name, spt_forecast_date_string \n"
                         "ckan_engine_url, ckan_api_key, and ckan_owner_organization.")
python
def download_spt_forecast(self, extract_directory):
    """
    Downloads Streamflow Prediction Tool forecast data
    """
    needed_vars = (self.spt_watershed_name,
                   self.spt_subbasin_name,
                   self.spt_forecast_date_string,
                   self.ckan_engine_url,
                   self.ckan_api_key,
                   self.ckan_owner_organization)

    if None not in needed_vars:
        er_manager = ECMWFRAPIDDatasetManager(self.ckan_engine_url,
                                              self.ckan_api_key,
                                              self.ckan_owner_organization)
        # TODO: Modify to only download one of the forecasts in the ensemble
        er_manager.download_prediction_dataset(watershed=self.spt_watershed_name,
                                               subbasin=self.spt_subbasin_name,
                                               date_string=self.spt_forecast_date_string,  # '20160711.1200'
                                               extract_directory=extract_directory)

        return glob(os.path.join(extract_directory,
                                 self.spt_forecast_date_string,
                                 "Qout*52.nc"))[0]

    elif needed_vars.count(None) == len(needed_vars):
        log.info("Skipping streamflow forecast download ...")
        return None

    else:
        raise ValueError("To download the forecasts, you need to set: \n"
                         "spt_watershed_name, spt_subbasin_name, spt_forecast_date_string \n"
                         "ckan_engine_url, ckan_api_key, and ckan_owner_organization.")
[ "def", "download_spt_forecast", "(", "self", ",", "extract_directory", ")", ":", "needed_vars", "=", "(", "self", ".", "spt_watershed_name", ",", "self", ".", "spt_subbasin_name", ",", "self", ".", "spt_forecast_date_string", ",", "self", ".", "ckan_engine_url", ",", "self", ".", "ckan_api_key", ",", "self", ".", "ckan_owner_organization", ")", "if", "None", "not", "in", "needed_vars", ":", "er_manager", "=", "ECMWFRAPIDDatasetManager", "(", "self", ".", "ckan_engine_url", ",", "self", ".", "ckan_api_key", ",", "self", ".", "ckan_owner_organization", ")", "# TODO: Modify to only download one of the forecasts in the ensemble", "er_manager", ".", "download_prediction_dataset", "(", "watershed", "=", "self", ".", "spt_watershed_name", ",", "subbasin", "=", "self", ".", "spt_subbasin_name", ",", "date_string", "=", "self", ".", "spt_forecast_date_string", ",", "# '20160711.1200'", "extract_directory", "=", "extract_directory", ")", "return", "glob", "(", "os", ".", "path", ".", "join", "(", "extract_directory", ",", "self", ".", "spt_forecast_date_string", ",", "\"Qout*52.nc\"", ")", ")", "[", "0", "]", "elif", "needed_vars", ".", "count", "(", "None", ")", "==", "len", "(", "needed_vars", ")", ":", "log", ".", "info", "(", "\"Skipping streamflow forecast download ...\"", ")", "return", "None", "else", ":", "raise", "ValueError", "(", "\"To download the forecasts, you need to set: \\n\"", "\"spt_watershed_name, spt_subbasin_name, spt_forecast_date_string \\n\"", "\"ckan_engine_url, ckan_api_key, and ckan_owner_organization.\"", ")" ]
Downloads Streamflow Prediction Tool forecast data
[ "Downloads", "Streamflow", "Prediction", "Tool", "forecast", "data" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L384-L414
train
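Note: the all-set / all-unset / mixed branch logic above, sketched with hypothetical settings::

    needed_vars = ('magdalena', 'el_banco', '20160711.1200', None, None, None)
    if None not in needed_vars:
        print('download forecast')
    elif needed_vars.count(None) == len(needed_vars):
        print('skip download')
    else:
        print('incomplete configuration -> ValueError')   # this branch fires here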
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.prepare_hmet
def prepare_hmet(self):
    """
    Prepare HMET data for simulation
    """
    if self._prepare_lsm_hmet:
        netcdf_file_path = None
        hmet_ascii_output_folder = None
        if self.output_netcdf:
            netcdf_file_path = '{0}_hmet.nc'.format(self.project_manager.name)
            if self.hotstart_minimal_mode:
                netcdf_file_path = '{0}_hmet_hotstart.nc'.format(self.project_manager.name)
        else:
            hmet_ascii_output_folder = 'hmet_data_{0}to{1}'
            if self.hotstart_minimal_mode:
                hmet_ascii_output_folder += "_hotstart"

        self.event_manager.prepare_hmet_lsm(self.lsm_data_var_map_array,
                                            hmet_ascii_output_folder,
                                            netcdf_file_path)
        self.simulation_modified_input_cards += ["HMET_NETCDF",
                                                 "HMET_ASCII"]
    else:
        log.info("HMET preparation skipped due to missing parameters ...")
python
def prepare_hmet(self):
    """
    Prepare HMET data for simulation
    """
    if self._prepare_lsm_hmet:
        netcdf_file_path = None
        hmet_ascii_output_folder = None
        if self.output_netcdf:
            netcdf_file_path = '{0}_hmet.nc'.format(self.project_manager.name)
            if self.hotstart_minimal_mode:
                netcdf_file_path = '{0}_hmet_hotstart.nc'.format(self.project_manager.name)
        else:
            hmet_ascii_output_folder = 'hmet_data_{0}to{1}'
            if self.hotstart_minimal_mode:
                hmet_ascii_output_folder += "_hotstart"

        self.event_manager.prepare_hmet_lsm(self.lsm_data_var_map_array,
                                            hmet_ascii_output_folder,
                                            netcdf_file_path)
        self.simulation_modified_input_cards += ["HMET_NETCDF",
                                                 "HMET_ASCII"]
    else:
        log.info("HMET preparation skipped due to missing parameters ...")
[ "def", "prepare_hmet", "(", "self", ")", ":", "if", "self", ".", "_prepare_lsm_hmet", ":", "netcdf_file_path", "=", "None", "hmet_ascii_output_folder", "=", "None", "if", "self", ".", "output_netcdf", ":", "netcdf_file_path", "=", "'{0}_hmet.nc'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ")", "if", "self", ".", "hotstart_minimal_mode", ":", "netcdf_file_path", "=", "'{0}_hmet_hotstart.nc'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ")", "else", ":", "hmet_ascii_output_folder", "=", "'hmet_data_{0}to{1}'", "if", "self", ".", "hotstart_minimal_mode", ":", "hmet_ascii_output_folder", "+=", "\"_hotstart\"", "self", ".", "event_manager", ".", "prepare_hmet_lsm", "(", "self", ".", "lsm_data_var_map_array", ",", "hmet_ascii_output_folder", ",", "netcdf_file_path", ")", "self", ".", "simulation_modified_input_cards", "+=", "[", "\"HMET_NETCDF\"", ",", "\"HMET_ASCII\"", "]", "else", ":", "log", ".", "info", "(", "\"HMET preparation skipped due to missing parameters ...\"", ")" ]
Prepare HMET data for simulation
[ "Prepare", "HMET", "data", "for", "simulation" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L416-L438
train
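Note: the ASCII output folder template above carries two placeholders, presumably filled in downstream by prepare_hmet_lsm; a sketch with made-up dates::

    hmet_ascii_output_folder = 'hmet_data_{0}to{1}'
    hmet_ascii_output_folder += '_hotstart'            # hotstart_minimal_mode case
    print(hmet_ascii_output_folder.format('20160711', '20160712'))
    # hmet_data_20160711to20160712_hotstart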
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.prepare_gag
def prepare_gag(self):
    """
    Prepare gage data for simulation
    """
    if self._prepare_lsm_gag:
        self.event_manager.prepare_gag_lsm(self.lsm_precip_data_var,
                                           self.lsm_precip_type,
                                           self.precip_interpolation_type)
        self.simulation_modified_input_cards.append("PRECIP_FILE")
    else:
        log.info("Gage file preparation skipped due to missing parameters ...")
python
def prepare_gag(self):
    """
    Prepare gage data for simulation
    """
    if self._prepare_lsm_gag:
        self.event_manager.prepare_gag_lsm(self.lsm_precip_data_var,
                                           self.lsm_precip_type,
                                           self.precip_interpolation_type)
        self.simulation_modified_input_cards.append("PRECIP_FILE")
    else:
        log.info("Gage file preparation skipped due to missing parameters ...")
[ "def", "prepare_gag", "(", "self", ")", ":", "if", "self", ".", "_prepare_lsm_gag", ":", "self", ".", "event_manager", ".", "prepare_gag_lsm", "(", "self", ".", "lsm_precip_data_var", ",", "self", ".", "lsm_precip_type", ",", "self", ".", "precip_interpolation_type", ")", "self", ".", "simulation_modified_input_cards", ".", "append", "(", "\"PRECIP_FILE\"", ")", "else", ":", "log", ".", "info", "(", "\"Gage file preparation skipped due to missing parameters ...\"", ")" ]
Prepare gage data for simulation
[ "Prepare", "gage", "data", "for", "simulation" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L440-L450
train
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.rapid_to_gssha
def rapid_to_gssha(self):
    """
    Prepare RAPID data for simulation
    """
    # if no streamflow given, download forecast
    if self.path_to_rapid_qout is None and self.connection_list_file:
        rapid_qout_directory = os.path.join(self.gssha_directory, 'rapid_streamflow')
        try:
            os.mkdir(rapid_qout_directory)
        except OSError:
            pass
        self.path_to_rapid_qout = self.download_spt_forecast(rapid_qout_directory)

    # prepare input for GSSHA if user wants
    if self.path_to_rapid_qout is not None and self.connection_list_file:
        self.event_manager.prepare_rapid_streamflow(self.path_to_rapid_qout,
                                                    self.connection_list_file)
        self.simulation_modified_input_cards.append('CHAN_POINT_INPUT')
python
def rapid_to_gssha(self):
    """
    Prepare RAPID data for simulation
    """
    # if no streamflow given, download forecast
    if self.path_to_rapid_qout is None and self.connection_list_file:
        rapid_qout_directory = os.path.join(self.gssha_directory, 'rapid_streamflow')
        try:
            os.mkdir(rapid_qout_directory)
        except OSError:
            pass
        self.path_to_rapid_qout = self.download_spt_forecast(rapid_qout_directory)

    # prepare input for GSSHA if user wants
    if self.path_to_rapid_qout is not None and self.connection_list_file:
        self.event_manager.prepare_rapid_streamflow(self.path_to_rapid_qout,
                                                    self.connection_list_file)
        self.simulation_modified_input_cards.append('CHAN_POINT_INPUT')
[ "def", "rapid_to_gssha", "(", "self", ")", ":", "# if no streamflow given, download forecast", "if", "self", ".", "path_to_rapid_qout", "is", "None", "and", "self", ".", "connection_list_file", ":", "rapid_qout_directory", "=", "os", ".", "path", ".", "join", "(", "self", ".", "gssha_directory", ",", "'rapid_streamflow'", ")", "try", ":", "os", ".", "mkdir", "(", "rapid_qout_directory", ")", "except", "OSError", ":", "pass", "self", ".", "path_to_rapid_qout", "=", "self", ".", "download_spt_forecast", "(", "rapid_qout_directory", ")", "# prepare input for GSSHA if user wants", "if", "self", ".", "path_to_rapid_qout", "is", "not", "None", "and", "self", ".", "connection_list_file", ":", "self", ".", "event_manager", ".", "prepare_rapid_streamflow", "(", "self", ".", "path_to_rapid_qout", ",", "self", ".", "connection_list_file", ")", "self", ".", "simulation_modified_input_cards", ".", "append", "(", "'CHAN_POINT_INPUT'", ")" ]
Prepare RAPID data for simulation
[ "Prepare", "RAPID", "data", "for", "simulation" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L452-L469
train
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.hotstart
def hotstart(self):
    """
    Prepare simulation hotstart info
    """
    if self.write_hotstart:
        hotstart_time_str = self.event_manager.simulation_end.strftime("%Y%m%d_%H%M")
        try:
            os.mkdir('hotstart')
        except OSError:
            pass

        ov_hotstart_path = os.path.join('..', 'hotstart',
                                        '{0}_ov_hotstart_{1}.ovh'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_OV_HOTSTART", ov_hotstart_path, True)

        chan_hotstart_path = os.path.join('..', 'hotstart',
                                          '{0}_chan_hotstart_{1}'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_CHAN_HOTSTART", chan_hotstart_path, True)

        sm_hotstart_path = os.path.join('..', 'hotstart',
                                        '{0}_sm_hotstart_{1}.smh'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_SM_HOTSTART", sm_hotstart_path, True)
    else:
        self._delete_card("WRITE_OV_HOTSTART")
        self._delete_card("WRITE_CHAN_HOTSTART")
        self._delete_card("WRITE_SM_HOTSTART")

    if self.read_hotstart:
        hotstart_time_str = self.event_manager.simulation_start.strftime("%Y%m%d_%H%M")

        # OVERLAND
        expected_ov_hotstart = os.path.join('hotstart',
                                            '{0}_ov_hotstart_{1}.ovh'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists(expected_ov_hotstart):
            self._update_card("READ_OV_HOTSTART", os.path.join("..", expected_ov_hotstart), True)
        else:
            self._delete_card("READ_OV_HOTSTART")
            log.warning("READ_OV_HOTSTART not included as "
                        "{0} does not exist ...".format(expected_ov_hotstart))

        # CHANNEL
        expected_chan_hotstart = os.path.join('hotstart',
                                              '{0}_chan_hotstart_{1}'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists("{0}.qht".format(expected_chan_hotstart)) \
                and os.path.exists("{0}.dht".format(expected_chan_hotstart)):
            self._update_card("READ_CHAN_HOTSTART", os.path.join("..", expected_chan_hotstart), True)
        else:
            self._delete_card("READ_CHAN_HOTSTART")
            log.warning("READ_CHAN_HOTSTART not included as "
                        "{0}.qht and/or {0}.dht does not exist ...".format(expected_chan_hotstart))

        # INFILTRATION
        expected_sm_hotstart = os.path.join('hotstart',
                                            '{0}_sm_hotstart_{1}.smh'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists(expected_sm_hotstart):
            self._update_card("READ_SM_HOTSTART", os.path.join("..", expected_sm_hotstart), True)
        else:
            self._delete_card("READ_SM_HOTSTART")
            log.warning("READ_SM_HOTSTART not included as"
                        " {0} does not exist ...".format(expected_sm_hotstart))
python
def hotstart(self):
    """
    Prepare simulation hotstart info
    """
    if self.write_hotstart:
        hotstart_time_str = self.event_manager.simulation_end.strftime("%Y%m%d_%H%M")
        try:
            os.mkdir('hotstart')
        except OSError:
            pass

        ov_hotstart_path = os.path.join('..', 'hotstart',
                                        '{0}_ov_hotstart_{1}.ovh'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_OV_HOTSTART", ov_hotstart_path, True)

        chan_hotstart_path = os.path.join('..', 'hotstart',
                                          '{0}_chan_hotstart_{1}'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_CHAN_HOTSTART", chan_hotstart_path, True)

        sm_hotstart_path = os.path.join('..', 'hotstart',
                                        '{0}_sm_hotstart_{1}.smh'.format(self.project_manager.name,
                                                                         hotstart_time_str))
        self._update_card("WRITE_SM_HOTSTART", sm_hotstart_path, True)
    else:
        self._delete_card("WRITE_OV_HOTSTART")
        self._delete_card("WRITE_CHAN_HOTSTART")
        self._delete_card("WRITE_SM_HOTSTART")

    if self.read_hotstart:
        hotstart_time_str = self.event_manager.simulation_start.strftime("%Y%m%d_%H%M")

        # OVERLAND
        expected_ov_hotstart = os.path.join('hotstart',
                                            '{0}_ov_hotstart_{1}.ovh'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists(expected_ov_hotstart):
            self._update_card("READ_OV_HOTSTART", os.path.join("..", expected_ov_hotstart), True)
        else:
            self._delete_card("READ_OV_HOTSTART")
            log.warning("READ_OV_HOTSTART not included as "
                        "{0} does not exist ...".format(expected_ov_hotstart))

        # CHANNEL
        expected_chan_hotstart = os.path.join('hotstart',
                                              '{0}_chan_hotstart_{1}'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists("{0}.qht".format(expected_chan_hotstart)) \
                and os.path.exists("{0}.dht".format(expected_chan_hotstart)):
            self._update_card("READ_CHAN_HOTSTART", os.path.join("..", expected_chan_hotstart), True)
        else:
            self._delete_card("READ_CHAN_HOTSTART")
            log.warning("READ_CHAN_HOTSTART not included as "
                        "{0}.qht and/or {0}.dht does not exist ...".format(expected_chan_hotstart))

        # INFILTRATION
        expected_sm_hotstart = os.path.join('hotstart',
                                            '{0}_sm_hotstart_{1}.smh'.format(self.project_manager.name,
                                                                             hotstart_time_str))
        if os.path.exists(expected_sm_hotstart):
            self._update_card("READ_SM_HOTSTART", os.path.join("..", expected_sm_hotstart), True)
        else:
            self._delete_card("READ_SM_HOTSTART")
            log.warning("READ_SM_HOTSTART not included as"
                        " {0} does not exist ...".format(expected_sm_hotstart))
[ "def", "hotstart", "(", "self", ")", ":", "if", "self", ".", "write_hotstart", ":", "hotstart_time_str", "=", "self", ".", "event_manager", ".", "simulation_end", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", "try", ":", "os", ".", "mkdir", "(", "'hotstart'", ")", "except", "OSError", ":", "pass", "ov_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_ov_hotstart_{1}.ovh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_OV_HOTSTART\"", ",", "ov_hotstart_path", ",", "True", ")", "chan_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_chan_hotstart_{1}'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_CHAN_HOTSTART\"", ",", "chan_hotstart_path", ",", "True", ")", "sm_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_sm_hotstart_{1}.smh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_SM_HOTSTART\"", ",", "sm_hotstart_path", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"WRITE_OV_HOTSTART\"", ")", "self", ".", "_delete_card", "(", "\"WRITE_CHAN_HOTSTART\"", ")", "self", ".", "_delete_card", "(", "\"WRITE_SM_HOTSTART\"", ")", "if", "self", ".", "read_hotstart", ":", "hotstart_time_str", "=", "self", ".", "event_manager", ".", "simulation_start", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", "# OVERLAND", "expected_ov_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_ov_hotstart_{1}.ovh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "expected_ov_hotstart", ")", ":", "self", ".", "_update_card", "(", "\"READ_OV_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_ov_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_OV_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_OV_HOTSTART not included as \"", "\"{0} does not exist ...\"", ".", "format", "(", "expected_ov_hotstart", ")", ")", "# CHANNEL", "expected_chan_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_chan_hotstart_{1}'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "\"{0}.qht\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "\"{0}.dht\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", ":", "self", ".", "_update_card", "(", "\"READ_CHAN_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_chan_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_CHAN_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_CHAN_HOTSTART not included as \"", "\"{0}.qht and/or {0}.dht does not exist ...\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", "# INFILTRATION", "expected_sm_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_sm_hotstart_{1}.smh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "expected_sm_hotstart", ")", ":", 
"self", ".", "_update_card", "(", "\"READ_SM_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_sm_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_SM_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_SM_HOTSTART not included as\"", "\" {0} does not exist ...\"", ".", "format", "(", "expected_sm_hotstart", ")", ")" ]
Prepare simulation hotstart info
[ "Prepare", "simulation", "hotstart", "info" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L471-L533
train
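Note: how the hotstart file names are timestamped; the project name and end time are hypothetical::

    from datetime import datetime

    simulation_end = datetime(2016, 7, 11, 12, 0)
    hotstart_time_str = simulation_end.strftime("%Y%m%d_%H%M")
    print('{0}_ov_hotstart_{1}.ovh'.format('grid_standard', hotstart_time_str))
    # grid_standard_ov_hotstart_20160711_1200.ovh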
CI-WATER/gsshapy
gsshapy/modeling/framework.py
GSSHAFramework.run_forecast
def run_forecast(self):
    """
    Updates card & runs for RAPID to GSSHA & LSM to GSSHA
    """
    # ----------------------------------------------------------------------
    # LSM to GSSHA
    # ----------------------------------------------------------------------
    self.prepare_hmet()
    self.prepare_gag()

    # ----------------------------------------------------------------------
    # RAPID to GSSHA
    # ----------------------------------------------------------------------
    self.rapid_to_gssha()

    # ----------------------------------------------------------------------
    # HOTSTART
    # ----------------------------------------------------------------------
    self.hotstart()

    # ----------------------------------------------------------------------
    # Run GSSHA
    # ----------------------------------------------------------------------
    return self.run()
python
def run_forecast(self):
    """
    Updates card & runs for RAPID to GSSHA & LSM to GSSHA
    """
    # ----------------------------------------------------------------------
    # LSM to GSSHA
    # ----------------------------------------------------------------------
    self.prepare_hmet()
    self.prepare_gag()

    # ----------------------------------------------------------------------
    # RAPID to GSSHA
    # ----------------------------------------------------------------------
    self.rapid_to_gssha()

    # ----------------------------------------------------------------------
    # HOTSTART
    # ----------------------------------------------------------------------
    self.hotstart()

    # ----------------------------------------------------------------------
    # Run GSSHA
    # ----------------------------------------------------------------------
    return self.run()
[ "def", "run_forecast", "(", "self", ")", ":", "# ----------------------------------------------------------------------", "# LSM to GSSHA", "# ----------------------------------------------------------------------", "self", ".", "prepare_hmet", "(", ")", "self", ".", "prepare_gag", "(", ")", "# ----------------------------------------------------------------------", "# RAPID to GSSHA", "# ----------------------------------------------------------------------", "self", ".", "rapid_to_gssha", "(", ")", "# ----------------------------------------------------------------------", "# HOTSTART", "# ----------------------------------------------------------------------", "self", ".", "hotstart", "(", ")", "# ----------------------------------------------------------------------", "# Run GSSHA", "# ----------------------------------------------------------------------", "return", "self", ".", "run", "(", ")" ]
Updates card & runs for RAPID to GSSHA & LSM to GSSHA
[ "Updates", "card", "&", "runs", "for", "RAPID", "to", "GSSHA", "&", "LSM", "to", "GSSHA" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L676-L700
train
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
get_cache_key
def get_cache_key(request, page, lang, site_id, title):
    """
    Create the cache key for the current page and tag type
    """
    from cms.cache import _get_cache_key
    from cms.templatetags.cms_tags import _get_page_by_untyped_arg
    from cms.models import Page

    if not isinstance(page, Page):
        page = _get_page_by_untyped_arg(page, request, site_id)
    if not site_id:
        try:
            site_id = page.node.site_id
        except AttributeError:  # CMS_3_4
            site_id = page.site_id
    if not title:
        return _get_cache_key('page_tags', page, '', site_id) + '_type:tags_list'
    else:
        return _get_cache_key('title_tags', page, lang, site_id) + '_type:tags_list'
python
def get_cache_key(request, page, lang, site_id, title):
    """
    Create the cache key for the current page and tag type
    """
    from cms.cache import _get_cache_key
    from cms.templatetags.cms_tags import _get_page_by_untyped_arg
    from cms.models import Page

    if not isinstance(page, Page):
        page = _get_page_by_untyped_arg(page, request, site_id)
    if not site_id:
        try:
            site_id = page.node.site_id
        except AttributeError:  # CMS_3_4
            site_id = page.site_id
    if not title:
        return _get_cache_key('page_tags', page, '', site_id) + '_type:tags_list'
    else:
        return _get_cache_key('title_tags', page, lang, site_id) + '_type:tags_list'
[ "def", "get_cache_key", "(", "request", ",", "page", ",", "lang", ",", "site_id", ",", "title", ")", ":", "from", "cms", ".", "cache", "import", "_get_cache_key", "from", "cms", ".", "templatetags", ".", "cms_tags", "import", "_get_page_by_untyped_arg", "from", "cms", ".", "models", "import", "Page", "if", "not", "isinstance", "(", "page", ",", "Page", ")", ":", "page", "=", "_get_page_by_untyped_arg", "(", "page", ",", "request", ",", "site_id", ")", "if", "not", "site_id", ":", "try", ":", "site_id", "=", "page", ".", "node", ".", "site_id", "except", "AttributeError", ":", "# CMS_3_4", "site_id", "=", "page", ".", "site_id", "if", "not", "title", ":", "return", "_get_cache_key", "(", "'page_tags'", ",", "page", ",", "''", ",", "site_id", ")", "+", "'_type:tags_list'", "else", ":", "return", "_get_cache_key", "(", "'title_tags'", ",", "page", ",", "lang", ",", "site_id", ")", "+", "'_type:tags_list'" ]
Create the cache key for the current page and tag type
[ "Create", "the", "cache", "key", "for", "the", "current", "page", "and", "tag", "type" ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L5-L23
train
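Note: the function only appends a fixed suffix to django CMS's internal key; the base key below is a made-up stand-in for _get_cache_key's result::

    base_key = 'cms-page_tags-1-42'
    print(base_key + '_type:tags_list')
    # cms-page_tags-1-42_type:tags_list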
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
get_page_tags
def get_page_tags(page):
    """
    Retrieves all the tags for a Page instance.

    :param page: a Page instance
    :return: list or queryset of attached tags
    :type: List
    """
    from .models import PageTags

    try:
        return page.pagetags.tags.all()
    except PageTags.DoesNotExist:
        return []
python
def get_page_tags(page):
    """
    Retrieves all the tags for a Page instance.

    :param page: a Page instance
    :return: list or queryset of attached tags
    :type: List
    """
    from .models import PageTags

    try:
        return page.pagetags.tags.all()
    except PageTags.DoesNotExist:
        return []
[ "def", "get_page_tags", "(", "page", ")", ":", "from", ".", "models", "import", "PageTags", "try", ":", "return", "page", ".", "pagetags", ".", "tags", ".", "all", "(", ")", "except", "PageTags", ".", "DoesNotExist", ":", "return", "[", "]" ]
Retrieves all the tags for a Page instance.

:param page: a Page instance
:return: list or queryset of attached tags
:type: List
[ "Retrieves", "all", "the", "tags", "for", "a", "Page", "instance", "." ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L26-L39
train
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
page_has_tag
def page_has_tag(page, tag):
    """
    Check if a Page object is associated with the given tag.

    :param page: a Page instance
    :param tag: a Tag instance or a slug string.
    :return: whether the Page instance has the given tag attached
             (False if no Page or no attached PageTags exists)
    :type: Boolean
    """
    from .models import PageTags

    if hasattr(tag, 'slug'):
        slug = tag.slug
    else:
        slug = tag
    try:
        return page.pagetags.tags.filter(slug=slug).exists()
    except PageTags.DoesNotExist:
        return False
python
def page_has_tag(page, tag):
    """
    Check if a Page object is associated with the given tag.

    :param page: a Page instance
    :param tag: a Tag instance or a slug string.
    :return: whether the Page instance has the given tag attached
             (False if no Page or no attached PageTags exists)
    :type: Boolean
    """
    from .models import PageTags

    if hasattr(tag, 'slug'):
        slug = tag.slug
    else:
        slug = tag
    try:
        return page.pagetags.tags.filter(slug=slug).exists()
    except PageTags.DoesNotExist:
        return False
[ "def", "page_has_tag", "(", "page", ",", "tag", ")", ":", "from", ".", "models", "import", "PageTags", "if", "hasattr", "(", "tag", ",", "'slug'", ")", ":", "slug", "=", "tag", ".", "slug", "else", ":", "slug", "=", "tag", "try", ":", "return", "page", ".", "pagetags", ".", "tags", ".", "filter", "(", "slug", "=", "slug", ")", ".", "exists", "(", ")", "except", "PageTags", ".", "DoesNotExist", ":", "return", "False" ]
Check if a Page object is associated with the given tag.

:param page: a Page instance
:param tag: a Tag instance or a slug string.
:return: whether the Page instance has the given tag attached
         (False if no Page or no attached PageTags exists)
:type: Boolean
[ "Check", "if", "a", "Page", "object", "is", "associated", "with", "the", "given", "tag", "." ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L42-L61
train
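Note: the Tag-or-slug duck typing used above, sketched with a stand-in Tag class::

    class Tag(object):                  # stand-in for the real Tag model
        def __init__(self, slug):
            self.slug = slug

    def normalize(tag):
        return tag.slug if hasattr(tag, 'slug') else tag

    print(normalize(Tag('news')))       # news
    print(normalize('news'))            # news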
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
title_has_tag
def title_has_tag(page, lang, tag):
    """
    Check if a Title object is associated with the given tag.
    This function does not use fallbacks to retrieve title object.

    :param page: a Page instance
    :param lang: a language code
    :param tag: a Tag instance or a slug string.
    :return: whether the Title instance has the given tag attached
             (False if no Title or no attached TitleTags exists)
    :type: Boolean
    """
    from .models import TitleTags

    if hasattr(tag, 'slug'):
        slug = tag.slug
    else:
        slug = tag
    try:
        return page.get_title_obj(
            language=lang, fallback=False
        ).titletags.tags.filter(slug=slug).exists()
    except TitleTags.DoesNotExist:
        return False
python
def title_has_tag(page, lang, tag):
    """
    Check if a Title object is associated with the given tag.
    This function does not use fallbacks to retrieve title object.

    :param page: a Page instance
    :param lang: a language code
    :param tag: a Tag instance or a slug string.
    :return: whether the Title instance has the given tag attached
             (False if no Title or no attached TitleTags exists)
    :type: Boolean
    """
    from .models import TitleTags

    if hasattr(tag, 'slug'):
        slug = tag.slug
    else:
        slug = tag
    try:
        return page.get_title_obj(
            language=lang, fallback=False
        ).titletags.tags.filter(slug=slug).exists()
    except TitleTags.DoesNotExist:
        return False
[ "def", "title_has_tag", "(", "page", ",", "lang", ",", "tag", ")", ":", "from", ".", "models", "import", "TitleTags", "if", "hasattr", "(", "tag", ",", "'slug'", ")", ":", "slug", "=", "tag", ".", "slug", "else", ":", "slug", "=", "tag", "try", ":", "return", "page", ".", "get_title_obj", "(", "language", "=", "lang", ",", "fallback", "=", "False", ")", ".", "titletags", ".", "tags", ".", "filter", "(", "slug", "=", "slug", ")", ".", "exists", "(", ")", "except", "TitleTags", ".", "DoesNotExist", ":", "return", "False" ]
Check if a Title object is associated with the given tag.
This function does not use fallbacks to retrieve title object.

:param page: a Page instance
:param lang: a language code
:param tag: a Tag instance or a slug string.
:return: whether the Title instance has the given tag attached
         (False if no Title or no attached TitleTags exists)
:type: Boolean
[ "Check", "if", "a", "Title", "object", "is", "associated", "with", "the", "given", "tag", ".", "This", "function", "does", "not", "use", "fallbacks", "to", "retrieve", "title", "object", "." ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L82-L105
train
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
get_page_tags_from_request
def get_page_tags_from_request(request, page_lookup, lang, site, title=False):
    """
    Get the list of tags attached to a Page or a Title from a request
    from usual `page_lookup` parameters.

    :param request: request object
    :param page_lookup: a valid page_lookup argument
    :param lang: a language code
    :param site: a site id
    :param title: a boolean to extract the Page (if False) or Title instance
    :return: list of tags
    :type: List
    """
    from cms.templatetags.cms_tags import _get_page_by_untyped_arg
    from cms.utils import get_language_from_request, get_site_id
    from django.core.cache import cache
    try:
        from cms.utils import get_cms_setting
    except ImportError:
        from cms.utils.conf import get_cms_setting

    site_id = get_site_id(site)
    if lang is None:
        lang = get_language_from_request(request)
    cache_key = get_cache_key(request, page_lookup, lang, site, title)
    tags_list = cache.get(cache_key)
    if not tags_list:
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if page:
            if title:
                tags_list = get_title_tags(page, lang)
            else:
                tags_list = get_page_tags(page)
            cache.set(cache_key, tags_list, timeout=get_cms_setting('CACHE_DURATIONS')['content'])
    if not tags_list:
        tags_list = ()
    return tags_list
python
def get_page_tags_from_request(request, page_lookup, lang, site, title=False):
    """
    Get the list of tags attached to a Page or a Title from a request
    from usual `page_lookup` parameters.

    :param request: request object
    :param page_lookup: a valid page_lookup argument
    :param lang: a language code
    :param site: a site id
    :param title: a boolean to extract the Page (if False) or Title instance
    :return: list of tags
    :type: List
    """
    from cms.templatetags.cms_tags import _get_page_by_untyped_arg
    from cms.utils import get_language_from_request, get_site_id
    from django.core.cache import cache
    try:
        from cms.utils import get_cms_setting
    except ImportError:
        from cms.utils.conf import get_cms_setting

    site_id = get_site_id(site)
    if lang is None:
        lang = get_language_from_request(request)
    cache_key = get_cache_key(request, page_lookup, lang, site, title)
    tags_list = cache.get(cache_key)
    if not tags_list:
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if page:
            if title:
                tags_list = get_title_tags(page, lang)
            else:
                tags_list = get_page_tags(page)
            cache.set(cache_key, tags_list, timeout=get_cms_setting('CACHE_DURATIONS')['content'])
    if not tags_list:
        tags_list = ()
    return tags_list
[ "def", "get_page_tags_from_request", "(", "request", ",", "page_lookup", ",", "lang", ",", "site", ",", "title", "=", "False", ")", ":", "from", "cms", ".", "templatetags", ".", "cms_tags", "import", "_get_page_by_untyped_arg", "from", "cms", ".", "utils", "import", "get_language_from_request", ",", "get_site_id", "from", "django", ".", "core", ".", "cache", "import", "cache", "try", ":", "from", "cms", ".", "utils", "import", "get_cms_setting", "except", "ImportError", ":", "from", "cms", ".", "utils", ".", "conf", "import", "get_cms_setting", "site_id", "=", "get_site_id", "(", "site", ")", "if", "lang", "is", "None", ":", "lang", "=", "get_language_from_request", "(", "request", ")", "cache_key", "=", "get_cache_key", "(", "request", ",", "page_lookup", ",", "lang", ",", "site", ",", "title", ")", "tags_list", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "not", "tags_list", ":", "page", "=", "_get_page_by_untyped_arg", "(", "page_lookup", ",", "request", ",", "site_id", ")", "if", "page", ":", "if", "title", ":", "tags_list", "=", "get_title_tags", "(", "page", ",", "lang", ")", "else", ":", "tags_list", "=", "get_page_tags", "(", "page", ")", "cache", ".", "set", "(", "cache_key", ",", "tags_list", ",", "timeout", "=", "get_cms_setting", "(", "'CACHE_DURATIONS'", ")", "[", "'content'", "]", ")", "if", "not", "tags_list", ":", "tags_list", "=", "(", ")", "return", "tags_list" ]
Get the list of tags attached to a Page or a Title from a request
from usual `page_lookup` parameters.

:param request: request object
:param page_lookup: a valid page_lookup argument
:param lang: a language code
:param site: a site id
:param title: a boolean to extract the Page (if False) or Title instance
:return: list of tags
:type: List
[ "Get", "the", "list", "of", "tags", "attached", "to", "a", "Page", "or", "a", "Title", "from", "a", "request", "from", "usual", "page_lookup", "parameters", "." ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L108-L146
train
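Note: the cache-aside flow above, reduced to a sketch with a plain dict standing in for Django's cache and a stub lookup::

    cache = {}

    def get_tags_cached(cache_key, lookup):
        tags_list = cache.get(cache_key)
        if not tags_list:
            tags_list = lookup() or ()
            cache[cache_key] = tags_list
        return tags_list

    print(get_tags_cached('page-1_type:tags_list', lambda: ['news', 'django']))
    print(get_tags_cached('page-1_type:tags_list', lambda: ['ignored']))  # cache hit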
nephila/djangocms-page-tags
djangocms_page_tags/utils.py
get_title_tags_from_request
def get_title_tags_from_request(request, page_lookup, lang, site):
    """
    Get the list of tags attached to a Title from a request from usual
    `page_lookup` parameters.

    :param request: request object
    :param page_lookup: a valid page_lookup argument
    :param lang: a language code
    :param site: a site id
    :return: list of tags attached to the given Title
    :type: List
    """
    return get_page_tags_from_request(request, page_lookup, lang, site, True)
python
def get_title_tags_from_request(request, page_lookup, lang, site):
    """
    Get the list of tags attached to a Title from a request, using the usual
    `page_lookup` parameters.

    :param request: request object
    :param page_lookup: a valid page_lookup argument
    :param lang: a language code
    :param site: a site id
    :return: list of tags attached to the given Title
    :type: List
    """
    return get_page_tags_from_request(request, page_lookup, lang, site, True)
[ "def", "get_title_tags_from_request", "(", "request", ",", "page_lookup", ",", "lang", ",", "site", ")", ":", "return", "get_page_tags_from_request", "(", "request", ",", "page_lookup", ",", "lang", ",", "site", ",", "True", ")" ]
Get the list of tags attached to a Title from a request, using the usual
    `page_lookup` parameters.

    :param request: request object
    :param page_lookup: a valid page_lookup argument
    :param lang: a language code
    :param site: a site id
    :return: list of tags attached to the given Title
    :type: List
[ "Get", "the", "list", "of", "tags", "attached", "to", "a", "Title", "from", "a", "request", "from", "usual", "page_lookup", "parameters", "." ]
602c9d74456d689f46ddb8d67cd64d1a42747359
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L149-L162
train
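Both helpers above follow the same cache-then-fetch shape: look up a per-page key, fall back to _get_page_by_untyped_arg on a miss, and store the result with the CMS content timeout. Because an empty result is falsy, pages without tags are re-queried on every request rather than served from cache. A dependency-free sketch of the pattern; the dict cache and the fetch_tags callable are illustrative stand-ins, not part of djangocms-page-tags:

# Minimal sketch of the cache-then-fetch pattern; a dict stands in for
# django's cache backend and fetch_tags for the database lookup.
_cache = {}

def get_tags_cached(cache_key, fetch_tags):
    tags_list = _cache.get(cache_key)
    if not tags_list:                        # miss, or a falsy cached value
        tags_list = fetch_tags()
        _cache[cache_key] = tags_list        # the real code adds a CMS timeout
    if not tags_list:
        tags_list = ()
    return tags_list

print(get_tags_cached('page:1:en', lambda: ['news', 'django']))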
CI-WATER/gsshapy
gsshapy/orm/msk.py
WatershedMaskFile.generateFromWatershedShapefile
def generateFromWatershedShapefile(self, shapefile_path, cell_size, out_raster_path=None, load_raster_to_db=True): """ Generates a mask from a watershed_shapefile Example:: from gsshapy.orm import ProjectFile, WatershedMaskFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' shapefile_path = 'watershed_boundary.shp' # Create Test DB sqlalchemy_url, sql_engine = dbt.init_sqlite_memory() # Create DB Sessions db_session = dbt.create_session(sqlalchemy_url, sql_engine) # Instantiate GSSHAPY object for reading to database project_manager = ProjectFile() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate watershed mask watershed_mask = WatershedMaskFile(session=db_session, project_file=project_manager) watershed_mask.generateFromWatershedShapefile(shapefile_path, cell_size=50, ) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard') """ if not self.projectFile: raise ValueError("Must be connected to project file ...") # match elevation grid if exists match_grid = None try: match_grid = self.projectFile.getGrid(use_mask=False) except ValueError: pass # match projection if exists wkt_projection = None try: wkt_projection = self.projectFile.getWkt() except ValueError: pass if out_raster_path is None: out_raster_path = '{0}.{1}'.format(self.projectFile.name, self.extension) # make sure paths are absolute as the working directory changes shapefile_path = os.path.abspath(shapefile_path) # make sure the polygon is valid check_watershed_boundary_geometry(shapefile_path) gr = rasterize_shapefile(shapefile_path, x_cell_size=cell_size, y_cell_size=cell_size, match_grid=match_grid, raster_nodata=0, as_gdal_grid=True, raster_wkt_proj=wkt_projection, convert_to_utm=True) with tmp_chdir(self.projectFile.project_directory): gr.to_grass_ascii(out_raster_path, print_nodata=False) self.filename = out_raster_path # update project file cards self.projectFile.setCard('WATERSHED_MASK', out_raster_path, add_quotes=True) self.projectFile.setCard('GRIDSIZE', str((gr.geotransform[1] - gr.geotransform[-1])/2.0)) self.projectFile.setCard('ROWS', str(gr.y_size)) self.projectFile.setCard('COLS', str(gr.x_size)) # write projection file if does not exist if wkt_projection is None: proj_file = ProjectionFile() proj_file.projection = gr.wkt proj_file.projectFile = self.projectFile proj_path = "{0}_prj.pro".format(os.path.splitext(out_raster_path)[0]) gr.write_prj(proj_path) self.projectFile.setCard('#PROJECTION_FILE', proj_path, add_quotes=True) # read raster into object if load_raster_to_db: self._load_raster_text(out_raster_path)
python
def generateFromWatershedShapefile(self, shapefile_path, cell_size, out_raster_path=None, load_raster_to_db=True): """ Generates a mask from a watershed_shapefile Example:: from gsshapy.orm import ProjectFile, WatershedMaskFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' shapefile_path = 'watershed_boundary.shp' # Create Test DB sqlalchemy_url, sql_engine = dbt.init_sqlite_memory() # Create DB Sessions db_session = dbt.create_session(sqlalchemy_url, sql_engine) # Instantiate GSSHAPY object for reading to database project_manager = ProjectFile() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate watershed mask watershed_mask = WatershedMaskFile(session=db_session, project_file=project_manager) watershed_mask.generateFromWatershedShapefile(shapefile_path, cell_size=50, ) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard') """ if not self.projectFile: raise ValueError("Must be connected to project file ...") # match elevation grid if exists match_grid = None try: match_grid = self.projectFile.getGrid(use_mask=False) except ValueError: pass # match projection if exists wkt_projection = None try: wkt_projection = self.projectFile.getWkt() except ValueError: pass if out_raster_path is None: out_raster_path = '{0}.{1}'.format(self.projectFile.name, self.extension) # make sure paths are absolute as the working directory changes shapefile_path = os.path.abspath(shapefile_path) # make sure the polygon is valid check_watershed_boundary_geometry(shapefile_path) gr = rasterize_shapefile(shapefile_path, x_cell_size=cell_size, y_cell_size=cell_size, match_grid=match_grid, raster_nodata=0, as_gdal_grid=True, raster_wkt_proj=wkt_projection, convert_to_utm=True) with tmp_chdir(self.projectFile.project_directory): gr.to_grass_ascii(out_raster_path, print_nodata=False) self.filename = out_raster_path # update project file cards self.projectFile.setCard('WATERSHED_MASK', out_raster_path, add_quotes=True) self.projectFile.setCard('GRIDSIZE', str((gr.geotransform[1] - gr.geotransform[-1])/2.0)) self.projectFile.setCard('ROWS', str(gr.y_size)) self.projectFile.setCard('COLS', str(gr.x_size)) # write projection file if does not exist if wkt_projection is None: proj_file = ProjectionFile() proj_file.projection = gr.wkt proj_file.projectFile = self.projectFile proj_path = "{0}_prj.pro".format(os.path.splitext(out_raster_path)[0]) gr.write_prj(proj_path) self.projectFile.setCard('#PROJECTION_FILE', proj_path, add_quotes=True) # read raster into object if load_raster_to_db: self._load_raster_text(out_raster_path)
[ "def", "generateFromWatershedShapefile", "(", "self", ",", "shapefile_path", ",", "cell_size", ",", "out_raster_path", "=", "None", ",", "load_raster_to_db", "=", "True", ")", ":", "if", "not", "self", ".", "projectFile", ":", "raise", "ValueError", "(", "\"Must be connected to project file ...\"", ")", "# match elevation grid if exists", "match_grid", "=", "None", "try", ":", "match_grid", "=", "self", ".", "projectFile", ".", "getGrid", "(", "use_mask", "=", "False", ")", "except", "ValueError", ":", "pass", "# match projection if exists", "wkt_projection", "=", "None", "try", ":", "wkt_projection", "=", "self", ".", "projectFile", ".", "getWkt", "(", ")", "except", "ValueError", ":", "pass", "if", "out_raster_path", "is", "None", ":", "out_raster_path", "=", "'{0}.{1}'", ".", "format", "(", "self", ".", "projectFile", ".", "name", ",", "self", ".", "extension", ")", "# make sure paths are absolute as the working directory changes", "shapefile_path", "=", "os", ".", "path", ".", "abspath", "(", "shapefile_path", ")", "# make sure the polygon is valid", "check_watershed_boundary_geometry", "(", "shapefile_path", ")", "gr", "=", "rasterize_shapefile", "(", "shapefile_path", ",", "x_cell_size", "=", "cell_size", ",", "y_cell_size", "=", "cell_size", ",", "match_grid", "=", "match_grid", ",", "raster_nodata", "=", "0", ",", "as_gdal_grid", "=", "True", ",", "raster_wkt_proj", "=", "wkt_projection", ",", "convert_to_utm", "=", "True", ")", "with", "tmp_chdir", "(", "self", ".", "projectFile", ".", "project_directory", ")", ":", "gr", ".", "to_grass_ascii", "(", "out_raster_path", ",", "print_nodata", "=", "False", ")", "self", ".", "filename", "=", "out_raster_path", "# update project file cards", "self", ".", "projectFile", ".", "setCard", "(", "'WATERSHED_MASK'", ",", "out_raster_path", ",", "add_quotes", "=", "True", ")", "self", ".", "projectFile", ".", "setCard", "(", "'GRIDSIZE'", ",", "str", "(", "(", "gr", ".", "geotransform", "[", "1", "]", "-", "gr", ".", "geotransform", "[", "-", "1", "]", ")", "/", "2.0", ")", ")", "self", ".", "projectFile", ".", "setCard", "(", "'ROWS'", ",", "str", "(", "gr", ".", "y_size", ")", ")", "self", ".", "projectFile", ".", "setCard", "(", "'COLS'", ",", "str", "(", "gr", ".", "x_size", ")", ")", "# write projection file if does not exist", "if", "wkt_projection", "is", "None", ":", "proj_file", "=", "ProjectionFile", "(", ")", "proj_file", ".", "projection", "=", "gr", ".", "wkt", "proj_file", ".", "projectFile", "=", "self", ".", "projectFile", "proj_path", "=", "\"{0}_prj.pro\"", ".", "format", "(", "os", ".", "path", ".", "splitext", "(", "out_raster_path", ")", "[", "0", "]", ")", "gr", ".", "write_prj", "(", "proj_path", ")", "self", ".", "projectFile", ".", "setCard", "(", "'#PROJECTION_FILE'", ",", "proj_path", ",", "add_quotes", "=", "True", ")", "# read raster into object", "if", "load_raster_to_db", ":", "self", ".", "_load_raster_text", "(", "out_raster_path", ")" ]
Generates a mask from a watershed_shapefile Example:: from gsshapy.orm import ProjectFile, WatershedMaskFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' shapefile_path = 'watershed_boundary.shp' # Create Test DB sqlalchemy_url, sql_engine = dbt.init_sqlite_memory() # Create DB Sessions db_session = dbt.create_session(sqlalchemy_url, sql_engine) # Instantiate GSSHAPY object for reading to database project_manager = ProjectFile() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate watershed mask watershed_mask = WatershedMaskFile(session=db_session, project_file=project_manager) watershed_mask.generateFromWatershedShapefile(shapefile_path, cell_size=50, ) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard')
[ "Generates", "a", "mask", "from", "a", "watershed_shapefile" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/msk.py#L45-L143
train
CI-WATER/gsshapy
gsshapy/util/context.py
tmp_chdir
def tmp_chdir(new_path): """Change directory temporarily and return when done.""" prev_cwd = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(prev_cwd)
python
def tmp_chdir(new_path): """Change directory temporarily and return when done.""" prev_cwd = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(prev_cwd)
[ "def", "tmp_chdir", "(", "new_path", ")", ":", "prev_cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "new_path", ")", "try", ":", "yield", "finally", ":", "os", ".", "chdir", "(", "prev_cwd", ")" ]
Change directory temporarily and return when done.
[ "Change", "directory", "temporarily", "and", "return", "when", "done", "." ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/util/context.py#L13-L20
train
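generateFromWatershedShapefile above uses tmp_chdir in a with block, which only works if the function is wrapped with contextlib.contextmanager; the extraction that produced these records appears to drop decorators. A runnable sketch with the (assumed) decorator restored:

import os
import tempfile
from contextlib import contextmanager

@contextmanager  # assumed on the upstream function; dropped by extraction
def tmp_chdir(new_path):
    """Change directory temporarily and return when done."""
    prev_cwd = os.getcwd()
    os.chdir(new_path)
    try:
        yield
    finally:
        os.chdir(prev_cwd)

with tmp_chdir(tempfile.gettempdir()):
    print(os.getcwd())    # the temp directory
print(os.getcwd())        # original directory restored, even on error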
CI-WATER/gsshapy
gsshapy/grid/era_to_gssha.py
ERAtoGSSHA._download
def _download(self): """download ERA5 data for GSSHA domain""" # reproject GSSHA grid and get bounds min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_geographic=True) if self.era_download_data == 'era5': log.info("Downloading ERA5 data ...") download_era5_for_gssha(self.lsm_input_folder_path, self.download_start_datetime, self.download_end_datetime, leftlon=min_x-0.5, rightlon=max_x+0.5, toplat=max_y+0.5, bottomlat=min_y-0.5) else: log.info("Downloading ERA Interim data ...") download_interim_for_gssha(self.lsm_input_folder_path, self.download_start_datetime, self.download_end_datetime, leftlon=min_x-1, rightlon=max_x+1, toplat=max_y+1, bottomlat=min_y-1)
python
def _download(self): """download ERA5 data for GSSHA domain""" # reproject GSSHA grid and get bounds min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_geographic=True) if self.era_download_data == 'era5': log.info("Downloading ERA5 data ...") download_era5_for_gssha(self.lsm_input_folder_path, self.download_start_datetime, self.download_end_datetime, leftlon=min_x-0.5, rightlon=max_x+0.5, toplat=max_y+0.5, bottomlat=min_y-0.5) else: log.info("Downloading ERA Interim data ...") download_interim_for_gssha(self.lsm_input_folder_path, self.download_start_datetime, self.download_end_datetime, leftlon=min_x-1, rightlon=max_x+1, toplat=max_y+1, bottomlat=min_y-1)
[ "def", "_download", "(", "self", ")", ":", "# reproject GSSHA grid and get bounds", "min_x", ",", "max_x", ",", "min_y", ",", "max_y", "=", "self", ".", "gssha_grid", ".", "bounds", "(", "as_geographic", "=", "True", ")", "if", "self", ".", "era_download_data", "==", "'era5'", ":", "log", ".", "info", "(", "\"Downloading ERA5 data ...\"", ")", "download_era5_for_gssha", "(", "self", ".", "lsm_input_folder_path", ",", "self", ".", "download_start_datetime", ",", "self", ".", "download_end_datetime", ",", "leftlon", "=", "min_x", "-", "0.5", ",", "rightlon", "=", "max_x", "+", "0.5", ",", "toplat", "=", "max_y", "+", "0.5", ",", "bottomlat", "=", "min_y", "-", "0.5", ")", "else", ":", "log", ".", "info", "(", "\"Downloading ERA Interim data ...\"", ")", "download_interim_for_gssha", "(", "self", ".", "lsm_input_folder_path", ",", "self", ".", "download_start_datetime", ",", "self", ".", "download_end_datetime", ",", "leftlon", "=", "min_x", "-", "1", ",", "rightlon", "=", "max_x", "+", "1", ",", "toplat", "=", "max_y", "+", "1", ",", "bottomlat", "=", "min_y", "-", "1", ")" ]
download ERA5 data for GSSHA domain
[ "download", "ERA5", "data", "for", "GSSHA", "domain" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/era_to_gssha.py#L381-L402
train
hellupline/flask-manager
flask_manager/views.py
View.dispatch_request
def dispatch_request(self, *args, **kwargs): """Dispatch the request. It's the actual ``view`` Flask will use. """ if request.method in ('POST', 'PUT'): return_url, context = self.post(*args, **kwargs) if return_url is not None: return redirect(return_url) elif request.method in ('GET', 'HEAD'): context = self.get(*args, **kwargs) return self.render_response(self.context(context))
python
def dispatch_request(self, *args, **kwargs): """Dispatch the request. It's the actual ``view`` Flask will use. """ if request.method in ('POST', 'PUT'): return_url, context = self.post(*args, **kwargs) if return_url is not None: return redirect(return_url) elif request.method in ('GET', 'HEAD'): context = self.get(*args, **kwargs) return self.render_response(self.context(context))
[ "def", "dispatch_request", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ")", ":", "return_url", ",", "context", "=", "self", ".", "post", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "return_url", "is", "not", "None", ":", "return", "redirect", "(", "return_url", ")", "elif", "request", ".", "method", "in", "(", "'GET'", ",", "'HEAD'", ")", ":", "context", "=", "self", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "render_response", "(", "self", ".", "context", "(", "context", ")", ")" ]
Dispatch the request. It's the actual ``view`` Flask will use.
[ "Dispatch", "the", "request", ".", "Its", "the", "actual", "view", "flask", "will", "use", "." ]
70e48309f73aacf55f5c37b43165791ae1cf6861
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/views.py#L21-L31
train
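dispatch_request is the hook that flask.views.View subclasses implement, which suggests this View plugs into Flask's pluggable-view protocol. A hedged wiring sketch; the ItemView stubs below are illustrative, not flask_manager's real controller interface:

from flask import Flask
from flask_manager.views import View

class ItemView(View):
    # Illustrative stubs standing in for the real get/context/render_response.
    def get(self):
        return {'items': []}

    def context(self, extra):
        return extra

    def render_response(self, context):
        return str(context)

app = Flask(__name__)
# as_view comes from flask.views.View, assuming View extends it.
app.add_url_rule('/items/', view_func=ItemView.as_view('items'))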
bayesimpact/fex
fex/runner.py
_run_cmd_get_output
def _run_cmd_get_output(cmd): """Runs a shell command, returns console output. Mimics python3's subprocess.getoutput """ process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() return out or err
python
def _run_cmd_get_output(cmd): """Runs a shell command, returns console output. Mimics python3's subprocess.getoutput """ process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() return out or err
[ "def", "_run_cmd_get_output", "(", "cmd", ")", ":", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ".", "split", "(", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "process", ".", "communicate", "(", ")", "return", "out", "or", "err" ]
Runs a shell command, returns console output. Mimics python3's subprocess.getoutput
[ "Runs", "a", "shell", "command", "returns", "console", "output", "." ]
2d9b4e9be2bf98847a36055b907411fd5557eb77
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L15-L22
train
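A quick usage check of the helper above (restated here so the sketch is self-contained); since neither text=True nor an encoding is passed to Popen, the return value is bytes and callers decode as needed:

import subprocess

def _run_cmd_get_output(cmd):
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    return out or err

# Run inside a git checkout: stdout is returned, or stderr when stdout is empty.
sha = _run_cmd_get_output('git rev-parse HEAD')
print(sha.decode().strip())   # e.g. a 40-character commit hash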
bayesimpact/fex
fex/runner.py
_remote_github_url_to_string
def _remote_github_url_to_string(remote_url): """Parse out the repository identifier from a github URL.""" # TODO: make this work with https URLs match = re.search(r'git@github\.com:(.*)\.git', remote_url) if not match: raise EnvironmentError('Remote is not a valid github URL') identifier = match.group(1) return re.sub(r'\W', ':', identifier)
python
def _remote_github_url_to_string(remote_url): """Parse out the repository identifier from a github URL.""" # TODO: make this work with https URLs match = re.search(r'git@github\.com:(.*)\.git', remote_url) if not match: raise EnvironmentError('Remote is not a valid github URL') identifier = match.group(1) return re.sub(r'\W', ':', identifier)
[ "def", "_remote_github_url_to_string", "(", "remote_url", ")", ":", "# TODO: make this work with https URLs", "match", "=", "re", ".", "search", "(", "'git@github\\.com:(.*)\\.git'", ",", "remote_url", ")", "if", "not", "match", ":", "raise", "EnvironmentError", "(", "'Remote is not a valid github URL'", ")", "identifier", "=", "match", ".", "group", "(", "1", ")", "return", "re", ".", "sub", "(", "'\\W'", ",", "':'", ",", "identifier", ")" ]
Parse out the repository identifier from a github URL.
[ "Parse", "out", "the", "repository", "identifier", "from", "a", "github", "URL", "." ]
2d9b4e9be2bf98847a36055b907411fd5557eb77
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L25-L32
train
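Tracing the helper on a typical SSH remote: the first pattern captures owner/repo between the git@github.com: prefix and the .git suffix, and the \W substitution then turns the slash into a colon. Using the repository named in this record:

import re

def _remote_github_url_to_string(remote_url):
    match = re.search(r'git@github\.com:(.*)\.git', remote_url)
    if not match:
        raise EnvironmentError('Remote is not a valid github URL')
    return re.sub(r'\W', ':', match.group(1))

print(_remote_github_url_to_string('git@github.com:bayesimpact/fex.git'))
# -> bayesimpact:fex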
bayesimpact/fex
fex/runner.py
_get_args
def _get_args(args): """Argparse logic lives here. returns: parsed arguments. """ parser = argparse.ArgumentParser( description='A tool to extract features into a simple format.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument('--no-cache', action='store_true') parser.add_argument('--deploy', action='store_true') parser.add_argument('--cache-path', type=str, default='fex-cache.pckl', help='Path for cache file') parser.add_argument('--path', type=str, default='features.csv', help='Path to write the dataset to') args = parser.parse_args(args) if args.no_cache: args.cache_path = None return args
python
def _get_args(args): """Argparse logic lives here. returns: parsed arguments. """ parser = argparse.ArgumentParser( description='A tool to extract features into a simple format.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument('--no-cache', action='store_true') parser.add_argument('--deploy', action='store_true') parser.add_argument('--cache-path', type=str, default='fex-cache.pckl', help='Path for cache file') parser.add_argument('--path', type=str, default='features.csv', help='Path to write the dataset to') args = parser.parse_args(args) if args.no_cache: args.cache_path = None return args
[ "def", "_get_args", "(", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'A tool to extract features into a simple format.'", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ",", ")", "parser", ".", "add_argument", "(", "'--no-cache'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--deploy'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--cache-path'", ",", "type", "=", "str", ",", "default", "=", "'fex-cache.pckl'", ",", "help", "=", "'Path for cache file'", ")", "parser", ".", "add_argument", "(", "'--path'", ",", "type", "=", "str", ",", "default", "=", "'features.csv'", ",", "help", "=", "'Path to write the dataset to'", ")", "args", "=", "parser", ".", "parse_args", "(", "args", ")", "if", "args", ".", "no_cache", ":", "args", ".", "cache_path", "=", "None", "return", "args" ]
Argparse logic lives here. returns: parsed arguments.
[ "Argparse", "logic", "lives", "here", "." ]
2d9b4e9be2bf98847a36055b907411fd5557eb77
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L75-L93
train
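Because _get_args takes the argument list explicitly instead of reading sys.argv, it is easy to exercise directly; --no-cache wins over --cache-path, as the final branch shows. A hedged usage sketch, assuming the function above is in scope:

args = _get_args(['--no-cache', '--path', 'out.csv'])
print(args.cache_path)   # None: cleared by the --no-cache branch
print(args.path)         # out.csv

args = _get_args([])
print(args.cache_path)   # fex-cache.pckl (the declared default)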
bayesimpact/fex
fex/runner.py
run
def run(*extractor_list, **kwargs): """Parse arguments provided on the commandline and execute extractors.""" args = _get_args(kwargs.get('args')) n_extractors = len(extractor_list) log.info('Going to run list of {} FeatureExtractors'.format(n_extractors)) collection = fex.Collection(cache_path=args.cache_path) for extractor in extractor_list: collection.add_feature_extractor(extractor) out_path = args.path if args.deploy: out_path = _prefix_git_hash(out_path) collection.run(out_path)
python
def run(*extractor_list, **kwargs): """Parse arguments provided on the commandline and execute extractors.""" args = _get_args(kwargs.get('args')) n_extractors = len(extractor_list) log.info('Going to run list of {} FeatureExtractors'.format(n_extractors)) collection = fex.Collection(cache_path=args.cache_path) for extractor in extractor_list: collection.add_feature_extractor(extractor) out_path = args.path if args.deploy: out_path = _prefix_git_hash(out_path) collection.run(out_path)
[ "def", "run", "(", "*", "extractor_list", ",", "*", "*", "kwargs", ")", ":", "args", "=", "_get_args", "(", "kwargs", ".", "get", "(", "'args'", ")", ")", "n_extractors", "=", "len", "(", "extractor_list", ")", "log", ".", "info", "(", "'Going to run list of {} FeatureExtractors'", ".", "format", "(", "n_extractors", ")", ")", "collection", "=", "fex", ".", "Collection", "(", "cache_path", "=", "args", ".", "cache_path", ")", "for", "extractor", "in", "extractor_list", ":", "collection", ".", "add_feature_extractor", "(", "extractor", ")", "out_path", "=", "args", ".", "path", "if", "args", ".", "deploy", ":", "out_path", "=", "_prefix_git_hash", "(", "out_path", ")", "collection", ".", "run", "(", "out_path", ")" ]
Parse arguments provided on the commandline and execute extractors.
[ "Parse", "arguments", "provided", "on", "the", "commandline", "and", "execute", "extractors", "." ]
2d9b4e9be2bf98847a36055b907411fd5557eb77
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L110-L122
train
CI-WATER/gsshapy
gsshapy/orm/map.py
RasterMapFile._delete_existing
def _delete_existing(self, project_file, session): """ This will delete existing instances with the same extension """ # remove existing grid if exists existing_elev = session.query(RasterMapFile).\ filter(RasterMapFile.projectFile == project_file).\ filter(RasterMapFile.fileExtension == self.fileExtension).\ all() if existing_elev: for existing in existing_elev: session.delete(existing) session.commit()
python
def _delete_existing(self, project_file, session): """ This will delete existing instances with the same extension """ # remove existing grid if exists existing_elev = session.query(RasterMapFile).\ filter(RasterMapFile.projectFile == project_file).\ filter(RasterMapFile.fileExtension == self.fileExtension).\ all() if existing_elev: for existing in existing_elev: session.delete(existing) session.commit()
[ "def", "_delete_existing", "(", "self", ",", "project_file", ",", "session", ")", ":", "# remove existing grid if exists", "existing_elev", "=", "session", ".", "query", "(", "RasterMapFile", ")", ".", "filter", "(", "RasterMapFile", ".", "projectFile", "==", "project_file", ")", ".", "filter", "(", "RasterMapFile", ".", "fileExtension", "==", "self", ".", "fileExtension", ")", ".", "all", "(", ")", "if", "existing_elev", ":", "session", ".", "delete", "(", "existing_elev", ")", "session", ".", "commit", "(", ")" ]
This will delete existing instances with the same extension
[ "This", "will", "delete", "existing", "instances", "with", "the", "same", "extension" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L81-L92
train
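The per-instance loop above (fixed from the original, which passed the whole result list to session.delete) routes every delete through the unit of work; when no ORM-level cascades or events need to fire, SQLAlchemy can also delete straight from the query. A sketch of that alternative, assuming the same model and session:

# Bulk-delete variant: one DELETE statement instead of N session.delete()
# calls. synchronize_session=False is fine here because the session is
# committed immediately afterwards.
(session.query(RasterMapFile)
        .filter(RasterMapFile.projectFile == project_file)
        .filter(RasterMapFile.fileExtension == self.fileExtension)
        .delete(synchronize_session=False))
session.commit()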
CI-WATER/gsshapy
gsshapy/orm/map.py
RasterMapFile._load_raster_text
def _load_raster_text(self, raster_path): """ Loads grass ASCII to object """ # Open file and read plain text into text field with open(raster_path, 'r') as f: self.rasterText = f.read() # Retrieve metadata from header lines = self.rasterText.split('\n') for line in lines[0:6]: spline = line.split() if 'north' in spline[0].lower(): self.north = float(spline[1]) elif 'south' in spline[0].lower(): self.south = float(spline[1]) elif 'east' in spline[0].lower(): self.east = float(spline[1]) elif 'west' in spline[0].lower(): self.west = float(spline[1]) elif 'rows' in spline[0].lower(): self.rows = int(spline[1]) elif 'cols' in spline[0].lower(): self.columns = int(spline[1])
python
def _load_raster_text(self, raster_path): """ Loads grass ASCII to object """ # Open file and read plain text into text field with open(raster_path, 'r') as f: self.rasterText = f.read() # Retrieve metadata from header lines = self.rasterText.split('\n') for line in lines[0:6]: spline = line.split() if 'north' in spline[0].lower(): self.north = float(spline[1]) elif 'south' in spline[0].lower(): self.south = float(spline[1]) elif 'east' in spline[0].lower(): self.east = float(spline[1]) elif 'west' in spline[0].lower(): self.west = float(spline[1]) elif 'rows' in spline[0].lower(): self.rows = int(spline[1]) elif 'cols' in spline[0].lower(): self.columns = int(spline[1])
[ "def", "_load_raster_text", "(", "self", ",", "raster_path", ")", ":", "# Open file and read plain text into text field", "with", "open", "(", "raster_path", ",", "'r'", ")", "as", "f", ":", "self", ".", "rasterText", "=", "f", ".", "read", "(", ")", "# Retrieve metadata from header", "lines", "=", "self", ".", "rasterText", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", "[", "0", ":", "6", "]", ":", "spline", "=", "line", ".", "split", "(", ")", "if", "'north'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "north", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'south'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "south", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'east'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "east", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'west'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "west", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'rows'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "rows", "=", "int", "(", "spline", "[", "1", "]", ")", "elif", "'cols'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "columns", "=", "int", "(", "spline", "[", "1", "]", ")" ]
Loads grass ASCII to object
[ "Loads", "grass", "ASCII", "to", "object" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L94-L118
train
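The header loop above reads the first six lines of a GRASS ASCII grid, each a whitespace-separated key/value pair. A standalone sketch of the same parsing on a small illustrative header (coordinate and cell values are made up):

sample = """north: 4471600.0
south: 4467600.0
east: 459100.0
west: 455100.0
rows: 2
cols: 4
0 1 1 0
1 1 1 1"""

meta = {}
for line in sample.split('\n')[0:6]:
    key, value = line.split()
    meta[key.rstrip(':')] = float(value) if '.' in value else int(value)
print(meta)   # {'north': 4471600.0, ..., 'rows': 2, 'cols': 4}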
CI-WATER/gsshapy
gsshapy/orm/map.py
RasterMapFile._read
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Raster Map File Read from File Method """ # Assign file extension attribute to file object self.fileExtension = extension self.filename = filename self._load_raster_text(path) if spatial: # Get well known binary from the raster file using the MapKit RasterLoader wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session, grassRasterPath=path, srid=str(spatialReferenceID), noData='0') self.raster = wkbRaster
python
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Raster Map File Read from File Method """ # Assign file extension attribute to file object self.fileExtension = extension self.filename = filename self._load_raster_text(path) if spatial: # Get well known binary from the raster file using the MapKit RasterLoader wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session, grassRasterPath=path, srid=str(spatialReferenceID), noData='0') self.raster = wkbRaster
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", ",", "spatialReferenceID", ",", "replaceParamFile", ")", ":", "# Assign file extension attribute to file object", "self", ".", "fileExtension", "=", "extension", "self", ".", "filename", "=", "filename", "self", ".", "_load_raster_text", "(", "path", ")", "if", "spatial", ":", "# Get well known binary from the raster file using the MapKit RasterLoader", "wkbRaster", "=", "RasterLoader", ".", "grassAsciiRasterToWKB", "(", "session", "=", "session", ",", "grassRasterPath", "=", "path", ",", "srid", "=", "str", "(", "spatialReferenceID", ")", ",", "noData", "=", "'0'", ")", "self", ".", "raster", "=", "wkbRaster" ]
Raster Map File Read from File Method
[ "Raster", "Map", "File", "Read", "from", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L120-L136
train
CI-WATER/gsshapy
gsshapy/orm/map.py
RasterMapFile._write
def _write(self, session, openFile, replaceParamFile): """ Raster Map File Write to File Method """ # If the raster field is not empty, write from this field if self.raster is not None: # Configure RasterConverter converter = RasterConverter(session) # Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid grassAsciiGrid = converter.getAsGrassAsciiRaster(rasterFieldName='raster', tableName=self.__tablename__, rasterIdFieldName='id', rasterId=self.id) # Write to file openFile.write(grassAsciiGrid) elif self.rasterText is not None: # Write file openFile.write(self.rasterText)
python
def _write(self, session, openFile, replaceParamFile): """ Raster Map File Write to File Method """ # If the raster field is not empty, write from this field if self.raster is not None: # Configure RasterConverter converter = RasterConverter(session) # Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid grassAsciiGrid = converter.getAsGrassAsciiRaster(rasterFieldName='raster', tableName=self.__tablename__, rasterIdFieldName='id', rasterId=self.id) # Write to file openFile.write(grassAsciiGrid) elif self.rasterText is not None: # Write file openFile.write(self.rasterText)
[ "def", "_write", "(", "self", ",", "session", ",", "openFile", ",", "replaceParamFile", ")", ":", "# If the raster field is not empty, write from this field", "if", "self", ".", "raster", "is", "not", "None", ":", "# Configure RasterConverter", "converter", "=", "RasterConverter", "(", "session", ")", "# Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid", "grassAsciiGrid", "=", "converter", ".", "getAsGrassAsciiRaster", "(", "rasterFieldName", "=", "'raster'", ",", "tableName", "=", "self", ".", "__tablename__", ",", "rasterIdFieldName", "=", "'id'", ",", "rasterId", "=", "self", ".", "id", ")", "# Write to file", "openFile", ".", "write", "(", "grassAsciiGrid", ")", "elif", "self", ".", "rasterText", "is", "not", "None", ":", "# Write file", "openFile", ".", "write", "(", "self", ".", "rasterText", ")" ]
Raster Map File Write to File Method
[ "Raster", "Map", "File", "Write", "to", "File", "Method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L138-L157
train
CI-WATER/gsshapy
gsshapy/orm/map.py
RasterMapFile.write
def write(self, session, directory, name, replaceParamFile=None, **kwargs): """ Wrapper for GsshaPyFileObjectBase write method """ if self.raster is not None or self.rasterText is not None: super(RasterMapFile, self).write(session, directory, name, replaceParamFile, **kwargs)
python
def write(self, session, directory, name, replaceParamFile=None, **kwargs): """ Wrapper for GsshaPyFileObjectBase write method """ if self.raster is not None or self.rasterText is not None: super(RasterMapFile, self).write(session, directory, name, replaceParamFile, **kwargs)
[ "def", "write", "(", "self", ",", "session", ",", "directory", ",", "name", ",", "replaceParamFile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "raster", "is", "not", "None", "or", "self", ".", "rasterText", "is", "not", "None", ":", "super", "(", "RasterMapFile", ",", "self", ")", ".", "write", "(", "session", ",", "directory", ",", "name", ",", "replaceParamFile", ",", "*", "*", "kwargs", ")" ]
Wrapper for GsshaPyFileObjectBase write method
[ "Wrapper", "for", "GsshaPyFileObjectBase", "write", "method" ]
00fd4af0fd65f1614d75a52fe950a04fb0867f4c
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L159-L164
train
hellupline/flask-manager
flask_manager/utils.py
slugify
def slugify(value): """Simple Slugify.""" s1 = first_cap_re.sub(r'\1_\2', value) s2 = all_cap_re.sub(r'\1_\2', s1) return s2.lower().replace(' _', '_').replace(' ', '_')
python
def slugify(value): """Simple Slugify.""" s1 = first_cap_re.sub(r'\1_\2', value) s2 = all_cap_re.sub(r'\1_\2', s1) return s2.lower().replace(' _', '_').replace(' ', '_')
[ "def", "slugify", "(", "value", ")", ":", "s1", "=", "first_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "value", ")", "s2", "=", "all_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "s1", ")", "return", "s2", ".", "lower", "(", ")", ".", "replace", "(", "' _'", ",", "'_'", ")", ".", "replace", "(", "' '", ",", "'_'", ")" ]
Simple Slugify.
[ "Simple", "Slugify", "." ]
70e48309f73aacf55f5c37b43165791ae1cf6861
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/utils.py#L23-L27
train
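first_cap_re and all_cap_re are module-level patterns that the extraction leaves out of this record. With the conventional two-pass camelCase-to-snake_case definitions assumed below, the snippet becomes runnable:

import re

# Assumed definitions; the usual camelCase -> snake_case pattern pair.
first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')
all_cap_re = re.compile(r'([a-z0-9])([A-Z])')

def slugify(value):
    """Simple Slugify."""
    s1 = first_cap_re.sub(r'\1_\2', value)
    s2 = all_cap_re.sub(r'\1_\2', s1)
    return s2.lower().replace(' _', '_').replace(' ', '_')

print(slugify('UserAdmin'))       # user_admin
print(slugify('My HTTPServer'))   # my_http_server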
dssg/argcmdr
src/argcmdr.py
entrypoint
def entrypoint(cls): """Mark the decorated command as the intended entrypoint of the command module. """ if not isinstance(cls, type) or not issubclass(cls, Command): raise TypeError(f"inappropriate entrypoint instance of type {cls.__class__}") cls._argcmdr_entrypoint_ = True return cls
python
def entrypoint(cls): """Mark the decorated command as the intended entrypoint of the command module. """ if not isinstance(cls, type) or not issubclass(cls, Command): raise TypeError(f"inappropriate entrypoint instance of type {cls.__class__}") cls._argcmdr_entrypoint_ = True return cls
[ "def", "entrypoint", "(", "cls", ")", ":", "if", "not", "isinstance", "(", "cls", ",", "type", ")", "or", "not", "issubclass", "(", "cls", ",", "Command", ")", ":", "raise", "TypeError", "(", "f\"inappropriate entrypoint instance of type {cls.__class__}\"", ")", "cls", ".", "_argcmdr_entrypoint_", "=", "True", "return", "cls" ]
Mark the decorated command as the intended entrypoint of the command module.
[ "Mark", "the", "decorated", "command", "as", "the", "intended", "entrypoint", "of", "the", "command", "module", "." ]
346b6158987464c3d3a32d315f3800a4807744b4
https://github.com/dssg/argcmdr/blob/346b6158987464c3d3a32d315f3800a4807744b4/src/argcmdr.py#L767-L775
train
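Marking a command is a plain class decoration; anything that is not a Command subclass is rejected with a TypeError at import time. A hedged sketch of a command module (the Main class and its __call__ body are illustrative):

from argcmdr import Command, entrypoint

@entrypoint
class Main(Command):
    """manage the project"""

    def __call__(self, args):
        # illustrative body; real commands do their work here
        print('running main')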
dssg/argcmdr
src/argcmdr.py
store_env_override
def store_env_override(option_strings, dest, envvar, nargs=None, default=None, type=None, choices=None, description=None, help=None, metavar=None): """Construct an argparse action which stores the value of a command line option to override a corresponding value in the process environment. If the environment variable is not empty, then no override is required. If the environment variable is empty, and no default is provided, then the "option" is required. In the case of a default value which is a *transformation* of the single environment variable, this default may be provided as a callable, (*e.g.* as a lambda function). Rather than have to fully explain the relationship of this environment-backed option, help text may be generated from a provided description. """ if envvar == '': raise ValueError("unsupported environment variable name", envvar) envvalue = os.getenv(envvar) if callable(default): default_value = default(envvalue) elif envvalue: default_value = envvalue else: default_value = default if description and help: raise ValueError( "only specify help to override its optional generation from " "description -- not both" ) elif description: if default_value: help = '{} (default {} envvar {}: {})'.format( description, 'provided by' if default is None else 'derived from', envvar, default_value, ) else: help = (f'{description} (required because ' f'envvar {envvar} is empty)') return argparse._StoreAction( option_strings=option_strings, dest=dest, nargs=nargs, const=None, default=default_value, type=type, choices=choices, required=(not default_value), help=help, metavar=metavar, )
python
def store_env_override(option_strings, dest, envvar, nargs=None, default=None, type=None, choices=None, description=None, help=None, metavar=None): """Construct an argparse action which stores the value of a command line option to override a corresponding value in the process environment. If the environment variable is not empty, then no override is required. If the environment variable is empty, and no default is provided, then the "option" is required. In the case of a default value which is a *transformation* of the single environment variable, this default may be provided as a callable, (*e.g.* as a lambda function). Rather than have to fully explain the relationship of this environment-backed option, help text may be generated from a provided description. """ if envvar == '': raise ValueError("unsupported environment variable name", envvar) envvalue = os.getenv(envvar) if callable(default): default_value = default(envvalue) elif envvalue: default_value = envvalue else: default_value = default if description and help: raise ValueError( "only specify help to override its optional generation from " "description -- not both" ) elif description: if default_value: help = '{} (default {} envvar {}: {})'.format( description, 'provided by' if default is None else 'derived from', envvar, default_value, ) else: help = (f'{description} (required because ' f'envvar {envvar} is empty)') return argparse._StoreAction( option_strings=option_strings, dest=dest, nargs=nargs, const=None, default=default_value, type=type, choices=choices, required=(not default_value), help=help, metavar=metavar, )
[ "def", "store_env_override", "(", "option_strings", ",", "dest", ",", "envvar", ",", "nargs", "=", "None", ",", "default", "=", "None", ",", "type", "=", "None", ",", "choices", "=", "None", ",", "description", "=", "None", ",", "help", "=", "None", ",", "metavar", "=", "None", ")", ":", "if", "envvar", "==", "''", ":", "raise", "ValueError", "(", "\"unsupported environment variable name\"", ",", "envvar", ")", "envvalue", "=", "os", ".", "getenv", "(", "envvar", ")", "if", "callable", "(", "default", ")", ":", "default_value", "=", "default", "(", "envvalue", ")", "elif", "envvalue", ":", "default_value", "=", "envvalue", "else", ":", "default_value", "=", "default", "if", "description", "and", "help", ":", "raise", "ValueError", "(", "\"only specify help to override its optional generation from \"", "\"description -- not both\"", ")", "elif", "description", ":", "if", "default_value", ":", "help", "=", "'{} (default {} envvar {}: {})'", ".", "format", "(", "description", ",", "'provided by'", "if", "default", "is", "None", "else", "'derived from'", ",", "envvar", ",", "default_value", ",", ")", "else", ":", "help", "=", "(", "f'{description} (required because '", "f'envvar {envvar} is empty)'", ")", "return", "argparse", ".", "_StoreAction", "(", "option_strings", "=", "option_strings", ",", "dest", "=", "dest", ",", "nargs", "=", "nargs", ",", "const", "=", "None", ",", "default", "=", "default_value", ",", "type", "=", "type", ",", "choices", "=", "choices", ",", "required", "=", "(", "not", "default_value", ")", ",", "help", "=", "help", ",", "metavar", "=", "metavar", ",", ")" ]
Construct an argparse action which stores the value of a command line option to override a corresponding value in the process environment. If the environment variable is not empty, then no override is required. If the environment variable is empty, and no default is provided, then the "option" is required. In the case of a default value which is a *transformation* of the single environment variable, this default may be provided as a callable, (*e.g.* as a lambda function). Rather than have to fully explain the relationship of this environment-backed option, help text may be generated from a provided description.
[ "Construct", "an", "argparse", "action", "which", "stores", "the", "value", "of", "a", "command", "line", "option", "to", "override", "a", "corresponding", "value", "in", "the", "process", "environment", "." ]
346b6158987464c3d3a32d315f3800a4807744b4
https://github.com/dssg/argcmdr/blob/346b6158987464c3d3a32d315f3800a4807744b4/src/argcmdr.py#L785-L852
train
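argparse instantiates whatever callable is passed as action, forwarding any extra keywords from add_argument, so the factory above slots in directly with its envvar and description arguments. A hedged usage sketch, assuming store_env_override is in scope:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--database-url',
    action=store_env_override,
    envvar='DATABASE_URL',          # hypothetical variable name
    description='database connection string',
)

# If DATABASE_URL is empty, the flag is required; either way the parsed
# value lands on args.database_url.
args = parser.parse_args(['--database-url', 'postgres://localhost/app'])
print(args.database_url)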
robinandeer/puzzle
puzzle/plugins/base.py
Plugin.individual_dict
def individual_dict(self, ind_ids): """Return a dict with ind_id as key and Individual as values.""" ind_dict = {ind.ind_id: ind for ind in self.individuals(ind_ids=ind_ids)} return ind_dict
python
def individual_dict(self, ind_ids): """Return a dict with ind_id as key and Individual as values.""" ind_dict = {ind.ind_id: ind for ind in self.individuals(ind_ids=ind_ids)} return ind_dict
[ "def", "individual_dict", "(", "self", ",", "ind_ids", ")", ":", "ind_dict", "=", "{", "ind", ".", "ind_id", ":", "ind", "for", "ind", "in", "self", ".", "individuals", "(", "ind_ids", "=", "ind_ids", ")", "}", "return", "ind_dict" ]
Return a dict with ind_id as key and Individual as values.
[ "Return", "a", "dict", "with", "ind_id", "as", "key", "and", "Individual", "as", "values", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/base.py#L35-L38
train
robinandeer/puzzle
tasks.py
clean
def clean(): """clean - remove build artifacts.""" run('rm -rf build/') run('rm -rf dist/') run('rm -rf puzzle.egg-info') run('find . -name __pycache__ -delete') run('find . -name *.pyc -delete') run('find . -name *.pyo -delete') run('find . -name *~ -delete') log.info('cleaned up')
python
def clean(): """clean - remove build artifacts.""" run('rm -rf build/') run('rm -rf dist/') run('rm -rf puzzle.egg-info') run('find . -name __pycache__ -delete') run('find . -name *.pyc -delete') run('find . -name *.pyo -delete') run('find . -name *~ -delete') log.info('cleaned up')
[ "def", "clean", "(", ")", ":", "run", "(", "'rm -rf build/'", ")", "run", "(", "'rm -rf dist/'", ")", "run", "(", "'rm -rf puzzle.egg-info'", ")", "run", "(", "'find . -name __pycache__ -delete'", ")", "run", "(", "'find . -name *.pyc -delete'", ")", "run", "(", "'find . -name *.pyo -delete'", ")", "run", "(", "'find . -name *~ -delete'", ")", "log", ".", "info", "(", "'cleaned up'", ")" ]
clean - remove build artifacts.
[ "clean", "-", "remove", "build", "artifacts", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/tasks.py#L10-L20
train
eleme/meepo
meepo/sub/zmq.py
zmq_sub
def zmq_sub(bind, tables, forwarder=False, green=False): """0mq fanout sub. This sub will use zeromq to fanout the events. :param bind: the zmq pub socket or zmq device socket. :param tables: the events of tables to follow. :param forwarder: set to True if zmq pub to a forwarder device. :param green: whether to use a greenlet compat zmq """ logger = logging.getLogger("meepo.sub.zmq_sub") if not isinstance(tables, (list, set)): raise ValueError("tables should be list or set") if not green: import zmq else: import zmq.green as zmq ctx = zmq.Context() socket = ctx.socket(zmq.PUB) if forwarder: socket.connect(bind) else: socket.bind(bind) events = ("%s_%s" % (tb, action) for tb, action in itertools.product(*[tables, ["write", "update", "delete"]])) for event in events: def _sub(pk, event=event): msg = "%s %s" % (event, pk) socket.send_string(msg) logger.debug("pub msg: %s" % msg) signal(event).connect(_sub, weak=False) return socket
python
def zmq_sub(bind, tables, forwarder=False, green=False): """0mq fanout sub. This sub will use zeromq to fanout the events. :param bind: the zmq pub socket or zmq device socket. :param tables: the events of tables to follow. :param forwarder: set to True if zmq pub to a forwarder device. :param green: whether to use a greenlet compat zmq """ logger = logging.getLogger("meepo.sub.zmq_sub") if not isinstance(tables, (list, set)): raise ValueError("tables should be list or set") if not green: import zmq else: import zmq.green as zmq ctx = zmq.Context() socket = ctx.socket(zmq.PUB) if forwarder: socket.connect(bind) else: socket.bind(bind) events = ("%s_%s" % (tb, action) for tb, action in itertools.product(*[tables, ["write", "update", "delete"]])) for event in events: def _sub(pk, event=event): msg = "%s %s" % (event, pk) socket.send_string(msg) logger.debug("pub msg: %s" % msg) signal(event).connect(_sub, weak=False) return socket
[ "def", "zmq_sub", "(", "bind", ",", "tables", ",", "forwarder", "=", "False", ",", "green", "=", "False", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "\"meepo.sub.zmq_sub\"", ")", "if", "not", "isinstance", "(", "tables", ",", "(", "list", ",", "set", ")", ")", ":", "raise", "ValueError", "(", "\"tables should be list or set\"", ")", "if", "not", "green", ":", "import", "zmq", "else", ":", "import", "zmq", ".", "green", "as", "zmq", "ctx", "=", "zmq", ".", "Context", "(", ")", "socket", "=", "ctx", ".", "socket", "(", "zmq", ".", "PUB", ")", "if", "forwarder", ":", "socket", ".", "connect", "(", "bind", ")", "else", ":", "socket", ".", "bind", "(", "bind", ")", "events", "=", "(", "\"%s_%s\"", "%", "(", "tb", ",", "action", ")", "for", "tb", ",", "action", "in", "itertools", ".", "product", "(", "*", "[", "tables", ",", "[", "\"write\"", ",", "\"update\"", ",", "\"delete\"", "]", "]", ")", ")", "for", "event", "in", "events", ":", "def", "_sub", "(", "pk", ",", "event", "=", "event", ")", ":", "msg", "=", "\"%s %s\"", "%", "(", "event", ",", "pk", ")", "socket", ".", "send_string", "(", "msg", ")", "logger", ".", "debug", "(", "\"pub msg: %s\"", "%", "msg", ")", "signal", "(", "event", ")", ".", "connect", "(", "_sub", ",", "weak", "=", "False", ")", "return", "socket" ]
0mq fanout sub. This sub will use zeromq to fanout the events. :param bind: the zmq pub socket or zmq device socket. :param tables: the events of tables to follow. :param forwarder: set to True if zmq pub to a forwarder device. :param green: whether to use a greenlet compat zmq
[ "0mq", "fanout", "sub", "." ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/sub/zmq.py#L11-L48
train
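On the sending side the sub is driven entirely by blinker signals: firing a table_action signal with a primary key publishes the string "table_action pk" on the PUB socket. A hedged sketch, assuming zmq_sub above is in scope (endpoint and table names are illustrative; requires pyzmq and blinker):

from blinker import signal

# Bind the fanout PUB socket and register callbacks for two tables.
socket = zmq_sub('tcp://127.0.0.1:5555', ['user', 'order'])

# Later, e.g. from a meepo pub: this sends "user_write 42" to all
# connected SUB sockets subscribed to the topic.
signal('user_write').send(42)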
robinandeer/puzzle
puzzle/plugins/sql/mixins/case.py
CaseMixin.add_case
def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None): """Load a case with individuals. Args: case_obj (puzzle.models.Case): initialized case model """ new_case = Case(case_id=case_obj.case_id, name=case_obj.name, variant_source=case_obj.variant_source, variant_type=vtype, variant_mode=mode, pedigree=ped_svg, compressed=case_obj.compressed, tabix_index=case_obj.tabix_index) # build individuals inds = [Individual( ind_id=ind.ind_id, name=ind.name, mother=ind.mother, father=ind.father, sex=ind.sex, phenotype=ind.phenotype, ind_index=ind.ind_index, variant_source=ind.variant_source, bam_path=ind.bam_path, ) for ind in case_obj.individuals] new_case.individuals = inds if self.case(new_case.case_id): logger.warning("Case already exists in database!") else: self.session.add(new_case) self.save() return new_case
python
def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None): """Load a case with individuals. Args: case_obj (puzzle.models.Case): initialized case model """ new_case = Case(case_id=case_obj.case_id, name=case_obj.name, variant_source=case_obj.variant_source, variant_type=vtype, variant_mode=mode, pedigree=ped_svg, compressed=case_obj.compressed, tabix_index=case_obj.tabix_index) # build individuals inds = [Individual( ind_id=ind.ind_id, name=ind.name, mother=ind.mother, father=ind.father, sex=ind.sex, phenotype=ind.phenotype, ind_index=ind.ind_index, variant_source=ind.variant_source, bam_path=ind.bam_path, ) for ind in case_obj.individuals] new_case.individuals = inds if self.case(new_case.case_id): logger.warning("Case already exists in database!") else: self.session.add(new_case) self.save() return new_case
[ "def", "add_case", "(", "self", ",", "case_obj", ",", "vtype", "=", "'snv'", ",", "mode", "=", "'vcf'", ",", "ped_svg", "=", "None", ")", ":", "new_case", "=", "Case", "(", "case_id", "=", "case_obj", ".", "case_id", ",", "name", "=", "case_obj", ".", "name", ",", "variant_source", "=", "case_obj", ".", "variant_source", ",", "variant_type", "=", "vtype", ",", "variant_mode", "=", "mode", ",", "pedigree", "=", "ped_svg", ",", "compressed", "=", "case_obj", ".", "compressed", ",", "tabix_index", "=", "case_obj", ".", "tabix_index", ")", "# build individuals", "inds", "=", "[", "Individual", "(", "ind_id", "=", "ind", ".", "ind_id", ",", "name", "=", "ind", ".", "name", ",", "mother", "=", "ind", ".", "mother", ",", "father", "=", "ind", ".", "father", ",", "sex", "=", "ind", ".", "sex", ",", "phenotype", "=", "ind", ".", "phenotype", ",", "ind_index", "=", "ind", ".", "ind_index", ",", "variant_source", "=", "ind", ".", "variant_source", ",", "bam_path", "=", "ind", ".", "bam_path", ",", ")", "for", "ind", "in", "case_obj", ".", "individuals", "]", "new_case", ".", "individuals", "=", "inds", "if", "self", ".", "case", "(", "new_case", ".", "case_id", ")", ":", "logger", ".", "warning", "(", "\"Case already exists in database!\"", ")", "else", ":", "self", ".", "session", ".", "add", "(", "new_case", ")", "self", ".", "save", "(", ")", "return", "new_case" ]
Load a case with individuals. Args: case_obj (puzzle.models.Case): initialized case model
[ "Load", "a", "case", "with", "individuals", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/case.py#L15-L49
train
robinandeer/puzzle
puzzle/plugins/sql/mixins/case.py
CaseMixin.individuals
def individuals(self, ind_ids=None): """Fetch all individuals from the database.""" query = self.query(Individual) if ind_ids: query = query.filter(Individual.ind_id.in_(ind_ids)) return query
python
def individuals(self, ind_ids=None): """Fetch all individuals from the database.""" query = self.query(Individual) if ind_ids: query = query.filter(Individual.ind_id.in_(ind_ids)) return query
[ "def", "individuals", "(", "self", ",", "ind_ids", "=", "None", ")", ":", "query", "=", "self", ".", "query", "(", "Individual", ")", "if", "ind_ids", ":", "query", "=", "query", ".", "filter", "(", "Individual", ".", "ind_id", ".", "in_", "(", "ind_ids", ")", ")", "return", "query" ]
Fetch all individuals from the database.
[ "Fetch", "all", "individuals", "from", "the", "database", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/case.py#L92-L97
train
robinandeer/puzzle
puzzle/models/sql/models.py
Case.case_comments
def case_comments(self): """Return only comments made on the case.""" comments = (comment for comment in self.comments if comment.variant_id is None) return comments
python
def case_comments(self): """Return only comments made on the case.""" comments = (comment for comment in self.comments if comment.variant_id is None) return comments
[ "def", "case_comments", "(", "self", ")", ":", "comments", "=", "(", "comment", "for", "comment", "in", "self", ".", "comments", "if", "comment", ".", "variant_id", "is", "None", ")", "return", "comments" ]
Return only comments made on the case.
[ "Return", "only", "comments", "made", "on", "the", "case", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/sql/models.py#L55-L59
train
basecrm/basecrm-python
basecrm/http_client.py
HttpClient.put
def put(self, url, body=None, **kwargs): """ Send a PUT request. :param str url: Sub URL for the request. You MUST NOT specify the base url or the api version prefix. :param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded. :param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`. :return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text) :rtype: tuple """ return self.request('put', url, body=body, **kwargs)
python
def put(self, url, body=None, **kwargs): """ Send a PUT request. :param str url: Sub URL for the request. You MUST NOT specify the base url or the api version prefix. :param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded. :param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`. :return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text) :rtype: tuple """ return self.request('put', url, body=body, **kwargs)
[ "def", "put", "(", "self", ",", "url", ",", "body", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "request", "(", "'put'", ",", "url", ",", "body", "=", "body", ",", "*", "*", "kwargs", ")" ]
Send a PUT request. :param str url: Sub URL for the request. You MUST NOT specify the base url or the api version prefix. :param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded. :param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`. :return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text) :rtype: tuple
[ "Send", "a", "PUT", "request", "." ]
7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/http_client.py#L62-L73
train
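Every verb helper on this client returns the same three-tuple, so a PUT call reads as below (resource path and payload are illustrative; envelope wrapping and JSON encoding happen inside request):

# Assumes a configured basecrm HttpClient instance named `client`.
status, headers, data = client.put('/contacts/1', body={'first_name': 'Ada'})
if status == 200:
    print(data)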
eleme/meepo
meepo/apps/replicator/queue.py
QueueReplicator.event
def event(self, *topics, **kwargs): """Topic callback registry. callback func should receive two args: topic and pk, and then process the replication job. Note: The callback func must return True/False. When passed a list of pks, the func should return a list of True/False with the same length of pks. :param topics: a list of topics :param workers: how many workers to process this topic :param multi: whether to pass multiple pks :param queue_limit: when queue size is larger than the limit, the worker should run the deduplication procedure """ workers = kwargs.pop("workers", 1) multi = kwargs.pop("multi", False) queue_limit = kwargs.pop("queue_limit", 10000) def wrapper(func): for topic in topics: queues = [Queue() for _ in range(workers)] hash_ring = ketama.Continuum() for q in queues: hash_ring[str(hash(q))] = q self.worker_queues[topic] = hash_ring self.workers[topic] = WorkerPool( queues, topic, func, multi=multi, queue_limit=queue_limit, logger_name="%s.%s" % (self.name, topic)) self.socket.setsockopt(zmq.SUBSCRIBE, asbytes(topic)) return func return wrapper
python
def event(self, *topics, **kwargs): """Topic callback registry. callback func should receive two args: topic and pk, and then process the replication job. Note: The callback func must return True/False. When passed a list of pks, the func should return a list of True/False with the same length of pks. :param topics: a list of topics :param workers: how many workers to process this topic :param multi: whether to pass multiple pks :param queue_limit: when queue size is larger than the limit, the worker should run the deduplication procedure """ workers = kwargs.pop("workers", 1) multi = kwargs.pop("multi", False) queue_limit = kwargs.pop("queue_limit", 10000) def wrapper(func): for topic in topics: queues = [Queue() for _ in range(workers)] hash_ring = ketama.Continuum() for q in queues: hash_ring[str(hash(q))] = q self.worker_queues[topic] = hash_ring self.workers[topic] = WorkerPool( queues, topic, func, multi=multi, queue_limit=queue_limit, logger_name="%s.%s" % (self.name, topic)) self.socket.setsockopt(zmq.SUBSCRIBE, asbytes(topic)) return func return wrapper
[ "def", "event", "(", "self", ",", "*", "topics", ",", "*", "*", "kwargs", ")", ":", "workers", "=", "kwargs", ".", "pop", "(", "\"workers\"", ",", "1", ")", "multi", "=", "kwargs", ".", "pop", "(", "\"multi\"", ",", "False", ")", "queue_limit", "=", "kwargs", ".", "pop", "(", "\"queue_limit\"", ",", "10000", ")", "def", "wrapper", "(", "func", ")", ":", "for", "topic", "in", "topics", ":", "queues", "=", "[", "Queue", "(", ")", "for", "_", "in", "range", "(", "workers", ")", "]", "hash_ring", "=", "ketama", ".", "Continuum", "(", ")", "for", "q", "in", "queues", ":", "hash_ring", "[", "str", "(", "hash", "(", "q", ")", ")", "]", "=", "q", "self", ".", "worker_queues", "[", "topic", "]", "=", "hash_ring", "self", ".", "workers", "[", "topic", "]", "=", "WorkerPool", "(", "queues", ",", "topic", ",", "func", ",", "multi", "=", "multi", ",", "queue_limit", "=", "queue_limit", ",", "logger_name", "=", "\"%s.%s\"", "%", "(", "self", ".", "name", ",", "topic", ")", ")", "self", ".", "socket", ".", "setsockopt", "(", "zmq", ".", "SUBSCRIBE", ",", "asbytes", "(", "topic", ")", ")", "return", "func", "return", "wrapper" ]
Topic callback registry. The callback func should receive two args, topic and pk, and then process the replication job. Note: the callback func must return True/False. When passed a list of pks, the func should return a list of True/False of the same length as pks. :param topics: a list of topics :param workers: how many workers to process this topic :param multi: whether to pass multiple pks at once :param queue_limit: when the queue size exceeds this limit, the worker runs a deduplication procedure
[ "Topic", "callback", "registry", "." ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/replicator/queue.py#L31-L63
train
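A hedged wiring sketch for the event() registry above; the QueueReplicator constructor argument, the topic name, and the sync_row body are illustrative, not part of the documented meepo API.

from meepo.apps.replicator.queue import QueueReplicator

replicator = QueueReplicator('tcp://127.0.0.1:5555')  # listen address: assumption

def sync_row(topic, pk):
    return True  # placeholder for the real replication work

@replicator.event('order_update', workers=2, multi=True, queue_limit=5000)
def on_order_update(topic, pks):
    # with multi=True the callback gets a list of pks and must return
    # a list of True/False of the same length
    return [sync_row(topic, pk) for pk in pks]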
eleme/meepo
meepo/apps/replicator/queue.py
QueueReplicator.run
def run(self): """Run the replicator. Main process receive messages and distribute them to worker queues. """ for worker_pool in self.workers.values(): worker_pool.start() if isinstance(self.listen, list): for i in self.listen: self.socket.connect(i) else: self.socket.connect(self.listen) try: while True: msg = self.socket.recv_string() lst = msg.split() if len(lst) == 2: topic, pks = lst[0], [lst[1], ] elif len(lst) > 2: topic, pks = lst[0], lst[1:] else: self.logger.error("msg corrupt -> %s" % msg) continue self.logger.debug("replicator: {0} -> {1}".format(topic, pks)) for pk in pks: self.worker_queues[topic][str(hash(pk))].put(pk) except Exception as e: self.logger.exception(e) finally: for worker_pool in self.workers.values(): worker_pool.terminate()
python
def run(self): """Run the replicator. Main process receive messages and distribute them to worker queues. """ for worker_pool in self.workers.values(): worker_pool.start() if isinstance(self.listen, list): for i in self.listen: self.socket.connect(i) else: self.socket.connect(self.listen) try: while True: msg = self.socket.recv_string() lst = msg.split() if len(lst) == 2: topic, pks = lst[0], [lst[1], ] elif len(lst) > 2: topic, pks = lst[0], lst[1:] else: self.logger.error("msg corrupt -> %s" % msg) continue self.logger.debug("replicator: {0} -> {1}".format(topic, pks)) for pk in pks: self.worker_queues[topic][str(hash(pk))].put(pk) except Exception as e: self.logger.exception(e) finally: for worker_pool in self.workers.values(): worker_pool.terminate()
[ "def", "run", "(", "self", ")", ":", "for", "worker_pool", "in", "self", ".", "workers", ".", "values", "(", ")", ":", "worker_pool", ".", "start", "(", ")", "if", "isinstance", "(", "self", ".", "listen", ",", "list", ")", ":", "for", "i", "in", "self", ".", "listen", ":", "self", ".", "socket", ".", "connect", "(", "i", ")", "else", ":", "self", ".", "socket", ".", "connect", "(", "self", ".", "listen", ")", "try", ":", "while", "True", ":", "msg", "=", "self", ".", "socket", ".", "recv_string", "(", ")", "lst", "=", "msg", ".", "split", "(", ")", "if", "len", "(", "lst", ")", "==", "2", ":", "topic", ",", "pks", "=", "lst", "[", "0", "]", ",", "[", "lst", "[", "1", "]", ",", "]", "elif", "len", "(", "lst", ")", ">", "2", ":", "topic", ",", "pks", "=", "lst", "[", "0", "]", ",", "lst", "[", "1", ":", "]", "else", ":", "self", ".", "logger", ".", "error", "(", "\"msg corrupt -> %s\"", "%", "msg", ")", "continue", "self", ".", "logger", ".", "debug", "(", "\"replicator: {0} -> {1}\"", ".", "format", "(", "topic", ",", "pks", ")", ")", "for", "pk", "in", "pks", ":", "self", ".", "worker_queues", "[", "topic", "]", "[", "str", "(", "hash", "(", "pk", ")", ")", "]", ".", "put", "(", "pk", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "finally", ":", "for", "worker_pool", "in", "self", ".", "workers", ".", "values", "(", ")", ":", "worker_pool", ".", "terminate", "(", ")" ]
Run the replicator. The main process receives messages and distributes them to worker queues.
[ "Run", "the", "replicator", "." ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/replicator/queue.py#L65-L98
train
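The message format run() expects can be illustrated without ZeroMQ; this is plain Python mirroring the dispatch logic above.

msg = 'order_update 42 43 44'          # '<topic> <pk> [<pk> ...]'
lst = msg.split()
topic, pks = lst[0], lst[1:]
for pk in pks:
    # in run() this key picks a worker queue off the ketama hash ring,
    # so a given pk is always handled by the same worker
    ring_key = str(hash(pk))
    print(topic, pk, ring_key)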
eleme/meepo
meepo/pub/sqlalchemy.py
sqlalchemy_pub._pk
def _pk(self, obj): """Get pk values from object :param obj: sqlalchemy object """ pk_values = tuple(getattr(obj, c.name) for c in obj.__mapper__.primary_key) if len(pk_values) == 1: return pk_values[0] return pk_values
python
def _pk(self, obj): """Get pk values from object :param obj: sqlalchemy object """ pk_values = tuple(getattr(obj, c.name) for c in obj.__mapper__.primary_key) if len(pk_values) == 1: return pk_values[0] return pk_values
[ "def", "_pk", "(", "self", ",", "obj", ")", ":", "pk_values", "=", "tuple", "(", "getattr", "(", "obj", ",", "c", ".", "name", ")", "for", "c", "in", "obj", ".", "__mapper__", ".", "primary_key", ")", "if", "len", "(", "pk_values", ")", "==", "1", ":", "return", "pk_values", "[", "0", "]", "return", "pk_values" ]
Get pk values from object :param obj: sqlalchemy object
[ "Get", "pk", "values", "from", "object" ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L135-L144
train
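A runnable sketch of the same pk-extraction rule against a declarative model (SQLAlchemy 1.4+ import path; the model itself is made up for illustration).

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class OrderItem(Base):
    __tablename__ = 'order_item'
    order_id = Column(Integer, primary_key=True)
    item_id = Column(Integer, primary_key=True)

obj = OrderItem(order_id=1, item_id=2)
pk = tuple(getattr(obj, c.name) for c in obj.__mapper__.primary_key)
print(pk[0] if len(pk) == 1 else pk)   # composite key -> (1, 2)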
eleme/meepo
meepo/pub/sqlalchemy.py
sqlalchemy_pub.session_update
def session_update(self, session, *_): """Record the sqlalchemy object states in the middle of session, prepare the events for the final pub in session_commit. """ self._session_init(session) session.pending_write |= set(session.new) session.pending_update |= set(session.dirty) session.pending_delete |= set(session.deleted) self.logger.debug("%s - session_update" % session.meepo_unique_id)
python
def session_update(self, session, *_): """Record the sqlalchemy object states in the middle of session, prepare the events for the final pub in session_commit. """ self._session_init(session) session.pending_write |= set(session.new) session.pending_update |= set(session.dirty) session.pending_delete |= set(session.deleted) self.logger.debug("%s - session_update" % session.meepo_unique_id)
[ "def", "session_update", "(", "self", ",", "session", ",", "*", "_", ")", ":", "self", ".", "_session_init", "(", "session", ")", "session", ".", "pending_write", "|=", "set", "(", "session", ".", "new", ")", "session", ".", "pending_update", "|=", "set", "(", "session", ".", "dirty", ")", "session", ".", "pending_delete", "|=", "set", "(", "session", ".", "deleted", ")", "self", ".", "logger", ".", "debug", "(", "\"%s - session_update\"", "%", "session", ".", "meepo_unique_id", ")" ]
Record the sqlalchemy object states in the middle of a session and prepare the events for the final pub in session_commit.
[ "Record", "the", "sqlalchemy", "object", "states", "in", "the", "middle", "of", "session", "prepare", "the", "events", "for", "the", "final", "pub", "in", "session_commit", "." ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L201-L209
train
eleme/meepo
meepo/pub/sqlalchemy.py
sqlalchemy_pub.session_commit
def session_commit(self, session): """Pub the events after the session committed. This method should be linked to sqlalchemy "after_commit" event. """ # this may happen when there's nothing to commit if not hasattr(session, 'meepo_unique_id'): self.logger.debug("skipped - session_commit") return self._session_pub(session) self._session_del(session)
python
def session_commit(self, session): """Pub the events after the session committed. This method should be linked to sqlalchemy "after_commit" event. """ # this may happen when there's nothing to commit if not hasattr(session, 'meepo_unique_id'): self.logger.debug("skipped - session_commit") return self._session_pub(session) self._session_del(session)
[ "def", "session_commit", "(", "self", ",", "session", ")", ":", "# this may happen when there's nothing to commit", "if", "not", "hasattr", "(", "session", ",", "'meepo_unique_id'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"skipped - session_commit\"", ")", "return", "self", ".", "_session_pub", "(", "session", ")", "self", ".", "_session_del", "(", "session", ")" ]
Pub the events after the session committed. This method should be linked to sqlalchemy "after_commit" event.
[ "Pub", "the", "events", "after", "the", "session", "committed", "." ]
8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L211-L222
train
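How these two hooks line up with SQLAlchemy's session events, as a sketch; whether sqlalchemy_pub installs them itself, and its constructor signature, are assumptions to check against meepo's docs.

from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
from meepo.pub.sqlalchemy import sqlalchemy_pub

Session = sessionmaker()
pub = sqlalchemy_pub(Session)   # constructor arguments: assumption
# session_update(session, *_) fits before_flush(session, ctx, instances);
# session_commit(session) fits after_commit(session)
event.listen(Session, 'before_flush', pub.session_update)
event.listen(Session, 'after_commit', pub.session_commit)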
tswicegood/Dolt
dolt/helpers.py
add_basic_auth
def add_basic_auth(dolt, username, password): """ Send basic auth username and password. Normally you can use httplib2.Http.add_credentials() to add username and password. However this has two disadvantages. 1. Some poorly implemented APIs require basic auth but don't send a "401 Authorization Required". Httplib2 won't send basic auth unless the server responds this way (see http://code.google.com/p/httplib2/issues/detail?id=130) 2. Doing a request just to get a "401 Authorization Required" header is a waste of time and bandwidth. If you know you need basic auth, you might as well send it right up front. By using `with_basic_auth`, the username and password will be sent proactively without waiting for a 401 header. """ return dolt.with_headers( Authorization='Basic %s' % base64.b64encode('%s:%s' % (username, password)).strip() )
python
def add_basic_auth(dolt, username, password): """ Send basic auth username and password. Normally you can use httplib2.Http.add_credentials() to add username and password. However this has two disadvantages. 1. Some poorly implemented APIs require basic auth but don't send a "401 Authorization Required". Httplib2 won't send basic auth unless the server responds this way (see http://code.google.com/p/httplib2/issues/detail?id=130) 2. Doing a request just to get a "401 Authorization Required" header is a waste of time and bandwidth. If you know you need basic auth, you might as well send it right up front. By using `with_basic_auth`, the username and password will be sent proactively without waiting for a 401 header. """ return dolt.with_headers( Authorization='Basic %s' % base64.b64encode('%s:%s' % (username, password)).strip() )
[ "def", "add_basic_auth", "(", "dolt", ",", "username", ",", "password", ")", ":", "return", "dolt", ".", "with_headers", "(", "Authorization", "=", "'Basic %s'", "%", "base64", ".", "b64encode", "(", "'%s:%s'", "%", "(", "username", ",", "password", ")", ")", ".", "strip", "(", ")", ")" ]
Send basic auth username and password. Normally you can use httplib2.Http.add_credentials() to add username and password. However this has two disadvantages. 1. Some poorly implemented APIs require basic auth but don't send a "401 Authorization Required". Httplib2 won't send basic auth unless the server responds this way (see http://code.google.com/p/httplib2/issues/detail?id=130) 2. Doing a request just to get a "401 Authorization Required" header is a waste of time and bandwidth. If you know you need basic auth, you might as well send it right up front. By using `with_basic_auth`, the username and password will be sent proactively without waiting for a 401 header.
[ "Send", "basic", "auth", "username", "and", "password", "." ]
e0da1918b7db18f885734a89f824b9e173cc30a5
https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/helpers.py#L3-L22
train
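One caveat worth showing in code: base64.b64encode('%s:%s' % ...) works on Python 2, where str is bytes, but raises TypeError on Python 3. A py3-safe equivalent of the header construction (standalone, not part of Dolt):

import base64

def basic_auth_header(username, password):
    # encode to bytes before base64, decode back to str for the header
    token = base64.b64encode(
        '{0}:{1}'.format(username, password).encode('utf-8'))
    return {'Authorization': 'Basic ' + token.decode('ascii')}

print(basic_auth_header('alice', 's3cret'))
# {'Authorization': 'Basic YWxpY2U6czNjcmV0'}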
robinandeer/puzzle
puzzle/plugins/gemini/mixins/variant_extras/genotypes.py
GenotypeExtras._add_genotypes
def _add_genotypes(self, variant_obj, gemini_variant, case_id, individual_objs): """Add the genotypes for a variant for all individuals Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow): The gemini variant case_id (str): related case id individual_objs (list(dict)): A list of Individuals """ for ind in individual_objs: index = ind.ind_index variant_obj.add_individual(Genotype( sample_id=ind.ind_id, genotype=gemini_variant['gts'][index], case_id=case_id, phenotype=ind.phenotype, ref_depth=gemini_variant['gt_ref_depths'][index], alt_depth=gemini_variant['gt_alt_depths'][index], depth=gemini_variant['gt_depths'][index], genotype_quality=gemini_variant['gt_quals'][index] ))
python
def _add_genotypes(self, variant_obj, gemini_variant, case_id, individual_objs): """Add the genotypes for a variant for all individuals Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow): The gemini variant case_id (str): related case id individual_objs (list(dict)): A list of Individuals """ for ind in individual_objs: index = ind.ind_index variant_obj.add_individual(Genotype( sample_id=ind.ind_id, genotype=gemini_variant['gts'][index], case_id=case_id, phenotype=ind.phenotype, ref_depth=gemini_variant['gt_ref_depths'][index], alt_depth=gemini_variant['gt_alt_depths'][index], depth=gemini_variant['gt_depths'][index], genotype_quality=gemini_variant['gt_quals'][index] ))
[ "def", "_add_genotypes", "(", "self", ",", "variant_obj", ",", "gemini_variant", ",", "case_id", ",", "individual_objs", ")", ":", "for", "ind", "in", "individual_objs", ":", "index", "=", "ind", ".", "ind_index", "variant_obj", ".", "add_individual", "(", "Genotype", "(", "sample_id", "=", "ind", ".", "ind_id", ",", "genotype", "=", "gemini_variant", "[", "'gts'", "]", "[", "index", "]", ",", "case_id", "=", "case_id", ",", "phenotype", "=", "ind", ".", "phenotype", ",", "ref_depth", "=", "gemini_variant", "[", "'gt_ref_depths'", "]", "[", "index", "]", ",", "alt_depth", "=", "gemini_variant", "[", "'gt_alt_depths'", "]", "[", "index", "]", ",", "depth", "=", "gemini_variant", "[", "'gt_depths'", "]", "[", "index", "]", ",", "genotype_quality", "=", "gemini_variant", "[", "'gt_quals'", "]", "[", "index", "]", ")", ")" ]
Add the genotypes for a variant for all individuals Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow): The gemini variant case_id (str): related case id individual_objs (list(dict)): A list of Individuals
[ "Add", "the", "genotypes", "for", "a", "variant", "for", "all", "individuals" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant_extras/genotypes.py#L11-L33
train
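A toy run of the per-individual indexing used above; the row is a plain dict standing in for a GeminiQueryRow, and all values are made up.

from collections import namedtuple

Ind = namedtuple('Ind', ['ind_id', 'ind_index', 'phenotype'])
inds = [Ind('NA12877', 0, '2'), Ind('NA12878', 1, '1')]
row = {'gts': ['0/1', '0/0'], 'gt_ref_depths': [10, 12],
       'gt_alt_depths': [8, 0], 'gt_depths': [18, 12],
       'gt_quals': [99, 60]}

for ind in inds:
    i = ind.ind_index  # each gt_* array is ordered by individual index
    print(ind.ind_id, row['gts'][i], row['gt_depths'][i], row['gt_quals'][i])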
jam31118/vis
vis/ani.py
process_frames_argument
def process_frames_argument(frames): """ Check and process 'frames' argument into a proper iterable for an animation object ## Arguments # frames : a seed for an integer-type iterable that is used as a sequence of frame indices - if integer or integer-valued float (e.g. 1.0): The 'frames' is interpreted as the number of total frames and the sequence frame indices becomes [ 0, 1, 2, ..., 'frames' - 1 ] which is equivalent to range('frames'). - if array-like: All elements in 'frames' should be integer or integer-valued float. Then, the 'frames' itself is used as a sequence of frame indices. """ result = None if np.iterable(frames): try: frames_arr = np.array(frames) except: raise TypeError("'frames' should be convertable to numpy.array") for idx in range(len(frames_arr)): frame_idx = frames_arr[idx] assert is_real_number(frame_idx) assert int(frame_idx) == frame_idx frames_arr[idx] = int(frame_idx) #self.frames = frames_arr result = frames_arr elif is_real_number(frames): assert int(frames) == frames frames = int(frames) #self.frames = range(frames) result = range(frames) return result
python
def process_frames_argument(frames): """ Check and process 'frames' argument into a proper iterable for an animation object ## Arguments # frames : a seed for an integer-type iterable that is used as a sequence of frame indices - if integer or integer-valued float (e.g. 1.0): The 'frames' is interpreted as the number of total frames and the sequence frame indices becomes [ 0, 1, 2, ..., 'frames' - 1 ] which is equivalent to range('frames'). - if array-like: All elements in 'frames' should be integer or integer-valued float. Then, the 'frames' itself is used as a sequence of frame indices. """ result = None if np.iterable(frames): try: frames_arr = np.array(frames) except: raise TypeError("'frames' should be convertable to numpy.array") for idx in range(len(frames_arr)): frame_idx = frames_arr[idx] assert is_real_number(frame_idx) assert int(frame_idx) == frame_idx frames_arr[idx] = int(frame_idx) #self.frames = frames_arr result = frames_arr elif is_real_number(frames): assert int(frames) == frames frames = int(frames) #self.frames = range(frames) result = range(frames) return result
[ "def", "process_frames_argument", "(", "frames", ")", ":", "result", "=", "None", "if", "np", ".", "iterable", "(", "frames", ")", ":", "try", ":", "frames_arr", "=", "np", ".", "array", "(", "frames", ")", "except", ":", "raise", "TypeError", "(", "\"'frames' should be convertable to numpy.array\"", ")", "for", "idx", "in", "range", "(", "len", "(", "frames_arr", ")", ")", ":", "frame_idx", "=", "frames_arr", "[", "idx", "]", "assert", "is_real_number", "(", "frame_idx", ")", "assert", "int", "(", "frame_idx", ")", "==", "frame_idx", "frames_arr", "[", "idx", "]", "=", "int", "(", "frame_idx", ")", "#self.frames = frames_arr", "result", "=", "frames_arr", "elif", "is_real_number", "(", "frames", ")", ":", "assert", "int", "(", "frames", ")", "==", "frames", "frames", "=", "int", "(", "frames", ")", "#self.frames = range(frames)", "result", "=", "range", "(", "frames", ")", "return", "result" ]
Check and process 'frames' argument into a proper iterable for an animation object ## Arguments # frames : a seed for an integer-type iterable that is used as a sequence of frame indices - if integer or integer-valued float (e.g. 1.0): The 'frames' is interpreted as the number of total frames and the sequence frame indices becomes [ 0, 1, 2, ..., 'frames' - 1 ] which is equivalent to range('frames'). - if array-like: All elements in 'frames' should be integer or integer-valued float. Then, the 'frames' itself is used as a sequence of frame indices.
[ "Check", "and", "process", "frames", "argument", "into", "a", "proper", "iterable", "for", "an", "animation", "object" ]
965ebec102c539b323d5756fef04153ac71e50d9
https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/ani.py#L86-L120
train
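Behavior sketch, assuming the function above is importable from vis.ani (module path taken from the record):

import numpy as np
from vis.ani import process_frames_argument

print(process_frames_argument(5))            # -> range(0, 5)
print(process_frames_argument(5.0))          # integer-valued float accepted
print(process_frames_argument([0, 2.0, 4]))  # -> array([0., 2., 4.])
# note: a float input array keeps its dtype even after the int coercion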
robinandeer/puzzle
puzzle/cli/init.py
init
def init(ctx, reset, root, phenomizer): """Initialize a database that store metadata Check if "root" dir exists, otherwise create the directory and build the database. If a database already exists, do nothing. """ configs = {} if root is None: root = ctx.obj.get('root') or os.path.expanduser("~/.puzzle") configs['root'] = root if os.path.isfile(root): logger.error("'root' can't be a file") ctx.abort() logger.info("Root directory is: {}".format(root)) db_path = os.path.join(root, 'puzzle_db.sqlite3') logger.info("db path is: {}".format(db_path)) resource_dir = os.path.join(root, 'resources') logger.info("resource dir is: {}".format(resource_dir)) if os.path.exists(resource_dir): logger.debug("Found puzzle directory: {0}".format(root)) if os.path.exists(resource_dir) and not reset: logger.warning("Puzzle db already in place") ctx.abort() else: logger.info("Create directory: {0}".format(resource_dir)) os.makedirs(resource_dir) logger.debug('Directory created') logger.debug('Connect to database and create tables') store = SqlStore(db_path) store.set_up(reset=reset) if phenomizer: phenomizer = [str(term) for term in phenomizer] configs['phenomizer_auth'] = phenomizer if not ctx.obj.get('config_path'): logger.info("Creating puzzle config file in {0}".format(PUZZLE_CONFIG_PATH)) with codecs.open(PUZZLE_CONFIG_PATH, 'w', encoding='utf-8') as f: f.write(yaml.dump(configs)) logger.debug("Config created")
python
def init(ctx, reset, root, phenomizer): """Initialize a database that store metadata Check if "root" dir exists, otherwise create the directory and build the database. If a database already exists, do nothing. """ configs = {} if root is None: root = ctx.obj.get('root') or os.path.expanduser("~/.puzzle") configs['root'] = root if os.path.isfile(root): logger.error("'root' can't be a file") ctx.abort() logger.info("Root directory is: {}".format(root)) db_path = os.path.join(root, 'puzzle_db.sqlite3') logger.info("db path is: {}".format(db_path)) resource_dir = os.path.join(root, 'resources') logger.info("resource dir is: {}".format(resource_dir)) if os.path.exists(resource_dir): logger.debug("Found puzzle directory: {0}".format(root)) if os.path.exists(resource_dir) and not reset: logger.warning("Puzzle db already in place") ctx.abort() else: logger.info("Create directory: {0}".format(resource_dir)) os.makedirs(resource_dir) logger.debug('Directory created') logger.debug('Connect to database and create tables') store = SqlStore(db_path) store.set_up(reset=reset) if phenomizer: phenomizer = [str(term) for term in phenomizer] configs['phenomizer_auth'] = phenomizer if not ctx.obj.get('config_path'): logger.info("Creating puzzle config file in {0}".format(PUZZLE_CONFIG_PATH)) with codecs.open(PUZZLE_CONFIG_PATH, 'w', encoding='utf-8') as f: f.write(yaml.dump(configs)) logger.debug("Config created")
[ "def", "init", "(", "ctx", ",", "reset", ",", "root", ",", "phenomizer", ")", ":", "configs", "=", "{", "}", "if", "root", "is", "None", ":", "root", "=", "ctx", ".", "obj", ".", "get", "(", "'root'", ")", "or", "os", ".", "path", ".", "expanduser", "(", "\"~/.puzzle\"", ")", "configs", "[", "'root'", "]", "=", "root", "if", "os", ".", "path", ".", "isfile", "(", "root", ")", ":", "logger", ".", "error", "(", "\"'root' can't be a file\"", ")", "ctx", ".", "abort", "(", ")", "logger", ".", "info", "(", "\"Root directory is: {}\"", ".", "format", "(", "root", ")", ")", "db_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "'puzzle_db.sqlite3'", ")", "logger", ".", "info", "(", "\"db path is: {}\"", ".", "format", "(", "db_path", ")", ")", "resource_dir", "=", "os", ".", "path", ".", "join", "(", "root", ",", "'resources'", ")", "logger", ".", "info", "(", "\"resource dir is: {}\"", ".", "format", "(", "resource_dir", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "resource_dir", ")", ":", "logger", ".", "debug", "(", "\"Found puzzle directory: {0}\"", ".", "format", "(", "root", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "resource_dir", ")", "and", "not", "reset", ":", "logger", ".", "warning", "(", "\"Puzzle db already in place\"", ")", "ctx", ".", "abort", "(", ")", "else", ":", "logger", ".", "info", "(", "\"Create directory: {0}\"", ".", "format", "(", "resource_dir", ")", ")", "os", ".", "makedirs", "(", "resource_dir", ")", "logger", ".", "debug", "(", "'Directory created'", ")", "logger", ".", "debug", "(", "'Connect to database and create tables'", ")", "store", "=", "SqlStore", "(", "db_path", ")", "store", ".", "set_up", "(", "reset", "=", "reset", ")", "if", "phenomizer", ":", "phenomizer", "=", "[", "str", "(", "term", ")", "for", "term", "in", "phenomizer", "]", "configs", "[", "'phenomizer_auth'", "]", "=", "phenomizer", "if", "not", "ctx", ".", "obj", ".", "get", "(", "'config_path'", ")", ":", "logger", ".", "info", "(", "\"Creating puzzle config file in {0}\"", ".", "format", "(", "PUZZLE_CONFIG_PATH", ")", ")", "with", "codecs", ".", "open", "(", "PUZZLE_CONFIG_PATH", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "yaml", ".", "dump", "(", "configs", ")", ")", "logger", ".", "debug", "(", "\"Config created\"", ")" ]
Initialize a database that stores metadata Check if the "root" dir exists, otherwise create the directory and build the database. If a database already exists, do nothing.
[ "Initialize", "a", "database", "that", "store", "metadata" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/init.py#L23-L69
train
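Exercising the command with click's test runner; the option names are inferred from the function parameters and the import path from the record, so treat both as assumptions.

from click.testing import CliRunner
from puzzle.cli.init import init

runner = CliRunner()
# obj={} supplies the ctx.obj mapping the command reads 'root' from
result = runner.invoke(init, ['--root', '/tmp/puzzle-demo'], obj={})
print(result.exit_code, result.output)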
seb-m/tss
tss.py
encode
def encode(value, encoding='utf-8', encoding_errors='strict'): """ Return a bytestring representation of the value. """ if isinstance(value, bytes): return value if not isinstance(value, basestring): value = str(value) if isinstance(value, unicode): value = value.encode(encoding, encoding_errors) return value
python
def encode(value, encoding='utf-8', encoding_errors='strict'): """ Return a bytestring representation of the value. """ if isinstance(value, bytes): return value if not isinstance(value, basestring): value = str(value) if isinstance(value, unicode): value = value.encode(encoding, encoding_errors) return value
[ "def", "encode", "(", "value", ",", "encoding", "=", "'utf-8'", ",", "encoding_errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "value", "if", "not", "isinstance", "(", "value", ",", "basestring", ")", ":", "value", "=", "str", "(", "value", ")", "if", "isinstance", "(", "value", ",", "unicode", ")", ":", "value", "=", "value", ".", "encode", "(", "encoding", ",", "encoding_errors", ")", "return", "value" ]
Return a bytestring representation of the value.
[ "Return", "a", "bytestring", "representation", "of", "the", "value", "." ]
ab45176b8585ba6bbbcaeffd21ec0c63f615dce0
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L53-L63
train
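The basestring/unicode names above are Python 2 only. A rough Python 3 equivalent of the same coercion logic (standalone, not part of tss):

def encode3(value, encoding='utf-8', encoding_errors='strict'):
    if isinstance(value, bytes):
        return value
    if not isinstance(value, str):
        value = str(value)       # e.g. ints become '42'
    return value.encode(encoding, encoding_errors)

assert encode3(b'raw') == b'raw'
assert encode3(42) == b'42'
assert encode3('caf\u00e9') == b'caf\xc3\xa9'   # UTF-8 bytes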
seb-m/tss
tss.py
share_secret
def share_secret(threshold, nshares, secret, identifier, hash_id=Hash.SHA256): """ Create nshares of the secret. threshold specifies the number of shares needed for reconstructing the secret value. A 0-16 bytes identifier must be provided. Optionally the secret is hashed with the algorithm specified by hash_id, a class attribute of Hash. This function must return a list of formatted shares or raises a TSSError exception if anything went wrong. """ if identifier is None: raise TSSError('an identifier must be provided') if not Hash.is_valid(hash_id): raise TSSError('invalid hash algorithm %s' % hash_id) secret = encode(secret) identifier = encode(identifier) if hash_id != Hash.NONE: secret += Hash.to_func(hash_id)(secret).digest() shares = generate_shares(threshold, nshares, secret) header = format_header(identifier, hash_id, threshold, len(secret) + 1) return [format_share(header, share) for share in shares]
python
def share_secret(threshold, nshares, secret, identifier, hash_id=Hash.SHA256): """ Create nshares of the secret. threshold specifies the number of shares needed for reconstructing the secret value. A 0-16 bytes identifier must be provided. Optionally the secret is hashed with the algorithm specified by hash_id, a class attribute of Hash. This function must return a list of formatted shares or raises a TSSError exception if anything went wrong. """ if identifier is None: raise TSSError('an identifier must be provided') if not Hash.is_valid(hash_id): raise TSSError('invalid hash algorithm %s' % hash_id) secret = encode(secret) identifier = encode(identifier) if hash_id != Hash.NONE: secret += Hash.to_func(hash_id)(secret).digest() shares = generate_shares(threshold, nshares, secret) header = format_header(identifier, hash_id, threshold, len(secret) + 1) return [format_share(header, share) for share in shares]
[ "def", "share_secret", "(", "threshold", ",", "nshares", ",", "secret", ",", "identifier", ",", "hash_id", "=", "Hash", ".", "SHA256", ")", ":", "if", "identifier", "is", "None", ":", "raise", "TSSError", "(", "'an identifier must be provided'", ")", "if", "not", "Hash", ".", "is_valid", "(", "hash_id", ")", ":", "raise", "TSSError", "(", "'invalid hash algorithm %s'", "%", "hash_id", ")", "secret", "=", "encode", "(", "secret", ")", "identifier", "=", "encode", "(", "identifier", ")", "if", "hash_id", "!=", "Hash", ".", "NONE", ":", "secret", "+=", "Hash", ".", "to_func", "(", "hash_id", ")", "(", "secret", ")", ".", "digest", "(", ")", "shares", "=", "generate_shares", "(", "threshold", ",", "nshares", ",", "secret", ")", "header", "=", "format_header", "(", "identifier", ",", "hash_id", ",", "threshold", ",", "len", "(", "secret", ")", "+", "1", ")", "return", "[", "format_share", "(", "header", ",", "share", ")", "for", "share", "in", "shares", "]" ]
Create nshares of the secret. threshold specifies the number of shares needed for reconstructing the secret value. A 0-16 byte identifier must be provided. Optionally the secret is hashed with the algorithm specified by hash_id, a class attribute of Hash. This function returns a list of formatted shares or raises a TSSError exception if anything goes wrong.
[ "Create", "nshares", "of", "the", "secret", ".", "threshold", "specifies", "the", "number", "of", "shares", "needed", "for", "reconstructing", "the", "secret", "value", ".", "A", "0", "-", "16", "bytes", "identifier", "must", "be", "provided", ".", "Optionally", "the", "secret", "is", "hashed", "with", "the", "algorithm", "specified", "by", "hash_id", "a", "class", "attribute", "of", "Hash", ".", "This", "function", "must", "return", "a", "list", "of", "formatted", "shares", "or", "raises", "a", "TSSError", "exception", "if", "anything", "went", "wrong", "." ]
ab45176b8585ba6bbbcaeffd21ec0c63f615dce0
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L212-L231
train
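Round-trip sketch; reconstruct_secret is this module's counterpart for recombining shares as I recall the tss API, so verify the name before relying on it.

import tss

shares = tss.share_secret(threshold=2, nshares=3,
                          secret='disaster recovery key',
                          identifier='backup-2015')
# any 2 of the 3 shares should be enough to recover the secret
recovered = tss.reconstruct_secret(shares[:2])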
robinandeer/puzzle
puzzle/utils/get_info.py
get_gene_symbols
def get_gene_symbols(chrom, start, stop): """Get the gene symbols that a interval overlaps""" gene_symbols = query_gene_symbol(chrom, start, stop) logger.debug("Found gene symbols: {0}".format(', '.join(gene_symbols))) return gene_symbols
python
def get_gene_symbols(chrom, start, stop): """Get the gene symbols that a interval overlaps""" gene_symbols = query_gene_symbol(chrom, start, stop) logger.debug("Found gene symbols: {0}".format(', '.join(gene_symbols))) return gene_symbols
[ "def", "get_gene_symbols", "(", "chrom", ",", "start", ",", "stop", ")", ":", "gene_symbols", "=", "query_gene_symbol", "(", "chrom", ",", "start", ",", "stop", ")", "logger", ".", "debug", "(", "\"Found gene symbols: {0}\"", ".", "format", "(", "', '", ".", "join", "(", "gene_symbols", ")", ")", ")", "return", "gene_symbols" ]
Get the gene symbols that an interval overlaps
[ "Get", "the", "gene", "symbols", "that", "a", "interval", "overlaps" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L23-L27
train
robinandeer/puzzle
puzzle/utils/get_info.py
get_gene_info
def get_gene_info(ensembl_ids=None, hgnc_symbols=None): """Return the genes info based on the transcripts found Args: ensembl_ids (Optional[list]): list of Ensembl gene ids hgnc_symbols (Optional[list]): list of HGNC gene symbols Returns: iterable: an iterable with `Gene` objects """ uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or [])) uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol in (hgnc_symbols or [])) genes = [] gene_data = [] if uniq_ensembl_ids: for ensembl_id in uniq_ensembl_ids: for res in query_gene(ensembl_id=ensembl_id): gene_data.append(res) elif uniq_hgnc_symbols: for hgnc_symbol in uniq_hgnc_symbols: query_res = query_gene(hgnc_symbol=hgnc_symbol) if query_res: for res in query_res: gene_data.append(res) else: # If no result we add just the symbol gene_data.append({ 'hgnc_symbol': hgnc_symbol, 'hgnc_id': None, 'ensembl_id': None, 'description': None, 'chrom': 'unknown', 'start': 0, 'stop': 0, 'hi_score': None, 'constraint_score': None, }) for gene in gene_data: genes.append(Gene( symbol=gene ['hgnc_symbol'], hgnc_id=gene['hgnc_id'], ensembl_id=gene['ensembl_id'], description=gene['description'], chrom=gene['chrom'], start=gene['start'], stop=gene['stop'], location=get_cytoband_coord(gene['chrom'], gene['start']), hi_score=gene['hi_score'], constraint_score=gene['constraint_score'], omim_number=get_omim_number(gene['hgnc_symbol']) )) return genes
python
def get_gene_info(ensembl_ids=None, hgnc_symbols=None): """Return the genes info based on the transcripts found Args: ensembl_ids (Optional[list]): list of Ensembl gene ids hgnc_symbols (Optional[list]): list of HGNC gene symbols Returns: iterable: an iterable with `Gene` objects """ uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or [])) uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol in (hgnc_symbols or [])) genes = [] gene_data = [] if uniq_ensembl_ids: for ensembl_id in uniq_ensembl_ids: for res in query_gene(ensembl_id=ensembl_id): gene_data.append(res) elif uniq_hgnc_symbols: for hgnc_symbol in uniq_hgnc_symbols: query_res = query_gene(hgnc_symbol=hgnc_symbol) if query_res: for res in query_res: gene_data.append(res) else: # If no result we add just the symbol gene_data.append({ 'hgnc_symbol': hgnc_symbol, 'hgnc_id': None, 'ensembl_id': None, 'description': None, 'chrom': 'unknown', 'start': 0, 'stop': 0, 'hi_score': None, 'constraint_score': None, }) for gene in gene_data: genes.append(Gene( symbol=gene ['hgnc_symbol'], hgnc_id=gene['hgnc_id'], ensembl_id=gene['ensembl_id'], description=gene['description'], chrom=gene['chrom'], start=gene['start'], stop=gene['stop'], location=get_cytoband_coord(gene['chrom'], gene['start']), hi_score=gene['hi_score'], constraint_score=gene['constraint_score'], omim_number=get_omim_number(gene['hgnc_symbol']) )) return genes
[ "def", "get_gene_info", "(", "ensembl_ids", "=", "None", ",", "hgnc_symbols", "=", "None", ")", ":", "uniq_ensembl_ids", "=", "set", "(", "ensembl_id", "for", "ensembl_id", "in", "(", "ensembl_ids", "or", "[", "]", ")", ")", "uniq_hgnc_symbols", "=", "set", "(", "hgnc_symbol", "for", "hgnc_symbol", "in", "(", "hgnc_symbols", "or", "[", "]", ")", ")", "genes", "=", "[", "]", "gene_data", "=", "[", "]", "if", "uniq_ensembl_ids", ":", "for", "ensembl_id", "in", "uniq_ensembl_ids", ":", "for", "res", "in", "query_gene", "(", "ensembl_id", "=", "ensembl_id", ")", ":", "gene_data", ".", "append", "(", "res", ")", "elif", "uniq_hgnc_symbols", ":", "for", "hgnc_symbol", "in", "uniq_hgnc_symbols", ":", "query_res", "=", "query_gene", "(", "hgnc_symbol", "=", "hgnc_symbol", ")", "if", "query_res", ":", "for", "res", "in", "query_res", ":", "gene_data", ".", "append", "(", "res", ")", "else", ":", "# If no result we add just the symbol", "gene_data", ".", "append", "(", "{", "'hgnc_symbol'", ":", "hgnc_symbol", ",", "'hgnc_id'", ":", "None", ",", "'ensembl_id'", ":", "None", ",", "'description'", ":", "None", ",", "'chrom'", ":", "'unknown'", ",", "'start'", ":", "0", ",", "'stop'", ":", "0", ",", "'hi_score'", ":", "None", ",", "'constraint_score'", ":", "None", ",", "}", ")", "for", "gene", "in", "gene_data", ":", "genes", ".", "append", "(", "Gene", "(", "symbol", "=", "gene", "[", "'hgnc_symbol'", "]", ",", "hgnc_id", "=", "gene", "[", "'hgnc_id'", "]", ",", "ensembl_id", "=", "gene", "[", "'ensembl_id'", "]", ",", "description", "=", "gene", "[", "'description'", "]", ",", "chrom", "=", "gene", "[", "'chrom'", "]", ",", "start", "=", "gene", "[", "'start'", "]", ",", "stop", "=", "gene", "[", "'stop'", "]", ",", "location", "=", "get_cytoband_coord", "(", "gene", "[", "'chrom'", "]", ",", "gene", "[", "'start'", "]", ")", ",", "hi_score", "=", "gene", "[", "'hi_score'", "]", ",", "constraint_score", "=", "gene", "[", "'constraint_score'", "]", ",", "omim_number", "=", "get_omim_number", "(", "gene", "[", "'hgnc_symbol'", "]", ")", ")", ")", "return", "genes" ]
Return the gene info based on the transcripts found Args: ensembl_ids (Optional[list]): list of Ensembl gene ids hgnc_symbols (Optional[list]): list of HGNC gene symbols Returns: iterable: an iterable with `Gene` objects
[ "Return", "the", "genes", "info", "based", "on", "the", "transcripts", "found" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L29-L83
train
robinandeer/puzzle
puzzle/utils/get_info.py
get_most_severe_consequence
def get_most_severe_consequence(transcripts): """Get the most severe consequence Go through all transcripts and get the most severe consequence Args: transcripts (list): A list of transcripts to evaluate Returns: most_severe_consequence (str): The most severe consequence """ most_severe_consequence = None most_severe_score = None for transcript in transcripts: for consequence in transcript['consequence'].split('&'): logger.debug("Checking severity score for consequence: {0}".format( consequence )) severity_score = SEVERITY_DICT.get( consequence ) logger.debug("Severity score found: {0}".format( severity_score )) if severity_score != None: if most_severe_score: if severity_score < most_severe_score: most_severe_consequence = consequence most_severe_score = severity_score else: most_severe_consequence = consequence most_severe_score = severity_score return most_severe_consequence
python
def get_most_severe_consequence(transcripts): """Get the most severe consequence Go through all transcripts and get the most severe consequence Args: transcripts (list): A list of transcripts to evaluate Returns: most_severe_consequence (str): The most severe consequence """ most_severe_consequence = None most_severe_score = None for transcript in transcripts: for consequence in transcript['consequence'].split('&'): logger.debug("Checking severity score for consequence: {0}".format( consequence )) severity_score = SEVERITY_DICT.get( consequence ) logger.debug("Severity score found: {0}".format( severity_score )) if severity_score != None: if most_severe_score: if severity_score < most_severe_score: most_severe_consequence = consequence most_severe_score = severity_score else: most_severe_consequence = consequence most_severe_score = severity_score return most_severe_consequence
[ "def", "get_most_severe_consequence", "(", "transcripts", ")", ":", "most_severe_consequence", "=", "None", "most_severe_score", "=", "None", "for", "transcript", "in", "transcripts", ":", "for", "consequence", "in", "transcript", "[", "'consequence'", "]", ".", "split", "(", "'&'", ")", ":", "logger", ".", "debug", "(", "\"Checking severity score for consequence: {0}\"", ".", "format", "(", "consequence", ")", ")", "severity_score", "=", "SEVERITY_DICT", ".", "get", "(", "consequence", ")", "logger", ".", "debug", "(", "\"Severity score found: {0}\"", ".", "format", "(", "severity_score", ")", ")", "if", "severity_score", "!=", "None", ":", "if", "most_severe_score", ":", "if", "severity_score", "<", "most_severe_score", ":", "most_severe_consequence", "=", "consequence", "most_severe_score", "=", "severity_score", "else", ":", "most_severe_consequence", "=", "consequence", "most_severe_score", "=", "severity_score", "return", "most_severe_consequence" ]
Get the most severe consequence Go through all transcripts and get the most severe consequence Args: transcripts (list): A list of transcripts to evaluate Returns: most_severe_consequence (str): The most severe consequence
[ "Get", "the", "most", "severe", "consequence" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L85-L119
train
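A toy run of the selection loop; this SEVERITY_DICT is a stand-in for the module's own table, where a lower score means a more severe consequence.

SEVERITY_DICT = {'stop_gained': 1, 'missense_variant': 5,
                 'synonymous_variant': 20}
transcripts = [{'consequence': 'missense_variant&synonymous_variant'},
               {'consequence': 'stop_gained'}]

best, best_score = None, None
for tx in transcripts:
    for csq in tx['consequence'].split('&'):
        score = SEVERITY_DICT.get(csq)
        if score is not None and (best_score is None or score < best_score):
            best, best_score = csq, score
print(best)  # stop_gained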
robinandeer/puzzle
puzzle/utils/get_info.py
get_cytoband_coord
def get_cytoband_coord(chrom, pos): """Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband """ chrom = chrom.strip('chr') pos = int(pos) result = None logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos)) if chrom in CYTOBANDS: for interval in CYTOBANDS[chrom][pos]: result = "{0}{1}".format(chrom, interval.data) return result
python
def get_cytoband_coord(chrom, pos): """Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband """ chrom = chrom.strip('chr') pos = int(pos) result = None logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos)) if chrom in CYTOBANDS: for interval in CYTOBANDS[chrom][pos]: result = "{0}{1}".format(chrom, interval.data) return result
[ "def", "get_cytoband_coord", "(", "chrom", ",", "pos", ")", ":", "chrom", "=", "chrom", ".", "strip", "(", "'chr'", ")", "pos", "=", "int", "(", "pos", ")", "result", "=", "None", "logger", ".", "debug", "(", "\"Finding Cytoband for chrom:{0} pos:{1}\"", ".", "format", "(", "chrom", ",", "pos", ")", ")", "if", "chrom", "in", "CYTOBANDS", ":", "for", "interval", "in", "CYTOBANDS", "[", "chrom", "]", "[", "pos", "]", ":", "result", "=", "\"{0}{1}\"", ".", "format", "(", "chrom", ",", "interval", ".", "data", ")", "return", "result" ]
Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband
[ "Get", "the", "cytoband", "coordinate", "for", "a", "position" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L121-L139
train
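A sketch of the CYTOBANDS lookup using intervaltree, which matches the interval.data access above; the band data here is made up, while the real module loads a full cytoband table.

from intervaltree import IntervalTree

CYTOBANDS = {'1': IntervalTree()}
CYTOBANDS['1'][0:2300000] = 'p36.33'    # band for the start of chr1

chrom, pos = '1', 1500000
for interval in CYTOBANDS[chrom][pos]:  # point query returns Interval objects
    print('{0}{1}'.format(chrom, interval.data))  # -> 1p36.33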
jreese/dotlink
dotlink/dotlink.py
Dotlink.parse_mapping
def parse_mapping(self, map_path, source=None, dotfiles=None): """Do a simple parse of the dotfile mapping, using semicolons to separate source file name from the target file paths.""" include_re = r"""^\s*#include\s+(".+"|'.+')""" include_re = re.compile(include_re, re.I) mapping_re = r"""^("[^"]+"|\'[^\']+\'|[^\'":]+)\s*(?::\s*(.*)\s*)?$""" mapping_re = re.compile(mapping_re) filename = None map_path = path.realpath(path.expanduser(map_path)) if path.isfile(map_path): filename = map_path elif path.isdir(map_path): # try finding a mapping in the target directory for map_name in '.dotfiles', 'dotfiles': candidate = path.join(map_path, map_name) if path.isfile(candidate): filename = candidate break if filename is None: raise ValueError('No dotfile mapping found in %s' % map_path) if source is None: source = path.dirname(map_path) if dotfiles is None: dotfiles = OrderedDict() lineno = 0 with open(filename) as fh: for line in fh: lineno += 1 content = line.strip() match = include_re.match(content) if match: include_path = match.group(1).strip('\'"') if (include_path.startswith('/') or include_path.startswith('~')): include_path = path.realpath( path.expanduser(include_path)) else: include_path = path.join(path.dirname(filename), include_path) if path.exists(include_path): self.log.debug('Recursively parsing mapping in %s', include_path) dotfiles = self.parse_mapping(include_path, dotfiles=dotfiles) else: self.log.warning('Include command points to file or ' 'directory that does not exist, "%s",' ' on line %d', include_path, lineno) if not content or content.startswith('#'): # comment line or empty line continue match = mapping_re.match(content) if match: source_path, target_path = match.groups() source_path = path.join(source, source_path.strip('\'"')) if source_path in dotfiles: self.log.warning('Duplicate dotfile source "%s" ' 'on line #%d', lineno) continue if target_path is None: target_path = source_path dotfiles[source_path] = target_path else: self.log.warning('Dotfile mapping regex failed on line ' '#%d', lineno) return dotfiles
python
def parse_mapping(self, map_path, source=None, dotfiles=None): """Do a simple parse of the dotfile mapping, using semicolons to separate source file name from the target file paths.""" include_re = r"""^\s*#include\s+(".+"|'.+')""" include_re = re.compile(include_re, re.I) mapping_re = r"""^("[^"]+"|\'[^\']+\'|[^\'":]+)\s*(?::\s*(.*)\s*)?$""" mapping_re = re.compile(mapping_re) filename = None map_path = path.realpath(path.expanduser(map_path)) if path.isfile(map_path): filename = map_path elif path.isdir(map_path): # try finding a mapping in the target directory for map_name in '.dotfiles', 'dotfiles': candidate = path.join(map_path, map_name) if path.isfile(candidate): filename = candidate break if filename is None: raise ValueError('No dotfile mapping found in %s' % map_path) if source is None: source = path.dirname(map_path) if dotfiles is None: dotfiles = OrderedDict() lineno = 0 with open(filename) as fh: for line in fh: lineno += 1 content = line.strip() match = include_re.match(content) if match: include_path = match.group(1).strip('\'"') if (include_path.startswith('/') or include_path.startswith('~')): include_path = path.realpath( path.expanduser(include_path)) else: include_path = path.join(path.dirname(filename), include_path) if path.exists(include_path): self.log.debug('Recursively parsing mapping in %s', include_path) dotfiles = self.parse_mapping(include_path, dotfiles=dotfiles) else: self.log.warning('Include command points to file or ' 'directory that does not exist, "%s",' ' on line %d', include_path, lineno) if not content or content.startswith('#'): # comment line or empty line continue match = mapping_re.match(content) if match: source_path, target_path = match.groups() source_path = path.join(source, source_path.strip('\'"')) if source_path in dotfiles: self.log.warning('Duplicate dotfile source "%s" ' 'on line #%d', lineno) continue if target_path is None: target_path = source_path dotfiles[source_path] = target_path else: self.log.warning('Dotfile mapping regex failed on line ' '#%d', lineno) return dotfiles
[ "def", "parse_mapping", "(", "self", ",", "map_path", ",", "source", "=", "None", ",", "dotfiles", "=", "None", ")", ":", "include_re", "=", "r\"\"\"^\\s*#include\\s+(\".+\"|'.+')\"\"\"", "include_re", "=", "re", ".", "compile", "(", "include_re", ",", "re", ".", "I", ")", "mapping_re", "=", "r\"\"\"^(\"[^\"]+\"|\\'[^\\']+\\'|[^\\'\":]+)\\s*(?::\\s*(.*)\\s*)?$\"\"\"", "mapping_re", "=", "re", ".", "compile", "(", "mapping_re", ")", "filename", "=", "None", "map_path", "=", "path", ".", "realpath", "(", "path", ".", "expanduser", "(", "map_path", ")", ")", "if", "path", ".", "isfile", "(", "map_path", ")", ":", "filename", "=", "map_path", "elif", "path", ".", "isdir", "(", "map_path", ")", ":", "# try finding a mapping in the target directory", "for", "map_name", "in", "'.dotfiles'", ",", "'dotfiles'", ":", "candidate", "=", "path", ".", "join", "(", "map_path", ",", "map_name", ")", "if", "path", ".", "isfile", "(", "candidate", ")", ":", "filename", "=", "candidate", "break", "if", "filename", "is", "None", ":", "raise", "ValueError", "(", "'No dotfile mapping found in %s'", "%", "map_path", ")", "if", "source", "is", "None", ":", "source", "=", "path", ".", "dirname", "(", "map_path", ")", "if", "dotfiles", "is", "None", ":", "dotfiles", "=", "OrderedDict", "(", ")", "lineno", "=", "0", "with", "open", "(", "filename", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "lineno", "+=", "1", "content", "=", "line", ".", "strip", "(", ")", "match", "=", "include_re", ".", "match", "(", "content", ")", "if", "match", ":", "include_path", "=", "match", ".", "group", "(", "1", ")", ".", "strip", "(", "'\\'\"'", ")", "if", "(", "include_path", ".", "startswith", "(", "'/'", ")", "or", "include_path", ".", "startswith", "(", "'~'", ")", ")", ":", "include_path", "=", "path", ".", "realpath", "(", "path", ".", "expanduser", "(", "include_path", ")", ")", "else", ":", "include_path", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "filename", ")", ",", "include_path", ")", "if", "path", ".", "exists", "(", "include_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Recursively parsing mapping in %s'", ",", "include_path", ")", "dotfiles", "=", "self", ".", "parse_mapping", "(", "include_path", ",", "dotfiles", "=", "dotfiles", ")", "else", ":", "self", ".", "log", ".", "warning", "(", "'Include command points to file or '", "'directory that does not exist, \"%s\",'", "' on line %d'", ",", "include_path", ",", "lineno", ")", "if", "not", "content", "or", "content", ".", "startswith", "(", "'#'", ")", ":", "# comment line or empty line", "continue", "match", "=", "mapping_re", ".", "match", "(", "content", ")", "if", "match", ":", "source_path", ",", "target_path", "=", "match", ".", "groups", "(", ")", "source_path", "=", "path", ".", "join", "(", "source", ",", "source_path", ".", "strip", "(", "'\\'\"'", ")", ")", "if", "source_path", "in", "dotfiles", ":", "self", ".", "log", ".", "warning", "(", "'Duplicate dotfile source \"%s\" '", "'on line #%d'", ",", "lineno", ")", "continue", "if", "target_path", "is", "None", ":", "target_path", "=", "source_path", "dotfiles", "[", "source_path", "]", "=", "target_path", "else", ":", "self", ".", "log", ".", "warning", "(", "'Dotfile mapping regex failed on line '", "'#%d'", ",", "lineno", ")", "return", "dotfiles" ]
Do a simple parse of the dotfile mapping, using colons to separate the source file name from the target file path.
[ "Do", "a", "simple", "parse", "of", "the", "dotfile", "mapping", "using", "semicolons", "to", "separate", "source", "file", "name", "from", "the", "target", "file", "paths", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L129-L211
train
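For reference, a mapping file that the parser above accepts might look like this (paths illustrative; one entry per line in "source: target" form, the target optional, with quotes allowed around names containing spaces or colons):

#include "common/dotfiles"
vimrc: .vimrc
"git config": .gitconfig
tmux.conf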
jreese/dotlink
dotlink/dotlink.py
Dotlink.sh
def sh(self, *command, **kwargs): """Run a shell command with the given arguments.""" self.log.debug('shell: %s', ' '.join(command)) return subprocess.check_call(' '.join(command), stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin, shell=True, **kwargs)
python
def sh(self, *command, **kwargs): """Run a shell command with the given arguments.""" self.log.debug('shell: %s', ' '.join(command)) return subprocess.check_call(' '.join(command), stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin, shell=True, **kwargs)
[ "def", "sh", "(", "self", ",", "*", "command", ",", "*", "*", "kwargs", ")", ":", "self", ".", "log", ".", "debug", "(", "'shell: %s'", ",", "' '", ".", "join", "(", "command", ")", ")", "return", "subprocess", ".", "check_call", "(", "' '", ".", "join", "(", "command", ")", ",", "stdout", "=", "sys", ".", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ",", "stdin", "=", "sys", ".", "stdin", ",", "shell", "=", "True", ",", "*", "*", "kwargs", ")" ]
Run a shell command with the given arguments.
[ "Run", "a", "shell", "command", "with", "the", "given", "arguments", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L213-L220
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.scp
def scp(self, local_file, remote_path=''): """Copy a local file to the given remote path.""" if self.args.user: upload_spec = '{0}@{1}:{2}'.format(self.args.user, self.args.server, remote_path) else: upload_spec = '{0}:{1}'.format(self.args.server, remote_path) return self.sh('scp', local_file, upload_spec)
python
def scp(self, local_file, remote_path=''): """Copy a local file to the given remote path.""" if self.args.user: upload_spec = '{0}@{1}:{2}'.format(self.args.user, self.args.server, remote_path) else: upload_spec = '{0}:{1}'.format(self.args.server, remote_path) return self.sh('scp', local_file, upload_spec)
[ "def", "scp", "(", "self", ",", "local_file", ",", "remote_path", "=", "''", ")", ":", "if", "self", ".", "args", ".", "user", ":", "upload_spec", "=", "'{0}@{1}:{2}'", ".", "format", "(", "self", ".", "args", ".", "user", ",", "self", ".", "args", ".", "server", ",", "remote_path", ")", "else", ":", "upload_spec", "=", "'{0}:{1}'", ".", "format", "(", "self", ".", "args", ".", "server", ",", "remote_path", ")", "return", "self", ".", "sh", "(", "'scp'", ",", "local_file", ",", "upload_spec", ")" ]
Copy a local file to the given remote path.
[ "Copy", "a", "local", "file", "to", "the", "given", "remote", "path", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L231-L240
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.run
def run(self): """Start the dotfile deployment process.""" script = path.realpath(__file__) self.log.debug('Running from %s with arguments: %s', script, self.args) if self.args.source: self.source = self.args.source else: # hardcoding as the parent-parent of the script for now self.source = path.dirname(path.dirname(script)) self.log.debug('Sourcing dotfiles from %s', self.source) try: if self.args.repo: self.clone_repo() self.deploy_dotfiles(self.load_dotfiles()) except: self.log.exception('Profile deploy failed') finally: if self.args.repo: self.cleanup_repo()
python
def run(self): """Start the dotfile deployment process.""" script = path.realpath(__file__) self.log.debug('Running from %s with arguments: %s', script, self.args) if self.args.source: self.source = self.args.source else: # hardcoding as the parent-parent of the script for now self.source = path.dirname(path.dirname(script)) self.log.debug('Sourcing dotfiles from %s', self.source) try: if self.args.repo: self.clone_repo() self.deploy_dotfiles(self.load_dotfiles()) except: self.log.exception('Profile deploy failed') finally: if self.args.repo: self.cleanup_repo()
[ "def", "run", "(", "self", ")", ":", "script", "=", "path", ".", "realpath", "(", "__file__", ")", "self", ".", "log", ".", "debug", "(", "'Running from %s with arguments: %s'", ",", "script", ",", "self", ".", "args", ")", "if", "self", ".", "args", ".", "source", ":", "self", ".", "source", "=", "self", ".", "args", ".", "source", "else", ":", "# hardcoding as the parent-parent of the script for now", "self", ".", "source", "=", "path", ".", "dirname", "(", "path", ".", "dirname", "(", "script", ")", ")", "self", ".", "log", ".", "debug", "(", "'Sourcing dotfiles from %s'", ",", "self", ".", "source", ")", "try", ":", "if", "self", ".", "args", ".", "repo", ":", "self", ".", "clone_repo", "(", ")", "self", ".", "deploy_dotfiles", "(", "self", ".", "load_dotfiles", "(", ")", ")", "except", ":", "self", ".", "log", ".", "exception", "(", "'Profile deploy failed'", ")", "finally", ":", "if", "self", ".", "args", ".", "repo", ":", "self", ".", "cleanup_repo", "(", ")" ]
Start the dotfile deployment process.
[ "Start", "the", "dotfile", "deployment", "process", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L246-L269
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.load_dotfiles
def load_dotfiles(self): """Read in the dotfile mapping as a dictionary.""" if self.args.map and path.exists(self.args.map): dotfiles_path = self.args.map else: dotfiles_path = self.source self.log.debug('Loading dotfile mapping from %s', dotfiles_path) return self.parse_mapping(dotfiles_path, source=self.source)
python
def load_dotfiles(self): """Read in the dotfile mapping as a dictionary.""" if self.args.map and path.exists(self.args.map): dotfiles_path = self.args.map else: dotfiles_path = self.source self.log.debug('Loading dotfile mapping from %s', dotfiles_path) return self.parse_mapping(dotfiles_path, source=self.source)
[ "def", "load_dotfiles", "(", "self", ")", ":", "if", "self", ".", "args", ".", "map", "and", "path", ".", "exists", "(", "self", ".", "args", ".", "map", ")", ":", "dotfiles_path", "=", "self", ".", "args", ".", "map", "else", ":", "dotfiles_path", "=", "self", ".", "source", "self", ".", "log", ".", "debug", "(", "'Loading dotfile mapping from %s'", ",", "dotfiles_path", ")", "return", "self", ".", "parse_mapping", "(", "dotfiles_path", ",", "source", "=", "self", ".", "source", ")" ]
Read in the dotfile mapping as a dictionary.
[ "Read", "in", "the", "dotfile", "mapping", "as", "a", "dictionary", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L271-L280
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.clone_repo
def clone_repo(self): """Clone a repository containing the dotfiles source.""" tempdir_path = tempfile.mkdtemp() if self.args.git: self.log.debug('Cloning git source repository from %s to %s', self.source, tempdir_path) self.sh('git clone', self.source, tempdir_path) else: raise NotImplementedError('Unknown repo type') self.source = tempdir_path
python
def clone_repo(self): """Clone a repository containing the dotfiles source.""" tempdir_path = tempfile.mkdtemp() if self.args.git: self.log.debug('Cloning git source repository from %s to %s', self.source, tempdir_path) self.sh('git clone', self.source, tempdir_path) else: raise NotImplementedError('Unknown repo type') self.source = tempdir_path
[ "def", "clone_repo", "(", "self", ")", ":", "tempdir_path", "=", "tempfile", ".", "mkdtemp", "(", ")", "if", "self", ".", "args", ".", "git", ":", "self", ".", "log", ".", "debug", "(", "'Cloning git source repository from %s to %s'", ",", "self", ".", "source", ",", "tempdir_path", ")", "self", ".", "sh", "(", "'git clone'", ",", "self", ".", "source", ",", "tempdir_path", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unknown repo type'", ")", "self", ".", "source", "=", "tempdir_path" ]
Clone a repository containing the dotfiles source.
[ "Clone", "a", "repository", "containing", "the", "dotfiles", "source", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L282-L294
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.cleanup_repo
def cleanup_repo(self): """Cleanup the temporary directory containing the dotfiles repo.""" if self.source and path.isdir(self.source): self.log.debug('Cleaning up source repo from %s', self.source) shutil.rmtree(self.source)
python
def cleanup_repo(self): """Cleanup the temporary directory containing the dotfiles repo.""" if self.source and path.isdir(self.source): self.log.debug('Cleaning up source repo from %s', self.source) shutil.rmtree(self.source)
[ "def", "cleanup_repo", "(", "self", ")", ":", "if", "self", ".", "source", "and", "path", ".", "isdir", "(", "self", ".", "source", ")", ":", "self", ".", "log", ".", "debug", "(", "'Cleaning up source repo from %s'", ",", "self", ".", "source", ")", "shutil", ".", "rmtree", "(", "self", ".", "source", ")" ]
Cleanup the temporary directory containing the dotfiles repo.
[ "Cleanup", "the", "temporary", "directory", "containing", "the", "dotfiles", "repo", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L296-L300
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.deploy_dotfiles
def deploy_dotfiles(self, dotfiles): """Deploy dotfiles using the appropriate method.""" if self.args.server: return self.deploy_remote(dotfiles) else: return self.deploy_local(dotfiles)
python
def deploy_dotfiles(self, dotfiles): """Deploy dotfiles using the appropriate method.""" if self.args.server: return self.deploy_remote(dotfiles) else: return self.deploy_local(dotfiles)
[ "def", "deploy_dotfiles", "(", "self", ",", "dotfiles", ")", ":", "if", "self", ".", "args", ".", "server", ":", "return", "self", ".", "deploy_remote", "(", "dotfiles", ")", "else", ":", "return", "self", ".", "deploy_local", "(", "dotfiles", ")" ]
Deploy dotfiles using the appropriate method.
[ "Deploy", "dotfiles", "using", "the", "appropriate", "method", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L302-L307
train
jreese/dotlink
dotlink/dotlink.py
Dotlink.deploy_remote
def deploy_remote(self, dotfiles): """Deploy dotfiles to a remote server.""" tempfile_path = None tempdir_path = None try: tempdir_path = tempfile.mkdtemp() self.log.debug('Deploying to temp dir %s', tempdir_path) self.deploy_local(dotfiles, target_root=tempdir_path) if self.args.rsync: local_spec = tempdir_path.rstrip('/') + '/' remote_spec = self.args.path.rstrip('/') + '/' if self.args.user: remote_spec = "{0}@{1}:{2}".format(self.args.user, self.args.server, remote_spec) else: remote_spec = "{0}:{1}".format(self.args.server, remote_spec) self.log.debug('Using rsync to sync dotfiles to %s', remote_spec) self.sh('rsync', '-az', local_spec, remote_spec) else: fh, tempfile_path = tempfile.mkstemp(suffix='.tar.gz') os.close(fh) self.log.debug('Creating tar file %s', tempfile_path) shutil.make_archive(tempfile_path.replace('.tar.gz', ''), 'gztar', tempdir_path) upload_path = '_profile_upload.tgz' self.log.debug('Uploading tarball to %s', upload_path) self.scp(tempfile_path, upload_path) if self.args.path: ssh_command = "'mkdir -p {0} && "\ "tar xf _profile_upload.tgz -C {0}; "\ "rm -f _profile_upload.tgz'"\ "".format(self.args.path) else: ssh_command = "tar xf _profile_upload.tgz; "\ "rm -f _profile_upload.tgz" self.log.debug('Using ssh to unpack tarball and clean up') self.ssh(ssh_command) finally: if tempdir_path and path.isdir(tempdir_path): self.log.debug('Removing temp dir %s', tempdir_path) shutil.rmtree(tempdir_path) if tempfile_path and path.isfile(tempfile_path): self.log.debug('Removing temp file %s', tempfile_path) os.unlink(tempfile_path)
python
def deploy_remote(self, dotfiles): """Deploy dotfiles to a remote server.""" tempfile_path = None tempdir_path = None try: tempdir_path = tempfile.mkdtemp() self.log.debug('Deploying to temp dir %s', tempdir_path) self.deploy_local(dotfiles, target_root=tempdir_path) if self.args.rsync: local_spec = tempdir_path.rstrip('/') + '/' remote_spec = self.args.path.rstrip('/') + '/' if self.args.user: remote_spec = "{0}@{1}:{2}".format(self.args.user, self.args.server, remote_spec) else: remote_spec = "{0}:{1}".format(self.args.server, remote_spec) self.log.debug('Using rsync to sync dotfiles to %s', remote_spec) self.sh('rsync', '-az', local_spec, remote_spec) else: fh, tempfile_path = tempfile.mkstemp(suffix='.tar.gz') os.close(fh) self.log.debug('Creating tar file %s', tempfile_path) shutil.make_archive(tempfile_path.replace('.tar.gz', ''), 'gztar', tempdir_path) upload_path = '_profile_upload.tgz' self.log.debug('Uploading tarball to %s', upload_path) self.scp(tempfile_path, upload_path) if self.args.path: ssh_command = "'mkdir -p {0} && "\ "tar xf _profile_upload.tgz -C {0}; "\ "rm -f _profile_upload.tgz'"\ "".format(self.args.path) else: ssh_command = "tar xf _profile_upload.tgz; "\ "rm -f _profile_upload.tgz" self.log.debug('Using ssh to unpack tarball and clean up') self.ssh(ssh_command) finally: if tempdir_path and path.isdir(tempdir_path): self.log.debug('Removing temp dir %s', tempdir_path) shutil.rmtree(tempdir_path) if tempfile_path and path.isfile(tempfile_path): self.log.debug('Removing temp file %s', tempfile_path) os.unlink(tempfile_path)
[ "def", "deploy_remote", "(", "self", ",", "dotfiles", ")", ":", "tempfile_path", "=", "None", "tempdir_path", "=", "None", "try", ":", "tempdir_path", "=", "tempfile", ".", "mkdtemp", "(", ")", "self", ".", "log", ".", "debug", "(", "'Deploying to temp dir %s'", ",", "tempdir_path", ")", "self", ".", "deploy_local", "(", "dotfiles", ",", "target_root", "=", "tempdir_path", ")", "if", "self", ".", "args", ".", "rsync", ":", "local_spec", "=", "tempdir_path", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "remote_spec", "=", "self", ".", "args", ".", "path", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "if", "self", ".", "args", ".", "user", ":", "remote_spec", "=", "\"{0}@{1}:{2}\"", ".", "format", "(", "self", ".", "args", ".", "user", ",", "self", ".", "args", ".", "server", ",", "remote_spec", ")", "else", ":", "remote_spec", "=", "\"{0}:{1}\"", ".", "format", "(", "self", ".", "args", ".", "server", ",", "remote_spec", ")", "self", ".", "log", ".", "debug", "(", "'Using rsync to sync dotfiles to %s'", ",", "remote_spec", ")", "self", ".", "sh", "(", "'rsync'", ",", "'-az'", ",", "local_spec", ",", "remote_spec", ")", "else", ":", "fh", ",", "tempfile_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.tar.gz'", ")", "os", ".", "close", "(", "fh", ")", "self", ".", "log", ".", "debug", "(", "'Creating tar file %s'", ",", "tempfile_path", ")", "shutil", ".", "make_archive", "(", "tempfile_path", ".", "replace", "(", "'.tar.gz'", ",", "''", ")", ",", "'gztar'", ",", "tempdir_path", ")", "upload_path", "=", "'_profile_upload.tgz'", "self", ".", "log", ".", "debug", "(", "'Uploading tarball to %s'", ",", "upload_path", ")", "self", ".", "scp", "(", "tempfile_path", ",", "upload_path", ")", "if", "self", ".", "args", ".", "path", ":", "ssh_command", "=", "\"'mkdir -p {0} && \"", "\"tar xf _profile_upload.tgz -C {0}; \"", "\"rm -f _profile_upload.tgz'\"", "\"\"", ".", "format", "(", "self", ".", "args", ".", "path", ")", "else", ":", "ssh_command", "=", "\"tar xf _profile_upload.tgz; \"", "\"rm -f _profile_upload.tgz\"", "self", ".", "log", ".", "debug", "(", "'Using ssh to unpack tarball and clean up'", ")", "self", ".", "ssh", "(", "ssh_command", ")", "finally", ":", "if", "tempdir_path", "and", "path", ".", "isdir", "(", "tempdir_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Removing temp dir %s'", ",", "tempdir_path", ")", "shutil", ".", "rmtree", "(", "tempdir_path", ")", "if", "tempfile_path", "and", "path", ".", "isfile", "(", "tempfile_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Removing temp file %s'", ",", "tempfile_path", ")", "os", ".", "unlink", "(", "tempfile_path", ")" ]
Deploy dotfiles to a remote server.
[ "Deploy", "dotfiles", "to", "a", "remote", "server", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L309-L364
train
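The rsync branch of deploy_remote builds a standard user@host:path destination spec. A minimal standalone sketch of that string-building logic (the helper name and sample values are invented for illustration):

def build_remote_spec(server, remote_path, user=None):
    # Mirrors deploy_remote: normalize to exactly one trailing slash so rsync
    # syncs the directory *contents* rather than nesting the directory itself.
    spec = remote_path.rstrip('/') + '/'
    if user:
        return "{0}@{1}:{2}".format(user, server, spec)
    return "{0}:{1}".format(server, spec)

# build_remote_spec('example.com', '/home/me/dotfiles', user='me')
# -> 'me@example.com:/home/me/dotfiles/'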
jreese/dotlink
dotlink/dotlink.py
Dotlink.deploy_local
def deploy_local(self, dotfiles, target_root=None):
        """Deploy dotfiles to a local path."""
        if target_root is None:
            target_root = self.args.path

        for source_path, target_path in dotfiles.items():
            source_path = path.join(self.source, source_path)
            target_path = path.join(target_root, target_path)

            if path.isfile(target_path) or path.islink(target_path):
                self.log.debug('Removing existing file at %s', target_path)
                os.unlink(target_path)

            elif path.isdir(target_path):
                self.log.debug('Removing existing dir at %s', target_path)
                shutil.rmtree(target_path)

            parent_dir = path.dirname(target_path)
            if not path.isdir(parent_dir):
                self.log.debug('Creating parent dir %s', parent_dir)
                os.makedirs(parent_dir)

            if self.args.copy:
                if path.isdir(source_path):
                    self.log.debug('Copying dir %s to %s', source_path, target_path)
                    shutil.copytree(source_path, target_path)
                else:
                    self.log.debug('Copying file %s to %s', source_path, target_path)
                    shutil.copy(source_path, target_path)
            else:
                self.log.debug('Symlinking %s -> %s', target_path, source_path)
                os.symlink(source_path, target_path)
python
def deploy_local(self, dotfiles, target_root=None):
        """Deploy dotfiles to a local path."""
        if target_root is None:
            target_root = self.args.path

        for source_path, target_path in dotfiles.items():
            source_path = path.join(self.source, source_path)
            target_path = path.join(target_root, target_path)

            if path.isfile(target_path) or path.islink(target_path):
                self.log.debug('Removing existing file at %s', target_path)
                os.unlink(target_path)

            elif path.isdir(target_path):
                self.log.debug('Removing existing dir at %s', target_path)
                shutil.rmtree(target_path)

            parent_dir = path.dirname(target_path)
            if not path.isdir(parent_dir):
                self.log.debug('Creating parent dir %s', parent_dir)
                os.makedirs(parent_dir)

            if self.args.copy:
                if path.isdir(source_path):
                    self.log.debug('Copying dir %s to %s', source_path, target_path)
                    shutil.copytree(source_path, target_path)
                else:
                    self.log.debug('Copying file %s to %s', source_path, target_path)
                    shutil.copy(source_path, target_path)
            else:
                self.log.debug('Symlinking %s -> %s', target_path, source_path)
                os.symlink(source_path, target_path)
[ "def", "deploy_local", "(", "self", ",", "dotfiles", ",", "target_root", "=", "None", ")", ":", "if", "target_root", "is", "None", ":", "target_root", "=", "self", ".", "args", ".", "path", "for", "source_path", ",", "target_path", "in", "dotfiles", ".", "items", "(", ")", ":", "source_path", "=", "path", ".", "join", "(", "self", ".", "source", ",", "source_path", ")", "target_path", "=", "path", ".", "join", "(", "target_root", ",", "target_path", ")", "if", "path", ".", "isfile", "(", "target_path", ")", "or", "path", ".", "islink", "(", "target_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Removing existing file at %s'", ",", "target_path", ")", "os", ".", "unlink", "(", "target_path", ")", "elif", "path", ".", "isdir", "(", "target_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Removing existing dir at %s'", ",", "target_path", ")", "shutil", ".", "rmtree", "(", "target_path", ")", "parent_dir", "=", "path", ".", "dirname", "(", "target_path", ")", "if", "not", "path", ".", "isdir", "(", "parent_dir", ")", ":", "self", ".", "log", ".", "debug", "(", "'Creating parent dir %s'", ",", "parent_dir", ")", "os", ".", "makedirs", "(", "parent_dir", ")", "if", "self", ".", "args", ".", "copy", ":", "if", "path", ".", "isdir", "(", "source_path", ")", ":", "self", ".", "log", ".", "debug", "(", "'Copying file %s to %s'", ",", "source_path", ",", "target_path", ")", "shutil", ".", "copytree", "(", "source_path", ",", "target_path", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "'Copying dir %s to %s'", ",", "source_path", ",", "target_path", ")", "shutil", ".", "copy", "(", "source_path", ",", "target_path", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "'Symlinking %s -> %s'", ",", "target_path", ",", "source_path", ")", "os", ".", "symlink", "(", "source_path", ",", "target_path", ")" ]
Deploy dotfiles to a local path.
[ "Deploy", "dotfiles", "to", "a", "local", "path", "." ]
5e48c1493c20fc6df4ad0144e80563915ce339b6
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L366-L400
train
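The replace-then-link pattern in deploy_local reads well in isolation. A hedged sketch (helper name invented, Python 3 assumed):

import os
import shutil
from os import path

def place_symlink(source_path, target_path):
    # Same order of operations as deploy_local: clear whatever currently
    # occupies the target, ensure the parent directory exists, then symlink.
    if path.isfile(target_path) or path.islink(target_path):
        os.unlink(target_path)          # plain file or (possibly dangling) link
    elif path.isdir(target_path):
        shutil.rmtree(target_path)      # real directory
    parent_dir = path.dirname(target_path)
    if parent_dir and not path.isdir(parent_dir):
        os.makedirs(parent_dir)
    os.symlink(source_path, target_path)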
inveniosoftware-contrib/json-merger
json_merger/utils.py
dedupe_list
def dedupe_list(l): """Remove duplicates from a list preserving the order. We might be tempted to use the list(set(l)) idiom, but it doesn't preserve the order, which hinders testability and does not work for lists with unhashable elements. """ result = [] for el in l: if el not in result: result.append(el) return result
python
def dedupe_list(l): """Remove duplicates from a list preserving the order. We might be tempted to use the list(set(l)) idiom, but it doesn't preserve the order, which hinders testability and does not work for lists with unhashable elements. """ result = [] for el in l: if el not in result: result.append(el) return result
[ "def", "dedupe_list", "(", "l", ")", ":", "result", "=", "[", "]", "for", "el", "in", "l", ":", "if", "el", "not", "in", "result", ":", "result", ".", "append", "(", "el", ")", "return", "result" ]
Remove duplicates from a list preserving the order. We might be tempted to use the list(set(l)) idiom, but it doesn't preserve the order, which hinders testability and does not work for lists with unhashable elements.
[ "Remove", "duplicates", "from", "a", "list", "preserving", "the", "order", "." ]
adc6d372da018427e1db7b92424d3471e01a4118
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/utils.py#L107-L120
train
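Expected behaviour of dedupe_list as defined above; note that the list(set(l)) idiom would raise TypeError on the second call, since dicts are unhashable:

dedupe_list([3, 1, 3, 2, 1])
# -> [3, 1, 2]  (first occurrence wins, order preserved)
dedupe_list([{'a': 1}, {'a': 1}, {'b': 2}])
# -> [{'a': 1}, {'b': 2}]  (works even for unhashable elements)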
ldomic/lintools
lintools/plots.py
Plots.plot_amino_diagrams
def plot_amino_diagrams(self):
        """
        Plotting of amino diagrams - circles with residue name and id, colored according to the residue type.
        If the protein has more than one chain, chain identity is also included in the plot.
        The plot is saved as an svg file with the residue id and chain id as the filename for unambiguous identification.
        """
        for res in self.topology_data.dict_of_plotted_res:
            try:
                color = [self.colors_amino_acids[self.amino_acids[res[0]]],'white']
            except KeyError:
                color = ["pink",'white']
            plt.figure(figsize=(2.5,2.5))
            ring1,_=plt.pie([1], radius=1, startangle=90, colors=color, counterclock=False)
            plt.axis('equal')
            plt.setp(ring1, width=1, edgecolor=color[0])
            if len(self.topology_data.universe.protein.segments)<=1:
                #Parameters for amino diagrams without segids
                plt.text(0,-0.45,res[0]+"\n"+res[1],ha='center',size=36, fontweight="bold")
            else:
                #Parameters for amino diagrams with segids
                plt.text(0,-0.37,res[0]+"\n"+res[1]+" "+res[2],ha='center',size=30, fontweight="bold")
            #play with the dpi
            pylab.savefig(str(res[1])+res[2]+".svg", dpi=300, transparent=True)
python
def plot_amino_diagrams(self):
        """
        Plotting of amino diagrams - circles with residue name and id, colored according to the residue type.
        If the protein has more than one chain, chain identity is also included in the plot.
        The plot is saved as an svg file with the residue id and chain id as the filename for unambiguous identification.
        """
        for res in self.topology_data.dict_of_plotted_res:
            try:
                color = [self.colors_amino_acids[self.amino_acids[res[0]]],'white']
            except KeyError:
                color = ["pink",'white']
            plt.figure(figsize=(2.5,2.5))
            ring1,_=plt.pie([1], radius=1, startangle=90, colors=color, counterclock=False)
            plt.axis('equal')
            plt.setp(ring1, width=1, edgecolor=color[0])
            if len(self.topology_data.universe.protein.segments)<=1:
                #Parameters for amino diagrams without segids
                plt.text(0,-0.45,res[0]+"\n"+res[1],ha='center',size=36, fontweight="bold")
            else:
                #Parameters for amino diagrams with segids
                plt.text(0,-0.37,res[0]+"\n"+res[1]+" "+res[2],ha='center',size=30, fontweight="bold")
            #play with the dpi
            pylab.savefig(str(res[1])+res[2]+".svg", dpi=300, transparent=True)
[ "def", "plot_amino_diagrams", "(", "self", ")", ":", "for", "res", "in", "self", ".", "topology_data", ".", "dict_of_plotted_res", ":", "try", ":", "color", "=", "[", "self", ".", "colors_amino_acids", "[", "self", ".", "amino_acids", "[", "res", "[", "0", "]", "]", "]", ",", "'white'", "]", "except", "KeyError", ":", "color", "=", "[", "\"pink\"", ",", "'white'", "]", "plt", ".", "figure", "(", "figsize", "=", "(", "2.5", ",", "2.5", ")", ")", "ring1", ",", "_", "=", "plt", ".", "pie", "(", "[", "1", "]", ",", "radius", "=", "1", ",", "startangle", "=", "90", ",", "colors", "=", "color", ",", "counterclock", "=", "False", ")", "plt", ".", "axis", "(", "'equal'", ")", "plt", ".", "setp", "(", "ring1", ",", "width", "=", "1", ",", "edgecolor", "=", "color", "[", "0", "]", ")", "if", "len", "(", "self", ".", "topology_data", ".", "universe", ".", "protein", ".", "segments", ")", "<=", "1", ":", "#Parameters for amino diagrams without segids", "plt", ".", "text", "(", "0", ",", "-", "0.45", ",", "res", "[", "0", "]", "+", "\"\\n\"", "+", "res", "[", "1", "]", ",", "ha", "=", "'center'", ",", "size", "=", "36", ",", "fontweight", "=", "\"bold\"", ")", "else", ":", "#Parameters for amino diagrams with segids", "plt", ".", "text", "(", "0", ",", "-", "0.37", ",", "res", "[", "0", "]", "+", "\"\\n\"", "+", "res", "[", "1", "]", "+", "\" \"", "+", "res", "[", "2", "]", ",", "ha", "=", "'center'", ",", "size", "=", "30", ",", "fontweight", "=", "\"bold\"", ")", "#play with the dpi", "pylab", ".", "savefig", "(", "str", "(", "res", "[", "1", "]", ")", "+", "res", "[", "2", "]", "+", "\".svg\"", ",", "dpi", "=", "300", ",", "transparent", "=", "True", ")" ]
Plotting of amino diagrams - circles with residue name and id, colored according to the residue type. If the protein has more than one chain, chain identity is also included in the plot. The plot is saved as an svg file with the residue id and chain id as the filename for unambiguous identification.
[ "Plotting", "of", "amino", "diagrams", "-", "circles", "with", "residue", "name", "and", "id", "colored", "according", "to", "the", "residue", "type", ".", "If", "the", "protein", "has", "more", "than", "one", "chain", "chain", "identity", "is", "also", "included", "in", "the", "plot", ".", "The", "plot", "is", "saved", "as", "svg", "file", "with", "residue", "id", "and", "chain", "id", "as", "filename", "for", "more", "certain", "identification", "." ]
d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/plots.py#L47-L71
train
robinandeer/puzzle
puzzle/utils/ped.py
get_cases
def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'): """Create cases and populate them with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case)) """ individuals = get_individuals( variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode ) case_objs = [] case_ids = set() compressed = False tabix_index = False #If no individuals we still need to have a case id if variant_source.endswith('.gz'): logger.debug("Found compressed variant source") compressed = True tabix_file = '.'.join([variant_source, 'tbi']) if os.path.exists(tabix_file): logger.debug("Found index file") tabix_index = True if len(individuals) > 0: for individual in individuals: case_ids.add(individual.case_id) else: case_ids = [os.path.basename(variant_source)] for case_id in case_ids: logger.info("Found case {0}".format(case_id)) case = Case( case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index ) # Add the individuals to the correct case for individual in individuals: if individual.case_id == case_id: logger.info("Adding ind {0} to case {1}".format( individual.name, individual.case_id )) case.add_individual(individual) case_objs.append(case) return case_objs
python
def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'): """Create cases and populate them with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case)) """ individuals = get_individuals( variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode ) case_objs = [] case_ids = set() compressed = False tabix_index = False #If no individuals we still need to have a case id if variant_source.endswith('.gz'): logger.debug("Found compressed variant source") compressed = True tabix_file = '.'.join([variant_source, 'tbi']) if os.path.exists(tabix_file): logger.debug("Found index file") tabix_index = True if len(individuals) > 0: for individual in individuals: case_ids.add(individual.case_id) else: case_ids = [os.path.basename(variant_source)] for case_id in case_ids: logger.info("Found case {0}".format(case_id)) case = Case( case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index ) # Add the individuals to the correct case for individual in individuals: if individual.case_id == case_id: logger.info("Adding ind {0} to case {1}".format( individual.name, individual.case_id )) case.add_individual(individual) case_objs.append(case) return case_objs
[ "def", "get_cases", "(", "variant_source", ",", "case_lines", "=", "None", ",", "case_type", "=", "'ped'", ",", "variant_type", "=", "'snv'", ",", "variant_mode", "=", "'vcf'", ")", ":", "individuals", "=", "get_individuals", "(", "variant_source", "=", "variant_source", ",", "case_lines", "=", "case_lines", ",", "case_type", "=", "case_type", ",", "variant_mode", "=", "variant_mode", ")", "case_objs", "=", "[", "]", "case_ids", "=", "set", "(", ")", "compressed", "=", "False", "tabix_index", "=", "False", "#If no individuals we still need to have a case id", "if", "variant_source", ".", "endswith", "(", "'.gz'", ")", ":", "logger", ".", "debug", "(", "\"Found compressed variant source\"", ")", "compressed", "=", "True", "tabix_file", "=", "'.'", ".", "join", "(", "[", "variant_source", ",", "'tbi'", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "tabix_file", ")", ":", "logger", ".", "debug", "(", "\"Found index file\"", ")", "tabix_index", "=", "True", "if", "len", "(", "individuals", ")", ">", "0", ":", "for", "individual", "in", "individuals", ":", "case_ids", ".", "add", "(", "individual", ".", "case_id", ")", "else", ":", "case_ids", "=", "[", "os", ".", "path", ".", "basename", "(", "variant_source", ")", "]", "for", "case_id", "in", "case_ids", ":", "logger", ".", "info", "(", "\"Found case {0}\"", ".", "format", "(", "case_id", ")", ")", "case", "=", "Case", "(", "case_id", "=", "case_id", ",", "name", "=", "case_id", ",", "variant_source", "=", "variant_source", ",", "variant_type", "=", "variant_type", ",", "variant_mode", "=", "variant_mode", ",", "compressed", "=", "compressed", ",", "tabix_index", "=", "tabix_index", ")", "# Add the individuals to the correct case", "for", "individual", "in", "individuals", ":", "if", "individual", ".", "case_id", "==", "case_id", ":", "logger", ".", "info", "(", "\"Adding ind {0} to case {1}\"", ".", "format", "(", "individual", ".", "name", ",", "individual", ".", "case_id", ")", ")", "case", ".", "add_individual", "(", "individual", ")", "case_objs", ".", "append", "(", "case", ")", "return", "case_objs" ]
Create cases and populate them with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case))
[ "Create", "a", "cases", "and", "populate", "it", "with", "individuals" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/ped.py#L20-L80
train
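A hedged usage sketch: the file name and ped lines below are invented, the standard six-column PED layout (family, individual, father, mother, sex, phenotype) is assumed, and the ped parser is assumed to use the family id as the case id:

ped_lines = [
    "fam1\tproband\tfather\tmother\t1\t2",
    "fam1\tfather\t0\t0\t1\t1",
    "fam1\tmother\t0\t0\t2\t1",
]
# The '.gz' suffix triggers the compressed/tabix detection shown above.
cases = get_cases('family.vcf.gz', case_lines=ped_lines, case_type='ped')
for case in cases:
    print(case.case_id)   # expected: 'fam1', one Case per family id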
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.handle_message
def handle_message(self, message): """Process UDP messages""" if self._yamaha: if 'power' in message: _LOGGER.debug("Power: %s", message.get('power')) self._yamaha.power = ( STATE_ON if message.get('power') == "on" else STATE_OFF) if 'input' in message: _LOGGER.debug("Input: %s", message.get('input')) self._yamaha._source = message.get('input') if 'volume' in message: volume = message.get('volume') if 'max_volume' in message: volume_max = message.get('max_volume') else: volume_max = self._yamaha.volume_max _LOGGER.debug("Volume: %d / Max: %d", volume, volume_max) self._yamaha.volume = volume / volume_max self._yamaha.volume_max = volume_max if 'mute' in message: _LOGGER.debug("Mute: %s", message.get('mute')) self._yamaha.mute = message.get('mute', False) else: _LOGGER.debug("No yamaha-obj found")
python
def handle_message(self, message): """Process UDP messages""" if self._yamaha: if 'power' in message: _LOGGER.debug("Power: %s", message.get('power')) self._yamaha.power = ( STATE_ON if message.get('power') == "on" else STATE_OFF) if 'input' in message: _LOGGER.debug("Input: %s", message.get('input')) self._yamaha._source = message.get('input') if 'volume' in message: volume = message.get('volume') if 'max_volume' in message: volume_max = message.get('max_volume') else: volume_max = self._yamaha.volume_max _LOGGER.debug("Volume: %d / Max: %d", volume, volume_max) self._yamaha.volume = volume / volume_max self._yamaha.volume_max = volume_max if 'mute' in message: _LOGGER.debug("Mute: %s", message.get('mute')) self._yamaha.mute = message.get('mute', False) else: _LOGGER.debug("No yamaha-obj found")
[ "def", "handle_message", "(", "self", ",", "message", ")", ":", "if", "self", ".", "_yamaha", ":", "if", "'power'", "in", "message", ":", "_LOGGER", ".", "debug", "(", "\"Power: %s\"", ",", "message", ".", "get", "(", "'power'", ")", ")", "self", ".", "_yamaha", ".", "power", "=", "(", "STATE_ON", "if", "message", ".", "get", "(", "'power'", ")", "==", "\"on\"", "else", "STATE_OFF", ")", "if", "'input'", "in", "message", ":", "_LOGGER", ".", "debug", "(", "\"Input: %s\"", ",", "message", ".", "get", "(", "'input'", ")", ")", "self", ".", "_yamaha", ".", "_source", "=", "message", ".", "get", "(", "'input'", ")", "if", "'volume'", "in", "message", ":", "volume", "=", "message", ".", "get", "(", "'volume'", ")", "if", "'max_volume'", "in", "message", ":", "volume_max", "=", "message", ".", "get", "(", "'max_volume'", ")", "else", ":", "volume_max", "=", "self", ".", "_yamaha", ".", "volume_max", "_LOGGER", ".", "debug", "(", "\"Volume: %d / Max: %d\"", ",", "volume", ",", "volume_max", ")", "self", ".", "_yamaha", ".", "volume", "=", "volume", "/", "volume_max", "self", ".", "_yamaha", ".", "volume_max", "=", "volume_max", "if", "'mute'", "in", "message", ":", "_LOGGER", ".", "debug", "(", "\"Mute: %s\"", ",", "message", ".", "get", "(", "'mute'", ")", ")", "self", ".", "_yamaha", ".", "mute", "=", "message", ".", "get", "(", "'mute'", ",", "False", ")", "else", ":", "_LOGGER", ".", "debug", "(", "\"No yamaha-obj found\"", ")" ]
Process UDP messages
[ "Process", "UDP", "messages" ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L54-L80
train
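A sketch of the kind of status payload handle_message processes; the keys follow the checks in the method, while the values and the zone instance are invented:

message = {'power': 'on', 'input': 'spotify',
           'volume': 30, 'max_volume': 60, 'mute': False}
zone.handle_message(message)
# Effect per the branches above: yamaha.power == STATE_ON,
# yamaha._source == 'spotify', yamaha.volume == 0.5 (30 / 60),
# yamaha.mute is False.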
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.update_status
def update_status(self, new_status=None): """Updates the zone status.""" _LOGGER.debug("update_status: Zone %s", self.zone_id) if self.status and new_status is None: _LOGGER.debug("Zone: healthy.") else: old_status = self.status or {} if new_status: # merge new_status with existing for comparison _LOGGER.debug("Set status: provided") # make a copy of the old_status status = old_status.copy() # merge updated items into status status.update(new_status) # promote merged_status to new_status new_status = status else: _LOGGER.debug("Set status: own") new_status = self.get_status() _LOGGER.debug("old_status: %s", old_status) _LOGGER.debug("new_status: %s", new_status) _LOGGER.debug("is_equal: %s", old_status == new_status) if new_status != old_status: self.handle_message(new_status) self._status_sent = False self.status = new_status if not self._status_sent: self._status_sent = self.update_hass()
python
def update_status(self, new_status=None): """Updates the zone status.""" _LOGGER.debug("update_status: Zone %s", self.zone_id) if self.status and new_status is None: _LOGGER.debug("Zone: healthy.") else: old_status = self.status or {} if new_status: # merge new_status with existing for comparison _LOGGER.debug("Set status: provided") # make a copy of the old_status status = old_status.copy() # merge updated items into status status.update(new_status) # promote merged_status to new_status new_status = status else: _LOGGER.debug("Set status: own") new_status = self.get_status() _LOGGER.debug("old_status: %s", old_status) _LOGGER.debug("new_status: %s", new_status) _LOGGER.debug("is_equal: %s", old_status == new_status) if new_status != old_status: self.handle_message(new_status) self._status_sent = False self.status = new_status if not self._status_sent: self._status_sent = self.update_hass()
[ "def", "update_status", "(", "self", ",", "new_status", "=", "None", ")", ":", "_LOGGER", ".", "debug", "(", "\"update_status: Zone %s\"", ",", "self", ".", "zone_id", ")", "if", "self", ".", "status", "and", "new_status", "is", "None", ":", "_LOGGER", ".", "debug", "(", "\"Zone: healthy.\"", ")", "else", ":", "old_status", "=", "self", ".", "status", "or", "{", "}", "if", "new_status", ":", "# merge new_status with existing for comparison", "_LOGGER", ".", "debug", "(", "\"Set status: provided\"", ")", "# make a copy of the old_status", "status", "=", "old_status", ".", "copy", "(", ")", "# merge updated items into status", "status", ".", "update", "(", "new_status", ")", "# promote merged_status to new_status", "new_status", "=", "status", "else", ":", "_LOGGER", ".", "debug", "(", "\"Set status: own\"", ")", "new_status", "=", "self", ".", "get_status", "(", ")", "_LOGGER", ".", "debug", "(", "\"old_status: %s\"", ",", "old_status", ")", "_LOGGER", ".", "debug", "(", "\"new_status: %s\"", ",", "new_status", ")", "_LOGGER", ".", "debug", "(", "\"is_equal: %s\"", ",", "old_status", "==", "new_status", ")", "if", "new_status", "!=", "old_status", ":", "self", ".", "handle_message", "(", "new_status", ")", "self", ".", "_status_sent", "=", "False", "self", ".", "status", "=", "new_status", "if", "not", "self", ".", "_status_sent", ":", "self", ".", "_status_sent", "=", "self", ".", "update_hass", "(", ")" ]
Updates the zone status.
[ "Updates", "the", "zone", "status", "." ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L82-L117
train
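The merge-before-compare step in update_status reduces to plain dict semantics; a small illustration with invented values:

old_status = {'power': 'on', 'input': 'spotify', 'volume': 30}
partial_update = {'volume': 45}      # e.g. from a UDP push message

merged = old_status.copy()
merged.update(partial_update)        # same as status.update(new_status) above
# merged != old_status, so handle_message(merged) would run and the
# merged dict becomes the new cached self.status.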
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.set_power
def set_power(self, power): """Send Power command.""" req_url = ENDPOINTS["setPower"].format(self.ip_address, self.zone_id) params = {"power": "on" if power else "standby"} return request(req_url, params=params)
python
def set_power(self, power): """Send Power command.""" req_url = ENDPOINTS["setPower"].format(self.ip_address, self.zone_id) params = {"power": "on" if power else "standby"} return request(req_url, params=params)
[ "def", "set_power", "(", "self", ",", "power", ")", ":", "req_url", "=", "ENDPOINTS", "[", "\"setPower\"", "]", ".", "format", "(", "self", ".", "ip_address", ",", "self", ".", "zone_id", ")", "params", "=", "{", "\"power\"", ":", "\"on\"", "if", "power", "else", "\"standby\"", "}", "return", "request", "(", "req_url", ",", "params", "=", "params", ")" ]
Send Power command.
[ "Send", "Power", "command", "." ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L133-L137
train
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.set_mute
def set_mute(self, mute): """Send mute command.""" req_url = ENDPOINTS["setMute"].format(self.ip_address, self.zone_id) params = {"enable": "true" if mute else "false"} return request(req_url, params=params)
python
def set_mute(self, mute): """Send mute command.""" req_url = ENDPOINTS["setMute"].format(self.ip_address, self.zone_id) params = {"enable": "true" if mute else "false"} return request(req_url, params=params)
[ "def", "set_mute", "(", "self", ",", "mute", ")", ":", "req_url", "=", "ENDPOINTS", "[", "\"setMute\"", "]", ".", "format", "(", "self", ".", "ip_address", ",", "self", ".", "zone_id", ")", "params", "=", "{", "\"enable\"", ":", "\"true\"", "if", "mute", "else", "\"false\"", "}", "return", "request", "(", "req_url", ",", "params", "=", "params", ")" ]
Send mute command.
[ "Send", "mute", "command", "." ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L139-L143
train
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.set_volume
def set_volume(self, volume): """Send Volume command.""" req_url = ENDPOINTS["setVolume"].format(self.ip_address, self.zone_id) params = {"volume": int(volume)} return request(req_url, params=params)
python
def set_volume(self, volume): """Send Volume command.""" req_url = ENDPOINTS["setVolume"].format(self.ip_address, self.zone_id) params = {"volume": int(volume)} return request(req_url, params=params)
[ "def", "set_volume", "(", "self", ",", "volume", ")", ":", "req_url", "=", "ENDPOINTS", "[", "\"setVolume\"", "]", ".", "format", "(", "self", ".", "ip_address", ",", "self", ".", "zone_id", ")", "params", "=", "{", "\"volume\"", ":", "int", "(", "volume", ")", "}", "return", "request", "(", "req_url", ",", "params", "=", "params", ")" ]
Send Volume command.
[ "Send", "Volume", "command", "." ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L145-L149
train
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.set_input
def set_input(self, input_id): """Send Input command.""" req_url = ENDPOINTS["setInput"].format(self.ip_address, self.zone_id) params = {"input": input_id} return request(req_url, params=params)
python
def set_input(self, input_id): """Send Input command.""" req_url = ENDPOINTS["setInput"].format(self.ip_address, self.zone_id) params = {"input": input_id} return request(req_url, params=params)
[ "def", "set_input", "(", "self", ",", "input_id", ")", ":", "req_url", "=", "ENDPOINTS", "[", "\"setInput\"", "]", ".", "format", "(", "self", ".", "ip_address", ",", "self", ".", "zone_id", ")", "params", "=", "{", "\"input\"", ":", "input_id", "}", "return", "request", "(", "req_url", ",", "params", "=", "params", ")" ]
Send Input command.
[ "Send", "Input", "command", "." ]
616379ae22d6b518c61042d58be6d18a46242168
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L151-L155
train
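Taken together, the four setters translate Python values into the query parameters shown above; a usage sketch assuming an already-constructed Zone instance:

zone.set_power(True)         # -> params {"power": "on"}
zone.set_power(False)        # -> params {"power": "standby"}
zone.set_mute(True)          # -> params {"enable": "true"}
zone.set_volume(42.7)        # -> params {"volume": 42} (truncated by int())
zone.set_input('net_radio')  # -> params {"input": "net_radio"}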
robinandeer/puzzle
puzzle/plugins/vcf/mixins/variant_extras/annotations.py
AnnotationExtras._add_compounds
def _add_compounds(self, variant_obj, info_dict):
        """Check if there are any compounds and add them to the variant.

            The compounds that are added should be sorted on rank score
        """
        compound_list = []
        compound_entry = info_dict.get('Compounds')
        if compound_entry:
            for family_annotation in compound_entry.split(','):
                compounds = family_annotation.split(':')[-1].split('|')
                for compound in compounds:
                    splitted_compound = compound.split('>')
                    compound_id = splitted_compound[0]
                    compound_score = None
                    if len(splitted_compound) > 1:
                        compound_score = int(splitted_compound[-1])
                    compound_list.append(Compound(
                        variant_id=compound_id,
                        combined_score=compound_score
                        )
                    )

        #Sort the compounds based on rank score
        compound_list.sort(key = operator.attrgetter('combined_score'), reverse=True)

        for compound in compound_list:
            variant_obj.add_compound(compound)
python
def _add_compounds(self, variant_obj, info_dict):
        """Check if there are any compounds and add them to the variant.

            The compounds that are added should be sorted on rank score
        """
        compound_list = []
        compound_entry = info_dict.get('Compounds')
        if compound_entry:
            for family_annotation in compound_entry.split(','):
                compounds = family_annotation.split(':')[-1].split('|')
                for compound in compounds:
                    splitted_compound = compound.split('>')
                    compound_id = splitted_compound[0]
                    compound_score = None
                    if len(splitted_compound) > 1:
                        compound_score = int(splitted_compound[-1])
                    compound_list.append(Compound(
                        variant_id=compound_id,
                        combined_score=compound_score
                        )
                    )

        #Sort the compounds based on rank score
        compound_list.sort(key = operator.attrgetter('combined_score'), reverse=True)

        for compound in compound_list:
            variant_obj.add_compound(compound)
[ "def", "_add_compounds", "(", "self", ",", "variant_obj", ",", "info_dict", ")", ":", "compound_list", "=", "[", "]", "compound_entry", "=", "info_dict", ".", "get", "(", "'Compounds'", ")", "if", "compound_entry", ":", "for", "family_annotation", "in", "compound_entry", ".", "split", "(", "','", ")", ":", "compounds", "=", "family_annotation", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "split", "(", "'|'", ")", "for", "compound", "in", "compounds", ":", "splitted_compound", "=", "compound", ".", "split", "(", "'>'", ")", "compound_score", "=", "None", "if", "len", "(", "splitted_compound", ")", ">", "1", ":", "compound_id", "=", "splitted_compound", "[", "0", "]", "compound_score", "=", "int", "(", "splitted_compound", "[", "-", "1", "]", ")", "compound_list", ".", "append", "(", "Compound", "(", "variant_id", "=", "compound_id", ",", "combined_score", "=", "compound_score", ")", ")", "#Sort the compounds based on rank score", "compound_list", ".", "sort", "(", "key", "=", "operator", ".", "attrgetter", "(", "'combined_score'", ")", ",", "reverse", "=", "True", ")", "for", "compound", "in", "compound_list", ":", "variant_obj", ".", "add_compound", "(", "compound", ")" ]
Check if there are any compounds and add them to the variant. The compounds that are added should be sorted on rank score.
[ "Check", "if", "there", "are", "any", "compounds", "and", "add", "them", "to", "the", "variant", "The", "compounds", "that", "are", "added", "should", "be", "sorted", "on", "rank", "score" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/annotations.py#L11-L39
train
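An invented example of the Compounds INFO entry format the parser above expects: family annotations separated by commas, compounds separated by '|', and an optional '>'-separated score after each compound id:

info_dict = {'Compounds': 'fam1:1_2345_A_T>17|1_6789_G_C>24'}
# Parsing splits on ',', takes the text after the last ':', then splits on
# '|' and '>'. After the descending sort on combined_score, the compound
# 1_6789_G_C (score 24) is added before 1_2345_A_T (score 17).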
jwodder/javaproperties
javaproperties/xmlprops.py
load_xml
def load_xml(fp, object_pairs_hook=dict): r""" Parse the contents of the file-like object ``fp`` as an XML properties file and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `load_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``fp`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``fp`` (including duplicates) in order of occurrence. `load_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Files containing non-ASCII characters need to be opened in binary mode in Python 2, while Python 3 accepts both binary and text input. :param fp: the file from which to read the XML properties document :type fp: file-like object :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute """ tree = ET.parse(fp) return object_pairs_hook(_fromXML(tree.getroot()))
python
def load_xml(fp, object_pairs_hook=dict): r""" Parse the contents of the file-like object ``fp`` as an XML properties file and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `load_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``fp`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``fp`` (including duplicates) in order of occurrence. `load_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Files containing non-ASCII characters need to be opened in binary mode in Python 2, while Python 3 accepts both binary and text input. :param fp: the file from which to read the XML properties document :type fp: file-like object :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute """ tree = ET.parse(fp) return object_pairs_hook(_fromXML(tree.getroot()))
[ "def", "load_xml", "(", "fp", ",", "object_pairs_hook", "=", "dict", ")", ":", "tree", "=", "ET", ".", "parse", "(", "fp", ")", "return", "object_pairs_hook", "(", "_fromXML", "(", "tree", ".", "getroot", "(", ")", ")", ")" ]
r""" Parse the contents of the file-like object ``fp`` as an XML properties file and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `load_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``fp`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``fp`` (including duplicates) in order of occurrence. `load_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Files containing non-ASCII characters need to be opened in binary mode in Python 2, while Python 3 accepts both binary and text input. :param fp: the file from which to read the XML properties document :type fp: file-like object :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute
[ "r", "Parse", "the", "contents", "of", "the", "file", "-", "like", "object", "fp", "as", "an", "XML", "properties", "file", "and", "return", "a", "dict", "of", "the", "key", "-", "value", "pairs", "." ]
8b48f040305217ebeb80c98c4354691bbb01429b
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L7-L41
train
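A usage sketch (file name invented): passing collections.OrderedDict as the hook keeps the entries in document order instead of letting later duplicates silently win:

from collections import OrderedDict

with open('settings.xml', 'rb') as fp:   # binary mode is the safe choice on Python 2
    props = load_xml(fp, object_pairs_hook=OrderedDict)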
jwodder/javaproperties
javaproperties/xmlprops.py
loads_xml
def loads_xml(s, object_pairs_hook=dict): r""" Parse the contents of the string ``s`` as an XML properties document and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `loads_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Strings containing non-ASCII characters need to be encoded as bytes in Python 2 (Use either UTF-8 or UTF-16 if the XML document does not contain an encoding declaration), while Python 3 accepts both binary and text input. :param string s: the string from which to read the XML properties document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute """ elem = ET.fromstring(s) return object_pairs_hook(_fromXML(elem))
python
def loads_xml(s, object_pairs_hook=dict): r""" Parse the contents of the string ``s`` as an XML properties document and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `loads_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Strings containing non-ASCII characters need to be encoded as bytes in Python 2 (Use either UTF-8 or UTF-16 if the XML document does not contain an encoding declaration), while Python 3 accepts both binary and text input. :param string s: the string from which to read the XML properties document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute """ elem = ET.fromstring(s) return object_pairs_hook(_fromXML(elem))
[ "def", "loads_xml", "(", "s", ",", "object_pairs_hook", "=", "dict", ")", ":", "elem", "=", "ET", ".", "fromstring", "(", "s", ")", "return", "object_pairs_hook", "(", "_fromXML", "(", "elem", ")", ")" ]
r""" Parse the contents of the string ``s`` as an XML properties document and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `loads_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Strings containing non-ASCII characters need to be encoded as bytes in Python 2 (Use either UTF-8 or UTF-16 if the XML document does not contain an encoding declaration), while Python 3 accepts both binary and text input. :param string s: the string from which to read the XML properties document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute
[ "r", "Parse", "the", "contents", "of", "the", "string", "s", "as", "an", "XML", "properties", "document", "and", "return", "a", "dict", "of", "the", "key", "-", "value", "pairs", "." ]
8b48f040305217ebeb80c98c4354691bbb01429b
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L43-L77
train
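Expected behaviour on a minimal document, assuming loads_xml as defined above (entry text is assumed to become the value):

doc = '''<properties>
<entry key="port">8080</entry>
<entry key="host">localhost</entry>
</properties>'''

loads_xml(doc)
# -> {'port': '8080', 'host': 'localhost'}
loads_xml(doc, object_pairs_hook=list)
# -> [('port', '8080'), ('host', 'localhost')], in document order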
jwodder/javaproperties
javaproperties/xmlprops.py
dump_xml
def dump_xml(props, fp, comment=None, encoding='UTF-8', sort_keys=False): """ Write a series ``props`` of key-value pairs to a binary filehandle ``fp`` in the format of an XML properties file. The file will include both an XML declaration and a doctype declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to write to ``fp``. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param fp: a file-like object to write the values of ``props`` to :type fp: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :return: `None` """ fp = codecs.lookup(encoding).streamwriter(fp, errors='xmlcharrefreplace') print('<?xml version="1.0" encoding={0} standalone="no"?>' .format(quoteattr(encoding)), file=fp) for s in _stream_xml(props, comment, sort_keys): print(s, file=fp)
python
def dump_xml(props, fp, comment=None, encoding='UTF-8', sort_keys=False): """ Write a series ``props`` of key-value pairs to a binary filehandle ``fp`` in the format of an XML properties file. The file will include both an XML declaration and a doctype declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to write to ``fp``. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param fp: a file-like object to write the values of ``props`` to :type fp: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :return: `None` """ fp = codecs.lookup(encoding).streamwriter(fp, errors='xmlcharrefreplace') print('<?xml version="1.0" encoding={0} standalone="no"?>' .format(quoteattr(encoding)), file=fp) for s in _stream_xml(props, comment, sort_keys): print(s, file=fp)
[ "def", "dump_xml", "(", "props", ",", "fp", ",", "comment", "=", "None", ",", "encoding", "=", "'UTF-8'", ",", "sort_keys", "=", "False", ")", ":", "fp", "=", "codecs", ".", "lookup", "(", "encoding", ")", ".", "streamwriter", "(", "fp", ",", "errors", "=", "'xmlcharrefreplace'", ")", "print", "(", "'<?xml version=\"1.0\" encoding={0} standalone=\"no\"?>'", ".", "format", "(", "quoteattr", "(", "encoding", ")", ")", ",", "file", "=", "fp", ")", "for", "s", "in", "_stream_xml", "(", "props", ",", "comment", ",", "sort_keys", ")", ":", "print", "(", "s", ",", "file", "=", "fp", ")" ]
Write a series ``props`` of key-value pairs to a binary filehandle ``fp`` in the format of an XML properties file. The file will include both an XML declaration and a doctype declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to write to ``fp``. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param fp: a file-like object to write the values of ``props`` to :type fp: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :return: `None`
[ "Write", "a", "series", "props", "of", "key", "-", "value", "pairs", "to", "a", "binary", "filehandle", "fp", "in", "the", "format", "of", "an", "XML", "properties", "file", ".", "The", "file", "will", "include", "both", "an", "XML", "declaration", "and", "a", "doctype", "declaration", "." ]
8b48f040305217ebeb80c98c4354691bbb01429b
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L88-L112
train
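A hedged usage sketch (file name invented): the filehandle must be binary, and characters outside the chosen encoding survive as XML character references thanks to the 'xmlcharrefreplace' error handler:

with open('props.xml', 'wb') as fp:
    dump_xml({'snowman': '\u2603'}, fp,
             comment='demo output', encoding='ISO-8859-1', sort_keys=True)
# U+2603 is not representable in ISO-8859-1, so it is written as &#9731;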
jwodder/javaproperties
javaproperties/xmlprops.py
dumps_xml
def dumps_xml(props, comment=None, sort_keys=False): """ Convert a series ``props`` of key-value pairs to a text string containing an XML properties document. The document will include a doctype declaration but not an XML declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to serialize. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :rtype: text string """ return ''.join(s + '\n' for s in _stream_xml(props, comment, sort_keys))
python
def dumps_xml(props, comment=None, sort_keys=False): """ Convert a series ``props`` of key-value pairs to a text string containing an XML properties document. The document will include a doctype declaration but not an XML declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to serialize. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :rtype: text string """ return ''.join(s + '\n' for s in _stream_xml(props, comment, sort_keys))
[ "def", "dumps_xml", "(", "props", ",", "comment", "=", "None", ",", "sort_keys", "=", "False", ")", ":", "return", "''", ".", "join", "(", "s", "+", "'\\n'", "for", "s", "in", "_stream_xml", "(", "props", ",", "comment", ",", "sort_keys", ")", ")" ]
Convert a series ``props`` of key-value pairs to a text string containing an XML properties document. The document will include a doctype declaration but not an XML declaration.

:param props: A mapping or iterable of ``(key, value)`` pairs to serialize. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order.
:param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output
:rtype: text string
[ "Convert", "a", "series", "props", "of", "key", "-", "value", "pairs", "to", "a", "text", "string", "containing", "an", "XML", "properties", "document", ".", "The", "document", "will", "include", "a", "doctype", "declaration", "but", "not", "an", "XML", "declaration", "." ]
8b48f040305217ebeb80c98c4354691bbb01429b
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L114-L130
train
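A companion sketch for dumps_xml: unlike dump_xml it returns the document as a text string, and per its docstring the result carries a doctype declaration but no XML declaration. Assumes the same javaproperties package; the key/value pair is illustrative.

# Usage sketch for javaproperties.dumps_xml.
from javaproperties import dumps_xml

xml_text = dumps_xml({"greeting": "hello"}, comment="demo", sort_keys=True)
# xml_text is a text string containing a DOCTYPE line but no <?xml ...?> line.
print(xml_text)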
robinandeer/puzzle
puzzle/plugins/sql/store.py
Store.connect
def connect(self, db_uri, debug=False): """Configure connection to a SQL database. Args: db_uri (str): path/URI to the database to connect to debug (Optional[bool]): whether to output logging information """ kwargs = {'echo': debug, 'convert_unicode': True} # connect to the SQL database if 'mysql' in db_uri: kwargs['pool_recycle'] = 3600 elif '://' not in db_uri: logger.debug("detected sqlite path URI: {}".format(db_uri)) db_path = os.path.abspath(os.path.expanduser(db_uri)) db_uri = "sqlite:///{}".format(db_path) self.engine = create_engine(db_uri, **kwargs) logger.debug('connection established successfully') # make sure the same engine is propagated to the BASE classes BASE.metadata.bind = self.engine # start a session self.session = scoped_session(sessionmaker(bind=self.engine)) # shortcut to query method self.query = self.session.query return self
python
def connect(self, db_uri, debug=False): """Configure connection to a SQL database. Args: db_uri (str): path/URI to the database to connect to debug (Optional[bool]): whether to output logging information """ kwargs = {'echo': debug, 'convert_unicode': True} # connect to the SQL database if 'mysql' in db_uri: kwargs['pool_recycle'] = 3600 elif '://' not in db_uri: logger.debug("detected sqlite path URI: {}".format(db_uri)) db_path = os.path.abspath(os.path.expanduser(db_uri)) db_uri = "sqlite:///{}".format(db_path) self.engine = create_engine(db_uri, **kwargs) logger.debug('connection established successfully') # make sure the same engine is propagated to the BASE classes BASE.metadata.bind = self.engine # start a session self.session = scoped_session(sessionmaker(bind=self.engine)) # shortcut to query method self.query = self.session.query return self
[ "def", "connect", "(", "self", ",", "db_uri", ",", "debug", "=", "False", ")", ":", "kwargs", "=", "{", "'echo'", ":", "debug", ",", "'convert_unicode'", ":", "True", "}", "# connect to the SQL database", "if", "'mysql'", "in", "db_uri", ":", "kwargs", "[", "'pool_recycle'", "]", "=", "3600", "elif", "'://'", "not", "in", "db_uri", ":", "logger", ".", "debug", "(", "\"detected sqlite path URI: {}\"", ".", "format", "(", "db_uri", ")", ")", "db_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "db_uri", ")", ")", "db_uri", "=", "\"sqlite:///{}\"", ".", "format", "(", "db_path", ")", "self", ".", "engine", "=", "create_engine", "(", "db_uri", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "'connection established successfully'", ")", "# make sure the same engine is propagated to the BASE classes", "BASE", ".", "metadata", ".", "bind", "=", "self", ".", "engine", "# start a session", "self", ".", "session", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "self", ".", "engine", ")", ")", "# shortcut to query method", "self", ".", "query", "=", "self", ".", "session", ".", "query", "return", "self" ]
Configure connection to a SQL database.

Args:
    db_uri (str): path/URI to the database to connect to
    debug (Optional[bool]): whether to output logging information
[ "Configure", "connection", "to", "a", "SQL", "database", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/store.py#L62-L86
train
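The notable piece of Store.connect is how a bare filesystem path is normalized into a sqlite URI before being handed to create_engine. A standalone sketch of just that branch logic, independent of puzzle's Store class (the function name is hypothetical), is shown below. Note also that the convert_unicode engine flag used above reflects the SQLAlchemy versions current when this code was written; it was later deprecated and removed in SQLAlchemy 2.0.

import os

def normalize_db_uri(db_uri):
    # Mirrors the branch in Store.connect: a string without '://' is
    # treated as a sqlite file path and expanded to an absolute URI.
    if '://' not in db_uri:
        db_path = os.path.abspath(os.path.expanduser(db_uri))
        db_uri = "sqlite:///{}".format(db_path)
    return db_uri

print(normalize_db_uri("~/puzzle.db"))      # e.g. sqlite:////home/user/puzzle.db
print(normalize_db_uri("mysql://host/db"))  # already a URI: returned unchanged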
robinandeer/puzzle
puzzle/plugins/sql/store.py
Store.select_plugin
def select_plugin(self, case_obj): """Select and initialize the correct plugin for the case.""" if case_obj.variant_mode == 'vcf': logger.debug("Using vcf plugin") plugin = VcfPlugin(case_obj.variant_type) elif case_obj.variant_mode == 'gemini': logger.debug("Using gemini plugin") plugin = GeminiPlugin(case_obj.variant_type) #Add case to plugin plugin.add_case(case_obj) self.variant_type = case_obj.variant_type case_id = case_obj.case_id return plugin, case_id
python
def select_plugin(self, case_obj): """Select and initialize the correct plugin for the case.""" if case_obj.variant_mode == 'vcf': logger.debug("Using vcf plugin") plugin = VcfPlugin(case_obj.variant_type) elif case_obj.variant_mode == 'gemini': logger.debug("Using gemini plugin") plugin = GeminiPlugin(case_obj.variant_type) #Add case to plugin plugin.add_case(case_obj) self.variant_type = case_obj.variant_type case_id = case_obj.case_id return plugin, case_id
[ "def", "select_plugin", "(", "self", ",", "case_obj", ")", ":", "if", "case_obj", ".", "variant_mode", "==", "'vcf'", ":", "logger", ".", "debug", "(", "\"Using vcf plugin\"", ")", "plugin", "=", "VcfPlugin", "(", "case_obj", ".", "variant_type", ")", "elif", "case_obj", ".", "variant_mode", "==", "'gemini'", ":", "logger", ".", "debug", "(", "\"Using gemini plugin\"", ")", "plugin", "=", "GeminiPlugin", "(", "case_obj", ".", "variant_type", ")", "#Add case to plugin", "plugin", ".", "add_case", "(", "case_obj", ")", "self", ".", "variant_type", "=", "case_obj", ".", "variant_type", "case_id", "=", "case_obj", ".", "case_id", "return", "plugin", ",", "case_id" ]
Select and initialize the correct plugin for the case.
[ "Select", "and", "initialize", "the", "correct", "plugin", "for", "the", "case", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/store.py#L131-L146
train
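select_plugin dispatches on case_obj.variant_mode, but note that an unrecognized mode would leave plugin unbound and surface as an UnboundLocalError at the plugin.add_case call. A hedged standalone sketch of the same dispatch pattern with an explicit error branch follows; PluginA and PluginB are stand-ins for puzzle's VcfPlugin and GeminiPlugin, not the real classes.

# Dispatch-table sketch; class names are hypothetical stand-ins.
class PluginA:
    def __init__(self, variant_type):
        self.variant_type = variant_type

class PluginB:
    def __init__(self, variant_type):
        self.variant_type = variant_type

PLUGINS = {'vcf': PluginA, 'gemini': PluginB}

def select_plugin(variant_mode, variant_type):
    try:
        # Unknown modes fail loudly here instead of falling through
        # to an UnboundLocalError later.
        return PLUGINS[variant_mode](variant_type)
    except KeyError:
        raise ValueError("unknown variant mode: {!r}".format(variant_mode))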
robinandeer/puzzle
puzzle/server/blueprints/public/views.py
index
def index(): """Show the landing page.""" gene_lists = app.db.gene_lists() if app.config['STORE_ENABLED'] else [] queries = app.db.gemini_queries() if app.config['STORE_ENABLED'] else [] case_groups = {} for case in app.db.cases(): key = (case.variant_source, case.variant_type, case.variant_mode) if key not in case_groups: case_groups[key] = [] case_groups[key].append(case) return render_template('index.html', case_groups=case_groups, gene_lists=gene_lists, queries=queries)
python
def index(): """Show the landing page.""" gene_lists = app.db.gene_lists() if app.config['STORE_ENABLED'] else [] queries = app.db.gemini_queries() if app.config['STORE_ENABLED'] else [] case_groups = {} for case in app.db.cases(): key = (case.variant_source, case.variant_type, case.variant_mode) if key not in case_groups: case_groups[key] = [] case_groups[key].append(case) return render_template('index.html', case_groups=case_groups, gene_lists=gene_lists, queries=queries)
[ "def", "index", "(", ")", ":", "gene_lists", "=", "app", ".", "db", ".", "gene_lists", "(", ")", "if", "app", ".", "config", "[", "'STORE_ENABLED'", "]", "else", "[", "]", "queries", "=", "app", ".", "db", ".", "gemini_queries", "(", ")", "if", "app", ".", "config", "[", "'STORE_ENABLED'", "]", "else", "[", "]", "case_groups", "=", "{", "}", "for", "case", "in", "app", ".", "db", ".", "cases", "(", ")", ":", "key", "=", "(", "case", ".", "variant_source", ",", "case", ".", "variant_type", ",", "case", ".", "variant_mode", ")", "if", "key", "not", "in", "case_groups", ":", "case_groups", "[", "key", "]", "=", "[", "]", "case_groups", "[", "key", "]", ".", "append", "(", "case", ")", "return", "render_template", "(", "'index.html'", ",", "case_groups", "=", "case_groups", ",", "gene_lists", "=", "gene_lists", ",", "queries", "=", "queries", ")" ]
Show the landing page.
[ "Show", "the", "landing", "page", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L18-L31
train
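The core of the index view is grouping cases by a (variant_source, variant_type, variant_mode) tuple. collections.defaultdict expresses the same grouping without the explicit membership check; the dict-based case records below are hypothetical stand-ins for puzzle's case objects.

from collections import defaultdict

# Illustrative stand-ins for case objects returned by app.db.cases().
cases = [
    {'variant_source': 'a.vcf', 'variant_type': 'snv', 'variant_mode': 'vcf'},
    {'variant_source': 'a.vcf', 'variant_type': 'snv', 'variant_mode': 'vcf'},
    {'variant_source': 'b.db', 'variant_type': 'sv', 'variant_mode': 'gemini'},
]

case_groups = defaultdict(list)
for case in cases:
    key = (case['variant_source'], case['variant_type'], case['variant_mode'])
    case_groups[key].append(case)  # same grouping as index(), minus the 'if key not in' check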
robinandeer/puzzle
puzzle/server/blueprints/public/views.py
case
def case(case_id): """Show the overview for a case.""" case_obj = app.db.case(case_id) return render_template('case.html', case=case_obj, case_id=case_id)
python
def case(case_id): """Show the overview for a case.""" case_obj = app.db.case(case_id) return render_template('case.html', case=case_obj, case_id=case_id)
[ "def", "case", "(", "case_id", ")", ":", "case_obj", "=", "app", ".", "db", ".", "case", "(", "case_id", ")", "return", "render_template", "(", "'case.html'", ",", "case", "=", "case_obj", ",", "case_id", "=", "case_id", ")" ]
Show the overview for a case.
[ "Show", "the", "overview", "for", "a", "case", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L35-L38
train
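The extracted snippet omits the route decorator, so the URL rule is not part of the record. A sketch of how such a view is typically registered on a Flask blueprint is shown below; the '/cases/<case_id>' rule is an assumption, and app.db is puzzle's store object, which the application attaches at setup time rather than a stock Flask attribute.

from flask import Blueprint, render_template, current_app as app

blueprint = Blueprint('public', __name__)

# URL rule is assumed; the decorator is not in the extracted snippet.
@blueprint.route('/cases/<case_id>')
def case(case_id):
    case_obj = app.db.case(case_id)  # app.db: store attached by puzzle at app creation
    return render_template('case.html', case=case_obj, case_id=case_id)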
robinandeer/puzzle
puzzle/server/blueprints/public/views.py
delete_phenotype
def delete_phenotype(phenotype_id): """Delete phenotype from an individual.""" ind_id = request.form['ind_id'] ind_obj = app.db.individual(ind_id) try: app.db.remove_phenotype(ind_obj, phenotype_id) except RuntimeError as error: return abort(500, error.message) return redirect(request.referrer)
python
def delete_phenotype(phenotype_id): """Delete phenotype from an individual.""" ind_id = request.form['ind_id'] ind_obj = app.db.individual(ind_id) try: app.db.remove_phenotype(ind_obj, phenotype_id) except RuntimeError as error: return abort(500, error.message) return redirect(request.referrer)
[ "def", "delete_phenotype", "(", "phenotype_id", ")", ":", "ind_id", "=", "request", ".", "form", "[", "'ind_id'", "]", "ind_obj", "=", "app", ".", "db", ".", "individual", "(", "ind_id", ")", "try", ":", "app", ".", "db", ".", "remove_phenotype", "(", "ind_obj", ",", "phenotype_id", ")", "except", "RuntimeError", "as", "error", ":", "return", "abort", "(", "500", ",", "error", ".", "message", ")", "return", "redirect", "(", "request", ".", "referrer", ")" ]
Delete phenotype from an individual.
[ "Delete", "phenotype", "from", "an", "individual", "." ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L66-L74
train
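One portability caveat in delete_phenotype: error.message is a Python 2-ism; BaseException lost the .message attribute in Python 3, so under Python 3 the abort call itself would raise AttributeError. A hedged sketch of the portable form follows; db, ind_obj, and phenotype_id are parameters standing in for the view's locals.

from flask import abort

def remove_phenotype_or_abort(db, ind_obj, phenotype_id):
    # str(error) works on both Python 2 and 3, whereas error.message
    # exists only on Python 2 exception objects.
    try:
        db.remove_phenotype(ind_obj, phenotype_id)
    except RuntimeError as error:
        abort(500, str(error))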