repo | path | func_name | code | language | docstring | sha | url | partition
---|---|---|---|---|---|---|---|---
ArangoDB-Community/pyArango
|
pyArango/graph.py
|
Graph.deleteEdge
|
def deleteEdge(self, edge, waitForSync = False) :
    """removes an edge from the graph"""
    url = "%s/edge/%s" % (self.URL, edge._id)
    r = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
    if r.status_code == 200 or r.status_code == 202 :
        return True
    raise DeletionError("Unable to delete edge, %s" % edge._id, r.json())
|
python
|
removes an edge from the graph
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/graph.py#L196-L202
|
train
|
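A minimal usage sketch for Graph.deleteEdge, not a definitive recipe: the server URL, credentials, database name, graph class, and collection names below are all illustrative assumptions.

from pyArango.connection import Connection

# Assumed setup: local server, root user, an existing 'social' database,
# and a Graph subclass named 'Friendships' (all hypothetical).
conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["social"]
graph = db.graphs["Friendships"]

a = graph.createVertex("Person", {"name": "Alice"})
b = graph.createVertex("Person", {"name": "Bob"})
edge = graph.createEdge("friend", a._id, b._id, {})

# deleteEdge issues a DELETE on .../edge/<_id>; True on HTTP 200/202, DeletionError otherwise.
graph.deleteEdge(edge)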
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
DocumentCache.delete
|
def delete(self, _key) :
    "removes a document from the cache"
    try :
        doc = self.cacheStore[_key]
        doc.prev.nextDoc = doc.nextDoc
        doc.nextDoc.prev = doc.prev
        del(self.cacheStore[_key])
    except KeyError :
        raise KeyError("Document with _key %s is not available in cache" % _key)
|
python
|
removes a document from the cache
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L73-L81
|
train
|
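DocumentCache.delete above unlinks a node from the cache's doubly linked list before dropping it from the key map. The following standalone sketch mirrors that unlink step; the Node class and setup are hypothetical stand-ins for pyArango's internals.

class Node:
    # minimal doubly linked node with the same prev/nextDoc fields used above
    def __init__(self, key):
        self._key = key
        self.prev = None
        self.nextDoc = None

# build a three-node chain a <-> b <-> c, indexed by key
a, b, c = Node("a"), Node("b"), Node("c")
a.nextDoc, b.prev, b.nextDoc, c.prev = b, a, c, b
cacheStore = {n._key: n for n in (a, b, c)}

# unlink 'b' exactly as delete() does: bridge its neighbours, then forget the key
doc = cacheStore["b"]
doc.prev.nextDoc = doc.nextDoc
doc.nextDoc.prev = doc.prev
del cacheStore["b"]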
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
DocumentCache.getChain
|
def getChain(self) :
    "returns a list of keys representing the chain of documents"
    l = []
    h = self.head
    while h :
        l.append(h._key)
        h = h.nextDoc
    return l
|
python
|
returns a list of keys representing the chain of documents
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L83-L90
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Field.validate
|
def validate(self, value) :
    """checks the validity of 'value' given the list of validators"""
    for v in self.validators :
        v.validate(value)
    return True
|
python
|
checks the validity of 'value' given the list of validators
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L121-L125
|
train
|
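A short sketch of Field.validate in use, assuming the NotNull validator that ships in pyArango.validation; a failing validator raises a ValidationError rather than returning False.

from pyArango.collection import Field
from pyArango.validation import NotNull
from pyArango.theExceptions import ValidationError

f = Field(validators=[NotNull()])
print(f.validate("hello"))   # True: every validator accepted the value

try:
    f.validate(None)          # NotNull should reject None
except ValidationError as e:
    print("rejected:", e)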
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection_metaclass.getCollectionClass
|
def getCollectionClass(cls, name) :
    """Return the class object of a collection given its 'name'"""
    try :
        return cls.collectionClasses[name]
    except KeyError :
        raise KeyError( "There is no Collection Class of type: '%s'; currently supported values: [%s]" % (name, ', '.join(getCollectionClasses().keys())) )
|
python
|
Return the class object of a collection given its 'name'
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L168-L173
|
train
|
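The lookup above works because Collection_metaclass records every Collection subclass at class-creation time. A server-free sketch (the 'Users' class is hypothetical):

from pyArango.collection import Collection, getCollectionClass

class Users(Collection):
    # defining the subclass is enough: the metaclass registers it under its name
    _fields = {}

print(getCollectionClass("Users"))   # the Users class object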
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection_metaclass.isDocumentCollection
|
def isDocumentCollection(cls, name) :
    """returns True if 'name' is the name of a document collection, False otherwise."""
    try :
        col = cls.getCollectionClass(name)
        return issubclass(col, Collection)
    except KeyError :
        return False
|
python
|
returns True if 'name' is the name of a document collection, False otherwise.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L181-L187
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection_metaclass.isEdgeCollection
|
def isEdgeCollection(cls, name) :
    """returns True if 'name' is the name of an edge collection, False otherwise."""
    try :
        col = cls.getCollectionClass(name)
        return issubclass(col, Edges)
    except KeyError :
        return False
|
python
|
returns True if 'name' is the name of an edge collection, False otherwise.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L190-L196
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.getIndexes
|
def getIndexes(self) :
    """Fills self.indexes with all the indexes associated with the collection and returns it"""
    url = "%s/index" % self.database.URL
    r = self.connection.session.get(url, params = {"collection": self.name})
    data = r.json()
    for ind in data["indexes"] :
        self.indexes[ind["type"]][ind["id"]] = Index(collection = self, infos = ind)
    return self.indexes
|
python
|
Fills self.indexes with all the indexes associated with the collection and returns it
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L265-L273
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.delete
|
def delete(self) :
    """deletes the collection from the database"""
    r = self.connection.session.delete(self.URL)
    data = r.json()
    if r.status_code != 200 or data["error"] :
        raise DeletionError(data["errorMessage"], data)
|
python
|
deletes the collection from the database
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L283-L288
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.createDocument
|
def createDocument(self, initDict = None) :
    """creates and returns a document populated with the defaults or with the values in initDict"""
    if initDict is not None :
        return self.createDocument_(initDict)
    else :
        if self._validation["on_load"] :
            self._validation["on_load"] = False
            doc = self.createDocument_(self.defaultDocument)
            # restore the flag before returning; in the original this line was unreachable
            self._validation["on_load"] = True
            return doc
        else :
            return self.createDocument_(self.defaultDocument)
|
python
|
creates and returns a document populated with the defaults or with the values in initDict
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L290-L300
|
train
|
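The typical createDocument flow, as a sketch; the server address, credentials, database, and collection names are illustrative assumptions.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["test_db"]                        # assumed to exist
users = db.createCollection(name="users")   # a plain Collection

doc = users.createDocument()                # starts from the collection's default document
doc["name"] = "Alice"
doc.save()                                  # persists it to ArangoDB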
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.createDocument_
|
def createDocument_(self, initDict = None) :
    "creates and returns a completely empty document or one populated with initDict"
    if initDict is None :
        initV = {}
    else :
        initV = initDict
    return self.documentClass(self, initV)
|
python
|
creates and returns a completely empty document or one populated with initDict
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L302-L309
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.ensureHashIndex
|
def ensureHashIndex(self, fields, unique = False, sparse = True, deduplicate = False) :
    """Creates a hash index if it does not already exist, and returns it"""
    data = {
        "type" : "hash",
        "fields" : fields,
        "unique" : unique,
        "sparse" : sparse,
        "deduplicate": deduplicate
    }
    ind = Index(self, creationData = data)
    self.indexes["hash"][ind.infos["id"]] = ind
    return ind
|
python
|
Creates a hash index if it does not already exist, and returns it
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L333-L344
|
train
|
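A hedged sketch of ensureHashIndex (the geo and fulltext variants below follow the same pattern); database and collection names are assumptions.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
users = conn["test_db"]["users"]   # assumed database and collection

# build a unique, sparse hash index on 'email'; the Index object is cached in users.indexes["hash"]
idx = users.ensureHashIndex(["email"], unique=True, sparse=True)
print(idx.infos["id"])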
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.ensureGeoIndex
|
def ensureGeoIndex(self, fields) :
    """Creates a geo index if it does not already exist, and returns it"""
    data = {
        "type" : "geo",
        "fields" : fields,
    }
    ind = Index(self, creationData = data)
    self.indexes["geo"][ind.infos["id"]] = ind
    return ind
|
python
|
Creates a geo index if it does not already exist, and returns it
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L359-L367
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.ensureFulltextIndex
|
def ensureFulltextIndex(self, fields, minLength = None) :
    """Creates a fulltext index if it does not already exist, and returns it"""
    data = {
        "type" : "fulltext",
        "fields" : fields,
    }
    if minLength is not None :
        data["minLength"] = minLength
    ind = Index(self, creationData = data)
    self.indexes["fulltext"][ind.infos["id"]] = ind
    return ind
|
python
|
Creates a fulltext index if it does not already exist, and returns it
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L369-L380
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.validatePrivate
|
def validatePrivate(self, field, value) :
    """validate a private field value"""
    if field not in self.arangoPrivates :
        raise ValueError("%s is not a private field of collection %s" % (field, self))
    if field in self._fields :
        self._fields[field].validate(value)
    return True
|
python
|
validate a private field value
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L383-L390
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.simpleQuery
|
def simpleQuery(self, queryType, rawResults = False, **queryArgs) :
    """General interface for simple queries. queryType can be something like 'all', 'by-example' etc.; everything is in the ArangoDB documentation.
    If rawResults is True, the query will return dictionaries instead of Document objects.
    """
    return SimpleQuery(self, queryType, rawResults, **queryArgs)
|
python
|
General interface for simple queries. queryType can be something like 'all', 'by-example' etc.; everything is in the ArangoDB documentation.
If rawResults is True, the query will return dictionaries instead of Document objects.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L486-L490
|
train
|
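A sketch of simpleQuery against an assumed collection; extra keyword arguments (skip, limit, batchSize, ...) are forwarded to the server as documented for ArangoDB's simple queries.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
users = conn["test_db"]["users"]   # assumed names

# 'all' iterates the whole collection; rawResults=False yields Document objects
q = users.simpleQuery("all", rawResults=False, limit=10)
for doc in q:
    print(doc["name"])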
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.action
|
def action(self, method, action, **params) :
    """a generic function for interacting with everything that doesn't have an assigned function"""
    fct = getattr(self.connection.session, method.lower())
    r = fct(self.URL + "/" + action, params = params)
    return r.json()
|
python
|
a generic function for interacting with everything that doesn't have an assigned function
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L492-L496
|
train
|
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Collection.bulkSave
|
def bulkSave(self, docs, onDuplicate="error", **params) :
    """Parameter docs must be either an iterable of documents or dictionaries.
    This function will return the number of documents created and updated, and will raise an UpdateError exception if there's at least one error.
    params are any parameters from ArangoDB's documentation"""
    payload = []
    for d in docs :
        if type(d) is dict :
            payload.append(json.dumps(d, default=str))
        else :
            try:
                payload.append(d.toJson())
            except Exception as e:
                payload.append(json.dumps(d.getStore(), default=str))
    payload = '\n'.join(payload)
    params["type"] = "documents"
    params["onDuplicate"] = onDuplicate
    params["collection"] = self.name
    URL = "%s/import" % self.database.URL
    r = self.connection.session.post(URL, params = params, data = payload)
    data = r.json()
    if (r.status_code == 201) and "error" not in data :
        return True
    else :
        if data["errors"] > 0 :
            raise UpdateError("%d documents could not be created" % data["errors"], data)
        return data["updated"] + data["created"]
|
python
|
Parameter docs must be either an iterable of documents or dictionaries.
This function will return the number of documents created and updated, and will raise an UpdateError exception if there's at least one error.
params are any parameters from ArangoDB's documentation
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L498-L528
|
train
|
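A bulkSave sketch; plain dicts are serialised with json.dumps while Document objects go through toJson()/getStore(), per the code above. Database and collection names are assumptions.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
users = conn["test_db"]["users"]   # assumed names

docs = [{"_key": "u1", "name": "Alice"}, {"_key": "u2", "name": "Bob"}]
# onDuplicate="update" patches existing _keys instead of raising UpdateError
users.bulkSave(docs, onDuplicate="update")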
ArangoDB-Community/pyArango
|
pyArango/collection.py
|
Edges.getEdges
|
def getEdges(self, vertex, inEdges = True, outEdges = True, rawResults = False) :
    """returns in, out, or both edges linked to a given document. vertex can be either a Document object or a string for an _id.
    If rawResults is True, the raw ArangoDB results will be returned as fetched; if False, a list of Edge objects will be returned."""
    if isinstance(vertex, Document):
        vId = vertex._id
    elif (type(vertex) is str) or (type(vertex) is bytes):
        vId = vertex
    else :
        raise ValueError("Vertex is neither a Document nor a String")
    params = {"vertex" : vId}
    if inEdges and outEdges :
        pass
    elif inEdges :
        params["direction"] = "in"
    elif outEdges :
        params["direction"] = "out"
    else :
        raise ValueError("inEdges, outEdges or both must have a boolean value")
    r = self.connection.session.get(self.edgesURL, params = params)
    data = r.json()
    if r.status_code == 200 :
        if not rawResults :
            ret = []
            for e in data["edges"] :
                ret.append(Edge(self, e))
            return ret
        else :
            return data["edges"]
    else :
        raise CreationError("Unable to return edges for vertex: %s" % vId, data)
|
python
|
returns in, out, or both edges linked to a given document. vertex can be either a Document object or a string for an _id.
If rawResults is True, the raw ArangoDB results will be returned as fetched; if False, a list of Edge objects will be returned.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L695-L726
|
train
|
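A getEdges sketch against an assumed 'social' database with a 'Person' vertex collection and a 'friend' Edges collection; both a Document and a string _id are accepted, as the code above shows.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["social"]             # assumed database
friends = db["friend"]          # assumed Edges collection
alice = db["Person"]["alice"]   # fetch a vertex by _key

incoming = friends.getEdges(alice, inEdges=True, outEdges=False)
both = friends.getEdges(alice._id)   # passing the _id string works too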
ArangoDB-Community/pyArango
|
pyArango/database.py
|
Database.reloadCollections
|
def reloadCollections(self) :
    "reloads the collection list."
    r = self.connection.session.get(self.collectionsURL)
    data = r.json()
    if r.status_code == 200 :
        self.collections = {}
        for colData in data["result"] :
            colName = colData['name']
            if colData['isSystem'] :
                colObj = COL.SystemCollection(self, colData)
            else :
                try :
                    colClass = COL.getCollectionClass(colName)
                    colObj = colClass(self, colData)
                except KeyError :
                    if colData["type"] == CONST.COLLECTION_EDGE_TYPE :
                        colObj = COL.Edges(self, colData)
                    elif colData["type"] == CONST.COLLECTION_DOCUMENT_TYPE :
                        colObj = COL.Collection(self, colData)
                    else :
                        print(("Warning!! Collection of unknown type: %d, trying to load it as Collection nonetheless." % colData["type"]))
                        colObj = COL.Collection(self, colData)
            self.collections[colName] = colObj
    else :
        raise UpdateError(data["errorMessage"], data)
|
python
|
reloads the collection list.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L36-L62
|
train
|
ArangoDB-Community/pyArango
|
pyArango/database.py
|
Database.reloadGraphs
|
def reloadGraphs(self) :
    "reloads the graph list"
    r = self.connection.session.get(self.graphsURL)
    data = r.json()
    if r.status_code == 200 :
        self.graphs = {}
        for graphData in data["graphs"] :
            try :
                self.graphs[graphData["_key"]] = GR.getGraphClass(graphData["_key"])(self, graphData)
            except KeyError :
                self.graphs[graphData["_key"]] = Graph(self, graphData)
    else :
        raise UpdateError(data["errorMessage"], data)
|
python
|
reloads the graph list
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L64-L76
|
train
|
ArangoDB-Community/pyArango
|
pyArango/database.py
|
Database.createGraph
|
def createGraph(self, name, createCollections = True, isSmart = False, numberOfShards = None, smartGraphAttribute = None) :
    """Creates a graph and returns it. 'name' must be the name of a class inheriting from Graph.
    Checks will be performed to make sure that every collection mentioned in the edge definitions exists. Raises a ValueError in case of
    a non-existing collection."""
    def _checkCollectionList(lst) :
        for colName in lst :
            if not COL.isCollection(colName) :
                raise ValueError("'%s' is not a defined Collection" % colName)

    graphClass = GR.getGraphClass(name)
    ed = []
    for e in graphClass._edgeDefinitions :
        if not COL.isEdgeCollection(e.edgesCollection) :
            raise ValueError("'%s' is not a defined Edge Collection" % e.edgesCollection)
        _checkCollectionList(e.fromCollections)
        _checkCollectionList(e.toCollections)
        ed.append(e.toJson())
    _checkCollectionList(graphClass._orphanedCollections)

    options = {}
    if numberOfShards:
        options['numberOfShards'] = numberOfShards
    if smartGraphAttribute:
        options['smartGraphAttribute'] = smartGraphAttribute

    payload = {
        "name": name,
        "edgeDefinitions": ed,
        "orphanCollections": graphClass._orphanedCollections
    }
    if isSmart :
        payload['isSmart'] = isSmart
    if options:
        payload['options'] = options

    payload = json.dumps(payload)
    r = self.connection.session.post(self.graphsURL, data = payload)
    data = r.json()
    if r.status_code == 201 or r.status_code == 202 :
        self.graphs[name] = graphClass(self, data["graph"])
    else :
        raise CreationError(data["errorMessage"], data)
    return self.graphs[name]
|
python
|
Creates a graph and returns it. 'name' must be the name of a class inheriting from Graph.
Checks will be performed to make sure that every collection mentioned in the edge definitions exists. Raises a ValueError in case of
a non-existing collection.
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L129-L179
|
train
|
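createGraph resolves 'name' to a Graph subclass and checks its edge definitions, so the schema classes must exist first. A sketch under that assumption (all class, database, and collection names are hypothetical):

from pyArango.connection import Connection
from pyArango.collection import Collection, Edges, Field
from pyArango.graph import Graph, EdgeDefinition

class Person(Collection):
    _fields = {"name": Field()}

class friend(Edges):
    _fields = {}

class Friendships(Graph):
    _edgeDefinitions = [EdgeDefinition("friend", fromCollections=["Person"], toCollections=["Person"])]
    _orphanedCollections = []

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["social"]             # assumed database
db.createCollection("Person")   # the checks above require existing collections
db.createCollection("friend")
graph = db.createGraph("Friendships")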
ArangoDB-Community/pyArango
|
pyArango/database.py
|
Database.validateAQLQuery
|
def validateAQLQuery(self, query, bindVars = None, options = None) :
    "returns the server answer if the query is valid. Raises an AQLQueryError if not"
    if bindVars is None :
        bindVars = {}
    if options is None :
        options = {}
    payload = {'query' : query, 'bindVars' : bindVars, 'options' : options}
    r = self.connection.session.post(self.cursorsURL, data = json.dumps(payload, default=str))
    data = r.json()
    if r.status_code == 201 and not data["error"] :
        return data
    else :
        raise AQLQueryError(data["errorMessage"], query, data)
|
python
|
returns the server answer if the query is valid. Raises an AQLQueryError if not
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L212-L224
|
train
|
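A validateAQLQuery sketch; note that, as written above, it posts to the cursor endpoint, so a valid query is actually executed and the server's answer returned. Database and collection names are assumptions.

from pyArango.connection import Connection
from pyArango.theExceptions import AQLQueryError

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["test_db"]   # assumed name

aql = "FOR u IN users FILTER u.name == @name RETURN u"
try:
    answer = db.validateAQLQuery(aql, bindVars={"name": "Alice"})
    print(answer["error"])   # False when the server accepted the query
except AQLQueryError as e:
    print("invalid query:", e)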
ArangoDB-Community/pyArango
|
pyArango/database.py
|
Database.transaction
|
def transaction(self, collections, action, waitForSync = False, lockTimeout = None, params = None) :
    """Execute a server-side transaction"""
    payload = {
        "collections": collections,
        "action": action,
        "waitForSync": waitForSync}
    if lockTimeout is not None:
        payload["lockTimeout"] = lockTimeout
    if params is not None:
        payload["params"] = params
    self.connection.reportStart(action)
    r = self.connection.session.post(self.transactionURL, data = json.dumps(payload, default=str))
    self.connection.reportItem()
    data = r.json()
    if (r.status_code == 200 or r.status_code == 201 or r.status_code == 202) and not data.get("error") :
        return data
    else :
        raise TransactionError(data["errorMessage"], action, data)
|
python
|
Execute a server-side transaction
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L226-L248
|
train
|
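A transaction sketch: 'action' is a JavaScript function executed server-side and 'collections' declares the write set. The '@arangodb' module path assumes ArangoDB 3.x; database and collection names are illustrative.

from pyArango.connection import Connection

conn = Connection(arangoURL="http://localhost:8529", username="root", password="")
db = conn["test_db"]   # assumed name

action = """
function (params) {
    var db = require('@arangodb').db;
    db.users.save({name: params.name});
    return 'ok';
}
"""
# returns the server's JSON answer on 200/201/202, raises TransactionError otherwise
result = db.transaction(collections={"write": ["users"]}, action=action, params={"name": "Carol"})
print(result["result"])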
ArangoDB-Community/pyArango
|
pyArango/document.py
|
DocumentStore.getPatches
|
def getPatches(self) :
    """get patches as a dictionary"""
    if not self.mustValidate :
        return self.getStore()
    res = {}
    res.update(self.patchStore)
    for k, v in self.subStores.items() :
        res[k] = v.getPatches()
    return res
|
python
|
get patches as a dictionary
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L38-L48
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
DocumentStore.getStore
|
def getStore(self) :
    """get the inner store as dictionary"""
    res = {}
    res.update(self.store)
    for k, v in self.subStores.items() :
        res[k] = v.getStore()
    return res
|
python
|
get the inner store as dictionary
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L50-L57
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
DocumentStore.validateField
|
def validateField(self, field) :
    """Validate a field"""
    if field not in self.validators and not self.collection._validation['allow_foreign_fields'] :
        raise SchemaViolation(self.collection.__class__, field)
    if field in self.store:
        if isinstance(self.store[field], DocumentStore) :
            return self[field].validate()
        if field in self.patchStore :
            return self.validators[field].validate(self.patchStore[field])
        else :
            try :
                return self.validators[field].validate(self.store[field])
            except ValidationError as e:
                raise ValidationError( "'%s' -> %s" % ( field, str(e)) )
            except AttributeError:
                if isinstance(self.validators[field], dict) and not isinstance(self.store[field], dict) :
                    raise ValueError("Validator expected a sub document for field '%s', got '%s' instead" % (field, self.store[field]) )
                else :
                    raise
    return True
|
python
|
Validate a field
|
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L59-L80
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
DocumentStore.validate
|
def validate(self) :
    """Validate the whole document"""
    if not self.mustValidate :
        return True
    res = {}
    for field in self.validators.keys() :
        try :
            if isinstance(self.validators[field], dict) and field not in self.store :
                self.store[field] = DocumentStore(self.collection, validators = self.validators[field], initDct = {}, subStore=True, validateInit=self.validateInit)
            self.validateField(field)
        except InvalidDocument as e :
            res.update(e.errors)
        except (ValidationError, SchemaViolation) as e:
            res[field] = str(e)
    if len(res) > 0 :
        raise InvalidDocument(res)
    return True
|
python
|
Validate the whole document
|
[
"Validate",
"the",
"whole",
"document"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L82-L101
|
train
|
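A minimal usage sketch of how this validation path is typically triggered from user code, assuming a running local ArangoDB and the stock pyArango validator classes; the Humans collection, test_db database, and credentials are illustrative, not from the source records.

from pyArango.connection import Connection
from pyArango.collection import Collection, Field
from pyArango.theExceptions import InvalidDocument
from pyArango.validation import NotNull

class Humans(Collection):
    # reject unknown fields and validate on every save
    _validation = {
        'on_save': True,
        'on_set': False,
        'on_load': False,
        'allow_foreign_fields': False,
    }
    _fields = {
        'name': Field(validators=[NotNull()]),
    }

conn = Connection(username="root", password="root")  # assumed credentials
db = conn["test_db"]                                 # assumed existing database
humans = db.createCollection("Humans")
doc = humans.createDocument()
try:
    doc.save()  # 'name' is missing, so DocumentStore.validate() raises
except InvalidDocument as e:
    print(e.errors)  # field name -> error message, as aggregated in res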
ArangoDB-Community/pyArango
|
pyArango/document.py
|
DocumentStore.set
|
def set(self, dct) :
"""Set the store using a dictionary"""
# if not self.mustValidate :
# self.store = dct
# self.patchStore = dct
# return
for field, value in dct.items() :
if field not in self.collection.arangoPrivates :
if isinstance(value, dict) :
if field in self.validators and isinstance(self.validators[field], dict):
vals = self.validators[field]
else :
vals = {}
self[field] = DocumentStore(self.collection, validators = vals, initDct = value, patch = self.patching, subStore=True, validateInit=self.validateInit)
self.subStores[field] = self.store[field]
else :
self[field] = value
|
python
|
def set(self, dct) :
"""Set the store using a dictionary"""
# if not self.mustValidate :
# self.store = dct
# self.patchStore = dct
# return
for field, value in dct.items() :
if field not in self.collection.arangoPrivates :
if isinstance(value, dict) :
if field in self.validators and isinstance(self.validators[field], dict):
vals = self.validators[field]
else :
vals = {}
self[field] = DocumentStore(self.collection, validators = vals, initDct = value, patch = self.patching, subStore=True, validateInit=self.validateInit)
self.subStores[field] = self.store[field]
else :
self[field] = value
|
[
"def",
"set",
"(",
"self",
",",
"dct",
")",
":",
"# if not self.mustValidate :",
"# self.store = dct",
"# self.patchStore = dct",
"# return",
"for",
"field",
",",
"value",
"in",
"dct",
".",
"items",
"(",
")",
":",
"if",
"field",
"not",
"in",
"self",
".",
"collection",
".",
"arangoPrivates",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"field",
"in",
"self",
".",
"validators",
"and",
"isinstance",
"(",
"self",
".",
"validators",
"[",
"field",
"]",
",",
"dict",
")",
":",
"vals",
"=",
"self",
".",
"validators",
"[",
"field",
"]",
"else",
":",
"vals",
"=",
"{",
"}",
"self",
"[",
"field",
"]",
"=",
"DocumentStore",
"(",
"self",
".",
"collection",
",",
"validators",
"=",
"vals",
",",
"initDct",
"=",
"value",
",",
"patch",
"=",
"self",
".",
"patching",
",",
"subStore",
"=",
"True",
",",
"validateInit",
"=",
"self",
".",
"validateInit",
")",
"self",
".",
"subStores",
"[",
"field",
"]",
"=",
"self",
".",
"store",
"[",
"field",
"]",
"else",
":",
"self",
"[",
"field",
"]",
"=",
"value"
] |
Set the store using a dictionary
|
[
"Set",
"the",
"store",
"using",
"a",
"dictionary"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L103-L120
|
train
|
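A short continuation sketch of what set() does with nested dictionaries: each dict value is wrapped in its own DocumentStore sub-store, so nested fields get the same validation and [] access. The document and field names are illustrative and reuse the hypothetical humans collection from the previous sketch.

doc = humans.createDocument()
doc.set({
    "name": "Alice",
    "address": {"city": "Paris", "zip": "75000"},  # becomes a sub-store
})
print(doc["address"]["city"])  # sub-stores answer to the same [] protocol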
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.reset
|
def reset(self, collection, jsonFieldInit = None) :
    """replaces the current values in the document by those in jsonFieldInit"""
    if not jsonFieldInit:
        jsonFieldInit = {}
self.collection = collection
self.connection = self.collection.connection
self.documentsURL = self.collection.documentsURL
self.URL = None
self.setPrivates(jsonFieldInit)
self._store = DocumentStore(self.collection, validators=self.collection._fields, initDct=jsonFieldInit)
if self.collection._validation['on_load']:
self.validate()
self.modified = True
|
python
|
def reset(self, collection, jsonFieldInit = None) :
    """replaces the current values in the document by those in jsonFieldInit"""
    if not jsonFieldInit:
        jsonFieldInit = {}
self.collection = collection
self.connection = self.collection.connection
self.documentsURL = self.collection.documentsURL
self.URL = None
self.setPrivates(jsonFieldInit)
self._store = DocumentStore(self.collection, validators=self.collection._fields, initDct=jsonFieldInit)
if self.collection._validation['on_load']:
self.validate()
self.modified = True
|
[
"def",
"reset",
"(",
"self",
",",
"collection",
",",
"jsonFieldInit",
"=",
"None",
")",
":",
"if",
"not",
"jsonFieldInit",
":",
"jsonFieldInit",
"=",
"{",
"}",
"self",
".",
"collection",
"=",
"collection",
"self",
".",
"connection",
"=",
"self",
".",
"collection",
".",
"connection",
"self",
".",
"documentsURL",
"=",
"self",
".",
"collection",
".",
"documentsURL",
"self",
".",
"URL",
"=",
"None",
"self",
".",
"setPrivates",
"(",
"jsonFieldInit",
")",
"self",
".",
"_store",
"=",
"DocumentStore",
"(",
"self",
".",
"collection",
",",
"validators",
"=",
"self",
".",
"collection",
".",
"_fields",
",",
"initDct",
"=",
"jsonFieldInit",
")",
"if",
"self",
".",
"collection",
".",
"_validation",
"[",
"'on_load'",
"]",
":",
"self",
".",
"validate",
"(",
")",
"self",
".",
"modified",
"=",
"True"
] |
replaces the current values in the document by those in jsonFieldInit
|
[
"replaces",
"the",
"current",
"values",
"in",
"the",
"document",
"by",
"those",
"in",
"jsonFieldInit"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L191-L206
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.validate
|
def validate(self) :
"""validate the document"""
self._store.validate()
for pField in self.collection.arangoPrivates :
self.collection.validatePrivate(pField, getattr(self, pField))
|
python
|
def validate(self) :
"""validate the document"""
self._store.validate()
for pField in self.collection.arangoPrivates :
self.collection.validatePrivate(pField, getattr(self, pField))
|
[
"def",
"validate",
"(",
"self",
")",
":",
"self",
".",
"_store",
".",
"validate",
"(",
")",
"for",
"pField",
"in",
"self",
".",
"collection",
".",
"arangoPrivates",
":",
"self",
".",
"collection",
".",
"validatePrivate",
"(",
"pField",
",",
"getattr",
"(",
"self",
",",
"pField",
")",
")"
] |
validate the document
|
[
"validate",
"the",
"document"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L208-L212
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.setPrivates
|
def setPrivates(self, fieldDict) :
"""will set self._id, self._rev and self._key field."""
for priv in self.privates :
if priv in fieldDict :
setattr(self, priv, fieldDict[priv])
else :
setattr(self, priv, None)
if self._id is not None :
self.URL = "%s/%s" % (self.documentsURL, self._id)
|
python
|
def setPrivates(self, fieldDict) :
"""will set self._id, self._rev and self._key field."""
for priv in self.privates :
if priv in fieldDict :
setattr(self, priv, fieldDict[priv])
else :
setattr(self, priv, None)
if self._id is not None :
self.URL = "%s/%s" % (self.documentsURL, self._id)
|
[
"def",
"setPrivates",
"(",
"self",
",",
"fieldDict",
")",
":",
"for",
"priv",
"in",
"self",
".",
"privates",
":",
"if",
"priv",
"in",
"fieldDict",
":",
"setattr",
"(",
"self",
",",
"priv",
",",
"fieldDict",
"[",
"priv",
"]",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"priv",
",",
"None",
")",
"if",
"self",
".",
"_id",
"is",
"not",
"None",
":",
"self",
".",
"URL",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"documentsURL",
",",
"self",
".",
"_id",
")"
] |
will set self._id, self._rev and self._key field.
|
[
"will",
"set",
"self",
".",
"_id",
"self",
".",
"_rev",
"and",
"self",
".",
"_key",
"field",
"."
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L214-L224
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.patch
|
def patch(self, keepNull = True, **docArgs) :
"""Saves the document by only updating the modified fields.
    The default behaviour concerning the keepNull parameter is the opposite of ArangoDB's default; Null values won't be ignored
Use docArgs for things such as waitForSync = True"""
if self.URL is None :
raise ValueError("Cannot patch a document that was not previously saved")
payload = self._store.getPatches()
if self.collection._validation['on_save'] :
self.validate()
if len(payload) > 0 :
params = dict(docArgs)
params.update({'collection': self.collection.name, 'keepNull' : keepNull})
payload = json.dumps(payload, default=str)
r = self.connection.session.patch(self.URL, params = params, data = payload)
data = r.json()
if (r.status_code == 201 or r.status_code == 202) and "error" not in data :
self._rev = data['_rev']
else :
raise UpdateError(data['errorMessage'], data)
self.modified = False
self._store.resetPatch()
|
python
|
def patch(self, keepNull = True, **docArgs) :
"""Saves the document by only updating the modified fields.
    The default behaviour concerning the keepNull parameter is the opposite of ArangoDB's default; Null values won't be ignored
Use docArgs for things such as waitForSync = True"""
if self.URL is None :
raise ValueError("Cannot patch a document that was not previously saved")
payload = self._store.getPatches()
if self.collection._validation['on_save'] :
self.validate()
if len(payload) > 0 :
params = dict(docArgs)
params.update({'collection': self.collection.name, 'keepNull' : keepNull})
payload = json.dumps(payload, default=str)
r = self.connection.session.patch(self.URL, params = params, data = payload)
data = r.json()
if (r.status_code == 201 or r.status_code == 202) and "error" not in data :
self._rev = data['_rev']
else :
raise UpdateError(data['errorMessage'], data)
self.modified = False
self._store.resetPatch()
|
[
"def",
"patch",
"(",
"self",
",",
"keepNull",
"=",
"True",
",",
"*",
"*",
"docArgs",
")",
":",
"if",
"self",
".",
"URL",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot patch a document that was not previously saved\"",
")",
"payload",
"=",
"self",
".",
"_store",
".",
"getPatches",
"(",
")",
"if",
"self",
".",
"collection",
".",
"_validation",
"[",
"'on_save'",
"]",
":",
"self",
".",
"validate",
"(",
")",
"if",
"len",
"(",
"payload",
")",
">",
"0",
":",
"params",
"=",
"dict",
"(",
"docArgs",
")",
"params",
".",
"update",
"(",
"{",
"'collection'",
":",
"self",
".",
"collection",
".",
"name",
",",
"'keepNull'",
":",
"keepNull",
"}",
")",
"payload",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"str",
")",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"patch",
"(",
"self",
".",
"URL",
",",
"params",
"=",
"params",
",",
"data",
"=",
"payload",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"(",
"r",
".",
"status_code",
"==",
"201",
"or",
"r",
".",
"status_code",
"==",
"202",
")",
"and",
"\"error\"",
"not",
"in",
"data",
":",
"self",
".",
"_rev",
"=",
"data",
"[",
"'_rev'",
"]",
"else",
":",
"raise",
"UpdateError",
"(",
"data",
"[",
"'errorMessage'",
"]",
",",
"data",
")",
"self",
".",
"modified",
"=",
"False",
"self",
".",
"_store",
".",
"resetPatch",
"(",
")"
] |
Saves the document by only updating the modified fields.
The default behaviour concerning the keepNull parameter is the opposite of ArangoDB's default; Null values won't be ignored
Use docArgs for things such as waitForSync = True
|
[
"Saves",
"the",
"document",
"by",
"only",
"updating",
"the",
"modified",
"fields",
".",
"The",
"default",
"behaviour",
"concening",
"the",
"keepNull",
"parameter",
"is",
"the",
"opposite",
"of",
"ArangoDB",
"s",
"default",
"Null",
"values",
"won",
"t",
"be",
"ignored",
"Use",
"docArgs",
"for",
"things",
"such",
"as",
"waitForSync",
"=",
"True"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L289-L316
|
train
|
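A hedged usage sketch for patch(): only fields changed since the last save travel in the PATCH payload, and keepNull=False asks ArangoDB to drop nulled fields. The _key is hypothetical and the humans collection is the one assumed in earlier sketches.

doc = humans.fetchDocument("12345")  # hypothetical _key
doc["name"] = "Bob"                  # the change lands in the patch store
doc.patch()                          # sends only {"name": "Bob"}
doc["nickname"] = None
doc.patch(keepNull=False)            # null fields are dropped server side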
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.delete
|
def delete(self) :
"deletes the document from the database"
if self.URL is None :
raise DeletionError("Can't delete a document that was not saved")
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or 'error' in data :
raise DeletionError(data['errorMessage'], data)
self.reset(self.collection)
self.modified = True
|
python
|
def delete(self) :
"deletes the document from the database"
if self.URL is None :
raise DeletionError("Can't delete a document that was not saved")
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or 'error' in data :
raise DeletionError(data['errorMessage'], data)
self.reset(self.collection)
self.modified = True
|
[
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"URL",
"is",
"None",
":",
"raise",
"DeletionError",
"(",
"\"Can't delete a document that was not saved\"",
")",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"delete",
"(",
"self",
".",
"URL",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"(",
"r",
".",
"status_code",
"!=",
"200",
"and",
"r",
".",
"status_code",
"!=",
"202",
")",
"or",
"'error'",
"in",
"data",
":",
"raise",
"DeletionError",
"(",
"data",
"[",
"'errorMessage'",
"]",
",",
"data",
")",
"self",
".",
"reset",
"(",
"self",
".",
"collection",
")",
"self",
".",
"modified",
"=",
"True"
] |
deletes the document from the database
|
[
"deletes",
"the",
"document",
"from",
"the",
"database"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L318-L329
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.getEdges
|
def getEdges(self, edges, inEdges = True, outEdges = True, rawResults = False) :
"""returns in, out, or both edges linked to self belonging the collection 'edges'.
If rawResults a arango results will be return as fetched, if false, will return a liste of Edge objects"""
try :
return edges.getEdges(self, inEdges, outEdges, rawResults)
except AttributeError :
raise AttributeError("%s does not seem to be a valid Edges object" % edges)
|
python
|
def getEdges(self, edges, inEdges = True, outEdges = True, rawResults = False) :
"""returns in, out, or both edges linked to self belonging the collection 'edges'.
If rawResults a arango results will be return as fetched, if false, will return a liste of Edge objects"""
try :
return edges.getEdges(self, inEdges, outEdges, rawResults)
except AttributeError :
raise AttributeError("%s does not seem to be a valid Edges object" % edges)
|
[
"def",
"getEdges",
"(",
"self",
",",
"edges",
",",
"inEdges",
"=",
"True",
",",
"outEdges",
"=",
"True",
",",
"rawResults",
"=",
"False",
")",
":",
"try",
":",
"return",
"edges",
".",
"getEdges",
"(",
"self",
",",
"inEdges",
",",
"outEdges",
",",
"rawResults",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"%s does not seem to be a valid Edges object\"",
"%",
"edges",
")"
] |
returns in, out, or both edges linked to self belonging to the collection 'edges'.
If rawResults is True, raw ArangoDB results will be returned as fetched; if False, a list of Edge objects will be returned
|
[
"returns",
"in",
"out",
"or",
"both",
"edges",
"linked",
"to",
"self",
"belonging",
"the",
"collection",
"edges",
".",
"If",
"rawResults",
"a",
"arango",
"results",
"will",
"be",
"return",
"as",
"fetched",
"if",
"false",
"will",
"return",
"a",
"liste",
"of",
"Edge",
"objects"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L339-L345
|
train
|
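A sketch of calling getEdges() from a document, assuming an Edges collection named Knows (hypothetical) exists in the same database; the call simply delegates to the edge collection's own getEdges().

knows = db["Knows"]                                    # an Edges collection
alice = humans.fetchDocument("alice")                  # hypothetical key
incoming = alice.getEdges(knows, inEdges=True, outEdges=False)
raw = alice.getEdges(knows, rawResults=True)           # plain ArangoDB dicts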
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Document.getStore
|
def getStore(self) :
"""return the store in a dict format"""
store = self._store.getStore()
for priv in self.privates :
v = getattr(self, priv)
if v :
store[priv] = v
return store
|
python
|
def getStore(self) :
"""return the store in a dict format"""
store = self._store.getStore()
for priv in self.privates :
v = getattr(self, priv)
if v :
store[priv] = v
return store
|
[
"def",
"getStore",
"(",
"self",
")",
":",
"store",
"=",
"self",
".",
"_store",
".",
"getStore",
"(",
")",
"for",
"priv",
"in",
"self",
".",
"privates",
":",
"v",
"=",
"getattr",
"(",
"self",
",",
"priv",
")",
"if",
"v",
":",
"store",
"[",
"priv",
"]",
"=",
"v",
"return",
"store"
] |
return the store in a dict format
|
[
"return",
"the",
"store",
"in",
"a",
"dict",
"format"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L347-L354
|
train
|
ArangoDB-Community/pyArango
|
pyArango/document.py
|
Edge.links
|
def links(self, fromVertice, toVertice, **edgeArgs) :
"""
An alias to save that updates the _from and _to attributes.
    fromVertice and toVertice can be either strings or documents. If they are unsaved documents, they will be automatically saved.
"""
if isinstance(fromVertice, Document) or isinstance(getattr(fromVertice, 'document', None), Document):
if not fromVertice._id :
fromVertice.save()
self._from = fromVertice._id
elif (type(fromVertice) is bytes) or (type(fromVertice) is str):
self._from = fromVertice
elif not self._from:
raise CreationError('fromVertice %s is invalid!' % str(fromVertice))
if isinstance(toVertice, Document) or isinstance(getattr(toVertice, 'document', None), Document):
if not toVertice._id:
toVertice.save()
self._to = toVertice._id
elif (type(toVertice) is bytes) or (type(toVertice) is str):
self._to = toVertice
elif not self._to:
raise CreationError('toVertice %s is invalid!' % str(toVertice))
self.save(**edgeArgs)
|
python
|
def links(self, fromVertice, toVertice, **edgeArgs) :
"""
An alias to save that updates the _from and _to attributes.
    fromVertice and toVertice can be either strings or documents. If they are unsaved documents, they will be automatically saved.
"""
if isinstance(fromVertice, Document) or isinstance(getattr(fromVertice, 'document', None), Document):
if not fromVertice._id :
fromVertice.save()
self._from = fromVertice._id
elif (type(fromVertice) is bytes) or (type(fromVertice) is str):
self._from = fromVertice
elif not self._from:
raise CreationError('fromVertice %s is invalid!' % str(fromVertice))
if isinstance(toVertice, Document) or isinstance(getattr(toVertice, 'document', None), Document):
if not toVertice._id:
toVertice.save()
self._to = toVertice._id
elif (type(toVertice) is bytes) or (type(toVertice) is str):
self._to = toVertice
elif not self._to:
raise CreationError('toVertice %s is invalid!' % str(toVertice))
self.save(**edgeArgs)
|
[
"def",
"links",
"(",
"self",
",",
"fromVertice",
",",
"toVertice",
",",
"*",
"*",
"edgeArgs",
")",
":",
"if",
"isinstance",
"(",
"fromVertice",
",",
"Document",
")",
"or",
"isinstance",
"(",
"getattr",
"(",
"fromVertice",
",",
"'document'",
",",
"None",
")",
",",
"Document",
")",
":",
"if",
"not",
"fromVertice",
".",
"_id",
":",
"fromVertice",
".",
"save",
"(",
")",
"self",
".",
"_from",
"=",
"fromVertice",
".",
"_id",
"elif",
"(",
"type",
"(",
"fromVertice",
")",
"is",
"bytes",
")",
"or",
"(",
"type",
"(",
"fromVertice",
")",
"is",
"str",
")",
":",
"self",
".",
"_from",
"=",
"fromVertice",
"elif",
"not",
"self",
".",
"_from",
":",
"raise",
"CreationError",
"(",
"'fromVertice %s is invalid!'",
"%",
"str",
"(",
"fromVertice",
")",
")",
"if",
"isinstance",
"(",
"toVertice",
",",
"Document",
")",
"or",
"isinstance",
"(",
"getattr",
"(",
"toVertice",
",",
"'document'",
",",
"None",
")",
",",
"Document",
")",
":",
"if",
"not",
"toVertice",
".",
"_id",
":",
"toVertice",
".",
"save",
"(",
")",
"self",
".",
"_to",
"=",
"toVertice",
".",
"_id",
"elif",
"(",
"type",
"(",
"toVertice",
")",
"is",
"bytes",
")",
"or",
"(",
"type",
"(",
"toVertice",
")",
"is",
"str",
")",
":",
"self",
".",
"_to",
"=",
"toVertice",
"elif",
"not",
"self",
".",
"_to",
":",
"raise",
"CreationError",
"(",
"'toVertice %s is invalid!'",
"%",
"str",
"(",
"toVertice",
")",
")",
"self",
".",
"save",
"(",
"*",
"*",
"edgeArgs",
")"
] |
An alias to save that updates the _from and _to attributes.
fromVertice and toVertice can be either strings or documents. If they are unsaved documents, they will be automatically saved.
|
[
"An",
"alias",
"to",
"save",
"that",
"updates",
"the",
"_from",
"and",
"_to",
"attributes",
".",
"fromVertice",
"and",
"toVertice",
"can",
"be",
"either",
"strings",
"or",
"documents",
".",
"It",
"they",
"are",
"unsaved",
"documents",
"they",
"will",
"be",
"automatically",
"saved",
"."
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L403-L426
|
train
|
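A minimal sketch of links(): unsaved vertices are saved first, then _from/_to are filled in and the edge itself is saved; extra keyword arguments are forwarded to save(). Collection names reuse the hypothetical humans and knows from the sketches above.

a = humans.createDocument({"name": "Alice"})  # deliberately unsaved
b = humans.createDocument({"name": "Bob"})
edge = knows.createEdge()
edge["since"] = 2024
edge.links(a, b, waitForSync=True)  # saves a and b, then the edge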
ArangoDB-Community/pyArango
|
pyArango/users.py
|
User._set
|
def _set(self, jsonData) :
"""Initialize all fields at once. If no password is specified, it will be set as an empty string"""
self["username"] = jsonData["user"]
self["active"] = jsonData["active"]
self["extra"] = jsonData["extra"]
try:
self["changePassword"] = jsonData["changePassword"]
except Exception as e:
pass
# self["changePassword"] = ""
try :
self["password"] = jsonData["passwd"]
except KeyError :
self["password"] = ""
self.URL = "%s/user/%s" % (self.connection.URL, self["username"])
|
python
|
def _set(self, jsonData) :
"""Initialize all fields at once. If no password is specified, it will be set as an empty string"""
self["username"] = jsonData["user"]
self["active"] = jsonData["active"]
self["extra"] = jsonData["extra"]
try:
self["changePassword"] = jsonData["changePassword"]
except Exception as e:
pass
# self["changePassword"] = ""
try :
self["password"] = jsonData["passwd"]
except KeyError :
self["password"] = ""
self.URL = "%s/user/%s" % (self.connection.URL, self["username"])
|
[
"def",
"_set",
"(",
"self",
",",
"jsonData",
")",
":",
"self",
"[",
"\"username\"",
"]",
"=",
"jsonData",
"[",
"\"user\"",
"]",
"self",
"[",
"\"active\"",
"]",
"=",
"jsonData",
"[",
"\"active\"",
"]",
"self",
"[",
"\"extra\"",
"]",
"=",
"jsonData",
"[",
"\"extra\"",
"]",
"try",
":",
"self",
"[",
"\"changePassword\"",
"]",
"=",
"jsonData",
"[",
"\"changePassword\"",
"]",
"except",
"Exception",
"as",
"e",
":",
"pass",
"# self[\"changePassword\"] = \"\"",
"try",
":",
"self",
"[",
"\"password\"",
"]",
"=",
"jsonData",
"[",
"\"passwd\"",
"]",
"except",
"KeyError",
":",
"self",
"[",
"\"password\"",
"]",
"=",
"\"\"",
"self",
".",
"URL",
"=",
"\"%s/user/%s\"",
"%",
"(",
"self",
".",
"connection",
".",
"URL",
",",
"self",
"[",
"\"username\"",
"]",
")"
] |
Initialize all fields at once. If no password is specified, it will be set as an empty string
|
[
"Initialize",
"all",
"fields",
"at",
"once",
".",
"If",
"no",
"password",
"is",
"specified",
"it",
"will",
"be",
"set",
"as",
"an",
"empty",
"string"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/users.py#L24-L41
|
train
|
ArangoDB-Community/pyArango
|
pyArango/users.py
|
User.delete
|
def delete(self) :
"""Permanently remove the user"""
if not self.URL :
raise CreationError("Please save user first", None, None)
r = self.connection.session.delete(self.URL)
if r.status_code < 200 or r.status_code > 202 :
raise DeletionError("Unable to delete user, url: %s, status: %s" %(r.url, r.status_code), r.content )
self.URL = None
|
python
|
def delete(self) :
"""Permanently remove the user"""
if not self.URL :
raise CreationError("Please save user first", None, None)
r = self.connection.session.delete(self.URL)
if r.status_code < 200 or r.status_code > 202 :
raise DeletionError("Unable to delete user, url: %s, status: %s" %(r.url, r.status_code), r.content )
self.URL = None
|
[
"def",
"delete",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"URL",
":",
"raise",
"CreationError",
"(",
"\"Please save user first\"",
",",
"None",
",",
"None",
")",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"delete",
"(",
"self",
".",
"URL",
")",
"if",
"r",
".",
"status_code",
"<",
"200",
"or",
"r",
".",
"status_code",
">",
"202",
":",
"raise",
"DeletionError",
"(",
"\"Unable to delete user, url: %s, status: %s\"",
"%",
"(",
"r",
".",
"url",
",",
"r",
".",
"status_code",
")",
",",
"r",
".",
"content",
")",
"self",
".",
"URL",
"=",
"None"
] |
Permanently remove the user
|
[
"Permanently",
"remove",
"the",
"user"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/users.py#L95-L104
|
train
|
ArangoDB-Community/pyArango
|
pyArango/users.py
|
Users.fetchAllUsers
|
def fetchAllUsers(self, rawResults = False) :
"""Returns all available users. if rawResults, the result will be a list of python dicts instead of User objects"""
r = self.connection.session.get(self.URL)
if r.status_code == 200 :
data = r.json()
if rawResults :
return data["result"]
else :
res = []
for resu in data["result"] :
u = User(self, resu)
res.append(u)
return res
else :
raise ConnectionError("Unable to get user list", r.url, r.status_code)
|
python
|
def fetchAllUsers(self, rawResults = False) :
"""Returns all available users. if rawResults, the result will be a list of python dicts instead of User objects"""
r = self.connection.session.get(self.URL)
if r.status_code == 200 :
data = r.json()
if rawResults :
return data["result"]
else :
res = []
for resu in data["result"] :
u = User(self, resu)
res.append(u)
return res
else :
raise ConnectionError("Unable to get user list", r.url, r.status_code)
|
[
"def",
"fetchAllUsers",
"(",
"self",
",",
"rawResults",
"=",
"False",
")",
":",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"get",
"(",
"self",
".",
"URL",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"rawResults",
":",
"return",
"data",
"[",
"\"result\"",
"]",
"else",
":",
"res",
"=",
"[",
"]",
"for",
"resu",
"in",
"data",
"[",
"\"result\"",
"]",
":",
"u",
"=",
"User",
"(",
"self",
",",
"resu",
")",
"res",
".",
"append",
"(",
"u",
")",
"return",
"res",
"else",
":",
"raise",
"ConnectionError",
"(",
"\"Unable to get user list\"",
",",
"r",
".",
"url",
",",
"r",
".",
"status_code",
")"
] |
Returns all available users. If rawResults is True, the result will be a list of python dicts instead of User objects
|
[
"Returns",
"all",
"available",
"users",
".",
"if",
"rawResults",
"the",
"result",
"will",
"be",
"a",
"list",
"of",
"python",
"dicts",
"instead",
"of",
"User",
"objects"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/users.py#L129-L143
|
train
|
ArangoDB-Community/pyArango
|
pyArango/users.py
|
Users.fetchUser
|
def fetchUser(self, username, rawResults = False) :
"""Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects"""
url = "%s/%s" % (self.URL, username)
r = self.connection.session.get(url)
if r.status_code == 200 :
data = r.json()
if rawResults :
return data["result"]
else :
u = User(self, data)
return u
else :
raise KeyError("Unable to get user: %s" % username)
|
python
|
def fetchUser(self, username, rawResults = False) :
"""Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects"""
url = "%s/%s" % (self.URL, username)
r = self.connection.session.get(url)
if r.status_code == 200 :
data = r.json()
if rawResults :
return data["result"]
else :
u = User(self, data)
return u
else :
raise KeyError("Unable to get user: %s" % username)
|
[
"def",
"fetchUser",
"(",
"self",
",",
"username",
",",
"rawResults",
"=",
"False",
")",
":",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"URL",
",",
"username",
")",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"get",
"(",
"url",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"rawResults",
":",
"return",
"data",
"[",
"\"result\"",
"]",
"else",
":",
"u",
"=",
"User",
"(",
"self",
",",
"data",
")",
"return",
"u",
"else",
":",
"raise",
"KeyError",
"(",
"\"Unable to get user: %s\"",
"%",
"username",
")"
] |
Returns a single user. If rawResults is True, the result will be a python dict instead of a User object
|
[
"Returns",
"a",
"single",
"user",
".",
"if",
"rawResults",
"the",
"result",
"will",
"be",
"a",
"list",
"of",
"python",
"dicts",
"instead",
"of",
"User",
"objects"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/users.py#L145-L158
|
train
|
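A short sketch of the Users interface, assuming a connection with admin rights; fetchAllUsers() returns User objects unless rawResults is True, and fetchUser() raises KeyError for an unknown name. conn is the connection assumed in the earlier sketches.

from pyArango.users import Users

users = Users(conn)
for u in users.fetchAllUsers():
    print(u["username"], u["active"])
root = users.fetchUser("root")  # a single User, or KeyError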
ArangoDB-Community/pyArango
|
pyArango/connection.py
|
Connection.resetSession
|
def resetSession(self, username=None, password=None, verify=True) :
"""resets the session"""
self.disconnectSession()
self.session = AikidoSession(username, password, verify)
|
python
|
def resetSession(self, username=None, password=None, verify=True) :
"""resets the session"""
self.disconnectSession()
self.session = AikidoSession(username, password, verify)
|
[
"def",
"resetSession",
"(",
"self",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"self",
".",
"disconnectSession",
"(",
")",
"self",
".",
"session",
"=",
"AikidoSession",
"(",
"username",
",",
"password",
",",
"verify",
")"
] |
resets the session
|
[
"resets",
"the",
"session"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/connection.py#L129-L132
|
train
|
ArangoDB-Community/pyArango
|
pyArango/connection.py
|
Connection.reload
|
def reload(self) :
"""Reloads the database list.
Because loading a database triggers the loading of all collections and graphs within,
only handles are loaded when this function is called. The full databases are loaded on demand when accessed
"""
r = self.session.get(self.databasesURL)
data = r.json()
if r.status_code == 200 and not data["error"] :
self.databases = {}
for dbName in data["result"] :
if dbName not in self.databases :
self.databases[dbName] = DBHandle(self, dbName)
else :
raise ConnectionError(data["errorMessage"], self.databasesURL, r.status_code, r.content)
|
python
|
def reload(self) :
"""Reloads the database list.
Because loading a database triggers the loading of all collections and graphs within,
only handles are loaded when this function is called. The full databases are loaded on demand when accessed
"""
r = self.session.get(self.databasesURL)
data = r.json()
if r.status_code == 200 and not data["error"] :
self.databases = {}
for dbName in data["result"] :
if dbName not in self.databases :
self.databases[dbName] = DBHandle(self, dbName)
else :
raise ConnectionError(data["errorMessage"], self.databasesURL, r.status_code, r.content)
|
[
"def",
"reload",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"databasesURL",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
"and",
"not",
"data",
"[",
"\"error\"",
"]",
":",
"self",
".",
"databases",
"=",
"{",
"}",
"for",
"dbName",
"in",
"data",
"[",
"\"result\"",
"]",
":",
"if",
"dbName",
"not",
"in",
"self",
".",
"databases",
":",
"self",
".",
"databases",
"[",
"dbName",
"]",
"=",
"DBHandle",
"(",
"self",
",",
"dbName",
")",
"else",
":",
"raise",
"ConnectionError",
"(",
"data",
"[",
"\"errorMessage\"",
"]",
",",
"self",
".",
"databasesURL",
",",
"r",
".",
"status_code",
",",
"r",
".",
"content",
")"
] |
Reloads the database list.
Because loading a database triggers the loading of all collections and graphs within,
only handles are loaded when this function is called. The full databases are loaded on demand when accessed
|
[
"Reloads",
"the",
"database",
"list",
".",
"Because",
"loading",
"a",
"database",
"triggers",
"the",
"loading",
"of",
"all",
"collections",
"and",
"graphs",
"within",
"only",
"handles",
"are",
"loaded",
"when",
"this",
"function",
"is",
"called",
".",
"The",
"full",
"databases",
"are",
"loaded",
"on",
"demand",
"when",
"accessed"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/connection.py#L134-L149
|
train
|
ArangoDB-Community/pyArango
|
pyArango/connection.py
|
Connection.createDatabase
|
def createDatabase(self, name, **dbArgs) :
"use dbArgs for arguments other than name. for a full list of arguments please have a look at arangoDB's doc"
dbArgs['name'] = name
payload = json.dumps(dbArgs, default=str)
url = self.URL + "/database"
r = self.session.post(url, data = payload)
data = r.json()
if r.status_code == 201 and not data["error"] :
db = Database(self, name)
self.databases[name] = db
return self.databases[name]
else :
raise CreationError(data["errorMessage"], r.content)
|
python
|
def createDatabase(self, name, **dbArgs) :
"use dbArgs for arguments other than name. for a full list of arguments please have a look at arangoDB's doc"
dbArgs['name'] = name
payload = json.dumps(dbArgs, default=str)
url = self.URL + "/database"
r = self.session.post(url, data = payload)
data = r.json()
if r.status_code == 201 and not data["error"] :
db = Database(self, name)
self.databases[name] = db
return self.databases[name]
else :
raise CreationError(data["errorMessage"], r.content)
|
[
"def",
"createDatabase",
"(",
"self",
",",
"name",
",",
"*",
"*",
"dbArgs",
")",
":",
"dbArgs",
"[",
"'name'",
"]",
"=",
"name",
"payload",
"=",
"json",
".",
"dumps",
"(",
"dbArgs",
",",
"default",
"=",
"str",
")",
"url",
"=",
"self",
".",
"URL",
"+",
"\"/database\"",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"payload",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"r",
".",
"status_code",
"==",
"201",
"and",
"not",
"data",
"[",
"\"error\"",
"]",
":",
"db",
"=",
"Database",
"(",
"self",
",",
"name",
")",
"self",
".",
"databases",
"[",
"name",
"]",
"=",
"db",
"return",
"self",
".",
"databases",
"[",
"name",
"]",
"else",
":",
"raise",
"CreationError",
"(",
"data",
"[",
"\"errorMessage\"",
"]",
",",
"r",
".",
"content",
")"
] |
Use dbArgs for arguments other than name. For a full list of arguments please have a look at ArangoDB's doc
|
[
"use",
"dbArgs",
"for",
"arguments",
"other",
"than",
"name",
".",
"for",
"a",
"full",
"list",
"of",
"arguments",
"please",
"have",
"a",
"look",
"at",
"arangoDB",
"s",
"doc"
] |
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
|
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/connection.py#L151-L163
|
train
|
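A minimal sketch of createDatabase(), assuming a reachable server and root credentials; extra keyword arguments go straight into the JSON payload, and conn.databases is the handle cache that reload() maintains.

from pyArango.connection import Connection

conn = Connection(username="root", password="root")  # assumed credentials
if "test_db" not in conn.databases:
    db = conn.createDatabase(name="test_db")
else:
    db = conn["test_db"]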
sensu-plugins/sensu-plugin-python
|
sensu_plugin/plugin.py
|
SensuPlugin.output
|
def output(self, args):
'''
Print the output message.
'''
print("SensuPlugin: {}".format(' '.join(str(a) for a in args)))
|
python
|
def output(self, args):
'''
Print the output message.
'''
print("SensuPlugin: {}".format(' '.join(str(a) for a in args)))
|
[
"def",
"output",
"(",
"self",
",",
"args",
")",
":",
"print",
"(",
"\"SensuPlugin: {}\"",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"str",
"(",
"a",
")",
"for",
"a",
"in",
"args",
")",
")",
")"
] |
Print the output message.
|
[
"Print",
"the",
"output",
"message",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L51-L55
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/plugin.py
|
SensuPlugin.__make_dynamic
|
def __make_dynamic(self, method):
'''
Create a method for each of the exit codes.
'''
def dynamic(*args):
self.plugin_info['status'] = method
if not args:
args = None
self.output(args)
sys.exit(getattr(self.exit_code, method))
method_lc = method.lower()
dynamic.__doc__ = "%s method" % method_lc
dynamic.__name__ = method_lc
setattr(self, dynamic.__name__, dynamic)
|
python
|
def __make_dynamic(self, method):
'''
Create a method for each of the exit codes.
'''
def dynamic(*args):
self.plugin_info['status'] = method
if not args:
args = None
self.output(args)
sys.exit(getattr(self.exit_code, method))
method_lc = method.lower()
dynamic.__doc__ = "%s method" % method_lc
dynamic.__name__ = method_lc
setattr(self, dynamic.__name__, dynamic)
|
[
"def",
"__make_dynamic",
"(",
"self",
",",
"method",
")",
":",
"def",
"dynamic",
"(",
"*",
"args",
")",
":",
"self",
".",
"plugin_info",
"[",
"'status'",
"]",
"=",
"method",
"if",
"not",
"args",
":",
"args",
"=",
"None",
"self",
".",
"output",
"(",
"args",
")",
"sys",
".",
"exit",
"(",
"getattr",
"(",
"self",
".",
"exit_code",
",",
"method",
")",
")",
"method_lc",
"=",
"method",
".",
"lower",
"(",
")",
"dynamic",
".",
"__doc__",
"=",
"\"%s method\"",
"%",
"method_lc",
"dynamic",
".",
"__name__",
"=",
"method_lc",
"setattr",
"(",
"self",
",",
"dynamic",
".",
"__name__",
",",
"dynamic",
")"
] |
Create a method for each of the exit codes.
|
[
"Create",
"a",
"method",
"for",
"each",
"of",
"the",
"exit",
"codes",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L57-L71
|
train
|
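A standalone sketch of the setattr pattern __make_dynamic relies on: one closure per status name, bound to the instance under its lowercase name. MiniPlugin and its code table are invented for illustration and do not come from the source.

import sys

class MiniPlugin:
    EXIT_CODES = {'OK': 0, 'WARNING': 1, 'CRITICAL': 2, 'UNKNOWN': 3}

    def __init__(self):
        for name, code in self.EXIT_CODES.items():
            self._make_dynamic(name, code)

    def _make_dynamic(self, name, code):
        def dynamic(*args):
            # mirror of output(): print, then exit with the mapped code
            print("MiniPlugin:", *args)
            sys.exit(code)
        dynamic.__name__ = name.lower()
        dynamic.__doc__ = "%s method" % name.lower()
        setattr(self, dynamic.__name__, dynamic)

# MiniPlugin().warning("disk almost full")  # would print and exit(1)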
sensu-plugins/sensu-plugin-python
|
sensu_plugin/plugin.py
|
SensuPlugin.__exitfunction
|
def __exitfunction(self):
'''
Method called by exit hook, ensures that both an exit code and
output is supplied, also catches errors.
'''
if self._hook.exit_code is None and self._hook.exception is None:
print("Check did not exit! You should call an exit code method.")
sys.stdout.flush()
os._exit(1)
elif self._hook.exception:
print("Check failed to run: %s, %s" %
(sys.last_type, traceback.format_tb(sys.last_traceback)))
sys.stdout.flush()
os._exit(2)
|
python
|
def __exitfunction(self):
'''
Method called by exit hook, ensures that both an exit code and
output is supplied, also catches errors.
'''
if self._hook.exit_code is None and self._hook.exception is None:
print("Check did not exit! You should call an exit code method.")
sys.stdout.flush()
os._exit(1)
elif self._hook.exception:
print("Check failed to run: %s, %s" %
(sys.last_type, traceback.format_tb(sys.last_traceback)))
sys.stdout.flush()
os._exit(2)
|
[
"def",
"__exitfunction",
"(",
"self",
")",
":",
"if",
"self",
".",
"_hook",
".",
"exit_code",
"is",
"None",
"and",
"self",
".",
"_hook",
".",
"exception",
"is",
"None",
":",
"print",
"(",
"\"Check did not exit! You should call an exit code method.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"os",
".",
"_exit",
"(",
"1",
")",
"elif",
"self",
".",
"_hook",
".",
"exception",
":",
"print",
"(",
"\"Check failed to run: %s, %s\"",
"%",
"(",
"sys",
".",
"last_type",
",",
"traceback",
".",
"format_tb",
"(",
"sys",
".",
"last_traceback",
")",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"os",
".",
"_exit",
"(",
"2",
")"
] |
Method called by exit hook, ensures that both an exit code and
output is supplied, also catches errors.
|
[
"Method",
"called",
"by",
"exit",
"hook",
"ensures",
"that",
"both",
"an",
"exit",
"code",
"and",
"output",
"is",
"supplied",
"also",
"catches",
"errors",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L79-L92
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.run
|
def run(self):
'''
Set up the event object, global settings and command line
arguments.
'''
# Parse the stdin into a global event object
stdin = self.read_stdin()
self.event = self.read_event(stdin)
# Prepare global settings
self.settings = get_settings()
self.api_settings = self.get_api_settings()
    # Prepare command line arguments
self.parser = argparse.ArgumentParser()
# set up the 2.x to 1.x event mapping argument
self.parser.add_argument("--map-v2-event-into-v1",
action="store_true",
default=False,
dest="v2event")
if hasattr(self, 'setup'):
self.setup()
(self.options, self.remain) = self.parser.parse_known_args()
# map the event if required
if (self.options.v2event or
os.environ.get("SENSU_MAP_V2_EVENT_INTO_V1")):
self.event = map_v2_event_into_v1(self.event)
# Filter (deprecated) and handle
self.filter()
self.handle()
|
python
|
def run(self):
'''
Set up the event object, global settings and command line
arguments.
'''
# Parse the stdin into a global event object
stdin = self.read_stdin()
self.event = self.read_event(stdin)
# Prepare global settings
self.settings = get_settings()
self.api_settings = self.get_api_settings()
    # Prepare command line arguments
self.parser = argparse.ArgumentParser()
# set up the 2.x to 1.x event mapping argument
self.parser.add_argument("--map-v2-event-into-v1",
action="store_true",
default=False,
dest="v2event")
if hasattr(self, 'setup'):
self.setup()
(self.options, self.remain) = self.parser.parse_known_args()
# map the event if required
if (self.options.v2event or
os.environ.get("SENSU_MAP_V2_EVENT_INTO_V1")):
self.event = map_v2_event_into_v1(self.event)
# Filter (deprecated) and handle
self.filter()
self.handle()
|
[
"def",
"run",
"(",
"self",
")",
":",
"# Parse the stdin into a global event object",
"stdin",
"=",
"self",
".",
"read_stdin",
"(",
")",
"self",
".",
"event",
"=",
"self",
".",
"read_event",
"(",
"stdin",
")",
"# Prepare global settings",
"self",
".",
"settings",
"=",
"get_settings",
"(",
")",
"self",
".",
"api_settings",
"=",
"self",
".",
"get_api_settings",
"(",
")",
"# Prepare command line arguments and",
"self",
".",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"# set up the 2.x to 1.x event mapping argument",
"self",
".",
"parser",
".",
"add_argument",
"(",
"\"--map-v2-event-into-v1\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"\"v2event\"",
")",
"if",
"hasattr",
"(",
"self",
",",
"'setup'",
")",
":",
"self",
".",
"setup",
"(",
")",
"(",
"self",
".",
"options",
",",
"self",
".",
"remain",
")",
"=",
"self",
".",
"parser",
".",
"parse_known_args",
"(",
")",
"# map the event if required",
"if",
"(",
"self",
".",
"options",
".",
"v2event",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"SENSU_MAP_V2_EVENT_INTO_V1\"",
")",
")",
":",
"self",
".",
"event",
"=",
"map_v2_event_into_v1",
"(",
"self",
".",
"event",
")",
"# Filter (deprecated) and handle",
"self",
".",
"filter",
"(",
")",
"self",
".",
"handle",
"(",
")"
] |
Set up the event object, global settings and command line
arguments.
|
[
"Set",
"up",
"the",
"event",
"object",
"global",
"settings",
"and",
"command",
"line",
"arguments",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L31-L65
|
train
|
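A hedged sketch of a concrete handler built on run(): subclasses implement handle() and may define setup() to register extra flags on the parser that run() creates. MyHandler and its flag are hypothetical, and a real event on stdin is assumed when the script executes.

from sensu_plugin.handler import SensuHandler

class MyHandler(SensuHandler):
    def setup(self):
        # picked up by run() via parse_known_args()
        self.parser.add_argument('--dry-run', action='store_true')

    def handle(self):
        client = self.event['client']['name']
        check = self.event['check']['name']
        print('handling {}/{}'.format(client, check))

# Instantiating MyHandler() in a handler script would drive run()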
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.filter
|
def filter(self):
'''
    Filters exit the process if the event should not be handled.
Filtering events is deprecated and will be removed in a future release.
'''
if self.deprecated_filtering_enabled():
        print('warning: event filtering in sensu-plugin is deprecated, ' +
'see http://bit.ly/sensu-plugin')
self.filter_disabled()
self.filter_silenced()
self.filter_dependencies()
if self.deprecated_occurrence_filtering():
        print('warning: occurrence filtering in sensu-plugin is ' +
'deprecated, see http://bit.ly/sensu-plugin')
self.filter_repeated()
|
python
|
def filter(self):
'''
    Filters exit the process if the event should not be handled.
Filtering events is deprecated and will be removed in a future release.
'''
if self.deprecated_filtering_enabled():
        print('warning: event filtering in sensu-plugin is deprecated, ' +
'see http://bit.ly/sensu-plugin')
self.filter_disabled()
self.filter_silenced()
self.filter_dependencies()
if self.deprecated_occurrence_filtering():
        print('warning: occurrence filtering in sensu-plugin is ' +
'deprecated, see http://bit.ly/sensu-plugin')
self.filter_repeated()
|
[
"def",
"filter",
"(",
"self",
")",
":",
"if",
"self",
".",
"deprecated_filtering_enabled",
"(",
")",
":",
"print",
"(",
"'warning: event filtering in sensu-plugin is deprecated,'",
"+",
"'see http://bit.ly/sensu-plugin'",
")",
"self",
".",
"filter_disabled",
"(",
")",
"self",
".",
"filter_silenced",
"(",
")",
"self",
".",
"filter_dependencies",
"(",
")",
"if",
"self",
".",
"deprecated_occurrence_filtering",
"(",
")",
":",
"print",
"(",
"'warning: occurrence filtering in sensu-plugin is'",
"+",
"'deprecated, see http://bit.ly/sensu-plugin'",
")",
"self",
".",
"filter_repeated",
"(",
")"
] |
Filters exit the process if the event should not be handled.
Filtering events is deprecated and will be removed in a future release.
|
[
"Filters",
"exit",
"the",
"proccess",
"if",
"the",
"event",
"should",
"not",
"be",
"handled",
".",
"Filtering",
"events",
"is",
"deprecated",
"and",
"will",
"be",
"removed",
"in",
"a",
"future",
"release",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L95-L111
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.bail
|
def bail(self, msg):
'''
Gracefully terminate with message
'''
client_name = self.event['client'].get('name', 'error:no-client-name')
check_name = self.event['check'].get('name', 'error:no-check-name')
print('{}: {}/{}'.format(msg, client_name, check_name))
sys.exit(0)
|
python
|
def bail(self, msg):
'''
Gracefully terminate with message
'''
client_name = self.event['client'].get('name', 'error:no-client-name')
check_name = self.event['check'].get('name', 'error:no-check-name')
print('{}: {}/{}'.format(msg, client_name, check_name))
sys.exit(0)
|
[
"def",
"bail",
"(",
"self",
",",
"msg",
")",
":",
"client_name",
"=",
"self",
".",
"event",
"[",
"'client'",
"]",
".",
"get",
"(",
"'name'",
",",
"'error:no-client-name'",
")",
"check_name",
"=",
"self",
".",
"event",
"[",
"'check'",
"]",
".",
"get",
"(",
"'name'",
",",
"'error:no-check-name'",
")",
"print",
"(",
"'{}: {}/{}'",
".",
"format",
"(",
"msg",
",",
"client_name",
",",
"check_name",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
Gracefully terminate with message
|
[
"Gracefully",
"terminate",
"with",
"message"
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L135-L142
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.api_request
|
def api_request(self, method, path):
'''
Query Sensu api for information.
'''
if not hasattr(self, 'api_settings'):
        raise ValueError('api.json settings not found')
if method.lower() == 'get':
_request = requests.get
elif method.lower() == 'post':
_request = requests.post
domain = self.api_settings['host']
uri = '{}:{}/{}'.format(domain, self.api_settings['port'], path)
if self.api_settings.get('user') and self.api_settings.get('password'):
auth = (self.api_settings['user'], self.api_settings['password'])
else:
auth = ()
req = _request(uri, auth=auth)
return req
|
python
|
def api_request(self, method, path):
'''
Query Sensu api for information.
'''
if not hasattr(self, 'api_settings'):
        raise ValueError('api.json settings not found')
if method.lower() == 'get':
_request = requests.get
elif method.lower() == 'post':
_request = requests.post
domain = self.api_settings['host']
uri = '{}:{}/{}'.format(domain, self.api_settings['port'], path)
if self.api_settings.get('user') and self.api_settings.get('password'):
auth = (self.api_settings['user'], self.api_settings['password'])
else:
auth = ()
req = _request(uri, auth=auth)
return req
|
[
"def",
"api_request",
"(",
"self",
",",
"method",
",",
"path",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'api_settings'",
")",
":",
"ValueError",
"(",
"'api.json settings not found'",
")",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'get'",
":",
"_request",
"=",
"requests",
".",
"get",
"elif",
"method",
".",
"lower",
"(",
")",
"==",
"'post'",
":",
"_request",
"=",
"requests",
".",
"post",
"domain",
"=",
"self",
".",
"api_settings",
"[",
"'host'",
"]",
"uri",
"=",
"'{}:{}/{}'",
".",
"format",
"(",
"domain",
",",
"self",
".",
"api_settings",
"[",
"'port'",
"]",
",",
"path",
")",
"if",
"self",
".",
"api_settings",
".",
"get",
"(",
"'user'",
")",
"and",
"self",
".",
"api_settings",
".",
"get",
"(",
"'password'",
")",
":",
"auth",
"=",
"(",
"self",
".",
"api_settings",
"[",
"'user'",
"]",
",",
"self",
".",
"api_settings",
"[",
"'password'",
"]",
")",
"else",
":",
"auth",
"=",
"(",
")",
"req",
"=",
"_request",
"(",
"uri",
",",
"auth",
"=",
"auth",
")",
"return",
"req"
] |
Query Sensu api for information.
|
[
"Query",
"Sensu",
"api",
"for",
"information",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L172-L191
|
train
|
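A self-contained sketch of the request pattern api_request wraps: choose the requests verb by name, build the URI from host and port, and only attach basic auth when both user and password are configured. The settings dict below is illustrative, not from the source.

import requests

api_settings = {'host': 'http://127.0.0.1', 'port': 4567,
                'user': '', 'password': ''}  # hypothetical local Sensu API

def api_request(method, path, settings=api_settings):
    _request = {'get': requests.get, 'post': requests.post}[method.lower()]
    uri = '{}:{}/{}'.format(settings['host'], settings['port'], path)
    auth = ()  # falsy -> requests sends no explicit Authorization header
    if settings.get('user') and settings.get('password'):
        auth = (settings['user'], settings['password'])
    return _request(uri, auth=auth)

# api_request('get', 'events')  # -> requests.Response from the Sensu API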
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.event_exists
|
def event_exists(self, client, check):
'''
Query Sensu API for event.
'''
return self.api_request(
'get',
'events/{}/{}'.format(client, check)
).status_code == 200
|
python
|
def event_exists(self, client, check):
'''
Query Sensu API for event.
'''
return self.api_request(
'get',
'events/{}/{}'.format(client, check)
).status_code == 200
|
[
"def",
"event_exists",
"(",
"self",
",",
"client",
",",
"check",
")",
":",
"return",
"self",
".",
"api_request",
"(",
"'get'",
",",
"'events/{}/{}'",
".",
"format",
"(",
"client",
",",
"check",
")",
")",
".",
"status_code",
"==",
"200"
] |
Query Sensu API for event.
|
[
"Query",
"Sensu",
"API",
"for",
"event",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L199-L206
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.filter_silenced
|
def filter_silenced(self):
'''
Determine whether a check is silenced and shouldn't handle.
'''
stashes = [
('client', '/silence/{}'.format(self.event['client']['name'])),
('check', '/silence/{}/{}'.format(
self.event['client']['name'],
self.event['check']['name'])),
('check', '/silence/all/{}'.format(self.event['check']['name']))
]
for scope, path in stashes:
if self.stash_exists(path):
self.bail(scope + ' alerts silenced')
|
python
|
def filter_silenced(self):
'''
Determine whether a check is silenced and shouldn't handle.
'''
stashes = [
('client', '/silence/{}'.format(self.event['client']['name'])),
('check', '/silence/{}/{}'.format(
self.event['client']['name'],
self.event['check']['name'])),
('check', '/silence/all/{}'.format(self.event['check']['name']))
]
for scope, path in stashes:
if self.stash_exists(path):
self.bail(scope + ' alerts silenced')
|
[
"def",
"filter_silenced",
"(",
"self",
")",
":",
"stashes",
"=",
"[",
"(",
"'client'",
",",
"'/silence/{}'",
".",
"format",
"(",
"self",
".",
"event",
"[",
"'client'",
"]",
"[",
"'name'",
"]",
")",
")",
",",
"(",
"'check'",
",",
"'/silence/{}/{}'",
".",
"format",
"(",
"self",
".",
"event",
"[",
"'client'",
"]",
"[",
"'name'",
"]",
",",
"self",
".",
"event",
"[",
"'check'",
"]",
"[",
"'name'",
"]",
")",
")",
",",
"(",
"'check'",
",",
"'/silence/all/{}'",
".",
"format",
"(",
"self",
".",
"event",
"[",
"'check'",
"]",
"[",
"'name'",
"]",
")",
")",
"]",
"for",
"scope",
",",
"path",
"in",
"stashes",
":",
"if",
"self",
".",
"stash_exists",
"(",
"path",
")",
":",
"self",
".",
"bail",
"(",
"scope",
"+",
"' alerts silenced'",
")"
] |
Determine whether a check is silenced and shouldn't handle.
|
[
"Determine",
"whether",
"a",
"check",
"is",
"silenced",
"and",
"shouldn",
"t",
"handle",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L216-L229
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.filter_dependencies
|
def filter_dependencies(self):
'''
Determine whether a check has dependencies.
'''
dependencies = self.event['check'].get('dependencies', None)
if dependencies is None or not isinstance(dependencies, list):
return
for dependency in self.event['check']['dependencies']:
if not str(dependency):
continue
dependency_split = tuple(dependency.split('/'))
# If there's a dependency on a check from another client, then use
# that client name, otherwise assume same client.
if len(dependency_split) == 2:
client, check = dependency_split
else:
client = self.event['client']['name']
check = dependency_split[0]
if self.event_exists(client, check):
self.bail('check dependency event exists')
|
python
|
def filter_dependencies(self):
'''
Determine whether a check has dependencies.
'''
dependencies = self.event['check'].get('dependencies', None)
if dependencies is None or not isinstance(dependencies, list):
return
for dependency in self.event['check']['dependencies']:
if not str(dependency):
continue
dependency_split = tuple(dependency.split('/'))
# If there's a dependency on a check from another client, then use
# that client name, otherwise assume same client.
if len(dependency_split) == 2:
client, check = dependency_split
else:
client = self.event['client']['name']
check = dependency_split[0]
if self.event_exists(client, check):
self.bail('check dependency event exists')
|
[
"def",
"filter_dependencies",
"(",
"self",
")",
":",
"dependencies",
"=",
"self",
".",
"event",
"[",
"'check'",
"]",
".",
"get",
"(",
"'dependencies'",
",",
"None",
")",
"if",
"dependencies",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"dependencies",
",",
"list",
")",
":",
"return",
"for",
"dependency",
"in",
"self",
".",
"event",
"[",
"'check'",
"]",
"[",
"'dependencies'",
"]",
":",
"if",
"not",
"str",
"(",
"dependency",
")",
":",
"continue",
"dependency_split",
"=",
"tuple",
"(",
"dependency",
".",
"split",
"(",
"'/'",
")",
")",
"# If there's a dependency on a check from another client, then use",
"# that client name, otherwise assume same client.",
"if",
"len",
"(",
"dependency_split",
")",
"==",
"2",
":",
"client",
",",
"check",
"=",
"dependency_split",
"else",
":",
"client",
"=",
"self",
".",
"event",
"[",
"'client'",
"]",
"[",
"'name'",
"]",
"check",
"=",
"dependency_split",
"[",
"0",
"]",
"if",
"self",
".",
"event_exists",
"(",
"client",
",",
"check",
")",
":",
"self",
".",
"bail",
"(",
"'check dependency event exists'",
")"
] |
Determine whether a check has dependencies.
|
[
"Determine",
"whether",
"a",
"check",
"has",
"dependencies",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L231-L250
|
train
|
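A pure-Python sketch of the 'client/check' parsing above: a two-part dependency names its client explicitly, while a bare check name falls back to the event's own client. The names are invented for illustration.

def split_dependency(dependency, default_client):
    parts = tuple(dependency.split('/'))
    if len(parts) == 2:
        return parts                       # ('other-client', 'check')
    return (default_client, parts[0])      # same client assumed

print(split_dependency('web-01/load', 'db-01'))  # ('web-01', 'load')
print(split_dependency('load', 'db-01'))         # ('db-01', 'load')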
sensu-plugins/sensu-plugin-python
|
sensu_plugin/handler.py
|
SensuHandler.filter_repeated
|
def filter_repeated(self):
'''
Determine whether a check is repeating.
'''
defaults = {
'occurrences': 1,
'interval': 30,
'refresh': 1800
}
# Override defaults with anything defined in the settings
if isinstance(self.settings['sensu_plugin'], dict):
defaults.update(self.settings['sensu_plugin'])
occurrences = int(self.event['check'].get(
'occurrences', defaults['occurrences']))
interval = int(self.event['check'].get(
'interval', defaults['interval']))
refresh = int(self.event['check'].get(
'refresh', defaults['refresh']))
if self.event['occurrences'] < occurrences:
self.bail('not enough occurrences')
if (self.event['occurrences'] > occurrences and
self.event['action'] == 'create'):
return
number = int(refresh / interval)
if (number == 0 or
(self.event['occurrences'] - occurrences) % number == 0):
return
self.bail('only handling every ' + str(number) + ' occurrences')
|
python
|
def filter_repeated(self):
'''
Determine whether a check is repeating.
'''
defaults = {
'occurrences': 1,
'interval': 30,
'refresh': 1800
}
# Override defaults with anything defined in the settings
if isinstance(self.settings['sensu_plugin'], dict):
defaults.update(self.settings['sensu_plugin'])
occurrences = int(self.event['check'].get(
'occurrences', defaults['occurrences']))
interval = int(self.event['check'].get(
'interval', defaults['interval']))
refresh = int(self.event['check'].get(
'refresh', defaults['refresh']))
if self.event['occurrences'] < occurrences:
self.bail('not enough occurrences')
if (self.event['occurrences'] > occurrences and
self.event['action'] == 'create'):
return
number = int(refresh / interval)
if (number == 0 or
(self.event['occurrences'] - occurrences) % number == 0):
return
self.bail('only handling every ' + str(number) + ' occurrences')
|
[
"def",
"filter_repeated",
"(",
"self",
")",
":",
"defaults",
"=",
"{",
"'occurrences'",
":",
"1",
",",
"'interval'",
":",
"30",
",",
"'refresh'",
":",
"1800",
"}",
"# Override defaults with anything defined in the settings",
"if",
"isinstance",
"(",
"self",
".",
"settings",
"[",
"'sensu_plugin'",
"]",
",",
"dict",
")",
":",
"defaults",
".",
"update",
"(",
"self",
".",
"settings",
"[",
"'sensu_plugin'",
"]",
")",
"occurrences",
"=",
"int",
"(",
"self",
".",
"event",
"[",
"'check'",
"]",
".",
"get",
"(",
"'occurrences'",
",",
"defaults",
"[",
"'occurrences'",
"]",
")",
")",
"interval",
"=",
"int",
"(",
"self",
".",
"event",
"[",
"'check'",
"]",
".",
"get",
"(",
"'interval'",
",",
"defaults",
"[",
"'interval'",
"]",
")",
")",
"refresh",
"=",
"int",
"(",
"self",
".",
"event",
"[",
"'check'",
"]",
".",
"get",
"(",
"'refresh'",
",",
"defaults",
"[",
"'refresh'",
"]",
")",
")",
"if",
"self",
".",
"event",
"[",
"'occurrences'",
"]",
"<",
"occurrences",
":",
"self",
".",
"bail",
"(",
"'not enough occurrences'",
")",
"if",
"(",
"self",
".",
"event",
"[",
"'occurrences'",
"]",
">",
"occurrences",
"and",
"self",
".",
"event",
"[",
"'action'",
"]",
"==",
"'create'",
")",
":",
"return",
"number",
"=",
"int",
"(",
"refresh",
"/",
"interval",
")",
"if",
"(",
"number",
"==",
"0",
"or",
"(",
"self",
".",
"event",
"[",
"'occurrences'",
"]",
"-",
"occurrences",
")",
"%",
"number",
"==",
"0",
")",
":",
"return",
"self",
".",
"bail",
"(",
"'only handling every '",
"+",
"str",
"(",
"number",
")",
"+",
"' occurrences'",
")"
] |
Determine whether a check is repeating.
|
[
"Determine",
"whether",
"a",
"check",
"is",
"repeating",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L252-L285
|
train
|
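The refresh arithmetic in filter_repeated is easy to verify standalone. With the defaults shown above (refresh=1800, interval=30), a persistently failing check is re-handled every 60th occurrence:

refresh, interval, occurrences = 1800, 30, 1  # defaults from the function above
number = int(refresh / interval)              # == 60
# an event is re-handled when (event_occurrences - occurrences) % number == 0
handled = [n for n in range(1, 200) if (n - occurrences) % number == 0]
print(handled)  # [1, 61, 121, 181]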
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
config_files
|
def config_files():
'''
Get list of currently used config files.
'''
sensu_loaded_tempfile = os.environ.get('SENSU_LOADED_TEMPFILE')
sensu_config_files = os.environ.get('SENSU_CONFIG_FILES')
sensu_v1_config = '/etc/sensu/config.json'
sensu_v1_confd = '/etc/sensu/conf.d'
if sensu_loaded_tempfile and os.path.isfile(sensu_loaded_tempfile):
with open(sensu_loaded_tempfile, 'r') as tempfile:
contents = tempfile.read()
return contents.split(':')
elif sensu_config_files:
return sensu_config_files.split(':')
else:
files = []
filenames = []
if os.path.isfile(sensu_v1_config):
files = [sensu_v1_config]
if os.path.isdir(sensu_v1_confd):
filenames = [f for f in os.listdir(sensu_v1_confd)
if os.path.splitext(f)[1] == '.json']
for filename in filenames:
files.append('{}/{}'.format(sensu_v1_confd, filename))
return files
|
python
|
def config_files():
'''
Get list of currently used config files.
'''
sensu_loaded_tempfile = os.environ.get('SENSU_LOADED_TEMPFILE')
sensu_config_files = os.environ.get('SENSU_CONFIG_FILES')
sensu_v1_config = '/etc/sensu/config.json'
sensu_v1_confd = '/etc/sensu/conf.d'
if sensu_loaded_tempfile and os.path.isfile(sensu_loaded_tempfile):
with open(sensu_loaded_tempfile, 'r') as tempfile:
contents = tempfile.read()
return contents.split(':')
elif sensu_config_files:
return sensu_config_files.split(':')
else:
files = []
filenames = []
if os.path.isfile(sensu_v1_config):
files = [sensu_v1_config]
if os.path.isdir(sensu_v1_confd):
filenames = [f for f in os.listdir(sensu_v1_confd)
if os.path.splitext(f)[1] == '.json']
for filename in filenames:
files.append('{}/{}'.format(sensu_v1_confd, filename))
return files
|
[
"def",
"config_files",
"(",
")",
":",
"sensu_loaded_tempfile",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SENSU_LOADED_TEMPFILE'",
")",
"sensu_config_files",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SENSU_CONFIG_FILES'",
")",
"sensu_v1_config",
"=",
"'/etc/sensu/config.json'",
"sensu_v1_confd",
"=",
"'/etc/sensu/conf.d'",
"if",
"sensu_loaded_tempfile",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"sensu_loaded_tempfile",
")",
":",
"with",
"open",
"(",
"sensu_loaded_tempfile",
",",
"'r'",
")",
"as",
"tempfile",
":",
"contents",
"=",
"tempfile",
".",
"read",
"(",
")",
"return",
"contents",
".",
"split",
"(",
"':'",
")",
"elif",
"sensu_config_files",
":",
"return",
"sensu_config_files",
".",
"split",
"(",
"':'",
")",
"else",
":",
"files",
"=",
"[",
"]",
"filenames",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"sensu_v1_config",
")",
":",
"files",
"=",
"[",
"sensu_v1_config",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sensu_v1_confd",
")",
":",
"filenames",
"=",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"sensu_v1_confd",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"1",
"]",
"==",
"'.json'",
"]",
"for",
"filename",
"in",
"filenames",
":",
"files",
".",
"append",
"(",
"'{}/{}'",
".",
"format",
"(",
"sensu_v1_confd",
",",
"filename",
")",
")",
"return",
"files"
] |
Get list of currently used config files.
|
[
"Get",
"list",
"of",
"currently",
"used",
"config",
"files",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L10-L34
|
train
|
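A hedged sketch of the SENSU_CONFIG_FILES branch of config_files; it assumes the sensu_plugin package is importable and uses throwaway paths:

import os
from sensu_plugin.utils import config_files

os.environ.pop('SENSU_LOADED_TEMPFILE', None)  # the tempfile branch takes precedence
os.environ['SENSU_CONFIG_FILES'] = '/tmp/a.json:/tmp/b.json'
print(config_files())  # ['/tmp/a.json', '/tmp/b.json']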
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
get_settings
|
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
return settings
|
python
|
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
return settings
|
[
"def",
"get_settings",
"(",
")",
":",
"settings",
"=",
"{",
"}",
"for",
"config_file",
"in",
"config_files",
"(",
")",
":",
"config_contents",
"=",
"load_config",
"(",
"config_file",
")",
"if",
"config_contents",
"is",
"not",
"None",
":",
"settings",
"=",
"deep_merge",
"(",
"settings",
",",
"config_contents",
")",
"return",
"settings"
] |
Get all currently loaded settings.
|
[
"Get",
"all",
"currently",
"loaded",
"settings",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L37-L46
|
train
|
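Typical use of get_settings is a single call at handler start-up. The 'api_url' key below is purely illustrative, not part of the library:

from sensu_plugin.utils import get_settings

settings = get_settings()
# deep_merge semantics: keys from later config files override earlier ones
api_url = settings.get('sensu_plugin', {}).get('api_url', 'http://127.0.0.1:4567')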
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
load_config
|
def load_config(filename):
'''
Read contents of config file.
'''
try:
with open(filename, 'r') as config_file:
return json.loads(config_file.read())
except IOError:
pass
|
python
|
def load_config(filename):
'''
Read contents of config file.
'''
try:
with open(filename, 'r') as config_file:
return json.loads(config_file.read())
except IOError:
pass
|
[
"def",
"load_config",
"(",
"filename",
")",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"config_file",
":",
"return",
"json",
".",
"loads",
"(",
"config_file",
".",
"read",
"(",
")",
")",
"except",
"IOError",
":",
"pass"
] |
Read contents of config file.
|
[
"Read",
"contents",
"of",
"config",
"file",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L49-L57
|
train
|
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
deep_merge
|
def deep_merge(dict_one, dict_two):
'''
Deep merge two dicts.
'''
merged = dict_one.copy()
for key, value in dict_two.items():
# value is equivalent to dict_two[key]
if (key in dict_one and
isinstance(dict_one[key], dict) and
isinstance(value, dict)):
merged[key] = deep_merge(dict_one[key], value)
elif (key in dict_one and
isinstance(dict_one[key], list) and
isinstance(value, list)):
merged[key] = list(set(dict_one[key] + value))
else:
merged[key] = value
return merged
|
python
|
def deep_merge(dict_one, dict_two):
'''
Deep merge two dicts.
'''
merged = dict_one.copy()
for key, value in dict_two.items():
# value is equivalent to dict_two[key]
if (key in dict_one and
isinstance(dict_one[key], dict) and
isinstance(value, dict)):
merged[key] = deep_merge(dict_one[key], value)
elif (key in dict_one and
isinstance(dict_one[key], list) and
isinstance(value, list)):
merged[key] = list(set(dict_one[key] + value))
else:
merged[key] = value
return merged
|
[
"def",
"deep_merge",
"(",
"dict_one",
",",
"dict_two",
")",
":",
"merged",
"=",
"dict_one",
".",
"copy",
"(",
")",
"for",
"key",
",",
"value",
"in",
"dict_two",
".",
"items",
"(",
")",
":",
"# value is equivalent to dict_two[key]",
"if",
"(",
"key",
"in",
"dict_one",
"and",
"isinstance",
"(",
"dict_one",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"value",
",",
"dict",
")",
")",
":",
"merged",
"[",
"key",
"]",
"=",
"deep_merge",
"(",
"dict_one",
"[",
"key",
"]",
",",
"value",
")",
"elif",
"(",
"key",
"in",
"dict_one",
"and",
"isinstance",
"(",
"dict_one",
"[",
"key",
"]",
",",
"list",
")",
"and",
"isinstance",
"(",
"value",
",",
"list",
")",
")",
":",
"merged",
"[",
"key",
"]",
"=",
"list",
"(",
"set",
"(",
"dict_one",
"[",
"key",
"]",
"+",
"value",
")",
")",
"else",
":",
"merged",
"[",
"key",
"]",
"=",
"value",
"return",
"merged"
] |
Deep merge two dicts.
|
[
"Deep",
"merge",
"two",
"dicts",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L60-L77
|
train
|
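The merge semantics (recursive for dicts, set-union for lists) can be checked directly; note that the list union does not preserve order:

from sensu_plugin.utils import deep_merge

base = {'check': {'interval': 30}, 'handlers': ['default']}
override = {'check': {'occurrences': 3}, 'handlers': ['pagerduty']}
merged = deep_merge(base, override)
print(merged['check'])             # {'interval': 30, 'occurrences': 3}
print(sorted(merged['handlers']))  # ['default', 'pagerduty']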
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
map_v2_event_into_v1
|
def map_v2_event_into_v1(event):
'''
Helper method to convert Sensu 2.x event into Sensu 1.x event.
'''
# return the event if it has already been mapped
if "v2_event_mapped_into_v1" in event:
return event
    # Trigger mapping code if entity exists and client does not
if not bool(event.get('client')) and "entity" in event:
event['client'] = event['entity']
# Fill in missing client attributes
if "name" not in event['client']:
event['client']['name'] = event['entity']['id']
if "subscribers" not in event['client']:
event['client']['subscribers'] = event['entity']['subscriptions']
# Fill in renamed check attributes expected in 1.4 event
if "subscribers" not in event['check']:
event['check']['subscribers'] = event['check']['subscriptions']
if "source" not in event['check']:
event['check']['source'] = event['check']['proxy_entity_id']
# Mimic 1.4 event action based on 2.0 event state
# action used in logs and fluentd plugins handlers
action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve',
'failing': 'create'}
if "state" in event['check']:
state = event['check']['state']
else:
state = "unknown::2.0_event"
if "action" not in event and state.lower() in action_state_mapping:
event['action'] = action_state_mapping[state.lower()]
else:
event['action'] = state
# Mimic 1.4 event history based on 2.0 event history
if "history" in event['check']:
# save the original history
event['check']['history_v2'] = deepcopy(event['check']['history'])
legacy_history = []
for history in event['check']['history']:
if isinstance(history['status'], int):
legacy_history.append(str(history['status']))
else:
legacy_history.append("3")
event['check']['history'] = legacy_history
# Setting flag indicating this function has already been called
event['v2_event_mapped_into_v1'] = True
# return the updated event
return event
|
python
|
def map_v2_event_into_v1(event):
'''
Helper method to convert Sensu 2.x event into Sensu 1.x event.
'''
# return the event if it has already been mapped
if "v2_event_mapped_into_v1" in event:
return event
    # Trigger mapping code if entity exists and client does not
if not bool(event.get('client')) and "entity" in event:
event['client'] = event['entity']
# Fill in missing client attributes
if "name" not in event['client']:
event['client']['name'] = event['entity']['id']
if "subscribers" not in event['client']:
event['client']['subscribers'] = event['entity']['subscriptions']
# Fill in renamed check attributes expected in 1.4 event
if "subscribers" not in event['check']:
event['check']['subscribers'] = event['check']['subscriptions']
if "source" not in event['check']:
event['check']['source'] = event['check']['proxy_entity_id']
# Mimic 1.4 event action based on 2.0 event state
# action used in logs and fluentd plugins handlers
action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve',
'failing': 'create'}
if "state" in event['check']:
state = event['check']['state']
else:
state = "unknown::2.0_event"
if "action" not in event and state.lower() in action_state_mapping:
event['action'] = action_state_mapping[state.lower()]
else:
event['action'] = state
# Mimic 1.4 event history based on 2.0 event history
if "history" in event['check']:
# save the original history
event['check']['history_v2'] = deepcopy(event['check']['history'])
legacy_history = []
for history in event['check']['history']:
if isinstance(history['status'], int):
legacy_history.append(str(history['status']))
else:
legacy_history.append("3")
event['check']['history'] = legacy_history
# Setting flag indicating this function has already been called
event['v2_event_mapped_into_v1'] = True
# return the updated event
return event
|
[
"def",
"map_v2_event_into_v1",
"(",
"event",
")",
":",
"# return the event if it has already been mapped",
"if",
"\"v2_event_mapped_into_v1\"",
"in",
"event",
":",
"return",
"event",
"# Trigger mapping code if enity exists and client does not",
"if",
"not",
"bool",
"(",
"event",
".",
"get",
"(",
"'client'",
")",
")",
"and",
"\"entity\"",
"in",
"event",
":",
"event",
"[",
"'client'",
"]",
"=",
"event",
"[",
"'entity'",
"]",
"# Fill in missing client attributes",
"if",
"\"name\"",
"not",
"in",
"event",
"[",
"'client'",
"]",
":",
"event",
"[",
"'client'",
"]",
"[",
"'name'",
"]",
"=",
"event",
"[",
"'entity'",
"]",
"[",
"'id'",
"]",
"if",
"\"subscribers\"",
"not",
"in",
"event",
"[",
"'client'",
"]",
":",
"event",
"[",
"'client'",
"]",
"[",
"'subscribers'",
"]",
"=",
"event",
"[",
"'entity'",
"]",
"[",
"'subscriptions'",
"]",
"# Fill in renamed check attributes expected in 1.4 event",
"if",
"\"subscribers\"",
"not",
"in",
"event",
"[",
"'check'",
"]",
":",
"event",
"[",
"'check'",
"]",
"[",
"'subscribers'",
"]",
"=",
"event",
"[",
"'check'",
"]",
"[",
"'subscriptions'",
"]",
"if",
"\"source\"",
"not",
"in",
"event",
"[",
"'check'",
"]",
":",
"event",
"[",
"'check'",
"]",
"[",
"'source'",
"]",
"=",
"event",
"[",
"'check'",
"]",
"[",
"'proxy_entity_id'",
"]",
"# Mimic 1.4 event action based on 2.0 event state",
"# action used in logs and fluentd plugins handlers",
"action_state_mapping",
"=",
"{",
"'flapping'",
":",
"'flapping'",
",",
"'passing'",
":",
"'resolve'",
",",
"'failing'",
":",
"'create'",
"}",
"if",
"\"state\"",
"in",
"event",
"[",
"'check'",
"]",
":",
"state",
"=",
"event",
"[",
"'check'",
"]",
"[",
"'state'",
"]",
"else",
":",
"state",
"=",
"\"unknown::2.0_event\"",
"if",
"\"action\"",
"not",
"in",
"event",
"and",
"state",
".",
"lower",
"(",
")",
"in",
"action_state_mapping",
":",
"event",
"[",
"'action'",
"]",
"=",
"action_state_mapping",
"[",
"state",
".",
"lower",
"(",
")",
"]",
"else",
":",
"event",
"[",
"'action'",
"]",
"=",
"state",
"# Mimic 1.4 event history based on 2.0 event history",
"if",
"\"history\"",
"in",
"event",
"[",
"'check'",
"]",
":",
"# save the original history",
"event",
"[",
"'check'",
"]",
"[",
"'history_v2'",
"]",
"=",
"deepcopy",
"(",
"event",
"[",
"'check'",
"]",
"[",
"'history'",
"]",
")",
"legacy_history",
"=",
"[",
"]",
"for",
"history",
"in",
"event",
"[",
"'check'",
"]",
"[",
"'history'",
"]",
":",
"if",
"isinstance",
"(",
"history",
"[",
"'status'",
"]",
",",
"int",
")",
":",
"legacy_history",
".",
"append",
"(",
"str",
"(",
"history",
"[",
"'status'",
"]",
")",
")",
"else",
":",
"legacy_history",
".",
"append",
"(",
"\"3\"",
")",
"event",
"[",
"'check'",
"]",
"[",
"'history'",
"]",
"=",
"legacy_history",
"# Setting flag indicating this function has already been called",
"event",
"[",
"'v2_event_mapped_into_v1'",
"]",
"=",
"True",
"# return the updated event",
"return",
"event"
] |
Helper method to convert Sensu 2.x event into Sensu 1.x event.
|
[
"Helper",
"method",
"to",
"convert",
"Sensu",
"2",
".",
"x",
"event",
"into",
"Sensu",
"1",
".",
"x",
"event",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L80-L139
|
train
|
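A self-contained sketch of the mapping, using a minimal hypothetical 2.x event:

from sensu_plugin.utils import map_v2_event_into_v1

v2 = {
    'entity': {'id': 'web-01', 'subscriptions': ['web']},
    'check': {'subscriptions': ['web'], 'proxy_entity_id': 'web-01',
              'state': 'failing', 'history': [{'status': 2}]},
}
v1 = map_v2_event_into_v1(v2)
print(v1['client']['name'])    # 'web-01'
print(v1['action'])            # 'create' ('failing' maps to create)
print(v1['check']['history'])  # ['2'] (int statuses stringified, others -> '3')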
sensu-plugins/sensu-plugin-python
|
sensu_plugin/check.py
|
SensuPluginCheck.check_name
|
def check_name(self, name=None):
'''
Checks the plugin name and sets it accordingly.
Uses name if specified, class name if not set.
'''
if name:
self.plugin_info['check_name'] = name
if self.plugin_info['check_name'] is not None:
return self.plugin_info['check_name']
return self.__class__.__name__
|
python
|
def check_name(self, name=None):
'''
Checks the plugin name and sets it accordingly.
Uses name if specified, class name if not set.
'''
if name:
self.plugin_info['check_name'] = name
if self.plugin_info['check_name'] is not None:
return self.plugin_info['check_name']
return self.__class__.__name__
|
[
"def",
"check_name",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"self",
".",
"plugin_info",
"[",
"'check_name'",
"]",
"=",
"name",
"if",
"self",
".",
"plugin_info",
"[",
"'check_name'",
"]",
"is",
"not",
"None",
":",
"return",
"self",
".",
"plugin_info",
"[",
"'check_name'",
"]",
"return",
"self",
".",
"__class__",
".",
"__name__"
] |
Checks the plugin name and sets it accordingly.
Uses name if specified, class name if not set.
|
[
"Checks",
"the",
"plugin",
"name",
"and",
"sets",
"it",
"accordingly",
".",
"Uses",
"name",
"if",
"specified",
"class",
"name",
"if",
"not",
"set",
"."
] |
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/check.py#L11-L22
|
train
|
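The fallback logic can be exercised without constructing a real plugin (construction normally wires up argument parsing); here the unbound method is borrowed onto a dummy class:

from sensu_plugin.check import SensuPluginCheck

class Dummy:
    plugin_info = {'check_name': None}
    check_name = SensuPluginCheck.check_name

d = Dummy()
print(d.check_name())              # 'Dummy' -- class-name fallback
print(d.check_name('check-disk'))  # 'check-disk' -- explicit name wins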
chainer/chainerui
|
chainerui/models/result.py
|
Result.sampled_logs
|
def sampled_logs(self, logs_limit=-1):
"""Return up to `logs_limit` logs.
If `logs_limit` is -1, this function will return all logs that belong
to the result.
"""
logs_count = len(self.logs)
if logs_limit == -1 or logs_count <= logs_limit:
return self.logs
elif logs_limit == 0:
return []
elif logs_limit == 1:
return [self.logs[-1]]
else:
def get_sampled_log(idx):
# always include the first and last element of `self.logs`
return self.logs[idx * (logs_count - 1) // (logs_limit - 1)]
return [get_sampled_log(i) for i in range(logs_limit)]
|
python
|
def sampled_logs(self, logs_limit=-1):
"""Return up to `logs_limit` logs.
If `logs_limit` is -1, this function will return all logs that belong
to the result.
"""
logs_count = len(self.logs)
if logs_limit == -1 or logs_count <= logs_limit:
return self.logs
elif logs_limit == 0:
return []
elif logs_limit == 1:
return [self.logs[-1]]
else:
def get_sampled_log(idx):
# always include the first and last element of `self.logs`
return self.logs[idx * (logs_count - 1) // (logs_limit - 1)]
return [get_sampled_log(i) for i in range(logs_limit)]
|
[
"def",
"sampled_logs",
"(",
"self",
",",
"logs_limit",
"=",
"-",
"1",
")",
":",
"logs_count",
"=",
"len",
"(",
"self",
".",
"logs",
")",
"if",
"logs_limit",
"==",
"-",
"1",
"or",
"logs_count",
"<=",
"logs_limit",
":",
"return",
"self",
".",
"logs",
"elif",
"logs_limit",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"logs_limit",
"==",
"1",
":",
"return",
"[",
"self",
".",
"logs",
"[",
"-",
"1",
"]",
"]",
"else",
":",
"def",
"get_sampled_log",
"(",
"idx",
")",
":",
"# always include the first and last element of `self.logs`",
"return",
"self",
".",
"logs",
"[",
"idx",
"*",
"(",
"logs_count",
"-",
"1",
")",
"//",
"(",
"logs_limit",
"-",
"1",
")",
"]",
"return",
"[",
"get_sampled_log",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"logs_limit",
")",
"]"
] |
Return up to `logs_limit` logs.
If `logs_limit` is -1, this function will return all logs that belong
to the result.
|
[
"Return",
"up",
"to",
"logs_limit",
"logs",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L60-L77
|
train
|
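The sampling index arithmetic stands alone and shows that the first and last logs are always kept:

logs_count, logs_limit = 10, 4
idxs = [i * (logs_count - 1) // (logs_limit - 1) for i in range(logs_limit)]
print(idxs)  # [0, 3, 6, 9]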
chainer/chainerui
|
chainerui/models/result.py
|
Result.serialize_with_sampled_logs
|
def serialize_with_sampled_logs(self, logs_limit=-1):
"""serialize a result with up to `logs_limit` logs.
If `logs_limit` is -1, this function will return a result with all its
logs.
"""
return {
'id': self.id,
'pathName': self.path_name,
'name': self.name,
'isUnregistered': self.is_unregistered,
'logs': [log.serialize for log in self.sampled_logs(logs_limit)],
'args': self.args.serialize if self.args is not None else [],
'commands': [cmd.serialize for cmd in self.commands],
'snapshots': [cmd.serialize for cmd in self.snapshots],
'logModifiedAt': self.log_modified_at.isoformat()
}
|
python
|
def serialize_with_sampled_logs(self, logs_limit=-1):
"""serialize a result with up to `logs_limit` logs.
If `logs_limit` is -1, this function will return a result with all its
logs.
"""
return {
'id': self.id,
'pathName': self.path_name,
'name': self.name,
'isUnregistered': self.is_unregistered,
'logs': [log.serialize for log in self.sampled_logs(logs_limit)],
'args': self.args.serialize if self.args is not None else [],
'commands': [cmd.serialize for cmd in self.commands],
'snapshots': [cmd.serialize for cmd in self.snapshots],
'logModifiedAt': self.log_modified_at.isoformat()
}
|
[
"def",
"serialize_with_sampled_logs",
"(",
"self",
",",
"logs_limit",
"=",
"-",
"1",
")",
":",
"return",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'pathName'",
":",
"self",
".",
"path_name",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'isUnregistered'",
":",
"self",
".",
"is_unregistered",
",",
"'logs'",
":",
"[",
"log",
".",
"serialize",
"for",
"log",
"in",
"self",
".",
"sampled_logs",
"(",
"logs_limit",
")",
"]",
",",
"'args'",
":",
"self",
".",
"args",
".",
"serialize",
"if",
"self",
".",
"args",
"is",
"not",
"None",
"else",
"[",
"]",
",",
"'commands'",
":",
"[",
"cmd",
".",
"serialize",
"for",
"cmd",
"in",
"self",
".",
"commands",
"]",
",",
"'snapshots'",
":",
"[",
"cmd",
".",
"serialize",
"for",
"cmd",
"in",
"self",
".",
"snapshots",
"]",
",",
"'logModifiedAt'",
":",
"self",
".",
"log_modified_at",
".",
"isoformat",
"(",
")",
"}"
] |
serialize a result with up to `logs_limit` logs.
If `logs_limit` is -1, this function will return a result with all its
logs.
|
[
"serialize",
"a",
"result",
"with",
"up",
"to",
"logs_limit",
"logs",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L79-L96
|
train
|
chainer/chainerui
|
chainerui/summary.py
|
reporter
|
def reporter(prefix=None, out=None, subdir='', timeout=5, **kwargs):
"""Summary media assets to visualize.
``reporter`` function collects media assets by the ``with`` statement and
aggregates in same row to visualize. This function returns an object which
provides the following methods.
* :meth:`~chainerui.summary._Reporter.image`: collect images. almost same \
as :func:`~chainerui.summary.image`
* :meth:`~chainerui.summary._Reporter.audio`: collect audio. almost same \
as :func:`~chainerui.summary.audio`
Example of how to set several assets::
>>> from chainerui.summary import reporter
>>> summary.set_out('/path/to/output') # same as 'log' file directory
>>>
>>> with reporter(epoch=1, iteration=10) as r:
>>> r.image(image_array1)
>>> r.image(image_array2)
>>> r.audio(audio_array, 44100)
>>> # image_array1 and image_array2 are visualized on a browser
        >>> # audio_array can be listened to on a browser
Args:
prefix (str): prefix of column name.
out (str): directory path of output.
subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            is always added, even when no pairs are given.
"""
report = _Reporter(prefix, out, subdir, **kwargs)
yield report
report.save(timeout)
|
python
|
def reporter(prefix=None, out=None, subdir='', timeout=5, **kwargs):
"""Summary media assets to visualize.
    The ``reporter`` function collects media assets within a ``with`` statement
    and aggregates them in the same row for visualization. This function returns
    an object which provides the following methods.
    * :meth:`~chainerui.summary._Reporter.image`: collect images. almost the \
      same as :func:`~chainerui.summary.image`
    * :meth:`~chainerui.summary._Reporter.audio`: collect audio. almost the \
      same as :func:`~chainerui.summary.audio`
Example of how to set several assets::
>>> from chainerui.summary import reporter
>>> summary.set_out('/path/to/output') # same as 'log' file directory
>>>
>>> with reporter(epoch=1, iteration=10) as r:
>>> r.image(image_array1)
>>> r.image(image_array2)
>>> r.audio(audio_array, 44100)
>>> # image_array1 and image_array2 are visualized on a browser
        >>> # audio_array can be listened to on a browser
Args:
prefix (str): prefix of column name.
out (str): directory path of output.
subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            is always added, even when no pairs are given.
"""
report = _Reporter(prefix, out, subdir, **kwargs)
yield report
report.save(timeout)
|
[
"def",
"reporter",
"(",
"prefix",
"=",
"None",
",",
"out",
"=",
"None",
",",
"subdir",
"=",
"''",
",",
"timeout",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"report",
"=",
"_Reporter",
"(",
"prefix",
",",
"out",
",",
"subdir",
",",
"*",
"*",
"kwargs",
")",
"yield",
"report",
"report",
".",
"save",
"(",
"timeout",
")"
] |
Summary media assets to visualize.
    The ``reporter`` function collects media assets within a ``with`` statement
    and aggregates them in the same row for visualization. This function returns
    an object which provides the following methods.
    * :meth:`~chainerui.summary._Reporter.image`: collect images. almost the \
      same as :func:`~chainerui.summary.image`
    * :meth:`~chainerui.summary._Reporter.audio`: collect audio. almost the \
      same as :func:`~chainerui.summary.audio`
Example of how to set several assets::
>>> from chainerui.summary import reporter
>>> summary.set_out('/path/to/output') # same as 'log' file directory
>>>
>>> with reporter(epoch=1, iteration=10) as r:
>>> r.image(image_array1)
>>> r.image(image_array2)
>>> r.audio(audio_array, 44100)
>>> # image_array1 and image_array2 are visualized on a browser
        >>> # audio_array can be listened to on a browser
Args:
prefix (str): prefix of column name.
out (str): directory path of output.
subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            is always added, even when no pairs are given.
|
[
"Summary",
"media",
"assets",
"to",
"visualize",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L174-L208
|
train
|
chainer/chainerui
|
chainerui/summary.py
|
audio
|
def audio(audio, sample_rate, name=None, out=None, subdir='', timeout=5,
**kwargs):
"""summary audio files to listen on a browser.
    A sampled array is converted to a WAV audio file, saved to the output
    directory, and reported to the ChainerUI server. The audio file is saved
    every time this function is called. The audio files are listed vertically
    on the `assets` endpoint. To aggregate audio files in a row, use
    :func:`~chainerui.summary.reporter`.
    Example of how to set arguments::
        >>> from chainerui import summary
        >>> summary.set_out('/path/to/output')
        >>> rate = 44100
        >>>
        >>> summary.audio(sampled_array, rate, name='test')
        >>> # sampled_array can be listened to on a browser.
    Add a description about the audio file::
        >>> summary.audio(
        >>>     sampled_array, rate, name='test', epoch=1, iteration=100)
        >>> # 'epoch' and 'iteration' columns will be shown.
    Args:
        audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
            :class:`chainer.Variable`): sampled wave array.
        sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` is assigned.
        out (str): directory path of output.
        subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            of when the audio was created is always added.
"""
from chainerui.report.audio_report import check_available
if not check_available():
return
from chainerui.report.audio_report import report as _audio
out_root = _chainerui_asset_observer.get_outpath(out)
out_path = os.path.join(out_root, subdir)
if not os.path.isdir(out_path):
os.makedirs(out_path)
col_name = name
if col_name is None:
col_name = 'audio'
filename, created_at = _audio(audio, sample_rate, out_path, col_name)
value = kwargs
value['timestamp'] = created_at.isoformat()
value['audios'] = {col_name: os.path.join(subdir, filename)}
_chainerui_asset_observer.add(value)
_chainerui_asset_observer.save(out_root, timeout)
|
python
|
def audio(audio, sample_rate, name=None, out=None, subdir='', timeout=5,
**kwargs):
"""summary audio files to listen on a browser.
    A sampled array is converted to a WAV audio file, saved to the output
    directory, and reported to the ChainerUI server. The audio file is saved
    every time this function is called. The audio files are listed vertically
    on the `assets` endpoint. To aggregate audio files in a row, use
    :func:`~chainerui.summary.reporter`.
    Example of how to set arguments::
        >>> from chainerui import summary
        >>> summary.set_out('/path/to/output')
        >>> rate = 44100
        >>>
        >>> summary.audio(sampled_array, rate, name='test')
        >>> # sampled_array can be listened to on a browser.
    Add a description about the audio file::
        >>> summary.audio(
        >>>     sampled_array, rate, name='test', epoch=1, iteration=100)
        >>> # 'epoch' and 'iteration' columns will be shown.
    Args:
        audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
            :class:`chainer.Variable`): sampled wave array.
        sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` is assigned.
        out (str): directory path of output.
        subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            of when the audio was created is always added.
"""
from chainerui.report.audio_report import check_available
if not check_available():
return
from chainerui.report.audio_report import report as _audio
out_root = _chainerui_asset_observer.get_outpath(out)
out_path = os.path.join(out_root, subdir)
if not os.path.isdir(out_path):
os.makedirs(out_path)
col_name = name
if col_name is None:
col_name = 'audio'
filename, created_at = _audio(audio, sample_rate, out_path, col_name)
value = kwargs
value['timestamp'] = created_at.isoformat()
value['audios'] = {col_name: os.path.join(subdir, filename)}
_chainerui_asset_observer.add(value)
_chainerui_asset_observer.save(out_root, timeout)
|
[
"def",
"audio",
"(",
"audio",
",",
"sample_rate",
",",
"name",
"=",
"None",
",",
"out",
"=",
"None",
",",
"subdir",
"=",
"''",
",",
"timeout",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"chainerui",
".",
"report",
".",
"audio_report",
"import",
"check_available",
"if",
"not",
"check_available",
"(",
")",
":",
"return",
"from",
"chainerui",
".",
"report",
".",
"audio_report",
"import",
"report",
"as",
"_audio",
"out_root",
"=",
"_chainerui_asset_observer",
".",
"get_outpath",
"(",
"out",
")",
"out_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_root",
",",
"subdir",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"out_path",
")",
":",
"os",
".",
"makedirs",
"(",
"out_path",
")",
"col_name",
"=",
"name",
"if",
"col_name",
"is",
"None",
":",
"col_name",
"=",
"'audio'",
"filename",
",",
"created_at",
"=",
"_audio",
"(",
"audio",
",",
"sample_rate",
",",
"out_path",
",",
"col_name",
")",
"value",
"=",
"kwargs",
"value",
"[",
"'timestamp'",
"]",
"=",
"created_at",
".",
"isoformat",
"(",
")",
"value",
"[",
"'audios'",
"]",
"=",
"{",
"col_name",
":",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"filename",
")",
"}",
"_chainerui_asset_observer",
".",
"add",
"(",
"value",
")",
"_chainerui_asset_observer",
".",
"save",
"(",
"out_root",
",",
"timeout",
")"
] |
summary audio files to listen on a browser.
    A sampled array is converted to a WAV audio file, saved to the output
    directory, and reported to the ChainerUI server. The audio file is saved
    every time this function is called. The audio files are listed vertically
    on the `assets` endpoint. To aggregate audio files in a row, use
    :func:`~chainerui.summary.reporter`.
    Example of how to set arguments::
        >>> from chainerui import summary
        >>> summary.set_out('/path/to/output')
        >>> rate = 44100
        >>>
        >>> summary.audio(sampled_array, rate, name='test')
        >>> # sampled_array can be listened to on a browser.
    Add a description about the audio file::
        >>> summary.audio(
        >>>     sampled_array, rate, name='test', epoch=1, iteration=100)
        >>> # 'epoch' and 'iteration' columns will be shown.
    Args:
        audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
            :class:`chainer.Variable`): sampled wave array.
        sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` is assigned.
        out (str): directory path of output.
        subdir (str): sub-directory path of output.
        **kwargs (dict): key-value pairs to show as description. a timestamp
            of when the audio was created is always added.
|
[
"summary",
"audio",
"files",
"to",
"listen",
"on",
"a",
"browser",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L304-L359
|
train
|
chainer/chainerui
|
chainerui/summary.py
|
_Reporter.audio
|
def audio(self, audio, sample_rate, name=None, subdir=''):
"""Summary audio to listen on web browser.
Args:
audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
:class:`chainer.Variable`): sampled wave array.
sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` + a sequential number is assigned.
subdir (str): sub-directory path of output.
"""
from chainerui.report.audio_report import check_available
if not check_available():
return
from chainerui.report.audio_report import report as _audio
col_name = self.get_col_name(name, 'audio')
out_dir, rel_out_dir = self.get_subdir(subdir)
filename, _ = _audio(audio, sample_rate, out_dir, col_name)
self.audios[col_name] = os.path.join(rel_out_dir, filename)
self.count += 1
|
python
|
def audio(self, audio, sample_rate, name=None, subdir=''):
"""Summary audio to listen on web browser.
Args:
audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
:class:`chainer.Variable`): sampled wave array.
sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` + a sequential number is assigned.
subdir (str): sub-directory path of output.
"""
from chainerui.report.audio_report import check_available
if not check_available():
return
from chainerui.report.audio_report import report as _audio
col_name = self.get_col_name(name, 'audio')
out_dir, rel_out_dir = self.get_subdir(subdir)
filename, _ = _audio(audio, sample_rate, out_dir, col_name)
self.audios[col_name] = os.path.join(rel_out_dir, filename)
self.count += 1
|
[
"def",
"audio",
"(",
"self",
",",
"audio",
",",
"sample_rate",
",",
"name",
"=",
"None",
",",
"subdir",
"=",
"''",
")",
":",
"from",
"chainerui",
".",
"report",
".",
"audio_report",
"import",
"check_available",
"if",
"not",
"check_available",
"(",
")",
":",
"return",
"from",
"chainerui",
".",
"report",
".",
"audio_report",
"import",
"report",
"as",
"_audio",
"col_name",
"=",
"self",
".",
"get_col_name",
"(",
"name",
",",
"'audio'",
")",
"out_dir",
",",
"rel_out_dir",
"=",
"self",
".",
"get_subdir",
"(",
"subdir",
")",
"filename",
",",
"_",
"=",
"_audio",
"(",
"audio",
",",
"sample_rate",
",",
"out_dir",
",",
"col_name",
")",
"self",
".",
"audios",
"[",
"col_name",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"rel_out_dir",
",",
"filename",
")",
"self",
".",
"count",
"+=",
"1"
] |
Summary audio to listen on web browser.
Args:
audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \
:class:`chainer.Variable`): sampled wave array.
sample_rate (int): sampling rate.
        name (str): name of the audio. set as column name. when not set,
            ``'audio'`` + a sequential number is assigned.
subdir (str): sub-directory path of output.
|
[
"Summary",
"audio",
"to",
"listen",
"on",
"web",
"browser",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L106-L128
|
train
|
chainer/chainerui
|
chainerui/models/project.py
|
Project.create
|
def create(cls, path_name=None, name=None, crawlable=True):
"""initialize an instance and save it to db."""
project = cls(path_name, name, crawlable)
db.session.add(project)
db.session.commit()
return collect_results(project, force=True)
|
python
|
def create(cls, path_name=None, name=None, crawlable=True):
"""initialize an instance and save it to db."""
project = cls(path_name, name, crawlable)
db.session.add(project)
db.session.commit()
return collect_results(project, force=True)
|
[
"def",
"create",
"(",
"cls",
",",
"path_name",
"=",
"None",
",",
"name",
"=",
"None",
",",
"crawlable",
"=",
"True",
")",
":",
"project",
"=",
"cls",
"(",
"path_name",
",",
"name",
",",
"crawlable",
")",
"db",
".",
"session",
".",
"add",
"(",
"project",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"collect_results",
"(",
"project",
",",
"force",
"=",
"True",
")"
] |
initialize an instance and save it to db.
|
[
"initialize",
"an",
"instance",
"and",
"save",
"it",
"to",
"db",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/project.py#L36-L44
|
train
|
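A hedged sketch of Project.create; it assumes a chainerui database that has already been created and migrated, and the path is hypothetical:

from chainerui.models.project import Project

# commits the new row, then immediately crawls it via collect_results(force=True)
project = Project.create(path_name='/data/experiments', name='experiments')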
chainer/chainerui
|
chainerui/tasks/collect_assets.py
|
collect_assets
|
def collect_assets(result, force=False):
"""collect assets from meta file
    Assets are collected only when the metafile has been updated. If the number
    of assets has decreased, the stored assets are reset and re-collected.
"""
path_name = result.path_name
info_path = os.path.join(path_name, summary.CHAINERUI_ASSETS_METAFILE_NAME)
if not os.path.isfile(info_path):
return
start_idx = len(result.assets)
file_modified_at = datetime.datetime.fromtimestamp(os.path.getmtime(
info_path))
if start_idx > 0:
if result.assets[-1].file_modified_at == file_modified_at:
return
with open(info_path, 'r') as f:
info_list = json.load(f, object_pairs_hook=OrderedDict)
if len(info_list) < start_idx:
start_idx = 0
result.assets = []
for base_info in info_list[start_idx:]:
asset_path = base_info.pop('images', {})
asset_path.update(base_info.pop('audios', {}))
asset = Asset.create(
result_id=result.id, summary=base_info,
file_modified_at=file_modified_at)
for key, path in asset_path.items():
with open(os.path.join(path_name, path), 'rb') as f:
data = f.read()
content = Bindata(
asset_id=asset.id, name=path, tag=key, content=data)
asset.content_list.append(content)
result.assets.append(asset)
db.session.commit()
|
python
|
def collect_assets(result, force=False):
"""collect assets from meta file
    Assets are collected only when the metafile has been updated. If the number
    of assets has decreased, the stored assets are reset and re-collected.
"""
path_name = result.path_name
info_path = os.path.join(path_name, summary.CHAINERUI_ASSETS_METAFILE_NAME)
if not os.path.isfile(info_path):
return
start_idx = len(result.assets)
file_modified_at = datetime.datetime.fromtimestamp(os.path.getmtime(
info_path))
if start_idx > 0:
if result.assets[-1].file_modified_at == file_modified_at:
return
with open(info_path, 'r') as f:
info_list = json.load(f, object_pairs_hook=OrderedDict)
if len(info_list) < start_idx:
start_idx = 0
result.assets = []
for base_info in info_list[start_idx:]:
asset_path = base_info.pop('images', {})
asset_path.update(base_info.pop('audios', {}))
asset = Asset.create(
result_id=result.id, summary=base_info,
file_modified_at=file_modified_at)
for key, path in asset_path.items():
with open(os.path.join(path_name, path), 'rb') as f:
data = f.read()
content = Bindata(
asset_id=asset.id, name=path, tag=key, content=data)
asset.content_list.append(content)
result.assets.append(asset)
db.session.commit()
|
[
"def",
"collect_assets",
"(",
"result",
",",
"force",
"=",
"False",
")",
":",
"path_name",
"=",
"result",
".",
"path_name",
"info_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_name",
",",
"summary",
".",
"CHAINERUI_ASSETS_METAFILE_NAME",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"info_path",
")",
":",
"return",
"start_idx",
"=",
"len",
"(",
"result",
".",
"assets",
")",
"file_modified_at",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"info_path",
")",
")",
"if",
"start_idx",
">",
"0",
":",
"if",
"result",
".",
"assets",
"[",
"-",
"1",
"]",
".",
"file_modified_at",
"==",
"file_modified_at",
":",
"return",
"with",
"open",
"(",
"info_path",
",",
"'r'",
")",
"as",
"f",
":",
"info_list",
"=",
"json",
".",
"load",
"(",
"f",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"if",
"len",
"(",
"info_list",
")",
"<",
"start_idx",
":",
"start_idx",
"=",
"0",
"result",
".",
"assets",
"=",
"[",
"]",
"for",
"base_info",
"in",
"info_list",
"[",
"start_idx",
":",
"]",
":",
"asset_path",
"=",
"base_info",
".",
"pop",
"(",
"'images'",
",",
"{",
"}",
")",
"asset_path",
".",
"update",
"(",
"base_info",
".",
"pop",
"(",
"'audios'",
",",
"{",
"}",
")",
")",
"asset",
"=",
"Asset",
".",
"create",
"(",
"result_id",
"=",
"result",
".",
"id",
",",
"summary",
"=",
"base_info",
",",
"file_modified_at",
"=",
"file_modified_at",
")",
"for",
"key",
",",
"path",
"in",
"asset_path",
".",
"items",
"(",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path_name",
",",
"path",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"content",
"=",
"Bindata",
"(",
"asset_id",
"=",
"asset",
".",
"id",
",",
"name",
"=",
"path",
",",
"tag",
"=",
"key",
",",
"content",
"=",
"data",
")",
"asset",
".",
"content_list",
".",
"append",
"(",
"content",
")",
"result",
".",
"assets",
".",
"append",
"(",
"asset",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] |
collect assets from meta file
    Assets are collected only when the metafile has been updated. If the number
    of assets has decreased, the stored assets are reset and re-collected.
|
[
"collect",
"assets",
"from",
"meta",
"file"
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/tasks/collect_assets.py#L12-L50
|
train
|
chainer/chainerui
|
chainerui/utils/save_args.py
|
save_args
|
def save_args(conditions, out_path):
"""A util function to save experiment condition for job table.
Args:
conditions (:class:`argparse.Namespace` or dict): Experiment conditions
to show on a job table. Keys are show as table header and values
are show at a job row.
out_path (str): Output directory name to save conditions.
"""
if isinstance(conditions, argparse.Namespace):
args = vars(conditions)
else:
args = conditions
try:
os.makedirs(out_path)
except OSError:
pass
with tempdir(prefix='args', dir=out_path) as tempd:
path = os.path.join(tempd, 'args.json')
with open(path, 'w') as f:
json.dump(args, f, indent=4)
new_path = os.path.join(out_path, 'args')
shutil.move(path, new_path)
|
python
|
def save_args(conditions, out_path):
"""A util function to save experiment condition for job table.
Args:
conditions (:class:`argparse.Namespace` or dict): Experiment conditions
            to show on a job table. Keys are shown as the table header and
            values are shown in a job row.
out_path (str): Output directory name to save conditions.
"""
if isinstance(conditions, argparse.Namespace):
args = vars(conditions)
else:
args = conditions
try:
os.makedirs(out_path)
except OSError:
pass
with tempdir(prefix='args', dir=out_path) as tempd:
path = os.path.join(tempd, 'args.json')
with open(path, 'w') as f:
json.dump(args, f, indent=4)
new_path = os.path.join(out_path, 'args')
shutil.move(path, new_path)
|
[
"def",
"save_args",
"(",
"conditions",
",",
"out_path",
")",
":",
"if",
"isinstance",
"(",
"conditions",
",",
"argparse",
".",
"Namespace",
")",
":",
"args",
"=",
"vars",
"(",
"conditions",
")",
"else",
":",
"args",
"=",
"conditions",
"try",
":",
"os",
".",
"makedirs",
"(",
"out_path",
")",
"except",
"OSError",
":",
"pass",
"with",
"tempdir",
"(",
"prefix",
"=",
"'args'",
",",
"dir",
"=",
"out_path",
")",
"as",
"tempd",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempd",
",",
"'args.json'",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"args",
",",
"f",
",",
"indent",
"=",
"4",
")",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_path",
",",
"'args'",
")",
"shutil",
".",
"move",
"(",
"path",
",",
"new_path",
")"
] |
A util function to save experiment condition for job table.
Args:
conditions (:class:`argparse.Namespace` or dict): Experiment conditions
            to show on a job table. Keys are shown as the table header and
            values are shown in a job row.
out_path (str): Output directory name to save conditions.
|
[
"A",
"util",
"function",
"to",
"save",
"experiment",
"condition",
"for",
"job",
"table",
"."
] |
87ad25e875bc332bfdad20197fd3d0cb81a078e8
|
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/utils/save_args.py#L9-L36
|
train
|
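A short sketch pairing save_args with argparse; the output directory is hypothetical:

import argparse
from chainerui.utils.save_args import save_args

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)
args = parser.parse_args([])
save_args(args, 'result/experiment-1')  # writes result/experiment-1/args as JSON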
sunt05/SuPy
|
src/supy/supy_misc.py
|
_path_insensitive
|
def _path_insensitive(path):
"""
Recursive part of path_insensitive to do the work.
"""
path = str(path)
if path == '' or os.path.exists(path):
return path
base = os.path.basename(path) # may be a directory or a file
dirname = os.path.dirname(path)
suffix = ''
if not base: # dir ends with a slash?
if len(dirname) < len(path):
suffix = path[:len(path) - len(dirname)]
base = os.path.basename(dirname)
dirname = os.path.dirname(dirname)
if not os.path.exists(dirname):
dirname = _path_insensitive(dirname)
if not dirname:
return
# at this point, the directory exists but not the file
try: # we are expecting dirname to be a directory, but it could be a file
files = os.listdir(dirname)
except OSError:
return
baselow = base.lower()
try:
basefinal = next(fl for fl in files if fl.lower() == baselow)
except StopIteration:
return
if basefinal:
return os.path.join(dirname, basefinal) + suffix
else:
return
|
python
|
def _path_insensitive(path):
"""
Recursive part of path_insensitive to do the work.
"""
path = str(path)
if path == '' or os.path.exists(path):
return path
base = os.path.basename(path) # may be a directory or a file
dirname = os.path.dirname(path)
suffix = ''
if not base: # dir ends with a slash?
if len(dirname) < len(path):
suffix = path[:len(path) - len(dirname)]
base = os.path.basename(dirname)
dirname = os.path.dirname(dirname)
if not os.path.exists(dirname):
dirname = _path_insensitive(dirname)
if not dirname:
return
# at this point, the directory exists but not the file
try: # we are expecting dirname to be a directory, but it could be a file
files = os.listdir(dirname)
except OSError:
return
baselow = base.lower()
try:
basefinal = next(fl for fl in files if fl.lower() == baselow)
except StopIteration:
return
if basefinal:
return os.path.join(dirname, basefinal) + suffix
else:
return
|
[
"def",
"_path_insensitive",
"(",
"path",
")",
":",
"path",
"=",
"str",
"(",
"path",
")",
"if",
"path",
"==",
"''",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"path",
"base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"# may be a directory or a file",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"suffix",
"=",
"''",
"if",
"not",
"base",
":",
"# dir ends with a slash?",
"if",
"len",
"(",
"dirname",
")",
"<",
"len",
"(",
"path",
")",
":",
"suffix",
"=",
"path",
"[",
":",
"len",
"(",
"path",
")",
"-",
"len",
"(",
"dirname",
")",
"]",
"base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dirname",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dirname",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"dirname",
"=",
"_path_insensitive",
"(",
"dirname",
")",
"if",
"not",
"dirname",
":",
"return",
"# at this point, the directory exists but not the file",
"try",
":",
"# we are expecting dirname to be a directory, but it could be a file",
"files",
"=",
"os",
".",
"listdir",
"(",
"dirname",
")",
"except",
"OSError",
":",
"return",
"baselow",
"=",
"base",
".",
"lower",
"(",
")",
"try",
":",
"basefinal",
"=",
"next",
"(",
"fl",
"for",
"fl",
"in",
"files",
"if",
"fl",
".",
"lower",
"(",
")",
"==",
"baselow",
")",
"except",
"StopIteration",
":",
"return",
"if",
"basefinal",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"basefinal",
")",
"+",
"suffix",
"else",
":",
"return"
] |
Recursive part of path_insensitive to do the work.
|
[
"Recursive",
"part",
"of",
"path_insensitive",
"to",
"do",
"the",
"work",
"."
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_misc.py#L34-L74
|
train
|
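A sketch of the case-insensitive resolution, assuming the module is importable under the path shown above (the leading underscore marks it private). On a case-insensitive filesystem the query path already exists and is returned unchanged:

import os
from supy.supy_misc import _path_insensitive

os.makedirs('/tmp/Demo', exist_ok=True)
open('/tmp/Demo/File.TXT', 'w').close()
print(_path_insensitive('/tmp/demo/file.txt'))  # '/tmp/Demo/File.TXT' on a case-sensitive filesystem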
sunt05/SuPy
|
docs/source/proc_var_info/nml_rst_proc.py
|
form_option
|
def form_option(str_opt):
'''generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str
'''
str_base = '#cmdoption-arg-'
str_opt_x = str_base+str_opt.lower()\
.replace('_', '-')\
.replace('(', '-')\
.replace(')', '')
return str_opt_x
|
python
|
def form_option(str_opt):
'''generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str
'''
str_base = '#cmdoption-arg-'
str_opt_x = str_base+str_opt.lower()\
.replace('_', '-')\
.replace('(', '-')\
.replace(')', '')
return str_opt_x
|
[
"def",
"form_option",
"(",
"str_opt",
")",
":",
"str_base",
"=",
"'#cmdoption-arg-'",
"str_opt_x",
"=",
"str_base",
"+",
"str_opt",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
".",
"replace",
"(",
"'('",
",",
"'-'",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"return",
"str_opt_x"
] |
generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str
|
[
"generate",
"option",
"name",
"based",
"suffix",
"for",
"URL"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/nml_rst_proc.py#L83-L97
|
train
|
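Two illustrative option names (hypothetical) run through the transformation, assuming form_option is in scope:

print(form_option('SnowUse'))  # '#cmdoption-arg-snowuse'
print(form_option('LAI(1)'))   # '#cmdoption-arg-lai-1'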
sunt05/SuPy
|
docs/source/proc_var_info/nml_rst_proc.py
|
gen_url_option
|
def gen_url_option(
str_opt,
set_site=set_site,
set_runcontrol=set_runcontrol,
set_initcond=set_initcond,
source='docs'):
'''construct a URL for option based on source
    :param str_opt: option name, defaults to ''
    :type str_opt: str, optional
    :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs'
    :type source: str, optional
:return: a valid URL pointing to the option related resources
:rtype: urlpath.URL
'''
dict_base = {
'docs': URL('https://suews-docs.readthedocs.io/en/latest/input_files/'),
'github': URL('https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'),
}
url_base = dict_base[source]
url_page = choose_page(
str_opt, set_site, set_runcontrol, set_initcond, source=source)
# print('str_opt', str_opt, url_base, url_page)
str_opt_x = form_option(str_opt)
url_opt = url_base/(url_page+str_opt_x)
return url_opt
|
python
|
def gen_url_option(
str_opt,
set_site=set_site,
set_runcontrol=set_runcontrol,
set_initcond=set_initcond,
source='docs'):
'''construct a URL for option based on source
    :param str_opt: option name, defaults to ''
    :type str_opt: str, optional
    :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs'
    :type source: str, optional
:return: a valid URL pointing to the option related resources
:rtype: urlpath.URL
'''
dict_base = {
'docs': URL('https://suews-docs.readthedocs.io/en/latest/input_files/'),
'github': URL('https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'),
}
url_base = dict_base[source]
url_page = choose_page(
str_opt, set_site, set_runcontrol, set_initcond, source=source)
# print('str_opt', str_opt, url_base, url_page)
str_opt_x = form_option(str_opt)
url_opt = url_base/(url_page+str_opt_x)
return url_opt
|
[
"def",
"gen_url_option",
"(",
"str_opt",
",",
"set_site",
"=",
"set_site",
",",
"set_runcontrol",
"=",
"set_runcontrol",
",",
"set_initcond",
"=",
"set_initcond",
",",
"source",
"=",
"'docs'",
")",
":",
"dict_base",
"=",
"{",
"'docs'",
":",
"URL",
"(",
"'https://suews-docs.readthedocs.io/en/latest/input_files/'",
")",
",",
"'github'",
":",
"URL",
"(",
"'https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'",
")",
",",
"}",
"url_base",
"=",
"dict_base",
"[",
"source",
"]",
"url_page",
"=",
"choose_page",
"(",
"str_opt",
",",
"set_site",
",",
"set_runcontrol",
",",
"set_initcond",
",",
"source",
"=",
"source",
")",
"# print('str_opt', str_opt, url_base, url_page)",
"str_opt_x",
"=",
"form_option",
"(",
"str_opt",
")",
"url_opt",
"=",
"url_base",
"/",
"(",
"url_page",
"+",
"str_opt_x",
")",
"return",
"url_opt"
] |
construct a URL for option based on source
    :param str_opt: option name, defaults to ''
    :type str_opt: str, optional
    :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs'
    :type source: str, optional
:return: a valid URL pointing to the option related resources
:rtype: urlpath.URL
|
[
"construct",
"a",
"URL",
"for",
"option",
"based",
"on",
"source"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/nml_rst_proc.py#L154-L180
|
train
|
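A sketch of how the pieces compose; it is runnable only inside the module, since it relies on the module-level option sets, and the option name is hypothetical:

url = gen_url_option('SnowUse', source='docs')
# -> https://suews-docs.readthedocs.io/en/latest/input_files/<page>#cmdoption-arg-snowuse
# where <page> is picked by choose_page() from whichever set contains the option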
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_forcing_output_csv.py
|
gen_df_forcing
|
def gen_df_forcing(
path_csv_in='SSss_YYYY_data_tt.csv',
url_base=url_repo_input,)->pd.DataFrame:
'''Generate description info of supy forcing data into a dataframe
Parameters
----------
path_csv_in : str, optional
        path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv')
url_base : urlpath.URL, optional
URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy forcing data
'''
try:
# load info from SUEWS docs repo
# this is regarded as the official source
urlpath_table = url_base/path_csv_in
df_var_info = pd.read_csv(urlpath_table)
except:
        print(f'{urlpath_table} does not exist!')
else:
# clean info dataframe
df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1)
# set index with `Column name`
df_var_forcing = df_var_forcing.set_index('Column Name')
df_var_forcing.index = df_var_forcing.index\
.map(lambda x: x.replace('`', ''))\
.rename('variable')
# add `Second` info
df_var_forcing.loc['isec'] = 'Second [S]'
return df_var_forcing
|
python
|
def gen_df_forcing(
path_csv_in='SSss_YYYY_data_tt.csv',
url_base=url_repo_input,)->pd.DataFrame:
'''Generate description info of supy forcing data into a dataframe
Parameters
----------
path_csv_in : str, optional
        path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv')
url_base : urlpath.URL, optional
URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy forcing data
'''
try:
# load info from SUEWS docs repo
# this is regarded as the official source
urlpath_table = url_base/path_csv_in
df_var_info = pd.read_csv(urlpath_table)
except:
        print(f'{urlpath_table} does not exist!')
else:
# clean info dataframe
df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1)
# set index with `Column name`
df_var_forcing = df_var_forcing.set_index('Column Name')
df_var_forcing.index = df_var_forcing.index\
.map(lambda x: x.replace('`', ''))\
.rename('variable')
# add `Second` info
df_var_forcing.loc['isec'] = 'Second [S]'
return df_var_forcing
|
[
"def",
"gen_df_forcing",
"(",
"path_csv_in",
"=",
"'SSss_YYYY_data_tt.csv'",
",",
"url_base",
"=",
"url_repo_input",
",",
")",
"->",
"pd",
".",
"DataFrame",
":",
"try",
":",
"# load info from SUEWS docs repo",
"# this is regarded as the official source",
"urlpath_table",
"=",
"url_base",
"/",
"path_csv_in",
"df_var_info",
"=",
"pd",
".",
"read_csv",
"(",
"urlpath_table",
")",
"except",
":",
"print",
"(",
"f'{urlpath_table} not existing!'",
")",
"else",
":",
"# clean info dataframe",
"df_var_forcing",
"=",
"df_var_info",
".",
"drop",
"(",
"[",
"'No.'",
",",
"'Use'",
"]",
",",
"axis",
"=",
"1",
")",
"# set index with `Column name`",
"df_var_forcing",
"=",
"df_var_forcing",
".",
"set_index",
"(",
"'Column Name'",
")",
"df_var_forcing",
".",
"index",
"=",
"df_var_forcing",
".",
"index",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"replace",
"(",
"'`'",
",",
"''",
")",
")",
".",
"rename",
"(",
"'variable'",
")",
"# add `Second` info",
"df_var_forcing",
".",
"loc",
"[",
"'isec'",
"]",
"=",
"'Second [S]'",
"return",
"df_var_forcing"
] |
Generate description info of supy forcing data into a dataframe
Parameters
----------
path_csv_in : str, optional
path to the input csv file relative to url_base (the default is 'SSss_YYYY_data_tt.csv')
url_base : urlpath.URL, optional
URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy forcing data
|
[
"Generate",
"description",
"info",
"of",
"supy",
"forcing",
"data",
"into",
"a",
"dataframe"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_forcing_output_csv.py#L38-L76
|
train
|
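A minimal, self-contained sketch of the clean-up steps in gen_df_forcing, assuming only pandas; the inline CSV below is a made-up stand-in for the remote SSss_YYYY_data_tt.csv table:

import io

import pandas as pd

# toy stand-in for the remote forcing meta table
csv_text = io.StringIO(
    "No.,Column Name,Use,Description\n"
    "1,`iy`,MU,Year [YYYY]\n"
    "2,`id`,MU,Day of year [DOY]\n"
)
df_var_info = pd.read_csv(csv_text)
# drop bookkeeping columns, as gen_df_forcing does
df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1)
df_var_forcing = df_var_forcing.set_index('Column Name')
# strip RST backticks from the index and rename it 'variable'
df_var_forcing.index = (df_var_forcing.index
                        .map(lambda x: x.replace('`', ''))
                        .rename('variable'))
print(df_var_forcing)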
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_forcing_output_csv.py
|
gen_df_output
|
def gen_df_output(
list_csv_in=[
'SSss_YYYY_SUEWS_TT.csv',
'SSss_DailyState.csv',
'SSss_YYYY_snow_TT.csv',
],
url_base=url_repo_output)->Path:
'''Generate description info of supy output results into dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',])
url_base : URL, optional
URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy output results
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat(
[pd.read_csv(f) for f in list_url_table],
sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} not existing!')
else:
# clean meta info
df_var_info_x = df_var_info\
.set_index('Name')\
.loc[:, ['Description']]\
.drop_duplicates()
df_var_output = df_var_info_x\
.copy()\
.assign(lower=df_var_info_x.index.str.lower())\
.reset_index()\
.set_index('lower')
df_var_group = df_output_sample.columns.to_frame()
df_var_group.index = df_var_group.index.droplevel(0).rename('Name')
# wrap into a dataframe
df_var_output = df_var_group\
.merge(
df_var_output.set_index('Name'),
left_on='Name',
right_on='Name')\
.rename(columns={
'var': 'variable',
'group': 'Group',
})\
.set_index('variable')\
.drop_duplicates()
return df_var_output
|
python
|
def gen_df_output(
list_csv_in=[
'SSss_YYYY_SUEWS_TT.csv',
'SSss_DailyState.csv',
'SSss_YYYY_snow_TT.csv',
],
url_base=url_repo_output)->Path:
'''Generate description info of supy output results into dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',])
url_base : URL, optional
URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy output results
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat(
[pd.read_csv(f) for f in list_url_table],
sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} not existing!')
else:
# clean meta info
df_var_info_x = df_var_info\
.set_index('Name')\
.loc[:, ['Description']]\
.drop_duplicates()
df_var_output = df_var_info_x\
.copy()\
.assign(lower=df_var_info_x.index.str.lower())\
.reset_index()\
.set_index('lower')
df_var_group = df_output_sample.columns.to_frame()
df_var_group.index = df_var_group.index.droplevel(0).rename('Name')
# wrap into a dataframe
df_var_output = df_var_group\
.merge(
df_var_output.set_index('Name'),
left_on='Name',
right_on='Name')\
.rename(columns={
'var': 'variable',
'group': 'Group',
})\
.set_index('variable')\
.drop_duplicates()
return df_var_output
|
[
"def",
"gen_df_output",
"(",
"list_csv_in",
"=",
"[",
"'SSss_YYYY_SUEWS_TT.csv'",
",",
"'SSss_DailyState.csv'",
",",
"'SSss_YYYY_snow_TT.csv'",
",",
"]",
",",
"url_base",
"=",
"url_repo_output",
")",
"->",
"Path",
":",
"# list of URLs",
"list_url_table",
"=",
"[",
"url_base",
"/",
"table",
"for",
"table",
"in",
"list_csv_in",
"]",
"try",
":",
"df_var_info",
"=",
"pd",
".",
"concat",
"(",
"[",
"pd",
".",
"read_csv",
"(",
"f",
")",
"for",
"f",
"in",
"list_url_table",
"]",
",",
"sort",
"=",
"False",
")",
"except",
":",
"for",
"url",
"in",
"list_url_table",
":",
"if",
"not",
"url",
".",
"get",
"(",
")",
".",
"ok",
":",
"print",
"(",
"f'{url} not existing!'",
")",
"else",
":",
"# clean meta info",
"df_var_info_x",
"=",
"df_var_info",
".",
"set_index",
"(",
"'Name'",
")",
".",
"loc",
"[",
":",
",",
"[",
"'Description'",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"df_var_output",
"=",
"df_var_info_x",
".",
"copy",
"(",
")",
".",
"assign",
"(",
"lower",
"=",
"df_var_info_x",
".",
"index",
".",
"str",
".",
"lower",
"(",
")",
")",
".",
"reset_index",
"(",
")",
".",
"set_index",
"(",
"'lower'",
")",
"df_var_group",
"=",
"df_output_sample",
".",
"columns",
".",
"to_frame",
"(",
")",
"df_var_group",
".",
"index",
"=",
"df_var_group",
".",
"index",
".",
"droplevel",
"(",
"0",
")",
".",
"rename",
"(",
"'Name'",
")",
"# wrap into a dataframe",
"df_var_output",
"=",
"df_var_group",
".",
"merge",
"(",
"df_var_output",
".",
"set_index",
"(",
"'Name'",
")",
",",
"left_on",
"=",
"'Name'",
",",
"right_on",
"=",
"'Name'",
")",
".",
"rename",
"(",
"columns",
"=",
"{",
"'var'",
":",
"'variable'",
",",
"'group'",
":",
"'Group'",
",",
"}",
")",
".",
"set_index",
"(",
"'variable'",
")",
".",
"drop_duplicates",
"(",
")",
"return",
"df_var_output"
] |
Generate description info of supy output results into dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',])
url_base : URL, optional
URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy output results
|
[
"Generate",
"description",
"info",
"of",
"supy",
"output",
"results",
"into",
"dataframe"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_forcing_output_csv.py#L84-L147
|
train
|
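A small sketch of the merge logic in gen_df_output, with toy frames standing in for the remote SUEWS output tables:

import pandas as pd

# toy stand-ins for two output meta tables, with one overlapping entry
df_a = pd.DataFrame({'Name': ['QN', 'QF'],
                     'Description': ['Net all-wave radiation',
                                     'Anthropogenic heat flux']})
df_b = pd.DataFrame({'Name': ['QN'],
                     'Description': ['Net all-wave radiation']})
df_var_info = pd.concat([df_a, df_b], sort=False)
# de-duplicate descriptions keyed by variable name
df_var_info_x = (df_var_info.set_index('Name')
                 .loc[:, ['Description']]
                 .drop_duplicates())
# add a lower-case lookup key, as gen_df_output does before merging
df_var_output = (df_var_info_x
                 .assign(lower=df_var_info_x.index.str.lower())
                 .reset_index()
                 .set_index('lower'))
print(df_var_output)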
sunt05/SuPy
|
docs/source/proc_var_info/gen_rst.py
|
gen_opt_str
|
def gen_opt_str(ser_rec: pd.Series)->str:
'''generate rst option string
Parameters
----------
ser_rec : pd.Series
record for specifications
Returns
-------
str
rst string
'''
name = ser_rec.name
indent = r' '
str_opt = f'.. option:: {name}'+'\n\n'
for spec in ser_rec.sort_index().index:
str_opt += indent+f':{spec}:'+'\n'
spec_content = ser_rec[spec]
str_opt += indent+indent+f'{spec_content}'+'\n'
return str_opt
|
python
|
def gen_opt_str(ser_rec: pd.Series)->str:
'''generate rst option string
Parameters
----------
ser_rec : pd.Series
record for specifications
Returns
-------
str
rst string
'''
name = ser_rec.name
indent = r' '
str_opt = f'.. option:: {name}'+'\n\n'
for spec in ser_rec.sort_index().index:
str_opt += indent+f':{spec}:'+'\n'
spec_content = ser_rec[spec]
str_opt += indent+indent+f'{spec_content}'+'\n'
return str_opt
|
[
"def",
"gen_opt_str",
"(",
"ser_rec",
":",
"pd",
".",
"Series",
")",
"->",
"str",
":",
"name",
"=",
"ser_rec",
".",
"name",
"indent",
"=",
"r' '",
"str_opt",
"=",
"f'.. option:: {name}'",
"+",
"'\\n\\n'",
"for",
"spec",
"in",
"ser_rec",
".",
"sort_index",
"(",
")",
".",
"index",
":",
"str_opt",
"+=",
"indent",
"+",
"f':{spec}:'",
"+",
"'\\n'",
"spec_content",
"=",
"ser_rec",
"[",
"spec",
"]",
"str_opt",
"+=",
"indent",
"+",
"indent",
"+",
"f'{spec_content}'",
"+",
"'\\n'",
"return",
"str_opt"
] |
generate rst option string
Parameters
----------
ser_rec : pd.Series
record for specifications
Returns
-------
str
rst string
|
[
"generate",
"rst",
"option",
"string"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_rst.py#L71-L92
|
train
|
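gen_opt_str only needs a named pandas Series, so its RST rendering can be reproduced verbatim in a standalone snippet (the specs below are made up):

import pandas as pd

ser_rec = pd.Series({'Description': 'Net all-wave radiation',
                     'Unit': 'W m-2'},
                    name='QN')
indent = r'    '
str_opt = f'.. option:: {ser_rec.name}' + '\n\n'
for spec in ser_rec.sort_index().index:
    str_opt += indent + f':{spec}:' + '\n'
    str_opt += indent + indent + f'{ser_rec[spec]}' + '\n'
print(str_opt)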
sunt05/SuPy
|
src/supy/supy_module.py
|
init_supy
|
def init_supy(path_init: str)->pd.DataFrame:
'''Initialise supy by loading initial model states.
Parameters
----------
path_init : str
Path to a file that can initialise SuPy, which can be either of the following:
* SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations
* SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy`
Returns
-------
df_state_init: pandas.DataFrame
Initial model states.
See `df_state_var` for details.
Examples
--------
1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy
>>> path_init = "~/SUEWS_sims/RunControl.nml"
>>> df_state_init = supy.init_supy(path_init)
2. Use ``df_state.csv`` to initialise SuPy
>>> path_init = "~/SuPy_res/df_state_test.csv"
>>> df_state_init = supy.init_supy(path_init)
'''
try:
path_init_x = Path(path_init).expanduser().resolve()
except FileNotFoundError:
print('{path} does not exist!'.format(path=path_init))
else:
if path_init_x.suffix == '.nml':
# SUEWS `RunControl.nml`:
df_state_init = load_InitialCond_grid_df(path_init_x)
elif path_init_x.suffix == '.csv':
# SuPy `df_state.csv`:
df_state_init = load_df_state(path_init_x)
else:
print('{path} is NOT a valid file to initialise SuPy!'.format(
path=path_init_x))
sys.exit()
return df_state_init
|
python
|
def init_supy(path_init: str)->pd.DataFrame:
'''Initialise supy by loading initial model states.
Parameters
----------
path_init : str
Path to a file that can initialise SuPy, which can be either of the following:
* SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations
* SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy`
Returns
-------
df_state_init: pandas.DataFrame
Initial model states.
See `df_state_var` for details.
Examples
--------
1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy
>>> path_init = "~/SUEWS_sims/RunControl.nml"
>>> df_state_init = supy.init_supy(path_init)
2. Use ``df_state.csv`` to initialise SuPy
>>> path_init = "~/SuPy_res/df_state_test.csv"
>>> df_state_init = supy.init_supy(path_init)
'''
try:
path_init_x = Path(path_init).expanduser().resolve()
except FileNotFoundError:
print('{path} does not exist!'.format(path=path_init))
else:
if path_init_x.suffix == '.nml':
# SUEWS `RunControl.nml`:
df_state_init = load_InitialCond_grid_df(path_init_x)
elif path_init_x.suffix == '.csv':
# SuPy `df_state.csv`:
df_state_init = load_df_state(path_init_x)
else:
print('{path} is NOT a valid file to initialise SuPy!'.format(
path=path_init_x))
sys.exit()
return df_state_init
|
[
"def",
"init_supy",
"(",
"path_init",
":",
"str",
")",
"->",
"pd",
".",
"DataFrame",
":",
"try",
":",
"path_init_x",
"=",
"Path",
"(",
"path_init",
")",
".",
"expanduser",
"(",
")",
".",
"resolve",
"(",
")",
"except",
"FileNotFoundError",
":",
"print",
"(",
"'{path} does not exists!'",
".",
"format",
"(",
"path",
"=",
"path_init_x",
")",
")",
"else",
":",
"if",
"path_init_x",
".",
"suffix",
"==",
"'.nml'",
":",
"# SUEWS `RunControl.nml`:",
"df_state_init",
"=",
"load_InitialCond_grid_df",
"(",
"path_init_x",
")",
"elif",
"path_init_x",
".",
"suffix",
"==",
"'.csv'",
":",
"# SuPy `df_state.csv`:",
"df_state_init",
"=",
"load_df_state",
"(",
"path_init_x",
")",
"else",
":",
"print",
"(",
"'{path} is NOT a valid file to initialise SuPy!'",
".",
"format",
"(",
"path",
"=",
"path_init_x",
")",
")",
"sys",
".",
"exit",
"(",
")",
"return",
"df_state_init"
] |
Initialise supy by loading initial model states.
Parameters
----------
path_init : str
Path to a file that can initialise SuPy, which can be either of the following:
* SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations
* SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy`
Returns
-------
df_state_init: pandas.DataFrame
Initial model states.
See `df_state_var` for details.
Examples
--------
1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy
>>> path_init = "~/SUEWS_sims/RunControl.nml"
>>> df_state_init = supy.init_supy(path_init)
2. Use ``df_state.csv`` to initialise SuPy
>>> path_init = "~/SuPy_res/df_state_test.csv"
>>> df_state_init = supy.init_supy(path_init)
|
[
"Initialise",
"supy",
"by",
"loading",
"initial",
"model",
"states",
"."
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L50-L95
|
train
|
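The dispatch in init_supy boils down to branching on the resolved file suffix; a sketch with stub loaders in place of load_InitialCond_grid_df and load_df_state:

from pathlib import Path

def load_nml(path):
    # stub for load_InitialCond_grid_df
    return f'states from {path.name}'

def load_csv(path):
    # stub for load_df_state
    return f'states from {path.name}'

def init_states(path_init: str):
    path_x = Path(path_init).expanduser().resolve()
    if path_x.suffix == '.nml':
        return load_nml(path_x)
    elif path_x.suffix == '.csv':
        return load_csv(path_x)
    raise ValueError(f'{path_x} is not a valid file to initialise SuPy!')

print(init_states('./RunControl.nml'))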
sunt05/SuPy
|
src/supy/supy_module.py
|
load_SampleData
|
def load_SampleData()->Tuple[pandas.DataFrame, pandas.DataFrame]:
'''Load sample data for quickly starting a demo run.
Returns
-------
df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame]
- df_state_init: `initial model states <df_state_var>`
- df_forcing: `forcing data <df_forcing_var>`
Examples
--------
>>> df_state_init, df_forcing = supy.load_SampleData()
'''
path_SampleData = Path(path_supy_module) / 'sample_run'
path_runcontrol = path_SampleData / 'RunControl.nml'
df_state_init = init_supy(path_runcontrol)
# path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath']
df_forcing = load_forcing_grid(
path_runcontrol,
df_state_init.index[0]
)
return df_state_init, df_forcing
|
python
|
def load_SampleData()->Tuple[pandas.DataFrame, pandas.DataFrame]:
'''Load sample data for quickly starting a demo run.
Returns
-------
df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame]
- df_state_init: `initial model states <df_state_var>`
- df_forcing: `forcing data <df_forcing_var>`
Examples
--------
>>> df_state_init, df_forcing = supy.load_SampleData()
'''
path_SampleData = Path(path_supy_module) / 'sample_run'
path_runcontrol = path_SampleData / 'RunControl.nml'
df_state_init = init_supy(path_runcontrol)
# path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath']
df_forcing = load_forcing_grid(
path_runcontrol,
df_state_init.index[0]
)
return df_state_init, df_forcing
|
[
"def",
"load_SampleData",
"(",
")",
"->",
"Tuple",
"[",
"pandas",
".",
"DataFrame",
",",
"pandas",
".",
"DataFrame",
"]",
":",
"path_SampleData",
"=",
"Path",
"(",
"path_supy_module",
")",
"/",
"'sample_run'",
"path_runcontrol",
"=",
"path_SampleData",
"/",
"'RunControl.nml'",
"df_state_init",
"=",
"init_supy",
"(",
"path_runcontrol",
")",
"# path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath']",
"df_forcing",
"=",
"load_forcing_grid",
"(",
"path_runcontrol",
",",
"df_state_init",
".",
"index",
"[",
"0",
"]",
")",
"return",
"df_state_init",
",",
"df_forcing"
] |
Load sample data for quickly starting a demo run.
Returns
-------
df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame]
- df_state_init: `initial model states <df_state_var>`
- df_forcing: `forcing data <df_forcing_var>`
Examples
--------
>>> df_state_init, df_forcing = supy.load_SampleData()
|
[
"Load",
"sample",
"data",
"for",
"quickly",
"starting",
"a",
"demo",
"run",
"."
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L218-L242
|
train
|
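load_SampleData pairs naturally with the runner; a quick-start sketch, assuming supy is installed and exposes run_supy (its companion runner, not excerpted here):

import supy as sp

df_state_init, df_forcing = sp.load_SampleData()
# run the sample simulation and inspect the output
df_output, df_state_final = sp.run_supy(df_forcing, df_state_init)
print(df_output.head())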
sunt05/SuPy
|
src/supy/supy_module.py
|
save_supy
|
def save_supy(
df_output: pandas.DataFrame,
df_state_final: pandas.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: str = Path('.'),
path_runcontrol: str = None,)->list:
'''Save SuPy run results to files
Parameters
----------
df_output : pandas.DataFrame
DataFrame of output
df_state_final : pandas.DataFrame
DataFrame of final model states
freq_s : int, optional
Output frequency in seconds (the default is 3600, which indicates hourly output)
site : str, optional
Site identifier (the default is '', which indicates site identifier will be left empty)
path_dir_save : str, optional
Path to directory to saving the files (the default is Path('.'), which indicates the current working directory)
path_runcontrol : str, optional
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be used preferentially to derive `freq_s`, `site` and `path_dir_save`.
(the default is None, which is unset)
Returns
-------
list
a list of paths of saved files
Examples
--------
1. save results of a supy run to the current working directory with default settings
>>> list_path_save = supy.save_supy(df_output, df_state_final)
2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>`
>>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml')
3. save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir'
>>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir')
'''
# get necessary information for saving procedure
if path_runcontrol is not None:
freq_s, path_dir_save, site = get_save_info(path_runcontrol)
# save df_output to several files
list_path_save = save_df_output(df_output, freq_s, site, path_dir_save)
# save df_state
path_state_save = save_df_state(df_state_final, site, path_dir_save)
# update list_path_save
list_path_save.append(path_state_save)
return list_path_save
|
python
|
def save_supy(
df_output: pandas.DataFrame,
df_state_final: pandas.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: str = Path('.'),
path_runcontrol: str = None,)->list:
'''Save SuPy run results to files
Parameters
----------
df_output : pandas.DataFrame
DataFrame of output
df_state_final : pandas.DataFrame
DataFrame of final model states
freq_s : int, optional
Output frequency in seconds (the default is 3600, which indicates hourly output)
site : str, optional
Site identifier (the default is '', which indicates site identifier will be left empty)
path_dir_save : str, optional
Path to directory to saving the files (the default is Path('.'), which indicates the current working directory)
path_runcontrol : str, optional
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be used preferentially to derive `freq_s`, `site` and `path_dir_save`.
(the default is None, which is unset)
Returns
-------
list
a list of paths of saved files
Examples
--------
1. save results of a supy run to the current working directory with default settings
>>> list_path_save = supy.save_supy(df_output, df_state_final)
2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>`
>>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml')
3. save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir'
>>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir')
'''
# get necessary information for saving procedure
if path_runcontrol is not None:
freq_s, path_dir_save, site = get_save_info(path_runcontrol)
# save df_output to several files
list_path_save = save_df_output(df_output, freq_s, site, path_dir_save)
# save df_state
path_state_save = save_df_state(df_state_final, site, path_dir_save)
# update list_path_save
list_path_save.append(path_state_save)
return list_path_save
|
[
"def",
"save_supy",
"(",
"df_output",
":",
"pandas",
".",
"DataFrame",
",",
"df_state_final",
":",
"pandas",
".",
"DataFrame",
",",
"freq_s",
":",
"int",
"=",
"3600",
",",
"site",
":",
"str",
"=",
"''",
",",
"path_dir_save",
":",
"str",
"=",
"Path",
"(",
"'.'",
")",
",",
"path_runcontrol",
":",
"str",
"=",
"None",
",",
")",
"->",
"list",
":",
"# get necessary information for saving procedure",
"if",
"path_runcontrol",
"is",
"not",
"None",
":",
"freq_s",
",",
"path_dir_save",
",",
"site",
"=",
"get_save_info",
"(",
"path_runcontrol",
")",
"# save df_output to several files",
"list_path_save",
"=",
"save_df_output",
"(",
"df_output",
",",
"freq_s",
",",
"site",
",",
"path_dir_save",
")",
"# save df_state",
"path_state_save",
"=",
"save_df_state",
"(",
"df_state_final",
",",
"site",
",",
"path_dir_save",
")",
"# update list_path_save",
"list_path_save",
".",
"append",
"(",
"path_state_save",
")",
"return",
"list_path_save"
] |
Save SuPy run results to files
Parameters
----------
df_output : pandas.DataFrame
DataFrame of output
df_state_final : pandas.DataFrame
DataFrame of final model states
freq_s : int, optional
Output frequency in seconds (the default is 3600, which indicates hourly output)
site : str, optional
Site identifier (the default is '', which indicates site identifier will be left empty)
path_dir_save : str, optional
Path to directory to saving the files (the default is Path('.'), which indicates the current working directory)
path_runcontrol : str, optional
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be used preferentially to derive `freq_s`, `site` and `path_dir_save`.
(the default is None, which is unset)
Returns
-------
list
a list of paths of saved files
Examples
--------
1. save results of a supy run to the current working directory with default settings
>>> list_path_save = supy.save_supy(df_output, df_state_final)
2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>`
>>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml')
3. save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir'
>>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir')
|
[
"Save",
"SuPy",
"run",
"results",
"to",
"files"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L488-L547
|
train
|
sunt05/SuPy
|
src/supy/supy_load.py
|
load_df_state
|
def load_df_state(path_csv: Path)->pd.DataFrame:
'''load `df_state` from `path_csv`
Parameters
----------
path_csv : Path
path to the csv file that stores `df_state` produced by a supy run
Returns
-------
pd.DataFrame
`df_state` produced by a supy run
'''
df_state = pd.read_csv(
path_csv,
header=[0, 1],
index_col=[0, 1],
parse_dates=True,
infer_datetime_format=True,
)
return df_state
|
python
|
def load_df_state(path_csv: Path)->pd.DataFrame:
'''load `df_state` from `path_csv`
Parameters
----------
path_csv : Path
path to the csv file that stores `df_state` produced by a supy run
Returns
-------
pd.DataFrame
`df_state` produced by a supy run
'''
df_state = pd.read_csv(
path_csv,
header=[0, 1],
index_col=[0, 1],
parse_dates=True,
infer_datetime_format=True,
)
return df_state
|
[
"def",
"load_df_state",
"(",
"path_csv",
":",
"Path",
")",
"->",
"pd",
".",
"DataFrame",
":",
"df_state",
"=",
"pd",
".",
"read_csv",
"(",
"path_csv",
",",
"header",
"=",
"[",
"0",
",",
"1",
"]",
",",
"index_col",
"=",
"[",
"0",
",",
"1",
"]",
",",
"parse_dates",
"=",
"True",
",",
"infer_datetime_format",
"=",
"True",
",",
")",
"return",
"df_state"
] |
load `df_state` from `path_csv`
Parameters
----------
path_csv : Path
path to the csv file that stores `df_state` produced by a supy run
Returns
-------
pd.DataFrame
`df_state` produced by a supy run
|
[
"load",
"df_state",
"from",
"path_csv"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_load.py#L1600-L1621
|
train
|
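A self-contained round trip showing why load_df_state needs header=[0, 1] and index_col=[0, 1]: df_state carries two-level columns and a two-level index. The level names used here are illustrative assumptions, not read from the source:

import io

import pandas as pd

columns = pd.MultiIndex.from_tuples([('lat', '0'), ('lng', '0')],
                                    names=['var', 'ind_dim'])
index = pd.MultiIndex.from_tuples([('2012-01-01', 1)],
                                  names=['datetime', 'grid'])
df_state = pd.DataFrame([[51.5, -0.1]], index=index, columns=columns)
# write and read back through an in-memory buffer
buf = io.StringIO(df_state.to_csv())
df_back = pd.read_csv(buf, header=[0, 1], index_col=[0, 1])
print(df_back)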
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_state_csv.py
|
extract_var_suews
|
def extract_var_suews(dict_var_full: dict, var_supy: str)->list:
'''extract related SUEWS variables for a supy variable `var_supy`
Parameters
----------
dict_var_full : dict
dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect)
var_supy : str
supy variable name
Returns
-------
list
related SUEWS variables for `var_supy`
'''
x = sp.supy_load.flatten_list(dict_var_full[var_supy])
x = np.unique(x)
x = [
xx for xx in x
if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)]
]
x = [xx for xx in x if 'Code' not in xx]
return x
|
python
|
def extract_var_suews(dict_var_full: dict, var_supy: str)->list:
'''extract related SUEWS variables for a supy variable `var_supy`
Parameters
----------
dict_var_full : dict
dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect)
var_supy : str
supy variable name
Returns
-------
list
related SUEWS variables for `var_supy`
'''
x = sp.supy_load.flatten_list(dict_var_full[var_supy])
x = np.unique(x)
x = [
xx for xx in x
if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)]
]
x = [xx for xx in x if 'Code' not in xx]
return x
|
[
"def",
"extract_var_suews",
"(",
"dict_var_full",
":",
"dict",
",",
"var_supy",
":",
"str",
")",
"->",
"list",
":",
"x",
"=",
"sp",
".",
"supy_load",
".",
"flatten_list",
"(",
"dict_var_full",
"[",
"var_supy",
"]",
")",
"x",
"=",
"np",
".",
"unique",
"(",
"x",
")",
"x",
"=",
"[",
"xx",
"for",
"xx",
"in",
"x",
"if",
"xx",
"not",
"in",
"[",
"'base'",
",",
"'const'",
",",
"'0.0'",
"]",
"+",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"24",
")",
"]",
"]",
"x",
"=",
"[",
"xx",
"for",
"xx",
"in",
"x",
"if",
"'Code'",
"not",
"in",
"xx",
"]",
"return",
"x"
] |
extract related SUEWS variables for a supy variable `var_supy`
Parameters
----------
dict_var_full : dict
dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect)
var_supy : str
supy variable name
Returns
-------
list
related SUEWS variables for `var_supy`
|
[
"extract",
"related",
"SUEWS",
"variables",
"for",
"a",
"supy",
"variable",
"var_supy"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L56-L79
|
train
|
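The filtering in extract_var_suews can be exercised standalone; flatten_list is re-implemented locally since sp.supy_load is not imported here:

import numpy as np

def flatten_list(nested):
    # recursive flattening of arbitrarily nested lists
    flat = []
    for item in nested:
        if isinstance(item, list):
            flat.extend(flatten_list(item))
        else:
            flat.append(item)
    return flat

raw = [['lat', 'base'], ['lng', ['Code_Paved', '0.0', '3']]]
x = flatten_list(raw)
x = np.unique(x)
# drop placeholders and hour indices 0..23, then code references
x = [xx for xx in x
     if xx not in ['base', 'const', '0.0'] + [str(i) for i in range(24)]]
x = [xx for xx in x if 'Code' not in xx]
print(x)  # ['lat', 'lng']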
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_state_csv.py
|
gen_df_site
|
def gen_df_site(
list_csv_in=list_table,
url_base=url_repo_input_site)->pd.DataFrame:
'''Generate description info of supy site characteristics as a dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is list_table, which is defined at the top of this file)
url_base : URL, optional
URL to the input dir of repo base (the default is url_repo_input_site, which is defined at the top of this file)
Returns
-------
pd.DataFrame
description info of supy site-characteristics variables
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table])
# df_var_info = pd.concat(
# [pd.read_csv(f) for f in list_url_table],
# sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} not existing!')
else:
# clean meta info
df_var_info_x = df_var_info\
.drop(['No.', 'Use'], axis=1)\
.set_index('Column Name')
df_var_info_x.index = df_var_info_x.index.map(
lambda x: x.replace('`', ''))
# retrieve SUEWS-related variables
dict_var_full = sp.supy_load.exp_dict_full(
sp.supy_load.dict_var2SiteSelect)
dict_var_ref_suews = {
k: extract_var_suews(dict_var_full, k)
for k in dict_var_full
}
df_var_ref_suews = pd.DataFrame(
{k: ', '.join(dict_var_ref_suews[k])
for k in dict_var_ref_suews},
index=[0]).T.rename({
0: 'SUEWS-related variables'
}, axis=1)
# retrieve supy variable description
dict_var_desc = {
k: '\n'.join(df_var_info_x.loc[v].values.flatten())
for k, v in dict_var_ref_suews.items()
}
df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\
.rename(columns={0: 'Description'})
# retrieve variable dimensionality
df_var_dim = gen_df_dim(df_init_sample)
df_var_site_raw = pd.concat(
[df_var_dim, df_var_desc, df_var_ref_suews],
axis=1, sort=False)
df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna()
return df_var_site
|
python
|
def gen_df_site(
list_csv_in=list_table,
url_base=url_repo_input_site)->pd.DataFrame:
'''Generate description info of supy site characteristics as a dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is list_table, which is defined at the top of this file)
url_base : URL, optional
URL to the input dir of repo base (the default is url_repo_input_site, which is defined at the top of this file)
Returns
-------
pd.DataFrame
description info of supy site-characteristics variables
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table])
# df_var_info = pd.concat(
# [pd.read_csv(f) for f in list_url_table],
# sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} not existing!')
else:
# clean meta info
df_var_info_x = df_var_info\
.drop(['No.', 'Use'], axis=1)\
.set_index('Column Name')
df_var_info_x.index = df_var_info_x.index.map(
lambda x: x.replace('`', ''))
# retrieve SUEWS-related variables
dict_var_full = sp.supy_load.exp_dict_full(
sp.supy_load.dict_var2SiteSelect)
dict_var_ref_suews = {
k: extract_var_suews(dict_var_full, k)
for k in dict_var_full
}
df_var_ref_suews = pd.DataFrame(
{k: ', '.join(dict_var_ref_suews[k])
for k in dict_var_ref_suews},
index=[0]).T.rename({
0: 'SUEWS-related variables'
}, axis=1)
# retrieve supy variable description
dict_var_desc = {
k: '\n'.join(df_var_info_x.loc[v].values.flatten())
for k, v in dict_var_ref_suews.items()
}
df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\
.rename(columns={0: 'Description'})
# retrieve variable dimensionality
df_var_dim = gen_df_dim(df_init_sample)
df_var_site_raw = pd.concat(
[df_var_dim, df_var_desc, df_var_ref_suews],
axis=1, sort=False)
df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna()
return df_var_site
|
[
"def",
"gen_df_site",
"(",
"list_csv_in",
"=",
"list_table",
",",
"url_base",
"=",
"url_repo_input_site",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# list of URLs",
"list_url_table",
"=",
"[",
"url_base",
"/",
"table",
"for",
"table",
"in",
"list_csv_in",
"]",
"try",
":",
"df_var_info",
"=",
"pd",
".",
"concat",
"(",
"[",
"pd",
".",
"read_csv",
"(",
"f",
")",
"for",
"f",
"in",
"list_url_table",
"]",
")",
"# df_var_info = pd.concat(",
"# [pd.read_csv(f) for f in list_url_table],",
"# sort=False)",
"except",
":",
"for",
"url",
"in",
"list_url_table",
":",
"if",
"not",
"url",
".",
"get",
"(",
")",
".",
"ok",
":",
"print",
"(",
"f'{url} not existing!'",
")",
"else",
":",
"# clean meta info",
"df_var_info_x",
"=",
"df_var_info",
".",
"drop",
"(",
"[",
"'No.'",
",",
"'Use'",
"]",
",",
"axis",
"=",
"1",
")",
".",
"set_index",
"(",
"'Column Name'",
")",
"df_var_info_x",
".",
"index",
"=",
"df_var_info_x",
".",
"index",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"replace",
"(",
"'`'",
",",
"''",
")",
")",
"# retrieve SUEWS-related variables",
"dict_var_full",
"=",
"sp",
".",
"supy_load",
".",
"exp_dict_full",
"(",
"sp",
".",
"supy_load",
".",
"dict_var2SiteSelect",
")",
"dict_var_ref_suews",
"=",
"{",
"k",
":",
"extract_var_suews",
"(",
"dict_var_full",
",",
"k",
")",
"for",
"k",
"in",
"dict_var_full",
"}",
"df_var_ref_suews",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"k",
":",
"', '",
".",
"join",
"(",
"dict_var_ref_suews",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"dict_var_ref_suews",
"}",
",",
"index",
"=",
"[",
"0",
"]",
")",
".",
"T",
".",
"rename",
"(",
"{",
"0",
":",
"'SUEWS-related variables'",
"}",
",",
"axis",
"=",
"1",
")",
"# retrive supy variable description",
"dict_var_desc",
"=",
"{",
"k",
":",
"'\\n'",
".",
"join",
"(",
"df_var_info_x",
".",
"loc",
"[",
"v",
"]",
".",
"values",
".",
"flatten",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"dict_var_ref_suews",
".",
"items",
"(",
")",
"}",
"df_var_desc",
"=",
"pd",
".",
"DataFrame",
"(",
"dict_var_desc",
",",
"index",
"=",
"[",
"0",
"]",
")",
".",
"T",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"'Description'",
"}",
")",
"# retrieve variable dimensionality",
"df_var_dim",
"=",
"gen_df_dim",
"(",
"df_init_sample",
")",
"df_var_site_raw",
"=",
"pd",
".",
"concat",
"(",
"[",
"df_var_dim",
",",
"df_var_desc",
",",
"df_var_ref_suews",
"]",
",",
"axis",
"=",
"1",
",",
"sort",
"=",
"False",
")",
"df_var_site",
"=",
"df_var_site_raw",
".",
"filter",
"(",
"items",
"=",
"set_input",
",",
"axis",
"=",
"0",
")",
".",
"dropna",
"(",
")",
"return",
"df_var_site"
] |
Generate description info of supy site characteristics as a dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is list_table, which is defined at the top of this file)
url_base : URL, optional
URL to the input dir of repo base (the default is url_repo_input_site, which is defined at the top of this file)
Returns
-------
pd.DataFrame
description info of supy site-characteristics variables
|
[
"Generate",
"description",
"info",
"of",
"supy",
"output",
"results",
"as",
"a",
"dataframe"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L105-L178
|
train
|
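The dict-to-column step inside gen_df_site, isolated with toy data (the variable names below are made up for illustration):

import pandas as pd

dict_var_ref_suews = {'lat': ['lat'],
                      'ohm_coef': ['OHMCode_SummerDry', 'OHMCode_SummerWet']}
df_var_ref_suews = pd.DataFrame(
    {k: ', '.join(v) for k, v in dict_var_ref_suews.items()},
    index=[0]).T.rename({0: 'SUEWS-related variables'}, axis=1)
print(df_var_ref_suews)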
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_state_csv.py
|
gen_rst_url_split_opts
|
def gen_rst_url_split_opts(opts_str):
"""generate option list for RST docs
Parameters
----------
opts_str : str
a string including all SUEWS related options/variables.
e.g. 'SUEWS_a, SUEWS_b'
Returns
-------
str
a comma-joined string of parsed RST `:option:` roles.
e.g. ':option:`SUEWS_a <suews:SUEWS_a>`'
"""
if opts_str != 'None':
list_opts = opts_str.split(',')
# list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts]
list_rst = [opt.strip() for opt in list_opts]
# list_rst = [f'`{opt}`' for opt in list_rst]
# more properly handle SUEWS options by explicitly adding prefix `suews`:
list_rst = [f':option:`{opt} <suews:{opt}>`' for opt in list_rst]
list_url_rst = ', '.join(list_rst)
else:
list_url_rst = 'None'
return list_url_rst
|
python
|
def gen_rst_url_split_opts(opts_str):
"""generate option list for RST docs
Parameters
----------
opts_str : str
a string including all SUEWS related options/variables.
e.g. 'SUEWS_a, SUEWS_b'
Returns
-------
str
a comma-joined string of parsed RST `:option:` roles.
e.g. ':option:`SUEWS_a <suews:SUEWS_a>`'
"""
if opts_str != 'None':
list_opts = opts_str.split(',')
# list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts]
list_rst = [opt.strip() for opt in list_opts]
# list_rst = [f'`{opt}`' for opt in list_rst]
# more properly handle SUEWS options by explicitly adding prefix `suews`:
list_rst = [f':option:`{opt} <suews:{opt}>`' for opt in list_rst]
list_url_rst = ', '.join(list_rst)
else:
list_url_rst = 'None'
return list_url_rst
|
[
"def",
"gen_rst_url_split_opts",
"(",
"opts_str",
")",
":",
"if",
"opts_str",
"is",
"not",
"'None'",
":",
"list_opts",
"=",
"opts_str",
".",
"split",
"(",
"','",
")",
"# list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts]",
"list_rst",
"=",
"[",
"opt",
".",
"strip",
"(",
")",
"for",
"opt",
"in",
"list_opts",
"]",
"# list_rst = [f'`{opt}`' for opt in list_rst]",
"# more properly handle SUEWS options by explicitly adding prefix `suews`:",
"list_rst",
"=",
"[",
"f':option:`{opt} <suews:{opt}>`'",
"for",
"opt",
"in",
"list_rst",
"]",
"list_url_rst",
"=",
"', '",
".",
"join",
"(",
"list_rst",
")",
"else",
":",
"list_url_rst",
"=",
"'None'",
"return",
"list_url_rst"
] |
generate option list for RST docs
Parameters
----------
opts_str : str
a string including all SUEWS related options/variables.
e.g. 'SUEWS_a, SUEWS_b'
Returns
-------
str
a comma-joined string of parsed RST `:option:` roles.
e.g. ':option:`SUEWS_a <suews:SUEWS_a>`'
|
[
"generate",
"option",
"list",
"for",
"RST",
"docs"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L344-L370
|
train
|
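The whole transformation is runnable on its own once the comparison uses `!=`; a sample option string:

opts_str = 'SUEWS_a, SUEWS_b'
if opts_str != 'None':
    list_rst = [f':option:`{opt.strip()} <suews:{opt.strip()}>`'
                for opt in opts_str.split(',')]
    list_url_rst = ', '.join(list_rst)
else:
    list_url_rst = 'None'
print(list_url_rst)
# :option:`SUEWS_a <suews:SUEWS_a>`, :option:`SUEWS_b <suews:SUEWS_b>`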
sunt05/SuPy
|
docs/source/proc_var_info/gen_df_state_csv.py
|
gen_df_state
|
def gen_df_state(
list_table: list,
set_initcond: set,
set_runcontrol: set,
set_input_runcontrol: set)->pd.DataFrame:
'''generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy
'''
# generate a base df for site characteristics related variables
df_var_site = gen_df_site(list_table)
# generate a base df for runcontrol related variables
df_var_runcontrol = gen_df_runcontrol(
set_initcond, set_runcontrol, set_input_runcontrol)
# generate a base df for initial condition related variables
df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol)
# further processing by modifying several entries
df_var_state = proc_df_state(
df_var_site, df_var_runcontrol, df_var_initcond)
# reorganising the result:
df_var_state = df_var_state.sort_index()
# delete duplicates while considering the variable name (stored as index)
df_var_state = df_var_state.reset_index()
df_var_state = df_var_state.drop_duplicates()
# convert index back
df_var_state = df_var_state.set_index('variable')
return df_var_state
|
python
|
def gen_df_state(
list_table: list,
set_initcond: set,
set_runcontrol: set,
set_input_runcontrol: set)->pd.DataFrame:
'''generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy
'''
# generate a base df for site characteristics related variables
df_var_site = gen_df_site(list_table)
# generate a base df for runcontrol related variables
df_var_runcontrol = gen_df_runcontrol(
set_initcond, set_runcontrol, set_input_runcontrol)
# generate a base df for initial condition related variables
df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol)
# further processing by modifying several entries
df_var_state = proc_df_state(
df_var_site, df_var_runcontrol, df_var_initcond)
# reorganising the result:
df_var_state = df_var_state.sort_index()
# delete duplicates while considering the variable name (stored as index)
df_var_state = df_var_state.reset_index()
df_var_state = df_var_state.drop_duplicates()
# convert index back
df_var_state = df_var_state.set_index('variable')
return df_var_state
|
[
"def",
"gen_df_state",
"(",
"list_table",
":",
"list",
",",
"set_initcond",
":",
"set",
",",
"set_runcontrol",
":",
"set",
",",
"set_input_runcontrol",
":",
"set",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# generate a base df for site characteristics related variables",
"df_var_site",
"=",
"gen_df_site",
"(",
"list_table",
")",
"# generate a base df for runcontrol related variables",
"df_var_runcontrol",
"=",
"gen_df_runcontrol",
"(",
"set_initcond",
",",
"set_runcontrol",
",",
"set_input_runcontrol",
")",
"# generate a base df for initial condition related variables",
"df_var_initcond",
"=",
"gen_df_initcond",
"(",
"set_initcond",
",",
"set_runcontrol",
")",
"# further processing by modifying several entries",
"df_var_state",
"=",
"proc_df_state",
"(",
"df_var_site",
",",
"df_var_runcontrol",
",",
"df_var_initcond",
")",
"# reorganising the result:",
"df_var_state",
"=",
"df_var_state",
".",
"sort_index",
"(",
")",
"# delete duplicates while considering the variable name (stored as index)",
"df_var_state",
"=",
"df_var_state",
".",
"reset_index",
"(",
")",
"df_var_state",
"=",
"df_var_state",
".",
"drop_duplicates",
"(",
")",
"# convert index back",
"df_var_state",
"=",
"df_var_state",
".",
"set_index",
"(",
"'variable'",
")",
"return",
"df_var_state"
] |
generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy
|
[
"generate",
"dataframe",
"of",
"all",
"state",
"variables",
"used",
"by",
"supy"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L510-L552
|
train
|
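The dedup-with-index dance at the end of gen_df_state, shown on a toy frame: duplicates are only dropped once the index value is exposed as a regular column, so identical variable names with identical rows collapse:

import pandas as pd

df = pd.DataFrame({'Description': ['latitude', 'latitude', 'longitude']},
                  index=pd.Index(['lat', 'lat', 'lng'], name='variable'))
df = df.sort_index()
df = df.reset_index()   # expose 'variable' so duplicates compare on it too
df = df.drop_duplicates()
df = df.set_index('variable')
print(df)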
sunt05/SuPy
|
src/supy/supy_save.py
|
gen_df_save
|
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame:
'''generate a dataframe for saving
Parameters
----------
df_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
'''
# generate df_datetime for prepending
idx_dt = df_grid_group.index
ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
df_datetime = pd.concat([
ser_year,
ser_DOY,
ser_hour,
ser_min,
], axis=1)
df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta(
'd').total_seconds()/(24*60*60)
df_save = pd.concat([df_datetime, df_grid_group], axis=1)
return df_save
|
python
|
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame:
'''generate a dataframe for saving
Parameters
----------
df_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
'''
# generate df_datetime for prepending
idx_dt = df_grid_group.index
ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
df_datetime = pd.concat([
ser_year,
ser_DOY,
ser_hour,
ser_min,
], axis=1)
df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta(
'd').total_seconds()/(24*60*60)
df_save = pd.concat([df_datetime, df_grid_group], axis=1)
return df_save
|
[
"def",
"gen_df_save",
"(",
"df_grid_group",
":",
"pd",
".",
"DataFrame",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# generate df_datetime for prepending",
"idx_dt",
"=",
"df_grid_group",
".",
"index",
"ser_year",
"=",
"pd",
".",
"Series",
"(",
"idx_dt",
".",
"year",
",",
"index",
"=",
"idx_dt",
",",
"name",
"=",
"'Year'",
")",
"ser_DOY",
"=",
"pd",
".",
"Series",
"(",
"idx_dt",
".",
"dayofyear",
",",
"index",
"=",
"idx_dt",
",",
"name",
"=",
"'DOY'",
")",
"ser_hour",
"=",
"pd",
".",
"Series",
"(",
"idx_dt",
".",
"hour",
",",
"index",
"=",
"idx_dt",
",",
"name",
"=",
"'Hour'",
")",
"ser_min",
"=",
"pd",
".",
"Series",
"(",
"idx_dt",
".",
"minute",
",",
"index",
"=",
"idx_dt",
",",
"name",
"=",
"'Min'",
")",
"df_datetime",
"=",
"pd",
".",
"concat",
"(",
"[",
"ser_year",
",",
"ser_DOY",
",",
"ser_hour",
",",
"ser_min",
",",
"]",
",",
"axis",
"=",
"1",
")",
"df_datetime",
"[",
"'Dectime'",
"]",
"=",
"ser_DOY",
"-",
"1",
"+",
"idx_dt",
".",
"to_perioddelta",
"(",
"'d'",
")",
".",
"total_seconds",
"(",
")",
"/",
"(",
"24",
"*",
"60",
"*",
"60",
")",
"df_save",
"=",
"pd",
".",
"concat",
"(",
"[",
"df_datetime",
",",
"df_grid_group",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"df_save"
] |
generate a dataframe for saving
Parameters
----------
df_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
|
[
"generate",
"a",
"dataframe",
"for",
"saving"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L12-L40
|
train
|
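The Dectime computation isolated; idx_dt - idx_dt.normalize() is used here as an equivalent of the to_perioddelta('d') call above, which newer pandas versions deprecate:

import pandas as pd

idx_dt = pd.date_range('2012-01-01 00:00', periods=3, freq='30T')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
# seconds elapsed since midnight of each timestamp
sec_of_day = pd.Series((idx_dt - idx_dt.normalize()).total_seconds(),
                       index=idx_dt)
dectime = ser_DOY - 1 + sec_of_day / (24 * 60 * 60)
print(dectime)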
sunt05/SuPy
|
src/supy/supy_save.py
|
save_df_output
|
def save_df_output(
df_output: pd.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: Path = Path('.'),)->list:
'''save supy output dataframe to txt files
Parameters
----------
df_output : pd.DataFrame
output dataframe of supy simulation
freq_s : int, optional
output frequency in seconds (the default is 3600, which indicates a txt file with hourly values)
path_dir_save : Path, optional
directory to save txt files (the default is '.', which indicates the current working directory)
site : str, optional
site code used for filename (the default is '', which indicates no site name prepended to the filename)
Returns
-------
list
a list of `Path` objects for saved txt files
'''
list_path_save = []
list_group = df_output.columns.get_level_values('group').unique()
list_grid = df_output.index.get_level_values('grid').unique()
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_output\
.loc[grid, group]\
.dropna(how='all', axis=0)
# save output at the runtime frequency (usually 5 min)
# 'DailyState' group will be saved at a daily frequency
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
# resample output if freq_s is different from runtime freq (usually 5 min)
freq_save = pd.Timedelta(freq_s, 's')
# resample `df_output` at `freq_save`
df_rsmp = resample_output(df_output, freq_save)
# 'DailyState' group will be dropped in `resample_output` as resampling is not needed
df_rsmp = df_rsmp.drop(columns='DailyState')
list_group = df_rsmp.columns.get_level_values('group').unique()
list_grid = df_rsmp.index.get_level_values('grid').unique()
# save output at the resampling frequency
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_rsmp.loc[grid, group]
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
return list_path_save
|
python
|
def save_df_output(
df_output: pd.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: Path = Path('.'),)->list:
'''save supy output dataframe to txt files
Parameters
----------
df_output : pd.DataFrame
output dataframe of supy simulation
freq_s : int, optional
output frequency in seconds (the default is 3600, which indicates a txt file with hourly values)
path_dir_save : Path, optional
directory to save txt files (the default is '.', which indicates the current working directory)
site : str, optional
site code used for filename (the default is '', which indicates no site name prepended to the filename)
Returns
-------
list
a list of `Path` objects for saved txt files
'''
list_path_save = []
list_group = df_output.columns.get_level_values('group').unique()
list_grid = df_output.index.get_level_values('grid').unique()
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_output\
.loc[grid, group]\
.dropna(how='all', axis=0)
# save output at the runtime frequency (usually 5 min)
# 'DailyState' group will be saved at a daily frequency
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
# resample output if freq_s is different from runtime freq (usually 5 min)
freq_save = pd.Timedelta(freq_s, 's')
# resample `df_output` at `freq_save`
df_rsmp = resample_output(df_output, freq_save)
# 'DailyState' group will be dropped in `resample_output` as resampling is not needed
df_rsmp = df_rsmp.drop(columns='DailyState')
list_group = df_rsmp.columns.get_level_values('group').unique()
list_grid = df_rsmp.index.get_level_values('grid').unique()
# save output at the resampling frequency
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_rsmp.loc[grid, group]
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
return list_path_save
|
[
"def",
"save_df_output",
"(",
"df_output",
":",
"pd",
".",
"DataFrame",
",",
"freq_s",
":",
"int",
"=",
"3600",
",",
"site",
":",
"str",
"=",
"''",
",",
"path_dir_save",
":",
"Path",
"=",
"Path",
"(",
"'.'",
")",
",",
")",
"->",
"list",
":",
"list_path_save",
"=",
"[",
"]",
"list_group",
"=",
"df_output",
".",
"columns",
".",
"get_level_values",
"(",
"'group'",
")",
".",
"unique",
"(",
")",
"list_grid",
"=",
"df_output",
".",
"index",
".",
"get_level_values",
"(",
"'grid'",
")",
".",
"unique",
"(",
")",
"for",
"grid",
"in",
"list_grid",
":",
"for",
"group",
"in",
"list_group",
":",
"df_output_grid_group",
"=",
"df_output",
".",
"loc",
"[",
"grid",
",",
"group",
"]",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"0",
")",
"# save output at the runtime frequency (usually 5 min)",
"# 'DailyState' group will be save a daily frequency",
"path_save",
"=",
"save_df_grid_group",
"(",
"df_output_grid_group",
",",
"grid",
",",
"group",
",",
"site",
"=",
"site",
",",
"dir_save",
"=",
"path_dir_save",
")",
"list_path_save",
".",
"append",
"(",
"path_save",
")",
"# resample output if freq_s is different from runtime freq (usually 5 min)",
"freq_save",
"=",
"pd",
".",
"Timedelta",
"(",
"freq_s",
",",
"'s'",
")",
"# resample `df_output` at `freq_save`",
"df_rsmp",
"=",
"resample_output",
"(",
"df_output",
",",
"freq_save",
")",
"# 'DailyState' group will be dropped in `resample_output` as resampling is not needed",
"df_rsmp",
"=",
"df_rsmp",
".",
"drop",
"(",
"columns",
"=",
"'DailyState'",
")",
"list_group",
"=",
"df_rsmp",
".",
"columns",
".",
"get_level_values",
"(",
"'group'",
")",
".",
"unique",
"(",
")",
"list_grid",
"=",
"df_rsmp",
".",
"index",
".",
"get_level_values",
"(",
"'grid'",
")",
".",
"unique",
"(",
")",
"# save output at the resampling frequency",
"for",
"grid",
"in",
"list_grid",
":",
"for",
"group",
"in",
"list_group",
":",
"df_output_grid_group",
"=",
"df_rsmp",
".",
"loc",
"[",
"grid",
",",
"group",
"]",
"path_save",
"=",
"save_df_grid_group",
"(",
"df_output_grid_group",
",",
"grid",
",",
"group",
",",
"site",
"=",
"site",
",",
"dir_save",
"=",
"path_dir_save",
")",
"list_path_save",
".",
"append",
"(",
"path_save",
")",
"return",
"list_path_save"
] |
save supy output dataframe to txt files
Parameters
----------
df_output : pd.DataFrame
output dataframe of supy simulation
freq_s : int, optional
output frequency in seconds (the default is 3600, which indicates a txt file with hourly values)
path_dir_save : Path, optional
directory to save txt files (the default is '.', which indicates the current working directory)
site : str, optional
site code used for filename (the default is '', which indicates no site name prepended to the filename)
Returns
-------
list
a list of `Path` objects for saved txt files
|
[
"save",
"supy",
"output",
"dataframe",
"to",
"txt",
"files"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L130-L189
|
train
|
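A toy walk over the (grid, group) slices that save_df_output writes out, using a small MultiIndex frame in place of real supy output:

import pandas as pd

columns = pd.MultiIndex.from_product([['SUEWS', 'snow'], ['QN']],
                                     names=['group', 'var'])
index = pd.MultiIndex.from_product(
    [[1, 2], pd.date_range('2012-01-01', periods=2, freq='H')],
    names=['grid', 'datetime'])
df_output = pd.DataFrame(1.0, index=index, columns=columns)
for grid in df_output.index.get_level_values('grid').unique():
    for group in df_output.columns.get_level_values('group').unique():
        df_slice = df_output.loc[grid, group].dropna(how='all', axis=0)
        print(grid, group, df_slice.shape)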
sunt05/SuPy
|
src/supy/supy_save.py
|
save_df_state
|
def save_df_state(
df_state: pd.DataFrame,
site: str = '',
path_dir_save: Path = Path('.'),)->Path:
'''save `df_state` to a csv file
Parameters
----------
df_state : pd.DataFrame
a dataframe of model states produced by a supy run
site : str, optional
site identifier (the default is '', which indicates an empty site code)
path_dir_save : Path, optional
path to directory to save results (the default is Path('.'), which indicates the current working directory)
Returns
-------
Path
path to the saved csv file
'''
file_state_save = 'df_state_{site}.csv'.format(site=site)
# trim filename if site == ''
file_state_save = file_state_save.replace('_.csv', '.csv')
path_state_save = path_dir_save/file_state_save
print('writing out: {path_out}'.format(path_out=path_state_save))
df_state.to_csv(path_state_save)
return path_state_save
|
python
|
def save_df_state(
df_state: pd.DataFrame,
site: str = '',
path_dir_save: Path = Path('.'),)->Path:
'''save `df_state` to a csv file
Parameters
----------
df_state : pd.DataFrame
a dataframe of model states produced by a supy run
site : str, optional
site identifier (the default is '', which indicates an empty site code)
path_dir_save : Path, optional
path to directory to save results (the default is Path('.'), which indicates the current working directory)
Returns
-------
Path
path to the saved csv file
'''
file_state_save = 'df_state_{site}.csv'.format(site=site)
# trim filename if site == ''
file_state_save = file_state_save.replace('_.csv', '.csv')
path_state_save = path_dir_save/file_state_save
print('writing out: {path_out}'.format(path_out=path_state_save))
df_state.to_csv(path_state_save)
return path_state_save
|
[
"def",
"save_df_state",
"(",
"df_state",
":",
"pd",
".",
"DataFrame",
",",
"site",
":",
"str",
"=",
"''",
",",
"path_dir_save",
":",
"Path",
"=",
"Path",
"(",
"'.'",
")",
",",
")",
"->",
"Path",
":",
"file_state_save",
"=",
"'df_state_{site}.csv'",
".",
"format",
"(",
"site",
"=",
"site",
")",
"# trim filename if site == ''",
"file_state_save",
"=",
"file_state_save",
".",
"replace",
"(",
"'_.csv'",
",",
"'.csv'",
")",
"path_state_save",
"=",
"path_dir_save",
"/",
"file_state_save",
"print",
"(",
"'writing out: {path_out}'",
".",
"format",
"(",
"path_out",
"=",
"path_state_save",
")",
")",
"df_state",
".",
"to_csv",
"(",
"path_state_save",
")",
"return",
"path_state_save"
] |
save `df_state` to a csv file
Parameters
----------
df_state : pd.DataFrame
a dataframe of model states produced by a supy run
site : str, optional
site identifier (the default is '', which indicates an empty site code)
path_dir_save : Path, optional
path to directory to save results (the default is Path('.'), which indicates the current working directory)
Returns
-------
Path
path to the saved csv file
|
[
"save",
"df_state",
"to",
"a",
"csv",
"file"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L194-L221
|
train
|
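The filename trick in save_df_state, runnable on its own: an empty site code collapses 'df_state_.csv' to 'df_state.csv':

from pathlib import Path

for site in ['', 'Test']:
    file_state_save = 'df_state_{site}.csv'.format(site=site)
    file_state_save = file_state_save.replace('_.csv', '.csv')
    print(Path('.') / file_state_save)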
sunt05/SuPy
|
src/supy/supy_util.py
|
gen_FS_DF
|
def gen_FS_DF(df_output):
"""generate DataFrame of scores.
Parameters
----------
df_output : pd.DataFrame
supy output with 'Year', 'Month' and 'Day' columns and the variables 'T2', 'U10', 'Kdown' and 'RH2'.
Returns
-------
pd.DataFrame
FS scores of each (year, month) pair against the all-year statistics of the same month.
"""
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs
|
python
|
def gen_FS_DF(df_output):
"""generate DataFrame of scores.
Parameters
----------
df_output : pd.DataFrame
supy output with 'Year', 'Month' and 'Day' columns and the variables 'T2', 'U10', 'Kdown' and 'RH2'.
Returns
-------
pd.DataFrame
FS scores of each (year, month) pair against the all-year statistics of the same month.
"""
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs
|
[
"def",
"gen_FS_DF",
"(",
"df_output",
")",
":",
"df_day",
"=",
"pd",
".",
"pivot_table",
"(",
"df_output",
",",
"values",
"=",
"[",
"'T2'",
",",
"'U10'",
",",
"'Kdown'",
",",
"'RH2'",
"]",
",",
"index",
"=",
"[",
"'Year'",
",",
"'Month'",
",",
"'Day'",
"]",
",",
"aggfunc",
"=",
"[",
"min",
",",
"max",
",",
"np",
".",
"mean",
",",
"]",
")",
"df_day_all_year",
"=",
"pd",
".",
"pivot_table",
"(",
"df_output",
",",
"values",
"=",
"[",
"'T2'",
",",
"'U10'",
",",
"'Kdown'",
",",
"'RH2'",
"]",
",",
"index",
"=",
"[",
"'Month'",
",",
"'Day'",
"]",
",",
"aggfunc",
"=",
"[",
"min",
",",
"max",
",",
"np",
".",
"mean",
",",
"]",
")",
"array_yr_mon",
"=",
"df_day",
".",
"index",
".",
"droplevel",
"(",
"'Day'",
")",
".",
"to_frame",
"(",
")",
".",
"drop_duplicates",
"(",
")",
".",
"values",
"df_fs",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"(",
"yr",
",",
"mon",
")",
":",
"(",
"df_day",
".",
"loc",
"[",
"(",
"yr",
",",
"mon",
")",
"]",
".",
"apply",
"(",
"gen_score_ser",
")",
"-",
"df_day_all_year",
".",
"loc",
"[",
"mon",
"]",
".",
"apply",
"(",
"gen_score_ser",
")",
")",
".",
"abs",
"(",
")",
".",
"mean",
"(",
")",
"for",
"yr",
",",
"mon",
"in",
"array_yr_mon",
"}",
")",
"return",
"df_fs"
] |
generate DataFrame of scores.
Parameters
----------
df_output : pd.DataFrame
a dataframe of supy output results carrying `T2`, `U10`, `Kdown` and `RH2`
Returns
-------
pd.DataFrame
a dataframe of scores with one (year, month) column per simulated month
|
[
"generate",
"DataFrame",
"of",
"scores",
"."
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L140-L174
|
train
|
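A sketch of how the scoring function above might be called, assuming `df_output` already carries the `T2`, `U10`, `Kdown` and `RH2` columns plus the `Year`/`Month`/`Day` keys that `pd.pivot_table` needs, and that `gen_score_ser` is in scope:

# one column of scores per simulated (year, month)
df_fs = gen_FS_DF(df_output)
print(df_fs.columns.tolist())  # e.g. [(2012, 1), (2012, 2), ...]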
sunt05/SuPy
|
src/supy/supy_util.py
|
gen_WS_DF
|
def gen_WS_DF(df_WS_data):
"""generate DataFrame of weighted sums.
Parameters
----------
df_WS_data : pd.DataFrame
a dataframe of supy output results passed through to `gen_FS_DF`
Returns
-------
pd.DataFrame
a year-by-month dataframe of weighted-sum scores
"""
df_fs = gen_FS_DF(df_WS_data)
list_index = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'),
('mean', 'U10'), ('max', 'U10'), ('min', 'U10'),
('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'),
('mean', 'Kdown')]
list_const = [getattr(const, attr)
for attr in ['T_MEAN', 'T_MAX', 'T_MIN',
'WIND_MEAN', 'WIND_MAX', 'WIND_MIN',
'RH_MEAN', 'RH_MAX', 'RH_MIN',
'SOLAR_RADIATION_GLOBAL']]
list_ws = [df_fs.loc[idx] * cst
for idx, cst
in zip(list_index, list_const)]
df_ws = pd.concat(list_ws, axis=1).sum(axis=1).unstack().dropna()
return df_ws
|
python
|
def gen_WS_DF(df_WS_data):
"""generate DataFrame of weighted sums.
Parameters
----------
df_WS_data : pd.DataFrame
a dataframe of supy output results passed through to `gen_FS_DF`
Returns
-------
pd.DataFrame
a year-by-month dataframe of weighted-sum scores
"""
df_fs = gen_FS_DF(df_WS_data)
list_index = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'),
('mean', 'U10'), ('max', 'U10'), ('min', 'U10'),
('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'),
('mean', 'Kdown')]
list_const = [getattr(const, attr)
for attr in ['T_MEAN', 'T_MAX', 'T_MIN',
'WIND_MEAN', 'WIND_MAX', 'WIND_MIN',
'RH_MEAN', 'RH_MAX', 'RH_MIN',
'SOLAR_RADIATION_GLOBAL']]
list_ws = [df_fs.loc[idx] * cst
for idx, cst
in zip(list_index, list_const)]
df_ws = pd.concat(list_ws, axis=1).sum(axis=1).unstack().dropna()
return df_ws
|
[
"def",
"gen_WS_DF",
"(",
"df_WS_data",
")",
":",
"df_fs",
"=",
"gen_FS_DF",
"(",
"df_WS_data",
")",
"list_index",
"=",
"[",
"(",
"'mean'",
",",
"'T2'",
")",
",",
"(",
"'max'",
",",
"'T2'",
")",
",",
"(",
"'min'",
",",
"'T2'",
")",
",",
"(",
"'mean'",
",",
"'U10'",
")",
",",
"(",
"'max'",
",",
"'U10'",
")",
",",
"(",
"'min'",
",",
"'U10'",
")",
",",
"(",
"'mean'",
",",
"'RH2'",
")",
",",
"(",
"'max'",
",",
"'RH2'",
")",
",",
"(",
"'min'",
",",
"'RH2'",
")",
",",
"(",
"'mean'",
",",
"'Kdown'",
")",
"]",
"list_const",
"=",
"[",
"getattr",
"(",
"const",
",",
"attr",
")",
"for",
"attr",
"in",
"[",
"'T_MEAN'",
",",
"'T_MAX'",
",",
"'T_MIN'",
",",
"'WIND_MEAN'",
",",
"'WIND_MAX'",
",",
"'WIND_MIN'",
",",
"'RH_MEAN'",
",",
"'RH_MAX'",
",",
"'RH_MIN'",
",",
"'SOLAR_RADIATION_GLOBAL'",
"]",
"]",
"list_ws",
"=",
"[",
"df_fs",
".",
"loc",
"[",
"idx",
"]",
"*",
"cst",
"for",
"idx",
",",
"cst",
"in",
"zip",
"(",
"list_index",
",",
"list_const",
")",
"]",
"df_ws",
"=",
"pd",
".",
"concat",
"(",
"list_ws",
",",
"axis",
"=",
"1",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"unstack",
"(",
")",
".",
"dropna",
"(",
")",
"return",
"df_ws"
] |
generate DataFrame of weighted sums.
Parameters
----------
df_WS_data : pd.DataFrame
a dataframe of supy output results passed through to `gen_FS_DF`
Returns
-------
pd.DataFrame
a year-by-month dataframe of weighted-sum scores
|
[
"generate",
"DataFrame",
"of",
"weighted",
"sums",
"."
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L177-L208
|
train
|
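The weighted sum above relies on a module-level `const` namespace exposing the ten weighting constants it names. A minimal stand-in, assuming the function is defined in the same namespace (the unit weights below are placeholders, not the real values):

from types import SimpleNamespace

const = SimpleNamespace(
    T_MEAN=1.0, T_MAX=1.0, T_MIN=1.0,
    WIND_MEAN=1.0, WIND_MAX=1.0, WIND_MIN=1.0,
    RH_MEAN=1.0, RH_MAX=1.0, RH_MIN=1.0,
    SOLAR_RADIATION_GLOBAL=1.0)

df_ws = gen_WS_DF(df_WS_data)  # year-by-month table of weighted scores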
sunt05/SuPy
|
src/supy/supy_util.py
|
_geoid_radius
|
def _geoid_radius(latitude: float) -> float:
"""Calculates the GEOID radius at a given latitude
Parameters
----------
latitude : float
Latitude (degrees)
Returns
-------
R : float
GEOID Radius (meters)
"""
lat = deg2rad(latitude)
return sqrt(1/(cos(lat) ** 2 / Rmax_WGS84 ** 2 + sin(lat) ** 2 / Rmin_WGS84 ** 2))
|
python
|
def _geoid_radius(latitude: float) -> float:
"""Calculates the GEOID radius at a given latitude
Parameters
----------
latitude : float
Latitude (degrees)
Returns
-------
R : float
GEOID Radius (meters)
"""
lat = deg2rad(latitude)
return sqrt(1/(cos(lat) ** 2 / Rmax_WGS84 ** 2 + sin(lat) ** 2 / Rmin_WGS84 ** 2))
|
[
"def",
"_geoid_radius",
"(",
"latitude",
":",
"float",
")",
"->",
"float",
":",
"lat",
"=",
"deg2rad",
"(",
"latitude",
")",
"return",
"sqrt",
"(",
"1",
"/",
"(",
"cos",
"(",
"lat",
")",
"**",
"2",
"/",
"Rmax_WGS84",
"**",
"2",
"+",
"sin",
"(",
"lat",
")",
"**",
"2",
"/",
"Rmin_WGS84",
"**",
"2",
")",
")"
] |
Calculates the GEOID radius at a given latitude
Parameters
----------
latitude : float
Latitude (degrees)
Returns
-------
R : float
GEOID Radius (meters)
|
[
"Calculates",
"the",
"GEOID",
"radius",
"at",
"a",
"given",
"latitude"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L518-L532
|
train
|
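A quick sanity check of the radius formula above, assuming the WGS84 semi-axes are defined as module constants (the values below are the standard ones, in metres):

from numpy import deg2rad, sqrt, cos, sin

Rmax_WGS84 = 6378137.0   # equatorial radius, m
Rmin_WGS84 = 6356752.3   # polar radius, m

print(_geoid_radius(0.0))   # -> 6378137.0 (equator)
print(_geoid_radius(90.0))  # -> ~6356752.3 (pole)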
sunt05/SuPy
|
src/supy/supy_util.py
|
geometric2geopotential
|
def geometric2geopotential(z: float, latitude: float) -> float:
"""Converts geometric height to geopoential height
Parameters
----------
z : float
Geometric height (meters)
latitude : float
Latitude (degrees)
Returns
-------
h : float
Geopotential Height (meters) above the reference ellipsoid
"""
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return z * g * re / (re + z)
|
python
|
def geometric2geopotential(z: float, latitude: float) -> float:
"""Converts geometric height to geopoential height
Parameters
----------
z : float
Geometric height (meters)
latitude : float
Latitude (degrees)
Returns
-------
h : float
Geopotential Height (meters) above the reference ellipsoid
"""
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return z * g * re / (re + z)
|
[
"def",
"geometric2geopotential",
"(",
"z",
":",
"float",
",",
"latitude",
":",
"float",
")",
"->",
"float",
":",
"twolat",
"=",
"deg2rad",
"(",
"2",
"*",
"latitude",
")",
"g",
"=",
"9.80616",
"*",
"(",
"1",
"-",
"0.002637",
"*",
"cos",
"(",
"twolat",
")",
"+",
"0.0000059",
"*",
"cos",
"(",
"twolat",
")",
"**",
"2",
")",
"re",
"=",
"_geoid_radius",
"(",
"latitude",
")",
"return",
"z",
"*",
"g",
"*",
"re",
"/",
"(",
"re",
"+",
"z",
")"
] |
Converts geometric height to geopotential height
Parameters
----------
z : float
Geometric height (meters)
latitude : float
Latitude (degrees)
Returns
-------
h : float
Geopotential Height (meters) above the reference ellipsoid
|
[
"Converts",
"geometric",
"height",
"to",
"geopoential",
"height"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L535-L553
|
train
|
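A small round-trip check: this conversion and its inverse in the next row are exact algebraic inverses of each other, so a value should survive the round trip (latitude and height below are arbitrary examples):

h = geometric2geopotential(100.0, 51.5)
z = geopotential2geometric(h, 51.5)
assert abs(z - 100.0) < 1e-6  # exact up to floating-point error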
sunt05/SuPy
|
src/supy/supy_util.py
|
geopotential2geometric
|
def geopotential2geometric(h: float, latitude: float) -> float:
"""Converts geopoential height to geometric height
Parameters
----------
h : float
Geopotential height (meters)
latitude : float
Latitude (degrees)
Returns
-------
z : float
Geometric Height (meters) above the reference ellipsoid
"""
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return h * re / (g * re - h)
|
python
|
def geopotential2geometric(h: float, latitude: float) -> float:
"""Converts geopoential height to geometric height
Parameters
----------
h : float
Geopotential height (meters)
latitude : float
Latitude (degrees)
Returns
-------
z : float
Geometric Height (meters) above the reference ellipsoid
"""
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return h * re / (g * re - h)
|
[
"def",
"geopotential2geometric",
"(",
"h",
":",
"float",
",",
"latitude",
":",
"float",
")",
"->",
"float",
":",
"twolat",
"=",
"deg2rad",
"(",
"2",
"*",
"latitude",
")",
"g",
"=",
"9.80616",
"*",
"(",
"1",
"-",
"0.002637",
"*",
"cos",
"(",
"twolat",
")",
"+",
"0.0000059",
"*",
"cos",
"(",
"twolat",
")",
"**",
"2",
")",
"re",
"=",
"_geoid_radius",
"(",
"latitude",
")",
"return",
"h",
"*",
"re",
"/",
"(",
"g",
"*",
"re",
"-",
"h",
")"
] |
Converts geopotential height to geometric height
Parameters
----------
h : float
Geopotential height (meters)
latitude : float
Latitude (degrees)
Returns
-------
z : float
Geometric Height (meters) above the reference ellipsoid
|
[
"Converts",
"geopoential",
"height",
"to",
"geometric",
"height"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L556-L574
|
train
|
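In the ERA5-style workflow further down (see `get_df_val_alt`), this inverse is what turns the model's geopotential field into metres above the ellipsoid; `da_z` below is a hypothetical geopotential DataArray from a pressure-level file:

da_alt = geopotential2geometric(da_z, da_z.latitude)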
sunt05/SuPy
|
src/supy/supy_util.py
|
get_ser_val_alt
|
def get_ser_val_alt(lat: float, lon: float,
da_alt_x: xr.DataArray,
da_alt: xr.DataArray, da_val: xr.DataArray)->pd.Series:
'''interpolate atmospheric variable to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_x : xr.DataArray
desired altitude to interpolate variable at
da_alt : xr.DataArray
altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
atmospheric variable to interpolate
Returns
-------
pd.Series
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
'''
alt_t_1d = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
val_t_1d = da_val.sel(
latitude=lat, longitude=lon, method='nearest')
alt_x = da_alt_x.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_alt = np.array(
[interp1d(alt_1d, val_1d)(alt_x)
for alt_1d, val_1d
in zip(alt_t_1d, val_t_1d)])
ser_alt = pd.Series(
val_alt,
index=da_val.time.values,
name=da_val.name,
)
return ser_alt
|
python
|
def get_ser_val_alt(lat: float, lon: float,
da_alt_x: xr.DataArray,
da_alt: xr.DataArray, da_val: xr.DataArray)->pd.Series:
'''interpolate atmospheric variable to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_x : xr.DataArray
desired altitude to interpolate variable at
da_alt : xr.DataArray
altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
atmospheric variable to interpolate
Returns
-------
pd.Series
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
'''
alt_t_1d = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
val_t_1d = da_val.sel(
latitude=lat, longitude=lon, method='nearest')
alt_x = da_alt_x.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_alt = np.array(
[interp1d(alt_1d, val_1d)(alt_x)
for alt_1d, val_1d
in zip(alt_t_1d, val_t_1d)])
ser_alt = pd.Series(
val_alt,
index=da_val.time.values,
name=da_val.name,
)
return ser_alt
|
[
"def",
"get_ser_val_alt",
"(",
"lat",
":",
"float",
",",
"lon",
":",
"float",
",",
"da_alt_x",
":",
"xr",
".",
"DataArray",
",",
"da_alt",
":",
"xr",
".",
"DataArray",
",",
"da_val",
":",
"xr",
".",
"DataArray",
")",
"->",
"pd",
".",
"Series",
":",
"alt_t_1d",
"=",
"da_alt",
".",
"sel",
"(",
"latitude",
"=",
"lat",
",",
"longitude",
"=",
"lon",
",",
"method",
"=",
"'nearest'",
")",
"val_t_1d",
"=",
"da_val",
".",
"sel",
"(",
"latitude",
"=",
"lat",
",",
"longitude",
"=",
"lon",
",",
"method",
"=",
"'nearest'",
")",
"alt_x",
"=",
"da_alt_x",
".",
"sel",
"(",
"latitude",
"=",
"lat",
",",
"longitude",
"=",
"lon",
",",
"method",
"=",
"'nearest'",
")",
"[",
"0",
"]",
"val_alt",
"=",
"np",
".",
"array",
"(",
"[",
"interp1d",
"(",
"alt_1d",
",",
"val_1d",
")",
"(",
"alt_x",
")",
"for",
"alt_1d",
",",
"val_1d",
"in",
"zip",
"(",
"alt_t_1d",
",",
"val_t_1d",
")",
"]",
")",
"ser_alt",
"=",
"pd",
".",
"Series",
"(",
"val_alt",
",",
"index",
"=",
"da_val",
".",
"time",
".",
"values",
",",
"name",
"=",
"da_val",
".",
"name",
",",
")",
"return",
"ser_alt"
] |
interpolate atmospheric variable to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_x : xr.DataArray
desired altitude to interpolate variable at
da_alt : xr.DataArray
altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
atmospheric variable to interpolate
Returns
-------
pd.Series
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
|
[
"interpolate",
"atmospheric",
"variable",
"to",
"a",
"specified",
"altitude"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L578-L617
|
train
|
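A usage sketch under assumed ERA5-style inputs; all array names below are hypothetical DataArrays with dims (time, level, latitude, longitude):

# da_alt: altitude of each pressure level; da_alt_100m: target altitude;
# ds['t']: temperature on pressure levels
ser_t = get_ser_val_alt(51.5, -0.1, da_alt_100m, da_alt, ds['t'])
print(ser_t.head())  # time-indexed series of temperature at ~100 m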
sunt05/SuPy
|
src/supy/supy_util.py
|
get_df_val_alt
|
def get_df_val_alt(lat: float, lon: float, da_alt_meas: xr.DataArray, ds_val: xr.Dataset):
'''interpolate atmospheric variables to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_meas : xr.DataArray
desired altitude to interpolate variables at
ds_val : xr.Dataset
dataset of atmospheric variables to interpolate
Returns
-------
pd.DataFrame
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
'''
da_alt = geopotential2geometric(ds_val.z, ds_val.latitude)
# generate pressure series for grid x
da_alt_x = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
alt_meas_x = da_alt_meas.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_pres = np.array([interp1d(alt, da_alt_x.level)(alt_meas_x)
for alt in da_alt_x])
df_val_alt = pd.concat(
[get_ser_val_alt(
lat, lon, da_alt_meas, da_alt, ds_val[var])
for var in ds_val.data_vars],
axis=1
)
# add pressure
df_val_alt['p'] = val_pres
df_val_alt.index = df_val_alt.index.set_names('time')
df_val_alt.columns = df_val_alt.columns.set_names('var')
return df_val_alt
|
python
|
def get_df_val_alt(lat: float, lon: float, da_alt_meas: xr.DataArray, ds_val: xr.Dataset):
'''interpolate atmospheric variables to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_meas : xr.DataArray
desired altitude to interpolate variables at
ds_val : xr.Dataset
dataset of atmospheric variables to interpolate
Returns
-------
pd.DataFrame
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
'''
da_alt = geopotential2geometric(ds_val.z, ds_val.latitude)
# generate pressure series for grid x
da_alt_x = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
alt_meas_x = da_alt_meas.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_pres = np.array([interp1d(alt, da_alt_x.level)(alt_meas_x)
for alt in da_alt_x])
df_val_alt = pd.concat(
[get_ser_val_alt(
lat, lon, da_alt_meas, da_alt, ds_val[var])
for var in ds_val.data_vars],
axis=1
)
# add pressure
df_val_alt['p'] = val_pres
df_val_alt.index = df_val_alt.index.set_names('time')
df_val_alt.columns = df_val_alt.columns.set_names('var')
return df_val_alt
|
[
"def",
"get_df_val_alt",
"(",
"lat",
":",
"float",
",",
"lon",
":",
"float",
",",
"da_alt_meas",
":",
"xr",
".",
"DataArray",
",",
"ds_val",
":",
"xr",
".",
"Dataset",
")",
":",
"da_alt",
"=",
"geopotential2geometric",
"(",
"ds_val",
".",
"z",
",",
"ds_val",
".",
"latitude",
")",
"# generate pressure series for grid x",
"da_alt_x",
"=",
"da_alt",
".",
"sel",
"(",
"latitude",
"=",
"lat",
",",
"longitude",
"=",
"lon",
",",
"method",
"=",
"'nearest'",
")",
"alt_meas_x",
"=",
"da_alt_meas",
".",
"sel",
"(",
"latitude",
"=",
"lat",
",",
"longitude",
"=",
"lon",
",",
"method",
"=",
"'nearest'",
")",
"[",
"0",
"]",
"val_pres",
"=",
"np",
".",
"array",
"(",
"[",
"interp1d",
"(",
"alt",
",",
"da_alt_x",
".",
"level",
")",
"(",
"alt_meas_x",
")",
"for",
"alt",
"in",
"da_alt_x",
"]",
")",
"df_val_alt",
"=",
"pd",
".",
"concat",
"(",
"[",
"get_ser_val_alt",
"(",
"lat",
",",
"lon",
",",
"da_alt_meas",
",",
"da_alt",
",",
"ds_val",
"[",
"var",
"]",
")",
"for",
"var",
"in",
"ds_val",
".",
"data_vars",
"]",
",",
"axis",
"=",
"1",
")",
"# add pressure",
"df_val_alt",
"[",
"'p'",
"]",
"=",
"val_pres",
"df_val_alt",
".",
"index",
"=",
"df_val_alt",
".",
"index",
".",
"set_names",
"(",
"'time'",
")",
"df_val_alt",
".",
"columns",
"=",
"df_val_alt",
".",
"columns",
".",
"set_names",
"(",
"'var'",
")",
"return",
"df_val_alt"
] |
interpolate atmospheric variables to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_meas : xr.DataArray
desired altitude to interpolate variables at
ds_val : xr.Dataset
dataset of atmospheric variables to interpolate
Returns
-------
pd.DataFrame
interpolated values at the specified altitude for the site located at [`lat`, `lon`]
|
[
"interpolate",
"atmospheric",
"variables",
"to",
"a",
"specified",
"altitude"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L620-L661
|
train
|
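A sketch of the top-level call, assuming `ds_era5` is a pressure-level Dataset whose `z` variable holds geopotential and `da_alt_meas` gives the measurement altitude (both names are placeholders):

df_met = get_df_val_alt(51.5, -0.1, da_alt_meas, ds_era5)
print(df_met.columns)  # one column per variable, plus interpolated pressure 'p'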
sunt05/SuPy
|
src/supy/supy_util.py
|
sel_list_pres
|
def sel_list_pres(ds_sfc_x):
'''
select proper levels for model level data download
'''
p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values
list_pres_level = [
'1', '2', '3',
'5', '7', '10',
'20', '30', '50',
'70', '100', '125',
'150', '175', '200',
'225', '250', '300',
'350', '400', '450',
'500', '550', '600',
'650', '700', '750',
'775', '800', '825',
'850', '875', '900',
'925', '950', '975',
'1000',
]
ser_pres_level = pd.Series(list_pres_level).map(int)*100
pos_lev_max, pos_lev_min = (
ser_pres_level[ser_pres_level > p_max].idxmin(),
ser_pres_level[ser_pres_level < p_min].idxmax()
)
list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100
list_pres_sel = list_pres_sel.map(int).map(str).to_list()
return list_pres_sel
|
python
|
def sel_list_pres(ds_sfc_x):
'''
select proper levels for model level data download
'''
p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values
list_pres_level = [
'1', '2', '3',
'5', '7', '10',
'20', '30', '50',
'70', '100', '125',
'150', '175', '200',
'225', '250', '300',
'350', '400', '450',
'500', '550', '600',
'650', '700', '750',
'775', '800', '825',
'850', '875', '900',
'925', '950', '975',
'1000',
]
ser_pres_level = pd.Series(list_pres_level).map(int)*100
pos_lev_max, pos_lev_min = (
ser_pres_level[ser_pres_level > p_max].idxmin(),
ser_pres_level[ser_pres_level < p_min].idxmax()
)
list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100
list_pres_sel = list_pres_sel.map(int).map(str).to_list()
return list_pres_sel
|
[
"def",
"sel_list_pres",
"(",
"ds_sfc_x",
")",
":",
"p_min",
",",
"p_max",
"=",
"ds_sfc_x",
".",
"sp",
".",
"min",
"(",
")",
".",
"values",
",",
"ds_sfc_x",
".",
"sp",
".",
"max",
"(",
")",
".",
"values",
"list_pres_level",
"=",
"[",
"'1'",
",",
"'2'",
",",
"'3'",
",",
"'5'",
",",
"'7'",
",",
"'10'",
",",
"'20'",
",",
"'30'",
",",
"'50'",
",",
"'70'",
",",
"'100'",
",",
"'125'",
",",
"'150'",
",",
"'175'",
",",
"'200'",
",",
"'225'",
",",
"'250'",
",",
"'300'",
",",
"'350'",
",",
"'400'",
",",
"'450'",
",",
"'500'",
",",
"'550'",
",",
"'600'",
",",
"'650'",
",",
"'700'",
",",
"'750'",
",",
"'775'",
",",
"'800'",
",",
"'825'",
",",
"'850'",
",",
"'875'",
",",
"'900'",
",",
"'925'",
",",
"'950'",
",",
"'975'",
",",
"'1000'",
",",
"]",
"ser_pres_level",
"=",
"pd",
".",
"Series",
"(",
"list_pres_level",
")",
".",
"map",
"(",
"int",
")",
"*",
"100",
"pos_lev_max",
",",
"pos_lev_min",
"=",
"(",
"ser_pres_level",
"[",
"ser_pres_level",
">",
"p_max",
"]",
".",
"idxmin",
"(",
")",
",",
"ser_pres_level",
"[",
"ser_pres_level",
"<",
"p_min",
"]",
".",
"idxmax",
"(",
")",
")",
"list_pres_sel",
"=",
"ser_pres_level",
".",
"loc",
"[",
"pos_lev_min",
":",
"pos_lev_max",
"]",
"/",
"100",
"list_pres_sel",
"=",
"list_pres_sel",
".",
"map",
"(",
"int",
")",
".",
"map",
"(",
"str",
")",
".",
"to_list",
"(",
")",
"return",
"list_pres_sel"
] |
select proper levels for model level data download
|
[
"select",
"proper",
"levels",
"for",
"model",
"level",
"data",
"download"
] |
47178bd5aee50a059414e3e504940662fbfae0dc
|
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L811-L838
|
train
|
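A sketch of the level selection above, assuming `ds_sfc_x` is a surface Dataset whose `sp` variable is surface pressure in Pa:

levels = sel_list_pres(ds_sfc_x)
print(levels)  # e.g. ['900', '925', '950', '975', '1000'], in hPa as strings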
ecell/ecell4
|
ecell4/util/simulation.py
|
load_world
|
def load_world(filename):
"""
Load a world from the given HDF5 filename.
The return type is determined by ``ecell4_base.core.load_version_information``.
Parameters
----------
filename : str
A HDF5 filename.
Returns
-------
w : World
Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``,
``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``.
"""
import ecell4_base
vinfo = ecell4_base.core.load_version_information(filename)
if vinfo.startswith("ecell4-bd"):
return ecell4_base.bd.World(filename)
elif vinfo.startswith("ecell4-egfrd"):
return ecell4_base.egfrd.World(filename)
elif vinfo.startswith("ecell4-meso"):
return ecell4_base.meso.World(filename)
elif vinfo.startswith("ecell4-ode"):
return ecell4_base.ode.World(filename)
elif vinfo.startswith("ecell4-gillespie"):
return ecell4_base.gillespie.World(filename)
elif vinfo.startswith("ecell4-spatiocyte"):
return ecell4_base.spatiocyte.World(filename)
elif vinfo == "":
raise RuntimeError("No version information was found in [{0}]".format(filename))
raise RuntimeError("Unkown version information [{0}]".format(vinfo))
|
python
|
def load_world(filename):
"""
Load a world from the given HDF5 filename.
The return type is determined by ``ecell4_base.core.load_version_information``.
Parameters
----------
filename : str
A HDF5 filename.
Returns
-------
w : World
Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``,
``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``.
"""
import ecell4_base
vinfo = ecell4_base.core.load_version_information(filename)
if vinfo.startswith("ecell4-bd"):
return ecell4_base.bd.World(filename)
elif vinfo.startswith("ecell4-egfrd"):
return ecell4_base.egfrd.World(filename)
elif vinfo.startswith("ecell4-meso"):
return ecell4_base.meso.World(filename)
elif vinfo.startswith("ecell4-ode"):
return ecell4_base.ode.World(filename)
elif vinfo.startswith("ecell4-gillespie"):
return ecell4_base.gillespie.World(filename)
elif vinfo.startswith("ecell4-spatiocyte"):
return ecell4_base.spatiocyte.World(filename)
elif vinfo == "":
raise RuntimeError("No version information was found in [{0}]".format(filename))
raise RuntimeError("Unkown version information [{0}]".format(vinfo))
|
[
"def",
"load_world",
"(",
"filename",
")",
":",
"import",
"ecell4_base",
"vinfo",
"=",
"ecell4_base",
".",
"core",
".",
"load_version_information",
"(",
"filename",
")",
"if",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-bd\"",
")",
":",
"return",
"ecell4_base",
".",
"bd",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-egfrd\"",
")",
":",
"return",
"ecell4_base",
".",
"egfrd",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-meso\"",
")",
":",
"return",
"ecell4_base",
".",
"meso",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-ode\"",
")",
":",
"return",
"ecell4_base",
".",
"ode",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-gillespie\"",
")",
":",
"return",
"ecell4_base",
".",
"gillespie",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
".",
"startswith",
"(",
"\"ecell4-spatiocyte\"",
")",
":",
"return",
"ecell4_base",
".",
"spatiocyte",
".",
"World",
"(",
"filename",
")",
"elif",
"vinfo",
"==",
"\"\"",
":",
"raise",
"RuntimeError",
"(",
"\"No version information was found in [{0}]\"",
".",
"format",
"(",
"filename",
")",
")",
"raise",
"RuntimeError",
"(",
"\"Unkown version information [{0}]\"",
".",
"format",
"(",
"vinfo",
")",
")"
] |
Load a world from the given HDF5 filename.
The return type is determined by ``ecell4_base.core.load_version_information``.
Parameters
----------
filename : str
A HDF5 filename.
Returns
-------
w : World
Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``,
``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``.
|
[
"Load",
"a",
"world",
"from",
"the",
"given",
"HDF5",
"filename",
".",
"The",
"return",
"type",
"is",
"determined",
"by",
"ecell4_base",
".",
"core",
".",
"load_version_information",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/simulation.py#L10-L44
|
train
|
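A minimal sketch of the loader above; the filename is a placeholder for a world previously written out with its `save` method:

w = load_world('world.h5')
print(type(w))  # concrete class depends on the stored version string
print(w.t())    # simulation time recorded in the HDF5 file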
ecell/ecell4
|
ecell4/util/show.py
|
show
|
def show(target, *args, **kwargs):
"""
A utility function to display the given target object in the proper way.
Parameters
----------
target : NumberObserver, TrajectoryObserver, World, str
When a NumberObserver object is given, show it with viz.plot_number_observer.
When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer.
When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
"""
if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver, ecell4_base.core.NumberObserver, ecell4_base.core.TimingNumberObserver, )):
plot_number_observer(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver, ecell4_base.core.FixedIntervalTrackingObserver)):
plot_trajectory(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.ode.ODEWorld, ecell4_base.gillespie.GillespieWorld, ecell4_base.spatiocyte.SpatiocyteWorld, ecell4_base.meso.MesoscopicWorld, ecell4_base.bd.BDWorld, ecell4_base.egfrd.EGFRDWorld)):
plot_world(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.Model, ecell4_base.core.NetworkModel, ecell4_base.core.NetfreeModel)):
dump_model(target)
elif isinstance(target, str):
try:
w = simulation.load_world(target)
except RuntimeError as e:
raise ValueError("The given target [{}] is not supported.".format(repr(target)))
else:
show(w, *args, **kwargs)
else:
raise ValueError("The given target [{}] is not supported.".format(repr(target)))
|
python
|
def show(target, *args, **kwargs):
"""
A utility function to display the given target object in the proper way.
Parameters
----------
target : NumberObserver, TrajectoryObserver, World, str
When a NumberObserver object is given, show it with viz.plot_number_observer.
When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer.
When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
"""
if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver, ecell4_base.core.NumberObserver, ecell4_base.core.TimingNumberObserver, )):
plot_number_observer(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver, ecell4_base.core.FixedIntervalTrackingObserver)):
plot_trajectory(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.ode.ODEWorld, ecell4_base.gillespie.GillespieWorld, ecell4_base.spatiocyte.SpatiocyteWorld, ecell4_base.meso.MesoscopicWorld, ecell4_base.bd.BDWorld, ecell4_base.egfrd.EGFRDWorld)):
plot_world(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.Model, ecell4_base.core.NetworkModel, ecell4_base.core.NetfreeModel)):
dump_model(target)
elif isinstance(target, str):
try:
w = simulation.load_world(target)
except RuntimeError as e:
raise ValueError("The given target [{}] is not supported.".format(repr(target)))
else:
show(w, *args, **kwargs)
else:
raise ValueError("The given target [{}] is not supported.".format(repr(target)))
|
[
"def",
"show",
"(",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"target",
",",
"(",
"ecell4_base",
".",
"core",
".",
"FixedIntervalNumberObserver",
",",
"ecell4_base",
".",
"core",
".",
"NumberObserver",
",",
"ecell4_base",
".",
"core",
".",
"TimingNumberObserver",
",",
")",
")",
":",
"plot_number_observer",
"(",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"target",
",",
"(",
"ecell4_base",
".",
"core",
".",
"FixedIntervalTrajectoryObserver",
",",
"ecell4_base",
".",
"core",
".",
"FixedIntervalTrackingObserver",
")",
")",
":",
"plot_trajectory",
"(",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"target",
",",
"(",
"ecell4_base",
".",
"ode",
".",
"ODEWorld",
",",
"ecell4_base",
".",
"gillespie",
".",
"GillespieWorld",
",",
"ecell4_base",
".",
"spatiocyte",
".",
"SpatiocyteWorld",
",",
"ecell4_base",
".",
"meso",
".",
"MesoscopicWorld",
",",
"ecell4_base",
".",
"bd",
".",
"BDWorld",
",",
"ecell4_base",
".",
"egfrd",
".",
"EGFRDWorld",
")",
")",
":",
"plot_world",
"(",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"target",
",",
"(",
"ecell4_base",
".",
"core",
".",
"Model",
",",
"ecell4_base",
".",
"core",
".",
"NetworkModel",
",",
"ecell4_base",
".",
"core",
".",
"NetfreeModel",
")",
")",
":",
"dump_model",
"(",
"target",
")",
"elif",
"isinstance",
"(",
"target",
",",
"str",
")",
":",
"try",
":",
"w",
"=",
"simulation",
".",
"load_world",
"(",
"target",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"The given target [{}] is not supported.\"",
".",
"format",
"(",
"repr",
"(",
"target",
")",
")",
")",
"else",
":",
"show",
"(",
"w",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"The given target [{}] is not supported.\"",
".",
"format",
"(",
"repr",
"(",
"target",
")",
")",
")"
] |
A utility function to display the given target object in the proper way.
Parameters
----------
target : NumberObserver, TrajectoryObserver, World, str
When a NumberObserver object is given, show it with viz.plot_number_observer.
When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer.
When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
|
[
"An",
"utility",
"function",
"to",
"display",
"the",
"given",
"target",
"object",
"in",
"the",
"proper",
"way",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/show.py#L15-L43
|
train
|
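Illustrative calls covering the dispatch branches above (the observer, world and filename are placeholders):

show(obs)         # NumberObserver  -> plot_number_observer
show(w)           # World           -> plot_world
show('world.h5')  # str             -> load_world, then recurse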
ecell/ecell4
|
ecell4/extra/azure_batch.py
|
print_batch_exception
|
def print_batch_exception(batch_exception):
"""Prints the contents of the specified Batch exception.
:param batch_exception: The `BatchErrorException` to log.
"""
_log.error('-------------------------------------------')
_log.error('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
_log.error(batch_exception.error.message.value)
if batch_exception.error.values:
_log.error('')
for mesg in batch_exception.error.values:
_log.error('{}:\t{}'.format(mesg.key, mesg.value))
_log.error('-------------------------------------------')
|
python
|
def print_batch_exception(batch_exception):
"""Prints the contents of the specified Batch exception.
:param batch_exception: The `BatchErrorException` to log.
"""
_log.error('-------------------------------------------')
_log.error('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
_log.error(batch_exception.error.message.value)
if batch_exception.error.values:
_log.error('')
for mesg in batch_exception.error.values:
_log.error('{}:\t{}'.format(mesg.key, mesg.value))
_log.error('-------------------------------------------')
|
[
"def",
"print_batch_exception",
"(",
"batch_exception",
")",
":",
"_log",
".",
"error",
"(",
"'-------------------------------------------'",
")",
"_log",
".",
"error",
"(",
"'Exception encountered:'",
")",
"if",
"batch_exception",
".",
"error",
"and",
"batch_exception",
".",
"error",
".",
"message",
"and",
"batch_exception",
".",
"error",
".",
"message",
".",
"value",
":",
"_log",
".",
"error",
"(",
"batch_exception",
".",
"error",
".",
"message",
".",
"value",
")",
"if",
"batch_exception",
".",
"error",
".",
"values",
":",
"_log",
".",
"error",
"(",
"''",
")",
"for",
"mesg",
"in",
"batch_exception",
".",
"error",
".",
"values",
":",
"_log",
".",
"error",
"(",
"'{}:\\t{}'",
".",
"format",
"(",
"mesg",
".",
"key",
",",
"mesg",
".",
"value",
")",
")",
"_log",
".",
"error",
"(",
"'-------------------------------------------'",
")"
] |
Prints the contents of the specified Batch exception.
:param batch_exception: The `BatchErrorException` to log.
|
[
"Prints",
"the",
"contents",
"of",
"the",
"specified",
"Batch",
"exception",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L45-L60
|
train
|
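Typical usage mirrors the `create_pool` row further down: catch the Batch error, log its structured details, then re-raise:

import azure.batch.models as batchmodels

try:
    batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
    print_batch_exception(err)
    raise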
ecell/ecell4
|
ecell4/extra/azure_batch.py
|
upload_file_to_container
|
def upload_file_to_container(block_blob_client, container_name, file_path):
"""Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
_log.info('Uploading file {} to container [{}]...'.format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
|
python
|
def upload_file_to_container(block_blob_client, container_name, file_path):
"""Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
_log.info('Uploading file {} to container [{}]...'.format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
|
[
"def",
"upload_file_to_container",
"(",
"block_blob_client",
",",
"container_name",
",",
"file_path",
")",
":",
"blob_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
"_log",
".",
"info",
"(",
"'Uploading file {} to container [{}]...'",
".",
"format",
"(",
"file_path",
",",
"container_name",
")",
")",
"block_blob_client",
".",
"create_blob_from_path",
"(",
"container_name",
",",
"blob_name",
",",
"file_path",
")",
"sas_token",
"=",
"block_blob_client",
".",
"generate_blob_shared_access_signature",
"(",
"container_name",
",",
"blob_name",
",",
"permission",
"=",
"azureblob",
".",
"BlobPermissions",
".",
"READ",
",",
"expiry",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"2",
")",
")",
"sas_url",
"=",
"block_blob_client",
".",
"make_blob_url",
"(",
"container_name",
",",
"blob_name",
",",
"sas_token",
"=",
"sas_token",
")",
"return",
"batchmodels",
".",
"ResourceFile",
"(",
"http_url",
"=",
"sas_url",
",",
"file_path",
"=",
"blob_name",
")"
] |
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
|
[
"Uploads",
"a",
"local",
"file",
"to",
"an",
"Azure",
"Blob",
"storage",
"container",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L62-L91
|
train
|
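A sketch of the upload step, assuming the legacy `azure-storage` SDK that this code targets (the account name, key, container and file are placeholders):

import azure.storage.blob as azureblob

blob_client = azureblob.BlockBlobService(
    account_name='mystorageaccount', account_key='<key>')
res_file = upload_file_to_container(blob_client, 'input', 'task.py')
print(res_file.http_url)  # SAS URL a Batch task can download from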
ecell/ecell4
|
ecell4/extra/azure_batch.py
|
get_container_sas_token
|
def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
"""Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
"""
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token
|
python
|
def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
"""Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
"""
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token
|
[
"def",
"get_container_sas_token",
"(",
"block_blob_client",
",",
"container_name",
",",
"blob_permissions",
")",
":",
"# Obtain the SAS token for the container, setting the expiry time and",
"# permissions. In this case, no start time is specified, so the shared",
"# access signature becomes valid immediately.",
"container_sas_token",
"=",
"block_blob_client",
".",
"generate_container_shared_access_signature",
"(",
"container_name",
",",
"permission",
"=",
"blob_permissions",
",",
"expiry",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"2",
")",
")",
"return",
"container_sas_token"
] |
Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
|
[
"Obtains",
"a",
"shared",
"access",
"signature",
"granting",
"the",
"specified",
"permissions",
"to",
"the",
"container",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L93-L114
|
train
|
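A sketch of granting write access to an output container so tasks can upload results (the account and container names are placeholders, and `blob_client` is the client from the previous row):

sas = get_container_sas_token(
    blob_client, 'output', azureblob.BlobPermissions.WRITE)
output_url = 'https://{}.blob.core.windows.net/{}?{}'.format(
    'mystorageaccount', 'output', sas)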
ecell/ecell4
|
ecell4/extra/azure_batch.py
|
create_pool
|
def create_pool(batch_service_client, pool_id,
resource_files, publisher, offer, sku,
task_file, vm_size, node_count):
"""Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param list resource_files: A collection of resource files for the pool's
start task.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
:param str task_file: A file name of the script
:param str vm_size: A type of vm
:param int node_count: The number of nodes
"""
_log.info('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# Specify the commands for the pool's start task. The start task is run
# on each node as it joins the pool, and when it's rebooted or re-imaged.
# We use the start task to prep the node for running our task script.
task_commands = [
# Copy the python_tutorial_task.py script to the "shared" directory
# that all tasks that run on the node have access to. Note that
# we are using the -p flag with cp to preserve the file uid/gid,
# otherwise since this start task is run as an admin, it would not
# be accessible by tasks run as a non-admin user.
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)),
# Install pip
'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python',
# Install the azure-storage module so that the task script can access
# Azure Blob storage, pre-cryptography version
'pip install azure-storage==0.32.0',
# Install E-Cell 4
'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl']
# Get the node agent SKU and image reference for the virtual machine
# configuration.
# For more information about the virtual machine configuration, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
sku_to_use, image_ref_to_use = \
select_latest_verified_vm_image_with_node_agent_sku(
batch_service_client, publisher, offer, sku)
user = batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use),
vm_size=vm_size,
target_dedicated_nodes=0,
target_low_priority_nodes=node_count,
start_task=batch.models.StartTask(
command_line=wrap_commands_in_shell('linux', task_commands),
user_identity=batchmodels.UserIdentity(auto_user=user),
wait_for_success=True,
resource_files=resource_files),
)
try:
batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise
|
python
|
def create_pool(batch_service_client, pool_id,
resource_files, publisher, offer, sku,
task_file, vm_size, node_count):
"""Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param list resource_files: A collection of resource files for the pool's
start task.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
:param str task_file: A file name of the script
:param str vm_size: A type of vm
:param int node_count: The number of nodes
"""
_log.info('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# Specify the commands for the pool's start task. The start task is run
# on each node as it joins the pool, and when it's rebooted or re-imaged.
# We use the start task to prep the node for running our task script.
task_commands = [
# Copy the python_tutorial_task.py script to the "shared" directory
# that all tasks that run on the node have access to. Note that
# we are using the -p flag with cp to preserve the file uid/gid,
# otherwise since this start task is run as an admin, it would not
# be accessible by tasks run as a non-admin user.
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)),
# Install pip
'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python',
# Install the azure-storage module so that the task script can access
# Azure Blob storage, pre-cryptography version
'pip install azure-storage==0.32.0',
# Install E-Cell 4
'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl']
# Get the node agent SKU and image reference for the virtual machine
# configuration.
# For more information about the virtual machine configuration, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
sku_to_use, image_ref_to_use = \
select_latest_verified_vm_image_with_node_agent_sku(
batch_service_client, publisher, offer, sku)
user = batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use),
vm_size=vm_size,
target_dedicated_nodes=0,
target_low_priority_nodes=node_count,
start_task=batch.models.StartTask(
command_line=wrap_commands_in_shell('linux', task_commands),
user_identity=batchmodels.UserIdentity(auto_user=user),
wait_for_success=True,
resource_files=resource_files),
)
try:
batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise
|
[
"def",
"create_pool",
"(",
"batch_service_client",
",",
"pool_id",
",",
"resource_files",
",",
"publisher",
",",
"offer",
",",
"sku",
",",
"task_file",
",",
"vm_size",
",",
"node_count",
")",
":",
"_log",
".",
"info",
"(",
"'Creating pool [{}]...'",
".",
"format",
"(",
"pool_id",
")",
")",
"# Create a new pool of Linux compute nodes using an Azure Virtual Machines",
"# Marketplace image. For more information about creating pools of Linux",
"# nodes, see:",
"# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/",
"# Specify the commands for the pool's start task. The start task is run",
"# on each node as it joins the pool, and when it's rebooted or re-imaged.",
"# We use the start task to prep the node for running our task script.",
"task_commands",
"=",
"[",
"# Copy the python_tutorial_task.py script to the \"shared\" directory",
"# that all tasks that run on the node have access to. Note that",
"# we are using the -p flag with cp to preserve the file uid/gid,",
"# otherwise since this start task is run as an admin, it would not",
"# be accessible by tasks run as a non-admin user.",
"'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"task_file",
")",
")",
",",
"# Install pip",
"'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python'",
",",
"# Install the azure-storage module so that the task script can access",
"# Azure Blob storage, pre-cryptography version",
"'pip install azure-storage==0.32.0'",
",",
"# Install E-Cell 4",
"'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl'",
"]",
"# Get the node agent SKU and image reference for the virtual machine",
"# configuration.",
"# For more information about the virtual machine configuration, see:",
"# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/",
"sku_to_use",
",",
"image_ref_to_use",
"=",
"select_latest_verified_vm_image_with_node_agent_sku",
"(",
"batch_service_client",
",",
"publisher",
",",
"offer",
",",
"sku",
")",
"user",
"=",
"batchmodels",
".",
"AutoUserSpecification",
"(",
"scope",
"=",
"batchmodels",
".",
"AutoUserScope",
".",
"pool",
",",
"elevation_level",
"=",
"batchmodels",
".",
"ElevationLevel",
".",
"admin",
")",
"new_pool",
"=",
"batch",
".",
"models",
".",
"PoolAddParameter",
"(",
"id",
"=",
"pool_id",
",",
"virtual_machine_configuration",
"=",
"batchmodels",
".",
"VirtualMachineConfiguration",
"(",
"image_reference",
"=",
"image_ref_to_use",
",",
"node_agent_sku_id",
"=",
"sku_to_use",
")",
",",
"vm_size",
"=",
"vm_size",
",",
"target_dedicated_nodes",
"=",
"0",
",",
"target_low_priority_nodes",
"=",
"node_count",
",",
"start_task",
"=",
"batch",
".",
"models",
".",
"StartTask",
"(",
"command_line",
"=",
"wrap_commands_in_shell",
"(",
"'linux'",
",",
"task_commands",
")",
",",
"user_identity",
"=",
"batchmodels",
".",
"UserIdentity",
"(",
"auto_user",
"=",
"user",
")",
",",
"wait_for_success",
"=",
"True",
",",
"resource_files",
"=",
"resource_files",
")",
",",
")",
"try",
":",
"batch_service_client",
".",
"pool",
".",
"add",
"(",
"new_pool",
")",
"except",
"batchmodels",
".",
"BatchErrorException",
"as",
"err",
":",
"print_batch_exception",
"(",
"err",
")",
"raise"
] |
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param list resource_files: A collection of resource files for the pool's
start task.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
:param str task_file: A file name of the script
:param str vm_size: A type of vm
:param int node_count: The number of nodes
|
[
"Creates",
"a",
"pool",
"of",
"compute",
"nodes",
"with",
"the",
"specified",
"OS",
"settings",
"."
] |
a4a1229661c39b2059adbbacae9090e5ba664e01
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L161-L232
|
train
|
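A sketch of wiring the pool creation together, following the layout of the Azure Batch Python samples; account names, image reference, VM size and node count below are placeholders, and `res_file` comes from `upload_file_to_container` above:

import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth

credentials = batch_auth.SharedKeyCredentials('mybatchaccount', '<key>')
# note: base_url is renamed batch_url in newer azure-batch releases
batch_client = batch.BatchServiceClient(
    credentials, base_url='https://mybatchaccount.region.batch.azure.com')
create_pool(batch_client, 'ecell4-pool', [res_file],
            'Canonical', 'UbuntuServer', '16.04-LTS',
            'task.py', 'STANDARD_D2_V2', 4)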