repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---|
ghukill/pyfc4 | pyfc4/models.py | Repository.start_txn | def start_txn(self, txn_name=None):
'''
Request new transaction from repository, init new Transaction,
store in self.txns
Args:
txn_name (str): human name for transaction
Return:
(Transaction): returns instance of newly created transaction
'''
# if no name provided, create one
if not txn_name:
txn_name = uuid.uuid4().hex
# request new transaction
txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None)
# if 201, transaction was created
if txn_response.status_code == 201:
txn_uri = txn_response.headers['Location']
logger.debug("spawning transaction: %s" % txn_uri)
# init new Transaction, and pass Expires header
txn = Transaction(
self, # pass the repository
txn_name,
txn_uri,
expires = txn_response.headers['Expires'])
# append to self
self.txns[txn_name] = txn
# return
return txn | python | def start_txn(self, txn_name=None):
'''
Request new transaction from repository, init new Transaction,
store in self.txns
Args:
txn_name (str): human name for transaction
Return:
(Transaction): returns instance of newly created transaction
'''
# if no name provided, create one
if not txn_name:
txn_name = uuid.uuid4().hex
# request new transaction
txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None)
# if 201, transaction was created
if txn_response.status_code == 201:
txn_uri = txn_response.headers['Location']
logger.debug("spawning transaction: %s" % txn_uri)
# init new Transaction, and pass Expires header
txn = Transaction(
self, # pass the repository
txn_name,
txn_uri,
expires = txn_response.headers['Expires'])
# append to self
self.txns[txn_name] = txn
# return
return txn | [
"def",
"start_txn",
"(",
"self",
",",
"txn_name",
"=",
"None",
")",
":",
"# if no name provided, create one",
"if",
"not",
"txn_name",
":",
"txn_name",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"# request new transaction",
"txn_response",
"=",
"self",
".",
"api",
".",
"http_request",
"(",
"'POST'",
",",
"'%s/fcr:tx'",
"%",
"self",
".",
"root",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
"# if 201, transaction was created",
"if",
"txn_response",
".",
"status_code",
"==",
"201",
":",
"txn_uri",
"=",
"txn_response",
".",
"headers",
"[",
"'Location'",
"]",
"logger",
".",
"debug",
"(",
"\"spawning transaction: %s\"",
"%",
"txn_uri",
")",
"# init new Transaction, and pass Expires header",
"txn",
"=",
"Transaction",
"(",
"self",
",",
"# pass the repository",
"txn_name",
",",
"txn_uri",
",",
"expires",
"=",
"txn_response",
".",
"headers",
"[",
"'Expires'",
"]",
")",
"# append to self",
"self",
".",
"txns",
"[",
"txn_name",
"]",
"=",
"txn",
"# return",
"return",
"txn"
]
| Request new transaction from repository, init new Transaction,
store in self.txns
Args:
txn_name (str): human name for transaction
Return:
(Transaction): returns instance of newly created transaction | [
"Request",
"new",
"transaction",
"from",
"repository",
"init",
"new",
"Transaction",
"store",
"in",
"self",
".",
"txns"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L229-L266 | train |
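
A minimal usage sketch for `start_txn`; the endpoint and credentials are hypothetical, and the `Repository` constructor arguments are an assumption drawn from pyfc4's README:

```python
from pyfc4.models import Repository

# hypothetical endpoint and credentials for a running Fedora Commons 4 instance
repo = Repository('http://localhost:8080/rest', 'username', 'password')

# name is optional; a uuid4 hex is minted if omitted
txn = repo.start_txn('my_txn')
assert repo.txns['my_txn'] is txn
```
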
ghukill/pyfc4 | pyfc4/models.py | Repository.get_txn | def get_txn(self, txn_name, txn_uri):
'''
Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_uri (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human name for transaction
Return:
(Transaction) local instance of transactions from self.txns[txn_uri]
'''
# parse uri
txn_uri = self.parse_uri(txn_uri)
# request new transaction
txn_response = self.api.http_request('GET',txn_uri, data=None, headers=None)
# if 200, transaction exists
if txn_response.status_code == 200:
logger.debug("transactoin found: %s" % txn_uri)
# init new Transaction, and pass Expires header
txn = Transaction(
self, # pass the repository
txn_name,
txn_uri,
expires = None)
# append to self
self.txns[txn_name] = txn
# return
return txn
# if 404, transaction does not exist
elif txn_response.status_code in [404, 410]:
logger.debug("transaction does not exist: %s" % txn_uri)
return False
else:
raise Exception('HTTP %s, could not retrieve transaction' % txn_response.status_code) | python | def get_txn(self, txn_name, txn_uri):
'''
Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_uri (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human name for transaction
Return:
(Transaction) local instance of transactions from self.txns[txn_uri]
'''
# parse uri
txn_uri = self.parse_uri(txn_uri)
# request new transaction
txn_response = self.api.http_request('GET',txn_uri, data=None, headers=None)
# if 200, transaction exists
if txn_response.status_code == 200:
logger.debug("transactoin found: %s" % txn_uri)
# init new Transaction, and pass Expires header
txn = Transaction(
self, # pass the repository
txn_name,
txn_uri,
expires = None)
# append to self
self.txns[txn_name] = txn
# return
return txn
# if 404, transaction does not exist
elif txn_response.status_code in [404, 410]:
logger.debug("transaction does not exist: %s" % txn_uri)
return False
else:
raise Exception('HTTP %s, could not retrieve transaction' % txn_response.status_code) | [
"def",
"get_txn",
"(",
"self",
",",
"txn_name",
",",
"txn_uri",
")",
":",
"# parse uri",
"txn_uri",
"=",
"self",
".",
"parse_uri",
"(",
"txn_uri",
")",
"# request new transaction",
"txn_response",
"=",
"self",
".",
"api",
".",
"http_request",
"(",
"'GET'",
",",
"txn_uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
"# if 200, transaction exists",
"if",
"txn_response",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"debug",
"(",
"\"transactoin found: %s\"",
"%",
"txn_uri",
")",
"# init new Transaction, and pass Expires header",
"txn",
"=",
"Transaction",
"(",
"self",
",",
"# pass the repository",
"txn_name",
",",
"txn_uri",
",",
"expires",
"=",
"None",
")",
"# append to self",
"self",
".",
"txns",
"[",
"txn_name",
"]",
"=",
"txn",
"# return",
"return",
"txn",
"# if 404, transaction does not exist",
"elif",
"txn_response",
".",
"status_code",
"in",
"[",
"404",
",",
"410",
"]",
":",
"logger",
".",
"debug",
"(",
"\"transaction does not exist: %s\"",
"%",
"txn_uri",
")",
"return",
"False",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not retrieve transaction'",
"%",
"txn_response",
".",
"status_code",
")"
]
| Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_uri (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human name for transaction
Return:
(Transaction) local instance of transactions from self.txns[txn_uri] | [
"Retrieves",
"known",
"transaction",
"and",
"adds",
"to",
"self",
".",
"txns",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L269-L314 | train |
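
A short sketch of re-attaching to an existing transaction; the transaction URI is hypothetical, and `repo` carries over from the first sketch:

```python
# re-attach to a transaction opened elsewhere (hypothetical txn URI)
txn = repo.get_txn('my_txn', 'http://localhost:8080/rest/tx:3c51c569')
if not txn:
    # get_txn returns False on HTTP 404/410
    print('transaction expired or unknown')
```
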
ghukill/pyfc4 | pyfc4/models.py | Transaction.keep_alive | def keep_alive(self):
'''
Keep current transaction alive, updates self.expires
Args:
None
Return:
(bool): True if transaction kept alive; also sets new self.expires
'''
# keep transaction alive
txn_response = self.api.http_request('POST','%sfcr:tx' % self.root, data=None, headers=None)
# if 204, transaction kept alive
if txn_response.status_code == 204:
logger.debug("continuing transaction: %s" % self.root)
# update status and timer
self.active = True
self.expires = txn_response.headers['Expires']
return True
# if 410, transaction does not exist
elif txn_response.status_code == 410:
logger.debug("transaction does not exist: %s" % self.root)
self.active = False
return False
else:
raise Exception('HTTP %s, could not continue transaction' % txn_response.status_code) | python | def keep_alive(self):
'''
Keep current transaction alive, updates self.expires
Args:
None
Return:
(bool): True if transaction kept alive; also sets new self.expires
'''
# keep transaction alive
txn_response = self.api.http_request('POST','%sfcr:tx' % self.root, data=None, headers=None)
# if 204, transaction kept alive
if txn_response.status_code == 204:
logger.debug("continuing transaction: %s" % self.root)
# update status and timer
self.active = True
self.expires = txn_response.headers['Expires']
return True
# if 410, transaction does not exist
elif txn_response.status_code == 410:
logger.debug("transaction does not exist: %s" % self.root)
self.active = False
return False
else:
raise Exception('HTTP %s, could not continue transaction' % txn_response.status_code) | [
"def",
"keep_alive",
"(",
"self",
")",
":",
"# keep transaction alive",
"txn_response",
"=",
"self",
".",
"api",
".",
"http_request",
"(",
"'POST'",
",",
"'%sfcr:tx'",
"%",
"self",
".",
"root",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
"# if 204, transaction kept alive",
"if",
"txn_response",
".",
"status_code",
"==",
"204",
":",
"logger",
".",
"debug",
"(",
"\"continuing transaction: %s\"",
"%",
"self",
".",
"root",
")",
"# update status and timer",
"self",
".",
"active",
"=",
"True",
"self",
".",
"expires",
"=",
"txn_response",
".",
"headers",
"[",
"'Expires'",
"]",
"return",
"True",
"# if 410, transaction does not exist",
"elif",
"txn_response",
".",
"status_code",
"==",
"410",
":",
"logger",
".",
"debug",
"(",
"\"transaction does not exist: %s\"",
"%",
"self",
".",
"root",
")",
"self",
".",
"active",
"=",
"False",
"return",
"False",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not continue transaction'",
"%",
"txn_response",
".",
"status_code",
")"
]
| Keep current transaction alive, updates self.expires
Args:
None
Return:
(bool): True if transaction kept alive; also sets new self.expires | [
"Keep",
"current",
"transaction",
"alive",
"updates",
"self",
".",
"expires"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L357-L387 | train |
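
A sketch of extending a transaction's lifetime mid-job, with `repo` from the first sketch:

```python
txn = repo.start_txn('long_job')
# ... long-running work inside the transaction ...
if txn.keep_alive():
    print('new expiry: %s' % txn.expires)
else:
    print('transaction already expired')   # txn.active is now False
```
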
ghukill/pyfc4 | pyfc4/models.py | Transaction._close | def _close(self, close_type):
'''
Ends transaction by committing, or rolling back, all changes during transaction.
Args:
close_type (str): expects "commit" or "rollback"
Return:
(bool)
'''
# commit transaction
txn_response = self.api.http_request('POST','%sfcr:tx/fcr:%s' % (self.root, close_type), data=None, headers=None)
# if 204, transaction was closed
if txn_response.status_code == 204:
logger.debug("%s for transaction: %s, successful" % (close_type, self.root))
# update self.active
self.active = False
# return
return True
# if 410 or 404, transaction does not exist
elif txn_response.status_code in [404, 410]:
logger.debug("transaction does not exist: %s" % self.root)
# update self.active
self.active = False
return False
else:
raise Exception('HTTP %s, could not close transaction' % txn_response.status_code)
'''
Ends transaction by committing, or rolling back, all changes during transaction.
Args:
close_type (str): expects "commit" or "rollback"
Return:
(bool)
'''
# commit transaction
txn_response = self.api.http_request('POST','%sfcr:tx/fcr:%s' % (self.root, close_type), data=None, headers=None)
# if 204, transaction was closed
if txn_response.status_code == 204:
logger.debug("%s for transaction: %s, successful" % (close_type, self.root))
# update self.active
self.active = False
# return
return True
# if 410 or 404, transaction does not exist
elif txn_response.status_code in [404, 410]:
logger.debug("transaction does not exist: %s" % self.root)
# update self.active
self.active = False
return False
else:
raise Exception('HTTP %s, could not close transaction' % txn_response.status_code)
"def",
"_close",
"(",
"self",
",",
"close_type",
")",
":",
"# commit transaction",
"txn_response",
"=",
"self",
".",
"api",
".",
"http_request",
"(",
"'POST'",
",",
"'%sfcr:tx/fcr:%s'",
"%",
"(",
"self",
".",
"root",
",",
"close_type",
")",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
"# if 204, transaction was closed",
"if",
"txn_response",
".",
"status_code",
"==",
"204",
":",
"logger",
".",
"debug",
"(",
"\"%s for transaction: %s, successful\"",
"%",
"(",
"close_type",
",",
"self",
".",
"root",
")",
")",
"# update self.active",
"self",
".",
"active",
"=",
"False",
"# return",
"return",
"True",
"# if 410 or 404, transaction does not exist",
"elif",
"txn_response",
".",
"status_code",
"in",
"[",
"404",
",",
"410",
"]",
":",
"logger",
".",
"debug",
"(",
"\"transaction does not exist: %s\"",
"%",
"self",
".",
"root",
")",
"# update self.active",
"self",
".",
"active",
"=",
"False",
"return",
"False",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not commit transaction'",
"%",
"txn_response",
".",
"status_code",
")"
]
| Ends transaction by committing, or rolling back, all changes during transaction.
Args:
close_type (str): expects "commit" or "rollback"
Return:
(bool) | [
"Ends",
"transaction",
"by",
"committing",
"or",
"rolling",
"back",
"all",
"changes",
"during",
"transaction",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L390-L421 | train |
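
`_close` is internal; a sketch of driving it directly (in practice it would presumably be reached through public commit/rollback wrappers, which is an assumption about the surrounding API):

```python
txn = repo.start_txn()
# ... create or modify resources against txn ...
txn._close('commit')       # or txn._close('rollback')
assert txn.active is False
```
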
ghukill/pyfc4 | pyfc4/models.py | API.http_request | def http_request(self,
verb,
uri,
data=None,
headers=None,
files=None,
response_format=None,
is_rdf = True,
stream = False
):
'''
Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
with some additional convenience parameters as well.
Args:
verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
uri (rdflib.term.URIRef,str): input URI
data (str,file): payload of data to send for request, may be overridden in preparation of request
headers (dict): optional dictionary of headers passed directly to requests.request
files (dict): optional dictionary of files passed directly to requests.request
response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
is_rdf (bool): if True, set Accept header based on combination of response_format and headers
stream (bool): passed directly to requests.request for stream parameter
Returns:
requests.models.Response
'''
# set content negotiated response format for RDFSources
if is_rdf:
'''
Acceptable content negotiated response formats include:
application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)
application/n-triples
application/rdf+xml
text/n3 (or text/rdf+n3)
text/plain
text/turtle (or application/x-turtle)
'''
# set for GET requests only
if verb == 'GET':
# if no response_format has been requested to this point, use repository instance default
if not response_format:
response_format = self.repo.default_serialization
# if headers present and Accept not already set, append
if headers and 'Accept' not in headers.keys():
headers['Accept'] = response_format
# if headers are blank, init dictionary
elif not headers:
headers = {'Accept':response_format}
# prepare uri for HTTP request
if type(uri) == rdflib.term.URIRef:
uri = uri.toPython()
logger.debug("%s request for %s, format %s, headers %s" %
(verb, uri, response_format, headers))
# manually prepare request
session = requests.Session()
request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)
prepped_request = session.prepare_request(request)
response = session.send(prepped_request,
stream=stream,
)
return response | python | def http_request(self,
verb,
uri,
data=None,
headers=None,
files=None,
response_format=None,
is_rdf = True,
stream = False
):
'''
Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
with some additional convenience parameters as well.
Args:
verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
uri (rdflib.term.URIRef,str): input URI
data (str,file): payload of data to send for request, may be overridden in preparation of request
headers (dict): optional dictionary of headers passed directly to requests.request
files (dict): optional dictionary of files passed directly to requests.request
response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
is_rdf (bool): if True, set Accept header based on combination of response_format and headers
stream (bool): passed directly to requests.request for stream parameter
Returns:
requests.models.Response
'''
# set content negotiated response format for RDFSources
if is_rdf:
'''
Acceptable content negotiated response formats include:
application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)
application/n-triples
application/rdf+xml
text/n3 (or text/rdf+n3)
text/plain
text/turtle (or application/x-turtle)
'''
# set for GET requests only
if verb == 'GET':
# if no response_format has been requested to this point, use repository instance default
if not response_format:
response_format = self.repo.default_serialization
# if headers present and Accept not already set, append
if headers and 'Accept' not in headers.keys():
headers['Accept'] = response_format
# if headers are blank, init dictionary
elif not headers:
headers = {'Accept':response_format}
# prepare uri for HTTP request
if type(uri) == rdflib.term.URIRef:
uri = uri.toPython()
logger.debug("%s request for %s, format %s, headers %s" %
(verb, uri, response_format, headers))
# manually prepare request
session = requests.Session()
request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)
prepped_request = session.prepare_request(request)
response = session.send(prepped_request,
stream=stream,
)
return response | [
"def",
"http_request",
"(",
"self",
",",
"verb",
",",
"uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"files",
"=",
"None",
",",
"response_format",
"=",
"None",
",",
"is_rdf",
"=",
"True",
",",
"stream",
"=",
"False",
")",
":",
"# set content negotiated response format for RDFSources",
"if",
"is_rdf",
":",
"'''\n\t\t\tAcceptable content negotiated response formats include:\n\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n\t\t\t\tapplication/n-triples\n\t\t\t\tapplication/rdf+xml\n\t\t\t\ttext/n3 (or text/rdf+n3)\n\t\t\t\ttext/plain\n\t\t\t\ttext/turtle (or application/x-turtle)\n\t\t\t'''",
"# set for GET requests only",
"if",
"verb",
"==",
"'GET'",
":",
"# if no response_format has been requested to this point, use repository instance default",
"if",
"not",
"response_format",
":",
"response_format",
"=",
"self",
".",
"repo",
".",
"default_serialization",
"# if headers present, append",
"if",
"headers",
"and",
"'Accept'",
"not",
"in",
"headers",
".",
"keys",
"(",
")",
":",
"headers",
"[",
"'Accept'",
"]",
"=",
"response_format",
"# if headers are blank, init dictionary",
"else",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"response_format",
"}",
"# prepare uri for HTTP request",
"if",
"type",
"(",
"uri",
")",
"==",
"rdflib",
".",
"term",
".",
"URIRef",
":",
"uri",
"=",
"uri",
".",
"toPython",
"(",
")",
"logger",
".",
"debug",
"(",
"\"%s request for %s, format %s, headers %s\"",
"%",
"(",
"verb",
",",
"uri",
",",
"response_format",
",",
"headers",
")",
")",
"# manually prepare request",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"request",
"=",
"requests",
".",
"Request",
"(",
"verb",
",",
"uri",
",",
"auth",
"=",
"(",
"self",
".",
"repo",
".",
"username",
",",
"self",
".",
"repo",
".",
"password",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"files",
"=",
"files",
")",
"prepped_request",
"=",
"session",
".",
"prepare_request",
"(",
"request",
")",
"response",
"=",
"session",
".",
"send",
"(",
"prepped_request",
",",
"stream",
"=",
"stream",
",",
")",
"return",
"response"
]
| Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
with some additional convenience parameters as well.
Args:
verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
uri (rdflib.term.URIRef,str): input URI
data (str,file): payload of data to send for request, may be overridden in preparation of request
headers (dict): optional dictionary of headers passed directly to requests.request
files (dict): optional dictionary of files passed directly to requests.request
response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
is_rdf (bool): if True, set Accept header based on combination of response_format and headers
stream (bool): passed directly to requests.request for stream parameter
Returns:
requests.models.Response | [
"Primary",
"route",
"for",
"all",
"HTTP",
"requests",
"to",
"repository",
".",
"Ability",
"to",
"set",
"most",
"parameters",
"for",
"requests",
"library",
"with",
"some",
"additional",
"convenience",
"parameters",
"as",
"well",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L471-L537 | train |
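
A sketch of the two main request modes, using the signature above (the resource URI is hypothetical; `repo` from the first sketch):

```python
# content-negotiated GET of an RDF resource
response = repo.api.http_request(
    'GET',
    'http://localhost:8080/rest/foo',
    response_format='text/turtle')
print(response.status_code, response.headers.get('Content-Type'))

# non-RDF request: is_rdf=False skips the Accept header logic entirely
head = repo.api.http_request('HEAD', 'http://localhost:8080/rest/foo', is_rdf=False)
```
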
ghukill/pyfc4 | pyfc4/models.py | API.parse_rdf_payload | def parse_rdf_payload(self, data, headers):
'''
small function to parse RDF payloads from various repository endpoints
Args:
data (response.data): data from requests response
headers (response.headers): headers from requests response
Returns:
(rdflib.Graph): parsed graph
'''
# handle edge case for content-types not recognized by rdflib parser
if headers['Content-Type'].startswith('text/plain'):
logger.debug('text/plain Content-Type detected, using application/n-triples for parser')
parse_format = 'application/n-triples'
else:
parse_format = headers['Content-Type']
# clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types)
if ';charset' in parse_format:
parse_format = parse_format.split(';')[0]
# parse graph
graph = rdflib.Graph().parse(
data=data.decode('utf-8'),
format=parse_format)
# return graph
return graph | python | def parse_rdf_payload(self, data, headers):
'''
small function to parse RDF payloads from various repository endpoints
Args:
data (response.data): data from requests response
headers (response.headers): headers from requests response
Returns:
(rdflib.Graph): parsed graph
'''
# handle edge case for content-types not recognized by rdflib parser
if headers['Content-Type'].startswith('text/plain'):
logger.debug('text/plain Content-Type detected, using application/n-triples for parser')
parse_format = 'application/n-triples'
else:
parse_format = headers['Content-Type']
# clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types)
if ';charset' in parse_format:
parse_format = parse_format.split(';')[0]
# parse graph
graph = rdflib.Graph().parse(
data=data.decode('utf-8'),
format=parse_format)
# return graph
return graph | [
"def",
"parse_rdf_payload",
"(",
"self",
",",
"data",
",",
"headers",
")",
":",
"# handle edge case for content-types not recognized by rdflib parser",
"if",
"headers",
"[",
"'Content-Type'",
"]",
".",
"startswith",
"(",
"'text/plain'",
")",
":",
"logger",
".",
"debug",
"(",
"'text/plain Content-Type detected, using application/n-triples for parser'",
")",
"parse_format",
"=",
"'application/n-triples'",
"else",
":",
"parse_format",
"=",
"headers",
"[",
"'Content-Type'",
"]",
"# clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types)",
"if",
"';charset'",
"in",
"parse_format",
":",
"parse_format",
"=",
"parse_format",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"# parse graph",
"graph",
"=",
"rdflib",
".",
"Graph",
"(",
")",
".",
"parse",
"(",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"format",
"=",
"parse_format",
")",
"# return graph",
"return",
"graph"
]
| small function to parse RDF payloads from various repository endpoints
Args:
data (response.data): data from requests response
headers (response.headers): headers from requests response
Returns:
(rdflib.Graph): parsed graph | [
"small",
"function",
"to",
"parse",
"RDF",
"payloads",
"from",
"various",
"repository",
"endpoints"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L585-L615 | train |
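
A sketch chaining a GET into the parser; note the function expects bytes (it calls `.decode('utf-8')` internally), so `response.content` is the right attribute to pass:

```python
response = repo.api.http_request(
    'GET', 'http://localhost:8080/rest/foo', response_format='text/turtle')
graph = repo.api.parse_rdf_payload(response.content, response.headers)
print('%s triples parsed' % len(graph))
```
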
ghukill/pyfc4 | pyfc4/models.py | SparqlUpdate._derive_namespaces | def _derive_namespaces(self):
'''
Small method to loop through three graphs in self.diffs, identify unique namespace URIs.
Then, loop through provided dictionary of prefixes and pin one to another.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes
'''
# iterate through graphs and get unique namespace uris
for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
for s,p,o in graph:
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(p) # predicates
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Predicate URI: %s' % p)
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(o) # objects
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Object URI: %s' % o)
logger.debug(self.update_namespaces)
# build unique prefixes dictionary
# NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)
for ns_uri in self.update_namespaces:
for k in self.prefixes.__dict__:
if str(ns_uri) == str(self.prefixes.__dict__[k]):
logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri)))
self.update_prefixes[k] = self.prefixes.__dict__[k] | python | def _derive_namespaces(self):
'''
Small method to loop through three graphs in self.diffs, identify unique namespace URIs.
Then, loop through provided dictionary of prefixes and pin one to another.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes
'''
# iterate through graphs and get unique namespace uris
for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
for s,p,o in graph:
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(p) # predicates
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Predicate URI: %s' % p)
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(o) # objects
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Object URI: %s' % o)
logger.debug(self.update_namespaces)
# build unique prefixes dictionary
# NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)
for ns_uri in self.update_namespaces:
for k in self.prefixes.__dict__:
if str(ns_uri) == str(self.prefixes.__dict__[k]):
logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri)))
self.update_prefixes[k] = self.prefixes.__dict__[k] | [
"def",
"_derive_namespaces",
"(",
"self",
")",
":",
"# iterate through graphs and get unique namespace uris",
"for",
"graph",
"in",
"[",
"self",
".",
"diffs",
".",
"overlap",
",",
"self",
".",
"diffs",
".",
"removed",
",",
"self",
".",
"diffs",
".",
"added",
"]",
":",
"for",
"s",
",",
"p",
",",
"o",
"in",
"graph",
":",
"try",
":",
"ns_prefix",
",",
"ns_uri",
",",
"predicate",
"=",
"graph",
".",
"compute_qname",
"(",
"p",
")",
"# predicates",
"self",
".",
"update_namespaces",
".",
"add",
"(",
"ns_uri",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'could not parse Object URI: %s'",
"%",
"ns_uri",
")",
"try",
":",
"ns_prefix",
",",
"ns_uri",
",",
"predicate",
"=",
"graph",
".",
"compute_qname",
"(",
"o",
")",
"# objects",
"self",
".",
"update_namespaces",
".",
"add",
"(",
"ns_uri",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'could not parse Object URI: %s'",
"%",
"ns_uri",
")",
"logger",
".",
"debug",
"(",
"self",
".",
"update_namespaces",
")",
"# build unique prefixes dictionary",
"# NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)",
"for",
"ns_uri",
"in",
"self",
".",
"update_namespaces",
":",
"for",
"k",
"in",
"self",
".",
"prefixes",
".",
"__dict__",
":",
"if",
"str",
"(",
"ns_uri",
")",
"==",
"str",
"(",
"self",
".",
"prefixes",
".",
"__dict__",
"[",
"k",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"'adding prefix %s for uri %s to unique_prefixes'",
"%",
"(",
"k",
",",
"str",
"(",
"ns_uri",
")",
")",
")",
"self",
".",
"update_prefixes",
"[",
"k",
"]",
"=",
"self",
".",
"prefixes",
".",
"__dict__",
"[",
"k",
"]"
]
| Small method to loop through three graphs in self.diffs, identify unique namespace URIs.
Then, loop through provided dictionary of prefixes and pin one to another.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes | [
"Small",
"method",
"to",
"loop",
"through",
"three",
"graphs",
"in",
"self",
".",
"diffs",
"identify",
"unique",
"namespace",
"URIs",
".",
"Then",
"loop",
"through",
"provided",
"dictionary",
"of",
"prefixes",
"and",
"pin",
"one",
"to",
"another",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L641-L675 | train |
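
The method leans on rdflib's `compute_qname` (the same call the row's code makes); a standalone illustration of what that call returns for a bound namespace:

```python
import rdflib

g = rdflib.Graph()
g.bind('dc', 'http://purl.org/dc/elements/1.1/')
prefix, ns_uri, local_name = g.compute_qname(
    rdflib.URIRef('http://purl.org/dc/elements/1.1/title'))
print(prefix, ns_uri, local_name)   # -> dc http://purl.org/dc/elements/1.1/ title
```
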
ghukill/pyfc4 | pyfc4/models.py | Resource.check_exists | def check_exists(self):
'''
Check if resource exists, update self.exists, returns
Returns:
(bool): self.exists
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists | python | def check_exists(self):
'''
Check if resource exists, update self.exists, returns
Returns:
(bool): self.exists
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists | [
"def",
"check_exists",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'HEAD'",
",",
"self",
".",
"uri",
")",
"self",
".",
"status_code",
"=",
"response",
".",
"status_code",
"# resource exists",
"if",
"self",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"exists",
"=",
"True",
"# resource no longer here",
"elif",
"self",
".",
"status_code",
"==",
"410",
":",
"self",
".",
"exists",
"=",
"False",
"# resource not found",
"elif",
"self",
".",
"status_code",
"==",
"404",
":",
"self",
".",
"exists",
"=",
"False",
"return",
"self",
".",
"exists"
]
| Check if resource exists, update self.exists, returns
Returns:
(bool): self.exists | [
"Check",
"if",
"resource",
"exists",
"update",
"self",
".",
"exists",
"returns"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L801-L821 | train |
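
A usage sketch; the resource URI is hypothetical, and `repo.get_resource` returning a falsy value for missing resources is inferred from how `refresh` (below) uses it:

```python
resource = repo.get_resource('http://localhost:8080/rest/foo')
if resource and resource.check_exists():
    print('HEAD returned %s, resource exists' % resource.status_code)
```
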
ghukill/pyfc4 | pyfc4/models.py | Resource.create | def create(self, specify_uri=False, ignore_tombstone=False, serialization_format=None, stream=False, auto_refresh=None):
'''
Primary method to create resources.
Args:
specify_uri (bool): If True, uses PUT verb and sets the URI during creation. If False, uses POST and gets repository minted URI
ignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry
serialization_format(str): Content-Type header / mimetype that will be used to serialize self.rdf.graph, and set headers for PUT/POST requests
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
'''
# if resource claims existence, raise exception
if self.exists:
raise Exception('resource exists attribute True, aborting')
# else, continue
else:
# determine verb based on specify_uri parameter
if specify_uri:
verb = 'PUT'
else:
verb = 'POST'
logger.debug('creating resource %s with verb %s' % (self.uri, verb))
# check if NonRDFSource, or extension thereof
#if so, run self.binary._prep_binary()
if issubclass(type(self),NonRDFSource):
self.binary._prep_binary()
data = self.binary.data
# otherwise, prep for RDF
else:
# determine serialization
if not serialization_format:
serialization_format = self.repo.default_serialization
data = self.rdf.graph.serialize(format=serialization_format)
logger.debug('Serialized graph used for resource creation:')
logger.debug(data.decode('utf-8'))
self.headers['Content-Type'] = serialization_format
# fire creation request
response = self.repo.api.http_request(verb, self.uri, data=data, headers=self.headers, stream=stream)
return self._handle_create(response, ignore_tombstone, auto_refresh) | python | def create(self, specify_uri=False, ignore_tombstone=False, serialization_format=None, stream=False, auto_refresh=None):
'''
Primary method to create resources.
Args:
specify_uri (bool): If True, uses PUT verb and sets the URI during creation. If False, uses POST and gets repository minted URI
ignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry
serialization_format(str): Content-Type header / mimetype that will be used to serialize self.rdf.graph, and set headers for PUT/POST requests
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
'''
# if resource claims existence, raise exception
if self.exists:
raise Exception('resource exists attribute True, aborting')
# else, continue
else:
# determine verb based on specify_uri parameter
if specify_uri:
verb = 'PUT'
else:
verb = 'POST'
logger.debug('creating resource %s with verb %s' % (self.uri, verb))
# check if NonRDFSource, or extension thereof
#if so, run self.binary._prep_binary()
if issubclass(type(self),NonRDFSource):
self.binary._prep_binary()
data = self.binary.data
# otherwise, prep for RDF
else:
# determine serialization
if not serialization_format:
serialization_format = self.repo.default_serialization
data = self.rdf.graph.serialize(format=serialization_format)
logger.debug('Serialized graph used for resource creation:')
logger.debug(data.decode('utf-8'))
self.headers['Content-Type'] = serialization_format
# fire creation request
response = self.repo.api.http_request(verb, self.uri, data=data, headers=self.headers, stream=stream)
return self._handle_create(response, ignore_tombstone, auto_refresh) | [
"def",
"create",
"(",
"self",
",",
"specify_uri",
"=",
"False",
",",
"ignore_tombstone",
"=",
"False",
",",
"serialization_format",
"=",
"None",
",",
"stream",
"=",
"False",
",",
"auto_refresh",
"=",
"None",
")",
":",
"# if resource claims existence, raise exception",
"if",
"self",
".",
"exists",
":",
"raise",
"Exception",
"(",
"'resource exists attribute True, aborting'",
")",
"# else, continue",
"else",
":",
"# determine verb based on specify_uri parameter",
"if",
"specify_uri",
":",
"verb",
"=",
"'PUT'",
"else",
":",
"verb",
"=",
"'POST'",
"logger",
".",
"debug",
"(",
"'creating resource %s with verb %s'",
"%",
"(",
"self",
".",
"uri",
",",
"verb",
")",
")",
"# check if NonRDFSource, or extension thereof",
"#if so, run self.binary._prep_binary()",
"if",
"issubclass",
"(",
"type",
"(",
"self",
")",
",",
"NonRDFSource",
")",
":",
"self",
".",
"binary",
".",
"_prep_binary",
"(",
")",
"data",
"=",
"self",
".",
"binary",
".",
"data",
"# otherwise, prep for RDF",
"else",
":",
"# determine serialization",
"if",
"not",
"serialization_format",
":",
"serialization_format",
"=",
"self",
".",
"repo",
".",
"default_serialization",
"data",
"=",
"self",
".",
"rdf",
".",
"graph",
".",
"serialize",
"(",
"format",
"=",
"serialization_format",
")",
"logger",
".",
"debug",
"(",
"'Serialized graph used for resource creation:'",
")",
"logger",
".",
"debug",
"(",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"self",
".",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"serialization_format",
"# fire creation request",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"verb",
",",
"self",
".",
"uri",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"stream",
"=",
"stream",
")",
"return",
"self",
".",
"_handle_create",
"(",
"response",
",",
"ignore_tombstone",
",",
"auto_refresh",
")"
]
| Primary method to create resources.
Args:
specify_uri (bool): If True, uses PUT verb and sets the URI during creation. If False, uses POST and gets repository minted URI
ignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry
serialization_format(str): Content-Type header / mimetype that will be used to serialize self.rdf.graph, and set headers for PUT/POST requests
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh | [
"Primary",
"method",
"to",
"create",
"resources",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L824-L869 | train |
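
A sketch of both creation modes; `BasicContainer` and its constructor arguments are assumptions drawn from pyfc4's README:

```python
from pyfc4.models import BasicContainer

# POST: the repository mints a URI
bc = BasicContainer(repo)
bc.create()

# PUT: the caller chooses the URI; retry over a leftover tombstone
bc2 = BasicContainer(repo, 'foo')
bc2.create(specify_uri=True, ignore_tombstone=True)
```
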
ghukill/pyfc4 | pyfc4/models.py | Resource.options | def options(self):
'''
Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request
'''
# http request
response = self.repo.api.http_request('OPTIONS', self.uri)
return response.headers | python | def options(self):
'''
Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request
'''
# http request
response = self.repo.api.http_request('OPTIONS', self.uri)
return response.headers | [
"def",
"options",
"(",
"self",
")",
":",
"# http request",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'OPTIONS'",
",",
"self",
".",
"uri",
")",
"return",
"response",
".",
"headers"
]
| Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request | [
"Small",
"method",
"to",
"return",
"headers",
"of",
"an",
"OPTIONS",
"request",
"to",
"self",
".",
"uri"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L928-L942 | train |
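
A sketch with `bc` from the create sketch; the `Allow` header is typically the payload of interest, and the value shown is only illustrative:

```python
headers = bc.options()
print(headers.get('Allow'))   # e.g. 'GET,HEAD,OPTIONS,PUT,PATCH,DELETE,POST'
```
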
ghukill/pyfc4 | pyfc4/models.py | Resource.copy | def copy(self, destination):
'''
Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to copy resource to
Returns:
(rdflib.term.URIRef) uri of the newly copied resource
'''
# set copy headers
destination_uri = self.repo.parse_uri(destination)
# http request
response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
# handle response
if response.status_code == 201:
return destination_uri
else:
raise Exception('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri)) | python | def copy(self, destination):
'''
Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to copy resource to
Returns:
(rdflib.term.URIRef) uri of the newly copied resource
'''
# set copy headers
destination_uri = self.repo.parse_uri(destination)
# http request
response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
# handle response
if response.status_code == 201:
return destination_uri
else:
raise Exception('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri)) | [
"def",
"copy",
"(",
"self",
",",
"destination",
")",
":",
"# set move headers",
"destination_uri",
"=",
"self",
".",
"repo",
".",
"parse_uri",
"(",
"destination",
")",
"# http request",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'COPY'",
",",
"self",
".",
"uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"{",
"'Destination'",
":",
"destination_uri",
".",
"toPython",
"(",
")",
"}",
")",
"# handle response",
"if",
"response",
".",
"status_code",
"==",
"201",
":",
"return",
"destination_uri",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not move resource %s to %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"self",
".",
"uri",
",",
"destination_uri",
")",
")"
]
| Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to copy resource to
Returns:
(rdflib.term.URIRef) uri of the newly copied resource | [
"Method",
"to",
"copy",
"resource",
"to",
"another",
"location"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L986-L1008 | train |
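
A sketch; the method returns the destination URI rather than a Resource instance, so a fresh GET is needed to work with the copy (the destination path is hypothetical):

```python
new_uri = bc.copy('foo_copy')
bc_copy = repo.get_resource(new_uri)
```
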
ghukill/pyfc4 | pyfc4/models.py | Resource.delete | def delete(self, remove_tombstone=True):
'''
Method to delete resources.
Args:
remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.
Returns:
(bool)
'''
response = self.repo.api.http_request('DELETE', self.uri)
# update exists
if response.status_code == 204:
# removal successful, updating self
self._empty_resource_attributes()
if remove_tombstone:
self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
return True | python | def delete(self, remove_tombstone=True):
'''
Method to delete resources.
Args:
remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.
Returns:
(bool)
'''
response = self.repo.api.http_request('DELETE', self.uri)
# update exists
if response.status_code == 204:
# removal successful, updating self
self._empty_resource_attributes()
if remove_tombstone:
self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
return True | [
"def",
"delete",
"(",
"self",
",",
"remove_tombstone",
"=",
"True",
")",
":",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'DELETE'",
",",
"self",
".",
"uri",
")",
"# update exists",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"# removal successful, updating self",
"self",
".",
"_empty_resource_attributes",
"(",
")",
"if",
"remove_tombstone",
":",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'DELETE'",
",",
"'%s/fcr:tombstone'",
"%",
"self",
".",
"uri",
")",
"return",
"True"
]
| Method to delete resources.
Args:
remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.
Returns:
(bool) | [
"Method",
"to",
"delete",
"resources",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1011-L1033 | train |
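
A sketch; by default the tombstone at `<uri>/fcr:tombstone` is removed too, which allows the same URI to be reused immediately:

```python
bc.delete()                  # removes resource and its tombstone
assert bc.exists is False    # attributes are emptied on HTTP 204

# keep the tombstone instead (recreation then needs ignore_tombstone=True)
# bc.delete(remove_tombstone=False)
```
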
ghukill/pyfc4 | pyfc4/models.py | Resource.refresh | def refresh(self, refresh_binary=True):
'''
Performs GET request and refreshes RDF information for resource.
Args:
refresh_binary (bool): if True, also refresh the binary payload for NonRDFSource resources
Returns:
None
'''
updated_self = self.repo.get_resource(self.uri)
# if resource type of updated_self != self, raise exception
if not isinstance(self, type(updated_self)):
raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(self), type(updated_self)) )
if updated_self:
# update attributes
self.status_code = updated_self.status_code
self.rdf.data = updated_self.rdf.data
self.headers = updated_self.headers
self.exists = updated_self.exists
# update graph if RDFSource
if type(self) != NonRDFSource:
self._parse_graph()
# empty versions
self.versions = SimpleNamespace()
# if NonRDF, set binary attributes
if type(updated_self) == NonRDFSource and refresh_binary:
self.binary.refresh(updated_self)
# fire resource._post_refresh hook if exists
if hasattr(self,'_post_refresh'):
self._post_refresh()
# cleanup
del(updated_self)
else:
logger.debug('resource %s not found, dumping values' % self.uri)
self._empty_resource_attributes() | python | def refresh(self, refresh_binary=True):
'''
Performs GET request and refreshes RDF information for resource.
Args:
refresh_binary (bool): if True, also refresh the binary payload for NonRDFSource resources
Returns:
None
'''
updated_self = self.repo.get_resource(self.uri)
# if resource type of updated_self != self, raise exception
if not isinstance(self, type(updated_self)):
raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(self), type(updated_self)) )
if updated_self:
# update attributes
self.status_code = updated_self.status_code
self.rdf.data = updated_self.rdf.data
self.headers = updated_self.headers
self.exists = updated_self.exists
# update graph if RDFSource
if type(self) != NonRDFSource:
self._parse_graph()
# empty versions
self.versions = SimpleNamespace()
# if NonRDF, set binary attributes
if type(updated_self) == NonRDFSource and refresh_binary:
self.binary.refresh(updated_self)
# fire resource._post_refresh hook if exists
if hasattr(self,'_post_refresh'):
self._post_refresh()
# cleanup
del(updated_self)
else:
logger.debug('resource %s not found, dumping values' % self.uri)
self._empty_resource_attributes() | [
"def",
"refresh",
"(",
"self",
",",
"refresh_binary",
"=",
"True",
")",
":",
"updated_self",
"=",
"self",
".",
"repo",
".",
"get_resource",
"(",
"self",
".",
"uri",
")",
"# if resource type of updated_self != self, raise exception",
"if",
"not",
"isinstance",
"(",
"self",
",",
"type",
"(",
"updated_self",
")",
")",
":",
"raise",
"Exception",
"(",
"'Instantiated %s, but repository reports this resource is %s'",
"%",
"(",
"type",
"(",
"updated_self",
")",
",",
"type",
"(",
"self",
")",
")",
")",
"if",
"updated_self",
":",
"# update attributes",
"self",
".",
"status_code",
"=",
"updated_self",
".",
"status_code",
"self",
".",
"rdf",
".",
"data",
"=",
"updated_self",
".",
"rdf",
".",
"data",
"self",
".",
"headers",
"=",
"updated_self",
".",
"headers",
"self",
".",
"exists",
"=",
"updated_self",
".",
"exists",
"# update graph if RDFSource",
"if",
"type",
"(",
"self",
")",
"!=",
"NonRDFSource",
":",
"self",
".",
"_parse_graph",
"(",
")",
"# empty versions",
"self",
".",
"versions",
"=",
"SimpleNamespace",
"(",
")",
"# if NonRDF, set binary attributes",
"if",
"type",
"(",
"updated_self",
")",
"==",
"NonRDFSource",
"and",
"refresh_binary",
":",
"self",
".",
"binary",
".",
"refresh",
"(",
"updated_self",
")",
"# fire resource._post_create hook if exists",
"if",
"hasattr",
"(",
"self",
",",
"'_post_refresh'",
")",
":",
"self",
".",
"_post_refresh",
"(",
")",
"# cleanup",
"del",
"(",
"updated_self",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'resource %s not found, dumping values'",
")",
"self",
".",
"_empty_resource_attributes",
"(",
")"
]
| Performs GET request and refreshes RDF information for resource.
Args:
refresh_binary (bool): if True, also refresh the binary payload for NonRDFSource resources
Returns:
None | [
"Performs",
"GET",
"request",
"and",
"refreshes",
"RDF",
"information",
"for",
"resource",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1036-L1082 | train |
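
A sketch of re-syncing local state after out-of-band changes in the repository (hypothetical path, `repo` from the first sketch):

```python
bc = repo.get_resource('foo')
# ... resource modified elsewhere (another client, another process) ...
bc.refresh()
print(bc.rdf.data)   # freshly fetched payload
```
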
ghukill/pyfc4 | pyfc4/models.py | Resource._build_rdf | def _build_rdf(self, data=None):
'''
Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph
Args:
data (): payload from GET request, expected RDF content in various serialization formats
Returns:
None
'''
# recreate rdf data
self.rdf = SimpleNamespace()
self.rdf.data = data
self.rdf.prefixes = SimpleNamespace()
self.rdf.uris = SimpleNamespace()
# populate prefixes
for prefix,uri in self.repo.context.items():
setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))
# graph
self._parse_graph() | python | def _build_rdf(self, data=None):
'''
Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph
Args:
data (): payload from GET request, expected RDF content in various serialization formats
Returns:
None
'''
# recreate rdf data
self.rdf = SimpleNamespace()
self.rdf.data = data
self.rdf.prefixes = SimpleNamespace()
self.rdf.uris = SimpleNamespace()
# populate prefixes
for prefix,uri in self.repo.context.items():
setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))
# graph
self._parse_graph() | [
"def",
"_build_rdf",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"# recreate rdf data",
"self",
".",
"rdf",
"=",
"SimpleNamespace",
"(",
")",
"self",
".",
"rdf",
".",
"data",
"=",
"data",
"self",
".",
"rdf",
".",
"prefixes",
"=",
"SimpleNamespace",
"(",
")",
"self",
".",
"rdf",
".",
"uris",
"=",
"SimpleNamespace",
"(",
")",
"# populate prefixes",
"for",
"prefix",
",",
"uri",
"in",
"self",
".",
"repo",
".",
"context",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
".",
"rdf",
".",
"prefixes",
",",
"prefix",
",",
"rdflib",
".",
"Namespace",
"(",
"uri",
")",
")",
"# graph",
"self",
".",
"_parse_graph",
"(",
")"
]
| Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph
Args:
data (): payload from GET request, expected RDF content in various serialization formats
Returns:
None | [
"Parse",
"incoming",
"rdf",
"as",
"self",
".",
"rdf",
".",
"orig_graph",
"create",
"copy",
"at",
"self",
".",
"rdf",
".",
"graph"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1085-L1106 | train |
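
Internal, but the effect is observable: after a (re)build, each prefix from the repository's configured context is attribute-accessible as an `rdflib.Namespace`. A sketch (the `rdfs` prefix is an assumption about that context):

```python
bc = BasicContainer(repo, 'bar')   # not yet created, so exists is False
bc._build_rdf()                    # empty graph plus prefixes from repo context
print(bc.rdf.prefixes.rdfs)        # bound rdflib.Namespace, attribute-style
print(len(bc.rdf.graph))           # 0 triples until data is parsed
```
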
ghukill/pyfc4 | pyfc4/models.py | Resource._parse_graph | def _parse_graph(self):
'''
use Content-Type from headers to determine parsing method
Args:
None
Return:
None: sets self.rdf by parsing data from GET request, or setting blank graph if resource does not yet exist
'''
# if resource exists, parse self.rdf.data
if self.exists:
self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
# else, create empty graph
else:
self.rdf.graph = rdflib.Graph()
# bind any additional namespaces from repo instance, but do not override
self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
# conversely, add namespaces from parsed graph to self.rdf.prefixes
for ns_prefix, ns_uri in self.rdf.graph.namespaces():
setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
# pin old graph to resource, create copy graph for modifications
self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
# parse triples for object-like access
self.parse_object_like_triples() | python | def _parse_graph(self):
'''
use Content-Type from headers to determine parsing method
Args:
None
Return:
None: sets self.rdf by parsing data from GET request, or setting blank graph if resource does not yet exist
'''
# if resource exists, parse self.rdf.data
if self.exists:
self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
# else, create empty graph
else:
self.rdf.graph = rdflib.Graph()
# bind any additional namespaces from repo instance, but do not override
self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
# conversely, add namespaces from parsed graph to self.rdf.prefixes
for ns_prefix, ns_uri in self.rdf.graph.namespaces():
setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
# pin old graph to resource, create copy graph for modifications
self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
# parse triples for object-like access
self.parse_object_like_triples() | [
"def",
"_parse_graph",
"(",
"self",
")",
":",
"# if resource exists, parse self.rdf.data",
"if",
"self",
".",
"exists",
":",
"self",
".",
"rdf",
".",
"graph",
"=",
"self",
".",
"repo",
".",
"api",
".",
"parse_rdf_payload",
"(",
"self",
".",
"rdf",
".",
"data",
",",
"self",
".",
"headers",
")",
"# else, create empty graph",
"else",
":",
"self",
".",
"rdf",
".",
"graph",
"=",
"rdflib",
".",
"Graph",
"(",
")",
"# bind any additional namespaces from repo instance, but do not override",
"self",
".",
"rdf",
".",
"namespace_manager",
"=",
"rdflib",
".",
"namespace",
".",
"NamespaceManager",
"(",
"self",
".",
"rdf",
".",
"graph",
")",
"for",
"ns_prefix",
",",
"ns_uri",
"in",
"self",
".",
"rdf",
".",
"prefixes",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"self",
".",
"rdf",
".",
"namespace_manager",
".",
"bind",
"(",
"ns_prefix",
",",
"ns_uri",
",",
"override",
"=",
"False",
")",
"# conversely, add namespaces from parsed graph to self.rdf.prefixes",
"for",
"ns_prefix",
",",
"ns_uri",
"in",
"self",
".",
"rdf",
".",
"graph",
".",
"namespaces",
"(",
")",
":",
"setattr",
"(",
"self",
".",
"rdf",
".",
"prefixes",
",",
"ns_prefix",
",",
"rdflib",
".",
"Namespace",
"(",
"ns_uri",
")",
")",
"setattr",
"(",
"self",
".",
"rdf",
".",
"uris",
",",
"rdflib",
".",
"Namespace",
"(",
"ns_uri",
")",
",",
"ns_prefix",
")",
"# pin old graph to resource, create copy graph for modifications",
"self",
".",
"rdf",
".",
"_orig_graph",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"rdf",
".",
"graph",
")",
"# parse triples for object-like access",
"self",
".",
"parse_object_like_triples",
"(",
")"
]
| use Content-Type from headers to determine parsing method
Args:
None
Return:
None: sets self.rdf by parsing data from GET request, or setting blank graph if resource does not yet exist | [
"use",
"Content",
"-",
"Type",
"from",
"headers",
"to",
"determine",
"parsing",
"method"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1109-L1143 | train |
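
Internal; the practical upshot is two-way prefix bookkeeping plus a pinned pristine copy of the graph for later diffing. A sketch (the `ldp` prefix is assumed present in the repository context):

```python
bc._parse_graph()
print(bc.rdf.prefixes.rdf)   # namespace bound from the parsed graph
# _orig_graph is a deep copy, so local edits don't touch it
bc.rdf.graph.add((bc.uri, bc.rdf.prefixes.rdf.type, bc.rdf.prefixes.ldp.Container))
assert len(bc.rdf.graph) == len(bc.rdf._orig_graph) + 1
```
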
ghukill/pyfc4 | pyfc4/models.py | Resource.parse_object_like_triples | def parse_object_like_triples(self):
'''
method to parse triples from self.rdf.graph for object-like
access
Args:
None
Returns:
None: sets self.rdf.triples
'''
# parse triples as object-like attributes in self.rdf.triples
self.rdf.triples = SimpleNamespace() # prepare triples
for s,p,o in self.rdf.graph:
# get ns info
ns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)
# if prefix as list not yet added, add
if not hasattr(self.rdf.triples, ns_prefix):
setattr(self.rdf.triples, ns_prefix, SimpleNamespace())
# same for predicate
if not hasattr(getattr(self.rdf.triples, ns_prefix), predicate):
setattr(getattr(self.rdf.triples, ns_prefix), predicate, [])
# append object for this prefix
getattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o) | python | def parse_object_like_triples(self):
'''
method to parse triples from self.rdf.graph for object-like
access
Args:
None
Returns:
None: sets self.rdf.triples
'''
# parse triples as object-like attributes in self.rdf.triples
self.rdf.triples = SimpleNamespace() # prepare triples
for s,p,o in self.rdf.graph:
# get ns info
ns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)
# if prefix as list not yet added, add
if not hasattr(self.rdf.triples, ns_prefix):
setattr(self.rdf.triples, ns_prefix, SimpleNamespace())
# same for predicate
if not hasattr(getattr(self.rdf.triples, ns_prefix), predicate):
setattr(getattr(self.rdf.triples, ns_prefix), predicate, [])
# append object for this prefix
getattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o) | [
"def",
"parse_object_like_triples",
"(",
"self",
")",
":",
"# parse triples as object-like attributes in self.rdf.triples",
"self",
".",
"rdf",
".",
"triples",
"=",
"SimpleNamespace",
"(",
")",
"# prepare triples",
"for",
"s",
",",
"p",
",",
"o",
"in",
"self",
".",
"rdf",
".",
"graph",
":",
"# get ns info",
"ns_prefix",
",",
"ns_uri",
",",
"predicate",
"=",
"self",
".",
"rdf",
".",
"graph",
".",
"compute_qname",
"(",
"p",
")",
"# if prefix as list not yet added, add",
"if",
"not",
"hasattr",
"(",
"self",
".",
"rdf",
".",
"triples",
",",
"ns_prefix",
")",
":",
"setattr",
"(",
"self",
".",
"rdf",
".",
"triples",
",",
"ns_prefix",
",",
"SimpleNamespace",
"(",
")",
")",
"# same for predicate",
"if",
"not",
"hasattr",
"(",
"getattr",
"(",
"self",
".",
"rdf",
".",
"triples",
",",
"ns_prefix",
")",
",",
"predicate",
")",
":",
"setattr",
"(",
"getattr",
"(",
"self",
".",
"rdf",
".",
"triples",
",",
"ns_prefix",
")",
",",
"predicate",
",",
"[",
"]",
")",
"# append object for this prefix",
"getattr",
"(",
"getattr",
"(",
"self",
".",
"rdf",
".",
"triples",
",",
"ns_prefix",
")",
",",
"predicate",
")",
".",
"append",
"(",
"o",
")"
]
| method to parse triples from self.rdf.graph for object-like
access
Args:
None
Returns:
None: sets self.rdf.triples | [
"method",
"to",
"parse",
"triples",
"from",
"self",
".",
"rdf",
".",
"graph",
"for",
"object",
"-",
"like",
"access"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1146-L1175 | train |
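Once parse_object_like_triples has run, parsed triples are reachable as namespaced attributes. A minimal usage sketch, assuming a running Fedora repository at a hypothetical endpoint and a hypothetical resource 'foo' (credentials and URIs are illustrative, not from the source):

from pyfc4.models import Repository

repo = Repository('http://localhost:8080/rest', 'username', 'password')  # hypothetical endpoint/credentials
foo = repo.get_resource('foo')

# object-like access built by parse_object_like_triples
print(foo.rdf.triples.rdf.type)      # list of rdf:type objects
print(foo.rdf.triples.ldp.contains)  # list of ldp:contains objects, if any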
ghukill/pyfc4 | pyfc4/models.py | Resource._empty_resource_attributes | def _empty_resource_attributes(self):
'''
small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes
'''
self.status_code = 404
self.headers = {}
self.exists = False
# build RDF
self.rdf = self._build_rdf()
# if NonRDF, empty binary data
if type(self) == NonRDFSource:
self.binary.empty() | python | def _empty_resource_attributes(self):
'''
small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes
'''
self.status_code = 404
self.headers = {}
self.exists = False
# build RDF
self.rdf = self._build_rdf()
# if NonRDF, empty binary data
if type(self) == NonRDFSource:
self.binary.empty() | [
"def",
"_empty_resource_attributes",
"(",
"self",
")",
":",
"self",
".",
"status_code",
"=",
"404",
"self",
".",
"headers",
"=",
"{",
"}",
"self",
".",
"exists",
"=",
"False",
"# build RDF",
"self",
".",
"rdf",
"=",
"self",
".",
"_build_rdf",
"(",
")",
"# if NonRDF, empty binary data",
"if",
"type",
"(",
"self",
")",
"==",
"NonRDFSource",
":",
"self",
".",
"binary",
".",
"empty",
"(",
")"
]
| small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes | [
"small",
"method",
"to",
"empty",
"values",
"if",
"resource",
"is",
"removed",
"or",
"absent"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1233-L1254 | train |
ghukill/pyfc4 | pyfc4/models.py | Resource.add_triple | def add_triple(self, p, o, auto_refresh=True):
'''
add triple by providing p,o, assumes s = subject
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: adds triple to self.rdf.graph
'''
self.rdf.graph.add((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | python | def add_triple(self, p, o, auto_refresh=True):
'''
add triple by providing p,o, assumes s = subject
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: adds triple to self.rdf.graph
'''
self.rdf.graph.add((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | [
"def",
"add_triple",
"(",
"self",
",",
"p",
",",
"o",
",",
"auto_refresh",
"=",
"True",
")",
":",
"self",
".",
"rdf",
".",
"graph",
".",
"add",
"(",
"(",
"self",
".",
"uri",
",",
"p",
",",
"self",
".",
"_handle_object",
"(",
"o",
")",
")",
")",
"# determine if triples refreshed",
"self",
".",
"_handle_triple_refresh",
"(",
"auto_refresh",
")"
]
| add triple by providing p,o, assumes s = subject
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: adds triple to self.rdf.graph | [
"add",
"triple",
"by",
"providing",
"p",
"o",
"assumes",
"s",
"=",
"subject"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1290-L1307 | train |
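A short sketch of add_triple in use, continuing the hypothetical repo/foo setup above; the dc prefix is assumed to be registered in self.rdf.prefixes:

from rdflib import Literal

# stage a new triple locally, then persist with update()
foo.add_triple(foo.rdf.prefixes.dc.title, Literal('an example title'))
foo.update()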
ghukill/pyfc4 | pyfc4/models.py | Resource.set_triple | def set_triple(self, p, o, auto_refresh=True):
'''
Assuming the subject and predicate match a single triple, replaces the object of that triple.
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: modifies pre-existing triple in self.rdf.graph
'''
self.rdf.graph.set((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | python | def set_triple(self, p, o, auto_refresh=True):
'''
Assuming the subject and predicate match a single triple, replaces the object of that triple.
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: modifies pre-existing triple in self.rdf.graph
'''
self.rdf.graph.set((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | [
"def",
"set_triple",
"(",
"self",
",",
"p",
",",
"o",
",",
"auto_refresh",
"=",
"True",
")",
":",
"self",
".",
"rdf",
".",
"graph",
".",
"set",
"(",
"(",
"self",
".",
"uri",
",",
"p",
",",
"self",
".",
"_handle_object",
"(",
"o",
")",
")",
")",
"# determine if triples refreshed",
"self",
".",
"_handle_triple_refresh",
"(",
"auto_refresh",
")"
]
| Assuming the subject and predicate match a single triple, replaces the object of that triple.
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: modifies pre-existing triple in self.rdf.graph | [
"Assuming",
"the",
"predicate",
"or",
"object",
"matches",
"a",
"single",
"triple",
"sets",
"the",
"other",
"for",
"that",
"triple",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1310-L1327 | train |
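A sketch contrasting set_triple with add_triple under the same assumptions: where add_triple appends another object, set_triple leaves a single object for the subject/predicate pair:

# replace whatever dc:title currently holds with one value
foo.set_triple(foo.rdf.prefixes.dc.title, 'a replacement title')
foo.update()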
ghukill/pyfc4 | pyfc4/models.py | Resource.remove_triple | def remove_triple(self, p, o, auto_refresh=True):
'''
remove triple by supplying p,o
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: removes triple from self.rdf.graph
'''
self.rdf.graph.remove((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | python | def remove_triple(self, p, o, auto_refresh=True):
'''
remove triple by supplying p,o
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: removes triple from self.rdf.graph
'''
self.rdf.graph.remove((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | [
"def",
"remove_triple",
"(",
"self",
",",
"p",
",",
"o",
",",
"auto_refresh",
"=",
"True",
")",
":",
"self",
".",
"rdf",
".",
"graph",
".",
"remove",
"(",
"(",
"self",
".",
"uri",
",",
"p",
",",
"self",
".",
"_handle_object",
"(",
"o",
")",
")",
")",
"# determine if triples refreshed",
"self",
".",
"_handle_triple_refresh",
"(",
"auto_refresh",
")"
]
| remove triple by supplying p,o
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: removes triple from self.rdf.graph | [
"remove",
"triple",
"by",
"supplying",
"p",
"o"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1330-L1347 | train |
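And the removal counterpart, sketched under the same assumptions; the p,o pair must match an existing triple for anything to be removed:

# drop a specific p,o pair from the local graph, then persist
foo.remove_triple(foo.rdf.prefixes.dc.title, 'a replacement title')
foo.update()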
ghukill/pyfc4 | pyfc4/models.py | Resource._handle_triple_refresh | def _handle_triple_refresh(self, auto_refresh):
'''
method to refresh self.rdf.triples if auto_refresh or defaults set to True
'''
# if auto_refresh set, and True, refresh
if auto_refresh:
self.parse_object_like_triples()
# else, if auto_refresh is not set (None), check repository instance default
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.parse_object_like_triples() | python | def _handle_triple_refresh(self, auto_refresh):
'''
method to refresh self.rdf.triples if auto_refresh or defaults set to True
'''
# if auto_refresh set, and True, refresh
if auto_refresh:
self.parse_object_like_triples()
# else, if auto_refresh is not set (None), check repository instance default
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.parse_object_like_triples() | [
"def",
"_handle_triple_refresh",
"(",
"self",
",",
"auto_refresh",
")",
":",
"# if auto_refresh set, and True, refresh",
"if",
"auto_refresh",
":",
"self",
".",
"parse_object_like_triples",
"(",
")",
"# else, if auto_refresh is not set (None), check repository instance default",
"elif",
"auto_refresh",
"==",
"None",
":",
"if",
"self",
".",
"repo",
".",
"default_auto_refresh",
":",
"self",
".",
"parse_object_like_triples",
"(",
")"
]
| method to refresh self.rdf.triples if auto_refresh or defaults set to True | [
"method",
"to",
"refresh",
"self",
".",
"rdf",
".",
"triples",
"if",
"auto_refresh",
"or",
"defaults",
"set",
"to",
"True"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1350-L1363 | train |
ghukill/pyfc4 | pyfc4/models.py | Resource.update | def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):
'''
Method to update resources in repository. Firing this method computes the difference between the local modified graph and the original one,
creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.
Note: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata
If the resource is NonRDF (Binary), this method also updates the binary data.
Args:
sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
update_binary (bool): If True, and resource is NonRDF, updates binary data as well
Returns:
(bool)
'''
# run diff on graphs, send as PATCH request
self._diff_graph()
sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)
if sparql_query_only:
return sq.build_query()
response = self.repo.api.http_request(
'PATCH',
'%s/fcr:metadata' % self.uri, # send RDF updates to URI/fcr:metadata
data=sq.build_query(),
headers={'Content-Type':'application/sparql-update'})
# if RDF update not 204, raise Exception
if response.status_code != 204:
logger.debug(response.content)
raise Exception('HTTP %s, expecting 204' % response.status_code)
# if NonRDFSource, and self.binary.data is not a Response object, update binary as well
if type(self) == NonRDFSource and update_binary and type(self.binary.data) != requests.models.Response:
self.binary._prep_binary()
binary_data = self.binary.data
binary_response = self.repo.api.http_request(
'PUT',
self.uri,
data=binary_data,
headers={'Content-Type':self.binary.mimetype})
# if not refreshing RDF, still update binary here
if not auto_refresh and not self.repo.default_auto_refresh:
logger.debug("not refreshing resource RDF, but updated binary, so must refresh binary data")
updated_self = self.repo.get_resource(self.uri)
self.binary.refresh(updated_self)
# fire optional post-update hook
if hasattr(self,'_post_update'):
self._post_update()
# determine refreshing
'''
If not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data
'''
if auto_refresh:
self.refresh(refresh_binary=update_binary)
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.refresh(refresh_binary=update_binary)
return True | python | def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):
'''
Method to update resources in repository. Firing this method computes the difference between the local modified graph and the original one,
creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.
Note: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata
If the resource is NonRDF (Binary), this method also updates the binary data.
Args:
sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
update_binary (bool): If True, and resource is NonRDF, updates binary data as well
Returns:
(bool)
'''
# run diff on graphs, send as PATCH request
self._diff_graph()
sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)
if sparql_query_only:
return sq.build_query()
response = self.repo.api.http_request(
'PATCH',
'%s/fcr:metadata' % self.uri, # send RDF updates to URI/fcr:metadata
data=sq.build_query(),
headers={'Content-Type':'application/sparql-update'})
# if RDF update not 204, raise Exception
if response.status_code != 204:
logger.debug(response.content)
raise Exception('HTTP %s, expecting 204' % response.status_code)
# if NonRDFSource, and self.binary.data is not a Response object, update binary as well
if type(self) == NonRDFSource and update_binary and type(self.binary.data) != requests.models.Response:
self.binary._prep_binary()
binary_data = self.binary.data
binary_response = self.repo.api.http_request(
'PUT',
self.uri,
data=binary_data,
headers={'Content-Type':self.binary.mimetype})
# if not refreshing RDF, still update binary here
if not auto_refresh and not self.repo.default_auto_refresh:
logger.debug("not refreshing resource RDF, but updated binary, so must refresh binary data")
updated_self = self.repo.get_resource(self.uri)
self.binary.refresh(updated_self)
# fire optional post-update hook
if hasattr(self,'_post_update'):
self._post_update()
# determine refreshing
'''
If not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data
'''
if auto_refresh:
self.refresh(refresh_binary=update_binary)
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.refresh(refresh_binary=update_binary)
return True | [
"def",
"update",
"(",
"self",
",",
"sparql_query_only",
"=",
"False",
",",
"auto_refresh",
"=",
"None",
",",
"update_binary",
"=",
"True",
")",
":",
"# run diff on graphs, send as PATCH request",
"self",
".",
"_diff_graph",
"(",
")",
"sq",
"=",
"SparqlUpdate",
"(",
"self",
".",
"rdf",
".",
"prefixes",
",",
"self",
".",
"rdf",
".",
"diffs",
")",
"if",
"sparql_query_only",
":",
"return",
"sq",
".",
"build_query",
"(",
")",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'PATCH'",
",",
"'%s/fcr:metadata'",
"%",
"self",
".",
"uri",
",",
"# send RDF updates to URI/fcr:metadata",
"data",
"=",
"sq",
".",
"build_query",
"(",
")",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/sparql-update'",
"}",
")",
"# if RDF update not 204, raise Exception",
"if",
"response",
".",
"status_code",
"!=",
"204",
":",
"logger",
".",
"debug",
"(",
"response",
".",
"content",
")",
"raise",
"Exception",
"(",
"'HTTP %s, expecting 204'",
"%",
"response",
".",
"status_code",
")",
"# if NonRDFSource, and self.binary.data is not a Response object, update binary as well",
"if",
"type",
"(",
"self",
")",
"==",
"NonRDFSource",
"and",
"update_binary",
"and",
"type",
"(",
"self",
".",
"binary",
".",
"data",
")",
"!=",
"requests",
".",
"models",
".",
"Response",
":",
"self",
".",
"binary",
".",
"_prep_binary",
"(",
")",
"binary_data",
"=",
"self",
".",
"binary",
".",
"data",
"binary_response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'PUT'",
",",
"self",
".",
"uri",
",",
"data",
"=",
"binary_data",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"self",
".",
"binary",
".",
"mimetype",
"}",
")",
"# if not refreshing RDF, still update binary here",
"if",
"not",
"auto_refresh",
"and",
"not",
"self",
".",
"repo",
".",
"default_auto_refresh",
":",
"logger",
".",
"debug",
"(",
"\"not refreshing resource RDF, but updated binary, so must refresh binary data\"",
")",
"updated_self",
"=",
"self",
".",
"repo",
".",
"get_resource",
"(",
"self",
".",
"uri",
")",
"self",
".",
"binary",
".",
"refresh",
"(",
"updated_self",
")",
"# fire optional post-update hook",
"if",
"hasattr",
"(",
"self",
",",
"'_post_update'",
")",
":",
"self",
".",
"_post_update",
"(",
")",
"# determine refreshing",
"'''\n\t\tIf not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data\n\t\t'''",
"if",
"auto_refresh",
":",
"self",
".",
"refresh",
"(",
"refresh_binary",
"=",
"update_binary",
")",
"elif",
"auto_refresh",
"==",
"None",
":",
"if",
"self",
".",
"repo",
".",
"default_auto_refresh",
":",
"self",
".",
"refresh",
"(",
"refresh_binary",
"=",
"update_binary",
")",
"return",
"True"
]
| Method to update resources in repository. Firing this method computes the difference between the local modified graph and the original one,
creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.
Note: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata
If the resource is NonRDF (Binary), this method also updates the binary data.
Args:
sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
update_binary (bool): If True, and resource is NonRDF, updates binary data as well
Returns:
(bool) | [
"Method",
"to",
"update",
"resources",
"in",
"repository",
".",
"Firing",
"this",
"method",
"computes",
"the",
"difference",
"in",
"the",
"local",
"modified",
"graph",
"and",
"the",
"original",
"one",
"creates",
"an",
"instance",
"of",
"SparqlUpdate",
"and",
"builds",
"a",
"sparql",
"query",
"that",
"represents",
"these",
"differences",
"and",
"sends",
"this",
"as",
"a",
"PATCH",
"request",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1366-L1430 | train |
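A sketch of the update workflow described above, with the same hypothetical foo; staged local changes can be previewed as a SPARQL PATCH body before anything is sent:

# stage a local change
foo.add_triple(foo.rdf.prefixes.dc.description, 'updated description')

# inspect the generated SPARQL update without sending it
print(foo.update(sparql_query_only=True))

# send the PATCH; skip the post-update refresh for speed
foo.update(auto_refresh=False)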
ghukill/pyfc4 | pyfc4/models.py | Resource.children | def children(self, as_resources=False):
'''
method to return hierarchical children of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving children as resources')
children = [ self.repo.get_resource(child) for child in children ]
return children | python | def children(self, as_resources=False):
'''
method to return hierarchical children of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving children as resources')
children = [ self.repo.get_resource(child) for child in children ]
return children | [
"def",
"children",
"(",
"self",
",",
"as_resources",
"=",
"False",
")",
":",
"children",
"=",
"[",
"o",
"for",
"s",
",",
"p",
",",
"o",
"in",
"self",
".",
"rdf",
".",
"graph",
".",
"triples",
"(",
"(",
"None",
",",
"self",
".",
"rdf",
".",
"prefixes",
".",
"ldp",
".",
"contains",
",",
"None",
")",
")",
"]",
"# if as_resources, issue GET requests for children and return",
"if",
"as_resources",
":",
"logger",
".",
"debug",
"(",
"'retrieving children as resources'",
")",
"children",
"=",
"[",
"self",
".",
"repo",
".",
"get_resource",
"(",
"child",
")",
"for",
"child",
"in",
"children",
"]",
"return",
"children"
]
| method to return hierarchical children of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources | [
"method",
"to",
"return",
"hierarchical",
"children",
"of",
"this",
"resource"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1433-L1452 | train |
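A sketch of both return modes for a hypothetical container foo; note that as_resources=True issues one GET per child:

child_uris = foo.children()  # rdflib URIRefs only
for child in foo.children(as_resources=True):
	print(child.uri, type(child))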
ghukill/pyfc4 | pyfc4/models.py | Resource.parents | def parents(self, as_resources=False):
'''
method to return hierarchical parents of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
parents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving parent as resource')
parents = [ self.repo.get_resource(parent) for parent in parents ]
return parents | python | def parents(self, as_resources=False):
'''
method to return hierarchical parents of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
parents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving parent as resource')
parents = [ self.repo.get_resource(parent) for parent in parents ]
return parents | [
"def",
"parents",
"(",
"self",
",",
"as_resources",
"=",
"False",
")",
":",
"parents",
"=",
"[",
"o",
"for",
"s",
",",
"p",
",",
"o",
"in",
"self",
".",
"rdf",
".",
"graph",
".",
"triples",
"(",
"(",
"None",
",",
"self",
".",
"rdf",
".",
"prefixes",
".",
"fedora",
".",
"hasParent",
",",
"None",
")",
")",
"]",
"# if as_resources, issue GET requests for children and return",
"if",
"as_resources",
":",
"logger",
".",
"debug",
"(",
"'retrieving parent as resource'",
")",
"parents",
"=",
"[",
"self",
".",
"repo",
".",
"get_resource",
"(",
"parent",
")",
"for",
"parent",
"in",
"parents",
"]",
"return",
"parents"
]
| method to return hierarchical parents of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources | [
"method",
"to",
"return",
"hierarchical",
"parents",
"of",
"this",
"resource"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1455-L1474 | train |
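The mirror image of children(), sketched under the same assumptions:

parent_uris = foo.parents()                        # URIs only
parent_resources = foo.parents(as_resources=True)  # one GET per parent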
ghukill/pyfc4 | pyfc4/models.py | Resource.siblings | def siblings(self, as_resources=False):
'''
method to return hierarchical siblings of this resource.
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
siblings = set()
# loop through parents and get children
for parent in self.parents(as_resources=True):
for sibling in parent.children(as_resources=as_resources):
siblings.add(sibling)
# remove self
if as_resources:
siblings.remove(self)
if not as_resources:
siblings.remove(self.uri)
return list(siblings) | python | def siblings(self, as_resources=False):
'''
method to return hierarchical siblings of this resource.
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
'''
siblings = set()
# loop through parents and get children
for parent in self.parents(as_resources=True):
for sibling in parent.children(as_resources=as_resources):
siblings.add(sibling)
# remove self
if as_resources:
siblings.remove(self)
if not as_resources:
siblings.remove(self.uri)
return list(siblings) | [
"def",
"siblings",
"(",
"self",
",",
"as_resources",
"=",
"False",
")",
":",
"siblings",
"=",
"set",
"(",
")",
"# loop through parents and get children",
"for",
"parent",
"in",
"self",
".",
"parents",
"(",
"as_resources",
"=",
"True",
")",
":",
"for",
"sibling",
"in",
"parent",
".",
"children",
"(",
"as_resources",
"=",
"as_resources",
")",
":",
"siblings",
".",
"add",
"(",
"sibling",
")",
"# remove self",
"if",
"as_resources",
":",
"siblings",
".",
"remove",
"(",
"self",
")",
"if",
"not",
"as_resources",
":",
"siblings",
".",
"remove",
"(",
"self",
".",
"uri",
")",
"return",
"list",
"(",
"siblings",
")"
]
| method to return hierarchical siblings of this resource.
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of returning the URI only
Returns:
(list): list of resources | [
"method",
"to",
"return",
"hierarchical",
"siblings",
"of",
"this",
"resource",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1477-L1502 | train |
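A one-line sketch; per the implementation above, the result is the union of all parents' children with self removed:

sibling_uris = foo.siblings()  # pass as_resources=True for typed resources instead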
ghukill/pyfc4 | pyfc4/models.py | Resource.create_version | def create_version(self, version_label):
'''
method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might first require self.update() to push local changes.
Args:
version_label (str): label to be used for version
Returns:
None: a new ResourceVersion instance is created and affixed to self.versions
'''
# create version
version_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})
# if 201, assume success
if version_response.status_code == 201:
logger.debug('version created: %s' % version_response.headers['Location'])
# affix version
self._affix_version(version_response.headers['Location'], version_label) | python | def create_version(self, version_label):
'''
method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might first require self.update() to push local changes.
Args:
version_label (str): label to be used for version
Returns:
None: a new ResourceVersion instance is created and affixed to self.versions
'''
# create version
version_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})
# if 201, assume success
if version_response.status_code == 201:
logger.debug('version created: %s' % version_response.headers['Location'])
# affix version
self._affix_version(version_response.headers['Location'], version_label) | [
"def",
"create_version",
"(",
"self",
",",
"version_label",
")",
":",
"# create version",
"version_response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'POST'",
",",
"'%s/fcr:versions'",
"%",
"self",
".",
"uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"{",
"'Slug'",
":",
"version_label",
"}",
")",
"# if 201, assume success",
"if",
"version_response",
".",
"status_code",
"==",
"201",
":",
"logger",
".",
"debug",
"(",
"'version created: %s'",
"%",
"version_response",
".",
"headers",
"[",
"'Location'",
"]",
")",
"# affix version",
"self",
".",
"_affix_version",
"(",
"version_response",
".",
"headers",
"[",
"'Location'",
"]",
",",
"version_label",
")"
]
| method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might first require self.update() to push local changes.
Args:
version_label (str): label to be used for version
Returns:
None: a new ResourceVersion instance is created and affixed to self.versions | [
"method",
"to",
"create",
"a",
"new",
"version",
"of",
"the",
"resource",
"as",
"it",
"currently",
"stands"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1517-L1540 | train |
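A sketch of versioning the hypothetical foo with the label 'v1'; _affix_version is assumed to make the version reachable as an attribute of foo.versions:

# snapshot the resource as it exists server-side
foo.create_version('v1')

# affixed for object-like access
v1 = foo.versions.v1
print(v1.uri)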
ghukill/pyfc4 | pyfc4/models.py | Resource.get_versions | def get_versions(self):
'''
retrieves all versions of an object, and stores them at self.versions
Args:
None
Returns:
None: appends instances
'''
# get all versions
versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)
# parse response
versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)
# loop through fedora.hasVersion
for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):
# get label
version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()
# affix version
self._affix_version(version_uri, version_label) | python | def get_versions(self):
'''
retrieves all versions of an object, and stores them at self.versions
Args:
None
Returns:
None: appends instances
'''
# get all versions
versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)
# parse response
versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)
# loop through fedora.hasVersion
for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):
# get label
version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()
# affix version
self._affix_version(version_uri, version_label) | [
"def",
"get_versions",
"(",
"self",
")",
":",
"# get all versions",
"versions_response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'GET'",
",",
"'%s/fcr:versions'",
"%",
"self",
".",
"uri",
")",
"# parse response",
"versions_graph",
"=",
"self",
".",
"repo",
".",
"api",
".",
"parse_rdf_payload",
"(",
"versions_response",
".",
"content",
",",
"versions_response",
".",
"headers",
")",
"# loop through fedora.hasVersion",
"for",
"version_uri",
"in",
"versions_graph",
".",
"objects",
"(",
"self",
".",
"uri",
",",
"self",
".",
"rdf",
".",
"prefixes",
".",
"fedora",
".",
"hasVersion",
")",
":",
"# get label",
"version_label",
"=",
"versions_graph",
".",
"value",
"(",
"version_uri",
",",
"self",
".",
"rdf",
".",
"prefixes",
".",
"fedora",
".",
"hasVersionLabel",
",",
"None",
")",
".",
"toPython",
"(",
")",
"# affix version",
"self",
".",
"_affix_version",
"(",
"version_uri",
",",
"version_label",
")"
]
| retrieves all versions of an object, and stores them at self.versions
Args:
None
Returns:
None: appends instances | [
"retrieves",
"all",
"versions",
"of",
"an",
"object",
"and",
"stores",
"them",
"at",
"self",
".",
"versions"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1543-L1568 | train |
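For a resource whose versions were created elsewhere, a sketch of repopulating them; treating foo.versions as a SimpleNamespace of label -> ResourceVersion pairs is an assumption based on the affixing code:

foo.get_versions()
print(vars(foo.versions))  # e.g. {'v1': <ResourceVersion>, ...}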
ghukill/pyfc4 | pyfc4/models.py | Resource.dump | def dump(self,format='ttl'):
'''
Convenience method to return RDF data for resource,
optionally selecting serialization format.
Inspired by .dump from Samvera.
Args:
format (str): expecting serialization formats accepted by rdflib.serialization(format=)
'''
return self.rdf.graph.serialize(format=format).decode('utf-8') | python | def dump(self,format='ttl'):
'''
Convenience method to return RDF data for resource,
optionally selecting serialization format.
Inspired by .dump from Samvera.
Args:
format (str): expecting serialization formats accepted by rdflib.serialization(format=)
'''
return self.rdf.graph.serialize(format=format).decode('utf-8') | [
"def",
"dump",
"(",
"self",
",",
"format",
"=",
"'ttl'",
")",
":",
"return",
"self",
".",
"rdf",
".",
"graph",
".",
"serialize",
"(",
"format",
"=",
"format",
")",
".",
"decode",
"(",
"'utf-8'",
")"
]
| Convenience method to return RDF data for resource,
optionally selecting serialization format.
Inspired by .dump from Samvera.
Args:
format (str): expecting serialization formats accepted by rdflib.serialization(format=) | [
"Convenience",
"method",
"to",
"return",
"RDF",
"data",
"for",
"resource",
"optionally",
"selecting",
"serialization",
"format",
".",
"Inspired",
"by",
".",
"dump",
"from",
"Samvera",
"."
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1571-L1582 | train |
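Two quick sketches; any serialization format rdflib's serialize() accepts should work here:

print(foo.dump())              # turtle by default
print(foo.dump(format='xml'))  # rdf/xml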
ghukill/pyfc4 | pyfc4/models.py | ResourceVersion.revert_to | def revert_to(self):
'''
method to revert resource to this version by issuing PATCH
Args:
None
Returns:
None: sends PATCH request, and refreshes parent resource
'''
# send patch
response = self.resource.repo.api.http_request('PATCH', self.uri)
# if response 204
if response.status_code == 204:
logger.debug('reverting to previous version of resource, %s' % self.uri)
# refresh current resource handle
self._current_resource.refresh()
else:
raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri)) | python | def revert_to(self):
'''
method to revert resource to this version by issuing PATCH
Args:
None
Returns:
None: sends PATCH request, and refreshes parent resource
'''
# send patch
response = self.resource.repo.api.http_request('PATCH', self.uri)
# if response 204
if response.status_code == 204:
logger.debug('reverting to previous version of resource, %s' % self.uri)
# refresh current resource handle
self._current_resource.refresh()
else:
raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri)) | [
"def",
"revert_to",
"(",
"self",
")",
":",
"# send patch",
"response",
"=",
"self",
".",
"resource",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'PATCH'",
",",
"self",
".",
"uri",
")",
"# if response 204",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"logger",
".",
"debug",
"(",
"'reverting to previous version of resource, %s'",
"%",
"self",
".",
"uri",
")",
"# refresh current resource handle",
"self",
".",
"_current_resource",
".",
"refresh",
"(",
")",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not revert to resource version, %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"self",
".",
"uri",
")",
")"
]
| method to revert resource to this version by issuing PATCH
Args:
None
Returns:
None: sends PATCH request, and refreshes parent resource | [
"method",
"to",
"revert",
"resource",
"to",
"this",
"version",
"by",
"issuing",
"PATCH"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1609-L1632 | train |
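A sketch tying revert_to back to the versioning examples above:

# roll the live resource back to the 'v1' snapshot;
# the current resource handle is refreshed automatically
foo.versions.v1.revert_to()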
ghukill/pyfc4 | pyfc4/models.py | ResourceVersion.delete | def delete(self):
'''
method to remove version from resource's history
'''
# send patch
response = self.resource.repo.api.http_request('DELETE', self.uri)
# if response 204
if response.status_code == 204:
logger.debug('deleting previous version of resource, %s' % self.uri)
# remove from resource versions
delattr(self._current_resource.versions, self.label)
# if 400, likely most recent version and cannot remove
elif response.status_code == 400:
raise Exception('HTTP 400, likely most recent resource version which cannot be removed')
else:
raise Exception('HTTP %s, could not delete resource version: %s' % (response.status_code, self.uri)) | python | def delete(self):
'''
method to remove version from resource's history
'''
# send patch
response = self.resource.repo.api.http_request('DELETE', self.uri)
# if response 204
if response.status_code == 204:
logger.debug('deleting previous version of resource, %s' % self.uri)
# remove from resource versions
delattr(self._current_resource.versions, self.label)
# if 400, likely most recent version and cannot remove
elif response.status_code == 400:
raise Exception('HTTP 400, likely most recent resource version which cannot be removed')
else:
raise Exception('HTTP %s, could not delete resource version: %s' % (response.status_code, self.uri)) | [
"def",
"delete",
"(",
"self",
")",
":",
"# send patch",
"response",
"=",
"self",
".",
"resource",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'DELETE'",
",",
"self",
".",
"uri",
")",
"# if response 204",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"logger",
".",
"debug",
"(",
"'deleting previous version of resource, %s'",
"%",
"self",
".",
"uri",
")",
"# remove from resource versions",
"delattr",
"(",
"self",
".",
"_current_resource",
".",
"versions",
",",
"self",
".",
"label",
")",
"# if 400, likely most recent version and cannot remove",
"elif",
"response",
".",
"status_code",
"==",
"400",
":",
"raise",
"Exception",
"(",
"'HTTP 400, likely most recent resource version which cannot be removed'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not delete resource version: %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"self",
".",
"uri",
")",
")"
]
| method to remove version from resource's history | [
"method",
"to",
"remove",
"version",
"from",
"resource",
"s",
"history"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1635-L1656 | train |
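And pruning history, sketched with the same hypothetical label; note the HTTP 400 branch above when targeting the most recent version:

foo.versions.v1.delete()  # also removed from foo.versions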
ghukill/pyfc4 | pyfc4/models.py | BinaryData.empty | def empty(self):
'''
Method to empty attributes, particularly for use when
object is deleted but remains as variable
'''
self.resource = None
self.delivery = None
self.data = None
self.stream = False
self.mimetype = None
self.location = None | python | def empty(self):
'''
Method to empty attributes, particularly for use when
object is deleted but remains as variable
'''
self.resource = None
self.delivery = None
self.data = None
self.stream = False
self.mimetype = None
self.location = None | [
"def",
"empty",
"(",
"self",
")",
":",
"self",
".",
"resource",
"=",
"None",
"self",
".",
"delivery",
"=",
"None",
"self",
".",
"data",
"=",
"None",
"self",
".",
"stream",
"=",
"False",
"self",
".",
"mimetype",
"=",
"None",
"self",
".",
"location",
"=",
"None"
]
| Method to empty attributes, particularly for use when
object is deleted but remains as variable | [
"Method",
"to",
"empty",
"attributes",
"particularly",
"for",
"use",
"when",
"object",
"is",
"deleted",
"but",
"remains",
"as",
"variable"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1686-L1698 | train |
ghukill/pyfc4 | pyfc4/models.py | BinaryData.refresh | def refresh(self, updated_self):
'''
method to refresh binary attributes and data
Args:
updated_self (Resource): resource this binary data attaches to
Returns:
None: updates attributes
'''
logger.debug('refreshing binary attributes')
self.mimetype = updated_self.binary.mimetype
self.data = updated_self.binary.data | python | def refresh(self, updated_self):
'''
method to refresh binary attributes and data
Args:
updated_self (Resource): resource this binary data attaches to
Returns:
None: updates attributes
'''
logger.debug('refreshing binary attributes')
self.mimetype = updated_self.binary.mimetype
self.data = updated_self.binary.data | [
"def",
"refresh",
"(",
"self",
",",
"updated_self",
")",
":",
"logger",
".",
"debug",
"(",
"'refreshing binary attributes'",
")",
"self",
".",
"mimetype",
"=",
"updated_self",
".",
"binary",
".",
"mimetype",
"self",
".",
"data",
"=",
"updated_self",
".",
"binary",
".",
"data"
]
| method to refresh binary attributes and data
Args:
updated_self (Resource): resource this binary data attaches to
Returns:
None: updates attributes | [
"method",
"to",
"refresh",
"binary",
"attributes",
"and",
"data"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1701-L1715 | train |
ghukill/pyfc4 | pyfc4/models.py | BinaryData.parse_binary | def parse_binary(self):
'''
when retrieving a NonRDF resource, parse binary data and make available
via generators
'''
# derive mimetype
self.mimetype = self.resource.rdf.graph.value(
self.resource.uri,
self.resource.rdf.prefixes.ebucore.hasMimeType).toPython()
# get binary content as streamable response
self.data = self.resource.repo.api.http_request(
'GET',
self.resource.uri,
data=None,
headers={'Content-Type':self.resource.mimetype},
is_rdf=False,
stream=True) | python | def parse_binary(self):
'''
when retrieving a NonRDF resource, parse binary data and make available
via generators
'''
# derive mimetype
self.mimetype = self.resource.rdf.graph.value(
self.resource.uri,
self.resource.rdf.prefixes.ebucore.hasMimeType).toPython()
# get binary content as streamable response
self.data = self.resource.repo.api.http_request(
'GET',
self.resource.uri,
data=None,
headers={'Content-Type':self.resource.mimetype},
is_rdf=False,
stream=True) | [
"def",
"parse_binary",
"(",
"self",
")",
":",
"# derive mimetype",
"self",
".",
"mimetype",
"=",
"self",
".",
"resource",
".",
"rdf",
".",
"graph",
".",
"value",
"(",
"self",
".",
"resource",
".",
"uri",
",",
"self",
".",
"resource",
".",
"rdf",
".",
"prefixes",
".",
"ebucore",
".",
"hasMimeType",
")",
".",
"toPython",
"(",
")",
"# get binary content as stremable response",
"self",
".",
"data",
"=",
"self",
".",
"resource",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'GET'",
",",
"self",
".",
"resource",
".",
"uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"self",
".",
"resource",
".",
"mimetype",
"}",
",",
"is_rdf",
"=",
"False",
",",
"stream",
"=",
"True",
")"
]
| when retrieving a NonRDF resource, parse binary data and make available
via generators | [
"when",
"retrieving",
"a",
"NonRDF",
"resource",
"parse",
"binary",
"data",
"and",
"make",
"available",
"via",
"generators"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1718-L1737 | train |
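Because self.binary.data is left as a streaming requests response, large binaries can be consumed in chunks. A sketch, assuming 'baz' names an existing NonRDFSource in the hypothetical repo above:

baz = repo.get_resource('baz')
print(baz.binary.mimetype)

# stream to disk without loading the whole binary into memory
with open('/tmp/baz_copy', 'wb') as f:
	for chunk in baz.binary.data.iter_content(chunk_size=8192):
		f.write(chunk)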
ghukill/pyfc4 | pyfc4/models.py | BinaryData._prep_binary_content | def _prep_binary_content(self):
'''
Sets delivery method of either payload or header
Favors Content-Location header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers
'''
# nothing present
if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
elif 'Content-Location' in self.resource.headers.keys():
logger.debug('Content-Location header found, using')
self.delivery = 'header'
# if Content-Location is not set, look for self.location then self.data
elif 'Content-Location' not in self.resource.headers.keys():
# location set, trumps self.data
if self.location:
# set appropriate header
self.resource.headers['Content-Location'] = self.location
self.delivery = 'header'
# data attribute is plain text, binary, or file-like object
elif self.data:
# if file-like object, set flag for api.http_request
if isinstance(self.data, io.BufferedIOBase):
logger.debug('detected file-like object')
self.delivery = 'payload'
# else, just bytes
else:
logger.debug('detected bytes')
self.delivery = 'payload' | python | def _prep_binary_content(self):
'''
Sets delivery method of either payload or header
Favors Content-Location header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers
'''
# nothing present
if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
elif 'Content-Location' in self.resource.headers.keys():
logger.debug('Content-Location header found, using')
self.delivery = 'header'
# if Content-Location is not set, look for self.location then self.data
elif 'Content-Location' not in self.resource.headers.keys():
# location set, trumps self.data
if self.location:
# set appropriate header
self.resource.headers['Content-Location'] = self.location
self.delivery = 'header'
# data attribute is plain text, binary, or file-like object
elif self.data:
# if file-like object, set flag for api.http_request
if isinstance(self.data, io.BufferedIOBase):
logger.debug('detected file-like object')
self.delivery = 'payload'
# else, just bytes
else:
logger.debug('detected bytes')
self.delivery = 'payload' | [
"def",
"_prep_binary_content",
"(",
"self",
")",
":",
"# nothing present",
"if",
"not",
"self",
".",
"data",
"and",
"not",
"self",
".",
"location",
"and",
"'Content-Location'",
"not",
"in",
"self",
".",
"resource",
".",
"headers",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"'creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header'",
")",
"elif",
"'Content-Location'",
"in",
"self",
".",
"resource",
".",
"headers",
".",
"keys",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"'Content-Location header found, using'",
")",
"self",
".",
"delivery",
"=",
"'header'",
"# if Content-Location is not set, look for self.data_location then self.data",
"elif",
"'Content-Location'",
"not",
"in",
"self",
".",
"resource",
".",
"headers",
".",
"keys",
"(",
")",
":",
"# data_location set, trumps Content self.data",
"if",
"self",
".",
"location",
":",
"# set appropriate header",
"self",
".",
"resource",
".",
"headers",
"[",
"'Content-Location'",
"]",
"=",
"self",
".",
"location",
"self",
".",
"delivery",
"=",
"'header'",
"# data attribute is plain text, binary, or file-like object",
"elif",
"self",
".",
"data",
":",
"# if file-like object, set flag for api.http_request",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"io",
".",
"BufferedIOBase",
")",
":",
"logger",
".",
"debug",
"(",
"'detected file-like object'",
")",
"self",
".",
"delivery",
"=",
"'payload'",
"# else, just bytes",
"else",
":",
"logger",
".",
"debug",
"(",
"'detected bytes'",
")",
"self",
".",
"delivery",
"=",
"'payload'"
]
| Sets delivery method of either payload or header
Favors Content-Location header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers | [
"Sets",
"delivery",
"method",
"of",
"either",
"payload",
"or",
"header",
"Favors",
"Content",
"-",
"Location",
"header",
"if",
"set"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1785-L1826 | train |
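A sketch of the two delivery modes this method selects between; filenames and URIs are hypothetical:

from pyfc4.models import NonRDFSource

# payload delivery: bytes or a file-like object on binary.data
baz = NonRDFSource(repo, 'baz')
baz.binary.data = open('image.jpg', 'rb')
baz.binary.mimetype = 'image/jpeg'
baz.create(specify_uri=True)

# header delivery: point the repository at external content via Content-Location
qux = NonRDFSource(repo, 'qux')
qux.binary.location = 'http://example.org/image.jpg'
qux.binary.mimetype = 'image/jpeg'
qux.create(specify_uri=True)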
ghukill/pyfc4 | pyfc4/models.py | NonRDFSource.fixity | def fixity(self, response_format=None):
'''
Issues fixity check, return parsed graph
Args:
None
Returns:
(dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check)
'''
# if no response_format, use default
if not response_format:
response_format = self.repo.default_serialization
# issue GET request for fixity check
response = self.repo.api.http_request('GET', '%s/fcr:fixity' % self.uri)
# parse
fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)
# determine verdict
for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):
if outcome.toPython() == 'SUCCESS':
verdict = True
else:
verdict = False
return {
'verdict':verdict,
'premis_graph':fixity_graph
} | python | def fixity(self, response_format=None):
'''
Issues fixity check, return parsed graph
Args:
None
Returns:
(dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check)
'''
# if no response_format, use default
if not response_format:
response_format = self.repo.default_serialization
# issue GET request for fixity check
response = self.repo.api.http_request('GET', '%s/fcr:fixity' % self.uri)
# parse
fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)
# determine verdict
for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):
if outcome.toPython() == 'SUCCESS':
verdict = True
else:
verdict = False
return {
'verdict':verdict,
'premis_graph':fixity_graph
} | [
"def",
"fixity",
"(",
"self",
",",
"response_format",
"=",
"None",
")",
":",
"# if no response_format, use default",
"if",
"not",
"response_format",
":",
"response_format",
"=",
"self",
".",
"repo",
".",
"default_serialization",
"# issue GET request for fixity check",
"response",
"=",
"self",
".",
"repo",
".",
"api",
".",
"http_request",
"(",
"'GET'",
",",
"'%s/fcr:fixity'",
"%",
"self",
".",
"uri",
")",
"# parse",
"fixity_graph",
"=",
"self",
".",
"repo",
".",
"api",
".",
"parse_rdf_payload",
"(",
"response",
".",
"content",
",",
"response",
".",
"headers",
")",
"# determine verdict",
"for",
"outcome",
"in",
"fixity_graph",
".",
"objects",
"(",
"None",
",",
"self",
".",
"rdf",
".",
"prefixes",
".",
"premis",
".",
"hasEventOutcome",
")",
":",
"if",
"outcome",
".",
"toPython",
"(",
")",
"==",
"'SUCCESS'",
":",
"verdict",
"=",
"True",
"else",
":",
"verdict",
"=",
"False",
"return",
"{",
"'verdict'",
":",
"verdict",
",",
"'premis_graph'",
":",
"fixity_graph",
"}"
]
| Issues fixity check, return parsed graph
Args:
None
Returns:
(dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check) | [
"Issues",
"fixity",
"check",
"return",
"parsed",
"graph"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1896-L1928 | train |
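A sketch of reading the result dict defined above:

result = baz.fixity()
if result['verdict']:
	print('fixity check passed')
else:
	print(result['premis_graph'].serialize(format='turtle'))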
sleibman/python-conduit | conduit/core.py | Channel.get_value | def get_value(self, consumer=None):
"""
If consumer is specified, the channel will record that consumer as having consumed the value.
"""
if consumer:
self.consumers[consumer] = True
return self.value | python | def get_value(self, consumer=None):
"""
If consumer is specified, the channel will record that consumer as having consumed the value.
"""
if consumer:
self.consumers[consumer] = True
return self.value | [
"def",
"get_value",
"(",
"self",
",",
"consumer",
"=",
"None",
")",
":",
"if",
"consumer",
":",
"self",
".",
"consumers",
"[",
"consumer",
"]",
"=",
"True",
"return",
"self",
".",
"value"
]
| If consumer is specified, the channel will record that consumer as having consumed the value. | [
"If",
"consumer",
"is",
"specified",
"the",
"channel",
"will",
"record",
"that",
"consumer",
"as",
"having",
"consumed",
"the",
"value",
"."
]
| f6002d45c4f25e4418591a72fdac9ac6fb422d80 | https://github.com/sleibman/python-conduit/blob/f6002d45c4f25e4418591a72fdac9ac6fb422d80/conduit/core.py#L105-L111 | train |
sleibman/python-conduit | conduit/core.py | DataBlock.set_input_data | def set_input_data(self, key, value):
"""
set_input_data will automatically create an input channel if necessary.
Automatic channel creation is intended for the case where users are trying to set initial values on a block
whose input channels aren't subscribed to anything in the graph.
"""
if not key in self.input_channels.keys():
self.set_input_channel(key, Channel())
self.input_channels[key].set_value(Data(self.time, value)) | python | def set_input_data(self, key, value):
"""
set_input_data will automatically create an input channel if necessary.
Automatic channel creation is intended for the case where users are trying to set initial values on a block
whose input channels aren't subscribed to anything in the graph.
"""
if not key in self.input_channels.keys():
self.set_input_channel(key, Channel())
self.input_channels[key].set_value(Data(self.time, value)) | [
"def",
"set_input_data",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"not",
"key",
"in",
"self",
".",
"input_channels",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_input_channel",
"(",
"key",
",",
"Channel",
"(",
")",
")",
"self",
".",
"input_channels",
"[",
"key",
"]",
".",
"set_value",
"(",
"Data",
"(",
"self",
".",
"time",
",",
"value",
")",
")"
]
| set_input_data will automatically create an input channel if necessary.
Automatic channel creation is intended for the case where users are trying to set initial values on a block
whose input channels aren't subscribed to anything in the graph. | [
"set_input_data",
"will",
"automatically",
"create",
"an",
"input",
"channel",
"if",
"necessary",
".",
"Automatic",
"channel",
"creation",
"is",
"intended",
"for",
"the",
"case",
"where",
"users",
"are",
"trying",
"to",
"set",
"initial",
"values",
"on",
"a",
"block",
"whose",
"input",
"channels",
"aren",
"t",
"subscribed",
"to",
"anything",
"in",
"the",
"graph",
"."
]
| f6002d45c4f25e4418591a72fdac9ac6fb422d80 | https://github.com/sleibman/python-conduit/blob/f6002d45c4f25e4418591a72fdac9ac6fb422d80/conduit/core.py#L276-L284 | train |
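A sketch of the auto-creation behavior described above; SomeBlock stands in for any concrete DataBlock subclass and is hypothetical:

block = SomeBlock()
# the 'threshold' input channel is created on the fly if nothing is wired to it
block.set_input_data('threshold', 42)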
sleibman/python-conduit | conduit/core.py | DataBlock.get_output_channel | def get_output_channel(self, output_channel_name):
"""
get_output_channel will create a new channel object if necessary.
"""
if not output_channel_name in self.output_channels.keys():
self.output_channels[output_channel_name] = Channel()
self.output_channels[output_channel_name].add_producer(self)
return self.output_channels[output_channel_name] | python | def get_output_channel(self, output_channel_name):
"""
get_output_channel will create a new channel object if necessary.
"""
if not output_channel_name in self.output_channels.keys():
self.output_channels[output_channel_name] = Channel()
self.output_channels[output_channel_name].add_producer(self)
return self.output_channels[output_channel_name] | [
"def",
"get_output_channel",
"(",
"self",
",",
"output_channel_name",
")",
":",
"if",
"not",
"output_channel_name",
"in",
"self",
".",
"output_channels",
".",
"keys",
"(",
")",
":",
"self",
".",
"output_channels",
"[",
"output_channel_name",
"]",
"=",
"Channel",
"(",
")",
"self",
".",
"output_channels",
"[",
"output_channel_name",
"]",
".",
"add_producer",
"(",
"self",
")",
"return",
"self",
".",
"output_channels",
"[",
"output_channel_name",
"]"
]
| get_output_channel will create a new channel object if necessary. | [
"get_output_channel",
"will",
"create",
"a",
"new",
"channel",
"object",
"if",
"necessary",
"."
]
| f6002d45c4f25e4418591a72fdac9ac6fb422d80 | https://github.com/sleibman/python-conduit/blob/f6002d45c4f25e4418591a72fdac9ac6fb422d80/conduit/core.py#L336-L343 | train |
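A wiring sketch; set_input_channel is assumed from its use in set_input_data above, and both blocks are hypothetical DataBlock instances:

# channel is created on demand and records producer_block as a producer
channel = producer_block.get_output_channel('result')
consumer_block.set_input_channel('value', channel)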
adamhadani/python-yelp | yelp/api.py | ReviewSearchApi.by_bounding_box | def by_bounding_box(self, tl_lat, tl_long, br_lat, br_long, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
tl_lat = tl_lat,
tl_long = tl_long,
br_lat = br_lat,
br_long = br_long,
term = term,
category = category,
num_biz_requested = num_biz_requested
)
return json.loads(content) | python | def by_bounding_box(self, tl_lat, tl_long, br_lat, br_long, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
tl_lat = tl_lat,
tl_long = tl_long,
br_lat = br_lat,
br_long = br_long,
term = term,
category = category,
num_biz_requested = num_biz_requested
)
return json.loads(content) | [
"def",
"by_bounding_box",
"(",
"self",
",",
"tl_lat",
",",
"tl_long",
",",
"br_lat",
",",
"br_long",
",",
"term",
"=",
"None",
",",
"num_biz_requested",
"=",
"None",
",",
"category",
"=",
"None",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"tl_lat",
"=",
"tl_lat",
",",
"tl_long",
"=",
"tl_long",
",",
"br_lat",
"=",
"br_lat",
",",
"br_long",
"=",
"br_long",
",",
"term",
"=",
"term",
",",
"category",
"=",
"category",
",",
"num_biz_requested",
"=",
"num_biz_requested",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional) | [
"Perform",
"a",
"Yelp",
"Review",
"Search",
"based",
"on",
"a",
"map",
"bounding",
"box",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L104-L131 | train |
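A usage sketch; the constructor arguments are assumptions (the class presumably needs a Yelp API key), and the 'businesses' key follows the Yelp v1 review search JSON:

api = ReviewSearchApi(ywsid='YOUR_API_KEY')  # hypothetical constructor
results = api.by_bounding_box(
	37.80, -122.47,  # top-left lat/long
	37.73, -122.39,  # bottom-right lat/long
	term='coffee',
	num_biz_requested=10,
	category='coffee+bagels')
for biz in results.get('businesses', []):
	print(biz['name'])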
adamhadani/python-yelp | yelp/api.py | ReviewSearchApi.by_geopoint | def by_geopoint(self, lat, long, radius, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a geopoint and radius tuple.
Args:
lat - geopoint latitude
long - geopoint longitude
radius - search radius (in miles)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
            category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
lat = lat,
long = long,
radius = radius,
            term = term,
            category = category,
            num_biz_requested = num_biz_requested
)
return json.loads(content) | python | def by_geopoint(self, lat, long, radius, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a geopoint and radius tuple.
Args:
lat - geopoint latitude
long - geopoint longitude
radius - search radius (in miles)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
            category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
lat = lat,
long = long,
radius = radius,
            term = term,
            category = category,
            num_biz_requested = num_biz_requested
)
return json.loads(content) | [
"def",
"by_geopoint",
"(",
"self",
",",
"lat",
",",
"long",
",",
"radius",
",",
"term",
"=",
"None",
",",
"num_biz_requested",
"=",
"None",
",",
"category",
"=",
"None",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"lat",
"=",
"lat",
",",
"long",
"=",
"long",
",",
"radius",
"=",
"radius",
",",
"term",
"=",
"None",
",",
"num_biz_requested",
"=",
"None",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Review Search based on a geopoint and radius tuple.
Args:
lat - geopoint latitude
long - geopoint longitude
radius - search radius (in miles)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional) | [
"Perform",
"a",
"Yelp",
"Review",
"Search",
"based",
"on",
"a",
"geopoint",
"and",
"radius",
"tuple",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L134-L158 | train |
adamhadani/python-yelp | yelp/api.py | ReviewSearchApi.by_location | def by_location(self, location, cc=None, radius=None, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a location specifier.
Args:
location - textual location specifier of form: "address, neighborhood, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
radius - search radius (in miles) (Optional)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
            category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
location = location,
cc = cc,
radius = radius,
            term = term,
            category = category,
            num_biz_requested = num_biz_requested
)
return json.loads(content) | python | def by_location(self, location, cc=None, radius=None, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a location specifier.
Args:
location - textual location specifier of form: "address, neighborhood, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
radius - search radius (in miles) (Optional)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
            category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
location = location,
cc = cc,
radius = radius,
            term = term,
            category = category,
            num_biz_requested = num_biz_requested
)
return json.loads(content) | [
"def",
"by_location",
"(",
"self",
",",
"location",
",",
"cc",
"=",
"None",
",",
"radius",
"=",
"None",
",",
"term",
"=",
"None",
",",
"num_biz_requested",
"=",
"None",
",",
"category",
"=",
"None",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"location",
"=",
"location",
",",
"cc",
"=",
"cc",
",",
"radius",
"=",
"radius",
",",
"term",
"=",
"term",
",",
"num_biz_requested",
"=",
"num_biz_requested",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Review Search based on a location specifier.
Args:
location - textual location specifier of form: "address, neighborhood, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
radius - search radius (in miles) (Optional)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional) | [
"Perform",
"a",
"Yelp",
"Review",
"Search",
"based",
"on",
"a",
"location",
"specifier",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L161-L184 | train |
adamhadani/python-yelp | yelp/api.py | PhoneApi.by_phone | def by_phone(self, phone, cc=None):
"""
Perform a Yelp Phone API Search based on phone number given.
Args:
phone - Phone number to search by
cc - ISO 3166-1 alpha-2 country code. (Optional)
"""
header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)
return json.loads(content) | python | def by_phone(self, phone, cc=None):
"""
Perform a Yelp Phone API Search based on phone number given.
Args:
phone - Phone number to search by
cc - ISO 3166-1 alpha-2 country code. (Optional)
"""
header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)
return json.loads(content) | [
"def",
"by_phone",
"(",
"self",
",",
"phone",
",",
"cc",
"=",
"None",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"phone",
"=",
"phone",
",",
"cc",
"=",
"cc",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Phone API Search based on phone number given.
Args:
phone - Phone number to search by
cc - ISO 3166-1 alpha-2 country code. (Optional) | [
"Perform",
"a",
"Yelp",
"Phone",
"API",
"Search",
"based",
"on",
"phone",
"number",
"given",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L203-L214 | train |
adamhadani/python-yelp | yelp/api.py | NeighborhoodApi.by_geopoint | def by_geopoint(self, lat, long):
"""
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
"""
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content) | python | def by_geopoint(self, lat, long):
"""
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
"""
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content) | [
"def",
"by_geopoint",
"(",
"self",
",",
"lat",
",",
"long",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"lat",
"=",
"lat",
",",
"long",
"=",
"long",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude | [
"Perform",
"a",
"Yelp",
"Neighborhood",
"API",
"Search",
"based",
"on",
"a",
"geopoint",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L233-L243 | train |
adamhadani/python-yelp | yelp/api.py | NeighborhoodApi.by_location | def by_location(self, location, cc=None):
"""
Perform a Yelp Neighborhood API Search based on a location specifier.
Args:
location - textual location specifier of form: "address, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
"""
header, content = self._http_request(self.BASE_URL, location=location, cc=cc)
return json.loads(content) | python | def by_location(self, location, cc=None):
"""
Perform a Yelp Neighborhood API Search based on a location specifier.
Args:
location - textual location specifier of form: "address, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
"""
header, content = self._http_request(self.BASE_URL, location=location, cc=cc)
return json.loads(content) | [
"def",
"by_location",
"(",
"self",
",",
"location",
",",
"cc",
"=",
"None",
")",
":",
"header",
",",
"content",
"=",
"self",
".",
"_http_request",
"(",
"self",
".",
"BASE_URL",
",",
"location",
"=",
"location",
",",
"cc",
"=",
"cc",
")",
"return",
"json",
".",
"loads",
"(",
"content",
")"
]
| Perform a Yelp Neighborhood API Search based on a location specifier.
Args:
location - textual location specifier of form: "address, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional) | [
"Perform",
"a",
"Yelp",
"Neighborhood",
"API",
"Search",
"based",
"on",
"a",
"location",
"specifier",
"."
]
| 7694ccb7274cc3c5783250ed0c3396cda2fcfa1a | https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L246-L256 | train |
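A minimal usage sketch for the six yelp/api.py wrappers in the records above. The constructor argument (the Yelp v1 'ywsid' API key) and the literal search values are assumptions for illustration; the records only show the search methods themselves, each of which decodes the JSON body returned by the shared _http_request helper.

# Hypothetical usage; the ywsid constructor argument is an assumption
# not shown in the records above.
from yelp.api import ReviewSearchApi, PhoneApi, NeighborhoodApi

reviews = ReviewSearchApi(ywsid="YOUR_YWSID")
nearby = reviews.by_geopoint(37.788, -122.407, radius=2, term="pizza")

phone = PhoneApi(ywsid="YOUR_YWSID")
listing = phone.by_phone("4155551234", cc="US")

hoods = NeighborhoodApi(ywsid="YOUR_YWSID")
hood = hoods.by_location("548 Market St, San Francisco, CA")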
eventifyio/eventify | eventify/drivers/zeromq.py | Service.check_transport_host | def check_transport_host(self):
"""
Check if zeromq socket is available
on transport host
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('events-server', 8080))
if result == 0:
logging.info('port 8080 on zmq is open!')
return True
return False | python | def check_transport_host(self):
"""
Check if zeromq socket is available
on transport host
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('events-server', 8080))
if result == 0:
logging.info('port 8080 on zmq is open!')
return True
return False | [
"def",
"check_transport_host",
"(",
"self",
")",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"result",
"=",
"sock",
".",
"connect_ex",
"(",
"(",
"'events-server'",
",",
"8080",
")",
")",
"if",
"result",
"==",
"0",
":",
"logging",
".",
"info",
"(",
"'port 8080 on zmq is open!'",
")",
"return",
"True",
"return",
"False"
]
| Check if zeromq socket is available
on transport host | [
"Check",
"if",
"zeromq",
"socket",
"is",
"available",
"on",
"transport",
"host"
]
| 0e519964a56bd07a879b266f21f177749c63aaed | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/zeromq.py#L130-L140 | train |
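check_transport_host above relies on connect_ex, which returns an errno (0 on success) instead of raising, but it never closes the socket and uses the default blocking timeout. A hedged sketch of the same reachability check with explicit cleanup and a timeout; the host and port mirror the record:

import socket

def zmq_port_open(host="events-server", port=8080, timeout=2.0):
    # connect_ex returns an errno; 0 means the TCP connect succeeded
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0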
255BITS/hyperchamber | hyperchamber/io/__init__.py | sample | def sample(config, samples):
"""Upload a series of samples. Each sample has keys 'image' and 'label'.
Images are ignored if the rate limit is hit."""
url = get_api_path('sample.json')
multiple_files = []
images = [s['image'] for s in samples]
labels = [s['label'] for s in samples]
for image in images:
multiple_files.append(('images', (image, open(image, 'rb'), 'image/png')))
headers=get_headers(no_content_type=True)
headers["config"]= json.dumps(config, cls=HCEncoder)
headers["labels"]= json.dumps(labels)
print("With headers", headers)
try:
r = requests.post(url, files=multiple_files, headers=headers, timeout=30)
return r.text
except requests.exceptions.RequestException:
e = sys.exc_info()[0]
print("Error while calling hyperchamber - ", e)
return None | python | def sample(config, samples):
"""Upload a series of samples. Each sample has keys 'image' and 'label'.
Images are ignored if the rate limit is hit."""
url = get_api_path('sample.json')
multiple_files = []
images = [s['image'] for s in samples]
labels = [s['label'] for s in samples]
for image in images:
multiple_files.append(('images', (image, open(image, 'rb'), 'image/png')))
headers=get_headers(no_content_type=True)
headers["config"]= json.dumps(config, cls=HCEncoder)
headers["labels"]= json.dumps(labels)
print("With headers", headers)
try:
r = requests.post(url, files=multiple_files, headers=headers, timeout=30)
return r.text
except requests.exceptions.RequestException:
e = sys.exc_info()[0]
print("Error while calling hyperchamber - ", e)
return None | [
"def",
"sample",
"(",
"config",
",",
"samples",
")",
":",
"url",
"=",
"get_api_path",
"(",
"'sample.json'",
")",
"multiple_files",
"=",
"[",
"]",
"images",
"=",
"[",
"s",
"[",
"'image'",
"]",
"for",
"s",
"in",
"samples",
"]",
"labels",
"=",
"[",
"s",
"[",
"'label'",
"]",
"for",
"s",
"in",
"samples",
"]",
"for",
"image",
"in",
"images",
":",
"multiple_files",
".",
"append",
"(",
"(",
"'images'",
",",
"(",
"image",
",",
"open",
"(",
"image",
",",
"'rb'",
")",
",",
"'image/png'",
")",
")",
")",
"headers",
"=",
"get_headers",
"(",
"no_content_type",
"=",
"True",
")",
"headers",
"[",
"\"config\"",
"]",
"=",
"json",
".",
"dumps",
"(",
"config",
",",
"cls",
"=",
"HCEncoder",
")",
"headers",
"[",
"\"labels\"",
"]",
"=",
"json",
".",
"dumps",
"(",
"labels",
")",
"print",
"(",
"\"With headers\"",
",",
"headers",
")",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"files",
"=",
"multiple_files",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"30",
")",
"return",
"r",
".",
"text",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
"print",
"(",
"\"Error while calling hyperchamber - \"",
",",
"e",
")",
"return",
"None"
]
| Upload a series of samples. Each sample has keys 'image' and 'label'.
Images are ignored if the rate limit is hit. | [
"Upload",
"a",
"series",
"of",
"samples",
".",
"Each",
"sample",
"has",
"keys",
"image",
"and",
"label",
".",
"Images",
"are",
"ignored",
"if",
"the",
"rate",
"limit",
"is",
"hit",
"."
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/io/__init__.py#L43-L63 | train |
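A hypothetical call to the sample() uploader above; the import path and the PNG paths are assumptions for illustration. Each dict pairs an image file with its label, and the function returns the response body, or None on a network error:

from hyperchamber import io as hcio  # assumed import path

config = {"learning_rate": 1e-4, "batch_size": 32}
samples = [
    {"image": "samples/epoch_001.png", "label": "epoch 1"},
    {"image": "samples/epoch_002.png", "label": "epoch 2"},
]
response_text = hcio.sample(config, samples)  # None if the request failed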
255BITS/hyperchamber | hyperchamber/io/__init__.py | measure | def measure(config, result, max_retries=10):
"""Records results on hyperchamber.io. Used when you are done testing a config."""
url = get_api_path('measurement.json')
data = {'config': config, 'result': result}
retries = 0
while(retries < max_retries):
try:
r = requests.post(url, data=json.dumps(data, cls=HCEncoder), headers=get_headers(), timeout=30)
return r.text
except requests.exceptions.RequestException:
e = sys.exc_info()[0]
print("Error while calling hyperchamber - retrying ", e)
retries += 1 | python | def measure(config, result, max_retries=10):
"""Records results on hyperchamber.io. Used when you are done testing a config."""
url = get_api_path('measurement.json')
data = {'config': config, 'result': result}
retries = 0
while(retries < max_retries):
try:
r = requests.post(url, data=json.dumps(data, cls=HCEncoder), headers=get_headers(), timeout=30)
return r.text
except requests.exceptions.RequestException:
e = sys.exc_info()[0]
print("Error while calling hyperchamber - retrying ", e)
retries += 1 | [
"def",
"measure",
"(",
"config",
",",
"result",
",",
"max_retries",
"=",
"10",
")",
":",
"url",
"=",
"get_api_path",
"(",
"'measurement.json'",
")",
"data",
"=",
"{",
"'config'",
":",
"config",
",",
"'result'",
":",
"result",
"}",
"retries",
"=",
"0",
"while",
"(",
"retries",
"<",
"max_retries",
")",
":",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"HCEncoder",
")",
",",
"headers",
"=",
"get_headers",
"(",
")",
",",
"timeout",
"=",
"30",
")",
"return",
"r",
".",
"text",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
"print",
"(",
"\"Error while calling hyperchamber - retrying \"",
",",
"e",
")",
"retries",
"+=",
"1"
]
| Records results on hyperchamber.io. Used when you are done testing a config. | [
"Records",
"results",
"on",
"hyperchamber",
".",
"io",
".",
"Used",
"when",
"you",
"are",
"done",
"testing",
"a",
"config",
"."
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/io/__init__.py#L65-L77 | train |
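measure() above retries the POST up to max_retries times and falls through (returning None) once they are exhausted. A hypothetical call, with the same assumed import path as the sample() sketch:

from hyperchamber import io as hcio  # assumed import path

config = {"learning_rate": 1e-4, "batch_size": 32}
result = {"loss": 0.042, "accuracy": 0.98}
response_text = hcio.measure(config, result, max_retries=3)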
stephenmcd/gunicorn-console | gunicorn_console.py | move_selection | def move_selection(reverse=False):
"""
Goes through the list of gunicorns, setting the selected as the one after
the currently selected.
"""
global selected_pid
if selected_pid not in gunicorns:
selected_pid = None
found = False
pids = sorted(gunicorns.keys(), reverse=reverse)
# Iterate items twice to enable wrapping.
for pid in pids + pids:
if selected_pid is None or found:
selected_pid = pid
return
found = pid == selected_pid | python | def move_selection(reverse=False):
"""
Goes through the list of gunicorns, setting the selected as the one after
the currently selected.
"""
global selected_pid
if selected_pid not in gunicorns:
selected_pid = None
found = False
pids = sorted(gunicorns.keys(), reverse=reverse)
# Iterate items twice to enable wrapping.
for pid in pids + pids:
if selected_pid is None or found:
selected_pid = pid
return
found = pid == selected_pid | [
"def",
"move_selection",
"(",
"reverse",
"=",
"False",
")",
":",
"global",
"selected_pid",
"if",
"selected_pid",
"not",
"in",
"gunicorns",
":",
"selected_pid",
"=",
"None",
"found",
"=",
"False",
"pids",
"=",
"sorted",
"(",
"gunicorns",
".",
"keys",
"(",
")",
",",
"reverse",
"=",
"reverse",
")",
"# Iterate items twice to enable wrapping.",
"for",
"pid",
"in",
"pids",
"+",
"pids",
":",
"if",
"selected_pid",
"is",
"None",
"or",
"found",
":",
"selected_pid",
"=",
"pid",
"return",
"found",
"=",
"pid",
"==",
"selected_pid"
]
| Goes through the list of gunicorns, setting the selected as the one after
the currently selected. | [
"Goes",
"through",
"the",
"list",
"of",
"gunicorns",
"setting",
"the",
"selected",
"as",
"the",
"one",
"after",
"the",
"currently",
"selected",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L87-L102 | train |
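move_selection gets wrap-around for free by iterating the sorted pid list twice: the first pass locates the current selection, and when that selection was the last element, the second pass supplies the first element again. The idiom in isolation (standalone sketch, not from the repo):

def next_after(items, current):
    found = current is None
    for item in items + items:      # second pass enables wrap-around
        if found:
            return item
        found = (item == current)
    return None                     # empty list

assert next_after([1, 2, 3], 3) == 1     # wraps past the end
assert next_after([1, 2, 3], None) == 1  # nothing selected yet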
stephenmcd/gunicorn-console | gunicorn_console.py | update_gunicorns | def update_gunicorns():
"""
Updates the dict of gunicorn processes. Run the ps command and parse its
output for processes named after gunicorn, building up a dict of gunicorn
processes. When new gunicorns are discovered, run the netstat command to
determine the ports they're serving on.
"""
global tick
tick += 1
if (tick * screen_delay) % ps_delay != 0:
return
tick = 0
for pid in gunicorns:
gunicorns[pid].update({"workers": 0, "mem": 0})
ps = Popen(PS_ARGS, stdout=PIPE).communicate()[0].split("\n")
headings = ps.pop(0).split()
name_col = headings.index(cmd_heading)
num_cols = len(headings) - 1
for row in ps:
cols = row.split(None, num_cols)
if cols and "gunicorn: " in cols[name_col]:
if "gunicorn: worker" in cols[name_col]:
is_worker = True
else:
is_worker = False
if is_worker:
pid = cols[headings.index("PPID")]
else:
pid = cols[headings.index("PID")]
if pid not in gunicorns:
gunicorns[pid] = {"workers": 0, "mem": 0, "port": None, "name":
cols[name_col].strip().split("[",1)[1].split("]",1)[:-1]}
gunicorns[pid]["mem"] += int(cols[headings.index("RSS")])
if is_worker:
gunicorns[pid]["workers"] += 1
# Remove gunicorns that were not found in the process list.
for pid in gunicorns.keys()[:]:
if gunicorns[pid]["workers"] == 0:
del gunicorns[pid]
# Determine ports if any are missing.
if not [g for g in gunicorns.values() if g["port"] is None]:
return
for (pid, port) in ports_for_pids(gunicorns.keys()):
if pid in gunicorns:
gunicorns[pid]["port"] = port | python | def update_gunicorns():
"""
Updates the dict of gunicorn processes. Run the ps command and parse its
output for processes named after gunicorn, building up a dict of gunicorn
processes. When new gunicorns are discovered, run the netstat command to
determine the ports they're serving on.
"""
global tick
tick += 1
if (tick * screen_delay) % ps_delay != 0:
return
tick = 0
for pid in gunicorns:
gunicorns[pid].update({"workers": 0, "mem": 0})
ps = Popen(PS_ARGS, stdout=PIPE).communicate()[0].split("\n")
headings = ps.pop(0).split()
name_col = headings.index(cmd_heading)
num_cols = len(headings) - 1
for row in ps:
cols = row.split(None, num_cols)
if cols and "gunicorn: " in cols[name_col]:
if "gunicorn: worker" in cols[name_col]:
is_worker = True
else:
is_worker = False
if is_worker:
pid = cols[headings.index("PPID")]
else:
pid = cols[headings.index("PID")]
if pid not in gunicorns:
gunicorns[pid] = {"workers": 0, "mem": 0, "port": None, "name":
cols[name_col].strip().split("[",1)[1].split("]",1)[:-1]}
gunicorns[pid]["mem"] += int(cols[headings.index("RSS")])
if is_worker:
gunicorns[pid]["workers"] += 1
# Remove gunicorns that were not found in the process list.
for pid in gunicorns.keys()[:]:
if gunicorns[pid]["workers"] == 0:
del gunicorns[pid]
# Determine ports if any are missing.
if not [g for g in gunicorns.values() if g["port"] is None]:
return
for (pid, port) in ports_for_pids(gunicorns.keys()):
if pid in gunicorns:
gunicorns[pid]["port"] = port | [
"def",
"update_gunicorns",
"(",
")",
":",
"global",
"tick",
"tick",
"+=",
"1",
"if",
"(",
"tick",
"*",
"screen_delay",
")",
"%",
"ps_delay",
"!=",
"0",
":",
"return",
"tick",
"=",
"0",
"for",
"pid",
"in",
"gunicorns",
":",
"gunicorns",
"[",
"pid",
"]",
".",
"update",
"(",
"{",
"\"workers\"",
":",
"0",
",",
"\"mem\"",
":",
"0",
"}",
")",
"ps",
"=",
"Popen",
"(",
"PS_ARGS",
",",
"stdout",
"=",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"headings",
"=",
"ps",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
")",
"name_col",
"=",
"headings",
".",
"index",
"(",
"cmd_heading",
")",
"num_cols",
"=",
"len",
"(",
"headings",
")",
"-",
"1",
"for",
"row",
"in",
"ps",
":",
"cols",
"=",
"row",
".",
"split",
"(",
"None",
",",
"num_cols",
")",
"if",
"cols",
"and",
"\"gunicorn: \"",
"in",
"cols",
"[",
"name_col",
"]",
":",
"if",
"\"gunicorn: worker\"",
"in",
"cols",
"[",
"name_col",
"]",
":",
"is_worker",
"=",
"True",
"else",
":",
"is_worker",
"=",
"False",
"if",
"is_worker",
":",
"pid",
"=",
"cols",
"[",
"headings",
".",
"index",
"(",
"\"PPID\"",
")",
"]",
"else",
":",
"pid",
"=",
"cols",
"[",
"headings",
".",
"index",
"(",
"\"PID\"",
")",
"]",
"if",
"pid",
"not",
"in",
"gunicorns",
":",
"gunicorns",
"[",
"pid",
"]",
"=",
"{",
"\"workers\"",
":",
"0",
",",
"\"mem\"",
":",
"0",
",",
"\"port\"",
":",
"None",
",",
"\"name\"",
":",
"cols",
"[",
"name_col",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"[\"",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
"\"]\"",
",",
"1",
")",
"[",
":",
"-",
"1",
"]",
"}",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"mem\"",
"]",
"+=",
"int",
"(",
"cols",
"[",
"headings",
".",
"index",
"(",
"\"RSS\"",
")",
"]",
")",
"if",
"is_worker",
":",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"workers\"",
"]",
"+=",
"1",
"# Remove gunicorns that were not found in the process list.",
"for",
"pid",
"in",
"gunicorns",
".",
"keys",
"(",
")",
"[",
":",
"]",
":",
"if",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"workers\"",
"]",
"==",
"0",
":",
"del",
"gunicorns",
"[",
"pid",
"]",
"# Determine ports if any are missing.",
"if",
"not",
"[",
"g",
"for",
"g",
"in",
"gunicorns",
".",
"values",
"(",
")",
"if",
"g",
"[",
"\"port\"",
"]",
"is",
"None",
"]",
":",
"return",
"for",
"(",
"pid",
",",
"port",
")",
"in",
"ports_for_pids",
"(",
"gunicorns",
".",
"keys",
"(",
")",
")",
":",
"if",
"pid",
"in",
"gunicorns",
":",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"port\"",
"]",
"=",
"port"
]
| Updates the dict of gunicorn processes. Run the ps command and parse its
output for processes named after gunicorn, building up a dict of gunicorn
processes. When new gunicorns are discovered, run the netstat command to
determine the ports they're serving on. | [
"Updates",
"the",
"dict",
"of",
"gunicorn",
"processes",
".",
"Run",
"the",
"ps",
"command",
"and",
"parse",
"its",
"output",
"for",
"processes",
"named",
"after",
"gunicorn",
"building",
"up",
"a",
"dict",
"of",
"gunicorn",
"processes",
".",
"When",
"new",
"gunicorns",
"are",
"discovered",
"run",
"the",
"netstat",
"command",
"to",
"determine",
"the",
"ports",
"they",
"re",
"serving",
"on",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L105-L150 | train |
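update_gunicorns locates ps columns by header name rather than by fixed position, which keeps the parser working across platforms whose ps orders the columns differently. The idiom in isolation (sketch; assumes a POSIX ps and Python 3 byte decoding):

from subprocess import Popen, PIPE

out = Popen(["ps", "-e", "-o", "pid,ppid,rss"],
            stdout=PIPE).communicate()[0].decode().splitlines()
headings = out.pop(0).split()
pid_col, rss_col = headings.index("PID"), headings.index("RSS")
for row in out:
    cols = row.split(None, len(headings) - 1)
    if cols:
        print(cols[pid_col], cols[rss_col])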
stephenmcd/gunicorn-console | gunicorn_console.py | handle_keypress | def handle_keypress(screen):
"""
Check for a key being pressed and handle it if applicable.
"""
global selected_pid
try:
key = screen.getkey().upper()
except:
return
if key in ("KEY_DOWN", "J"):
move_selection()
elif key in ("KEY_UP", "K"):
move_selection(reverse=True)
elif key in ("A", "+"):
send_signal("TTIN")
if selected_pid in gunicorns:
gunicorns[selected_pid]["workers"] = 0
elif key in ("W", "-"):
if selected_pid in gunicorns:
if gunicorns[selected_pid]["workers"] != 1:
send_signal("TTOU")
gunicorns[selected_pid]["workers"] = 0
elif key in ("R",):
if selected_pid in gunicorns:
send_signal("HUP")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("T",):
for pid in gunicorns.copy().iterkeys():
selected_pid = pid
send_signal("HUP")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("M", "-"):
if selected_pid in gunicorns:
send_signal("QUIT")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("Q",):
raise KeyboardInterrupt | python | def handle_keypress(screen):
"""
Check for a key being pressed and handle it if applicable.
"""
global selected_pid
try:
key = screen.getkey().upper()
except:
return
if key in ("KEY_DOWN", "J"):
move_selection()
elif key in ("KEY_UP", "K"):
move_selection(reverse=True)
elif key in ("A", "+"):
send_signal("TTIN")
if selected_pid in gunicorns:
gunicorns[selected_pid]["workers"] = 0
elif key in ("W", "-"):
if selected_pid in gunicorns:
if gunicorns[selected_pid]["workers"] != 1:
send_signal("TTOU")
gunicorns[selected_pid]["workers"] = 0
elif key in ("R",):
if selected_pid in gunicorns:
send_signal("HUP")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("T",):
for pid in gunicorns.copy().iterkeys():
selected_pid = pid
send_signal("HUP")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("M", "-"):
if selected_pid in gunicorns:
send_signal("QUIT")
del gunicorns[selected_pid]
selected_pid = None
elif key in ("Q",):
raise KeyboardInterrupt | [
"def",
"handle_keypress",
"(",
"screen",
")",
":",
"global",
"selected_pid",
"try",
":",
"key",
"=",
"screen",
".",
"getkey",
"(",
")",
".",
"upper",
"(",
")",
"except",
":",
"return",
"if",
"key",
"in",
"(",
"\"KEY_DOWN\"",
",",
"\"J\"",
")",
":",
"move_selection",
"(",
")",
"elif",
"key",
"in",
"(",
"\"KEY_UP\"",
",",
"\"K\"",
")",
":",
"move_selection",
"(",
"reverse",
"=",
"True",
")",
"elif",
"key",
"in",
"(",
"\"A\"",
",",
"\"+\"",
")",
":",
"send_signal",
"(",
"\"TTIN\"",
")",
"if",
"selected_pid",
"in",
"gunicorns",
":",
"gunicorns",
"[",
"selected_pid",
"]",
"[",
"\"workers\"",
"]",
"=",
"0",
"elif",
"key",
"in",
"(",
"\"W\"",
",",
"\"-\"",
")",
":",
"if",
"selected_pid",
"in",
"gunicorns",
":",
"if",
"gunicorns",
"[",
"selected_pid",
"]",
"[",
"\"workers\"",
"]",
"!=",
"1",
":",
"send_signal",
"(",
"\"TTOU\"",
")",
"gunicorns",
"[",
"selected_pid",
"]",
"[",
"\"workers\"",
"]",
"=",
"0",
"elif",
"key",
"in",
"(",
"\"R\"",
",",
")",
":",
"if",
"selected_pid",
"in",
"gunicorns",
":",
"send_signal",
"(",
"\"HUP\"",
")",
"del",
"gunicorns",
"[",
"selected_pid",
"]",
"selected_pid",
"=",
"None",
"elif",
"key",
"in",
"(",
"\"T\"",
",",
")",
":",
"for",
"pid",
"in",
"gunicorns",
".",
"copy",
"(",
")",
".",
"iterkeys",
"(",
")",
":",
"selected_pid",
"=",
"pid",
"send_signal",
"(",
"\"HUP\"",
")",
"del",
"gunicorns",
"[",
"selected_pid",
"]",
"selected_pid",
"=",
"None",
"elif",
"key",
"in",
"(",
"\"M\"",
",",
"\"-\"",
")",
":",
"if",
"selected_pid",
"in",
"gunicorns",
":",
"send_signal",
"(",
"\"QUIT\"",
")",
"del",
"gunicorns",
"[",
"selected_pid",
"]",
"selected_pid",
"=",
"None",
"elif",
"key",
"in",
"(",
"\"Q\"",
",",
")",
":",
"raise",
"KeyboardInterrupt"
]
| Check for a key being pressed and handle it if applicable. | [
"Check",
"for",
"a",
"key",
"being",
"pressed",
"and",
"handle",
"it",
"if",
"applicable",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L153-L192 | train |
stephenmcd/gunicorn-console | gunicorn_console.py | format_row | def format_row(pid="", port="", name="", mem="", workers="", prefix_char=" "):
"""
    Applies consistent padding to each of the columns in a row and serves as
the source of the overall screen width.
"""
row = "%s%-5s %-6s %-25s %8s %7s " \
% (prefix_char, pid, port, name, mem, workers)
global screen_width
if screen_width is None:
screen_width = len(row)
return row | python | def format_row(pid="", port="", name="", mem="", workers="", prefix_char=" "):
"""
    Applies consistent padding to each of the columns in a row and serves as
the source of the overall screen width.
"""
row = "%s%-5s %-6s %-25s %8s %7s " \
% (prefix_char, pid, port, name, mem, workers)
global screen_width
if screen_width is None:
screen_width = len(row)
return row | [
"def",
"format_row",
"(",
"pid",
"=",
"\"\"",
",",
"port",
"=",
"\"\"",
",",
"name",
"=",
"\"\"",
",",
"mem",
"=",
"\"\"",
",",
"workers",
"=",
"\"\"",
",",
"prefix_char",
"=",
"\" \"",
")",
":",
"row",
"=",
"\"%s%-5s %-6s %-25s %8s %7s \"",
"%",
"(",
"prefix_char",
",",
"pid",
",",
"port",
",",
"name",
",",
"mem",
",",
"workers",
")",
"global",
"screen_width",
"if",
"screen_width",
"is",
"None",
":",
"screen_width",
"=",
"len",
"(",
"row",
")",
"return",
"row"
]
| Applies consistent padding to each of the columns in a row and serves as
the source of the overall screen width. | [
"Applies",
"consistant",
"padding",
"to",
"each",
"of",
"the",
"columns",
"in",
"a",
"row",
"and",
"serves",
"as",
"the",
"source",
"of",
"the",
"overall",
"screen",
"width",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L195-L206 | train |
stephenmcd/gunicorn-console | gunicorn_console.py | display_output | def display_output(screen):
"""
Display the menu list of gunicorns.
"""
format_row() # Sets up the screen width.
screen_height = len(gunicorns) + len(instructions.split("\n")) + 9
if not gunicorns:
screen_height += 2 # A couple of blank lines are added when empty.
screen.erase()
win = curses.newwin(screen_height, screen_width + 6, 1, 3)
win.bkgd(" ", curses.color_pair(1))
win.border()
x = 3
blank_line = y = count(2).next
win.addstr(y(), x, title.center(screen_width), curses.A_NORMAL)
blank_line()
win.addstr(y(), x, format_row(" PID", "PORT", "NAME", "MEM (MB)", "WORKERS"),
curses.A_STANDOUT)
if not gunicorns:
blank_line()
win.addstr(y(), x, no_gunicorns.center(screen_width),
curses.A_NORMAL)
blank_line()
else:
win.hline(y(), x, curses.ACS_HLINE, screen_width)
for (i, pid) in enumerate(sorted(gunicorns.keys())):
port = gunicorns[pid]["port"]
name = gunicorns[pid]["name"]
mem = "%#.3f" % (gunicorns[pid]["mem"] / 1000.)
workers = gunicorns[pid]["workers"]
# When a signal is sent to update the number of workers, the number
# of workers is set to zero as a marker to signify an update has
# occurred. We then piggyback this variable and use it as a counter
# to animate the display until the gunicorn is next updated.
if workers < 1:
gunicorns[pid]["workers"] -= 1
chars = "|/-\\"
workers *= -1
if workers == len(chars):
gunicorns[pid]["workers"] = workers = 0
workers = chars[workers]
if pid == selected_pid:
attr = curses.A_STANDOUT
prefix_char = '> '
else:
attr = curses.A_NORMAL
prefix_char = ' '
win.addstr(y(), x, format_row(pid, port, name, mem, workers,
prefix_char), attr)
win.hline(y(), x, curses.ACS_HLINE, screen_width)
blank_line()
for line in instructions.split("\n"):
win.addstr(y(), x, line.center(screen_width), curses.A_NORMAL)
win.refresh() | python | def display_output(screen):
"""
Display the menu list of gunicorns.
"""
format_row() # Sets up the screen width.
screen_height = len(gunicorns) + len(instructions.split("\n")) + 9
if not gunicorns:
screen_height += 2 # A couple of blank lines are added when empty.
screen.erase()
win = curses.newwin(screen_height, screen_width + 6, 1, 3)
win.bkgd(" ", curses.color_pair(1))
win.border()
x = 3
blank_line = y = count(2).next
win.addstr(y(), x, title.center(screen_width), curses.A_NORMAL)
blank_line()
win.addstr(y(), x, format_row(" PID", "PORT", "NAME", "MEM (MB)", "WORKERS"),
curses.A_STANDOUT)
if not gunicorns:
blank_line()
win.addstr(y(), x, no_gunicorns.center(screen_width),
curses.A_NORMAL)
blank_line()
else:
win.hline(y(), x, curses.ACS_HLINE, screen_width)
for (i, pid) in enumerate(sorted(gunicorns.keys())):
port = gunicorns[pid]["port"]
name = gunicorns[pid]["name"]
mem = "%#.3f" % (gunicorns[pid]["mem"] / 1000.)
workers = gunicorns[pid]["workers"]
# When a signal is sent to update the number of workers, the number
# of workers is set to zero as a marker to signify an update has
# occurred. We then piggyback this variable and use it as a counter
# to animate the display until the gunicorn is next updated.
if workers < 1:
gunicorns[pid]["workers"] -= 1
chars = "|/-\\"
workers *= -1
if workers == len(chars):
gunicorns[pid]["workers"] = workers = 0
workers = chars[workers]
if pid == selected_pid:
attr = curses.A_STANDOUT
prefix_char = '> '
else:
attr = curses.A_NORMAL
prefix_char = ' '
win.addstr(y(), x, format_row(pid, port, name, mem, workers,
prefix_char), attr)
win.hline(y(), x, curses.ACS_HLINE, screen_width)
blank_line()
for line in instructions.split("\n"):
win.addstr(y(), x, line.center(screen_width), curses.A_NORMAL)
win.refresh() | [
"def",
"display_output",
"(",
"screen",
")",
":",
"format_row",
"(",
")",
"# Sets up the screen width.",
"screen_height",
"=",
"len",
"(",
"gunicorns",
")",
"+",
"len",
"(",
"instructions",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"+",
"9",
"if",
"not",
"gunicorns",
":",
"screen_height",
"+=",
"2",
"# A couple of blank lines are added when empty.",
"screen",
".",
"erase",
"(",
")",
"win",
"=",
"curses",
".",
"newwin",
"(",
"screen_height",
",",
"screen_width",
"+",
"6",
",",
"1",
",",
"3",
")",
"win",
".",
"bkgd",
"(",
"\" \"",
",",
"curses",
".",
"color_pair",
"(",
"1",
")",
")",
"win",
".",
"border",
"(",
")",
"x",
"=",
"3",
"blank_line",
"=",
"y",
"=",
"count",
"(",
"2",
")",
".",
"next",
"win",
".",
"addstr",
"(",
"y",
"(",
")",
",",
"x",
",",
"title",
".",
"center",
"(",
"screen_width",
")",
",",
"curses",
".",
"A_NORMAL",
")",
"blank_line",
"(",
")",
"win",
".",
"addstr",
"(",
"y",
"(",
")",
",",
"x",
",",
"format_row",
"(",
"\" PID\"",
",",
"\"PORT\"",
",",
"\"NAME\"",
",",
"\"MEM (MB)\"",
",",
"\"WORKERS\"",
")",
",",
"curses",
".",
"A_STANDOUT",
")",
"if",
"not",
"gunicorns",
":",
"blank_line",
"(",
")",
"win",
".",
"addstr",
"(",
"y",
"(",
")",
",",
"x",
",",
"no_gunicorns",
".",
"center",
"(",
"screen_width",
")",
",",
"curses",
".",
"A_NORMAL",
")",
"blank_line",
"(",
")",
"else",
":",
"win",
".",
"hline",
"(",
"y",
"(",
")",
",",
"x",
",",
"curses",
".",
"ACS_HLINE",
",",
"screen_width",
")",
"for",
"(",
"i",
",",
"pid",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"gunicorns",
".",
"keys",
"(",
")",
")",
")",
":",
"port",
"=",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"port\"",
"]",
"name",
"=",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"name\"",
"]",
"mem",
"=",
"\"%#.3f\"",
"%",
"(",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"mem\"",
"]",
"/",
"1000.",
")",
"workers",
"=",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"workers\"",
"]",
"# When a signal is sent to update the number of workers, the number",
"# of workers is set to zero as a marker to signify an update has",
"# occurred. We then piggyback this variable and use it as a counter",
"# to animate the display until the gunicorn is next updated.",
"if",
"workers",
"<",
"1",
":",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"workers\"",
"]",
"-=",
"1",
"chars",
"=",
"\"|/-\\\\\"",
"workers",
"*=",
"-",
"1",
"if",
"workers",
"==",
"len",
"(",
"chars",
")",
":",
"gunicorns",
"[",
"pid",
"]",
"[",
"\"workers\"",
"]",
"=",
"workers",
"=",
"0",
"workers",
"=",
"chars",
"[",
"workers",
"]",
"if",
"pid",
"==",
"selected_pid",
":",
"attr",
"=",
"curses",
".",
"A_STANDOUT",
"prefix_char",
"=",
"'> '",
"else",
":",
"attr",
"=",
"curses",
".",
"A_NORMAL",
"prefix_char",
"=",
"' '",
"win",
".",
"addstr",
"(",
"y",
"(",
")",
",",
"x",
",",
"format_row",
"(",
"pid",
",",
"port",
",",
"name",
",",
"mem",
",",
"workers",
",",
"prefix_char",
")",
",",
"attr",
")",
"win",
".",
"hline",
"(",
"y",
"(",
")",
",",
"x",
",",
"curses",
".",
"ACS_HLINE",
",",
"screen_width",
")",
"blank_line",
"(",
")",
"for",
"line",
"in",
"instructions",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"win",
".",
"addstr",
"(",
"y",
"(",
")",
",",
"x",
",",
"line",
".",
"center",
"(",
"screen_width",
")",
",",
"curses",
".",
"A_NORMAL",
")",
"win",
".",
"refresh",
"(",
")"
]
| Display the menu list of gunicorns. | [
"Display",
"the",
"menu",
"list",
"of",
"gunicorns",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L209-L262 | train |
stephenmcd/gunicorn-console | gunicorn_console.py | main | def main():
"""
Main entry point for gunicorn_console.
"""
# Set up curses.
stdscr = curses.initscr()
curses.start_color()
curses.init_pair(1, foreground_colour, background_colour)
curses.noecho()
stdscr.keypad(True)
stdscr.nodelay(True)
try:
curses.curs_set(False)
except:
pass
try:
# Run main event loop until quit.
while True:
try:
update_gunicorns()
handle_keypress(stdscr)
display_output(stdscr)
curses.napms(int(screen_delay * 1000))
except KeyboardInterrupt:
break
finally:
# Tear down curses.
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin() | python | def main():
"""
Main entry point for gunicorn_console.
"""
# Set up curses.
stdscr = curses.initscr()
curses.start_color()
curses.init_pair(1, foreground_colour, background_colour)
curses.noecho()
stdscr.keypad(True)
stdscr.nodelay(True)
try:
curses.curs_set(False)
except:
pass
try:
# Run main event loop until quit.
while True:
try:
update_gunicorns()
handle_keypress(stdscr)
display_output(stdscr)
curses.napms(int(screen_delay * 1000))
except KeyboardInterrupt:
break
finally:
# Tear down curses.
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin() | [
"def",
"main",
"(",
")",
":",
"# Set up curses.",
"stdscr",
"=",
"curses",
".",
"initscr",
"(",
")",
"curses",
".",
"start_color",
"(",
")",
"curses",
".",
"init_pair",
"(",
"1",
",",
"foreground_colour",
",",
"background_colour",
")",
"curses",
".",
"noecho",
"(",
")",
"stdscr",
".",
"keypad",
"(",
"True",
")",
"stdscr",
".",
"nodelay",
"(",
"True",
")",
"try",
":",
"curses",
".",
"curs_set",
"(",
"False",
")",
"except",
":",
"pass",
"try",
":",
"# Run main event loop until quit.",
"while",
"True",
":",
"try",
":",
"update_gunicorns",
"(",
")",
"handle_keypress",
"(",
"stdscr",
")",
"display_output",
"(",
"stdscr",
")",
"curses",
".",
"napms",
"(",
"int",
"(",
"screen_delay",
"*",
"1000",
")",
")",
"except",
"KeyboardInterrupt",
":",
"break",
"finally",
":",
"# Tear down curses.",
"curses",
".",
"nocbreak",
"(",
")",
"stdscr",
".",
"keypad",
"(",
"False",
")",
"curses",
".",
"echo",
"(",
")",
"curses",
".",
"endwin",
"(",
")"
]
| Main entry point for gunicorn_console. | [
"Main",
"entry",
"point",
"for",
"gunicorn_console",
"."
]
| f5c9b9a69ea1f2ca00aac3565cb99491684d868a | https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L265-L295 | train |
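The try/finally teardown in main() (nocbreak, keypad(False), echo, endwin) is exactly what curses.wrapper() automates. An equivalent entry point could be sketched as follows; the body comment stands in for the repo's update/draw calls:

import curses

def main_loop(stdscr):
    stdscr.nodelay(True)
    while True:
        try:
            # update_gunicorns(); handle_keypress(stdscr); display_output(stdscr)
            curses.napms(200)
        except KeyboardInterrupt:
            break

if __name__ == "__main__":
    curses.wrapper(main_loop)  # restores the terminal even on exceptions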
pgxcentre/geneparse | geneparse/extract/extractor.py | _get_variant_silent | def _get_variant_silent(parser, variant):
"""Gets a variant from the parser while disabling logging."""
prev_log = config.LOG_NOT_FOUND
config.LOG_NOT_FOUND = False
results = parser.get_variant_genotypes(variant)
config.LOG_NOT_FOUND = prev_log
return results | python | def _get_variant_silent(parser, variant):
"""Gets a variant from the parser while disabling logging."""
prev_log = config.LOG_NOT_FOUND
config.LOG_NOT_FOUND = False
results = parser.get_variant_genotypes(variant)
config.LOG_NOT_FOUND = prev_log
return results | [
"def",
"_get_variant_silent",
"(",
"parser",
",",
"variant",
")",
":",
"prev_log",
"=",
"config",
".",
"LOG_NOT_FOUND",
"config",
".",
"LOG_NOT_FOUND",
"=",
"False",
"results",
"=",
"parser",
".",
"get_variant_genotypes",
"(",
"variant",
")",
"config",
".",
"LOG_NOT_FOUND",
"=",
"prev_log",
"return",
"results"
]
| Gets a variant from the parser while disabling logging. | [
"Gets",
"a",
"variant",
"from",
"the",
"parser",
"while",
"disabling",
"logging",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/extract/extractor.py#L93-L99 | train |
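_get_variant_silent saves, overrides, and restores a module-level flag; if get_variant_genotypes raised, the flag would stay False. A contextmanager makes the same toggle exception-safe (sketch; config is the geneparse module referenced above):

from contextlib import contextmanager

@contextmanager
def silenced_not_found_logging(config):
    prev_log = config.LOG_NOT_FOUND
    config.LOG_NOT_FOUND = False
    try:
        yield
    finally:
        config.LOG_NOT_FOUND = prev_log  # restored even on error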
KnightConan/sspdatatables | src/sspdatatables/utils/enum.py | ExtendedEnumMeta._attrs_ | def _attrs_(mcs, cls, attr_name: str) -> Tuple[Any, ...]:
"""
Returns a tuple containing just the value of the given attr_name of all
the elements from the cls.
:return: tuple of different types
"""
return tuple(map(lambda x: getattr(x, attr_name), list(cls))) | python | def _attrs_(mcs, cls, attr_name: str) -> Tuple[Any, ...]:
"""
Returns a tuple containing just the value of the given attr_name of all
the elements from the cls.
:return: tuple of different types
"""
return tuple(map(lambda x: getattr(x, attr_name), list(cls))) | [
"def",
"_attrs_",
"(",
"mcs",
",",
"cls",
",",
"attr_name",
":",
"str",
")",
"->",
"Tuple",
"[",
"Any",
",",
"...",
"]",
":",
"return",
"tuple",
"(",
"map",
"(",
"lambda",
"x",
":",
"getattr",
"(",
"x",
",",
"attr_name",
")",
",",
"list",
"(",
"cls",
")",
")",
")"
]
| Returns a tuple containing just the value of the given attr_name of all
the elements from the cls.
:return: tuple of different types | [
"Returns",
"a",
"tuple",
"containing",
"just",
"the",
"value",
"of",
"the",
"given",
"attr_name",
"of",
"all",
"the",
"elements",
"from",
"the",
"cls",
"."
]
| 1179a11358734e5e472e5eee703e8d34fa49e9bf | https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/enum.py#L55-L62 | train |
KnightConan/sspdatatables | src/sspdatatables/utils/enum.py | ExtendedEnumMeta._from_attr_ | def _from_attr_(mcs, cls, attr_name: str, attr_value: Any) -> TypeVar:
"""
        Returns the enumeration item corresponding to the attribute name and value,
or None if not found for the given cls
:param attr_name: str: attribute's name
:param attr_value: different values: key to search for
:return: Enumeration Item
"""
return next(iter(filter(lambda x: getattr(x, attr_name) == attr_value,
list(cls))), None) | python | def _from_attr_(mcs, cls, attr_name: str, attr_value: Any) -> TypeVar:
"""
        Returns the enumeration item corresponding to the attribute name and value,
or None if not found for the given cls
:param attr_name: str: attribute's name
:param attr_value: different values: key to search for
:return: Enumeration Item
"""
return next(iter(filter(lambda x: getattr(x, attr_name) == attr_value,
list(cls))), None) | [
"def",
"_from_attr_",
"(",
"mcs",
",",
"cls",
",",
"attr_name",
":",
"str",
",",
"attr_value",
":",
"Any",
")",
"->",
"TypeVar",
":",
"return",
"next",
"(",
"iter",
"(",
"filter",
"(",
"lambda",
"x",
":",
"getattr",
"(",
"x",
",",
"attr_name",
")",
"==",
"attr_value",
",",
"list",
"(",
"cls",
")",
")",
")",
",",
"None",
")"
]
| Returns the enumeration item corresponding to the attribute name and value,
or None if not found for the given cls
:param attr_name: str: attribute's name
:param attr_value: different values: key to search for
:return: Enumeration Item | [
"Returns",
"the",
"enumeration",
"item",
"regarding",
"to",
"the",
"attribute",
"name",
"and",
"value",
"or",
"None",
"if",
"not",
"found",
"for",
"the",
"given",
"cls"
]
| 1179a11358734e5e472e5eee703e8d34fa49e9bf | https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/enum.py#L65-L75 | train |
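The lookup that _from_attr_ generalizes, shown on a plain Enum: return the first member whose attribute equals the key, or None (standalone sketch, not from the package):

from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2

def from_attr(cls, attr_name, attr_value):
    return next(iter(filter(
        lambda x: getattr(x, attr_name) == attr_value, list(cls))), None)

assert from_attr(Color, "value", 2) is Color.GREEN
assert from_attr(Color, "value", 99) is None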
KnightConan/sspdatatables | src/sspdatatables/utils/enum.py | ExtendedEnum.describe | def describe(cls) -> None:
"""
Prints in the console a table showing all the attributes for all the
definitions inside the class
:return: None
"""
max_lengths = []
for attr_name in cls.attr_names():
attr_func = "%ss" % attr_name
attr_list = list(map(str, getattr(cls, attr_func)())) + [attr_name]
max_lengths.append(max(list(map(len, attr_list))))
row_format = "{:>%d} | {:>%d} | {:>%d}" % tuple(max_lengths)
headers = [attr_name.capitalize() for attr_name in cls.attr_names()]
header_line = row_format.format(*headers)
output = "Class: %s\n" % cls.__name__
output += header_line + "\n"
output += "-"*(len(header_line)) + "\n"
for item in cls:
format_list = [str(getattr(item, attr_name))
for attr_name in cls.attr_names()]
output += row_format.format(*format_list) + "\n"
print(output) | python | def describe(cls) -> None:
"""
Prints in the console a table showing all the attributes for all the
definitions inside the class
:return: None
"""
max_lengths = []
for attr_name in cls.attr_names():
attr_func = "%ss" % attr_name
attr_list = list(map(str, getattr(cls, attr_func)())) + [attr_name]
max_lengths.append(max(list(map(len, attr_list))))
row_format = "{:>%d} | {:>%d} | {:>%d}" % tuple(max_lengths)
headers = [attr_name.capitalize() for attr_name in cls.attr_names()]
header_line = row_format.format(*headers)
output = "Class: %s\n" % cls.__name__
output += header_line + "\n"
output += "-"*(len(header_line)) + "\n"
for item in cls:
format_list = [str(getattr(item, attr_name))
for attr_name in cls.attr_names()]
output += row_format.format(*format_list) + "\n"
print(output) | [
"def",
"describe",
"(",
"cls",
")",
"->",
"None",
":",
"max_lengths",
"=",
"[",
"]",
"for",
"attr_name",
"in",
"cls",
".",
"attr_names",
"(",
")",
":",
"attr_func",
"=",
"\"%ss\"",
"%",
"attr_name",
"attr_list",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"getattr",
"(",
"cls",
",",
"attr_func",
")",
"(",
")",
")",
")",
"+",
"[",
"attr_name",
"]",
"max_lengths",
".",
"append",
"(",
"max",
"(",
"list",
"(",
"map",
"(",
"len",
",",
"attr_list",
")",
")",
")",
")",
"row_format",
"=",
"\"{:>%d} | {:>%d} | {:>%d}\"",
"%",
"tuple",
"(",
"max_lengths",
")",
"headers",
"=",
"[",
"attr_name",
".",
"capitalize",
"(",
")",
"for",
"attr_name",
"in",
"cls",
".",
"attr_names",
"(",
")",
"]",
"header_line",
"=",
"row_format",
".",
"format",
"(",
"*",
"headers",
")",
"output",
"=",
"\"Class: %s\\n\"",
"%",
"cls",
".",
"__name__",
"output",
"+=",
"header_line",
"+",
"\"\\n\"",
"output",
"+=",
"\"-\"",
"*",
"(",
"len",
"(",
"header_line",
")",
")",
"+",
"\"\\n\"",
"for",
"item",
"in",
"cls",
":",
"format_list",
"=",
"[",
"str",
"(",
"getattr",
"(",
"item",
",",
"attr_name",
")",
")",
"for",
"attr_name",
"in",
"cls",
".",
"attr_names",
"(",
")",
"]",
"output",
"+=",
"row_format",
".",
"format",
"(",
"*",
"format_list",
")",
"+",
"\"\\n\"",
"print",
"(",
"output",
")"
]
| Prints in the console a table showing all the attributes for all the
definitions inside the class
:return: None | [
"Prints",
"in",
"the",
"console",
"a",
"table",
"showing",
"all",
"the",
"attributes",
"for",
"all",
"the",
"definitions",
"inside",
"the",
"class"
]
| 1179a11358734e5e472e5eee703e8d34fa49e9bf | https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/enum.py#L135-L157 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | read_iter | def read_iter(use_fpi):
'''Return the path to the final .mag file either for the complex or the fpi
inversion.
'''
filename_rhosuffix = 'exe/inv.lastmod_rho'
filename = 'exe/inv.lastmod'
# filename HAS to exist. Otherwise the inversion was not finished
if(not os.path.isfile(filename)):
print('Inversion was not finished! No last iteration found.')
if(use_fpi is True):
if(os.path.isfile(filename_rhosuffix)):
filename = filename_rhosuffix
linestring = open(filename, 'r').readline().strip()
linestring = linestring.replace('\n', '')
linestring = linestring.replace('../', '')
return linestring | python | def read_iter(use_fpi):
'''Return the path to the final .mag file either for the complex or the fpi
inversion.
'''
filename_rhosuffix = 'exe/inv.lastmod_rho'
filename = 'exe/inv.lastmod'
# filename HAS to exist. Otherwise the inversion was not finished
if(not os.path.isfile(filename)):
print('Inversion was not finished! No last iteration found.')
if(use_fpi is True):
if(os.path.isfile(filename_rhosuffix)):
filename = filename_rhosuffix
linestring = open(filename, 'r').readline().strip()
linestring = linestring.replace('\n', '')
linestring = linestring.replace('../', '')
return linestring | [
"def",
"read_iter",
"(",
"use_fpi",
")",
":",
"filename_rhosuffix",
"=",
"'exe/inv.lastmod_rho'",
"filename",
"=",
"'exe/inv.lastmod'",
"# filename HAS to exist. Otherwise the inversion was not finished",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"print",
"(",
"'Inversion was not finished! No last iteration found.'",
")",
"if",
"(",
"use_fpi",
"is",
"True",
")",
":",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"filename_rhosuffix",
")",
")",
":",
"filename",
"=",
"filename_rhosuffix",
"linestring",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"'../'",
",",
"''",
")",
"return",
"linestring"
]
| Return the path to the final .mag file either for the complex or the fpi
inversion. | [
"Return",
"the",
"path",
"to",
"the",
"final",
".",
"mag",
"file",
"either",
"for",
"the",
"complex",
"or",
"the",
"fpi",
"inversion",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L228-L245 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | list_datafiles | def list_datafiles():
'''Get the type of the tomodir and the highest iteration to list all files,
which will be plotted.
'''
is_cplx, is_fpi = td_type()
# get the highest iteration
it_rho = read_iter(is_fpi)
it_phase = read_iter(False)
# list the files
files = ['inv/coverage.mag']
dtype = ['cov']
files.append(it_rho)
dtype.append('mag')
if is_cplx:
files.append(it_rho.replace('mag', 'pha'))
dtype.append('pha')
if is_fpi:
files.append(it_phase.replace('mag', 'pha'))
dtype.append('pha_fpi')
return files, dtype | python | def list_datafiles():
'''Get the type of the tomodir and the highest iteration to list all files,
which will be plotted.
'''
is_cplx, is_fpi = td_type()
# get the highest iteration
it_rho = read_iter(is_fpi)
it_phase = read_iter(False)
# list the files
files = ['inv/coverage.mag']
dtype = ['cov']
files.append(it_rho)
dtype.append('mag')
if is_cplx:
files.append(it_rho.replace('mag', 'pha'))
dtype.append('pha')
if is_fpi:
files.append(it_phase.replace('mag', 'pha'))
dtype.append('pha_fpi')
return files, dtype | [
"def",
"list_datafiles",
"(",
")",
":",
"is_cplx",
",",
"is_fpi",
"=",
"td_type",
"(",
")",
"# get the highest iteration",
"it_rho",
"=",
"read_iter",
"(",
"is_fpi",
")",
"it_phase",
"=",
"read_iter",
"(",
"False",
")",
"# list the files",
"files",
"=",
"[",
"'inv/coverage.mag'",
"]",
"dtype",
"=",
"[",
"'cov'",
"]",
"files",
".",
"append",
"(",
"it_rho",
")",
"dtype",
".",
"append",
"(",
"'mag'",
")",
"if",
"is_cplx",
":",
"files",
".",
"append",
"(",
"it_rho",
".",
"replace",
"(",
"'mag'",
",",
"'pha'",
")",
")",
"dtype",
".",
"append",
"(",
"'pha'",
")",
"if",
"is_fpi",
":",
"files",
".",
"append",
"(",
"it_phase",
".",
"replace",
"(",
"'mag'",
",",
"'pha'",
")",
")",
"dtype",
".",
"append",
"(",
"'pha_fpi'",
")",
"return",
"files",
",",
"dtype"
]
| Get the type of the tomodir and the highest iteration to list all files,
which will be plotted. | [
"Get",
"the",
"type",
"of",
"the",
"tomodir",
"and",
"the",
"highest",
"iteration",
"to",
"list",
"all",
"files",
"which",
"will",
"be",
"plotted",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L265-L285 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | read_datafiles | def read_datafiles(files, dtype, column):
'''Load the datafiles and return cov, mag, phase and fpi phase values.
'''
pha = []
pha_fpi = []
for filename, filetype in zip(files, dtype):
if filetype == 'cov':
cov = load_cov(filename)
elif filetype == 'mag':
mag = load_rho(filename, column)
elif filetype == 'pha':
pha = load_rho(filename, 2)
elif filetype == 'pha_fpi':
pha_fpi = load_rho(filename, 2)
return cov, mag, pha, pha_fpi | python | def read_datafiles(files, dtype, column):
'''Load the datafiles and return cov, mag, phase and fpi phase values.
'''
pha = []
pha_fpi = []
for filename, filetype in zip(files, dtype):
if filetype == 'cov':
cov = load_cov(filename)
elif filetype == 'mag':
mag = load_rho(filename, column)
elif filetype == 'pha':
pha = load_rho(filename, 2)
elif filetype == 'pha_fpi':
pha_fpi = load_rho(filename, 2)
return cov, mag, pha, pha_fpi | [
"def",
"read_datafiles",
"(",
"files",
",",
"dtype",
",",
"column",
")",
":",
"pha",
"=",
"[",
"]",
"pha_fpi",
"=",
"[",
"]",
"for",
"filename",
",",
"filetype",
"in",
"zip",
"(",
"files",
",",
"dtype",
")",
":",
"if",
"filetype",
"==",
"'cov'",
":",
"cov",
"=",
"load_cov",
"(",
"filename",
")",
"elif",
"filetype",
"==",
"'mag'",
":",
"mag",
"=",
"load_rho",
"(",
"filename",
",",
"column",
")",
"elif",
"filetype",
"==",
"'pha'",
":",
"pha",
"=",
"load_rho",
"(",
"filename",
",",
"2",
")",
"elif",
"filetype",
"==",
"'pha_fpi'",
":",
"pha_fpi",
"=",
"load_rho",
"(",
"filename",
",",
"2",
")",
"return",
"cov",
",",
"mag",
",",
"pha",
",",
"pha_fpi"
]
| Load the datafiles and return cov, mag, phase and fpi phase values. | [
"Load",
"the",
"datafiles",
"and",
"return",
"cov",
"mag",
"phase",
"and",
"fpi",
"phase",
"values",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L288-L303 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | load_cov | def load_cov(name):
'''Load a datafile with coverage file structure.
'''
content = np.genfromtxt(name, skip_header=1, skip_footer=1, usecols=([2]))
return content | python | def load_cov(name):
'''Load a datafile with coverage file structure.
'''
content = np.genfromtxt(name, skip_header=1, skip_footer=1, usecols=([2]))
return content | [
"def",
"load_cov",
"(",
"name",
")",
":",
"content",
"=",
"np",
".",
"genfromtxt",
"(",
"name",
",",
"skip_header",
"=",
"1",
",",
"skip_footer",
"=",
"1",
",",
"usecols",
"=",
"(",
"[",
"2",
"]",
")",
")",
"return",
"content"
]
| Load a datafile with coverage file structure. | [
"Load",
"a",
"datafile",
"with",
"coverage",
"file",
"structure",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L306-L311 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | load_rho | def load_rho(name, column):
'''Load a datafile with rho structure like mag and phase
'''
try:
content = np.loadtxt(name, skiprows=1, usecols=([column]))
    except Exception:
raise ValueError('Given column to open does not exist.')
return content | python | def load_rho(name, column):
'''Load a datafile with rho structure like mag and phase
'''
try:
content = np.loadtxt(name, skiprows=1, usecols=([column]))
    except Exception:
raise ValueError('Given column to open does not exist.')
return content | [
"def",
"load_rho",
"(",
"name",
",",
"column",
")",
":",
"try",
":",
"content",
"=",
"np",
".",
"loadtxt",
"(",
"name",
",",
"skiprows",
"=",
"1",
",",
"usecols",
"=",
"(",
"[",
"column",
"]",
")",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Given column to open does not exist.'",
")",
"return",
"content"
]
| Load a datafile with rho structure like mag and phase | [
"Load",
"a",
"datafile",
"with",
"rho",
"structure",
"like",
"mag",
"and",
"phase"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L314-L322 | train |
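A short usage sketch, run in the module's namespace; the file name is an assumption. One caveat worth knowing: the bare except also converts unrelated failures (a missing file, a malformed row) into the same column error message.

mag = load_rho('rho.mag', 2)      # hypothetical magnitude file
try:
    load_rho('rho.mag', 99)       # out-of-range column index
except ValueError as err:
    print(err)                    # Given column to open does not exist.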
geophysics-ubonn/crtomo_tools | src/td_plot.py | calc_complex | def calc_complex(mag, pha):
''' Calculate real and imaginary part of the complex conductivity from
magnitude and phase in log10.
'''
complx = [10 ** m * math.e ** (1j * p / 1e3) for m, p in zip(mag, pha)]
real = [math.log10((1 / c).real) for c in complx]
imag = []
for c in complx:
if ((1 / c).imag) == 0:
imag.append(math.nan)
else:
i = math.log10(abs((1 / c).imag))
imag.append(i)
return real, imag | python | def calc_complex(mag, pha):
''' Calculate real and imaginary part of the complex conductivity from
magnitude and phase in log10.
'''
complx = [10 ** m * math.e ** (1j * p / 1e3) for m, p in zip(mag, pha)]
real = [math.log10((1 / c).real) for c in complx]
imag = []
for c in complx:
if ((1 / c).imag) == 0:
imag.append(math.nan)
else:
i = math.log10(abs((1 / c).imag))
imag.append(i)
return real, imag | [
"def",
"calc_complex",
"(",
"mag",
",",
"pha",
")",
":",
"complx",
"=",
"[",
"10",
"**",
"m",
"*",
"math",
".",
"e",
"**",
"(",
"1j",
"*",
"p",
"/",
"1e3",
")",
"for",
"m",
",",
"p",
"in",
"zip",
"(",
"mag",
",",
"pha",
")",
"]",
"real",
"=",
"[",
"math",
".",
"log10",
"(",
"(",
"1",
"/",
"c",
")",
".",
"real",
")",
"for",
"c",
"in",
"complx",
"]",
"imag",
"=",
"[",
"]",
"for",
"c",
"in",
"complx",
":",
"if",
"(",
"(",
"1",
"/",
"c",
")",
".",
"imag",
")",
"==",
"0",
":",
"imag",
".",
"append",
"(",
"math",
".",
"nan",
")",
"else",
":",
"i",
"=",
"math",
".",
"log10",
"(",
"abs",
"(",
"(",
"1",
"/",
"c",
")",
".",
"imag",
")",
")",
"imag",
".",
"append",
"(",
"i",
")",
"return",
"real",
",",
"imag"
]
| Calculate real and imaginary part of the complex conductivity from
magnitude and phase in log10. | [
"Calculate",
"real",
"and",
"imaginary",
"part",
"of",
"the",
"complex",
"conductivity",
"from",
"magnitude",
"and",
"phase",
"in",
"log10",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L325-L338 | train |
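A worked example of the conversion: the inputs are log10 resistivity magnitudes and phases in mrad, and the outputs are log10 of the real part and of the absolute imaginary part of the complex conductivity 1/rho.

# A cell with log10 magnitude m = 2 (100 Ohm*m) and phase p = -10 mrad:
real, imag = calc_complex([2.0], [-10.0])
# 1 / (100 * e**(-0.01j)) = 0.01 * e**(0.01j), hence:
print(real[0])   # ~= log10(0.01 * cos(0.01)) ~= -2.0000
print(imag[0])   # ~= log10(0.01 * sin(0.01)) ~= -4.0000
# A phase of exactly zero yields a purely real conductivity, and the
# imaginary entry is then recorded as math.nan rather than -inf.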
geophysics-ubonn/crtomo_tools | src/td_plot.py | plot_ratio | def plot_ratio(cid, ax, plotman, title, alpha, vmin, vmax,
xmin, xmax, zmin, zmax, xunit, cbtiks, elecs):
'''Plot ratio of two conductivity directions.
'''
# handle options
cblabel = 'anisotropy ratio'
zlabel = 'z [' + xunit + ']'
xlabel = 'x [' + xunit + ']'
# cm = 'brg'
cm = 'RdYlGn'
xmin, xmax, zmin, zmax, vmin, vmax = check_minmax(
plotman,
cid,
xmin, xmax,
zmin, zmax,
vmin, vmax,
)
# plot
fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax(
cid=cid,
ax=ax,
xmin=xmin,
xmax=xmax,
zmin=zmin,
zmax=zmax,
cblabel=cblabel,
cbnrticks=cbtiks,
title=title,
zlabel=zlabel,
xlabel=xlabel,
plot_colorbar=True,
cmap_name=cm,
no_elecs=elecs,
cbmin=vmin,
cbmax=vmax,
)
return fig, ax, cnorm, cmap, cb | python | def plot_ratio(cid, ax, plotman, title, alpha, vmin, vmax,
xmin, xmax, zmin, zmax, xunit, cbtiks, elecs):
'''Plot ratio of two conductivity directions.
'''
# handle options
cblabel = 'anisotropy ratio'
zlabel = 'z [' + xunit + ']'
xlabel = 'x [' + xunit + ']'
# cm = 'brg'
cm = 'RdYlGn'
xmin, xmax, zmin, zmax, vmin, vmax = check_minmax(
plotman,
cid,
xmin, xmax,
zmin, zmax,
vmin, vmax,
)
# plot
fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax(
cid=cid,
ax=ax,
xmin=xmin,
xmax=xmax,
zmin=zmin,
zmax=zmax,
cblabel=cblabel,
cbnrticks=cbtiks,
title=title,
zlabel=zlabel,
xlabel=xlabel,
plot_colorbar=True,
cmap_name=cm,
no_elecs=elecs,
cbmin=vmin,
cbmax=vmax,
)
return fig, ax, cnorm, cmap, cb | [
"def",
"plot_ratio",
"(",
"cid",
",",
"ax",
",",
"plotman",
",",
"title",
",",
"alpha",
",",
"vmin",
",",
"vmax",
",",
"xmin",
",",
"xmax",
",",
"zmin",
",",
"zmax",
",",
"xunit",
",",
"cbtiks",
",",
"elecs",
")",
":",
"# handle options",
"cblabel",
"=",
"'anisotropy ratio'",
"zlabel",
"=",
"'z ['",
"+",
"xunit",
"+",
"']'",
"xlabel",
"=",
"'x ['",
"+",
"xunit",
"+",
"']'",
"# cm = 'brg'",
"cm",
"=",
"'RdYlGn'",
"xmin",
",",
"xmax",
",",
"zmin",
",",
"zmax",
",",
"vmin",
",",
"vmax",
"=",
"check_minmax",
"(",
"plotman",
",",
"cid",
",",
"xmin",
",",
"xmax",
",",
"zmin",
",",
"zmax",
",",
"vmin",
",",
"vmax",
",",
")",
"# plot",
"fig",
",",
"ax",
",",
"cnorm",
",",
"cmap",
",",
"cb",
",",
"scalarMap",
"=",
"plotman",
".",
"plot_elements_to_ax",
"(",
"cid",
"=",
"cid",
",",
"ax",
"=",
"ax",
",",
"xmin",
"=",
"xmin",
",",
"xmax",
"=",
"xmax",
",",
"zmin",
"=",
"zmin",
",",
"zmax",
"=",
"zmax",
",",
"cblabel",
"=",
"cblabel",
",",
"cbnrticks",
"=",
"cbtiks",
",",
"title",
"=",
"title",
",",
"zlabel",
"=",
"zlabel",
",",
"xlabel",
"=",
"xlabel",
",",
"plot_colorbar",
"=",
"True",
",",
"cmap_name",
"=",
"cm",
",",
"no_elecs",
"=",
"elecs",
",",
"cbmin",
"=",
"vmin",
",",
"cbmax",
"=",
"vmax",
",",
")",
"return",
"fig",
",",
"ax",
",",
"cnorm",
",",
"cmap",
",",
"cb"
]
| Plot ratio of two conductivity directions. | [
"Plot",
"ratio",
"of",
"two",
"conductivity",
"directions",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L534-L570 | train |
geophysics-ubonn/crtomo_tools | src/td_plot.py | check_minmax | def check_minmax(plotman, cid, xmin, xmax, zmin, zmax, vmin, vmax):
'''Get min and max values for axes and colorbar if not given
'''
if xmin is None:
xmin = plotman.grid.grid['x'].min()
if xmax is None:
xmax = plotman.grid.grid['x'].max()
if zmin is None:
zmin = plotman.grid.grid['z'].min()
if zmax is None:
zmax = plotman.grid.grid['z'].max()
if isinstance(cid, int):
subdata = plotman.parman.parsets[cid]
else:
subdata = cid
if vmin is None:
vmin = subdata.min()
if vmax is None:
vmax = subdata.max()
return xmin, xmax, zmin, zmax, vmin, vmax | python | def check_minmax(plotman, cid, xmin, xmax, zmin, zmax, vmin, vmax):
'''Get min and max values for axes and colorbar if not given
'''
if xmin is None:
xmin = plotman.grid.grid['x'].min()
if xmax is None:
xmax = plotman.grid.grid['x'].max()
if zmin is None:
zmin = plotman.grid.grid['z'].min()
if zmax is None:
zmax = plotman.grid.grid['z'].max()
if isinstance(cid, int):
subdata = plotman.parman.parsets[cid]
else:
subdata = cid
if vmin is None:
vmin = subdata.min()
if vmax is None:
vmax = subdata.max()
return xmin, xmax, zmin, zmax, vmin, vmax | [
"def",
"check_minmax",
"(",
"plotman",
",",
"cid",
",",
"xmin",
",",
"xmax",
",",
"zmin",
",",
"zmax",
",",
"vmin",
",",
"vmax",
")",
":",
"if",
"xmin",
"is",
"None",
":",
"xmin",
"=",
"plotman",
".",
"grid",
".",
"grid",
"[",
"'x'",
"]",
".",
"min",
"(",
")",
"if",
"xmax",
"is",
"None",
":",
"xmax",
"=",
"plotman",
".",
"grid",
".",
"grid",
"[",
"'x'",
"]",
".",
"max",
"(",
")",
"if",
"zmin",
"is",
"None",
":",
"zmin",
"=",
"plotman",
".",
"grid",
".",
"grid",
"[",
"'z'",
"]",
".",
"min",
"(",
")",
"if",
"zmax",
"is",
"None",
":",
"zmax",
"=",
"plotman",
".",
"grid",
".",
"grid",
"[",
"'z'",
"]",
".",
"max",
"(",
")",
"if",
"isinstance",
"(",
"cid",
",",
"int",
")",
":",
"subdata",
"=",
"plotman",
".",
"parman",
".",
"parsets",
"[",
"cid",
"]",
"else",
":",
"subdata",
"=",
"cid",
"if",
"vmin",
"is",
"None",
":",
"vmin",
"=",
"subdata",
".",
"min",
"(",
")",
"if",
"vmax",
"is",
"None",
":",
"vmax",
"=",
"subdata",
".",
"max",
"(",
")",
"return",
"xmin",
",",
"xmax",
",",
"zmin",
",",
"zmax",
",",
"vmin",
",",
"vmax"
]
| Get min and max values for axes and colorbar if not given | [
"Get",
"min",
"and",
"max",
"values",
"for",
"axes",
"and",
"colorbar",
"if",
"not",
"given"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L587-L607 | train |
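The fallbacks can be seen without a full plot manager by passing stub objects; the two classes below are stand-ins, not crtomo API, and the data array is passed directly in place of an integer parset id.

import numpy as np

class _Grid:
    grid = {'x': np.array([0.0, 10.0]), 'z': np.array([-5.0, 0.0])}

class _Plotman:
    grid = _Grid()

data = np.array([1.0, 2.5, 4.0])
print(check_minmax(_Plotman(), data, None, None, None, None, None, None))
# -> (0.0, 10.0, -5.0, 0.0, 1.0, 4.0)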
unt-libraries/pyuntl | pyuntl/highwire_structure.py | citation_director | def citation_director(**kwargs):
"""Direct the citation elements based on their qualifier."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'publicationTitle':
return CitationJournalTitle(content=content)
elif qualifier == 'volume':
return CitationVolume(content=content)
elif qualifier == 'issue':
return CitationIssue(content=content)
elif qualifier == 'pageStart':
return CitationFirstpage(content=content)
elif qualifier == 'pageEnd':
return CitationLastpage(content=content)
else:
return None | python | def citation_director(**kwargs):
"""Direct the citation elements based on their qualifier."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'publicationTitle':
return CitationJournalTitle(content=content)
elif qualifier == 'volume':
return CitationVolume(content=content)
elif qualifier == 'issue':
return CitationIssue(content=content)
elif qualifier == 'pageStart':
return CitationFirstpage(content=content)
elif qualifier == 'pageEnd':
return CitationLastpage(content=content)
else:
return None | [
"def",
"citation_director",
"(",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"''",
")",
"if",
"qualifier",
"==",
"'publicationTitle'",
":",
"return",
"CitationJournalTitle",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'volume'",
":",
"return",
"CitationVolume",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'issue'",
":",
"return",
"CitationIssue",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'pageStart'",
":",
"return",
"CitationFirstpage",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'pageEnd'",
":",
"return",
"CitationLastpage",
"(",
"content",
"=",
"content",
")",
"else",
":",
"return",
"None"
]
| Direct the citation elements based on their qualifier. | [
"Direct",
"the",
"citation",
"elements",
"based",
"on",
"their",
"qualifier",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L237-L252 | train |
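A quick dispatch sketch: recognised qualifiers return the matching Highwire element, anything else falls through to None.

element = citation_director(qualifier='volume', content='12')
print(type(element).__name__)                                # CitationVolume
print(citation_director(qualifier='publisher', content='x')) # None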
unt-libraries/pyuntl | pyuntl/highwire_structure.py | identifier_director | def identifier_director(**kwargs):
"""Direct the identifier elements based on their qualifier."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'ISBN':
return CitationISBN(content=content)
elif qualifier == 'ISSN':
return CitationISSN(content=content)
elif qualifier == 'DOI':
return CitationDOI(content=content)
elif qualifier == 'REP-NO':
return CitationTechnicalReportNumber(content=content)
else:
return None | python | def identifier_director(**kwargs):
"""Direct the identifier elements based on their qualifier."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'ISBN':
return CitationISBN(content=content)
elif qualifier == 'ISSN':
return CitationISSN(content=content)
elif qualifier == 'DOI':
return CitationDOI(content=content)
elif qualifier == 'REP-NO':
return CitationTechnicalReportNumber(content=content)
else:
return None | [
"def",
"identifier_director",
"(",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"''",
")",
"if",
"qualifier",
"==",
"'ISBN'",
":",
"return",
"CitationISBN",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'ISSN'",
":",
"return",
"CitationISSN",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'DOI'",
":",
"return",
"CitationDOI",
"(",
"content",
"=",
"content",
")",
"elif",
"qualifier",
"==",
"'REP-NO'",
":",
"return",
"CitationTechnicalReportNumber",
"(",
"content",
"=",
"content",
")",
"else",
":",
"return",
"None"
]
| Direct the identifier elements based on their qualifier. | [
"Direct",
"the",
"identifier",
"elements",
"based",
"on",
"their",
"qualifier",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L255-L268 | train |
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationAuthor.get_author | def get_author(self, **kwargs):
"""Determine the authors from the creator field."""
qualifier = kwargs.get('qualifier', '')
children = kwargs.get('children', [])
creator_type_per = False
author_name = None
# Find the creator type in children.
for child in children:
if child.tag == 'type' and child.content == 'per':
creator_type_per = True
# Get the author name.
elif child.tag == 'name':
author_name = child.content
if qualifier == 'aut' and creator_type_per and author_name:
return author_name
return None | python | def get_author(self, **kwargs):
"""Determine the authors from the creator field."""
qualifier = kwargs.get('qualifier', '')
children = kwargs.get('children', [])
creator_type_per = False
author_name = None
# Find the creator type in children.
for child in children:
if child.tag == 'type' and child.content == 'per':
creator_type_per = True
# Get the author name.
elif child.tag == 'name':
author_name = child.content
if qualifier == 'aut' and creator_type_per and author_name:
return author_name
return None | [
"def",
"get_author",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"creator_type_per",
"=",
"False",
"author_name",
"=",
"None",
"# Find the creator type in children.",
"for",
"child",
"in",
"children",
":",
"if",
"child",
".",
"tag",
"==",
"'type'",
"and",
"child",
".",
"content",
"==",
"'per'",
":",
"creator_type_per",
"=",
"True",
"# Get the author name.",
"elif",
"child",
".",
"tag",
"==",
"'name'",
":",
"author_name",
"=",
"child",
".",
"content",
"if",
"qualifier",
"==",
"'aut'",
"and",
"creator_type_per",
"and",
"author_name",
":",
"return",
"author_name",
"return",
"None"
]
| Determine the authors from the creator field. | [
"Determine",
"the",
"authors",
"from",
"the",
"creator",
"field",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L52-L68 | train |
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationPublisher.get_publisher_name | def get_publisher_name(self, **kwargs):
"""Get the publisher name."""
children = kwargs.get('children', [])
# Find the creator type in children.
for child in children:
if child.tag == 'name':
return child.content
return None | python | def get_publisher_name(self, **kwargs):
"""Get the publisher name."""
children = kwargs.get('children', [])
# Find the creator type in children.
for child in children:
if child.tag == 'name':
return child.content
return None | [
"def",
"get_publisher_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"# Find the creator type in children.",
"for",
"child",
"in",
"children",
":",
"if",
"child",
".",
"tag",
"==",
"'name'",
":",
"return",
"child",
".",
"content",
"return",
"None"
]
| Get the publisher name. | [
"Get",
"the",
"publisher",
"name",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L77-L84 | train |
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationPublicationDate.get_publication_date | def get_publication_date(self, **kwargs):
"""Determine the creation date for the publication date."""
date_string = kwargs.get('content', '')
date_match = CREATION_DATE_REGEX.match(date_string)
month_match = CREATION_MONTH_REGEX.match(date_string)
year_match = CREATION_YEAR_REGEX.match(date_string)
# Check if a date match exists.
if date_match:
(year, month, day) = date_match.groups('')
# Create the date.
try:
creation_date = datetime.date(int(year), int(month), int(day))
except ValueError:
return None
else:
return '%s/%s/%s' % (
format_date_string(creation_date.month),
format_date_string(creation_date.day),
creation_date.year,
)
elif month_match:
(year, month) = month_match.groups('')
# Create the date.
try:
creation_date = datetime.date(int(year), int(month), 1)
except ValueError:
return None
else:
return '%s/%s' % (
format_date_string(creation_date.month),
creation_date.year,
)
elif year_match:
year = year_match.groups('')[0]
return year
else:
return None | python | def get_publication_date(self, **kwargs):
"""Determine the creation date for the publication date."""
date_string = kwargs.get('content', '')
date_match = CREATION_DATE_REGEX.match(date_string)
month_match = CREATION_MONTH_REGEX.match(date_string)
year_match = CREATION_YEAR_REGEX.match(date_string)
# Check if a date match exists.
if date_match:
(year, month, day) = date_match.groups('')
# Create the date.
try:
creation_date = datetime.date(int(year), int(month), int(day))
except ValueError:
return None
else:
return '%s/%s/%s' % (
format_date_string(creation_date.month),
format_date_string(creation_date.day),
creation_date.year,
)
elif month_match:
(year, month) = month_match.groups('')
# Create the date.
try:
creation_date = datetime.date(int(year), int(month), 1)
except ValueError:
return None
else:
return '%s/%s' % (
format_date_string(creation_date.month),
creation_date.year,
)
elif year_match:
year = year_match.groups('')[0]
return year
else:
return None | [
"def",
"get_publication_date",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"date_string",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"''",
")",
"date_match",
"=",
"CREATION_DATE_REGEX",
".",
"match",
"(",
"date_string",
")",
"month_match",
"=",
"CREATION_MONTH_REGEX",
".",
"match",
"(",
"date_string",
")",
"year_match",
"=",
"CREATION_YEAR_REGEX",
".",
"match",
"(",
"date_string",
")",
"# Check if a date match exists.",
"if",
"date_match",
":",
"(",
"year",
",",
"month",
",",
"day",
")",
"=",
"date_match",
".",
"groups",
"(",
"''",
")",
"# Create the date.",
"try",
":",
"creation_date",
"=",
"datetime",
".",
"date",
"(",
"int",
"(",
"year",
")",
",",
"int",
"(",
"month",
")",
",",
"int",
"(",
"day",
")",
")",
"except",
"ValueError",
":",
"return",
"None",
"else",
":",
"return",
"'%s/%s/%s'",
"%",
"(",
"format_date_string",
"(",
"creation_date",
".",
"month",
")",
",",
"format_date_string",
"(",
"creation_date",
".",
"day",
")",
",",
"creation_date",
".",
"year",
",",
")",
"elif",
"month_match",
":",
"(",
"year",
",",
"month",
")",
"=",
"month_match",
".",
"groups",
"(",
"''",
")",
"# Create the date.",
"try",
":",
"creation_date",
"=",
"datetime",
".",
"date",
"(",
"int",
"(",
"year",
")",
",",
"int",
"(",
"month",
")",
",",
"1",
")",
"except",
"ValueError",
":",
"return",
"None",
"else",
":",
"return",
"'%s/%s'",
"%",
"(",
"format_date_string",
"(",
"creation_date",
".",
"month",
")",
",",
"creation_date",
".",
"year",
",",
")",
"elif",
"year_match",
":",
"year",
"=",
"year_match",
".",
"groups",
"(",
"''",
")",
"[",
"0",
"]",
"return",
"year",
"else",
":",
"return",
"None"
]
| Determine the creation date for the publication date. | [
"Determine",
"the",
"creation",
"date",
"for",
"the",
"publication",
"date",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L93-L129 | train |
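The three regex branches normalise progressively coarser inputs into MM/DD/YYYY, MM/YYYY, or bare YYYY strings, with invalid calendar dates returning None via the ValueError branches. A standalone sketch of the formatting, using a stand-in for format_date_string (assumed to zero-pad to two digits):

import datetime

def _fmt(value):                 # stand-in for format_date_string
    return '%02d' % value

d = datetime.date(2014, 7, 4)
print('%s/%s/%s' % (_fmt(d.month), _fmt(d.day), d.year))  # 07/04/2014
print('%s/%s' % (_fmt(d.month), d.year))                  # 07/2014
# A bare four-digit year is passed through unchanged.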
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationOnlineDate.get_online_date | def get_online_date(self, **kwargs):
"""Get the online date from the meta creation date."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
# Handle meta-creation-date element.
if qualifier == 'metadataCreationDate':
date_match = META_CREATION_DATE_REGEX.match(content)
(year, month, day) = date_match.groups('')
# Create the date.
creation_date = datetime.date(int(year), int(month), int(day))
return '%s/%s/%s' % (
format_date_string(creation_date.month),
format_date_string(creation_date.day),
creation_date.year,
)
return None | python | def get_online_date(self, **kwargs):
"""Get the online date from the meta creation date."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
# Handle meta-creation-date element.
if qualifier == 'metadataCreationDate':
date_match = META_CREATION_DATE_REGEX.match(content)
(year, month, day) = date_match.groups('')
# Create the date.
creation_date = datetime.date(int(year), int(month), int(day))
return '%s/%s/%s' % (
format_date_string(creation_date.month),
format_date_string(creation_date.day),
creation_date.year,
)
return None | [
"def",
"get_online_date",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"''",
")",
"# Handle meta-creation-date element.",
"if",
"qualifier",
"==",
"'metadataCreationDate'",
":",
"date_match",
"=",
"META_CREATION_DATE_REGEX",
".",
"match",
"(",
"content",
")",
"(",
"year",
",",
"month",
",",
"day",
")",
"=",
"date_match",
".",
"groups",
"(",
"''",
")",
"# Create the date.",
"creation_date",
"=",
"datetime",
".",
"date",
"(",
"int",
"(",
"year",
")",
",",
"int",
"(",
"month",
")",
",",
"int",
"(",
"day",
")",
")",
"return",
"'%s/%s/%s'",
"%",
"(",
"format_date_string",
"(",
"creation_date",
".",
"month",
")",
",",
"format_date_string",
"(",
"creation_date",
".",
"day",
")",
",",
"creation_date",
".",
"year",
",",
")",
"return",
"None"
]
| Get the online date from the meta creation date. | [
"Get",
"the",
"online",
"date",
"from",
"the",
"meta",
"creation",
"date",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L138-L153 | train |
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationDissertationInstitution.get_institution | def get_institution(self, **kwargs):
"""Get the dissertation institution."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'grantor':
return content
return None | python | def get_institution(self, **kwargs):
"""Get the dissertation institution."""
qualifier = kwargs.get('qualifier', '')
content = kwargs.get('content', '')
if qualifier == 'grantor':
return content
return None | [
"def",
"get_institution",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"''",
")",
"if",
"qualifier",
"==",
"'grantor'",
":",
"return",
"content",
"return",
"None"
]
| Get the dissertation institution. | [
"Get",
"the",
"dissertation",
"institution",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L216-L222 | train |
rhayes777/PyAutoFit | autofit/aggregator.py | PhaseOutput.model_results | def model_results(self) -> str:
"""
Reads the model.results file
"""
with open(os.path.join(self.directory, "model.results")) as f:
return f.read() | python | def model_results(self) -> str:
"""
Reads the model.results file
"""
with open(os.path.join(self.directory, "model.results")) as f:
return f.read() | [
"def",
"model_results",
"(",
"self",
")",
"->",
"str",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"directory",
",",
"\"model.results\"",
")",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
]
| Reads the model.results file | [
"Reads",
"the",
"model",
".",
"results",
"file"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L44-L49 | train |
rhayes777/PyAutoFit | autofit/aggregator.py | PhaseOutput.header | def header(self) -> str:
"""
A header created by joining the pipeline, phase and data names
"""
return "/".join((self.pipeline, self.phase, self.data)) | python | def header(self) -> str:
"""
A header created by joining the pipeline, phase and data names
"""
return "/".join((self.pipeline, self.phase, self.data)) | [
"def",
"header",
"(",
"self",
")",
"->",
"str",
":",
"return",
"\"/\"",
".",
"join",
"(",
"(",
"self",
".",
"pipeline",
",",
"self",
".",
"phase",
",",
"self",
".",
"data",
")",
")"
]
| A header created by joining the pipeline, phase and data names | [
"A",
"header",
"created",
"by",
"joining",
"the",
"pipeline",
"phase",
"and",
"data",
"names"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L52-L56 | train |
rhayes777/PyAutoFit | autofit/aggregator.py | PhaseOutput.optimizer | def optimizer(self) -> non_linear.NonLinearOptimizer:
"""
The optimizer object that was used in this phase
"""
if self.__optimizer is None:
with open(os.path.join(self.directory, ".optimizer.pickle"), "r+b") as f:
self.__optimizer = pickle.loads(f.read())
return self.__optimizer | python | def optimizer(self) -> non_linear.NonLinearOptimizer:
"""
The optimizer object that was used in this phase
"""
if self.__optimizer is None:
with open(os.path.join(self.directory, ".optimizer.pickle"), "r+b") as f:
self.__optimizer = pickle.loads(f.read())
return self.__optimizer | [
"def",
"optimizer",
"(",
"self",
")",
"->",
"non_linear",
".",
"NonLinearOptimizer",
":",
"if",
"self",
".",
"__optimizer",
"is",
"None",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"directory",
",",
"\".optimizer.pickle\"",
")",
",",
"\"r+b\"",
")",
"as",
"f",
":",
"self",
".",
"__optimizer",
"=",
"pickle",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"self",
".",
"__optimizer"
]
| The optimizer object that was used in this phase | [
"The",
"optimizer",
"object",
"that",
"was",
"used",
"in",
"this",
"phase"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L59-L66 | train |
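The property lazy-loads the pickled optimizer on first access and serves the cached object afterwards; note the file is opened 'r+b' although it is only read, so plain 'rb' would suffice. The same shape, standalone and with generic names:

import os
import pickle

class LazyPickle:
    def __init__(self, directory):
        self.directory = directory
        self._cached = None

    @property
    def payload(self):
        if self._cached is None:                  # first access only
            path = os.path.join(self.directory, "payload.pickle")
            with open(path, "rb") as f:
                self._cached = pickle.load(f)
        return self._cached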
rhayes777/PyAutoFit | autofit/aggregator.py | Aggregator.phases_with | def phases_with(self, **kwargs) -> [PhaseOutput]:
"""
Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with
phase, data or pipeline as the key.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
"""
return [phase for phase in self.phases if
all([getattr(phase, key) == value for key, value in kwargs.items()])] | python | def phases_with(self, **kwargs) -> [PhaseOutput]:
"""
Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with
phase, data or pipeline as the key.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
"""
return [phase for phase in self.phases if
all([getattr(phase, key) == value for key, value in kwargs.items()])] | [
"def",
"phases_with",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"[",
"PhaseOutput",
"]",
":",
"return",
"[",
"phase",
"for",
"phase",
"in",
"self",
".",
"phases",
"if",
"all",
"(",
"[",
"getattr",
"(",
"phase",
",",
"key",
")",
"==",
"value",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
")",
"]"
]
| Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with
phase, data or pipeline as the key.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1 | [
"Filters",
"phases",
".",
"If",
"no",
"arguments",
"are",
"passed",
"all",
"phases",
"are",
"returned",
".",
"Arguments",
"must",
"be",
"key",
"value",
"pairs",
"with",
"phase",
"data",
"or",
"pipeline",
"as",
"the",
"key",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L94-L105 | train |
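A hypothetical filtering session; the output directory path is an assumption, and the header values come from the PhaseOutput.header entry above.

agg = Aggregator("output")        # directory scanned for phase folders
for phase in agg.phases_with(pipeline="pipeline1", data="lens1"):
    print(phase.header)           # e.g. pipeline1/phase2/lens1
print(len(agg.phases_with()))     # no filters -> every phase found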
rhayes777/PyAutoFit | autofit/aggregator.py | Aggregator.optimizers_with | def optimizers_with(self, **kwargs) -> [non_linear.NonLinearOptimizer]:
"""
Load a list of optimizers for phases in the directory with zero or more filters applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
optimizers
A list of optimizers, one for each phase in the directory that matches the filters.
"""
return [phase.optimizer for phase in self.phases_with(**kwargs)] | python | def optimizers_with(self, **kwargs) -> [non_linear.NonLinearOptimizer]:
"""
Load a list of optimizers for phases in the directory with zero or more filters applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
optimizers
A list of optimizers, one for each phase in the directory that matches the filters.
"""
return [phase.optimizer for phase in self.phases_with(**kwargs)] | [
"def",
"optimizers_with",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"[",
"non_linear",
".",
"NonLinearOptimizer",
"]",
":",
"return",
"[",
"phase",
".",
"optimizer",
"for",
"phase",
"in",
"self",
".",
"phases_with",
"(",
"*",
"*",
"kwargs",
")",
"]"
]
| Load a list of optimizers for phases in the directory with zero or more filters applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
optimizers
A list of optimizers, one for each phase in the directory that matches the filters. | [
"Load",
"a",
"list",
"of",
"optimizers",
"for",
"phases",
"in",
"the",
"directory",
"with",
"zero",
"or",
"more",
"filters",
"applied",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L107-L121 | train |
rhayes777/PyAutoFit | autofit/aggregator.py | Aggregator.model_results | def model_results(self, **kwargs) -> str:
"""
Collates model results from all phases in the directory or some subset if filters are applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
model_results
A string joining headers and results for all included phases.
"""
return "\n\n".join("{}\n\n{}".format(phase.header, phase.model_results) for phase in
self.phases_with(**kwargs)) | python | def model_results(self, **kwargs) -> str:
"""
Collates model results from all phases in the directory or some subset if filters are applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
model_results
A string joining headers and results for all included phases.
"""
return "\n\n".join("{}\n\n{}".format(phase.header, phase.model_results) for phase in
self.phases_with(**kwargs)) | [
"def",
"model_results",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"str",
":",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"\"{}\\n\\n{}\"",
".",
"format",
"(",
"phase",
".",
"header",
",",
"phase",
".",
"model_results",
")",
"for",
"phase",
"in",
"self",
".",
"phases_with",
"(",
"*",
"*",
"kwargs",
")",
")"
]
| Collates model results from all phases in the directory or some subset if filters are applied.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1
Returns
-------
model_results
A string joining headers and results for all included phases. | [
"Collates",
"model",
"results",
"from",
"all",
"phases",
"in",
"the",
"directory",
"or",
"some",
"subset",
"if",
"filters",
"are",
"applied",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/aggregator.py#L123-L138 | train |
peterbe/gg | gg/builtins/branches/gg_branches.py | branches | def branches(config, searchstring=""):
"""List all branches. And if exactly 1 found, offer to check it out."""
repo = config.repo
branches_ = list(find(repo, searchstring))
if branches_:
merged = get_merged_branches(repo)
info_out("Found existing branches...")
print_list(branches_, merged)
if len(branches_) == 1 and searchstring:
# If the found branch is the current one, error
active_branch = repo.active_branch
if active_branch == branches_[0]:
error_out("You're already on '{}'".format(branches_[0].name))
branch_name = branches_[0].name
if len(branch_name) > 50:
branch_name = branch_name[:47] + "…"
check_it_out = (
input("Check out '{}'? [Y/n] ".format(branch_name)).lower().strip()
!= "n"
)
if check_it_out:
branches_[0].checkout()
elif searchstring:
error_out("Found no branches matching '{}'.".format(searchstring))
else:
error_out("Found no branches.") | python | def branches(config, searchstring=""):
"""List all branches. And if exactly 1 found, offer to check it out."""
repo = config.repo
branches_ = list(find(repo, searchstring))
if branches_:
merged = get_merged_branches(repo)
info_out("Found existing branches...")
print_list(branches_, merged)
if len(branches_) == 1 and searchstring:
# If the found branch is the current one, error
active_branch = repo.active_branch
if active_branch == branches_[0]:
error_out("You're already on '{}'".format(branches_[0].name))
branch_name = branches_[0].name
if len(branch_name) > 50:
branch_name = branch_name[:47] + "…"
check_it_out = (
input("Check out '{}'? [Y/n] ".format(branch_name)).lower().strip()
!= "n"
)
if check_it_out:
branches_[0].checkout()
elif searchstring:
error_out("Found no branches matching '{}'.".format(searchstring))
else:
error_out("Found no branches.") | [
"def",
"branches",
"(",
"config",
",",
"searchstring",
"=",
"\"\"",
")",
":",
"repo",
"=",
"config",
".",
"repo",
"branches_",
"=",
"list",
"(",
"find",
"(",
"repo",
",",
"searchstring",
")",
")",
"if",
"branches_",
":",
"merged",
"=",
"get_merged_branches",
"(",
"repo",
")",
"info_out",
"(",
"\"Found existing branches...\"",
")",
"print_list",
"(",
"branches_",
",",
"merged",
")",
"if",
"len",
"(",
"branches_",
")",
"==",
"1",
"and",
"searchstring",
":",
"# If the found branch is the current one, error",
"active_branch",
"=",
"repo",
".",
"active_branch",
"if",
"active_branch",
"==",
"branches_",
"[",
"0",
"]",
":",
"error_out",
"(",
"\"You're already on '{}'\"",
".",
"format",
"(",
"branches_",
"[",
"0",
"]",
".",
"name",
")",
")",
"branch_name",
"=",
"branches_",
"[",
"0",
"]",
".",
"name",
"if",
"len",
"(",
"branch_name",
")",
">",
"50",
":",
"branch_name",
"=",
"branch_name",
"[",
":",
"47",
"]",
"+",
"\"…\"",
"check_it_out",
"=",
"(",
"input",
"(",
"\"Check out '{}'? [Y/n] \"",
".",
"format",
"(",
"branch_name",
")",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"!=",
"\"n\"",
")",
"if",
"check_it_out",
":",
"branches_",
"[",
"0",
"]",
".",
"checkout",
"(",
")",
"elif",
"searchstring",
":",
"error_out",
"(",
"\"Found no branches matching '{}'.\"",
".",
"format",
"(",
"searchstring",
")",
")",
"else",
":",
"error_out",
"(",
"\"Found no branches.\"",
")"
]
| List all branches. And if exactly 1 found, offer to check it out. | [
"List",
"all",
"branches",
".",
"And",
"if",
"exactly",
"1",
"found",
"offer",
"to",
"check",
"it",
"out",
"."
]
| 2aace5bdb4a9b1cb65bea717784edf54c63b7bad | https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/branches/gg_branches.py#L13-L39 | train |
Riminder/python-riminder-api | riminder/base64Wrapper.py | decodebytes | def decodebytes(input):
"""Decode base64 string to byte array."""
py_version = sys.version_info[0]
if py_version >= 3:
return _decodebytes_py3(input)
return _decodebytes_py2(input) | python | def decodebytes(input):
"""Decode base64 string to byte array."""
py_version = sys.version_info[0]
if py_version >= 3:
return _decodebytes_py3(input)
return _decodebytes_py2(input) | [
"def",
"decodebytes",
"(",
"input",
")",
":",
"py_version",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"if",
"py_version",
">=",
"3",
":",
"return",
"_decodebytes_py3",
"(",
"input",
")",
"return",
"_decodebytes_py2",
"(",
"input",
")"
]
| Decode base64 string to byte array. | [
"Decode",
"base64",
"string",
"to",
"byte",
"array",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/base64Wrapper.py#L14-L19 | train |
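Assuming the private _decodebytes_py2/_py3 helpers wrap the base64 module's own decoders, the call round-trips a standard encoding on either interpreter:

import base64

encoded = base64.b64encode(b'resume.pdf').decode('ascii')
assert decodebytes(encoded) == b'resume.pdf'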
gofed/gofedlib | gofedlib/snapshot/capturer.py | ProjectCapturer.capture | def capture(self, commit = ""):
"""Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string
"""
self._validateProvider(self._provider)
# get client for repository
# TODO(jchaloup): read config file to switch between local and remote clients
# TODO(jchaloup): remote client can cover gofed infrastructure or any remote source for repository info
client = RepositoryClientBuilder().buildWithRemoteClient(self._provider)
if self._provider["provider"] == "github":
self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature()
elif self._provider["provider"] == "bitbucket":
self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature()
else:
raise KeyError("Provider '%s' not recognized" % self._provider["provider"])
return self | python | def capture(self, commit = ""):
"""Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string
"""
self._validateProvider(self._provider)
# get client for repository
# TODO(jchaloup): read config file to switch between local and remote clients
# TODO(jchaloup): remote client can cover gofed infrastructure or any remote source for repository info
client = RepositoryClientBuilder().buildWithRemoteClient(self._provider)
if self._provider["provider"] == "github":
self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature()
elif self._provider["provider"] == "bitbucket":
self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature()
else:
raise KeyError("Provider '%s' not recognized" % self._provider["provider"])
return self | [
"def",
"capture",
"(",
"self",
",",
"commit",
"=",
"\"\"",
")",
":",
"self",
".",
"_validateProvider",
"(",
"self",
".",
"_provider",
")",
"# get client for repository",
"# TODO(jchaloup): read config file to switch between local and remove clients",
"# TODO(jchaloup): remote client can cover gofed infratructure or any remove source for repository info",
"client",
"=",
"RepositoryClientBuilder",
"(",
")",
".",
"buildWithRemoteClient",
"(",
"self",
".",
"_provider",
")",
"if",
"self",
".",
"_provider",
"[",
"\"provider\"",
"]",
"==",
"\"github\"",
":",
"self",
".",
"_signature",
"=",
"ProjectGithubRepositoryCapturer",
"(",
"self",
".",
"_provider",
",",
"client",
")",
".",
"capture",
"(",
"commit",
")",
".",
"signature",
"(",
")",
"elif",
"self",
".",
"_provider",
"[",
"\"provider\"",
"]",
"==",
"\"bitbucket\"",
":",
"self",
".",
"_signature",
"=",
"ProjectBitbucketRepositoryCapturer",
"(",
"self",
".",
"_provider",
",",
"client",
")",
".",
"capture",
"(",
"commit",
")",
".",
"signature",
"(",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"Provider '%s' not recognized\"",
"%",
"self",
".",
"_provider",
"[",
"\"provider\"",
"]",
")",
"return",
"self"
]
| Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string | [
"Capture",
"the",
"current",
"state",
"of",
"a",
"project",
"based",
"on",
"its",
"provider"
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/snapshot/capturer.py#L25-L51 | train |
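A usage sketch under stated assumptions: the constructor is assumed to take the provider dict, the dict keys beyond 'provider' are illustrative, and a signature() accessor is assumed on the capturer itself (mirroring the sub-capturers it delegates to). Provider values other than 'github' or 'bitbucket' raise KeyError.

provider = {'provider': 'github',
            'username': 'gofed', 'project': 'gofedlib'}
capturer = ProjectCapturer(provider)
capturer.capture()                 # empty commit -> latest repo commit
signature = capturer.signature()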
pgxcentre/geneparse | geneparse/logging.py | found_duplicates | def found_duplicates(counts):
"""Log that duplicates were found.
:param counts: A list of duplicate marker names along with their number
of occurrences.
:type counts: list
"""
_logger.warning("Duplicated markers found")
for marker, count in counts:
_logger.warning(" - {}: {:,d} times".format(marker, count))
_logger.warning("Appending ':dupX' to the duplicated markers according "
"to their location in the file.") | python | def found_duplicates(counts):
"""Log that duplicates were found.
:param counts: A list of duplicate marker names along with their number
of occurrences.
:type counts: list
"""
_logger.warning("Duplicated markers found")
for marker, count in counts:
_logger.warning(" - {}: {:,d} times".format(marker, count))
_logger.warning("Appending ':dupX' to the duplicated markers according "
"to their location in the file.") | [
"def",
"found_duplicates",
"(",
"counts",
")",
":",
"_logger",
".",
"warning",
"(",
"\"Duplicated markers found\"",
")",
"for",
"marker",
",",
"count",
"in",
"counts",
":",
"_logger",
".",
"warning",
"(",
"\" - {}: {:,d} times\"",
".",
"format",
"(",
"marker",
",",
"count",
")",
")",
"_logger",
".",
"warning",
"(",
"\"Appending ':dupX' to the duplicated markers according \"",
"\"to their location in the file.\"",
")"
]
| Log that duplicates were found.
:param counts: A list of duplicate marker names along with their number
of occurrences.
:type counts: list | [
"Log",
"that",
"duplicates",
"were",
"found",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/logging.py#L45-L57 | train |
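Each counts entry pairs a duplicated marker name with its occurrence count; the call only logs, it does not rename anything itself:

found_duplicates([('rs12345', 2), ('rs6789', 3)])
# emitted on the module logger, at WARNING level:
#   Duplicated markers found
#     - rs12345: 2 times
#     - rs6789: 3 times
#   Appending ':dupX' to the duplicated markers according to their
#   location in the file.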
lalinsky/mbdata | mbdata/utils/__init__.py | patch_model_schemas | def patch_model_schemas(mapping):
"""Update mbdata.models to use different schema names
The function accepts a dictionary with schema name mapping
and updates the schema for all MusicBrainz tables.
If you want to use the default schema:
>>> patch_model_schemas(NO_SCHEMAS)
If you have just one 'musicbrainz' schema:
>>> patch_model_schemas(SINGLE_MUSICBRAINZ_SCHEMA)
"""
from mbdata.models import Base
for table in Base.metadata.sorted_tables:
if table.schema is None:
continue
table.schema = mapping.get(table.schema, table.schema) | python | def patch_model_schemas(mapping):
"""Update mbdata.models to use different schema names
The function accepts a dictionary with schema name mapping
and updates the schema for all MusicBrainz tables.
If you want to use the default schema:
>>> patch_model_schemas(NO_SCHEMAS)
If you have just one 'musicbrainz' schema:
>>> patch_model_schemas(SINGLE_MUSICBRAINZ_SCHEMA)
"""
from mbdata.models import Base
for table in Base.metadata.sorted_tables:
if table.schema is None:
continue
table.schema = mapping.get(table.schema, table.schema) | [
"def",
"patch_model_schemas",
"(",
"mapping",
")",
":",
"from",
"mbdata",
".",
"models",
"import",
"Base",
"for",
"table",
"in",
"Base",
".",
"metadata",
".",
"sorted_tables",
":",
"if",
"table",
".",
"schema",
"is",
"None",
":",
"continue",
"table",
".",
"schema",
"=",
"mapping",
".",
"get",
"(",
"table",
".",
"schema",
",",
"table",
".",
"schema",
")"
]
| Update mbdata.models to use different schema names
The function accepts a dictionary with schema name mapping
and updates the schema for all MusicBrainz tables.
If you want to use the default schema:
>>> patch_model_schemas(NO_SCHEMAS)
If you have just one 'musicbrainz' schema:
>>> patch_model_schemas(SINGLE_MUSICBRAINZ_SCHEMA) | [
"Update",
"mbdata",
".",
"models",
"to",
"use",
"different",
"schema",
"names"
]
| 1ec788834047ced8614ad9763e430afe1d1e65e7 | https://github.com/lalinsky/mbdata/blob/1ec788834047ced8614ad9763e430afe1d1e65e7/mbdata/utils/__init__.py#L21-L41 | train |
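A custom mapping works the same way as the bundled constants: keys are schema names used by mbdata.models, values their replacements, and any schema missing from the mapping keeps its current name via the .get default. The schema keys and target names below are illustrative.

patch_model_schemas({
    'musicbrainz': 'mb',        # rename the schema (hypothetical name)
    'cover_art_archive': None,  # None -> unqualified (default) schema
})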
Nic30/hwtGraph | hwtGraph/elk/fromHwt/statementRenderer.py | detectRamPorts | def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
"""
Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: curent en/clk signal
"""
if stm.ifFalse or stm.elIfs:
return
for _stm in stm.ifTrue:
if isinstance(_stm, IfContainer):
yield from detectRamPorts(_stm, _stm.cond & current_en)
elif isinstance(_stm, Assignment):
if isinstance(_stm.dst._dtype, HArray):
assert len(_stm.indexes) == 1, "one address per RAM port"
w_addr = _stm.indexes[0]
mem = _stm.dst
yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
elif _stm.src.hidden and len(_stm.src.drivers) == 1:
op = _stm.src.drivers[0]
mem = op.operands[0]
if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
r_addr = op.operands[1]
if _stm.indexes:
raise NotImplementedError()
yield (RAM_READ, mem, r_addr, current_en, _stm.dst) | python | def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
"""
Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: curent en/clk signal
"""
if stm.ifFalse or stm.elIfs:
return
for _stm in stm.ifTrue:
if isinstance(_stm, IfContainer):
yield from detectRamPorts(_stm, _stm.cond & current_en)
elif isinstance(_stm, Assignment):
if isinstance(_stm.dst._dtype, HArray):
assert len(_stm.indexes) == 1, "one address per RAM port"
w_addr = _stm.indexes[0]
mem = _stm.dst
yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
elif _stm.src.hidden and len(_stm.src.drivers) == 1:
op = _stm.src.drivers[0]
mem = op.operands[0]
if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
r_addr = op.operands[1]
if _stm.indexes:
raise NotImplementedError()
yield (RAM_READ, mem, r_addr, current_en, _stm.dst) | [
"def",
"detectRamPorts",
"(",
"stm",
":",
"IfContainer",
",",
"current_en",
":",
"RtlSignalBase",
")",
":",
"if",
"stm",
".",
"ifFalse",
"or",
"stm",
".",
"elIfs",
":",
"return",
"for",
"_stm",
"in",
"stm",
".",
"ifTrue",
":",
"if",
"isinstance",
"(",
"_stm",
",",
"IfContainer",
")",
":",
"yield",
"from",
"detectRamPorts",
"(",
"_stm",
",",
"_stm",
".",
"cond",
"&",
"current_en",
")",
"elif",
"isinstance",
"(",
"_stm",
",",
"Assignment",
")",
":",
"if",
"isinstance",
"(",
"_stm",
".",
"dst",
".",
"_dtype",
",",
"HArray",
")",
":",
"assert",
"len",
"(",
"_stm",
".",
"indexes",
")",
"==",
"1",
",",
"\"one address per RAM port\"",
"w_addr",
"=",
"_stm",
".",
"indexes",
"[",
"0",
"]",
"mem",
"=",
"_stm",
".",
"dst",
"yield",
"(",
"RAM_WRITE",
",",
"mem",
",",
"w_addr",
",",
"current_en",
",",
"_stm",
".",
"src",
")",
"elif",
"_stm",
".",
"src",
".",
"hidden",
"and",
"len",
"(",
"_stm",
".",
"src",
".",
"drivers",
")",
"==",
"1",
":",
"op",
"=",
"_stm",
".",
"src",
".",
"drivers",
"[",
"0",
"]",
"mem",
"=",
"op",
".",
"operands",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"mem",
".",
"_dtype",
",",
"HArray",
")",
"and",
"op",
".",
"operator",
"==",
"AllOps",
".",
"INDEX",
":",
"r_addr",
"=",
"op",
".",
"operands",
"[",
"1",
"]",
"if",
"_stm",
".",
"indexes",
":",
"raise",
"NotImplementedError",
"(",
")",
"yield",
"(",
"RAM_READ",
",",
"mem",
",",
"r_addr",
",",
"current_en",
",",
"_stm",
".",
"dst",
")"
]
| Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal | [
"Detect",
"RAM",
"ports",
"in",
"If",
"statement"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/statementRenderer.py#L42-L67 | train |
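The shape of the statement tree the generator recognises, as hwt-style pseudocode (signal names are placeholders); each nested If AND-combines its condition into current_en, and any else/elif branch makes a subtree ineligible:

# If(en,                      # current_en
#    mem[w_addr](din),        # yields (RAM_WRITE, mem, w_addr, en, din)
#    dout(mem[r_addr]),       # yields (RAM_READ,  mem, r_addr, en, dout)
#    If(en2,                  # nested: condition becomes en & en2
#       ...,
#    ),
# )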
Nic30/hwtGraph | hwtGraph/elk/fromHwt/statementRenderer.py | StatementRenderer.addInputPort | def addInputPort(self, node, name,
i: Union[Value, RtlSignalBase],
side=PortSide.WEST):
"""
Add and connect input port on subnode
:param node: node where to add input port
:param name: name of newly added port
:param i: input value
:param side: side where input port should be added
"""
root = self.node
port = node.addPort(name, PortType.INPUT, side)
netCtxs = self.netCtxs
if isinstance(i, LPort):
root.addEdge(i, port)
elif isConst(i):
i = i.staticEval()
c, wasThereBefore = self.netCtxs.getDefault(i)
if not wasThereBefore:
v = ValueAsLNode(root, i).east[0]
c.addDriver(v)
c.addEndpoint(port)
elif i.hidden:
# later connect driver of this signal to output port
ctx, wasThereBefore = netCtxs.getDefault(i)
if not wasThereBefore:
self.lazyLoadNet(i)
ctx.addEndpoint(port)
else:
portCtx = self.portCtx
rootCtx, _ = self.rootNetCtxs.getDefault(i)
if self.isVirtual:
# later connect signal in root to input port or input port of
# wrap node
rootCtx.addEndpoint(port)
else:
# spot input port on this wrap node if required
isNewlySpotted = (i, PortType.INPUT) not in portCtx.data
src = portCtx.register(i, PortType.INPUT)
# connect input port on wrap node with specified output port
ctx, _ = netCtxs.getDefault(i)
ctx.addDriver(src)
ctx.addEndpoint(port)
if isNewlySpotted:
# get input port from parent view
_port = portCtx.getOutside(i, PortType.INPUT)
rootCtx.addEndpoint(_port) | python | def addInputPort(self, node, name,
i: Union[Value, RtlSignalBase],
side=PortSide.WEST):
"""
Add and connect input port on subnode
:param node: node where to add input port
:param name: name of newly added port
:param i: input value
:param side: side where input port should be added
"""
root = self.node
port = node.addPort(name, PortType.INPUT, side)
netCtxs = self.netCtxs
if isinstance(i, LPort):
root.addEdge(i, port)
elif isConst(i):
i = i.staticEval()
c, wasThereBefore = self.netCtxs.getDefault(i)
if not wasThereBefore:
v = ValueAsLNode(root, i).east[0]
c.addDriver(v)
c.addEndpoint(port)
elif i.hidden:
# later connect driver of this signal to output port
ctx, wasThereBefore = netCtxs.getDefault(i)
if not wasThereBefore:
self.lazyLoadNet(i)
ctx.addEndpoint(port)
else:
portCtx = self.portCtx
rootCtx, _ = self.rootNetCtxs.getDefault(i)
if self.isVirtual:
# later connect signal in root to input port or input port of
# wrap node
rootCtx.addEndpoint(port)
else:
# spot input port on this wrap node if required
isNewlySpotted = (i, PortType.INPUT) not in portCtx.data
src = portCtx.register(i, PortType.INPUT)
# connect input port on wrap node with specified output port
ctx, _ = netCtxs.getDefault(i)
ctx.addDriver(src)
ctx.addEndpoint(port)
if isNewlySpotted:
# get input port from parent view
_port = portCtx.getOutside(i, PortType.INPUT)
rootCtx.addEndpoint(_port) | [
"def",
"addInputPort",
"(",
"self",
",",
"node",
",",
"name",
",",
"i",
":",
"Union",
"[",
"Value",
",",
"RtlSignalBase",
"]",
",",
"side",
"=",
"PortSide",
".",
"WEST",
")",
":",
"root",
"=",
"self",
".",
"node",
"port",
"=",
"node",
".",
"addPort",
"(",
"name",
",",
"PortType",
".",
"INPUT",
",",
"side",
")",
"netCtxs",
"=",
"self",
".",
"netCtxs",
"if",
"isinstance",
"(",
"i",
",",
"LPort",
")",
":",
"root",
".",
"addEdge",
"(",
"i",
",",
"port",
")",
"elif",
"isConst",
"(",
"i",
")",
":",
"i",
"=",
"i",
".",
"staticEval",
"(",
")",
"c",
",",
"wasThereBefore",
"=",
"self",
".",
"netCtxs",
".",
"getDefault",
"(",
"i",
")",
"if",
"not",
"wasThereBefore",
":",
"v",
"=",
"ValueAsLNode",
"(",
"root",
",",
"i",
")",
".",
"east",
"[",
"0",
"]",
"c",
".",
"addDriver",
"(",
"v",
")",
"c",
".",
"addEndpoint",
"(",
"port",
")",
"elif",
"i",
".",
"hidden",
":",
"# later connect driver of this signal to output port",
"ctx",
",",
"wasThereBefore",
"=",
"netCtxs",
".",
"getDefault",
"(",
"i",
")",
"if",
"not",
"wasThereBefore",
":",
"self",
".",
"lazyLoadNet",
"(",
"i",
")",
"ctx",
".",
"addEndpoint",
"(",
"port",
")",
"else",
":",
"portCtx",
"=",
"self",
".",
"portCtx",
"rootCtx",
",",
"_",
"=",
"self",
".",
"rootNetCtxs",
".",
"getDefault",
"(",
"i",
")",
"if",
"self",
".",
"isVirtual",
":",
"# later connect signal in root to input port or input port of",
"# wrap node",
"rootCtx",
".",
"addEndpoint",
"(",
"port",
")",
"else",
":",
"# spot input port on this wrap node if required",
"isNewlySpotted",
"=",
"(",
"i",
",",
"PortType",
".",
"INPUT",
")",
"not",
"in",
"portCtx",
".",
"data",
"src",
"=",
"portCtx",
".",
"register",
"(",
"i",
",",
"PortType",
".",
"INPUT",
")",
"# connect input port on wrap node with specified output port",
"ctx",
",",
"_",
"=",
"netCtxs",
".",
"getDefault",
"(",
"i",
")",
"ctx",
".",
"addDriver",
"(",
"src",
")",
"ctx",
".",
"addEndpoint",
"(",
"port",
")",
"if",
"isNewlySpotted",
":",
"# get input port from parent view",
"_port",
"=",
"portCtx",
".",
"getOutside",
"(",
"i",
",",
"PortType",
".",
"INPUT",
")",
"rootCtx",
".",
"addEndpoint",
"(",
"_port",
")"
]
| Add and connect input port on subnode
:param node: node where to add input port
:param name: name of newly added port
:param i: input value
:param side: side where input port should be added | [
"Add",
"and",
"connect",
"input",
"port",
"on",
"subnode"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/statementRenderer.py#L99-L149 | train |
Nic30/hwtGraph | hwtGraph/elk/fromHwt/statementRenderer.py | StatementRenderer.addOutputPort | def addOutputPort(self, node: LNode, name: str,
out: Optional[Union[RtlSignalBase, LPort]],
side=PortSide.EAST):
"""
Add and connect output port on subnode
"""
oPort = node.addPort(name, PortType.OUTPUT, side)
if out is not None:
if isinstance(out, LPort):
self.node.addEdge(oPort, out)
elif out.hidden:
raise ValueError("Hidden signals should not be connected to outside", name)
elif self.isVirtual:
# This node is inlined inside of parent.
# Mark that this output of subnode should be connected
# to output of parent node.
ctx, _ = self.netCtxs.getDefault(out)
ctx.addDriver(oPort)
else:
# connect my signal to my output port
_out = self.portCtx.getInside(out, PortType.OUTPUT)
self.node.addEdge(oPort, _out, originObj=out)
# mark connection of output port to parent net
ooPort = self.portCtx.getOutside(out, PortType.OUTPUT)
ctx, _ = self.rootNetCtxs.getDefault(out)
ctx.addDriver(ooPort)
return oPort | python | def addOutputPort(self, node: LNode, name: str,
out: Optional[Union[RtlSignalBase, LPort]],
side=PortSide.EAST):
"""
Add and connect output port on subnode
"""
oPort = node.addPort(name, PortType.OUTPUT, side)
if out is not None:
if isinstance(out, LPort):
self.node.addEdge(oPort, out)
elif out.hidden:
raise ValueError("Hidden signals should not be connected to outside", name)
elif self.isVirtual:
# This node is inlined inside of parent.
# Mark that this output of subnode should be connected
# to output of parent node.
ctx, _ = self.netCtxs.getDefault(out)
ctx.addDriver(oPort)
else:
# connect my signal to my output port
_out = self.portCtx.getInside(out, PortType.OUTPUT)
self.node.addEdge(oPort, _out, originObj=out)
# mark connection of output port to parent net
ooPort = self.portCtx.getOutside(out, PortType.OUTPUT)
ctx, _ = self.rootNetCtxs.getDefault(out)
ctx.addDriver(ooPort)
return oPort | [
"def",
"addOutputPort",
"(",
"self",
",",
"node",
":",
"LNode",
",",
"name",
":",
"str",
",",
"out",
":",
"Optional",
"[",
"Union",
"[",
"RtlSignalBase",
",",
"LPort",
"]",
"]",
",",
"side",
"=",
"PortSide",
".",
"EAST",
")",
":",
"oPort",
"=",
"node",
".",
"addPort",
"(",
"name",
",",
"PortType",
".",
"OUTPUT",
",",
"side",
")",
"if",
"out",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"out",
",",
"LPort",
")",
":",
"self",
".",
"node",
".",
"addEdge",
"(",
"oPort",
",",
"out",
")",
"elif",
"out",
".",
"hidden",
":",
"raise",
"ValueError",
"(",
"\"Hidden signals should not be connected to outside\"",
",",
"name",
")",
"elif",
"self",
".",
"isVirtual",
":",
"# This node is inlined inside of parent.",
"# Mark that this output of subnode should be connected",
"# to output of parent node.",
"ctx",
",",
"_",
"=",
"self",
".",
"netCtxs",
".",
"getDefault",
"(",
"out",
")",
"ctx",
".",
"addDriver",
"(",
"oPort",
")",
"else",
":",
"# connect my signal to my output port",
"_out",
"=",
"self",
".",
"portCtx",
".",
"getInside",
"(",
"out",
",",
"PortType",
".",
"OUTPUT",
")",
"self",
".",
"node",
".",
"addEdge",
"(",
"oPort",
",",
"_out",
",",
"originObj",
"=",
"out",
")",
"# mark connection of output port to parent net",
"ooPort",
"=",
"self",
".",
"portCtx",
".",
"getOutside",
"(",
"out",
",",
"PortType",
".",
"OUTPUT",
")",
"ctx",
",",
"_",
"=",
"self",
".",
"rootNetCtxs",
".",
"getDefault",
"(",
"out",
")",
"ctx",
".",
"addDriver",
"(",
"ooPort",
")",
"return",
"oPort"
]
| Add and connect output port on subnode | [
"Add",
"and",
"connect",
"output",
"port",
"on",
"subnode"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/statementRenderer.py#L151-L178 | train |
Nic30/hwtGraph | hwtGraph/elk/fromHwt/statementRenderer.py | StatementRenderer.renderContent | def renderContent(self):
"""
Walk from outputs to inputs;
for each public signal, register a port on the wrap node if required,
and lazy-load all operator and statement nodes for the signals
"""
stm = self.stm
portCtx = self.portCtx
# for each inputs and outputs render expression trees
# walk statements and render muxs and memories
for o in stm._outputs:
if not self.isVirtual:
portCtx.register(o, PortType.OUTPUT)
canHaveRamPorts = isinstance(stm, IfContainer) and arr_any(
chain(stm._inputs, stm._outputs),
lambda s: isinstance(s._dtype, HArray))
# render RAM ports
consumedOutputs = set()
if canHaveRamPorts:
for pType, memSig, addrSig, enSig, io in detectRamPorts(stm, stm.cond):
if pType == RAM_READ:
self.createRamReadNode(memSig, enSig, addrSig,
io, True)
consumedOutputs.add(io)
elif pType == RAM_WRITE:
self.createRamWriteNode(memSig, enSig, addrSig,
io, True)
consumedOutputs.add(memSig)
else:
raise TypeError()
for o in stm._outputs:
if o not in consumedOutputs:
self.renderForSignal(stm, o, True)
if not self.isVirtual:
self.netCtxs.applyConnections(self.node) | python | def renderContent(self):
"""
        Walk from outputs to inputs;
        for each public signal, register a port of the wrap node if required;
        lazily load all operator and statement nodes for signals.
"""
stm = self.stm
portCtx = self.portCtx
# for each inputs and outputs render expression trees
# walk statements and render muxs and memories
for o in stm._outputs:
if not self.isVirtual:
portCtx.register(o, PortType.OUTPUT)
canHaveRamPorts = isinstance(stm, IfContainer) and arr_any(
chain(stm._inputs, stm._outputs),
lambda s: isinstance(s._dtype, HArray))
# render RAM ports
consumedOutputs = set()
if canHaveRamPorts:
for pType, memSig, addrSig, enSig, io in detectRamPorts(stm, stm.cond):
if pType == RAM_READ:
self.createRamReadNode(memSig, enSig, addrSig,
io, True)
consumedOutputs.add(io)
elif pType == RAM_WRITE:
self.createRamWriteNode(memSig, enSig, addrSig,
io, True)
consumedOutputs.add(memSig)
else:
raise TypeError()
for o in stm._outputs:
if o not in consumedOutputs:
self.renderForSignal(stm, o, True)
if not self.isVirtual:
self.netCtxs.applyConnections(self.node) | [
"def",
"renderContent",
"(",
"self",
")",
":",
"stm",
"=",
"self",
".",
"stm",
"portCtx",
"=",
"self",
".",
"portCtx",
"# for each inputs and outputs render expression trees",
"# walk statements and render muxs and memories",
"for",
"o",
"in",
"stm",
".",
"_outputs",
":",
"if",
"not",
"self",
".",
"isVirtual",
":",
"portCtx",
".",
"register",
"(",
"o",
",",
"PortType",
".",
"OUTPUT",
")",
"canHaveRamPorts",
"=",
"isinstance",
"(",
"stm",
",",
"IfContainer",
")",
"and",
"arr_any",
"(",
"chain",
"(",
"stm",
".",
"_inputs",
",",
"stm",
".",
"_outputs",
")",
",",
"lambda",
"s",
":",
"isinstance",
"(",
"s",
".",
"_dtype",
",",
"HArray",
")",
")",
"# render RAM ports",
"consumedOutputs",
"=",
"set",
"(",
")",
"if",
"canHaveRamPorts",
":",
"for",
"pType",
",",
"memSig",
",",
"addrSig",
",",
"enSig",
",",
"io",
"in",
"detectRamPorts",
"(",
"stm",
",",
"stm",
".",
"cond",
")",
":",
"if",
"pType",
"==",
"RAM_READ",
":",
"self",
".",
"createRamReadNode",
"(",
"memSig",
",",
"enSig",
",",
"addrSig",
",",
"io",
",",
"True",
")",
"consumedOutputs",
".",
"add",
"(",
"io",
")",
"elif",
"pType",
"==",
"RAM_WRITE",
":",
"self",
".",
"createRamWriteNode",
"(",
"memSig",
",",
"enSig",
",",
"addrSig",
",",
"io",
",",
"True",
")",
"consumedOutputs",
".",
"add",
"(",
"memSig",
")",
"else",
":",
"raise",
"TypeError",
"(",
")",
"for",
"o",
"in",
"stm",
".",
"_outputs",
":",
"if",
"o",
"not",
"in",
"consumedOutputs",
":",
"self",
".",
"renderForSignal",
"(",
"stm",
",",
"o",
",",
"True",
")",
"if",
"not",
"self",
".",
"isVirtual",
":",
"self",
".",
"netCtxs",
".",
"applyConnections",
"(",
"self",
".",
"node",
")"
]
 | Walk from outputs to inputs;
for each public signal, register a port of the wrap node if required;
lazily load all operator and statement nodes for signals. | [
"Walk",
"from",
"outputs",
"to",
"inputs",
"for",
"each",
"public",
"signal",
"register",
"port",
"of",
"wrap",
"node",
"if",
"required",
"lazy",
"load",
"all",
"operator",
"and",
"statement",
"nodes",
"for",
"signals"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/statementRenderer.py#L385-L425 | train |
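
A minimal calling sketch implied by this record; the StatementRenderer constructor arguments are assumed, not shown here:

    renderer = StatementRenderer(...)  # constructor arguments assumed
    renderer.renderContent()           # registers output ports, renders RAM
                                       # ports and expression trees for stm
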
gofed/gofedlib | gofedlib/distribution/packagenamegenerator.py | PackageNameGenerator.generate | def generate(self, project):
"""
Package name construction is based on provider, not on prefix.
Prefix does not have to equal provider_prefix.
"""
for assignment in self.s2n_mapping:
if assignment["ipprefix"] == project:
self._name = assignment["package"]
return self
#
# github.com -> github
# code.google.com/p/ -> googlecode
# golang.org/x/ -> golangorg
# gopkg.in/check.v1 -> gopkg-check
# camlistore.org
#
name = project
if name.startswith("github.com"):
name = re.sub(r"^github\.com", "github", name)
if name.startswith("gopkg.in"):
name = re.sub(r"gopkg\.in", "gopkg", name)
# any version marks?
name = re.sub(r"\.v\d", "", name)
name = re.sub(r"/v\d/", "/", name)
if name.startswith("code.google.com/p"):
name = re.sub(r"^code\.google\.com/p", "googlecode", name)
if name.startswith("golang.org/x"):
name = re.sub(r"^golang\.org/x", "golangorg", name)
if name.startswith("google.golang.org"):
name = re.sub(r"^google\.golang\.org", "googlegolangorg", name)
if name.startswith("bitbucket.org"):
name = re.sub(r"^bitbucket\.org", "bitbucket", name)
if name.startswith("k8s.io"):
name = re.sub(r"^k8s\.io", "k8s", name)
if name.endswith(".org"):
name = re.sub(r"\.org$", "", name)
name = name.replace("/", "-")
self._name = "golang-%s" % name
return self | python | def generate(self, project):
"""
Package name construction is based on provider, not on prefix.
Prefix does not have to equal provider_prefix.
"""
for assignment in self.s2n_mapping:
if assignment["ipprefix"] == project:
self._name = assignment["package"]
return self
#
# github.com -> github
# code.google.com/p/ -> googlecode
# golang.org/x/ -> golangorg
# gopkg.in/check.v1 -> gopkg-check
# camlistore.org
#
name = project
if name.startswith("github.com"):
name = re.sub(r"^github\.com", "github", name)
if name.startswith("gopkg.in"):
name = re.sub(r"gopkg\.in", "gopkg", name)
# any version marks?
name = re.sub(r"\.v\d", "", name)
name = re.sub(r"/v\d/", "/", name)
if name.startswith("code.google.com/p"):
name = re.sub(r"^code\.google\.com/p", "googlecode", name)
if name.startswith("golang.org/x"):
name = re.sub(r"^golang\.org/x", "golangorg", name)
if name.startswith("google.golang.org"):
name = re.sub(r"^google\.golang\.org", "googlegolangorg", name)
if name.startswith("bitbucket.org"):
name = re.sub(r"^bitbucket\.org", "bitbucket", name)
if name.startswith("k8s.io"):
name = re.sub(r"^k8s\.io", "k8s", name)
if name.endswith(".org"):
name = re.sub(r"\.org$", "", name)
name = name.replace("/", "-")
self._name = "golang-%s" % name
return self | [
"def",
"generate",
"(",
"self",
",",
"project",
")",
":",
"for",
"assignment",
"in",
"self",
".",
"s2n_mapping",
":",
"if",
"assignment",
"[",
"\"ipprefix\"",
"]",
"==",
"project",
":",
"self",
".",
"_name",
"=",
"assignment",
"[",
"\"package\"",
"]",
"return",
"self",
"#",
"# github.com -> github",
"# code.google.com/p/ -> googlecode",
"# golang.org/x/ -> golangorg",
"# gopkg.in/check.v1 -> gopkg-check",
"# camlistore.org",
"#",
"name",
"=",
"project",
"if",
"name",
".",
"startswith",
"(",
"\"github.com\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^github\\.com\"",
",",
"\"github\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"gopkg.in\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"gopkg\\.in\"",
",",
"\"gopkg\"",
",",
"name",
")",
"# any version marks?",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"\\.v\\d\"",
",",
"\"\"",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"/v\\d/\"",
",",
"\"/\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"code.google.com/p\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^code\\.google\\.com/p\"",
",",
"\"googlecode\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"golang.org/x\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^golang\\.org/x\"",
",",
"\"golangorg\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"google.golang.org\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^google\\.golang\\.org\"",
",",
"\"googlegolangorg\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"bitbucket.org\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^bitbucket\\.org\"",
",",
"\"bitbucket\"",
",",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"k8s.io\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"^k8s\\.io\"",
",",
"\"k8s\"",
",",
"name",
")",
"if",
"name",
".",
"endswith",
"(",
"\".org\"",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"\\.org$\"",
",",
"\"\"",
",",
"name",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"\"/\"",
",",
"\"-\"",
")",
"self",
".",
"_name",
"=",
"\"golang-%s\"",
"%",
"name",
"return",
"self"
]
| Package name construction is based on provider, not on prefix.
Prefix does not have to equal provider_prefix. | [
"Package",
"name",
"construction",
"is",
"based",
"on",
"provider",
"not",
"on",
"prefix",
".",
"Prefix",
"does",
"not",
"have",
"to",
"equal",
"provider_prefix",
"."
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/distribution/packagenamegenerator.py#L9-L59 | train |
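
A runnable restatement of the rewrite rules above; `fedora_pkg_name` is a hypothetical helper, and the version-mark stripping is read as applying only to gopkg.in paths (the flattened indentation leaves this ambiguous):

    import re

    def fedora_pkg_name(project):
        name = re.sub(r"^github\.com", "github", project)
        if name.startswith("gopkg.in"):
            name = re.sub(r"^gopkg\.in", "gopkg", name)
            name = re.sub(r"\.v\d", "", name)    # drop version marks like ".v1"
            name = re.sub(r"/v\d/", "/", name)
        name = re.sub(r"^code\.google\.com/p", "googlecode", name)
        name = re.sub(r"^golang\.org/x", "golangorg", name)
        name = re.sub(r"^bitbucket\.org", "bitbucket", name)
        name = re.sub(r"\.org$", "", name)
        return "golang-%s" % name.replace("/", "-")

    assert fedora_pkg_name("gopkg.in/check.v1") == "golang-gopkg-check"
    assert fedora_pkg_name("github.com/stretchr/testify") == \
        "golang-github-stretchr-testify"
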
NikolayDachev/jadm | lib/paramiko-1.14.1/paramiko/hostkeys.py | HostKeys.hash_host | def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
salt = os.urandom(sha1().digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = decodebytes(b(salt))
assert len(salt) == sha1().digest_size
hmac = HMAC(salt, b(hostname), sha1).digest()
hostkey = '|1|%s|%s' % (u(encodebytes(salt)), u(encodebytes(hmac)))
return hostkey.replace('\n', '') | python | def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
salt = os.urandom(sha1().digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = decodebytes(b(salt))
assert len(salt) == sha1().digest_size
hmac = HMAC(salt, b(hostname), sha1).digest()
hostkey = '|1|%s|%s' % (u(encodebytes(salt)), u(encodebytes(hmac)))
return hostkey.replace('\n', '') | [
"def",
"hash_host",
"(",
"hostname",
",",
"salt",
"=",
"None",
")",
":",
"if",
"salt",
"is",
"None",
":",
"salt",
"=",
"os",
".",
"urandom",
"(",
"sha1",
"(",
")",
".",
"digest_size",
")",
"else",
":",
"if",
"salt",
".",
"startswith",
"(",
"'|1|'",
")",
":",
"salt",
"=",
"salt",
".",
"split",
"(",
"'|'",
")",
"[",
"2",
"]",
"salt",
"=",
"decodebytes",
"(",
"b",
"(",
"salt",
")",
")",
"assert",
"len",
"(",
"salt",
")",
"==",
"sha1",
"(",
")",
".",
"digest_size",
"hmac",
"=",
"HMAC",
"(",
"salt",
",",
"b",
"(",
"hostname",
")",
",",
"sha1",
")",
".",
"digest",
"(",
")",
"hostkey",
"=",
"'|1|%s|%s'",
"%",
"(",
"u",
"(",
"encodebytes",
"(",
"salt",
")",
")",
",",
"u",
"(",
"encodebytes",
"(",
"hmac",
")",
")",
")",
"return",
"hostkey",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")"
]
| Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str` | [
"Return",
"a",
"hashed",
"form",
"of",
"the",
"hostname",
"as",
"used",
"by",
"OpenSSH",
"when",
"storing",
"hashed",
"hostnames",
"in",
"the",
"known_hosts",
"file",
"."
]
| 12bb550445edfcd87506f7cba7a6a35d413c5511 | https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/hostkeys.py#L258-L276 | train |
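
The same known_hosts hashing scheme restated with the standard library only; it reproduces the `|1|<b64 salt>|<b64 hmac>` layout built above:

    import base64
    import hashlib
    import hmac
    import os

    def hash_host(hostname, salt=None):
        # HMAC-SHA1 keyed with the salt, over the hostname bytes
        if salt is None:
            salt = os.urandom(hashlib.sha1().digest_size)
        mac = hmac.new(salt, hostname.encode("utf-8"), hashlib.sha1).digest()
        return "|1|%s|%s" % (base64.b64encode(salt).decode("ascii"),
                             base64.b64encode(mac).decode("ascii"))

    print(hash_host("example.com"))  # salt is random, so output varies per call
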
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid._read_elem_nodes | def _read_elem_nodes(self, fid):
""" Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
        We store three types of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index)
"""
nodes = {}
# # prepare nodes
# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)
# nodes = np.zeros((number_of_nodes, 3), dtype=float)
# read in nodes
nodes_raw = np.empty((self.header['nr_nodes'], 3), dtype=float)
for nr in range(0, self.header['nr_nodes']):
node_line = fid.readline().lstrip()
nodes_raw[nr, :] = np.fromstring(
node_line, dtype=float, sep=' ')
# round node coordinates to 5th decimal point. Sometimes this is
# important when we deal with mal-formatted node data
nodes_raw[:, 1:3] = np.round(nodes_raw[:, 1:3], 5)
# check for CutMcK
# The check is based on the first node, but if one node was renumbered,
# so were all the others.
        if not np.array_equal(nodes_raw[:, 0], np.arange(1, nodes_raw.shape[0] + 1)):
self.header['cutmck'] = True
print(
'This grid was sorted using CutMcK. The nodes were resorted!')
else:
self.header['cutmck'] = False
# Rearrange nodes when CutMcK was used.
if(self.header['cutmck']):
nodes_cutmck = np.empty_like(nodes_raw)
nodes_cutmck_index = np.zeros(nodes_raw.shape[0], dtype=int)
for node in range(0, self.header['nr_nodes']):
new_index = np.where(nodes_raw[:, 0].astype(int) == (node + 1))
nodes_cutmck[new_index[0], 1:3] = nodes_raw[node, 1:3]
nodes_cutmck[new_index[0], 0] = new_index[0]
nodes_cutmck_index[node] = new_index[0]
# sort them
nodes_sorted = nodes_cutmck[nodes_cutmck_index, :]
nodes['presort'] = nodes_cutmck
nodes['cutmck_index'] = nodes_cutmck_index
nodes['rev_cutmck_index'] = np.argsort(nodes_cutmck_index)
else:
nodes_sorted = nodes_raw
nodes['presort'] = nodes_raw
# prepare node dict
nodes['raw'] = nodes_raw
nodes['sorted'] = nodes_sorted
self.nodes = nodes
self.nr_of_nodes = nodes['raw'].shape[0] | python | def _read_elem_nodes(self, fid):
""" Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
        We store three types of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index)
"""
nodes = {}
# # prepare nodes
# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)
# nodes = np.zeros((number_of_nodes, 3), dtype=float)
# read in nodes
nodes_raw = np.empty((self.header['nr_nodes'], 3), dtype=float)
for nr in range(0, self.header['nr_nodes']):
node_line = fid.readline().lstrip()
nodes_raw[nr, :] = np.fromstring(
node_line, dtype=float, sep=' ')
# round node coordinates to 5th decimal point. Sometimes this is
# important when we deal with mal-formatted node data
nodes_raw[:, 1:3] = np.round(nodes_raw[:, 1:3], 5)
# check for CutMcK
# The check is based on the first node, but if one node was renumbered,
# so were all the others.
        if not np.array_equal(nodes_raw[:, 0], np.arange(1, nodes_raw.shape[0] + 1)):
self.header['cutmck'] = True
print(
'This grid was sorted using CutMcK. The nodes were resorted!')
else:
self.header['cutmck'] = False
# Rearrange nodes when CutMcK was used.
if(self.header['cutmck']):
nodes_cutmck = np.empty_like(nodes_raw)
nodes_cutmck_index = np.zeros(nodes_raw.shape[0], dtype=int)
for node in range(0, self.header['nr_nodes']):
new_index = np.where(nodes_raw[:, 0].astype(int) == (node + 1))
nodes_cutmck[new_index[0], 1:3] = nodes_raw[node, 1:3]
nodes_cutmck[new_index[0], 0] = new_index[0]
nodes_cutmck_index[node] = new_index[0]
# sort them
nodes_sorted = nodes_cutmck[nodes_cutmck_index, :]
nodes['presort'] = nodes_cutmck
nodes['cutmck_index'] = nodes_cutmck_index
nodes['rev_cutmck_index'] = np.argsort(nodes_cutmck_index)
else:
nodes_sorted = nodes_raw
nodes['presort'] = nodes_raw
# prepare node dict
nodes['raw'] = nodes_raw
nodes['sorted'] = nodes_sorted
self.nodes = nodes
self.nr_of_nodes = nodes['raw'].shape[0] | [
"def",
"_read_elem_nodes",
"(",
"self",
",",
"fid",
")",
":",
"nodes",
"=",
"{",
"}",
"# # prepare nodes",
"# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)",
"# nodes = np.zeros((number_of_nodes, 3), dtype=float)",
"# read in nodes",
"nodes_raw",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
",",
"3",
")",
",",
"dtype",
"=",
"float",
")",
"for",
"nr",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
")",
":",
"node_line",
"=",
"fid",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"nodes_raw",
"[",
"nr",
",",
":",
"]",
"=",
"np",
".",
"fromstring",
"(",
"node_line",
",",
"dtype",
"=",
"float",
",",
"sep",
"=",
"' '",
")",
"# round node coordinates to 5th decimal point. Sometimes this is",
"# important when we deal with mal-formatted node data",
"nodes_raw",
"[",
":",
",",
"1",
":",
"3",
"]",
"=",
"np",
".",
"round",
"(",
"nodes_raw",
"[",
":",
",",
"1",
":",
"3",
"]",
",",
"5",
")",
"# check for CutMcK",
"# The check is based on the first node, but if one node was renumbered,",
"# so were all the others.",
"if",
"(",
"nodes_raw",
"[",
":",
",",
"0",
"]",
"!=",
"list",
"(",
"range",
"(",
"1",
",",
"nodes_raw",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
":",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
"=",
"True",
"print",
"(",
"'This grid was sorted using CutMcK. The nodes were resorted!'",
")",
"else",
":",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
"=",
"False",
"# Rearrange nodes when CutMcK was used.",
"if",
"(",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
")",
":",
"nodes_cutmck",
"=",
"np",
".",
"empty_like",
"(",
"nodes_raw",
")",
"nodes_cutmck_index",
"=",
"np",
".",
"zeros",
"(",
"nodes_raw",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"int",
")",
"for",
"node",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
")",
":",
"new_index",
"=",
"np",
".",
"where",
"(",
"nodes_raw",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"(",
"node",
"+",
"1",
")",
")",
"nodes_cutmck",
"[",
"new_index",
"[",
"0",
"]",
",",
"1",
":",
"3",
"]",
"=",
"nodes_raw",
"[",
"node",
",",
"1",
":",
"3",
"]",
"nodes_cutmck",
"[",
"new_index",
"[",
"0",
"]",
",",
"0",
"]",
"=",
"new_index",
"[",
"0",
"]",
"nodes_cutmck_index",
"[",
"node",
"]",
"=",
"new_index",
"[",
"0",
"]",
"# sort them",
"nodes_sorted",
"=",
"nodes_cutmck",
"[",
"nodes_cutmck_index",
",",
":",
"]",
"nodes",
"[",
"'presort'",
"]",
"=",
"nodes_cutmck",
"nodes",
"[",
"'cutmck_index'",
"]",
"=",
"nodes_cutmck_index",
"nodes",
"[",
"'rev_cutmck_index'",
"]",
"=",
"np",
".",
"argsort",
"(",
"nodes_cutmck_index",
")",
"else",
":",
"nodes_sorted",
"=",
"nodes_raw",
"nodes",
"[",
"'presort'",
"]",
"=",
"nodes_raw",
"# prepare node dict",
"nodes",
"[",
"'raw'",
"]",
"=",
"nodes_raw",
"nodes",
"[",
"'sorted'",
"]",
"=",
"nodes_sorted",
"self",
".",
"nodes",
"=",
"nodes",
"self",
".",
"nr_of_nodes",
"=",
"nodes",
"[",
"'raw'",
"]",
".",
"shape",
"[",
"0",
"]"
]
| Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
We store three types of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index) | [
"Read",
"the",
"nodes",
"from",
"an",
"opened",
"elem",
".",
"dat",
"file",
".",
"Correct",
"for",
"CutMcK",
"transformations",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L160-L229 | train |
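
A simplified toy illustration of the CutMcK bookkeeping (not the exact loop above): column 0 of nodes_raw carries the renumbered node ids, so an argsort on that column recovers the sorted table and both index arrays:

    import numpy as np

    nodes_raw = np.array([[2., 0.5, -1.0],    # was node 2 before CutMcK
                          [3., 1.0, -1.0],    # was node 3
                          [1., 0.0, -1.0]])   # was node 1
    cutmck_index = np.argsort(nodes_raw[:, 0])
    nodes_sorted = nodes_raw[cutmck_index]
    rev_cutmck_index = np.argsort(cutmck_index)
    assert (nodes_sorted[:, 0] == [1., 2., 3.]).all()
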
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.calculate_dimensions | def calculate_dimensions(self):
"""For a regular grid, calculate the element and node dimensions
"""
x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node
self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size
        self.nr_elements_x = self.elements.shape[0] // (self.nr_nodes_z - 1)
self.nr_nodes_x = self.nr_elements_x + 1
self.nr_elements_z = self.nr_nodes_z - 1 | python | def calculate_dimensions(self):
"""For a regular grid, calculate the element and node dimensions
"""
x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node
self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size
        self.nr_elements_x = self.elements.shape[0] // (self.nr_nodes_z - 1)
self.nr_nodes_x = self.nr_elements_x + 1
self.nr_elements_z = self.nr_nodes_z - 1 | [
"def",
"calculate_dimensions",
"(",
"self",
")",
":",
"x_coordinates",
"=",
"np",
".",
"sort",
"(",
"self",
".",
"grid",
"[",
"'x'",
"]",
"[",
":",
",",
"0",
"]",
")",
"# first x node",
"self",
".",
"nr_nodes_z",
"=",
"np",
".",
"where",
"(",
"x_coordinates",
"==",
"x_coordinates",
"[",
"0",
"]",
")",
"[",
"0",
"]",
".",
"size",
"self",
".",
"nr_elements_x",
"=",
"self",
".",
"elements",
".",
"shape",
"[",
"0",
"]",
"/",
"(",
"self",
".",
"nr_nodes_z",
"-",
"1",
")",
"self",
".",
"nr_nodes_x",
"=",
"self",
".",
"nr_elements_x",
"+",
"1",
"self",
".",
"nr_elements_z",
"=",
"self",
".",
"nr_nodes_z",
"-",
"1"
]
| For a regular grid, calculate the element and node dimensions | [
"For",
"a",
"regular",
"grid",
"calculate",
"the",
"element",
"and",
"node",
"dimensions"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L306-L313 | train |
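
A worked example of the counting logic above: in a regular quad grid, nr_nodes_z is the number of nodes sharing the first x-coordinate, and the element counts follow by division:

    # a 4 x 3 node grid has 3 x 2 elements
    nr_nodes_z = 3
    nr_elements_total = 6
    nr_elements_x = nr_elements_total // (nr_nodes_z - 1)   # 3
    nr_nodes_x = nr_elements_x + 1                          # 4
    nr_elements_z = nr_nodes_z - 1                          # 2
    assert (nr_elements_x, nr_nodes_x, nr_elements_z) == (3, 4, 2)
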
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid._read_elem_neighbors | def _read_elem_neighbors(self, fid):
"""Read the boundary-element-neighbors from the end of the file
"""
# get number of boundary elements
# types 11 and 12 are boundary elements
sizes = sum([len(self.element_data[key]) for key in (11, 12) if
self.element_data.get(key, None) is not None])
self.neighbors = []
try:
for i in range(0, sizes):
self.neighbors.append(int(fid.readline().strip()))
except Exception as e:
raise Exception('Not enough neighbors in file') | python | def _read_elem_neighbors(self, fid):
"""Read the boundary-element-neighbors from the end of the file
"""
# get number of boundary elements
# types 11 and 12 are boundary elements
sizes = sum([len(self.element_data[key]) for key in (11, 12) if
self.element_data.get(key, None) is not None])
self.neighbors = []
try:
for i in range(0, sizes):
self.neighbors.append(int(fid.readline().strip()))
except Exception as e:
raise Exception('Not enough neighbors in file') | [
"def",
"_read_elem_neighbors",
"(",
"self",
",",
"fid",
")",
":",
"# get number of boundary elements",
"# types 11 and 12 are boundary elements",
"sizes",
"=",
"sum",
"(",
"[",
"len",
"(",
"self",
".",
"element_data",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"(",
"11",
",",
"12",
")",
"if",
"self",
".",
"element_data",
".",
"get",
"(",
"key",
",",
"None",
")",
"is",
"not",
"None",
"]",
")",
"self",
".",
"neighbors",
"=",
"[",
"]",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"sizes",
")",
":",
"self",
".",
"neighbors",
".",
"append",
"(",
"int",
"(",
"fid",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'Not enough neighbors in file'",
")"
]
| Read the boundary-element-neighbors from the end of the file | [
"Read",
"the",
"boundary",
"-",
"element",
"-",
"neighbors",
"from",
"the",
"end",
"of",
"the",
"file"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L330-L343 | train |
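
An illustration of the neighbor-count logic with an in-memory file; the element_data contents are hypothetical:

    import io

    element_data = {11: [101, 102], 12: [103]}   # hypothetical boundary elements
    fid = io.StringIO("7\n8\n9\n")
    sizes = sum(len(element_data[key]) for key in (11, 12)
                if element_data.get(key) is not None)
    neighbors = [int(fid.readline().strip()) for _ in range(sizes)]
    assert neighbors == [7, 8, 9]
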
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.load_grid | def load_grid(self, elem_file, elec_file):
"""Load elem.dat and elec.dat
"""
self.load_elem_file(elem_file)
self.load_elec_file(elec_file) | python | def load_grid(self, elem_file, elec_file):
"""Load elem.dat and elec.dat
"""
self.load_elem_file(elem_file)
self.load_elec_file(elec_file) | [
"def",
"load_grid",
"(",
"self",
",",
"elem_file",
",",
"elec_file",
")",
":",
"self",
".",
"load_elem_file",
"(",
"elem_file",
")",
"self",
".",
"load_elec_file",
"(",
"elec_file",
")"
]
| Load elem.dat and elec.dat | [
"Load",
"elem",
".",
"dat",
"and",
"elec",
".",
"dat"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L411-L415 | train |
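
A minimal usage sketch; the module path follows this record's URL, and it is assumed that crt_grid can be instantiated without arguments, which the record does not show:

    import crtomo.grid as CRGrid

    grid = CRGrid.crt_grid()
    grid.load_grid('grid/elem.dat', 'grid/elec.dat')
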
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.get_element_centroids | def get_element_centroids(self):
"""return the central points of all elements
Returns
-------
Nx2 array
x/z coordinates for all (N) elements
"""
centroids = np.vstack((
np.mean(self.grid['x'], axis=1), np.mean(self.grid['z'], axis=1)
)).T
return centroids | python | def get_element_centroids(self):
"""return the central points of all elements
Returns
-------
Nx2 array
x/z coordinates for all (N) elements
"""
centroids = np.vstack((
np.mean(self.grid['x'], axis=1), np.mean(self.grid['z'], axis=1)
)).T
return centroids | [
"def",
"get_element_centroids",
"(",
"self",
")",
":",
"centroids",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"mean",
"(",
"self",
".",
"grid",
"[",
"'x'",
"]",
",",
"axis",
"=",
"1",
")",
",",
"np",
".",
"mean",
"(",
"self",
".",
"grid",
"[",
"'z'",
"]",
",",
"axis",
"=",
"1",
")",
")",
")",
".",
"T",
"return",
"centroids"
]
| return the central points of all elements
Returns
-------
Nx2 array
x/z coordinates for all (N) elements | [
"return",
"the",
"central",
"points",
"of",
"all",
"elements"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L494-L506 | train |
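
The centroid computation restated on two toy triangles; grid['x'] and grid['z'] hold per-element node coordinates, as the vstack above implies:

    import numpy as np

    grid_x = np.array([[0.0, 1.0, 0.0], [1.0, 1.0, 0.0]])
    grid_z = np.array([[0.0, 0.0, -1.0], [0.0, -1.0, -1.0]])
    centroids = np.vstack((grid_x.mean(axis=1), grid_z.mean(axis=1))).T
    # one x/z row per element: approximately [[0.33, -0.33], [0.67, -0.67]]
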
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.get_internal_angles | def get_internal_angles(self):
"""Compute all internal angles of the grid
Returns
-------
numpy.ndarray
NxK array with N the number of elements, and K the number of nodes,
filled with the internal angles in degrees
"""
angles = []
for elx, elz in zip(self.grid['x'], self.grid['z']):
el_angles = []
xy = np.vstack((elx, elz))
for i in range(0, elx.size):
i1 = (i - 1) % elx.size
i2 = (i + 1) % elx.size
a = (xy[:, i] - xy[:, i1])
b = (xy[:, i2] - xy[:, i])
# note that nodes are ordered counter-clockwise!
angle = np.pi - np.arctan2(
a[0] * b[1] - a[1] * b[0],
a[0] * b[0] + a[1] * b[1]
)
el_angles.append(angle * 180 / np.pi)
angles.append(el_angles)
return np.array(angles) | python | def get_internal_angles(self):
"""Compute all internal angles of the grid
Returns
-------
numpy.ndarray
NxK array with N the number of elements, and K the number of nodes,
filled with the internal angles in degrees
"""
angles = []
for elx, elz in zip(self.grid['x'], self.grid['z']):
el_angles = []
xy = np.vstack((elx, elz))
for i in range(0, elx.size):
i1 = (i - 1) % elx.size
i2 = (i + 1) % elx.size
a = (xy[:, i] - xy[:, i1])
b = (xy[:, i2] - xy[:, i])
# note that nodes are ordered counter-clockwise!
angle = np.pi - np.arctan2(
a[0] * b[1] - a[1] * b[0],
a[0] * b[0] + a[1] * b[1]
)
el_angles.append(angle * 180 / np.pi)
angles.append(el_angles)
return np.array(angles) | [
"def",
"get_internal_angles",
"(",
"self",
")",
":",
"angles",
"=",
"[",
"]",
"for",
"elx",
",",
"elz",
"in",
"zip",
"(",
"self",
".",
"grid",
"[",
"'x'",
"]",
",",
"self",
".",
"grid",
"[",
"'z'",
"]",
")",
":",
"el_angles",
"=",
"[",
"]",
"xy",
"=",
"np",
".",
"vstack",
"(",
"(",
"elx",
",",
"elz",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"elx",
".",
"size",
")",
":",
"i1",
"=",
"(",
"i",
"-",
"1",
")",
"%",
"elx",
".",
"size",
"i2",
"=",
"(",
"i",
"+",
"1",
")",
"%",
"elx",
".",
"size",
"a",
"=",
"(",
"xy",
"[",
":",
",",
"i",
"]",
"-",
"xy",
"[",
":",
",",
"i1",
"]",
")",
"b",
"=",
"(",
"xy",
"[",
":",
",",
"i2",
"]",
"-",
"xy",
"[",
":",
",",
"i",
"]",
")",
"# note that nodes are ordered counter-clockwise!",
"angle",
"=",
"np",
".",
"pi",
"-",
"np",
".",
"arctan2",
"(",
"a",
"[",
"0",
"]",
"*",
"b",
"[",
"1",
"]",
"-",
"a",
"[",
"1",
"]",
"*",
"b",
"[",
"0",
"]",
",",
"a",
"[",
"0",
"]",
"*",
"b",
"[",
"0",
"]",
"+",
"a",
"[",
"1",
"]",
"*",
"b",
"[",
"1",
"]",
")",
"el_angles",
".",
"append",
"(",
"angle",
"*",
"180",
"/",
"np",
".",
"pi",
")",
"angles",
".",
"append",
"(",
"el_angles",
")",
"return",
"np",
".",
"array",
"(",
"angles",
")"
]
| Compute all internal angles of the grid
Returns
-------
numpy.ndarray
NxK array with N the number of elements, and K the number of nodes,
filled with the internal angles in degrees | [
"Compute",
"all",
"internal",
"angles",
"of",
"the",
"grid"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L519-L547 | train |
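
The angle formula above, checked in isolation on a counter-clockwise right triangle:

    import numpy as np

    xy = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]).T   # shape (2, N), CCW
    n = xy.shape[1]
    angles = []
    for i in range(n):
        a = xy[:, i] - xy[:, (i - 1) % n]
        b = xy[:, (i + 1) % n] - xy[:, i]
        angle = np.pi - np.arctan2(a[0] * b[1] - a[1] * b[0],
                                   a[0] * b[0] + a[1] * b[1])
        angles.append(np.degrees(angle))
    assert np.allclose(sorted(angles), [45.0, 45.0, 90.0])
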
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.Wm | def Wm(self):
"""Return the smoothing regularization matrix Wm of the grid
"""
centroids = self.get_element_centroids()
Wm = scipy.sparse.csr_matrix(
(self.nr_of_elements, self.nr_of_elements))
# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))
for i, nb in enumerate(self.element_neighbors):
for j, edges in zip(nb, self.element_neighbors_edges[i]):
# side length
edge_coords = self.nodes['presort'][edges][:, 1:]
edge_length = np.linalg.norm(
edge_coords[1, :] - edge_coords[0, :]
)
distance = np.linalg.norm(centroids[i] - centroids[j])
# main diagonal
Wm[i, i] += edge_length / distance
# side diagonals
Wm[i, j] -= edge_length / distance
return Wm | python | def Wm(self):
"""Return the smoothing regularization matrix Wm of the grid
"""
centroids = self.get_element_centroids()
Wm = scipy.sparse.csr_matrix(
(self.nr_of_elements, self.nr_of_elements))
# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))
for i, nb in enumerate(self.element_neighbors):
for j, edges in zip(nb, self.element_neighbors_edges[i]):
# side length
edge_coords = self.nodes['presort'][edges][:, 1:]
edge_length = np.linalg.norm(
edge_coords[1, :] - edge_coords[0, :]
)
distance = np.linalg.norm(centroids[i] - centroids[j])
# main diagonal
Wm[i, i] += edge_length / distance
# side diagonals
Wm[i, j] -= edge_length / distance
return Wm | [
"def",
"Wm",
"(",
"self",
")",
":",
"centroids",
"=",
"self",
".",
"get_element_centroids",
"(",
")",
"Wm",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"(",
"self",
".",
"nr_of_elements",
",",
"self",
".",
"nr_of_elements",
")",
")",
"# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))",
"for",
"i",
",",
"nb",
"in",
"enumerate",
"(",
"self",
".",
"element_neighbors",
")",
":",
"for",
"j",
",",
"edges",
"in",
"zip",
"(",
"nb",
",",
"self",
".",
"element_neighbors_edges",
"[",
"i",
"]",
")",
":",
"# side length",
"edge_coords",
"=",
"self",
".",
"nodes",
"[",
"'presort'",
"]",
"[",
"edges",
"]",
"[",
":",
",",
"1",
":",
"]",
"edge_length",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"edge_coords",
"[",
"1",
",",
":",
"]",
"-",
"edge_coords",
"[",
"0",
",",
":",
"]",
")",
"distance",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"centroids",
"[",
"i",
"]",
"-",
"centroids",
"[",
"j",
"]",
")",
"# main diagonal",
"Wm",
"[",
"i",
",",
"i",
"]",
"+=",
"edge_length",
"/",
"distance",
"# side diagonals",
"Wm",
"[",
"i",
",",
"j",
"]",
"-=",
"edge_length",
"/",
"distance",
"return",
"Wm"
]
| Return the smoothing regularization matrix Wm of the grid | [
"Return",
"the",
"smoothing",
"regularization",
"matrix",
"Wm",
"of",
"the",
"grid"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L672-L694 | train |
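
A toy smoothing matrix assembled the same way, with unit edge lengths and centroid distances; lil_matrix is used here because item assignment on a csr_matrix, as in the method above, triggers scipy's SparseEfficiencyWarning:

    import scipy.sparse

    # three elements in a row: 0 - 1 - 2
    Wm = scipy.sparse.lil_matrix((3, 3))
    for i, j in [(0, 1), (1, 0), (1, 2), (2, 1)]:
        Wm[i, i] += 1.0   # edge_length / distance == 1.0 in this toy case
        Wm[i, j] -= 1.0
    print(Wm.toarray())
    # [[ 1. -1.  0.]
    #  [-1.  2. -1.]
    #  [ 0. -1.  1.]]
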
geophysics-ubonn/crtomo_tools | lib/crtomo/tdManager.py | tdMan.create_tomodir | def create_tomodir(self, directory):
"""Create a tomodir subdirectory structure in the given directory
"""
pwd = os.getcwd()
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(directory)
directories = (
'config',
'exe',
'grid',
'mod',
'mod/pot',
'mod/sens',
'rho',
)
for directory in directories:
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(pwd) | python | def create_tomodir(self, directory):
"""Create a tomodir subdirectory structure in the given directory
"""
pwd = os.getcwd()
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(directory)
directories = (
'config',
'exe',
'grid',
'mod',
'mod/pot',
'mod/sens',
'rho',
)
for directory in directories:
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(pwd) | [
"def",
"create_tomodir",
"(",
"self",
",",
"directory",
")",
":",
"pwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"os",
".",
"chdir",
"(",
"directory",
")",
"directories",
"=",
"(",
"'config'",
",",
"'exe'",
",",
"'grid'",
",",
"'mod'",
",",
"'mod/pot'",
",",
"'mod/sens'",
",",
"'rho'",
",",
")",
"for",
"directory",
"in",
"directories",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"os",
".",
"chdir",
"(",
"pwd",
")"
]
| Create a tomodir subdirectory structure in the given directory | [
"Create",
"a",
"tomodir",
"subdirectory",
"structure",
"in",
"the",
"given",
"directory"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L260-L281 | train |
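
An equivalent layout creation without the chdir round trip; it relies on the exist_ok flag of os.makedirs (Python 3.2+):

    import os

    base = 'tomodir_example'
    for sub in ('config', 'exe', 'grid', 'mod/pot', 'mod/sens', 'rho'):
        os.makedirs(os.path.join(base, sub), exist_ok=True)   # 'mod' is implied
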
geophysics-ubonn/crtomo_tools | lib/crtomo/tdManager.py | tdMan.load_rho_file | def load_rho_file(self, filename):
"""Load a forward model from a rho.dat file
Parameters
----------
filename: string
filename to rho.dat file
Returns
-------
pid_mag: int
parameter id for the magnitude model
pid_pha: int
parameter id for the phase model
"""
pids = self.parman.load_from_rho_file(filename)
self.register_magnitude_model(pids[0])
self.register_phase_model(pids[1])
return pids | python | def load_rho_file(self, filename):
"""Load a forward model from a rho.dat file
Parameters
----------
filename: string
filename to rho.dat file
Returns
-------
pid_mag: int
parameter id for the magnitude model
pid_pha: int
parameter id for the phase model
"""
pids = self.parman.load_from_rho_file(filename)
self.register_magnitude_model(pids[0])
self.register_phase_model(pids[1])
return pids | [
"def",
"load_rho_file",
"(",
"self",
",",
"filename",
")",
":",
"pids",
"=",
"self",
".",
"parman",
".",
"load_from_rho_file",
"(",
"filename",
")",
"self",
".",
"register_magnitude_model",
"(",
"pids",
"[",
"0",
"]",
")",
"self",
".",
"register_phase_model",
"(",
"pids",
"[",
"1",
"]",
")",
"return",
"pids"
]
| Load a forward model from a rho.dat file
Parameters
----------
filename: string
filename to rho.dat file
Returns
-------
pid_mag: int
parameter id for the magnitude model
pid_pha: int
parameter id for the phase model | [
"Load",
"a",
"forward",
"model",
"from",
"a",
"rho",
".",
"dat",
"file"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L295-L314 | train |
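
A usage sketch; `td` is a hypothetical tdMan instance and the rho.dat path is illustrative:

    pid_mag, pid_pha = td.load_rho_file('rho/rho.dat')
    # both parameter sets are now registered as td's forward model
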
geophysics-ubonn/crtomo_tools | lib/crtomo/tdManager.py | tdMan.save_to_tomodir | def save_to_tomodir(self, directory):
"""Save the tomodir instance to a directory structure.
Note
----
Test cases:
* modeling only
* inversion only
* modeling and inversion
"""
self.create_tomodir(directory)
self.grid.save_elem_file(
directory + os.sep + 'grid/elem.dat'
)
self.grid.save_elec_file(
directory + os.sep + 'grid/elec.dat'
)
# modeling
if self.configs.configs is not None:
self.configs.write_crmod_config(
directory + os.sep + 'config/config.dat'
)
if self.assignments['forward_model'] is not None:
self.parman.save_to_rho_file(
directory + os.sep + 'rho/rho.dat',
self.assignments['forward_model'][0],
self.assignments['forward_model'][1],
)
self.crmod_cfg.write_to_file(
directory + os.sep + 'exe/crmod.cfg'
)
if self.assignments['measurements'] is not None:
self.configs.write_crmod_volt(
directory + os.sep + 'mod/volt.dat',
self.assignments['measurements']
)
if self.assignments['sensitivities'] is not None:
self._save_sensitivities(
directory + os.sep + 'mod/sens',
)
if self.assignments['potentials'] is not None:
self._save_potentials(
directory + os.sep + 'mod/pot',
)
# inversion
self.crtomo_cfg.write_to_file(
directory + os.sep + 'exe/crtomo.cfg'
)
if self.noise_model is not None:
self.noise_model.write_crt_noisemod(
directory + os.sep + 'exe/crt.noisemod'
)
if not os.path.isdir(directory + os.sep + 'inv'):
os.makedirs(directory + os.sep + 'inv') | python | def save_to_tomodir(self, directory):
"""Save the tomodir instance to a directory structure.
Note
----
Test cases:
* modeling only
* inversion only
* modeling and inversion
"""
self.create_tomodir(directory)
self.grid.save_elem_file(
directory + os.sep + 'grid/elem.dat'
)
self.grid.save_elec_file(
directory + os.sep + 'grid/elec.dat'
)
# modeling
if self.configs.configs is not None:
self.configs.write_crmod_config(
directory + os.sep + 'config/config.dat'
)
if self.assignments['forward_model'] is not None:
self.parman.save_to_rho_file(
directory + os.sep + 'rho/rho.dat',
self.assignments['forward_model'][0],
self.assignments['forward_model'][1],
)
self.crmod_cfg.write_to_file(
directory + os.sep + 'exe/crmod.cfg'
)
if self.assignments['measurements'] is not None:
self.configs.write_crmod_volt(
directory + os.sep + 'mod/volt.dat',
self.assignments['measurements']
)
if self.assignments['sensitivities'] is not None:
self._save_sensitivities(
directory + os.sep + 'mod/sens',
)
if self.assignments['potentials'] is not None:
self._save_potentials(
directory + os.sep + 'mod/pot',
)
# inversion
self.crtomo_cfg.write_to_file(
directory + os.sep + 'exe/crtomo.cfg'
)
if self.noise_model is not None:
self.noise_model.write_crt_noisemod(
directory + os.sep + 'exe/crt.noisemod'
)
if not os.path.isdir(directory + os.sep + 'inv'):
os.makedirs(directory + os.sep + 'inv') | [
"def",
"save_to_tomodir",
"(",
"self",
",",
"directory",
")",
":",
"self",
".",
"create_tomodir",
"(",
"directory",
")",
"self",
".",
"grid",
".",
"save_elem_file",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'grid/elem.dat'",
")",
"self",
".",
"grid",
".",
"save_elec_file",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'grid/elec.dat'",
")",
"# modeling",
"if",
"self",
".",
"configs",
".",
"configs",
"is",
"not",
"None",
":",
"self",
".",
"configs",
".",
"write_crmod_config",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'config/config.dat'",
")",
"if",
"self",
".",
"assignments",
"[",
"'forward_model'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"parman",
".",
"save_to_rho_file",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'rho/rho.dat'",
",",
"self",
".",
"assignments",
"[",
"'forward_model'",
"]",
"[",
"0",
"]",
",",
"self",
".",
"assignments",
"[",
"'forward_model'",
"]",
"[",
"1",
"]",
",",
")",
"self",
".",
"crmod_cfg",
".",
"write_to_file",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'exe/crmod.cfg'",
")",
"if",
"self",
".",
"assignments",
"[",
"'measurements'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"configs",
".",
"write_crmod_volt",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'mod/volt.dat'",
",",
"self",
".",
"assignments",
"[",
"'measurements'",
"]",
")",
"if",
"self",
".",
"assignments",
"[",
"'sensitivities'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_save_sensitivities",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'mod/sens'",
",",
")",
"if",
"self",
".",
"assignments",
"[",
"'potentials'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_save_potentials",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'mod/pot'",
",",
")",
"# inversion",
"self",
".",
"crtomo_cfg",
".",
"write_to_file",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'exe/crtomo.cfg'",
")",
"if",
"self",
".",
"noise_model",
"is",
"not",
"None",
":",
"self",
".",
"noise_model",
".",
"write_crt_noisemod",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'exe/crt.noisemod'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'inv'",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
"+",
"os",
".",
"sep",
"+",
"'inv'",
")"
]
| Save the tomodir instance to a directory structure.
Note
----
Test cases:
* modeling only
* inversion only
* modeling and inversion | [
"Save",
"the",
"tomodir",
"instance",
"to",
"a",
"directory",
"structure",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L316-L383 | train |
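
A round-trip sketch; `td` is a hypothetical, fully configured tdMan instance:

    td.save_to_tomodir('td_run')
    # expected tree (depending on what is assigned): td_run/grid/elem.dat,
    # td_run/config/config.dat, td_run/rho/rho.dat, td_run/exe/crtomo.cfg,
    # td_run/mod/volt.dat, td_run/inv/
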
geophysics-ubonn/crtomo_tools | lib/crtomo/tdManager.py | tdMan._save_sensitivities | def _save_sensitivities(self, directory):
"""save sensitivities to a directory
"""
print('saving sensitivities')
digits = int(np.ceil(np.log10(self.configs.configs.shape[0])))
for i in range(0, self.configs.configs.shape[0]):
sens_data, meta_data = self.get_sensitivity(i)
filename_raw = 'sens{0:0' + '{0}'.format(digits) + '}.dat'
filename = directory + os.sep + filename_raw.format(i + 1)
grid_xz = self.grid.get_element_centroids()
all_data = np.vstack((
grid_xz[:, 0],
                grid_xz[:, 1],
sens_data[0],
sens_data[1],
)).T
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0} {1}\n'.format(meta_data[0], meta_data[1]),
'utf-8'
))
np.savetxt(fid, all_data) | python | def _save_sensitivities(self, directory):
"""save sensitivities to a directory
"""
print('saving sensitivities')
digits = int(np.ceil(np.log10(self.configs.configs.shape[0])))
for i in range(0, self.configs.configs.shape[0]):
sens_data, meta_data = self.get_sensitivity(i)
filename_raw = 'sens{0:0' + '{0}'.format(digits) + '}.dat'
filename = directory + os.sep + filename_raw.format(i + 1)
grid_xz = self.grid.get_element_centroids()
all_data = np.vstack((
grid_xz[:, 0],
                grid_xz[:, 1],
sens_data[0],
sens_data[1],
)).T
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0} {1}\n'.format(meta_data[0], meta_data[1]),
'utf-8'
))
np.savetxt(fid, all_data) | [
"def",
"_save_sensitivities",
"(",
"self",
",",
"directory",
")",
":",
"print",
"(",
"'saving sensitivities'",
")",
"digits",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log10",
"(",
"self",
".",
"configs",
".",
"configs",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"configs",
".",
"configs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"sens_data",
",",
"meta_data",
"=",
"self",
".",
"get_sensitivity",
"(",
"i",
")",
"filename_raw",
"=",
"'sens{0:0'",
"+",
"'{0}'",
".",
"format",
"(",
"digits",
")",
"+",
"'}.dat'",
"filename",
"=",
"directory",
"+",
"os",
".",
"sep",
"+",
"filename_raw",
".",
"format",
"(",
"i",
"+",
"1",
")",
"grid_xz",
"=",
"self",
".",
"grid",
".",
"get_element_centroids",
"(",
")",
"all_data",
"=",
"np",
".",
"vstack",
"(",
"(",
"grid_xz",
"[",
":",
",",
"0",
"]",
",",
"grid_xz",
"[",
":",
",",
"0",
"]",
",",
"sens_data",
"[",
"0",
"]",
",",
"sens_data",
"[",
"1",
"]",
",",
")",
")",
".",
"T",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fid",
":",
"fid",
".",
"write",
"(",
"bytes",
"(",
"'{0} {1}\\n'",
".",
"format",
"(",
"meta_data",
"[",
"0",
"]",
",",
"meta_data",
"[",
"1",
"]",
")",
",",
"'utf-8'",
")",
")",
"np",
".",
"savetxt",
"(",
"fid",
",",
"all_data",
")"
]
| save sensitivities to a directory | [
"save",
"sensitivities",
"to",
"a",
"directory"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L385-L407 | train |
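
The zero-padded filename scheme above, checked in isolation:

    import numpy as np

    n_configs = 250
    digits = int(np.ceil(np.log10(n_configs)))                 # 3
    filename_raw = 'sens{0:0' + '{0}'.format(digits) + '}.dat'
    assert filename_raw.format(1) == 'sens001.dat'
    assert filename_raw.format(250) == 'sens250.dat'
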